Python source code formatting and optimization

pull/1942/head
Axylum 7 months ago
parent 7ec56895a3
commit 71b434f0f8

@@ -9,14 +9,15 @@ networks.
import sys
if __name__ == "__main__":
# Check if the user is using the correct version of Python
python_version = sys.version.split()[0]
if sys.version_info < (3, 6):
print("Sherlock requires Python 3.6+\nYou are using Python %s, which is not supported by Sherlock" % (python_version))
print("Sherlock requires Python 3.6+\nYou are using Python %s, which is not supported by Sherlock" % (
python_version))
sys.exit(1)
import sherlock
sherlock.main()
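A side note on the guard above: the error message keeps %-style interpolation rather than an f-string, most likely because an f-string would fail to parse at all on the pre-3.6 interpreters this check is meant to catch, so the friendly message would never be shown. A minimal standalone sketch of the same pattern (message text is a placeholder):

import sys

if sys.version_info < (3, 6):
    # %-formatting still parses on old interpreters; an f-string here would
    # raise SyntaxError before this message could ever be printed.
    print("This tool requires Python 3.6+\nYou are using Python %s"
          % sys.version.split()[0])
    sys.exit(1)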

@@ -10,10 +10,10 @@ class QueryStatus(Enum):
Describes status of query about a given username.
"""
CLAIMED = "Claimed" # Username Detected
AVAILABLE = "Available" # Username Not Detected
UNKNOWN = "Unknown" # Error Occurred While Trying To Detect Username
ILLEGAL = "Illegal" # Username Not Allowable For This Site
CLAIMED = "Claimed" # Username Detected
AVAILABLE = "Available" # Username Not Detected
UNKNOWN = "Unknown" # Error Occurred While Trying To Detect Username
ILLEGAL = "Illegal" # Username Not Allowable For This Site
def __str__(self):
"""Convert Object To String.
@@ -26,11 +26,13 @@ class QueryStatus(Enum):
"""
return self.value
class QueryResult():
"""Query Result Object.
Describes result of query about a given username.
"""
def __init__(self, username, site_name, site_url_user, status,
query_time=None, context=None):
"""Create Query Result Object.
@@ -61,12 +63,12 @@ class QueryResult():
Nothing.
"""
self.username = username
self.site_name = site_name
self.username = username
self.site_name = site_name
self.site_url_user = site_url_user
self.status = status
self.query_time = query_time
self.context = context
self.status = status
self.query_time = query_time
self.context = context
return
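For context, a minimal usage sketch of the objects touched above; the values and the import path are assumptions, not taken from the commit:

# Import path is an assumption; adjust to however the result module is exposed.
from result import QueryResult, QueryStatus

query = QueryResult(username="john_doe",
                    site_name="GitHub",
                    site_url_user="https://github.com/john_doe",
                    status=QueryStatus.CLAIMED,
                    query_time=0.42)

print(str(query.status))  # -> "Claimed", since QueryStatus.__str__ returns self.value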

@@ -724,7 +724,8 @@ def main():
]
)
for site in results:
if args.print_found and not args.print_all and results[site]["status"].status != QueryStatus.CLAIMED:
if args.print_found and not args.print_all and results[site][
"status"].status != QueryStatus.CLAIMED:
continue
response_time_s = results[site]["status"].query_time
@@ -763,7 +764,9 @@ def main():
exists.append(str(results[site]["status"].status))
http_status.append(results[site]["http_status"])
DataFrame = pd.DataFrame({"username": usernames, "name": names, "url_main": url_main, "url_user": url_user, "exists": exists, "http_status": http_status, "response_time_s": response_time_s})
DataFrame = pd.DataFrame(
{"username": usernames, "name": names, "url_main": url_main, "url_user": url_user, "exists": exists,
"http_status": http_status, "response_time_s": response_time_s})
DataFrame.to_excel(f'{username}.xlsx', sheet_name='sheet1', index=False)
print()
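The xlsx export above builds one row per checked site from parallel lists. A minimal standalone sketch of the same pattern (data invented; writing .xlsx requires an engine such as openpyxl to be installed):

import pandas as pd

# Parallel lists, one entry per checked site (illustrative values only).
data = {
    "username": ["john_doe", "john_doe"],
    "name": ["GitHub", "GitLab"],
    "url_main": ["https://github.com", "https://gitlab.com"],
    "url_user": ["https://github.com/john_doe", "https://gitlab.com/john_doe"],
    "exists": ["Claimed", "Available"],
    "http_status": [200, 404],
    "response_time_s": [0.31, 0.52],
}
df = pd.DataFrame(data)
df.to_excel("john_doe.xlsx", sheet_name="sheet1", index=False)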

@@ -7,9 +7,10 @@ import json
import requests
import secrets
class SiteInformation:
def __init__(self, name, url_home, url_username_format, username_claimed,
information, is_nsfw, username_unclaimed=secrets.token_urlsafe(10)):
information, is_nsfw, username_unclaimed=secrets.token_urlsafe(10)):
"""Create Site Information Object.
Contains information about a specific website.
@@ -54,7 +55,7 @@ class SiteInformation:
self.username_claimed = username_claimed
self.username_unclaimed = secrets.token_urlsafe(32)
self.information = information
self.is_nsfw = is_nsfw
self.is_nsfw = is_nsfw
return
@@ -67,7 +68,7 @@ class SiteInformation:
Return Value:
Nicely formatted string to get information about this object.
"""
return f"{self.name} ({self.url_home})"
@@ -124,7 +125,7 @@ class SitesInformation:
except Exception as error:
raise FileNotFoundError(
f"Problem while attempting to access data file URL '{data_file_path}': {error}"
)
) from error
if response.status_code != 200:
raise FileNotFoundError(f"Bad response while accessing "
@@ -135,7 +136,7 @@ class SitesInformation:
except Exception as error:
raise ValueError(
f"Problem parsing json contents at '{data_file_path}': {error}."
)
) from error
else:
# Reference is to a file.
@@ -146,12 +147,13 @@ class SitesInformation:
except Exception as error:
raise ValueError(
f"Problem parsing json contents at '{data_file_path}': {error}."
)
) from error
except FileNotFoundError:
raise FileNotFoundError(f"Problem while attempting to access "
f"data file '{data_file_path}'."
)
except FileNotFoundError as e:
raise FileNotFoundError(
f"Problem while attempting to access "
f"data file '{data_file_path}'."
) from e
self.sites = {}
@@ -160,18 +162,18 @@ class SitesInformation:
try:
self.sites[site_name] = \
SiteInformation(site_name,
SiteInformation(site_name,
site_data[site_name]["urlMain"],
site_data[site_name]["url"],
site_data[site_name]["username_claimed"],
site_data[site_name],
site_data[site_name].get("isNSFW",False)
site_data[site_name].get("isNSFW", False)
)
except KeyError as error:
raise ValueError(
f"Problem parsing json contents at '{data_file_path}': Missing attribute {error}."
)
) from error
return
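The recurring "from error" additions in this file use explicit exception chaining: the original exception is attached as the new one's __cause__, so the traceback shows both and makes clear the re-raise was deliberate rather than an accident during handling. A tiny self-contained illustration (not code from the repository):

import json

try:
    json.loads("{not json")
except Exception as error:
    # "raise ... from error" links the new exception to the original one;
    # the traceback will show the JSON error as the direct cause.
    raise ValueError("Problem parsing json contents") from error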
@@ -185,12 +187,12 @@ class SitesInformation:
Return Value:
None
"""
sites = {}
for site in self.sites:
if self.sites[site].is_nsfw:
continue
sites[site] = self.sites[site]
self.sites = sites
sites = {
site: self.sites[site]
for site in self.sites
if not self.sites[site].is_nsfw
}
self.sites = sites
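The NSFW filter rewrite above is a straight translation of the old loop into a dict comprehension; both build the same mapping, the comprehension just states the keep-condition positively. An equivalence sketch with a throwaway stand-in class (illustration only, not SiteInformation itself):

class _Site:  # stand-in for SiteInformation, illustration only
    def __init__(self, is_nsfw):
        self.is_nsfw = is_nsfw

sites = {"safe": _Site(False), "adult": _Site(True)}

# Old style: copy entries across, skipping NSFW ones.
kept_loop = {}
for name in sites:
    if sites[name].is_nsfw:
        continue
    kept_loop[name] = sites[name]

# New style: same result as a comprehension.
kept_comp = {name: site for name, site in sites.items() if not site.is_nsfw}

assert kept_loop == kept_comp  # both contain only "safe"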
def site_name_list(self):
"""Get Site Name List.

@@ -116,7 +116,7 @@ class SherlockDetectTests(SherlockBaseTest):
# method is very hacky, but it does the job as having hardcoded
# usernames that dont exists will lead to people with ill intent to
# create an account with that username which will break the tests
valid_username = exrex.getone(r"^[a-zA-Z0-9-_]{30}")
valid_username = exrex.getone(r"^[a-zA-Z0-9-_]{30}")
self.username_check([valid_username], [site], exist_check=False)
return

@@ -32,16 +32,12 @@ class SherlockBaseTest(unittest.TestCase):
# Create object with all information about sites we are aware of.
sites = SitesInformation(data_file_path=os.path.join(os.path.dirname(__file__), "../resources/data.json"))
# Create original dictionary from SitesInformation() object.
# Eventually, the rest of the code will be updated to use the new object
# directly, but this will glue the two pieces together.
site_data_all = {}
for site in sites:
site_data_all[site.name] = site.information
site_data_all = {site.name: site.information for site in sites}
self.site_data_all = site_data_all
# Load excluded sites list, if any
excluded_sites_path = os.path.join(os.path.dirname(os.path.realpath(sherlock.__file__)), "tests/.excluded_sites")
excluded_sites_path = os.path.join(os.path.dirname(os.path.realpath(sherlock.__file__)),
"tests/.excluded_sites")
try:
with open(excluded_sites_path, "r", encoding="utf-8") as excluded_sites_file:
self.excluded_sites = excluded_sites_file.read().splitlines()
@@ -77,7 +73,7 @@ class SherlockBaseTest(unittest.TestCase):
for site in site_list:
with self.subTest(f"Checking test vector Site '{site}' "
f"exists in total site data."
):
):
site_data[site] = self.site_data_all[site]
return site_data
@@ -118,20 +114,20 @@ class SherlockBaseTest(unittest.TestCase):
tor=self.tor,
unique_tor=self.unique_tor,
timeout=self.timeout
)
)
for site, result in results.items():
with self.subTest(f"Checking Username '{username}' "
f"{check_type_text} on Site '{site}'"
):
):
if (
(self.skip_error_sites == True) and
(result["status"].status == QueryStatus.UNKNOWN)
):
#Some error connecting to site.
(self.skip_error_sites == True) and
(result["status"].status == QueryStatus.UNKNOWN)
):
# Some error connecting to site.
self.skipTest(f"Skipping Username '{username}' "
f"{check_type_text} on Site '{site}': "
f"Site returned error status."
)
)
self.assertEqual(exist_result_desired,
result["status"].status)
@@ -166,16 +162,11 @@ class SherlockBaseTest(unittest.TestCase):
for site, site_data in self.site_data_all.items():
if (
(site in self.excluded_sites) or
(site_data["errorType"] != detect_type) or
(site_data.get("username_claimed") is None) or
(site_data.get("username_unclaimed") is None)
):
# This is either not a site we are interested in, or the
# site does not contain the required information to do
# the tests.
pass
else:
site not in self.excluded_sites
and site_data["errorType"] == detect_type
and site_data.get("username_claimed") is not None
and site_data.get("username_unclaimed") is not None
):
# We should run a test on this site.
# Figure out which type of user
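On the condition flip above: the old code listed the reasons to skip a site and fell through to an empty pass, while the new code states the requirements for running the test directly (a De Morgan inversion of the same checks). A schematic sketch of the equivalence with placeholder flags (not code from the repository):

excluded = wrong_type = no_claimed = no_unclaimed = False
run_old = run_new = False

# Old shape: skip when any disqualifier holds, otherwise run.
if excluded or wrong_type or no_claimed or no_unclaimed:
    pass
else:
    run_old = True

# New shape: run when every requirement holds (same effect).
if (not excluded and not wrong_type
        and not no_claimed and not no_unclaimed):
    run_new = True

assert run_old == run_new  # holds for any combination of the four flags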
@@ -196,7 +187,7 @@ class SherlockBaseTest(unittest.TestCase):
self.username_check([username],
site_list,
exist_check=exist_check
)
)
return
@@ -212,13 +203,11 @@ class SherlockBaseTest(unittest.TestCase):
Will trigger an assert if any Site does not have test coverage.
"""
site_no_tests_list = []
for site, site_data in self.site_data_all.items():
if site_data.get("username_claimed") is None:
# Test information not available on this site.
site_no_tests_list.append(site)
site_no_tests_list = [
site
for site, site_data in self.site_data_all.items()
if site_data.get("username_claimed") is None
]
self.assertEqual("", ", ".join(site_no_tests_list))
return

@@ -1,6 +1,7 @@
import imp
import unittest
import sys
sys.path.append('../')
import sherlock as sh
@@ -10,20 +11,22 @@ checksymbols = ["_", "-", "."]
"""Test for mulriple usernames.
This test ensures that the function MultipleUsernames works properly. More specific,
different scenarios are tested and only usernames that contain this specific sequence: {?}
different scenarios are tested and only usernames that contain this specific sequence: {?}
should return positive.
Keyword Arguments:
self -- This object.
Return Value:
Nothing.
"""
class TestMultipleUsernames(unittest.TestCase):
def test_area(self):
test_usernames = ["test{?}test" , "test{?feo" , "test"]
test_usernames = ["test{?}test", "test{?feo", "test"]
for name in test_usernames:
if(sh.CheckForParameter(name)):
self.assertAlmostEqual(sh.MultipleUsernames(name), ["test_test" , "test-test" , "test.test"])
if (sh.CheckForParameter(name)):
self.assertAlmostEqual(sh.MultipleUsernames(name), ["test_test", "test-test", "test.test"])
else:
self.assertAlmostEqual(name, name)
self.assertAlmostEqual(name, name)

@@ -16,7 +16,8 @@ with open("sites.md", "w") as site_file:
for social_network, info in social_networks:
url_main = info["urlMain"]
is_nsfw = "**(NSFW)**" if info.get("isNSFW") else ""
site_file.write(f"1. ![](https://www.google.com/s2/favicons?domain={url_main}) [{social_network}]({url_main}) {is_nsfw}\n")
site_file.write(
f"1. ![](https://www.google.com/s2/favicons?domain={url_main}) [{social_network}]({url_main}) {is_nsfw}\n")
# Overwrite the data.json file with sorted data
with open("sherlock/resources/data.json", "w") as data_file:
