Merge remote-tracking branch 'upstream/master' into async-requests

# Conflicts:
#	.gitignore
#	requirements.txt
#	sherlock.py
pull/3/head
Tejasvi Nareddy
commit 78a3451c03

.gitignore

@@ -1,5 +1,13 @@
# User files
# Virtual Environment
venv/
# Jupyter Notebook
.ipynb_checkpoints
*.ipynb
# Output files, except requirements.txt
*.txt
!requirements.txt
# Virtual Environment
venv/
# Comma-Separated Values (CSV) Reports
*.csv

README.md

@@ -1,13 +1,15 @@
# Sherlock
> Find usernames across over 75 social networks
> Find usernames across social networks
<p align="center">
<img src="sherlock_preview.png">
<img src="preview.png">
</a>
</p>
## Installation
**NOTE**: Python 3.6 or higher is required.
```bash
# clone the repo
$ git clone https://github.com/sdushantha/sherlock.git
@@ -20,10 +22,30 @@ $ pip3 install -r requirements.txt
```
## Usage
Just run ```python3 sherlock.py```
All of the accounts found will be stored in a text file with their username (e.g. ```user123.txt```)
```bash
$ python3 sherlock.py --help
usage: sherlock.py [-h] [--version] [--verbose] [--quiet] [--csv] [--tor] [--unique-tor]
USERNAMES [USERNAMES ...]
Sherlock: Find Usernames Across Social Networks (Version 0.1.0)
positional arguments:
USERNAMES One or more usernames to check with social networks.
optional arguments:
-h, --help show this help message and exit
--version Display version information and dependencies.
--verbose, -v, -d, --debug
Display extra debugging information.
--quiet, -q Disable debugging information (Default Option).
--csv Create Comma-Separated Values (CSV) File.
--tor, -t Make requests over TOR; increases runtime; requires TOR to be installed and in system path.
--unique-tor, -u Make requests over TOR with new TOR circuit after each request; increases runtime; requires TOR to be installed and in system path.
```
For example, run ```python3 sherlock.py user123```, and all of the accounts
found will be stored in a text file with the username (e.g. ```user123.txt```).
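For a machine-readable report, add the `--csv` flag from the help text above; per `main()` in sherlock.py further down, the CSV file is also named after the username:

```bash
# text report plus a CSV report (user123.csv)
$ python3 sherlock.py --csv user123
```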
## License
MIT License

data.json

@@ -1,368 +1,462 @@
{
"Instagram": {
"url": "https://www.instagram.com/{}",
"urlMain": "https://www.instagram.com/",
"errorType": "message",
"errorMsg": "The link you followed may be broken"
},
"Twitter": {
"url": "https://www.twitter.com/{}",
"urlMain": "https://www.twitter.com/",
"errorType": "message",
"errorMsg": "page doesnt exist"
},
"Facebook": {
"url": "https://www.facebook.com/{}",
"errorType": "message",
"errorMsg": "not found"
"urlMain": "https://www.facebook.com/",
"errorType": "status_code"
},
"YouTube": {
"url": "https://www.youtube.com/{}",
"urlMain": "https://www.youtube.com/",
"errorType": "message",
"errorMsg": "Not Found"
},
"Blogger": {
"url": "https://{}.blogspot.com",
"errorType": "status_code"
"urlMain": "https://www.blogger.com/",
"errorType": "status_code",
"regexCheck": "^[a-zA-Z][a-zA-Z0-9_-]*$"
},
"Google Plus": {
"url": "https://plus.google.com/+{}",
"urlMain": "https://plus.google.com/",
"errorType": "status_code"
},
"Reddit": {
"url": "https://www.reddit.com/user/{}",
"urlMain": "https://www.reddit.com/",
"errorType": "message",
"errorMsg":"page not found"
},
"Pinterest": {
"url": "https://www.pinterest.com/{}",
"urlMain": "https://www.pinterest.com/",
"errorType": "response_url",
"errorMsgInUrl": "?show_error"
"errorUrl": "https://www.pinterest.com/?show_error=true"
},
"GitHub": {
"url": "https://www.github.com/{}",
"errorType": "message",
"errorMsg": "404 Not Found"
"urlMain": "https://www.github.com/",
"errorType": "status_code",
"regexCheck": "^[a-zA-Z0-9](?:[a-zA-Z0-9]|-(?=[a-zA-Z0-9])){0,38}$"
},
"Steam": {
"url": "https://steamcommunity.com/id/{}",
"urlMain": "https://steamcommunity.com/",
"errorType": "message",
"errorMsg": "The specified profile could not be found"
},
"Vimeo": {
"url": "https://vimeo.com/{}",
"urlMain": "https://vimeo.com/",
"errorType": "message",
"errorMsg": "404 Not Found"
},
"SoundCloud": {
"url": "https://soundcloud.com/{}",
"errorType": "message",
"errorMsg": "404 Not Found"
},
"Tumblr": {
"url": "https://{}.tumblr.com",
"errorType": "message",
"errorMsg": " There's nothing here"
"urlMain": "https://soundcloud.com/",
"errorType": "status_code"
},
"Disqus": {
"url": "https://disqus.com/{}",
"errorType": "message",
"errorMsg": "404 NOT FOUND"
"urlMain": "https://disqus.com/",
"errorType": "status_code"
},
"Medium": {
"url": "https://medium.com/@{}",
"urlMain": "https://medium.com/",
"errorType": "status_code"
},
"DeviantART": {
"url": "https://{}.deviantart.com",
"errorType": "status_code"
"urlMain": "https://deviantart.com",
"errorType": "status_code",
"regexCheck": "^[a-zA-Z][a-zA-Z0-9_-]*$"
},
"VK": {
"url": "https://vk.com/{}",
"urlMain": "https://vk.com/",
"errorType": "status_code"
},
"About.me": {
"url": "https://about.me/{}",
"urlMain": "https://about.me/",
"errorType": "status_code"
},
"Imgur": {
"url": "https://imgur.com/user/{}",
"urlMain": "https://imgur.com/",
"errorType": "status_code"
},
"Flipboard": {
"url": "https://flipboard.com/@{}",
"errorType": "status_code"
"urlMain": "https://flipboard.com/",
"errorType": "message",
"errorMsg": "loading"
},
"SlideShare": {
"url": "https://slideshare.net/{}",
"urlMain": "https://slideshare.net/",
"errorType": "status_code"
},
"Fotolog": {
"url": "https://fotolog.com/{}",
"urlMain": "https://fotolog.com/",
"errorType": "status_code"
},
"Spotify": {
"url": "https://open.spotify.com/user/{}",
"urlMain": "https://open.spotify.com/",
"errorType": "status_code"
},
"MixCloud": {
"url": "https://www.mixcloud.com/{}",
"urlMain": "https://www.mixcloud.com/",
"errorType": "message",
"errorMsg": "Page Not Found"
},
"Scribd": {
"url": "https://www.scribd.com/{}",
"urlMain": "https://www.scribd.com/",
"errorType": "message",
"errorMsg": "Page not found"
},
"Patreon": {
"url": "https://www.patreon.com/{}",
"urlMain": "https://www.patreon.com/",
"errorType": "status_code"
},
"BitBucket": {
"url": "https://bitbucket.org/{}",
"urlMain": "https://bitbucket.org/",
"errorType": "status_code"
},
"Roblox": {
"url": "https://www.roblox.com/user.aspx?username={}",
"urlMain": "https://www.roblox.com/",
"errorType": "message",
"errorMsg": "Page cannot be found or no longer exists"
},
"Gravatar": {
"url": "http://en.gravatar.com/{}",
"errorType": "message",
"errorMsg": "Were sorry, we couldn't find that profile"
"urlMain": "http://en.gravatar.com/",
"errorType": "status_code"
},
"iMGSRC.RU": {
"url": "https://imgsrc.ru/main/user.php?user={}",
"errorType": "message",
"errorMsg": "Rapidly growing community of over a million users, dedicated to sharing."
"urlMain": "https://imgsrc.ru/",
"errorType": "response_url",
"errorUrl": "https://imgsrc.ru/"
},
"DailyMotion": {
"url": "https://www.dailymotion.com/{}",
"urlMain": "https://www.dailymotion.com/",
"errorType": "message",
"errorMsg": "Page not found"
},
"Etsy": {
"url": "https://www.etsy.com/shop/{}",
"urlMain": "https://www.etsy.com/",
"errorType": "status_code"
},
"CashMe": {
"url": "https://cash.me/{}",
"errorType": "message",
"errorMsg": "Oh no"
"urlMain": "https://cash.me/",
"errorType": "status_code"
},
"Behance": {
"url": "https://www.behance.net/{}",
"urlMain": "https://www.behance.net/",
"errorType": "message",
"errorMsg": "Oops! We cant find that page."
},
"GoodReads": {
"url": "https://www.goodreads.com/{}",
"errorType": "message",
"errorMsg": "Sorry you lost your way."
"urlMain": "https://www.goodreads.com/",
"errorType": "status_code"
},
"Instructables": {
"url": "https://www.instructables.com/member/{}",
"urlMain": "https://www.instructables.com/",
"errorType": "message",
"errorMsg": "404: We're sorry, things break sometimes"
},
"Keybase": {
"url": "https://keybase.io/{}",
"errorType": "message",
"errorMsg": "Sorry, what you are looking for...it does not exist."
"urlMain": "https://keybase.io/",
"errorType": "status_code"
},
"Kongregate": {
"url": "https://www.kongregate.com/accounts/{}",
"urlMain": "https://www.kongregate.com/",
"errorType": "message",
"errorMsg": "Sorry, no account with that name was found."
"errorMsg": "Sorry, no account with that name was found.",
"regexCheck": "^[a-zA-Z][a-zA-Z0-9_-]*$"
},
"LiveJournal": {
"url": "https://{}.livejournal.com",
"urlMain": "https://www.livejournal.com/",
"errorType": "message",
"errorMsg": "Unknown Journal"
"errorMsg": "Unknown Journal",
"regexCheck": "^[a-zA-Z][a-zA-Z0-9_-]*$"
},
"VSCO": {
"url": "https://vsco.co/{}",
"errorType": "message",
"errorMsg": "This page does not exist"
"urlMain": "https://vsco.co/",
"errorType": "status_code"
},
"AngelList": {
"url": "https://angel.co/{}",
"urlMain": "https://angel.co/",
"errorType": "message",
"errorMsg": "We couldn't find what you were looking for."
},
"last.fm": {
"url": "https://last.fm/user/{}",
"urlMain": "https://last.fm/",
"errorType": "message",
"errorMsg": "Whoops! Sorry, but this page doesn't exist."
},
"Dribbble": {
"url": "https://dribbble.com/{}",
"urlMain": "https://dribbble.com/",
"errorType": "message",
"errorMsg": "Whoops, that page is gone."
"errorMsg": "Whoops, that page is gone.",
"regexCheck": "^[a-zA-Z][a-zA-Z0-9_-]*$"
},
"Codecademy": {
"url": "https://www.codecademy.com/{}",
"urlMain": "https://www.codecademy.com/",
"errorType": "message",
"errorMsg": "404 error"
},
"Pastebin": {
"url": "https://pastebin.com/u/{}",
"urlMain": "https://pastebin.com/",
"errorType": "response_url",
"errorMsgInUrl": "index"
"errorUrl": "https://pastebin.com/index"
},
"Foursquare": {
"url": "https://foursquare.com/{}",
"errorType": "message",
"errorMsg": "We couldn't find the page you're looking for."
"urlMain": "https://foursquare.com/",
"errorType": "status_code"
},
"Gumroad": {
"url": "https://www.gumroad.com/{}",
"urlMain": "https://www.gumroad.com/",
"errorType": "message",
"errorMsg": "Page not found."
},
"Newgrounds": {
"url": "https://{}.newgrounds.com",
"errorType": "message",
"errorMsg": "ERROR — No user"
"urlMain": "https://newgrounds.com",
"errorType": "status_code",
"regexCheck": "^[a-zA-Z][a-zA-Z0-9_-]*$"
},
"Wattpad": {
"url": "https://www.wattpad.com/user/{}",
"urlMain": "https://www.wattpad.com/",
"errorType": "message",
"errorMsg": "This page seems to be missing..."
},
"Canva": {
"url": "https://www.canva.com/{}",
"urlMain": "https://www.canva.com/",
"errorType": "message",
"errorMsg": "Not found (404)"
},
"Trakt": {
"url": "https://www.trakt.tv/users/{}",
"urlMain": "https://www.trakt.tv/",
"errorType": "message",
"errorMsg": "404"
},
"500px": {
"url": "https://500px.com/{}",
"urlMain": "https://500px.com/",
"errorType": "message",
"errorMsg": "Sorry, no such page."
},
"BuzzFeed": {
"url": "https://buzzfeed.com/{}",
"urlMain": "https://buzzfeed.com/",
"errorType": "message",
"errorMsg": "We can't find the page you're looking for."
},
"TripAdvisor": {
"url": "https://tripadvisor.com/members/{}",
"urlMain": "https://tripadvisor.com/",
"errorType": "message",
"errorMsg": "This page is on vacation…"
},
"Contently": {
"url": "https://{}.contently.com/",
"urlMain": "https://contently.com/",
"errorType": "message",
"errorMsg": "We can't find that page!"
},
"Houzz": {
"url": "https://houzz.com/user/{}",
"errorType": "message",
"errorMsg": "The page you requested was not found."
"errorMsg": "We can't find that page!",
"regexCheck": "^[a-zA-Z][a-zA-Z0-9_-]*$"
},
"Houzz": {
"url": "https://houzz.com/user/{}",
"urlMain": "https://houzz.com/",
"errorType": "message",
"errorMsg": "The page you requested was not found."
},
"BLIP.fm": {
"url": "https://blip.fm/{}",
"urlMain": "https://blip.fm/",
"errorType": "message",
"errorMsg": "404 Page Not Found"
"errorMsg": "Page Not Found"
},
"HackerNews": {
"url": "https://news.ycombinator.com/user?id={}",
"urlMain": "https://news.ycombinator.com/",
"errorType": "message",
"errorMsg": "No such user."
},
"Codementor": {
"url": "https://www.codementor.io/{}",
"urlMain": "https://www.codementor.io/",
"errorType": "message",
"errorMsg": "404"
},
"ReverbNation": {
"url": "https://www.reverbnation.com/{}",
"urlMain": "https://www.reverbnation.com/",
"errorType": "message",
"errorMsg": "Sorry, we couldn't find that page"
},
"Designspiration": {
"url": "https://www.designspiration.net/{}",
"urlMain": "https://www.designspiration.net/",
"errorType": "message",
"errorMsg": "Content Not Found"
},
"Bandcamp": {
"url": "https://www.bandcamp.com/{}",
"urlMain": "https://www.bandcamp.com/",
"errorType": "message",
"errorMsg": "Sorry, that something isnt here"
},
"ColourLovers": {
"url": "https://www.colourlovers.com/love/{}",
"urlMain": "https://www.colourlovers.com/",
"errorType": "message",
"errorMsg": "Page Not Loved"
},
"IFTTT": {
"url": "https://www.ifttt.com/p/{}",
"urlMain": "https://www.ifttt.com/",
"errorType": "message",
"errorMsg": "The requested page or file does not exist"
},
"Ebay": {
"url": "https://www.ebay.com/usr/{}",
"urlMain": "https://www.ebay.com/",
"errorType": "message",
"errorMsg": "The User ID you entered was not found"
},
"Slack": {
"url": "https://{}.slack.com",
"errorType": "message",
"errorMsg": "Theres been a glitch…"
"urlMain": "https://slack.com",
"errorType": "status_code",
"regexCheck": "^[a-zA-Z][a-zA-Z0-9_-]*$"
},
"Trip": {
"url": "https://www.trip.skyscanner.com/user/{}",
"urlMain": "https://www.trip.skyscanner.com/",
"errorType": "message",
"errorMsg": "Page not found"
},
"Ello": {
"url": "https://ello.co/{}",
"urlMain": "https://ello.co/",
"errorType": "message",
"errorMsg": "We couldn't find the page you're looking for"
},
"HackerOne": {
"url": "https://hackerone.com/{}",
"urlMain": "https://hackerone.com/",
"errorType": "message",
"errorMsg": "Page not found"
},
"Tinder": {
"url": "https://www.gotinder.com/@{}",
"urlMain": "https://tinder.com/",
"errorType": "message",
"errorMsg": "Looking for Someone?"
},
"We Heart It": {
"url": "https://weheartit.com/{}",
"urlMain": "https://weheartit.com/",
"errorType": "message",
"errorMsg": "Oops! You've landed on a moving target!"
},
"Flickr": {
"url": "https://www.flickr.com/people/{}",
"errorType": "message",
"errorMsg": "This is not the page youre looking for"
"urlMain": "https://www.flickr.com/",
"errorType": "status_code"
},
"WordPress": {
"url": "https://{}.wordpress.com",
"errorType": "message",
"errorMsg": "Do you want to register"
"urlMain": "https://wordpress.com",
"errorType": "response_url",
"errorUrl": "wordpress.com/typo/?subdomain=",
"regexCheck": "^[a-zA-Z][a-zA-Z0-9_-]*$"
},
"Unsplash": {
"url": "https://unsplash.com/@{}",
"errorType": "message",
"errorMsg": "Hm, the page you were looking for doesn't seem to exist anymore"
"urlMain": "https://unsplash.com/",
"errorType": "status_code"
},
"Pexels": {
"url": "https://www.pexels.com/@{}",
"urlMain": "https://www.pexels.com/",
"errorType": "message",
"errorMsg": "Ouch, something went wrong!"
},
"devRant": {
"url": "https://devrant.com/users/{}",
"urlMain": "https://devrant.com/",
"errorType": "response_url",
"errorUrl": "https://devrant.com/"
},
"MyAnimeList": {
"url": "https://myanimelist.net/profile/{}",
"urlMain": "https://myanimelist.net/",
"errorType": "status_code"
},
"ImageShack": {
"url": "https://imageshack.us/user/{}",
"urlMain": "https://imageshack.us/",
"errorType": "response_url",
"errorUrl": "https://imageshack.us/"
},
"Badoo": {
"url": "https://badoo.com/profile/{}",
"urlMain": "https://badoo.com/",
"errorType": "status_code"
},
"MeetMe": {
"url": "https://www.meetme.com/{}",
"urlMain": "https://www.meetme.com/",
"errorType": "response_url",
"errorUrl": "https://www.meetme.com/"
},
"Quora": {
"url": "https://www.quora.com/profile/{}",
"urlMain": "https://www.quora.com/",
"errorType": "status_code"
}
}
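Each entry above is consumed through its `errorType` field, which selects one of three detection strategies (error message in the HTML, HTTP status code, or redirect URL). A minimal standalone sketch of that dispatch; the `check_site` helper is illustrative, not part of the repository:

```python
import json
import re

import requests

def check_site(entry, username):
    """Return "yes", "no", "illegal", or "error" for one data.json entry."""
    # Skip sites whose username rules the candidate cannot satisfy.
    regex_check = entry.get("regexCheck")
    if regex_check and re.search(regex_check, username) is None:
        return "illegal"

    url = entry["url"].format(username)
    r = requests.get(url, headers={"User-Agent": "Mozilla/5.0"})

    if entry["errorType"] == "message":
        # Existence is inferred from the absence of the known error message.
        return "no" if entry["errorMsg"] in r.text else "yes"
    if entry["errorType"] == "status_code":
        # A 404 response means the profile page does not exist.
        return "no" if r.status_code == 404 else "yes"
    if entry["errorType"] == "response_url":
        # A redirect onto the known error URL means the account is missing.
        return "no" if entry["errorUrl"] in r.url else "yes"
    return "error"

with open("data.json", "r") as raw:
    data = json.load(raw)
print(check_site(data["GitHub"], "user123"))
```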

requirements.txt

@@ -1 +1,3 @@
requests_futures
requests
requests_futures
torrequest

sherlock.py

@@ -1,13 +1,24 @@
"""Sherlock: Find Usernames Across Social Networks Module
This module contains the main logic to search for usernames at social
networks.
"""
import requests
from concurrent.futures import ThreadPoolExecutor
from requests_futures.sessions import FuturesSession
import json
import os
import re
import csv
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import platform
from torrequest import TorRequest
raw = open("data.json", "r")
data = json.load(raw)
module_name = "Sherlock: Find Usernames Across Social Networks"
__version__ = "0.1.0"
# Allow 1 thread for each external service, so `len(data)` threads total
session = FuturesSession(executor=ThreadPoolExecutor(max_workers=len(data)))
# TODO: fix tumblr
def write_to_file(url, fname):
@@ -15,90 +26,287 @@ def write_to_file(url, fname):
f.write(url + "\n")
def main():
# Not sure why, but the banner messes up if I put it into one print function
print(" .\"\"\"-.")
print(" / \\")
print("\033[37;1m ____ _ _ _ | _..--'-.")
print("\033[37;1m/ ___|| |__ ___ _ __| | ___ ___| |__ >.`__.-\"\"\;\"`")
print("\033[37;1m\___ \| '_ \ / _ \ '__| |/ _ \ / __| |/ / / /( ^\\")
print("\033[37;1m ___) | | | | __/ | | | (_) | (__| < '-`) =|-.")
print("\033[37;1m|____/|_| |_|\___|_| |_|\___/ \___|_|\_\ /`--.'--' \ .-.")
print("\033[37;1m .'`-._ `.\ | J /")
print("\033[37;1m / `--.| \__/\033[0m")
username = input("\033[92;1m[\033[37;1m?\033[92;1m]\033[92;1m Input Username: \033[0m")
print()
def print_error(err, errstr, var, debug=False):
if debug:
print(f"\033[37;1m[\033[91;1m-\033[37;1m]\033[91;1m {errstr}\033[93;1m {err}")
else:
print(f"\033[37;1m[\033[91;1m-\033[37;1m]\033[91;1m {errstr}\033[93;1m {var}")
def get_response(request_future, error_type, social_network, verbose=False):
try:
rsp = request_future.result()
if rsp.status_code:
return rsp, error_type
except requests.exceptions.HTTPError as errh:
print_error(errh, "HTTP Error:", social_network, verbose)
except requests.exceptions.ConnectionError as errc:
print_error(errc, "Error Connecting:", social_network, verbose)
except requests.exceptions.Timeout as errt:
print_error(errt, "Timeout Error:", social_network, verbose)
except requests.exceptions.RequestException as err:
print_error(err, "Unknown error:", social_network, verbose)
return None, ""
def sherlock(username, verbose=False, tor=False, unique_tor=False):
"""Run Sherlock Analysis.
Checks for existence of username on various social media sites.
Keyword Arguments:
username -- String indicating username that report
should be created against.
verbose -- Boolean indicating whether to give verbose output.
tor -- Boolean indicating whether to use a tor circuit for the requests.
unique_tor -- Boolean indicating whether to use a new tor circuit for each request.
Return Value:
Dictionary containing results from report. Key of dictionary is the name
of the social network site, and the value is another dictionary with
the following keys:
url_main: URL of main site.
url_user: URL of user on site (if account exists).
exists: String indicating results of test for account existence.
http_status: HTTP status code of query which checked for existence on
site.
response_text: Text that came back from request. May be None if
there was an HTTP error when checking for existence.
"""
fname = username + ".txt"
if os.path.isfile(fname):
os.remove(fname)
print(
"\033[1;92m[\033[0m\033[1;77m*\033[0m\033[1;92m] Removing previous file:\033[1;37m {}\033[0m".format(fname))
print("\033[1;92m[\033[0m\033[1;77m*\033[0m\033[1;92m] Removing previous file:\033[1;37m {}\033[0m".format(fname))
print(
"\033[1;92m[\033[0m\033[1;77m*\033[0m\033[1;92m] Checking username\033[0m\033[1;37m {}\033[0m\033[1;92m on: "
"\033[0m".format(
username))
print("\033[1;92m[\033[0m\033[1;77m*\033[0m\033[1;92m] Checking username\033[0m\033[1;37m {}\033[0m\033[1;92m on: \033[0m".format(username))
# User agent is needed because some sites does not
# User agent is needed because some sites do not
# return the correct information because they think that
# we are a bot
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0'
}
# Create futures for all requests
# Load the data
raw = open("data.json", "r")
data = json.load(raw)
# Allow 1 thread for each external service, so `len(data)` threads total
executor = ThreadPoolExecutor(max_workers=len(data))
# Create session based on request methodology
underlying_session = requests.session()
underlying_request = requests.Request()
if tor or unique_tor:
underlying_request = TorRequest()
underlying_session = underlying_request.session()
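# Note: TorRequest assumes the Tor binary is installed and on the system
# path (see the --tor/--unique-tor help text); it routes the underlying
# requests session through the local Tor proxy.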
# Create multi-threaded session for all requests
session = FuturesSession(executor=executor, session=underlying_session)
# Results from analysis of all sites
results_total = {}
# First create futures for all requests. This allows for the requests to run in parallel
for social_network in data:
url = data[social_network]['url'].format(username)
# This future starts running the request in a new thread, doesn't block the main thread
future = session.get(url=url, headers=headers)
# Results from analysis of this specific site
results_site = {}
# Record URL of main site
results_site['url_main'] = data.get(social_network).get("urlMain")
# Don't make request if username is invalid for the site
regex_check = data.get(social_network).get("regexCheck")
if regex_check and re.search(regex_check, username) is None:
# No need to do the check at the site: this user name is not allowed.
print("\033[37;1m[\033[91;1m-\033[37;1m]\033[92;1m {}:\033[93;1m Illegal Username Format For This Site!".format(social_network))
results_site["exists"] = "illegal"
else:
# URL of user on site (if it exists)
url = data.get(social_network).get("url").format(username)
results_site["url_user"] = url
# Store future in data for access later
data[social_network]['request'] = future
# This future starts running the request in a new thread, doesn't block the main thread
future = session.get(url=url, headers=headers)
# Print results
# Store future in data for access later
data.get(social_network)["request_future"] = future
# Reset identity for tor (if needed)
if unique_tor:
underlying_request.reset_identity()
# Add this site's results into final dictionary with all of the other results.
results_total[social_network] = results_site
# Core logic: If tor requests, make them here. If multi-threaded requests, wait for responses
for social_network in data:
url = data[social_network]['url'].format(username)
error_type = data[social_network]['errorType']
# Retrieve results again
results_site = results_total.get(social_network)
# Retrieve other site information again
url = results_site.get("url_user")
exists = results_site.get("exists")
if exists is not None:
# We have already determined the user doesn't exist here
continue
# Get the expected error type
error_type = data.get(social_network).get("errorType")
# Default data in case there are any failures in doing a request.
http_status = "?"
response_text = ""
# Retrieve future and ensure it has finished
future = data[social_network]['request']
response = future.result()
future = data.get(social_network).get("request_future")
r, error_type = get_response(request_future=future,
error_type=error_type,
social_network=social_network,
verbose=verbose)
# Print result
if error_type == "message":
error = data[social_network]['errorMsg']
# Attempt to get request information
try:
http_status = r.status_code
except:
pass
try:
response_text = r.text.encode(r.encoding)
except:
pass
if not error in response.text:
if error_type == "message":
error = data.get(social_network).get("errorMsg")
# Checks if the error message is in the HTML
if not error in r.text:
print("\033[37;1m[\033[92;1m+\033[37;1m]\033[92;1m {}:\033[0m".format(social_network), url)
write_to_file(url, fname)
exists = "yes"
else:
print("\033[37;1m[\033[91;1m-\033[37;1m]\033[92;1m {}:\033[93;1m Not Found!".format(social_network))
exists = "no"
elif error_type == "status_code":
if not response.status_code == 404:
# Checks if the status code of the response is 404
if not r.status_code == 404:
print("\033[37;1m[\033[92;1m+\033[37;1m]\033[92;1m {}:\033[0m".format(social_network), url)
write_to_file(url, fname)
exists = "yes"
else:
print("\033[37;1m[\033[91;1m-\033[37;1m]\033[92;1m {}:\033[93;1m Not Found!".format(social_network))
exists = "no"
elif error_type == "response_url":
error = data.get(social_network).get("errorMsgInUrl")
if not error in response.url:
error = data.get(social_network).get("errorUrl")
# Checks if the redirect url is the same as the one defined in data.json
if not error in r.url:
print("\033[37;1m[\033[92;1m+\033[37;1m]\033[92;1m {}:\033[0m".format(social_network), url)
write_to_file(url, fname)
exists = "yes"
else:
print("\033[37;1m[\033[91;1m-\033[37;1m]\033[92;1m {}:\033[93;1m Not Found!".format(social_network))
exists = "no"
elif error_type == "":
print("\033[37;1m[\033[91;1m-\033[37;1m]\033[92;1m {}:\033[93;1m Error!".format(social_network))
exists = "error"
# Save exists flag
results_site['exists'] = exists
# Save results from request
results_site['http_status'] = http_status
results_site['response_text'] = response_text
# Add this site's results into final dictionary with all of the other results.
results_total[social_network] = results_site
print("\033[1;92m[\033[0m\033[1;77m*\033[0m\033[1;92m] Saved: \033[37;1m{}\033[0m".format(username+".txt"))
return results_total
def main():
version_string = f"%(prog)s {__version__}\n" + \
f"{requests.__description__}: {requests.__version__}\n" + \
f"Python: {platform.python_version()}"
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
description=f"{module_name} (Version {__version__})"
)
parser.add_argument("--version",
action="version", version=version_string,
help="Display version information and dependencies."
)
parser.add_argument("--verbose", "-v", "-d", "--debug",
action="store_true", dest="verbose", default=False,
help="Display extra debugging information."
)
parser.add_argument("--quiet", "-q",
action="store_false", dest="verbose",
help="Disable debugging information (Default Option)."
)
parser.add_argument("--tor", "-t",
action="store_true", dest="tor", default=False,
help="Make requests over TOR; increases runtime; requires TOR to be installed and in system path.")
parser.add_argument("--unique-tor", "-u",
action="store_true", dest="unique_tor", default=False,
help="Make requests over TOR with new TOR circuit after each request; increases runtime; requires TOR to be installed and in system path.")
parser.add_argument("--csv",
action="store_true", dest="csv", default=False,
help="Create Comma-Separated Values (CSV) File."
)
parser.add_argument("username",
nargs='+', metavar='USERNAMES',
action="store",
help="One or more usernames to check with social networks."
)
args = parser.parse_args()
# Banner
print(
"""\033[37;1m .\"\"\"-.
\033[37;1m / \\
\033[37;1m ____ _ _ _ | _..--'-.
\033[37;1m/ ___|| |__ ___ _ __| | ___ ___| |__ >.`__.-\"\"\;\"`
\033[37;1m\___ \| '_ \ / _ \ '__| |/ _ \ / __| |/ / / /( ^\\
\033[37;1m ___) | | | | __/ | | | (_) | (__| < '-`) =|-.
\033[37;1m|____/|_| |_|\___|_| |_|\___/ \___|_|\_\ /`--.'--' \ .-.
\033[37;1m .'`-._ `.\ | J /
\033[37;1m / `--.| \__/\033[0m""")
if args.tor or args.unique_tor:
print("Warning: some websites might refuse connecting over TOR, so note that using this option might increase connection errors.")
print("\033[1;92m[\033[0m\033[1;77m*\033[0m\033[1;92m] Saved: \033[37;1m{}\033[0m".format(username + ".txt"))
# Run report on all specified users.
for username in args.username:
print()
results = sherlock(username, verbose=args.verbose, tor=args.tor, unique_tor=args.unique_tor)
if args.csv == True:
with open(username + ".csv", "w", newline='') as csv_report:
writer = csv.writer(csv_report)
writer.writerow(['username',
'name',
'url_main',
'url_user',
'exists',
'http_status'
]
)
for site in results:
writer.writerow([username,
site,
results[site]['url_main'],
results[site]['url_user'],
results[site]['exists'],
results[site]['http_status']
]
)
main()
if __name__ == "__main__":
main()
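The heart of the async-requests change is the two-pass pattern above: queue every request as a future first, then collect results only when needed. A self-contained sketch of that pattern with requests_futures (the URLs are placeholders):

```python
from concurrent.futures import ThreadPoolExecutor
from requests_futures.sessions import FuturesSession

urls = ["https://example.com/a", "https://example.com/b"]

# One worker per request, mirroring the len(data) sizing used above.
session = FuturesSession(executor=ThreadPoolExecutor(max_workers=len(urls)))

# Pass 1: fire off every request; session.get() returns immediately
# with a Future instead of blocking on the response.
futures = [session.get(url) for url in urls]

# Pass 2: block only when each response is actually needed.
for url, future in zip(urls, futures):
    response = future.result()
    print(url, response.status_code)
```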

Binary image file not shown (137 KiB).
