code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def time_get(self):
"""Benchmark the time to get a dataset"""
for i in range(1,1001):
self.catalog.get(f"dataset_{i}") | Benchmark the time to get a dataset | time_get | python | kedro-org/kedro | kedro_benchmarks/benchmark_kedrodatacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_kedrodatacatalog.py | Apache-2.0 |
def time_iter(self):
"""Benchmark the time to iterate over the catalog"""
for dataset in self.catalog:
pass | Benchmark the time to iterate over the catalog | time_iter | python | kedro-org/kedro | kedro_benchmarks/benchmark_kedrodatacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_kedrodatacatalog.py | Apache-2.0 |
def time_keys(self):
"""Benchmark the time to get the keys of the catalog"""
self.catalog.keys() | Benchmark the time to get the keys of the catalog | time_keys | python | kedro-org/kedro | kedro_benchmarks/benchmark_kedrodatacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_kedrodatacatalog.py | Apache-2.0 |
def time_values(self):
"""Benchmark the time to get the items of the catalog"""
self.catalog.values() | Benchmark the time to get the items of the catalog | time_values | python | kedro-org/kedro | kedro_benchmarks/benchmark_kedrodatacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_kedrodatacatalog.py | Apache-2.0 |
def time_items(self):
"""Benchmark the time to get the items of the catalog"""
self.catalog.items() | Benchmark the time to get the items of the catalog | time_items | python | kedro-org/kedro | kedro_benchmarks/benchmark_kedrodatacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_kedrodatacatalog.py | Apache-2.0 |
def time_setitem(self):
"""Benchmark the time to set a dataset"""
for i in range(1,1001):
self.catalog[f"dataset_new_{i}"] = CSVDataset(filepath="data.csv") | Benchmark the time to set a dataset | time_setitem | python | kedro-org/kedro | kedro_benchmarks/benchmark_kedrodatacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_kedrodatacatalog.py | Apache-2.0 |
def time_setitem_raw(self):
"""Benchmark the time to add a memory dataset"""
for i in range(1,1001):
self.catalog[f"param_{i}"] = self.feed_dict[f"param_{i}"] | Benchmark the time to add a memory dataset | time_setitem_raw | python | kedro-org/kedro | kedro_benchmarks/benchmark_kedrodatacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_kedrodatacatalog.py | Apache-2.0 |
def time_save(self):
"""Benchmark the time to save datasets"""
for i in range(1,1001):
self.catalog.save(f"dataset_{i}", self.dataframe) | Benchmark the time to save datasets | time_save | python | kedro-org/kedro | kedro_benchmarks/benchmark_kedrodatacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_kedrodatacatalog.py | Apache-2.0 |
def time_load(self):
"""Benchmark the time to load datasets"""
for i in range(1,1001):
self.catalog.load(f"dataset_load_{i}") | Benchmark the time to load datasets | time_load | python | kedro-org/kedro | kedro_benchmarks/benchmark_kedrodatacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_kedrodatacatalog.py | Apache-2.0 |
def time_exists(self):
"""Benchmark the time to check if datasets exist"""
for i in range(1,1001):
self.catalog.exists(f"dataset_{i}") | Benchmark the time to check if datasets exist | time_exists | python | kedro-org/kedro | kedro_benchmarks/benchmark_kedrodatacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_kedrodatacatalog.py | Apache-2.0 |
def time_release(self):
"""Benchmark the time to release datasets"""
for i in range(1,1001):
self.catalog.release(f"dataset_{i}") | Benchmark the time to release datasets | time_release | python | kedro-org/kedro | kedro_benchmarks/benchmark_kedrodatacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_kedrodatacatalog.py | Apache-2.0 |
def time_list(self):
"""Benchmark the time to list all datasets"""
self.catalog.list() | Benchmark the time to list all datasets | time_list | python | kedro-org/kedro | kedro_benchmarks/benchmark_kedrodatacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_kedrodatacatalog.py | Apache-2.0 |
def time_shallow_copy(self):
"""Benchmark the time to shallow copy the catalog"""
# Will be removed
self.catalog.shallow_copy() | Benchmark the time to shallow copy the catalog | time_shallow_copy | python | kedro-org/kedro | kedro_benchmarks/benchmark_kedrodatacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_kedrodatacatalog.py | Apache-2.0 |
def time_resolve_factory(self):
"""Benchmark the time to resolve factory"""
for i in range(1,1001):
self.catalog.get(f"dataset_factory_{i}") | Benchmark the time to resolve factory | time_resolve_factory | python | kedro-org/kedro | kedro_benchmarks/benchmark_kedrodatacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_kedrodatacatalog.py | Apache-2.0 |
def time_add_runtime_patterns(self):
"""Benchmark the time to add runtime patterns"""
for i in range(1,1001):
self.catalog.config_resolver.add_runtime_patterns(runtime_patterns) | Benchmark the time to add runtime patterns | time_add_runtime_patterns | python | kedro-org/kedro | kedro_benchmarks/benchmark_kedrodatacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_kedrodatacatalog.py | Apache-2.0 |
def time_init(self):
"""Benchmark the time to initialize the catalog"""
DataCatalog.from_config(base_catalog) | Benchmark the time to initialize the catalog | time_init | python | kedro-org/kedro | kedro_benchmarks/benchmark_datacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_datacatalog.py | Apache-2.0 |
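`DataCatalog.from_config` takes a mapping of dataset names to configuration dictionaries. A hedged sketch of what `base_catalog` could look like (the dataset names and file path here are assumptions, not the repository's actual fixture):

```python
# Hypothetical catalog config; kedro and kedro-datasets are needed to instantiate it.
from kedro.io import DataCatalog

base_catalog = {
    f"dataset_{i}": {"type": "pandas.CSVDataset", "filepath": "data.csv"}
    for i in range(1, 1001)
}
catalog = DataCatalog.from_config(base_catalog)
```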
def time_save(self):
"""Benchmark the time to save datasets"""
for i in range(1,1001):
self.catalog.save(f"dataset_{i}", self.dataframe) | Benchmark the time to save datasets | time_save | python | kedro-org/kedro | kedro_benchmarks/benchmark_datacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_datacatalog.py | Apache-2.0 |
def time_load(self):
"""Benchmark the time to load datasets"""
for i in range(1,1001):
self.catalog.load(f"dataset_load_{i}") | Benchmark the time to load datasets | time_load | python | kedro-org/kedro | kedro_benchmarks/benchmark_datacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_datacatalog.py | Apache-2.0 |
def time_exists(self):
"""Benchmark the time to check if datasets exist"""
for i in range(1,1001):
self.catalog.exists(f"dataset_{i}") | Benchmark the time to check if datasets exist | time_exists | python | kedro-org/kedro | kedro_benchmarks/benchmark_datacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_datacatalog.py | Apache-2.0 |
def time_release(self):
"""Benchmark the time to release datasets"""
for i in range(1,1001):
self.catalog.release(f"dataset_{i}") | Benchmark the time to release datasets | time_release | python | kedro-org/kedro | kedro_benchmarks/benchmark_datacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_datacatalog.py | Apache-2.0 |
def time_add_all(self):
"""Benchmark the time to add all datasets"""
# Have to initialise a new DataCatalog to avoid failing with DatasetAlreadyExistsError
catalog = DataCatalog.from_config(base_catalog)
catalog.add_all(self.datasets) | Benchmark the time to add all datasets | time_add_all | python | kedro-org/kedro | kedro_benchmarks/benchmark_datacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_datacatalog.py | Apache-2.0 |
def time_feed_dict(self):
"""Benchmark the time to add feed dict"""
# Have to initialise a new DataCatalog to avoid failing with DatasetAlreadyExistsError
catalog = DataCatalog.from_config(base_catalog)
catalog.add_feed_dict(self.feed_dict) | Benchmark the time to add feed dict | time_feed_dict | python | kedro-org/kedro | kedro_benchmarks/benchmark_datacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_datacatalog.py | Apache-2.0 |
def time_list(self):
"""Benchmark the time to list all datasets"""
self.catalog.list() | Benchmark the time to list all datasets | time_list | python | kedro-org/kedro | kedro_benchmarks/benchmark_datacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_datacatalog.py | Apache-2.0 |
def time_shallow_copy(self):
"""Benchmark the time to shallow copy the catalog"""
self.catalog.shallow_copy() | Benchmark the time to shallow copy the catalog | time_shallow_copy | python | kedro-org/kedro | kedro_benchmarks/benchmark_datacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_datacatalog.py | Apache-2.0 |
def time_resolve_factory(self):
"""Benchmark the time to resolve factory"""
for i in range(1,1001):
self.catalog._get_dataset(f"dataset_factory_{i}") | Benchmark the time to resolve factory | time_resolve_factory | python | kedro-org/kedro | kedro_benchmarks/benchmark_datacatalog.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_datacatalog.py | Apache-2.0 |
def create_data_catalog():
"""
    Use the dataset factory pattern to make sure the benchmark covers the slowest path.
"""
catalog_conf = """
'output_{pattern}':
type: pandas.CSVDataset
filepath: benchmarks/data/'{pattern}.csv'
'numpy_{pattern}':
type: pickle.PickleDataset
filepath: benchmarks/data/'{pattern}.pkl'
'{catch_all_dataset_pattern}':
type: pandas.CSVDataset
filepath: benchmarks/data/data.csv
"""
catalog_conf = yaml.safe_load(catalog_conf)
catalog = DataCatalog.from_config(catalog_conf)
    return catalog | Use the dataset factory pattern to make sure the benchmark covers the slowest path. | create_data_catalog | python | kedro-org/kedro | kedro_benchmarks/benchmark_runner.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_runner.py | Apache-2.0 |
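Resolving a dataset name against the factory patterns declared above might look like the sketch below (an assumption-laden example: `output_example` is a made-up name chosen to match `'output_{pattern}'`, and `kedro-datasets` must be installed to materialise the `pandas.CSVDataset`):

```python
# Hypothetical resolution of a dataset factory pattern (names are illustrative).
catalog = create_data_catalog()
dataset = catalog._get_dataset("output_example")  # matches 'output_{pattern}'
print(type(dataset).__name__)  # expected: CSVDataset
```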
def time_io_bound_runner(self, runner):
"""IO bound pipeline"""
catalog = create_data_catalog()
test_pipeline = create_io_bound_pipeline()
runner_module = importlib.import_module("kedro.runner")
runner_obj = getattr(runner_module, runner)()
runner_obj.run(test_pipeline, catalog=catalog) | IO bound pipeline | time_io_bound_runner | python | kedro-org/kedro | kedro_benchmarks/benchmark_runner.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_runner.py | Apache-2.0 |
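The `runner` argument is a class name resolved against `kedro.runner` via importlib, which lets asv parameterise the benchmark over several runner implementations. A hedged sketch of that parameterisation (the exact list used in the repository may differ):

```python
# Illustrative asv parameterisation; these class names do exist in kedro.runner.
params = ["SequentialRunner", "ThreadRunner", "ParallelRunner"]
param_names = ["runner"]
```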
def time_loading_catalog(self):
"""Benchmark the time to load the catalog"""
self.loader["catalog"] | Benchmark the time to load the catalog | time_loading_catalog | python | kedro-org/kedro | kedro_benchmarks/benchmark_ocl.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_ocl.py | Apache-2.0 |
def time_loading_parameters(self):
"""Benchmark the time to load the parameters"""
self.loader["parameters"] | Benchmark the time to load the parameters | time_loading_parameters | python | kedro-org/kedro | kedro_benchmarks/benchmark_ocl.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_ocl.py | Apache-2.0 |
def time_loading_globals(self):
"""Benchmark the time to load global configuration"""
self.loader["globals"] | Benchmark the time to load global configuration | time_loading_globals | python | kedro-org/kedro | kedro_benchmarks/benchmark_ocl.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_ocl.py | Apache-2.0 |
def time_loading_parameters_runtime(self):
"""Benchmark the time to load parameters with runtime configuration"""
self.loader.runtime_params = _generate_params(2001, 2002)
self.loader["parameters"] | Benchmark the time to load parameters with runtime configuration | time_loading_parameters_runtime | python | kedro-org/kedro | kedro_benchmarks/benchmark_ocl.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_ocl.py | Apache-2.0 |
def time_merge_soft_strategy(self):
"""Benchmark the time to load and soft-merge configurations"""
self.loader.merge_strategy = {"catalog": "soft"}
self.loader["catalog"] | Benchmark the time to load and soft-merge configurations | time_merge_soft_strategy | python | kedro-org/kedro | kedro_benchmarks/benchmark_ocl.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_ocl.py | Apache-2.0 |
def time_loading_catalog(self):
"""Benchmark the time to load the catalog"""
self.loader["catalog"] | Benchmark the time to load the catalog | time_loading_catalog | python | kedro-org/kedro | kedro_benchmarks/benchmark_ocl.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_ocl.py | Apache-2.0 |
def time_loading_parameters(self):
"""Benchmark the time to load parameters with global interpolation"""
self.loader["parameters"] | Benchmark the time to load parameters with global interpolation | time_loading_parameters | python | kedro-org/kedro | kedro_benchmarks/benchmark_ocl.py | https://github.com/kedro-org/kedro/blob/master/kedro_benchmarks/benchmark_ocl.py | Apache-2.0 |
def run_cli(args:str = "") -> str:
"""Pass arguments to Sherlock as a normal user on the command line"""
# Adapt for platform differences (Windows likes to be special)
if platform.system() == "Windows":
command:str = f"py -m sherlock_project {args}"
else:
command:str = f"sherlock {args}"
proc_out:str = ""
try:
proc_out = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT)
return proc_out.decode()
except subprocess.CalledProcessError as e:
raise InteractivesSubprocessError(e.output.decode()) | Pass arguments to Sherlock as a normal user on the command line | run_cli | python | sherlock-project/sherlock | tests/sherlock_interactives.py | https://github.com/sherlock-project/sherlock/blob/master/tests/sherlock_interactives.py | MIT |
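A usage sketch for this test helper (assumes Sherlock is installed, so either the `sherlock` entry point or `py -m sherlock_project` is available):

```python
# Hypothetical test-side usage of run_cli.
output = run_cli("--version")
print(output)

# Invalid arguments surface as InteractivesSubprocessError carrying the CLI output.
try:
    run_cli("--definitely-not-a-flag")
except InteractivesSubprocessError as err:
    print(err)
```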
def walk_sherlock_for_files_with(pattern: str) -> list[str]:
"""Check all files within the Sherlock package for matching patterns"""
pattern:re.Pattern = re.compile(pattern)
matching_files:list[str] = []
for root, dirs, files in os.walk("sherlock_project"):
for file in files:
file_path = os.path.join(root,file)
if "__pycache__" in file_path:
continue
with open(file_path, 'r', errors='ignore') as f:
if pattern.search(f.read()):
matching_files.append(file_path)
return matching_files | Check all files within the Sherlock package for matching patterns | walk_sherlock_for_files_with | python | sherlock-project/sherlock | tests/sherlock_interactives.py | https://github.com/sherlock-project/sherlock/blob/master/tests/sherlock_interactives.py | MIT |
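An illustrative call (assumes it runs from the repository root so `os.walk("sherlock_project")` finds the package; the pattern is an example that matches the `requests.get` call visible in `sites.py` further down this file):

```python
# Example invocation of the helper above (pattern and printed result are illustrative).
hits = walk_sherlock_for_files_with(r"requests\.get\(")
print(hits)  # e.g. ['sherlock_project/sites.py', ...]
```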
def test_validate_manifest_against_local_schema():
"""Ensures that the manifest matches the local schema, for situations where the schema is being changed."""
json_relative: str = '../sherlock_project/resources/data.json'
schema_relative: str = '../sherlock_project/resources/data.schema.json'
json_path: str = os.path.join(os.path.dirname(__file__), json_relative)
schema_path: str = os.path.join(os.path.dirname(__file__), schema_relative)
with open(json_path, 'r') as f:
jsondat = json.load(f)
with open(schema_path, 'r') as f:
schemadat = json.load(f)
validate(instance=jsondat, schema=schemadat) | Ensures that the manifest matches the local schema, for situations where the schema is being changed. | test_validate_manifest_against_local_schema | python | sherlock-project/sherlock | tests/test_manifest.py | https://github.com/sherlock-project/sherlock/blob/master/tests/test_manifest.py | MIT |
def test_validate_manifest_against_remote_schema(remote_schema):
"""Ensures that the manifest matches the remote schema, so as to not unexpectedly break clients."""
json_relative: str = '../sherlock_project/resources/data.json'
json_path: str = os.path.join(os.path.dirname(__file__), json_relative)
with open(json_path, 'r') as f:
jsondat = json.load(f)
validate(instance=jsondat, schema=remote_schema) | Ensures that the manifest matches the remote schema, so as to not unexpectedly break clients. | test_validate_manifest_against_remote_schema | python | sherlock-project/sherlock | tests/test_manifest.py | https://github.com/sherlock-project/sherlock/blob/master/tests/test_manifest.py | MIT |
def response_time(resp, *args, **kwargs):
"""Response Time Hook.
Keyword Arguments:
resp -- Response object.
args -- Arguments.
kwargs -- Keyword arguments.
Return Value:
Nothing.
"""
resp.elapsed = monotonic() - start
return | Response Time Hook.
Keyword Arguments:
resp -- Response object.
args -- Arguments.
kwargs -- Keyword arguments.
Return Value:
Nothing. | request.response_time | python | sherlock-project/sherlock | sherlock_project/sherlock.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/sherlock.py | MIT |
def request(self, method, url, hooks=None, *args, **kwargs):
"""Request URL.
This extends the FuturesSession request method to calculate a response
time metric to each request.
It is taken (almost) directly from the following Stack Overflow answer:
https://github.com/ross/requests-futures#working-in-the-background
Keyword Arguments:
self -- This object.
method -- String containing method desired for request.
url -- String containing URL for request.
hooks -- Dictionary containing hooks to execute after
request finishes.
args -- Arguments.
kwargs -- Keyword arguments.
Return Value:
Request object.
"""
# Record the start time for the request.
if hooks is None:
hooks = {}
start = monotonic()
def response_time(resp, *args, **kwargs):
"""Response Time Hook.
Keyword Arguments:
resp -- Response object.
args -- Arguments.
kwargs -- Keyword arguments.
Return Value:
Nothing.
"""
resp.elapsed = monotonic() - start
return
# Install hook to execute when response completes.
# Make sure that the time measurement hook is first, so we will not
# track any later hook's execution time.
try:
if isinstance(hooks["response"], list):
hooks["response"].insert(0, response_time)
elif isinstance(hooks["response"], tuple):
# Convert tuple to list and insert time measurement hook first.
hooks["response"] = list(hooks["response"])
hooks["response"].insert(0, response_time)
else:
# Must have previously contained a single hook function,
# so convert to list.
hooks["response"] = [response_time, hooks["response"]]
except KeyError:
# No response hook was already defined, so install it ourselves.
hooks["response"] = [response_time]
return super(SherlockFuturesSession, self).request(
method, url, hooks=hooks, *args, **kwargs
) | Request URL.
This extends the FuturesSession request method to calculate a response
time metric to each request.
It is taken (almost) directly from the following Stack Overflow answer:
https://github.com/ross/requests-futures#working-in-the-background
Keyword Arguments:
self -- This object.
method -- String containing method desired for request.
url -- String containing URL for request.
hooks -- Dictionary containing hooks to execute after
request finishes.
args -- Arguments.
kwargs -- Keyword arguments.
Return Value:
Request object. | request | python | sherlock-project/sherlock | sherlock_project/sherlock.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/sherlock.py | MIT |
def check_for_parameter(username):
"""checks if {?} exists in the username
if exist it means that sherlock is looking for more multiple username"""
return "{?}" in username | checks if {?} exists in the username
if exist it means that sherlock is looking for more multiple username | check_for_parameter | python | sherlock-project/sherlock | sherlock_project/sherlock.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/sherlock.py | MIT |
def multiple_usernames(username):
"""replace the parameter with with symbols and return a list of usernames"""
allUsernames = []
for i in checksymbols:
allUsernames.append(username.replace("{?}", i))
    return allUsernames | Replace the parameter with symbols and return a list of usernames | multiple_usernames | python | sherlock-project/sherlock | sherlock_project/sherlock.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/sherlock.py | MIT |
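A sketch of the expansion (assumption: `checksymbols` is a module-level list defined elsewhere in sherlock.py; the values shown here are plausible placeholders):

```python
# Assumed definition; the real list lives elsewhere in sherlock.py.
checksymbols = ["_", "-", "."]

print(multiple_usernames("john{?}doe"))
# -> ['john_doe', 'john-doe', 'john.doe']
```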
def sherlock(
username: str,
site_data: dict,
query_notify: QueryNotify,
tor: bool = False,
unique_tor: bool = False,
dump_response: bool = False,
proxy: Optional[str] = None,
timeout: int = 60,
):
"""Run Sherlock Analysis.
Checks for existence of username on various social media sites.
Keyword Arguments:
username -- String indicating username that report
should be created against.
site_data -- Dictionary containing all of the site data.
query_notify -- Object with base type of QueryNotify().
This will be used to notify the caller about
query results.
tor -- Boolean indicating whether to use a tor circuit for the requests.
unique_tor -- Boolean indicating whether to use a new tor circuit for each request.
proxy -- String indicating the proxy URL
timeout -- Time in seconds to wait before timing out request.
Default is 60 seconds.
Return Value:
Dictionary containing results from report. Key of dictionary is the name
of the social network site, and the value is another dictionary with
the following keys:
url_main: URL of main site.
url_user: URL of user on site (if account exists).
status: QueryResult() object indicating results of test for
account existence.
http_status: HTTP status code of query which checked for existence on
site.
response_text: Text that came back from request. May be None if
there was an HTTP error when checking for existence.
"""
# Notify caller that we are starting the query.
query_notify.start(username)
# Create session based on request methodology
if tor or unique_tor:
try:
from torrequest import TorRequest # noqa: E402
except ImportError:
print("Important!")
print("> --tor and --unique-tor are now DEPRECATED, and may be removed in a future release of Sherlock.")
print("> If you've installed Sherlock via pip, you can include the optional dependency via `pip install 'sherlock-project[tor]'`.")
print("> Other packages should refer to their documentation, or install it separately with `pip install torrequest`.\n")
sys.exit(query_notify.finish())
print("Important!")
print("> --tor and --unique-tor are now DEPRECATED, and may be removed in a future release of Sherlock.")
# Requests using Tor obfuscation
try:
underlying_request = TorRequest()
except OSError:
print("Tor not found in system path. Unable to continue.\n")
sys.exit(query_notify.finish())
underlying_session = underlying_request.session
else:
# Normal requests
underlying_session = requests.session()
underlying_request = requests.Request()
# Limit number of workers to 20.
# This is probably vastly overkill.
if len(site_data) >= 20:
max_workers = 20
else:
max_workers = len(site_data)
# Create multi-threaded session for all requests.
session = SherlockFuturesSession(
max_workers=max_workers, session=underlying_session
)
# Results from analysis of all sites
results_total = {}
# First create futures for all requests. This allows for the requests to run in parallel
for social_network, net_info in site_data.items():
# Results from analysis of this specific site
results_site = {"url_main": net_info.get("urlMain")}
# Record URL of main site
# A user agent is needed because some sites don't return the correct
# information since they think that we are bots (Which we actually are...)
headers = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:129.0) Gecko/20100101 Firefox/129.0",
}
if "headers" in net_info:
# Override/append any extra headers required by a given site.
headers.update(net_info["headers"])
# URL of user on site (if it exists)
url = interpolate_string(net_info["url"], username.replace(' ', '%20'))
# Don't make request if username is invalid for the site
regex_check = net_info.get("regexCheck")
if regex_check and re.search(regex_check, username) is None:
# No need to do the check at the site: this username is not allowed.
results_site["status"] = QueryResult(
username, social_network, url, QueryStatus.ILLEGAL
)
results_site["url_user"] = ""
results_site["http_status"] = ""
results_site["response_text"] = ""
query_notify.update(results_site["status"])
else:
# URL of user on site (if it exists)
results_site["url_user"] = url
url_probe = net_info.get("urlProbe")
request_method = net_info.get("request_method")
request_payload = net_info.get("request_payload")
request = None
if request_method is not None:
if request_method == "GET":
request = session.get
elif request_method == "HEAD":
request = session.head
elif request_method == "POST":
request = session.post
elif request_method == "PUT":
request = session.put
else:
raise RuntimeError(f"Unsupported request_method for {url}")
if request_payload is not None:
request_payload = interpolate_string(request_payload, username)
if url_probe is None:
# Probe URL is normal one seen by people out on the web.
url_probe = url
else:
# There is a special URL for probing existence separate
# from where the user profile normally can be found.
url_probe = interpolate_string(url_probe, username)
if request is None:
if net_info["errorType"] == "status_code":
# In most cases when we are detecting by status code,
# it is not necessary to get the entire body: we can
# detect fine with just the HEAD response.
request = session.head
else:
# Either this detect method needs the content associated
# with the GET response, or this specific website will
# not respond properly unless we request the whole page.
request = session.get
if net_info["errorType"] == "response_url":
# Site forwards request to a different URL if username not
# found. Disallow the redirect so we can capture the
# http status from the original URL request.
allow_redirects = False
else:
# Allow whatever redirect that the site wants to do.
# The final result of the request will be what is available.
allow_redirects = True
# This future starts running the request in a new thread, doesn't block the main thread
if proxy is not None:
proxies = {"http": proxy, "https": proxy}
future = request(
url=url_probe,
headers=headers,
proxies=proxies,
allow_redirects=allow_redirects,
timeout=timeout,
json=request_payload,
)
else:
future = request(
url=url_probe,
headers=headers,
allow_redirects=allow_redirects,
timeout=timeout,
json=request_payload,
)
# Store future in data for access later
net_info["request_future"] = future
        # Reset identity for tor (if needed)
if unique_tor:
underlying_request.reset_identity()
# Add this site's results into final dictionary with all the other results.
results_total[social_network] = results_site
# Open the file containing account links
# Core logic: If tor requests, make them here. If multi-threaded requests, wait for responses
for social_network, net_info in site_data.items():
# Retrieve results again
results_site = results_total.get(social_network)
# Retrieve other site information again
url = results_site.get("url_user")
status = results_site.get("status")
if status is not None:
# We have already determined the user doesn't exist here
continue
# Get the expected error type
error_type = net_info["errorType"]
# Retrieve future and ensure it has finished
future = net_info["request_future"]
r, error_text, exception_text = get_response(
request_future=future, error_type=error_type, social_network=social_network
)
# Get response time for response of our request.
try:
response_time = r.elapsed
except AttributeError:
response_time = None
# Attempt to get request information
try:
http_status = r.status_code
except Exception:
http_status = "?"
try:
response_text = r.text.encode(r.encoding or "UTF-8")
except Exception:
response_text = ""
query_status = QueryStatus.UNKNOWN
error_context = None
# As WAFs advance and evolve, they will occasionally block Sherlock and
# lead to false positives and negatives. Fingerprints should be added
# here to filter results that fail to bypass WAFs. Fingerprints should
# be highly targetted. Comment at the end of each fingerprint to
# indicate target and date fingerprinted.
WAFHitMsgs = [
r'.loading-spinner{visibility:hidden}body.no-js .challenge-running{display:none}body.dark{background-color:#222;color:#d9d9d9}body.dark a{color:#fff}body.dark a:hover{color:#ee730a;text-decoration:underline}body.dark .lds-ring div{border-color:#999 transparent transparent}body.dark .font-red{color:#b20f03}body.dark', # 2024-05-13 Cloudflare
r'<span id="challenge-error-text">', # 2024-11-11 Cloudflare error page
r'AwsWafIntegration.forceRefreshToken', # 2024-11-11 Cloudfront (AWS)
r'{return l.onPageView}}),Object.defineProperty(r,"perimeterxIdentifiers",{enumerable:' # 2024-04-09 PerimeterX / Human Security
]
if error_text is not None:
error_context = error_text
elif any(hitMsg in r.text for hitMsg in WAFHitMsgs):
query_status = QueryStatus.WAF
elif error_type == "message":
# error_flag True denotes no error found in the HTML
# error_flag False denotes error found in the HTML
error_flag = True
errors = net_info.get("errorMsg")
# errors will hold the error message
# it can be string or list
# by isinstance method we can detect that
# and handle the case for strings as normal procedure
# and if its list we can iterate the errors
if isinstance(errors, str):
# Checks if the error message is in the HTML
# if error is present we will set flag to False
if errors in r.text:
error_flag = False
else:
# If it's list, it will iterate all the error message
for error in errors:
if error in r.text:
error_flag = False
break
if error_flag:
query_status = QueryStatus.CLAIMED
else:
query_status = QueryStatus.AVAILABLE
elif error_type == "status_code":
error_codes = net_info.get("errorCode")
query_status = QueryStatus.CLAIMED
# Type consistency, allowing for both singlets and lists in manifest
if isinstance(error_codes, int):
error_codes = [error_codes]
if error_codes is not None and r.status_code in error_codes:
query_status = QueryStatus.AVAILABLE
elif r.status_code >= 300 or r.status_code < 200:
query_status = QueryStatus.AVAILABLE
elif error_type == "response_url":
# For this detection method, we have turned off the redirect.
# So, there is no need to check the response URL: it will always
# match the request. Instead, we will ensure that the response
# code indicates that the request was successful (i.e. no 404, or
# forward to some odd redirect).
if 200 <= r.status_code < 300:
query_status = QueryStatus.CLAIMED
else:
query_status = QueryStatus.AVAILABLE
else:
# It should be impossible to ever get here...
raise ValueError(
f"Unknown Error Type '{error_type}' for " f"site '{social_network}'"
)
if dump_response:
print("+++++++++++++++++++++")
print(f"TARGET NAME : {social_network}")
print(f"USERNAME : {username}")
print(f"TARGET URL : {url}")
print(f"TEST METHOD : {error_type}")
try:
print(f"STATUS CODES : {net_info['errorCode']}")
except KeyError:
pass
print("Results...")
try:
print(f"RESPONSE CODE : {r.status_code}")
except Exception:
pass
try:
print(f"ERROR TEXT : {net_info['errorMsg']}")
except KeyError:
pass
print(">>>>> BEGIN RESPONSE TEXT")
try:
print(r.text)
except Exception:
pass
print("<<<<< END RESPONSE TEXT")
print("VERDICT : " + str(query_status))
print("+++++++++++++++++++++")
# Notify caller about results of query.
result = QueryResult(
username=username,
site_name=social_network,
site_url_user=url,
status=query_status,
query_time=response_time,
context=error_context,
)
query_notify.update(result)
# Save status of request
results_site["status"] = result
# Save results from request
results_site["http_status"] = http_status
results_site["response_text"] = response_text
# Add this site's results into final dictionary with all of the other results.
results_total[social_network] = results_site
return results_total | Run Sherlock Analysis.
Checks for existence of username on various social media sites.
Keyword Arguments:
username -- String indicating username that report
should be created against.
site_data -- Dictionary containing all of the site data.
query_notify -- Object with base type of QueryNotify().
This will be used to notify the caller about
query results.
tor -- Boolean indicating whether to use a tor circuit for the requests.
unique_tor -- Boolean indicating whether to use a new tor circuit for each request.
proxy -- String indicating the proxy URL
timeout -- Time in seconds to wait before timing out request.
Default is 60 seconds.
Return Value:
Dictionary containing results from report. Key of dictionary is the name
of the social network site, and the value is another dictionary with
the following keys:
url_main: URL of main site.
url_user: URL of user on site (if account exists).
status: QueryResult() object indicating results of test for
account existence.
http_status: HTTP status code of query which checked for existence on
site.
response_text: Text that came back from request. May be None if
there was an HTTP error when checking for existence. | sherlock | python | sherlock-project/sherlock | sherlock_project/sherlock.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/sherlock.py | MIT |
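Putting the pieces together, a hypothetical end-to-end call (module paths are taken from elsewhere in this file; the username and the result handling are illustrative, and network access is required):

```python
# Hypothetical invocation sketch of sherlock().
from sherlock_project.sites import SitesInformation
from sherlock_project.notify import QueryNotifyPrint
from sherlock_project.result import QueryStatus

sites = SitesInformation()  # loads the hosted data.json by default
site_data = {site.name: site.information for site in sites}
notify = QueryNotifyPrint(print_all=False)

results = sherlock("example_user", site_data, notify, timeout=30)
claimed = [name for name, res in results.items()
           if res["status"].status == QueryStatus.CLAIMED]
print(claimed)
```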
def timeout_check(value):
"""Check Timeout Argument.
Checks timeout for validity.
Keyword Arguments:
value -- Time in seconds to wait before timing out request.
Return Value:
Floating point number representing the time (in seconds) that should be
used for the timeout.
    NOTE: Will raise an exception if the timeout is invalid.
"""
float_value = float(value)
if float_value <= 0:
raise ArgumentTypeError(
f"Invalid timeout value: {value}. Timeout must be a positive number."
)
return float_value | Check Timeout Argument.
Checks timeout for validity.
Keyword Arguments:
value -- Time in seconds to wait before timing out request.
Return Value:
Floating point number representing the time (in seconds) that should be
used for the timeout.
NOTE: Will raise an exception if the timeout is invalid. | timeout_check | python | sherlock-project/sherlock | sherlock_project/sherlock.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/sherlock.py | MIT |
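The behaviour follows directly from the definition above (argparse passes the raw command-line string):

```python
from argparse import ArgumentTypeError

assert timeout_check("30") == 30.0
try:
    timeout_check("-1")
except ArgumentTypeError as err:
    print(err)  # Invalid timeout value: -1. Timeout must be a positive number.
```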
def handler(signal_received, frame):
"""Exit gracefully without throwing errors
Source: https://www.devdungeon.com/content/python-catch-sigint-ctrl-c
"""
sys.exit(0) | Exit gracefully without throwing errors
Source: https://www.devdungeon.com/content/python-catch-sigint-ctrl-c | handler | python | sherlock-project/sherlock | sherlock_project/sherlock.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/sherlock.py | MIT |
def __str__(self):
"""Convert Object To String.
Keyword Arguments:
self -- This object.
Return Value:
Nicely formatted string to get information about this object.
"""
return self.value | Convert Object To String.
Keyword Arguments:
self -- This object.
Return Value:
Nicely formatted string to get information about this object. | __str__ | python | sherlock-project/sherlock | sherlock_project/result.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/result.py | MIT |
def __init__(self, username, site_name, site_url_user, status,
query_time=None, context=None):
"""Create Query Result Object.
Contains information about a specific method of detecting usernames on
a given type of web sites.
Keyword Arguments:
self -- This object.
username -- String indicating username that query result
was about.
site_name -- String which identifies site.
site_url_user -- String containing URL for username on site.
NOTE: The site may or may not exist: this
just indicates what the name would
be, if it existed.
status -- Enumeration of type QueryStatus() indicating
the status of the query.
query_time -- Time (in seconds) required to perform query.
Default of None.
context -- String indicating any additional context
about the query. For example, if there was
an error, this might indicate the type of
error that occurred.
Default of None.
Return Value:
Nothing.
"""
self.username = username
self.site_name = site_name
self.site_url_user = site_url_user
self.status = status
self.query_time = query_time
self.context = context
return | Create Query Result Object.
Contains information about a specific method of detecting usernames on
a given type of web sites.
Keyword Arguments:
self -- This object.
username -- String indicating username that query result
was about.
site_name -- String which identifies site.
site_url_user -- String containing URL for username on site.
NOTE: The site may or may not exist: this
just indicates what the name would
be, if it existed.
status -- Enumeration of type QueryStatus() indicating
the status of the query.
query_time -- Time (in seconds) required to perform query.
Default of None.
context -- String indicating any additional context
about the query. For example, if there was
an error, this might indicate the type of
error that occurred.
Default of None.
Return Value:
Nothing. | __init__ | python | sherlock-project/sherlock | sherlock_project/result.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/result.py | MIT |
def __str__(self):
"""Convert Object To String.
Keyword Arguments:
self -- This object.
Return Value:
Nicely formatted string to get information about this object.
"""
status = str(self.status)
if self.context is not None:
# There is extra context information available about the results.
# Append it to the normal response text.
status += f" ({self.context})"
return status | Convert Object To String.
Keyword Arguments:
self -- This object.
Return Value:
Nicely formatted string to get information about this object. | __str__ | python | sherlock-project/sherlock | sherlock_project/result.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/result.py | MIT |
def __init__(self, name, url_home, url_username_format, username_claimed,
information, is_nsfw, username_unclaimed=secrets.token_urlsafe(10)):
"""Create Site Information Object.
Contains information about a specific website.
Keyword Arguments:
self -- This object.
name -- String which identifies site.
url_home -- String containing URL for home of site.
url_username_format -- String containing URL for Username format
on site.
NOTE: The string should contain the
token "{}" where the username should
be substituted. For example, a string
of "https://somesite.com/users/{}"
indicates that the individual
usernames would show up under the
"https://somesite.com/users/" area of
the website.
username_claimed -- String containing username which is known
to be claimed on website.
username_unclaimed -- String containing username which is known
to be unclaimed on website.
information -- Dictionary containing all known information
about website.
NOTE: Custom information about how to
actually detect the existence of the
username will be included in this
dictionary. This information will
be needed by the detection method,
but it is only recorded in this
object for future use.
is_nsfw -- Boolean indicating if site is Not Safe For Work.
Return Value:
Nothing.
"""
self.name = name
self.url_home = url_home
self.url_username_format = url_username_format
self.username_claimed = username_claimed
self.username_unclaimed = secrets.token_urlsafe(32)
self.information = information
self.is_nsfw = is_nsfw
return | Create Site Information Object.
Contains information about a specific website.
Keyword Arguments:
self -- This object.
name -- String which identifies site.
url_home -- String containing URL for home of site.
url_username_format -- String containing URL for Username format
on site.
NOTE: The string should contain the
token "{}" where the username should
be substituted. For example, a string
of "https://somesite.com/users/{}"
indicates that the individual
usernames would show up under the
"https://somesite.com/users/" area of
the website.
username_claimed -- String containing username which is known
to be claimed on website.
username_unclaimed -- String containing username which is known
to be unclaimed on website.
information -- Dictionary containing all known information
about website.
NOTE: Custom information about how to
actually detect the existence of the
username will be included in this
dictionary. This information will
be needed by the detection method,
but it is only recorded in this
object for future use.
is_nsfw -- Boolean indicating if site is Not Safe For Work.
Return Value:
Nothing. | __init__ | python | sherlock-project/sherlock | sherlock_project/sites.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/sites.py | MIT |
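The `{}` token substitution described for `url_username_format` can be shown in isolation; the URL below is the docstring's own example (Sherlock's real substitution goes through the `interpolate_string` helper seen earlier in sherlock.py):

```python
# Token substitution example using the docstring's sample format string.
url_username_format = "https://somesite.com/users/{}"
url_user = url_username_format.format("alice")
print(url_user)  # https://somesite.com/users/alice
```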
def __str__(self):
"""Convert Object To String.
Keyword Arguments:
self -- This object.
Return Value:
Nicely formatted string to get information about this object.
"""
return f"{self.name} ({self.url_home})" | Convert Object To String.
Keyword Arguments:
self -- This object.
Return Value:
Nicely formatted string to get information about this object. | __str__ | python | sherlock-project/sherlock | sherlock_project/sites.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/sites.py | MIT |
def __init__(self, data_file_path=None):
"""Create Sites Information Object.
Contains information about all supported websites.
Keyword Arguments:
self -- This object.
data_file_path -- String which indicates path to data file.
The file name must end in ".json".
There are 3 possible formats:
* Absolute File Format
For example, "c:/stuff/data.json".
* Relative File Format
The current working directory is used
as the context.
For example, "data.json".
* URL Format
For example,
"https://example.com/data.json", or
"http://example.com/data.json".
An exception will be thrown if the path
to the data file is not in the expected
format, or if there was any problem loading
the file.
If this option is not specified, then a
default site list will be used.
Return Value:
Nothing.
"""
if not data_file_path:
# The default data file is the live data.json which is in the GitHub repo. The reason why we are using
# this instead of the local one is so that the user has the most up-to-date data. This prevents
# users from creating issue about false positives which has already been fixed or having outdated data
data_file_path = "https://raw.githubusercontent.com/sherlock-project/sherlock/master/sherlock_project/resources/data.json"
# Ensure that specified data file has correct extension.
if not data_file_path.lower().endswith(".json"):
raise FileNotFoundError(f"Incorrect JSON file extension for data file '{data_file_path}'.")
# if "http://" == data_file_path[:7].lower() or "https://" == data_file_path[:8].lower():
if data_file_path.lower().startswith("http"):
# Reference is to a URL.
try:
response = requests.get(url=data_file_path)
except Exception as error:
raise FileNotFoundError(
f"Problem while attempting to access data file URL '{data_file_path}': {error}"
)
if response.status_code != 200:
raise FileNotFoundError(f"Bad response while accessing "
f"data file URL '{data_file_path}'."
)
try:
site_data = response.json()
except Exception as error:
raise ValueError(
f"Problem parsing json contents at '{data_file_path}': {error}."
)
else:
# Reference is to a file.
try:
with open(data_file_path, "r", encoding="utf-8") as file:
try:
site_data = json.load(file)
except Exception as error:
raise ValueError(
f"Problem parsing json contents at '{data_file_path}': {error}."
)
except FileNotFoundError:
raise FileNotFoundError(f"Problem while attempting to access "
f"data file '{data_file_path}'."
)
site_data.pop('$schema', None)
self.sites = {}
# Add all site information from the json file to internal site list.
for site_name in site_data:
try:
self.sites[site_name] = \
SiteInformation(site_name,
site_data[site_name]["urlMain"],
site_data[site_name]["url"],
site_data[site_name]["username_claimed"],
site_data[site_name],
site_data[site_name].get("isNSFW",False)
)
except KeyError as error:
raise ValueError(
f"Problem parsing json contents at '{data_file_path}': Missing attribute {error}."
)
except TypeError:
print(f"Encountered TypeError parsing json contents for target '{site_name}' at {data_file_path}\nSkipping target.\n")
return | Create Sites Information Object.
Contains information about all supported websites.
Keyword Arguments:
self -- This object.
data_file_path -- String which indicates path to data file.
The file name must end in ".json".
There are 3 possible formats:
* Absolute File Format
For example, "c:/stuff/data.json".
* Relative File Format
The current working directory is used
as the context.
For example, "data.json".
* URL Format
For example,
"https://example.com/data.json", or
"http://example.com/data.json".
An exception will be thrown if the path
to the data file is not in the expected
format, or if there was any problem loading
the file.
If this option is not specified, then a
default site list will be used.
Return Value:
Nothing. | __init__ | python | sherlock-project/sherlock | sherlock_project/sites.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/sites.py | MIT |
def remove_nsfw_sites(self, do_not_remove: list = []):
"""
    Remove NSFW sites from the site list if the isNSFW flag is true for a site.
Keyword Arguments:
self -- This object.
Return Value:
None
"""
sites = {}
do_not_remove = [site.casefold() for site in do_not_remove]
for site in self.sites:
if self.sites[site].is_nsfw and site.casefold() not in do_not_remove:
continue
sites[site] = self.sites[site]
    self.sites = sites | Remove NSFW sites from the site list if the isNSFW flag is true for a site.
Keyword Arguments:
self -- This object.
Return Value:
None | remove_nsfw_sites | python | sherlock-project/sherlock | sherlock_project/sites.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/sites.py | MIT |
def site_name_list(self):
"""Get Site Name List.
Keyword Arguments:
self -- This object.
Return Value:
List of strings containing names of sites.
"""
return sorted([site.name for site in self], key=str.lower) | Get Site Name List.
Keyword Arguments:
self -- This object.
Return Value:
List of strings containing names of sites. | site_name_list | python | sherlock-project/sherlock | sherlock_project/sites.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/sites.py | MIT |
def __iter__(self):
"""Iterator For Object.
Keyword Arguments:
self -- This object.
Return Value:
Iterator for sites object.
"""
for site_name in self.sites:
yield self.sites[site_name] | Iterator For Object.
Keyword Arguments:
self -- This object.
Return Value:
Iterator for sites object. | __iter__ | python | sherlock-project/sherlock | sherlock_project/sites.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/sites.py | MIT |
def __len__(self):
"""Length For Object.
Keyword Arguments:
self -- This object.
Return Value:
Length of sites object.
"""
return len(self.sites) | Length For Object.
Keyword Arguments:
self -- This object.
Return Value:
Length of sites object. | __len__ | python | sherlock-project/sherlock | sherlock_project/sites.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/sites.py | MIT |
def __init__(self, result=None):
"""Create Query Notify Object.
Contains information about a specific method of notifying the results
of a query.
Keyword Arguments:
self -- This object.
result -- Object of type QueryResult() containing
results for this query.
Return Value:
Nothing.
"""
self.result = result | Create Query Notify Object.
Contains information about a specific method of notifying the results
of a query.
Keyword Arguments:
self -- This object.
result -- Object of type QueryResult() containing
results for this query.
Return Value:
Nothing. | __init__ | python | sherlock-project/sherlock | sherlock_project/notify.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/notify.py | MIT |
def start(self, message=None):
"""Notify Start.
Notify method for start of query. This method will be called before
any queries are performed. This method will typically be
overridden by higher level classes that will inherit from it.
Keyword Arguments:
self -- This object.
message -- Object that is used to give context to start
of query.
Default is None.
Return Value:
Nothing.
""" | Notify Start.
Notify method for start of query. This method will be called before
any queries are performed. This method will typically be
overridden by higher level classes that will inherit from it.
Keyword Arguments:
self -- This object.
message -- Object that is used to give context to start
of query.
Default is None.
Return Value:
Nothing. | start | python | sherlock-project/sherlock | sherlock_project/notify.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/notify.py | MIT |
def update(self, result):
"""Notify Update.
Notify method for query result. This method will typically be
overridden by higher level classes that will inherit from it.
Keyword Arguments:
self -- This object.
result -- Object of type QueryResult() containing
results for this query.
Return Value:
Nothing.
"""
self.result = result | Notify Update.
Notify method for query result. This method will typically be
overridden by higher level classes that will inherit from it.
Keyword Arguments:
self -- This object.
result -- Object of type QueryResult() containing
results for this query.
Return Value:
Nothing. | update | python | sherlock-project/sherlock | sherlock_project/notify.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/notify.py | MIT |
def finish(self, message=None):
"""Notify Finish.
Notify method for finish of query. This method will be called after
all queries have been performed. This method will typically be
overridden by higher level classes that will inherit from it.
Keyword Arguments:
self -- This object.
message -- Object that is used to give context to start
of query.
Default is None.
Return Value:
Nothing.
""" | Notify Finish.
Notify method for finish of query. This method will be called after
all queries have been performed. This method will typically be
overridden by higher level classes that will inherit from it.
Keyword Arguments:
self -- This object.
message -- Object that is used to give context to start
of query.
Default is None.
Return Value:
Nothing. | finish | python | sherlock-project/sherlock | sherlock_project/notify.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/notify.py | MIT |
def __str__(self):
"""Convert Object To String.
Keyword Arguments:
self -- This object.
Return Value:
Nicely formatted string to get information about this object.
"""
return str(self.result) | Convert Object To String.
Keyword Arguments:
self -- This object.
Return Value:
Nicely formatted string to get information about this object. | __str__ | python | sherlock-project/sherlock | sherlock_project/notify.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/notify.py | MIT |
def __init__(self, result=None, verbose=False, print_all=False, browse=False):
"""Create Query Notify Print Object.
Contains information about a specific method of notifying the results
of a query.
Keyword Arguments:
self -- This object.
result -- Object of type QueryResult() containing
results for this query.
verbose -- Boolean indicating whether to give verbose output.
print_all -- Boolean indicating whether to only print all sites, including not found.
browse -- Boolean indicating whether to open found sites in a web browser.
Return Value:
Nothing.
"""
super().__init__(result)
self.verbose = verbose
self.print_all = print_all
self.browse = browse
return | Create Query Notify Print Object.
Contains information about a specific method of notifying the results
of a query.
Keyword Arguments:
self -- This object.
result -- Object of type QueryResult() containing
results for this query.
verbose -- Boolean indicating whether to give verbose output.
print_all -- Boolean indicating whether to only print all sites, including not found.
browse -- Boolean indicating whether to open found sites in a web browser.
Return Value:
Nothing. | __init__ | python | sherlock-project/sherlock | sherlock_project/notify.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/notify.py | MIT |
def start(self, message):
"""Notify Start.
Will print the title to the standard output.
Keyword Arguments:
self -- This object.
message -- String containing username that the series
of queries are about.
Return Value:
Nothing.
"""
title = "Checking username"
print(Style.BRIGHT + Fore.GREEN + "[" +
Fore.YELLOW + "*" +
Fore.GREEN + f"] {title}" +
Fore.WHITE + f" {message}" +
Fore.GREEN + " on:")
# An empty line between first line and the result(more clear output)
print('\r')
return | Notify Start.
Will print the title to the standard output.
Keyword Arguments:
self -- This object.
message -- String containing username that the series
of queries are about.
Return Value:
Nothing. | start | python | sherlock-project/sherlock | sherlock_project/notify.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/notify.py | MIT |
def countResults(self):
"""This function counts the number of results. Every time the function is called,
the number of results is increasing.
Keyword Arguments:
self -- This object.
Return Value:
The number of results by the time we call the function.
"""
global globvar
globvar += 1
    return globvar | This function counts the number of results. Each time it is called,
the count is incremented.
Keyword Arguments:
self -- This object.
Return Value:
The number of results by the time we call the function. | countResults | python | sherlock-project/sherlock | sherlock_project/notify.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/notify.py | MIT |
def update(self, result):
"""Notify Update.
Will print the query result to the standard output.
Keyword Arguments:
self -- This object.
result -- Object of type QueryResult() containing
results for this query.
Return Value:
Nothing.
"""
self.result = result
response_time_text = ""
if self.result.query_time is not None and self.verbose is True:
response_time_text = f" [{round(self.result.query_time * 1000)}ms]"
# Output to the terminal is desired.
if result.status == QueryStatus.CLAIMED:
self.countResults()
print(Style.BRIGHT + Fore.WHITE + "[" +
Fore.GREEN + "+" +
Fore.WHITE + "]" +
response_time_text +
Fore.GREEN +
f" {self.result.site_name}: " +
Style.RESET_ALL +
f"{self.result.site_url_user}")
if self.browse:
webbrowser.open(self.result.site_url_user, 2)
elif result.status == QueryStatus.AVAILABLE:
if self.print_all:
print(Style.BRIGHT + Fore.WHITE + "[" +
Fore.RED + "-" +
Fore.WHITE + "]" +
response_time_text +
Fore.GREEN + f" {self.result.site_name}:" +
Fore.YELLOW + " Not Found!")
elif result.status == QueryStatus.UNKNOWN:
if self.print_all:
print(Style.BRIGHT + Fore.WHITE + "[" +
Fore.RED + "-" +
Fore.WHITE + "]" +
Fore.GREEN + f" {self.result.site_name}:" +
Fore.RED + f" {self.result.context}" +
Fore.YELLOW + " ")
elif result.status == QueryStatus.ILLEGAL:
if self.print_all:
msg = "Illegal Username Format For This Site!"
print(Style.BRIGHT + Fore.WHITE + "[" +
Fore.RED + "-" +
Fore.WHITE + "]" +
Fore.GREEN + f" {self.result.site_name}:" +
Fore.YELLOW + f" {msg}")
elif result.status == QueryStatus.WAF:
if self.print_all:
print(Style.BRIGHT + Fore.WHITE + "[" +
Fore.RED + "-" +
Fore.WHITE + "]" +
Fore.GREEN + f" {self.result.site_name}:" +
Fore.RED + " Blocked by bot detection" +
Fore.YELLOW + " (proxy may help)")
else:
# It should be impossible to ever get here...
raise ValueError(
f"Unknown Query Status '{result.status}' for site '{self.result.site_name}'"
)
return | Notify Update.
Will print the query result to the standard output.
Keyword Arguments:
self -- This object.
result -- Object of type QueryResult() containing
results for this query.
Return Value:
Nothing. | update | python | sherlock-project/sherlock | sherlock_project/notify.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/notify.py | MIT |
def finish(self, message="The processing has been finished."):
"""Notify Start.
Will print the last line to the standard output.
Keyword Arguments:
self -- This object.
    message                -- The final message to display.
Return Value:
Nothing.
"""
NumberOfResults = self.countResults() - 1
print(Style.BRIGHT + Fore.GREEN + "[" +
Fore.YELLOW + "*" +
Fore.GREEN + "] Search completed with" +
Fore.WHITE + f" {NumberOfResults} " +
Fore.GREEN + "results" + Style.RESET_ALL
) | Notify Finish.
Will print the last line to the standard output.
Keyword Arguments:
self -- This object.
message -- The last two phrases.
Return Value:
Nothing. | finish | python | sherlock-project/sherlock | sherlock_project/notify.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/notify.py | MIT |
def __str__(self):
"""Convert Object To String.
Keyword Arguments:
self -- This object.
Return Value:
Nicely formatted string to get information about this object.
"""
return str(self.result) | Convert Object To String.
Keyword Arguments:
self -- This object.
Return Value:
Nicely formatted string to get information about this object. | __str__ | python | sherlock-project/sherlock | sherlock_project/notify.py | https://github.com/sherlock-project/sherlock/blob/master/sherlock_project/notify.py | MIT |
def prune_browsers(self, browser) -> None:
"""Remove all the browsers with the same password as the given browser"""
with self.lock_browsers:
for br in list(self.browsers):
if br == browser:
continue
if br.password != browser.password:
continue
try:
self.browsers.remove(br)
except ValueError:
pass
br.close()
br.proxy.decr_usage()
self.proxy_manager.dispose(br.proxy)
with self.lock_unstarted_browsers:
for br in list(self.unstarted_browsers):
if br.password == browser.password:
try:
self.unstarted_browsers.remove(br)
except ValueError:
pass | Remove all the browsers with the same password as the given browser | prune_browsers | python | Bitwise-01/Instagram- | lib/bruter.py | https://github.com/Bitwise-01/Instagram-/blob/master/lib/bruter.py | MIT |
def create_tables(self):
self.db_execute(
"""
CREATE TABLE IF NOT EXISTS
Session(
session_id TEXT,
attempts INTEGER,
list TEXT,
PRIMARY KEY(session_id)
);
"""
) | CREATE TABLE IF NOT EXISTS
Session(
session_id TEXT,
attempts INTEGER,
list TEXT,
PRIMARY KEY(session_id)
); | create_tables | python | Bitwise-01/Instagram- | lib/database.py | https://github.com/Bitwise-01/Instagram-/blob/master/lib/database.py | MIT |
def _write(self, attempts, _list):
if not self.exists:
self.db_execute(
"""
INSERT INTO Session(session_id, attempts, list)
VALUES(?, ?, ?);
""",
args=[self.fingerprint, attempts, json.dumps(_list)],
)
return
self.db_execute(
"""
UPDATE Session
SET attempts=?, list=?
WHERE session_id=?;
""",
args=[attempts, json.dumps(_list), self.fingerprint],
) | INSERT INTO Session(session_id, attempts, list)
VALUES(?, ?, ?);
""",
args=[self.fingerprint, attempts, json.dumps(_list)],
)
return
self.db_execute( | _write | python | Bitwise-01/Instagram- | lib/database.py | https://github.com/Bitwise-01/Instagram-/blob/master/lib/database.py | MIT |
def create_tables(self) -> None:
self.db_execute(
"""
CREATE TABLE IF NOT EXISTS
Proxy(
proxy_id TEXT,
ip TEXT,
port INTEGER,
proxy_type TEXT,
PRIMARY KEY(proxy_id)
);
"""
)
self.db_execute(
"""
CREATE TABLE IF NOT EXISTS
ProxyStatus(
proxy_status_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
time_added FLOAT,
last_used FLOAT,
last_updated FLOAT,
total_used INTEGER DEFAULT 1,
total_passed INTEGER DEFAULT 0,
proxy_id TEXT,
FOREIGN KEY(proxy_id) REFERENCES Proxy(proxy_id) ON DELETE CASCADE
);
"""
) | CREATE TABLE IF NOT EXISTS
Proxy(
proxy_id TEXT,
ip TEXT,
port INTEGER,
proxy_type TEXT,
PRIMARY KEY(proxy_id)
); | create_tables | python | Bitwise-01/Instagram- | lib/database.py | https://github.com/Bitwise-01/Instagram-/blob/master/lib/database.py | MIT |
def __exists(self, proxy_id: str) -> bool:
"""Returns True if a proxy by the given proxy id exists"""
return (
self.db_query(
"SELECT COUNT(*) FROM Proxy WHERE proxy_id=?;",
[proxy_id],
)
!= 0
) | Returns True if a proxy by the given proxy id exists | __exists | python | Bitwise-01/Instagram- | lib/database.py | https://github.com/Bitwise-01/Instagram-/blob/master/lib/database.py | MIT |
def add_proxy(
self, *, ip: str, port: int, proxy_type: str = "http"
) -> typing.Optional[str]:
"""Add a proxy into the database.
Returns: proxy_id when successful
"""
# preprocess
ip = ip.strip()
proxy_type = proxy_type.strip().lower()
proxy_id = self.__get_signature(ip=ip, port=port)
if proxy_type not in ["http", "https", "socks4", "socks5"]:
return None
# check for existence
if self.__exists(proxy_id):
return None
# add to database
self.db_execute(
"""
INSERT INTO Proxy(proxy_id, ip, port, proxy_type)
VALUES(?, ?, ?, ?);
""",
args=[proxy_id, ip, port, proxy_type],
)
self.db_execute(
"""
INSERT INTO ProxyStatus(
time_added,
proxy_id
)
VALUES(?, ?);
""",
args=[time.time(), proxy_id],
)
return proxy_id | Add a proxy into the database.
Returns: proxy_id when successful | add_proxy | python | Bitwise-01/Instagram- | lib/database.py | https://github.com/Bitwise-01/Instagram-/blob/master/lib/database.py | MIT |
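A minimal usage sketch for the function above, assuming the surrounding database.Proxy class can be instantiated with no arguments (the constructor is not shown in this row); everything other than add_proxy/delete_proxy is illustrative:

# Hypothetical usage; the import path follows this row's lib/database.py location.
from lib import database

proxy_db = database.Proxy()
proxy_id = proxy_db.add_proxy(ip="127.0.0.1", port=8080, proxy_type="http")
if proxy_id is None:
    print("rejected: unknown proxy type or duplicate entry")
else:
    print("stored as", proxy_id)
    proxy_db.delete_proxy(proxy_id)  # returns False only when the id is unknown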
def delete_proxy(self, proxy_id: str) -> bool:
"""Delete a proxy from the database
Returns:
True: if proxy has been deleted
"""
if not self.__exists(proxy_id):
return False
self.db_execute(
"""
DELETE FROM Proxy
WHERE proxy_id=?;
""",
args=[proxy_id],
) | Delete a proxy from the database
Returns:
True: if proxy has been deleted | delete_proxy | python | Bitwise-01/Instagram- | lib/database.py | https://github.com/Bitwise-01/Instagram-/blob/master/lib/database.py | MIT |
def __parse_proxy(self, proxy_data: tuple) -> dict:
"""Get a tuple of proxy and turns it into a dict."""
ip, port, proxy_type = proxy_data[1], proxy_data[2], proxy_data[3]
if proxy_type == "http" or proxy_type == "https":
proxy_addr = f"http://{ip}:{port}"
elif proxy_type == "socks4":
proxy_addr = f"socks4://{ip}:{port}"
elif proxy_type == "socks5":
proxy_addr = f"socks5://{ip}:{port}"
addr = {
"http": proxy_addr,
"https": proxy_addr,
}
return {
"ip": proxy_data[1],
"port": proxy_data[2],
"proxy_type": proxy_data[3],
"time_added": proxy_data[4],
"last_used": proxy_data[5],
"last_updated": proxy_data[6],
"total_used": proxy_data[7],
"total_passed": proxy_data[8],
"score": proxy_data[10],
"addr": addr,
} | Take a tuple of proxy data and turn it into a dict. | __parse_proxy | python | Bitwise-01/Instagram- | lib/database.py | https://github.com/Bitwise-01/Instagram-/blob/master/lib/database.py | MIT
def get_proxy(self, proxy_id: str) -> dict:
"""Get details of a proxy with the given proxy id"""
if not self.__exists(proxy_id):
return {}
proxy_data = self.db_query(
"""
SELECT *,
(CAST(ProxyStatus.total_passed AS FLOAT) / CAST(ProxyStatus.total_used AS FLOAT)) AS score
FROM Proxy
INNER JOIN ProxyStatus on ProxyStatus.proxy_id = Proxy.proxy_id
WHERE Proxy.proxy_id=?;
""",
args=[proxy_id],
fetchone=False,
)[0]
return self.__parse_proxy(proxy_data) | Get details of a proxy with the given proxy id | get_proxy | python | Bitwise-01/Instagram- | lib/database.py | https://github.com/Bitwise-01/Instagram-/blob/master/lib/database.py | MIT |
def prune(self, threshold: float) -> int:
before_rows = self.db_query(
"""
SELECT COUNT(*)
FROM Proxy;
"""
)
self.db_execute(
"""
DELETE
FROM Proxy
WHERE proxy_id IN (
SELECT Proxy.proxy_id
FROM Proxy
INNER JOIN ProxyStatus on ProxyStatus.proxy_id = Proxy.proxy_id
WHERE (CAST(ProxyStatus.total_passed AS FLOAT) / CAST(ProxyStatus.total_used AS FLOAT)) < ?
);
""",
args=[threshold],
)
after_rows = self.db_query(
"""
SELECT COUNT(*)
FROM Proxy;
"""
)
return before_rows - after_rows | SELECT COUNT(*)
FROM Proxy; | prune | python | Bitwise-01/Instagram- | lib/database.py | https://github.com/Bitwise-01/Instagram-/blob/master/lib/database.py | MIT |
def calc_q1(self) -> float:
"""Calculate the first quartile of the scores."""
scores = self.db_query(
"""
SELECT score
FROM
(
SELECT
(CAST(ProxyStatus.total_passed AS FLOAT) / CAST(ProxyStatus.total_used AS FLOAT)) AS score
FROM Proxy
INNER JOIN ProxyStatus on ProxyStatus.proxy_id = Proxy.proxy_id
)
ORDER BY
score ASC;
""",
fetchone=False,
)
q1 = 0.0
if scores[0][0]:
scores = [score[0] for score in scores]
mid = len(scores) / 2
if isinstance(mid, float):
mid = int(mid)
q1 = (scores[mid] + scores[mid + 1]) / 2
else:
q1 = (sum(scores[:mid]) / mid) if mid else q1
return q1 | Calculate the first quartile of the scores. | calc_q1 | python | Bitwise-01/Instagram- | lib/database.py | https://github.com/Bitwise-01/Instagram-/blob/master/lib/database.py | MIT |
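calc_q1 is the natural companion to prune above: the first-quartile pass rate becomes the deletion threshold. A sketch of that maintenance pass (instantiation is assumed, as before):

# Hypothetical maintenance pass: drop proxies whose pass rate falls below the first quartile.
proxy_db = database.Proxy()   # constructor assumed
q1 = proxy_db.calc_q1()       # threshold derived from stored total_passed / total_used
removed = proxy_db.prune(q1)  # deletes every proxy scoring below the threshold
print(f"pruned {removed} low-scoring proxies (threshold {q1:.2f})")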
def incr_success(self) -> None:
"""Incremented when proxy works"""
self.__total_passed += 1 | Incremented when proxy works | incr_success | python | Bitwise-01/Instagram- | lib/proxy.py | https://github.com/Bitwise-01/Instagram-/blob/master/lib/proxy.py | MIT |
def decr_usage(self) -> None:
"""Takes away usage data for this session"""
self.__total_used = 0
self.__total_passed = 0 | Takes away usage data for this session | decr_usage | python | Bitwise-01/Instagram- | lib/proxy.py | https://github.com/Bitwise-01/Instagram-/blob/master/lib/proxy.py | MIT |
def __http_proxies(self) -> None:
"""Get proxies from http://www.sslproxies.org/"""
with HTMLSession() as session:
r = session.get(self.__http_proxies_url)
table = r.html.find(".table", first=True)
tr = table.find("tr")
for row in tr[1:]:
td = row.find("td")
proxy = {
"ip": td[0].text,
"port": td[1].text,
"proxy_type": "http",
}
self.__add_proxy(proxy) | Get proxies from http://www.sslproxies.org/ | __http_proxies | python | Bitwise-01/Instagram- | lib/proxy_manager.py | https://github.com/Bitwise-01/Instagram-/blob/master/lib/proxy_manager.py | MIT |
def get_proxies(self) -> typing.List[typing.Optional[typing.Dict]]:
"""Get public proxies"""
if self.last_updated is None:
self.last_updated = time.time()
else:
if time.time() - self.last_updated < self.__fetch_interval_sec:
return []
# http proxies
try:
self.__http_proxies()
except Exception as e:
pass
# socks5 proxies
try:
pass
# self.__get_socks_proxies()
except Exception as e:
raise e
pass
self.last_updated = time.time()
proxies = []
size = len(self.proxies)
for i in range(size):
if i % 2:
proxies.append(self.proxies.pop(0))
else:
proxies.append(self.proxies.pop())
return proxies | Get public proxies | get_proxies | python | Bitwise-01/Instagram- | lib/proxy_manager.py | https://github.com/Bitwise-01/Instagram-/blob/master/lib/proxy_manager.py | MIT |
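The final loop in get_proxies drains self.proxies by alternating between the tail (pop()) and the head (pop(0)), interleaving old and freshly scraped entries. A standalone illustration of the resulting order:

# Illustration only: the same alternating-pop pattern applied to a plain list.
items = [1, 2, 3, 4, 5, 6]
out = []
for i in range(len(items)):
    out.append(items.pop(0) if i % 2 else items.pop())
print(out)  # [6, 1, 5, 2, 4, 3]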
def write2db(self, proxylist_path: str) -> int:
"""Read proxies from the file and write it into the database.
File must contain ip:port format.
Returns: Number of rows written into the database.
"""
total_written = 0
with io.open(proxylist_path, mode="rt", encoding="utf-8") as f:
proxy = database.Proxy()
for line in f:
ip, port = line.split(":")
ip = ip.strip()
port = port.split()[0].strip()
if proxy.add_proxy(ip=ip, port=port):
total_written += 1
return total_written | Read proxies from the file and write it into the database.
File must contain ip:port format.
Returns: Number of rows written into the database. | write2db | python | Bitwise-01/Instagram- | lib/proxy_manager.py | https://github.com/Bitwise-01/Instagram-/blob/master/lib/proxy_manager.py | MIT |
def dispose(self, proxy: proxy.Proxy) -> None:
"""Dispose of a proxy.
A proxy will be updated after a usage session.
"""
info = proxy.info()
basic_info = {"ip": info.get("ip"), "port": info.get("port")}
if info.get("total_used"):
self.db_proxy.update_status(
info.get("ip"),
info.get("port"),
info.get("last_used"),
info.get("total_used") or 0,
info.get("total_passed") or 0,
)
with self.lock_active_proxies:
if basic_info in self.active_proxies:
self.active_proxies.remove(basic_info) | Dispose of a proxy.
A proxy will be updated after a usage session. | dispose | python | Bitwise-01/Instagram- | lib/proxy_manager.py | https://github.com/Bitwise-01/Instagram-/blob/master/lib/proxy_manager.py | MIT
def add_public_proxies(self) -> None:
"""Add public proxies to the database"""
for proxy in self.proxy_finder.get_proxies():
self.db_proxy.add_proxy(
ip=proxy.get("ip"),
port=proxy.get("port"),
proxy_type=proxy.get("proxy_type"),
) | Add public proxies to the database | add_public_proxies | python | Bitwise-01/Instagram- | lib/proxy_manager.py | https://github.com/Bitwise-01/Instagram-/blob/master/lib/proxy_manager.py | MIT |
def pop_list(self) -> None:
"""Populates queue using database"""
self.add_public_proxies()
proxies = self.db_proxy.get_proxies(
self.__offset, self.__limit, min_score=self.__min_score
)
for proxy in proxies:
basic_info = {
"ip": proxy.get("ip"),
"port": proxy.get("port"),
}
with self.lock_active_proxies:
if basic_info in self.active_proxies:
continue
last_used = proxy.get("last_used")
if last_used:
if time.time() - last_used <= self.__cooloff_period_seconds:
continue
self.proxies.put(proxy)
with self.lock_active_proxies:
self.active_proxies.append(basic_info)
if proxies:
self.__offset += self.__limit
else:
self.__offset = 0
self.__min_score = self.db_proxy.calc_q1() | Populates queue using database | pop_list | python | Bitwise-01/Instagram- | lib/proxy_manager.py | https://github.com/Bitwise-01/Instagram-/blob/master/lib/proxy_manager.py | MIT |
def get_lr(step, total_steps, lr_max, lr_min):
"""Compute learning rate according to cosine annealing schedule."""
return lr_min + (lr_max - lr_min) * 0.5 * (1 +
np.cos(step / total_steps * np.pi)) | Compute learning rate according to cosine annealing schedule. | get_lr | python | google-research/augmix | cifar.py | https://github.com/google-research/augmix/blob/master/cifar.py | Apache-2.0 |
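In the AugMix CIFAR script this schedule is usually attached to the optimizer through torch.optim.lr_scheduler.LambdaLR, with get_lr returning a multiplier on the base learning rate; the wiring below is a sketch under that assumption, with illustrative step counts:

import numpy as np
import torch

def get_lr(step, total_steps, lr_max, lr_min):
    return lr_min + (lr_max - lr_min) * 0.5 * (1 + np.cos(step / total_steps * np.pi))

model = torch.nn.Linear(10, 2)
base_lr = 0.1
optimizer = torch.optim.SGD(model.parameters(), lr=base_lr)
total_steps = 100 * 390  # epochs * batches per epoch, illustrative numbers
scheduler = torch.optim.lr_scheduler.LambdaLR(
    optimizer,
    # LambdaLR scales the base lr, so the lambda returns a factor in [lr_min/base_lr, 1].
    lr_lambda=lambda step: get_lr(step, total_steps, 1.0, 1e-6 / base_lr),
)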
def aug(image, preprocess):
"""Perform AugMix augmentations and compute mixture.
Args:
image: PIL.Image input image
preprocess: Preprocessing function which should return a torch tensor.
Returns:
mixed: Augmented and mixed image.
"""
aug_list = augmentations.augmentations
if args.all_ops:
aug_list = augmentations.augmentations_all
ws = np.float32(np.random.dirichlet([1] * args.mixture_width))
m = np.float32(np.random.beta(1, 1))
mix = torch.zeros_like(preprocess(image))
for i in range(args.mixture_width):
image_aug = image.copy()
depth = args.mixture_depth if args.mixture_depth > 0 else np.random.randint(
1, 4)
for _ in range(depth):
op = np.random.choice(aug_list)
image_aug = op(image_aug, args.aug_severity)
# Preprocessing commutes since all coefficients are convex
mix += ws[i] * preprocess(image_aug)
mixed = (1 - m) * preprocess(image) + m * mix
return mixed | Perform AugMix augmentations and compute mixture.
Args:
image: PIL.Image input image
preprocess: Preprocessing function which should return a torch tensor.
Returns:
mixed: Augmented and mixed image. | aug | python | google-research/augmix | cifar.py | https://github.com/google-research/augmix/blob/master/cifar.py | Apache-2.0 |
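aug above takes a PIL image plus a tensor-returning preprocess, and the JSD branch of the training loop expects each sample to arrive as a (clean, aug1, aug2) tuple. A minimal dataset wrapper along those lines, assuming aug is in scope as defined above (the original repository ships its own wrapper; this is only a sketch):

import torch

class AugMixDataset(torch.utils.data.Dataset):
    """Wraps a dataset so __getitem__ yields (clean, aug1, aug2) for the JSD loss."""

    def __init__(self, dataset, preprocess, no_jsd=False):
        self.dataset = dataset
        self.preprocess = preprocess
        self.no_jsd = no_jsd

    def __getitem__(self, i):
        x, y = self.dataset[i]
        if self.no_jsd:
            return aug(x, self.preprocess), y
        return (self.preprocess(x), aug(x, self.preprocess), aug(x, self.preprocess)), y

    def __len__(self):
        return len(self.dataset)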
def train(net, train_loader, optimizer, scheduler):
"""Train for one epoch."""
net.train()
loss_ema = 0.
for i, (images, targets) in enumerate(train_loader):
optimizer.zero_grad()
if args.no_jsd:
images = images.cuda()
targets = targets.cuda()
logits = net(images)
loss = F.cross_entropy(logits, targets)
else:
images_all = torch.cat(images, 0).cuda()
targets = targets.cuda()
logits_all = net(images_all)
logits_clean, logits_aug1, logits_aug2 = torch.split(
logits_all, images[0].size(0))
# Cross-entropy is only computed on clean images
loss = F.cross_entropy(logits_clean, targets)
p_clean, p_aug1, p_aug2 = F.softmax(
logits_clean, dim=1), F.softmax(
logits_aug1, dim=1), F.softmax(
logits_aug2, dim=1)
# Clamp mixture distribution to avoid exploding KL divergence
p_mixture = torch.clamp((p_clean + p_aug1 + p_aug2) / 3., 1e-7, 1).log()
loss += 12 * (F.kl_div(p_mixture, p_clean, reduction='batchmean') +
F.kl_div(p_mixture, p_aug1, reduction='batchmean') +
F.kl_div(p_mixture, p_aug2, reduction='batchmean')) / 3.
loss.backward()
optimizer.step()
scheduler.step()
loss_ema = loss_ema * 0.9 + float(loss) * 0.1
if i % args.print_freq == 0:
print('Train Loss {:.3f}'.format(loss_ema))
return loss_ema | Train for one epoch. | train | python | google-research/augmix | cifar.py | https://github.com/google-research/augmix/blob/master/cifar.py | Apache-2.0 |
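The else-branch above implements a Jensen–Shannon-style consistency term over the clean and two augmented views. Factored out for clarity (a sketch, not an API from the repository):

import torch
import torch.nn.functional as F

def jsd_consistency(logits_clean, logits_aug1, logits_aug2, weight=12.0):
    """Jensen-Shannon consistency between the three softmax distributions."""
    p_clean = F.softmax(logits_clean, dim=1)
    p_aug1 = F.softmax(logits_aug1, dim=1)
    p_aug2 = F.softmax(logits_aug2, dim=1)
    # Mixture distribution, clamped so log() stays finite.
    p_mixture = torch.clamp((p_clean + p_aug1 + p_aug2) / 3.0, 1e-7, 1).log()
    return weight * (F.kl_div(p_mixture, p_clean, reduction='batchmean') +
                     F.kl_div(p_mixture, p_aug1, reduction='batchmean') +
                     F.kl_div(p_mixture, p_aug2, reduction='batchmean')) / 3.0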
def test(net, test_loader):
"""Evaluate network on given dataset."""
net.eval()
total_loss = 0.
total_correct = 0
with torch.no_grad():
for images, targets in test_loader:
images, targets = images.cuda(), targets.cuda()
logits = net(images)
loss = F.cross_entropy(logits, targets)
pred = logits.data.max(1)[1]
total_loss += float(loss.data)
total_correct += pred.eq(targets.data).sum().item()
return total_loss / len(test_loader.dataset), total_correct / len(
test_loader.dataset) | Evaluate network on given dataset. | test | python | google-research/augmix | cifar.py | https://github.com/google-research/augmix/blob/master/cifar.py | Apache-2.0 |
def test_c(net, test_data, base_path):
"""Evaluate network on given corrupted dataset."""
corruption_accs = []
for corruption in CORRUPTIONS:
# Reference to original data is mutated
test_data.data = np.load(base_path + corruption + '.npy')
test_data.targets = torch.LongTensor(np.load(base_path + 'labels.npy'))
test_loader = torch.utils.data.DataLoader(
test_data,
batch_size=args.eval_batch_size,
shuffle=False,
num_workers=args.num_workers,
pin_memory=True)
test_loss, test_acc = test(net, test_loader)
corruption_accs.append(test_acc)
print('{}\n\tTest Loss {:.3f} | Test Error {:.3f}'.format(
corruption, test_loss, 100 - 100. * test_acc))
return np.mean(corruption_accs) | Evaluate network on given corrupted dataset. | test_c | python | google-research/augmix | cifar.py | https://github.com/google-research/augmix/blob/master/cifar.py | Apache-2.0 |
def normalize(image):
"""Normalize input image channel-wise to zero mean and unit variance."""
image = image.transpose(2, 0, 1) # Switch to channel-first
mean, std = np.array(MEAN), np.array(STD)
image = (image - mean[:, None, None]) / std[:, None, None]
return image.transpose(1, 2, 0) | Normalize input image channel-wise to zero mean and unit variance. | normalize | python | google-research/augmix | augment_and_mix.py | https://github.com/google-research/augmix/blob/master/augment_and_mix.py | Apache-2.0 |
def augment_and_mix(image, severity=3, width=3, depth=-1, alpha=1.):
"""Perform AugMix augmentations and compute mixture.
Args:
image: Raw input image as float32 np.ndarray of shape (h, w, c)
severity: Severity of underlying augmentation operators (between 1 to 10).
width: Width of augmentation chain
depth: Depth of augmentation chain. -1 enables stochastic depth uniformly
from [1, 3]
alpha: Probability coefficient for Beta and Dirichlet distributions.
Returns:
mixed: Augmented and mixed image.
"""
ws = np.float32(
np.random.dirichlet([alpha] * width))
m = np.float32(np.random.beta(alpha, alpha))
mix = np.zeros_like(image)
for i in range(width):
image_aug = image.copy()
d = depth if depth > 0 else np.random.randint(1, 4)
for _ in range(d):
op = np.random.choice(augmentations.augmentations)
image_aug = apply_op(image_aug, op, severity)
# Preprocessing commutes since all coefficients are convex
mix += ws[i] * normalize(image_aug)
mixed = (1 - m) * normalize(image) + m * mix
return mixed | Perform AugMix augmentations and compute mixture.
Args:
image: Raw input image as float32 np.ndarray of shape (h, w, c)
severity: Severity of underlying augmentation operators (between 1 to 10).
width: Width of augmentation chain
depth: Depth of augmentation chain. -1 enables stochastic depth uniformly
from [1, 3]
alpha: Probability coefficient for Beta and Dirichlet distributions.
Returns:
mixed: Augmented and mixed image. | augment_and_mix | python | google-research/augmix | augment_and_mix.py | https://github.com/google-research/augmix/blob/master/augment_and_mix.py | Apache-2.0 |
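A quick way to exercise augment_and_mix on a synthetic image, assuming the function and its MEAN/STD globals plus the augmentations module are importable as in this file:

import numpy as np

# Synthetic 32x32 RGB image in [0, 1]; shape (h, w, c) as the docstring requires.
rng = np.random.default_rng(0)
image = rng.random((32, 32, 3)).astype(np.float32)

mixed = augment_and_mix(image, severity=3, width=3, depth=-1, alpha=1.0)
print(mixed.shape)  # (32, 32, 3), channel-normalized output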
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR (linearly scaled to batch size) decayed by 10 every n / 3 epochs."""
b = args.batch_size / 256.
k = args.epochs // 3
if epoch < k:
m = 1
elif epoch < 2 * k:
m = 0.1
else:
m = 0.01
lr = args.learning_rate * m * b
for param_group in optimizer.param_groups:
param_group['lr'] = lr | Sets the learning rate to the initial LR (linearly scaled to batch size) decayed by 10 every n / 3 epochs. | adjust_learning_rate | python | google-research/augmix | imagenet.py | https://github.com/google-research/augmix/blob/master/imagenet.py | Apache-2.0 |
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k."""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res | Computes the accuracy over the k top predictions for the specified values of k. | accuracy | python | google-research/augmix | imagenet.py | https://github.com/google-research/augmix/blob/master/imagenet.py | Apache-2.0 |
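accuracy above returns one value per requested k; a small self-contained check (accuracy is assumed in scope from the row above):

import torch

logits = torch.tensor([[2.0, 1.0, 0.1],
                       [0.2, 0.1, 3.0],
                       [1.5, 2.5, 0.3]])
targets = torch.tensor([0, 2, 0])
top1, top2 = accuracy(logits, targets, topk=(1, 2))
print(float(top1), float(top2))  # ~66.7 and 100.0 for this toy batch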
def compute_mce(corruption_accs):
"""Compute mCE (mean Corruption Error) normalized by AlexNet performance."""
mce = 0.
for i in range(len(CORRUPTIONS)):
avg_err = 1 - np.mean(corruption_accs[CORRUPTIONS[i]])
ce = 100 * avg_err / ALEXNET_ERR[i]
mce += ce / 15
return mce | Compute mCE (mean Corruption Error) normalized by AlexNet performance. | compute_mce | python | google-research/augmix | imagenet.py | https://github.com/google-research/augmix/blob/master/imagenet.py | Apache-2.0 |
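Each corruption's error is divided by AlexNet's error on that corruption before averaging; a toy illustration of the same arithmetic with two made-up corruptions (real runs use the 15 entries of CORRUPTIONS and ALEXNET_ERR):

import numpy as np

alexnet_err = [0.886, 0.894]  # made-up baseline errors for the illustration
accs = {'gaussian_noise': [0.60, 0.55], 'shot_noise': [0.58, 0.50]}
mce = 0.0
for (name, acc_list), base in zip(accs.items(), alexnet_err):
    ce = 100 * (1 - np.mean(acc_list)) / base  # per-corruption Corruption Error
    mce += ce / len(accs)
print(round(mce, 1))  # mean CE over the two toy corruptions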
def aug(image, preprocess):
"""Perform AugMix augmentations and compute mixture.
Args:
image: PIL.Image input image
preprocess: Preprocessing function which should return a torch tensor.
Returns:
mixed: Augmented and mixed image.
"""
aug_list = augmentations.augmentations
if args.all_ops:
aug_list = augmentations.augmentations_all
ws = np.float32(
np.random.dirichlet([args.aug_prob_coeff] * args.mixture_width))
m = np.float32(np.random.beta(args.aug_prob_coeff, args.aug_prob_coeff))
mix = torch.zeros_like(preprocess(image))
for i in range(args.mixture_width):
image_aug = image.copy()
depth = args.mixture_depth if args.mixture_depth > 0 else np.random.randint(
1, 4)
for _ in range(depth):
op = np.random.choice(aug_list)
image_aug = op(image_aug, args.aug_severity)
# Preprocessing commutes since all coefficients are convex
mix += ws[i] * preprocess(image_aug)
mixed = (1 - m) * preprocess(image) + m * mix
return mixed | Perform AugMix augmentations and compute mixture.
Args:
image: PIL.Image input image
preprocess: Preprocessing function which should return a torch tensor.
Returns:
mixed: Augmented and mixed image. | aug | python | google-research/augmix | imagenet.py | https://github.com/google-research/augmix/blob/master/imagenet.py | Apache-2.0 |
def train(net, train_loader, optimizer):
"""Train for one epoch."""
net.train()
data_ema = 0.
batch_ema = 0.
loss_ema = 0.
acc1_ema = 0.
acc5_ema = 0.
end = time.time()
for i, (images, targets) in enumerate(train_loader):
# Compute data loading time
data_time = time.time() - end
optimizer.zero_grad()
if args.no_jsd:
images = images.cuda()
targets = targets.cuda()
logits = net(images)
loss = F.cross_entropy(logits, targets)
acc1, acc5 = accuracy(logits, targets, topk=(1, 5)) # pylint: disable=unbalanced-tuple-unpacking
else:
images_all = torch.cat(images, 0).cuda()
targets = targets.cuda()
logits_all = net(images_all)
logits_clean, logits_aug1, logits_aug2 = torch.split(
logits_all, images[0].size(0))
# Cross-entropy is only computed on clean images
loss = F.cross_entropy(logits_clean, targets)
p_clean, p_aug1, p_aug2 = F.softmax(
logits_clean, dim=1), F.softmax(
logits_aug1, dim=1), F.softmax(
logits_aug2, dim=1)
# Clamp mixture distribution to avoid exploding KL divergence
p_mixture = torch.clamp((p_clean + p_aug1 + p_aug2) / 3., 1e-7, 1).log()
loss += 12 * (F.kl_div(p_mixture, p_clean, reduction='batchmean') +
F.kl_div(p_mixture, p_aug1, reduction='batchmean') +
F.kl_div(p_mixture, p_aug2, reduction='batchmean')) / 3.
acc1, acc5 = accuracy(logits_clean, targets, topk=(1, 5)) # pylint: disable=unbalanced-tuple-unpacking
loss.backward()
optimizer.step()
# Compute batch computation time and update moving averages.
batch_time = time.time() - end
end = time.time()
data_ema = data_ema * 0.1 + float(data_time) * 0.9
batch_ema = batch_ema * 0.1 + float(batch_time) * 0.9
loss_ema = loss_ema * 0.1 + float(loss) * 0.9
acc1_ema = acc1_ema * 0.1 + float(acc1) * 0.9
acc5_ema = acc5_ema * 0.1 + float(acc5) * 0.9
if i % args.print_freq == 0:
print(
'Batch {}/{}: Data Time {:.3f} | Batch Time {:.3f} | Train Loss {:.3f} | Train Acc1 '
'{:.3f} | Train Acc5 {:.3f}'.format(i, len(train_loader), data_ema,
batch_ema, loss_ema, acc1_ema,
acc5_ema))
return loss_ema, acc1_ema, batch_ema | Train for one epoch. | train | python | google-research/augmix | imagenet.py | https://github.com/google-research/augmix/blob/master/imagenet.py | Apache-2.0 |
def test(net, test_loader):
"""Evaluate network on given dataset."""
net.eval()
total_loss = 0.
total_correct = 0
with torch.no_grad():
for images, targets in test_loader:
images, targets = images.cuda(), targets.cuda()
logits = net(images)
loss = F.cross_entropy(logits, targets)
pred = logits.data.max(1)[1]
total_loss += float(loss.data)
total_correct += pred.eq(targets.data).sum().item()
return total_loss / len(test_loader.dataset), total_correct / len(
test_loader.dataset) | Evaluate network on given dataset. | test | python | google-research/augmix | imagenet.py | https://github.com/google-research/augmix/blob/master/imagenet.py | Apache-2.0 |
def test_c(net, test_transform):
"""Evaluate network on given corrupted dataset."""
corruption_accs = {}
for c in CORRUPTIONS:
print(c)
for s in range(1, 6):
valdir = os.path.join(args.corrupted_data, c, str(s))
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, test_transform),
batch_size=args.eval_batch_size,
shuffle=False,
num_workers=args.num_workers,
pin_memory=True)
loss, acc1 = test(net, val_loader)
if c in corruption_accs:
corruption_accs[c].append(acc1)
else:
corruption_accs[c] = [acc1]
print('\ts={}: Test Loss {:.3f} | Test Acc1 {:.3f}'.format(
s, loss, 100. * acc1))
return corruption_accs | Evaluate network on given corrupted dataset. | test_c | python | google-research/augmix | imagenet.py | https://github.com/google-research/augmix/blob/master/imagenet.py | Apache-2.0 |
def int_parameter(level, maxval):
"""Helper function to scale `val` between 0 and maxval .
Args:
level: Level of the operation that will be between [0, `PARAMETER_MAX`].
maxval: Maximum value that the operation can have. This will be scaled to
level/PARAMETER_MAX.
Returns:
An int that results from scaling `maxval` according to `level`.
"""
return int(level * maxval / 10) | Helper function to scale `val` between 0 and maxval .
Args:
level: Level of the operation that will be between [0, `PARAMETER_MAX`].
maxval: Maximum value that the operation can have. This will be scaled to
level/PARAMETER_MAX.
Returns:
An int that results from scaling `maxval` according to `level`. | int_parameter | python | google-research/augmix | augmentations.py | https://github.com/google-research/augmix/blob/master/augmentations.py | Apache-2.0 |
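The docstring mentions PARAMETER_MAX, which is evidently 10 in this implementation, so level 5 yields half of maxval. A usage check, plus a float variant sketched under the assumption that it mirrors int_parameter (the original float helper is not shown in this row):

def float_parameter(level, maxval):
    """Assumed mirror of int_parameter that keeps the fractional part."""
    return float(level) * maxval / 10.

print(int_parameter(5, 30))     # 15
print(float_parameter(5, 0.3))  # 0.15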