response | instruction |
---|---|
Return a decorator that turns functions into no-ops if the bot is
untrusted. | def untrusted_noop(return_value=None):
"""Return a decorator that turns functions into no-ops if the bot is
untrusted."""
def decorator(func):
"""Decorator function."""
@functools.wraps(func)
def wrapped(*args, **kwargs):
if environment.is_untrusted_worker():
return return_value
return func(*args, **kwargs)
return wrapped
return decorator |
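A minimal usage sketch of the decorator above. The `environment` class below is a hypothetical stand-in for the real environment module, included only so the example runs on its own:

```python
import functools


class environment:  # Hypothetical stand-in for the real environment module.

  @staticmethod
  def is_untrusted_worker():
    return True  # Pretend this bot is untrusted.


def untrusted_noop(return_value=None):
  """Turn decorated functions into no-ops on untrusted bots."""

  def decorator(func):

    @functools.wraps(func)
    def wrapped(*args, **kwargs):
      if environment.is_untrusted_worker():
        return return_value
      return func(*args, **kwargs)

    return wrapped

  return decorator


@untrusted_noop(return_value='skipped')
def privileged_operation():
  return 'ran on a trusted bot'


print(privileged_operation())  # Prints 'skipped' because the stand-in reports untrusted.
```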
Return the internal network domain. | def internal_network_domain():
"""Return the internal network domain."""
return '.c.%s.internal' % utils.get_application_id() |
"Get the untrusted platform name. | def platform_name(project, platform):
""""Get the untrusted platform name."""
return project.upper() + '_' + platform.upper() |
Get the untrusted queue name for the project and platform. | def queue_name(project, platform):
"""Get the untrusted queue name for the project and platform."""
return tasks.queue_for_platform(platform_name(project, platform)) |
Return datetime.datetime.utcnow(). We need this method because we can't
mock built-in methods. | def utcnow():
"""Return datetime.datetime.utcnow(). We need this method because we can't
mock built-in methods."""
return datetime.datetime.utcnow() |
Returns current date and time. | def current_date_time():
"""Returns current date and time."""
return datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC') |
Converts a (UTC) datetime.date to a UNIX timestamp. | def utc_date_to_timestamp(date):
"""Converts a (UTC) datetime.date to a UNIX timestamp."""
return (date - datetime.date(1970, 1, 1)).total_seconds() |
Converts a (UTC) datetime.datetime to a UNIX timestamp. | def utc_datetime_to_timestamp(dt):
"""Converts a (UTC) datetime.datetime to a UNIX timestamp."""
return (dt - datetime.datetime.utcfromtimestamp(0)).total_seconds() |
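A quick, self-contained check of the two conversions above, using the same formulas on illustrative values:

```python
import datetime

# One day after the epoch, as a date and as a datetime.
date_value = datetime.date(1970, 1, 2)
datetime_value = datetime.datetime(1970, 1, 2, 0, 0, 0)

date_timestamp = (date_value - datetime.date(1970, 1, 1)).total_seconds()
datetime_timestamp = (
    datetime_value - datetime.datetime.utcfromtimestamp(0)).total_seconds()

print(date_timestamp)      # 86400.0
print(datetime_timestamp)  # 86400.0
```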
Decode object to unicode encoding. | def decode_to_unicode(obj):
"""Decode object to unicode encoding."""
if not hasattr(obj, 'decode'):
return obj
return obj.decode('utf-8', errors='ignore') |
Encode a string as unicode, or leave bytes as they are. | def encode_as_unicode(obj):
"""Encode a string as unicode, or leave bytes as they are."""
if not hasattr(obj, 'encode'):
return obj
return obj.encode('utf-8') |
Fetch url content. | def fetch_url(url):
"""Fetch url content."""
operations_timeout = environment.get_value('URL_BLOCKING_OPERATIONS_TIMEOUT')
response = requests.get(url, timeout=operations_timeout)
if response.status_code == 404:
return None
response.raise_for_status()
return response.text |
Match fields of two strings, separated by a |field_separator|. Empty fields
can be ignored via |allow_empty_fields| flag. | def fields_match(string_1,
string_2,
field_separator=':',
allow_empty_fields=True):
"""Match fields of two strings, separated by a |field_separator|. Empty fields
can be ignored via |allow_empty_fields| flag."""
if string_1 is None or string_2 is None:
return False
if string_1 == string_2:
return True
string_1_fields = string_1.split(field_separator)
string_2_fields = string_2.split(field_separator)
if not allow_empty_fields and len(string_1_fields) != len(string_2_fields):
return False
min_fields_length = min(len(string_1_fields), len(string_2_fields))
for i in range(min_fields_length):
if string_1_fields[i] != string_2_fields[i]:
return False
return True |
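A self-contained sketch of the field-matching behaviour; the function body is reproduced from the snippet above so the illustrative calls actually run:

```python
def fields_match(string_1, string_2, field_separator=':',
                 allow_empty_fields=True):
  """Match fields of two strings separated by |field_separator|."""
  if string_1 is None or string_2 is None:
    return False
  if string_1 == string_2:
    return True

  string_1_fields = string_1.split(field_separator)
  string_2_fields = string_2.split(field_separator)

  if not allow_empty_fields and len(string_1_fields) != len(string_2_fields):
    return False

  min_fields_length = min(len(string_1_fields), len(string_2_fields))
  for i in range(min_fields_length):
    if string_1_fields[i] != string_2_fields[i]:
      return False

  return True


print(fields_match('linux:asan', 'linux:asan:release'))  # True: extra field is ignored.
print(fields_match('linux:asan', 'linux:msan'))          # False: second field differs.
print(fields_match('linux:asan', 'linux:asan:release',
                   allow_empty_fields=False))            # False: field counts differ.
```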
Return a path as a file scheme url. | def file_path_to_file_url(path):
"""Return a path as a file scheme url."""
if not path:
return ''
path = path.lstrip(WINDOWS_PREFIX_PATH)
return urllib.parse.urljoin('file:', urllib.request.pathname2url(path)) |
Filters file list by removing duplicates, non-existent files
and directories. | def filter_file_list(file_list):
"""Filters file list by removing duplicates, non-existent files
and directories."""
filtered_file_list = []
for file_path in file_list:
if not os.path.exists(file_path):
continue
if os.path.isdir(file_path):
continue
# Do a os specific case normalization before comparison.
if (os.path.normcase(file_path) in list(
map(os.path.normcase, filtered_file_list))):
continue
filtered_file_list.append(file_path)
if len(filtered_file_list) != len(file_list):
logs.log('Filtered file list (%s) from (%s).' % (str(filtered_file_list),
str(file_list)))
return filtered_file_list |
Find the path to a binary given the app directory and the file name.
This is necessary as cov files are created in the root app directory, and we
need a way to find the corresponding binary to symbolize addresses. | def find_binary_path(app_directory, binary_file_subpath):
"""Find the path to a binary given the app directory and the file name.
This is necessary as cov files are created in the root app directory, and we
need a way to find the corresponding binary to symbolize addresses."""
binary_path = os.path.join(app_directory, binary_file_subpath)
if os.path.exists(binary_path):
# Common case: the binary exists in the root directory.
return binary_path
# Match the longest file sub-path suffix.
binary_file_subpath_with_sep = binary_file_subpath
if not binary_file_subpath_with_sep.startswith(os.sep):
binary_file_subpath_with_sep = os.sep + binary_file_subpath_with_sep
for root, _, filenames in os.walk(app_directory):
for filename in filenames:
file_path = os.path.join(root, filename)
if file_path.endswith(binary_file_subpath_with_sep):
return file_path
# Otherwise, do a search for the filename.
binary_filename = os.path.basename(binary_file_subpath)
for root, _, filenames in os.walk(app_directory):
for filename in filenames:
if filename == binary_filename:
file_path = os.path.join(root, filename)
return file_path
return None |
Return application id. Code simplified based on the original implementation in
the AppEngine SDK's get_identity.get_application_id. | def get_application_id():
"""Return application id. Code simplified based on the original implementation in
the AppEngine SDK's get_identity.get_application_id."""
app_id = environment.get_value('APPLICATION_ID')
if app_id is None:
return None
psep = app_id.find('~')
if psep > 0:
app_id = app_id[psep + 1:]
return app_id |
Get the service account name. | def service_account_email():
"""Get the service account name."""
# TODO(ochang): Detect GCE and return the GCE service account instead.
email_id = get_application_id()
if ':' in email_id:
domain, application_id = email_id.split(':')
email_id = application_id + '.' + domain
return email_id + '@appspot.gserviceaccount.com' |
Returns path to bot-specific fuzzed testcases. | def get_bot_testcases_file_path(input_directory):
"""Returns path to bot-specific fuzzed testcases."""
# Using |FUZZ_INPUTS| prevents putting high load on nfs servers for cases
# when |input_directory| is a cloud storage data bundle. We can't rely
# on |FUZZ_INPUTS| always since it might not be available during local fuzzer
# testing, so use |input_directory| if it is not defined.
local_testcases_directory = environment.get_value('FUZZ_INPUTS')
bot_testcases_directory = (
local_testcases_directory
if local_testcases_directory else input_directory)
bot_name = environment.get_value('BOT_NAME')
bot_testcases_filename = '.%s_testcases' % bot_name
bot_testcases_file_path = os.path.join(bot_testcases_directory,
bot_testcases_filename)
return bot_testcases_file_path |
Return output string with symbolized and unsymbolized stacktraces
combined. | def get_crash_stacktrace_output(application_command_line,
symbolized_stacktrace,
unsymbolized_stacktrace=None,
build_type=None):
"""Return output string with symbolized and unsymbolized stacktraces
combined."""
def _guess_build_type(application_command_line):
if 'stable' in application_command_line:
return 'stable'
if 'beta' in application_command_line:
return 'beta'
if sub_string_exists_in(['debug', 'dbg'], application_command_line):
return 'debug'
return 'release'
separator = '-' * 40
if not build_type:
build_type = _guess_build_type(application_command_line)
crash_stacktraces_output = environment.get_environment_settings_as_string()
if application_command_line:
crash_stacktraces_output += (
'[Command line] %s\n\n' % application_command_line)
crash_stacktraces_output += ('+%s%s Build Stacktrace%s+\n%s' % (
separator, build_type.capitalize(), separator, symbolized_stacktrace))
# No unsymbolized stack available. Bail out.
if not unsymbolized_stacktrace:
return crash_stacktraces_output
unsymbolized_stacktrace_diff = get_unique_lines_in_unsymbolized_stack(
symbolized_stacktrace, unsymbolized_stacktrace)
if unsymbolized_stacktrace_diff:
crash_stacktraces_output += (
'\n\n+%s%s Build Unsymbolized Stacktrace (diff)%s+\n\n%s' %
(separator, build_type.capitalize(), separator,
unsymbolized_stacktrace_diff))
return crash_stacktraces_output |
Return the directory hash for a file path (excludes file name). | def get_directory_hash_for_path(file_path):
"""Return the directory hash for a file path (excludes file name)."""
root_directory = environment.get_value('ROOT_DIR')
directory_path = os.path.dirname(file_path)
normalized_directory_path = remove_prefix(directory_path,
root_directory + os.sep)
normalized_directory_path = normalized_directory_path.replace('\\', '/')
return string_hash(normalized_directory_path) |
Return the contents of the specified file, raising a fatal error on failure. | def get_file_contents_with_fatal_error_on_failure(path):
"""Return the contents of the specified file, raising a fatal error on failure."""
try:
with open(path, 'rb') as file_handle:
data = file_handle.read()
return data
except OSError:
logs.log_error('Unable to read file `%s\'' % path)
raise errors.BadStateError |
Return a line separator with an optional label. | def get_line_seperator(label=''):
"""Return a line separator with an optional label."""
separator = '-' * 40
result = '\n\n%s%s%s\n\n' % (separator, label, separator)
return result |
Return normalized relative path for a file with respect to a directory. | def get_normalized_relative_path(file_path, directory_path):
"""Return normalized relative path for a file with respect to a directory."""
normalized_relative_file_path = remove_prefix(file_path,
directory_path + os.sep)
normalized_relative_file_path = (
normalized_relative_file_path.replace('\\', '/'))
return normalized_relative_file_path |
Return a path excluding the extension. | def get_path_without_ext(path):
"""Return a path excluding the extension."""
return os.path.splitext(path)[0] |
Return list of pids for a process and its descendants. | def get_process_ids(process_id, recursive=True):
"""Return list of pids for a process and its descendants."""
# Try to find the running process.
if not psutil.pid_exists(process_id):
return []
pids = [process_id]
try:
psutil_handle = psutil.Process(process_id)
children = psutil_handle.children(recursive=recursive)
for child in children:
pids.append(child.pid)
except psutil.NoSuchProcess:
# Avoid too much logging when the process already died.
return []
except (psutil.AccessDenied, OSError):
logs.log_warn('Failed to get process children.')
return []
return pids |
Return string representation for a line count. | def get_line_count_string(line_count):
"""Return string representation for a line count."""
if line_count == 0:
return 'empty'
if line_count == 1:
return '1 line'
return '%d lines' % line_count |
Return string representation for size. | def get_size_string(size):
"""Return string representation for size."""
if size < 1 << 10:
return '%d B' % size
if size < 1 << 20:
return '%d KB' % (size >> 10)
if size < 1 << 30:
return '%d MB' % (size >> 20)
return '%d GB' % (size >> 30) |
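The thresholds above are powers of two (1 << 10 is 1024, and so on); a small self-contained check of the resulting strings:

```python
def get_size_string(size):
  """Return string representation for size."""
  if size < 1 << 10:
    return '%d B' % size
  if size < 1 << 20:
    return '%d KB' % (size >> 10)
  if size < 1 << 30:
    return '%d MB' % (size >> 20)
  return '%d GB' % (size >> 30)


print(get_size_string(512))              # '512 B'
print(get_size_string(2048))             # '2 KB'
print(get_size_string(5 * 1024 * 1024))  # '5 MB'
print(get_size_string(3 << 30))          # '3 GB'
```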
Return unique lines in unsymbolized stacktrace that are not in the
symbolized stacktrace. | def get_unique_lines_in_unsymbolized_stack(symbolized_stacktrace,
unsymbolized_stacktrace):
"""Return unique lines in unsymbolized stacktrace that are not in the
symbolized stacktrace."""
if symbolized_stacktrace == unsymbolized_stacktrace:
return ''
symbolized_stacktrace_lines = symbolized_stacktrace.splitlines()
unsymbolized_stacktrace_lines = unsymbolized_stacktrace.splitlines()
stripped_symbolized_stacktrace_lines = set()
for line in symbolized_stacktrace_lines:
stripped_symbolized_stacktrace_lines.add(line.strip())
index = 0
last_index = len(unsymbolized_stacktrace_lines) - 1
start = -1
end = -1
while index <= last_index:
if (unsymbolized_stacktrace_lines[index].strip() not in
stripped_symbolized_stacktrace_lines):
if start == -1:
start = index
end = index + 1
else:
end = index
index += 1
if start == -1:
# Nothing unique found, return empty string.
return ''
line_gap = 2
start = max(0, start - line_gap)
end = min(end + line_gap, last_index + 1)
result = '\n'.join(unsymbolized_stacktrace_lines[start:end])
return result |
Indents a string by the given number of characters. | def indent_string(string, chars):
"""Indents a string by the given number of characters."""
indented_string = ''
for line in string.splitlines():
indented_string += '%s%s\n' % ((' ' * chars), line)
# Strip the ending '\n' and return result.
return indented_string[0:-1] |
Return true if the file looks like a binary file. | def is_binary_file(file_path, bytes_to_read=1024):
"""Return true if the file looks like a binary file."""
file_extension = os.path.splitext(file_path)[1].lower()
if file_extension in BINARY_EXTENSIONS:
return True
if file_extension in TEXT_EXTENSIONS:
return False
# Compare byte values, since |data| below is read in binary mode.
text_characters = set(range(32, 128)) | {ord('\r'), ord('\n'), ord('\t')}
try:
with open(file_path, 'rb') as file_handle:
data = file_handle.read(bytes_to_read)
except:
logs.log_error('Could not read file %s in is_binary_file.' % file_path)
return None
binary_data = [byte for byte in data if byte not in text_characters]
return len(binary_data) > len(data) * 0.1 |
Returns true if the caller function is called recursively. | def is_recursive_call():
"""Returns true if the caller function is called recursively."""
try:
stack_frames = inspect.stack()
caller_name = stack_frames[1][3]
for stack_frame_index in range(2, len(stack_frames)):
if caller_name == stack_frames[stack_frame_index][3]:
return True
except:
pass
return False |
Return true if the file looks like a testcase file. | def is_valid_testcase_file(file_path,
check_if_exists=True,
size_limit=None,
allowed_extensions=None):
"""Return true if the file looks like a testcase file."""
filename = os.path.basename(file_path)
if filename.startswith('.') or filename.startswith(FUZZ_PREFIX):
return False
if allowed_extensions:
file_extension = os.path.splitext(file_path)[1].lower()
if file_extension not in allowed_extensions:
return False
directories_to_ignore = ['.git', '.hg', '.svn']
for directory_to_ignore in directories_to_ignore:
directory_string = '%s%s%s' % (os.sep, directory_to_ignore, os.sep)
if directory_string in file_path:
return False
if (check_if_exists or size_limit) and not os.path.exists(file_path):
return False
if size_limit and os.path.getsize(file_path) > size_limit:
return False
return True |
Return maximum number of parallel processes allowed. Adjust it based
on thread multiplier. | def maximum_parallel_processes_allowed():
"""Return maximum number of parallel processes allowed. Adjust it based
on thread multiplier."""
if environment.is_trusted_host():
# gRPC only supports 1 thread/process.
return 1
max_parallel_process_count = environment.get_value('MAX_FUZZ_THREADS', 1)
thread_multiplier = environment.get_value('THREAD_MULTIPLIER', 1)
max_parallel_process_count *= thread_multiplier
return int(max_parallel_process_count) |
Normalize path. This is needed on Windows because Windows paths are
case-insensitive. | def normalize_path(path):
"""Normalize path. This is needed on Windows because Windows paths are
case-insensitive."""
return os.path.normcase(os.path.normpath(path)) |
Call python's garbage collector. | def python_gc():
"""Call python's garbage collector."""
# gc.collect() isn't perfectly synchronous, because it may
# break reference cycles that then take time to fully
# finalize. Call it thrice and hope for the best.
for _ in range(3):
gc.collect() |
Returns a random element from list. | def random_element_from_list(element_list):
"""Returns a random element from list."""
return element_list[random.SystemRandom().randint(0, len(element_list) - 1)] |
Returns a random number between start and end. | def random_number(start, end):
"""Returns a random number between start and end."""
return random.SystemRandom().randint(start, end) |
Returns a random element from list taking its weight into account. | def random_weighted_choice(element_list, weight_attribute='weight'):
"""Returns a random element from list taking its weight into account."""
total = sum(getattr(e, weight_attribute) for e in element_list)
random_pick = random.SystemRandom().uniform(0, total)
temp = 0
for element in element_list:
element_weight = getattr(element, weight_attribute)
if element_weight == 0:
continue
if temp + element_weight >= random_pick:
return element
temp += element_weight
assert False, 'Failed to make a random weighted choice.' |
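A self-contained usage sketch; `FuzzerEntry` and the weights below are purely illustrative stand-ins for objects that carry the default `weight` attribute:

```python
import collections
import random


def random_weighted_choice(element_list, weight_attribute='weight'):
  """Returns a random element from list taking its weight into account."""
  total = sum(getattr(e, weight_attribute) for e in element_list)
  random_pick = random.SystemRandom().uniform(0, total)
  temp = 0
  for element in element_list:
    element_weight = getattr(element, weight_attribute)
    if element_weight == 0:
      continue
    if temp + element_weight >= random_pick:
      return element
    temp += element_weight
  assert False, 'Failed to make a random weighted choice.'


# Illustrative weighted elements only.
FuzzerEntry = collections.namedtuple('FuzzerEntry', ['name', 'weight'])
fuzzers = [
    FuzzerEntry('libfuzzer_asan', 5.0),
    FuzzerEntry('libfuzzer_msan', 1.0),
    FuzzerEntry('afl_asan', 0.0),  # Weight 0 is never picked.
]

counts = collections.Counter(
    random_weighted_choice(fuzzers).name for _ in range(10000))
print(counts)  # 'libfuzzer_asan' should appear roughly five times as often as msan.
```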
Returns eval-ed data from file. | def read_data_from_file(file_path, eval_data=True, default=None):
"""Returns eval-ed data from file."""
if not os.path.exists(file_path):
return default
failure_wait_interval = environment.get_value('FAIL_WAIT', 0)
file_content = None
retry_limit = environment.get_value('FAIL_RETRIES', 1)
for _ in range(retry_limit):
try:
with open(file_path, 'rb') as file_handle:
file_content = file_handle.read()
except:
file_content = None
logs.log_warn('Error occurred while reading %s, retrying.' % file_path)
time.sleep(random.uniform(1, failure_wait_interval))
continue
if file_content is None:
logs.log_error('Failed to read data from file %s.' % file_path)
return None
if not eval_data:
return file_content
if not file_content:
return default
try:
return ast.literal_eval(file_content.decode('utf-8'))
except (SyntaxError, TypeError):
return None |
Strips the prefix from a string. | def remove_prefix(string, prefix):
"""Strips the prefix from a string."""
if string.startswith(prefix):
return string[len(prefix):]
return string |
Strips substrings from a given string. | def remove_sub_strings(string, substrings):
"""Strips substrings from a given string."""
result = string
for substring in substrings:
result = result.replace(substring, '')
return result |
Restart machine. | def restart_machine():
"""Restart machine."""
if environment.platform() == 'WINDOWS':
os.system('shutdown /f /r /t 0')
else:
# POSIX platforms.
os.system('sudo shutdown -r now') |
Helper to search for bytes in a large binary file without memory
issues. | def search_bytes_in_file(search_bytes, file_handle):
"""Helper to search for bytes in a large binary file without memory
issues.
"""
# TODO(aarya): This is too brittle and will fail if we have a very large
# line.
for line in file_handle:
if search_bytes in line:
return True
return False |
Returns a SHA-1 hash of the object. Not used for security purposes. | def string_hash(obj):
"""Returns a SHA-1 hash of the object. Not used for security purposes."""
return hashlib.sha1(str(obj).encode('utf-8')).hexdigest() |
Returns a deterministic hash of an ndb entity.
If an entity has been recently modified, put() must be called on it before
this function will pick up the changes. | def entity_hash(obj):
"""Returns a deterministic hash of a ndb entity.
If an entity has been recently modified, put() must be called on it before
this function will pick up the changes.
"""
hasher = hashlib.sha1()
entity_dict = obj.to_dict()
for key in sorted(entity_dict.keys()):
hasher.update(str(entity_dict[key]).encode('utf-8'))
return hasher.hexdigest() |
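A self-contained sketch of why sorting the keys makes the hash deterministic; `FakeEntity` is a hypothetical stand-in exposing `to_dict()`, since the real function expects an ndb entity:

```python
import hashlib


class FakeEntity:  # Hypothetical stand-in for an ndb entity.

  def __init__(self, **fields):
    self._fields = fields

  def to_dict(self):
    return dict(self._fields)


def entity_hash(obj):
  """Returns a deterministic hash of an entity-like object."""
  hasher = hashlib.sha1()
  entity_dict = obj.to_dict()
  for key in sorted(entity_dict.keys()):
    hasher.update(str(entity_dict[key]).encode('utf-8'))
  return hasher.hexdigest()


a = FakeEntity(crash_type='Heap-buffer-overflow', crash_state='foo\nbar')
b = FakeEntity(crash_state='foo\nbar', crash_type='Heap-buffer-overflow')
print(entity_hash(a) == entity_hash(b))  # True: field order does not matter.
```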
Check to see if a string has a value that should be treated as True. | def string_is_true(value):
"""Check to see if a string has a value that should be treated as True."""
return value and value != 'false' and value != 'False' and value != '0' |
Strip a prefix from the start of a string. | def strip_from_left(string, prefix):
"""Strip a prefix from the start of a string."""
if not string.startswith(prefix):
return string
return string[len(prefix):] |
Strip a suffix from the end of a string. | def strip_from_right(string, suffix):
"""Strip a suffix from the end of a string."""
if not string.endswith(suffix):
return string
return string[:len(string) - len(suffix)] |
Return true if one of the substrings in the list is found in |string|. | def sub_string_exists_in(substring_list, string):
"""Return true if one of the substrings in the list is found in |string|."""
for substring in substring_list:
if substring in string:
return True
return False |
Return time difference as a string. | def time_difference_string(timestamp):
"""Return time difference as a string."""
if not timestamp:
return ''
delta = int((datetime.datetime.utcnow() - timestamp).total_seconds())
d_minutes = delta // 60
d_hours = d_minutes // 60
d_days = d_hours // 24
if d_days > 6:
return '%s' % str(timestamp).split()[0]
if d_days > 1:
return '%s days ago' % d_days # starts at 2 days.
if d_hours > 1:
return '%s hours ago' % d_hours # starts at 2 hours.
if d_minutes > 1:
return '%s minutes ago' % d_minutes
if d_minutes > 0:
return '1 minute ago'
if delta > -30:
return 'moments ago'
# Only say something is in the future if it is more than just clock skew.
return 'in the future' |
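A self-contained demonstration of the buckets above; the function body is reproduced from the snippet and the offsets are illustrative:

```python
import datetime


def time_difference_string(timestamp):
  """Return time difference as a string."""
  if not timestamp:
    return ''
  delta = int((datetime.datetime.utcnow() - timestamp).total_seconds())
  d_minutes = delta // 60
  d_hours = d_minutes // 60
  d_days = d_hours // 24
  if d_days > 6:
    return '%s' % str(timestamp).split()[0]
  if d_days > 1:
    return '%s days ago' % d_days
  if d_hours > 1:
    return '%s hours ago' % d_hours
  if d_minutes > 1:
    return '%s minutes ago' % d_minutes
  if d_minutes > 0:
    return '1 minute ago'
  if delta > -30:
    return 'moments ago'
  return 'in the future'


now = datetime.datetime.utcnow()
print(time_difference_string(now - datetime.timedelta(hours=3)))    # '3 hours ago'
print(time_difference_string(now - datetime.timedelta(days=10)))    # Date only, e.g. '2024-01-05'
print(time_difference_string(now + datetime.timedelta(minutes=5)))  # 'in the future'
```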
Timeout decorator for functions. | def timeout(duration):
"""Timeout decorator for functions."""
def decorator(func):
"""Decorates the given function."""
if environment.is_running_on_app_engine():
# multiprocessing doesn't work on App Engine.
return func
@functools.wraps(func)
def _wrapper(*args, **kwargs):
"""Wrapper."""
# FIXME: Weird exceptions in imports, might be something relating to our
# reload module. Needs further investigation, try this as a temporary fix.
import multiprocessing.pool
import threading
# Fix for Python < 2.7.2.
if not hasattr(threading.current_thread(), '_children'):
# pylint: disable=protected-access
threading.current_thread()._children = weakref.WeakKeyDictionary()
global THREAD_POOL
if THREAD_POOL is None:
THREAD_POOL = multiprocessing.pool.ThreadPool(processes=3)
try:
from clusterfuzz._internal.datastore import \
ndb_init # Avoid circular import.
async_result = THREAD_POOL.apply_async(
ndb_init.thread_wrapper(func), args=args, kwds=kwargs)
return async_result.get(timeout=duration)
except multiprocessing.TimeoutError:
# Sleep for some minutes in order to wait for flushing metrics.
time.sleep(120)
# If we don't exit here, threads will pile up, leading to out-of-memory.
# It is safe to just exit here.
logs.log_fatal_and_exit(
f'Exception occurred in function {func}: args: {args}, kwargs: '
f'{kwargs} exception: {sys.exc_info()[1]}')
return _wrapper
return decorator |
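The decorator above depends on ClusterFuzz-specific plumbing (`THREAD_POOL`, `ndb_init`, `logs`); below is a stripped-down sketch of the same thread-pool timeout pattern, for illustration only. As in the original, a timed-out call keeps running in its worker thread; only the caller stops waiting.

```python
import functools
import multiprocessing.pool
import time

_THREAD_POOL = multiprocessing.pool.ThreadPool(processes=3)


def timeout(duration):
  """Raise multiprocessing.TimeoutError if the call exceeds |duration| seconds."""

  def decorator(func):

    @functools.wraps(func)
    def _wrapper(*args, **kwargs):
      async_result = _THREAD_POOL.apply_async(func, args=args, kwds=kwargs)
      # get() raises multiprocessing.TimeoutError if the call does not finish
      # within |duration| seconds.
      return async_result.get(timeout=duration)

    return _wrapper

  return decorator


@timeout(1)
def slow():
  time.sleep(5)


try:
  slow()
except multiprocessing.TimeoutError:
  print('timed out')
```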
Wait for all threads to finish unless the given timeout is reached.
If no thread is alive, it waits much shorter than the given timeout.
Return True if timeout is exceeded, and return False otherwise. | def wait_until_timeout(threads, thread_timeout):
"""Wait for all threads to finish unless the given timeout is reached.
If no thread is alive, it waits much shorter than the given timeout.
Return True if timeout is exceeded, and return False otherwise.
"""
thread_alive_check_interval = environment.get_value(
'THREAD_ALIVE_CHECK_INTERVAL')
if not thread_alive_check_interval:
time.sleep(thread_timeout)
return False
wait_timeout = time.time() + thread_timeout
while time.time() < wait_timeout:
time.sleep(thread_alive_check_interval)
thread_alive = False
for thread in threads:
if thread.is_alive():
thread_alive = True
break
if not thread_alive:
return False
return True |
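A self-contained usage sketch with a hypothetical `environment` stand-in supplying `THREAD_ALIVE_CHECK_INTERVAL`, so the polling branch is exercised (the inner alive check is condensed with any()):

```python
import threading
import time


class environment:  # Hypothetical stand-in for the real environment module.
  _values = {'THREAD_ALIVE_CHECK_INTERVAL': 0.1}

  @classmethod
  def get_value(cls, key):
    return cls._values.get(key)


def wait_until_timeout(threads, thread_timeout):
  """Wait for threads to finish unless |thread_timeout| is exceeded."""
  thread_alive_check_interval = environment.get_value(
      'THREAD_ALIVE_CHECK_INTERVAL')
  if not thread_alive_check_interval:
    time.sleep(thread_timeout)
    return False

  wait_timeout = time.time() + thread_timeout
  while time.time() < wait_timeout:
    time.sleep(thread_alive_check_interval)
    if not any(thread.is_alive() for thread in threads):
      return False

  return True


workers = [threading.Thread(target=time.sleep, args=(0.3,)) for _ in range(2)]
for worker in workers:
  worker.start()

print(wait_until_timeout(workers, 2))  # False: both threads finish well before 2s.
```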
Writes data to file. | def write_data_to_file(content, file_path, append=False):
"""Writes data to file."""
failure_wait_interval = environment.get_value('FAIL_WAIT')
file_mode = 'ab' if append else 'wb'
retry_limit = environment.get_value('FAIL_RETRIES')
# TODO(mbarbella): One extra iteration is allowed for the type conversion hack
# included here. Once this function is converted to only accept bytes-like
# objects, it should be adjusted back to the normal retry limit.
for _ in range(retry_limit + 1):
try:
with open(file_path, file_mode) as file_handle:
file_handle.write(content)
except TypeError:
# If we saw a TypeError, content was not bytes-like. Convert it.
content = str(content).encode('utf-8')
continue
except OSError:
# An OSError signals a problem writing the file. Retry in case
# it was a spurious error.
logs.log_warn('Error occurred while writing %s, retrying.' % file_path)
time.sleep(random.uniform(1, failure_wait_interval))
continue
# Successfully written data file.
return
logs.log_error('Failed to write data to file %s.' % file_path) |
Return the default backup bucket for this instance of ClusterFuzz. | def default_backup_bucket():
"""Return the default backup bucket for this instance of ClusterFuzz."""
# Do not use |BACKUP_BUCKET| environment variable as that is the overridden
# backup bucket from job type and is not the default backup bucket.
return local_config.ProjectConfig().get('env.BACKUP_BUCKET') |
Return the default project name for this instance of ClusterFuzz. | def default_project_name():
"""Return the default project name for this instance of ClusterFuzz."""
# Do not use |PROJECT_NAME| environment variable as that is the overridden
# project name from job type and is not the default project name.
return local_config.ProjectConfig().get('env.PROJECT_NAME') |
Return the project for the current job, or the default project. | def current_project():
"""Return the project for the current job, or the default project."""
return environment.get_value('PROJECT_NAME', default_project_name()) |
Return the current source revision. | def current_source_version():
"""Return the current source revision."""
# For test use.
source_version_override = environment.get_value('SOURCE_VERSION_OVERRIDE')
if source_version_override:
return source_version_override
root_directory = environment.get_value('ROOT_DIR')
local_manifest_path = os.path.join(root_directory, LOCAL_SOURCE_MANIFEST)
if os.path.exists(local_manifest_path):
return read_data_from_file(
local_manifest_path, eval_data=False).strip().decode('utf-8')
return None |
Read from file handle, limiting output to |max_len| by removing output in
the middle. | def read_from_handle_truncated(file_handle, max_len):
"""Read from file handle, limiting output to |max_len| by removing output in
the middle."""
file_handle.seek(0, os.SEEK_END)
file_size = file_handle.tell()
file_handle.seek(0, os.SEEK_SET)
if file_size <= max_len:
return file_handle.read()
# Read first and last |half_max_len| bytes.
half_max_len = max_len // 2
start = file_handle.read(half_max_len)
file_handle.seek(file_size - half_max_len, os.SEEK_SET)
end = file_handle.read(half_max_len)
truncated_marker = b'\n...truncated %d bytes...\n' % (file_size - max_len)
return start + truncated_marker + end |
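A self-contained check using an in-memory handle; the function body is reproduced from the snippet above and the input is illustrative:

```python
import io
import os


def read_from_handle_truncated(file_handle, max_len):
  """Read from file handle, truncating output in the middle to |max_len|."""
  file_handle.seek(0, os.SEEK_END)
  file_size = file_handle.tell()
  file_handle.seek(0, os.SEEK_SET)

  if file_size <= max_len:
    return file_handle.read()

  # Read first and last |half_max_len| bytes.
  half_max_len = max_len // 2
  start = file_handle.read(half_max_len)
  file_handle.seek(file_size - half_max_len, os.SEEK_SET)
  end = file_handle.read(half_max_len)

  truncated_marker = b'\n...truncated %d bytes...\n' % (file_size - max_len)
  return start + truncated_marker + end


handle = io.BytesIO(b'A' * 100 + b'B' * 100)
print(read_from_handle_truncated(handle, 20))
# b'AAAAAAAAAA\n...truncated 180 bytes...\nBBBBBBBBBB'
```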
Normalize an email address. | def normalize_email(email):
"""Normalize an email address."""
# TODO(ochang): Investigate whether it makes sense to replace
# @googlemail.com with @gmail.com.
return email.lower() |
Return whether or not the 2 emails are equal after being normalized. | def emails_equal(first, second):
"""Return whether or not the 2 emails are equal after being normalized."""
if not first or not second:
return False
return normalize_email(first) == normalize_email(second) |
Parse a delimiter separated value. | def parse_delimited(value_or_handle, delimiter, strip=False,
remove_empty=False):
"""Parse a delimiter separated value."""
if hasattr(value_or_handle, 'read'):
results = value_or_handle.read().split(delimiter)
else:
results = value_or_handle.split(delimiter)
if not strip and not remove_empty:
return results
processed_results = []
for result in results:
if strip:
result = result.strip()
if remove_empty and not result:
continue
processed_results.append(result)
return processed_results |
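The helper accepts either a plain string or a file-like handle; a small self-contained demonstration of the `strip` and `remove_empty` flags (function reproduced from the snippet above):

```python
def parse_delimited(value_or_handle, delimiter, strip=False, remove_empty=False):
  """Parse a delimiter separated value."""
  if hasattr(value_or_handle, 'read'):
    results = value_or_handle.read().split(delimiter)
  else:
    results = value_or_handle.split(delimiter)

  if not strip and not remove_empty:
    return results

  processed_results = []
  for result in results:
    if strip:
      result = result.strip()
    if remove_empty and not result:
      continue
    processed_results.append(result)

  return processed_results


print(parse_delimited('a, b,, c', ','))                                 # ['a', ' b', '', ' c']
print(parse_delimited('a, b,, c', ',', strip=True, remove_empty=True))  # ['a', 'b', 'c']
```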
If this is an instance of OSS-Fuzz. | def is_oss_fuzz():
"""If this is an instance of OSS-Fuzz."""
return default_project_name() == 'oss-fuzz' |
If this is an instance of chromium fuzzing. | def is_chromium():
"""If this is an instance of chromium fuzzing."""
return default_project_name() in ('chromium', 'chromium-testing') |
Returns the SHA-1 hash of |file_path| contents. | def file_hash(file_path):
"""Returns the SHA-1 hash of |file_path| contents."""
chunk_size = 51200 # Read in 50 KB chunks.
digest = hashlib.sha1()
with open(file_path, 'rb') as file_handle:
chunk = file_handle.read(chunk_size)
while chunk:
digest.update(chunk)
chunk = file_handle.read(chunk_size)
return digest.hexdigest() |
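A self-contained check that the chunked SHA-1 above matches hashing the whole contents at once; the temporary file and its contents are illustrative:

```python
import hashlib
import os
import tempfile


def file_hash(file_path):
  """Returns the SHA-1 hash of |file_path| contents."""
  chunk_size = 51200  # Read in 50 KB chunks.
  digest = hashlib.sha1()
  with open(file_path, 'rb') as file_handle:
    chunk = file_handle.read(chunk_size)
    while chunk:
      digest.update(chunk)
      chunk = file_handle.read(chunk_size)
  return digest.hexdigest()


contents = b'x' * 200000  # Larger than one chunk, so several updates happen.
with tempfile.NamedTemporaryFile(delete=False) as tmp:
  tmp.write(contents)
  path = tmp.name

print(file_hash(path) == hashlib.sha1(contents).hexdigest())  # True
os.remove(path)
```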
Get the CPU count. | def cpu_count():
"""Get the CPU count."""
# Does not import on App Engine.
import multiprocessing
return environment.get_value('CPU_COUNT_OVERRIDE',
multiprocessing.cpu_count()) |
Set extra sanitizers based on crash_type. | def set_extra_sanitizers(crash_type):
"""Set extra sanitizers based on crash_type."""
if crash_type in crash_analyzer.EXTRA_SANITIZERS_SECURITY:
environment.set_value('USE_EXTRA_SANITIZERS', True)
environment.disable_lsan()
else:
environment.set_value('USE_EXTRA_SANITIZERS', False) |
Create a testcase list file for tests in a directory. | def create_testcase_list_file(output_directory):
"""Create a testcase list file for tests in a directory."""
files_list = []
files_list_file_path = os.path.join(output_directory, TESTCASE_LIST_FILENAME)
for root, _, files in shell.walk(output_directory):
for filename in files:
if filename.endswith(INFO_FILE_EXTENSION):
# Skip an info file.
continue
file_path = os.path.join(root, filename)
if not utils.is_valid_testcase_file(file_path, check_if_exists=False):
continue
normalized_relative_file_path = utils.get_normalized_relative_path(
file_path, output_directory)
files_list.append(normalized_relative_file_path)
utils.write_data_to_file('\n'.join(sorted(files_list)), files_list_file_path) |
Returns all testcases from testcase directories. | def get_testcases_from_directories(directories):
"""Returns all testcases from testcase directories."""
testcase_paths = []
max_testcases = environment.get_value('MAX_TESTCASES')
generators = []
for directory in directories:
if not directory.strip():
continue
generators.append(shell.walk(directory))
for generator in generators:
for structure in generator:
base_directory = structure[0]
for filename in structure[2]:
if not filename.startswith(FUZZ_PREFIX):
continue
if filename.endswith(COVERAGE_SUFFIX):
continue
file_path = os.path.join(base_directory, filename)
if not os.path.getsize(file_path):
continue
testcase_paths.append(utils.normalize_path(file_path))
if len(testcase_paths) == max_testcases:
return testcase_paths
return testcase_paths |
Returns true if this is a testcase or its resource dependency. | def is_testcase_resource(filename):
"""Returns true if this is a testcase or its resource dependency."""
if filename.startswith(FUZZ_PREFIX):
return True
if filename.startswith(FLAGS_PREFIX):
return True
if filename.startswith(DEPENDENCY_PREFIX):
return True
if filename.startswith(RESOURCES_PREFIX):
return True
if filename.endswith(COVERAGE_SUFFIX):
return True
return False |
Removes all testcases and their dependencies from testcase directories. | def remove_testcases_from_directories(directories):
"""Removes all testcases and their dependencies from testcase directories."""
generators = []
for directory in directories:
if not directory.strip():
continue
# If there is a bot-specific files list, delete it now.
bot_testcases_file_path = utils.get_bot_testcases_file_path(directory)
shell.remove_file(bot_testcases_file_path)
generators.append(shell.walk(directory))
for generator in generators:
for structure in generator:
base_directory = structure[0]
for filename in structure[2]:
if not is_testcase_resource(filename):
continue
if filename.startswith(RESOURCES_PREFIX):
# In addition to removing this file, remove all resources.
resources_file_path = os.path.join(base_directory, filename)
resources = read_resource_list(resources_file_path)
for resource in resources:
shell.remove_file(resource)
file_path = os.path.join(base_directory, filename)
shell.remove_file(file_path) |
Generate a resource list. | def read_resource_list(resource_file_path):
"""Generate a resource list."""
if not os.path.exists(resource_file_path):
return []
resources = []
base_directory = os.path.dirname(resource_file_path)
with open(resource_file_path, encoding='utf-8') as file_handle:
resource_file_contents = file_handle.read()
for line in resource_file_contents.splitlines():
resource = os.path.join(base_directory, line.strip())
if not os.path.exists(resource):
break
resources.append(resource)
return resources |
Returns the list of testcase resource dependencies. | def get_resource_dependencies(testcase_absolute_path, test_prefix=FUZZ_PREFIX):
"""Returns the list of testcase resource dependencies."""
resources = []
if not os.path.exists(testcase_absolute_path):
return resources
base_directory = os.path.dirname(testcase_absolute_path)
testcase_filename = os.path.basename(testcase_absolute_path)
# FIXME(mbarbella): Remove this when all fuzzers are using "resources-".
# This code includes the dependencies that begin with
# dependency prefix and are referenced in the testcase.
testcase_contents = None
for filename in os.listdir(base_directory):
if filename.startswith(DEPENDENCY_PREFIX):
# Only load the testcase contents if necessary.
if not testcase_contents:
with open(testcase_absolute_path, 'rb') as file_handle:
testcase_contents = file_handle.read()
if filename.encode('utf-8') in testcase_contents:
file_path = os.path.join(base_directory, filename)
resources.append(file_path)
# This code includes the dependencies in cases when the testcase itself is
# just a wrapper file around the actual testcase.
if DEPENDENCY_PREFIX in testcase_absolute_path:
dependency_filename = os.path.splitext(testcase_filename)[0]
dependency_filename = re.compile(DEPENDENCY_PREFIX).sub(
'', dependency_filename, 1)
dependency_filename = re.compile(FUZZ_PREFIX).sub('', dependency_filename,
1)
dependency_filename = re.compile(HTTP_PREFIX).sub('', dependency_filename,
1)
dependency_file_path = os.path.join(base_directory, dependency_filename)
resources.append(dependency_file_path)
# Check to see if this test case lists all resources in a resources file.
if testcase_filename.startswith(test_prefix):
stripped_testcase_name = testcase_filename[len(test_prefix):]
resources_filename = '%s%s' % (RESOURCES_PREFIX, stripped_testcase_name)
resources_file_path = os.path.join(base_directory, resources_filename)
resources += read_resource_list(resources_file_path)
# For extensions, archive everything in the extension directory.
if APPS_PREFIX in testcase_filename or EXTENSIONS_PREFIX in testcase_filename:
for root, _, files in shell.walk(base_directory):
for filename in files:
file_path = os.path.join(root, filename)
if file_path == testcase_absolute_path:
continue
resources.append(file_path)
return resources |
Returns command line flags to use for a testcase. | def get_command_line_flags(testcase_path):
"""Returns command line flags to use for a testcase."""
arguments = environment.get_value('APP_ARGS')
additional_arguments = get_additional_command_line_flags(testcase_path)
if arguments:
arguments += ' ' + additional_arguments
else:
arguments = additional_arguments
return arguments.strip() |
Returns additional command line flags to use for a testcase. | def get_additional_command_line_flags(testcase_path):
"""Returns additional command line flags to use for a testcase."""
# Get the initial flags list from the environment value.
additional_command_line_flags = (
environment.get_value('ADDITIONAL_COMMAND_LINE_FLAGS', ''))
# If we don't have a fuzz prefix, no need to look further for flags file.
testcase_filename = os.path.basename(testcase_path)
if not testcase_filename.startswith(FUZZ_PREFIX):
return additional_command_line_flags
# Gets the flags list from the flags file.
stripped_testcase_name = testcase_filename[len(FUZZ_PREFIX):]
flags_filename = '%s%s' % (FLAGS_PREFIX, stripped_testcase_name)
flags_file_path = os.path.join(os.path.dirname(testcase_path), flags_filename)
flags_file_content = utils.read_data_from_file(
flags_file_path, eval_data=False)
if flags_file_content:
additional_command_line_flags += ' ' + flags_file_content.decode('utf-8')
return additional_command_line_flags.strip() |
Run a single testcase and return crash results in the crash queue. | def run_testcase(thread_index, file_path, gestures, env_copy):
"""Run a single testcase and return crash results in the crash queue."""
try:
# Update environment with environment copy from parent.
if env_copy:
os.environ.update(env_copy)
# Initialize variables.
needs_http = '-http-' in file_path
test_timeout = environment.get_value('TEST_TIMEOUT', 10)
app_directory = environment.get_value('APP_DIR')
environment.set_value('PIDS', '[]')
# Get command line options.
command = get_command_line_for_application(
file_path, user_profile_index=thread_index, needs_http=needs_http)
# Run testcase.
return process_handler.run_process(
command,
timeout=test_timeout,
gestures=gestures,
env_copy=env_copy,
current_working_directory=app_directory)
except Exception:
logs.log_error('Exception occurred while running run_testcase.')
return None, None, None |
Read the URLs from the output. | def get_resource_paths(output):
"""Read the URLs from the output."""
resource_paths = set()
for line in output.splitlines():
match = CHROME_URL_LOAD_REGEX.match(line)
if not match:
continue
local_path = convert_dependency_url_to_local_path(match.group(2))
if local_path:
logs.log('Detected resource: %s.' % local_path)
resource_paths.add(local_path)
return list(resource_paths) |
Convert a dependency URL to a corresponding local path. | def convert_dependency_url_to_local_path(url):
"""Convert a dependency URL to a corresponding local path."""
# Bot-specific import.
from clusterfuzz._internal.bot.webserver import http_server
logs.log('Process dependency: %s.' % url)
file_match = FILE_URL_REGEX.search(url)
http_match = HTTP_URL_REGEX.search(url)
platform = environment.platform()
local_path = None
if file_match:
file_path = file_match.group(1)
logs.log('Detected file dependency: %s.' % file_path)
if platform == 'WINDOWS':
local_path = file_path
else:
local_path = '/' + file_path
# Convert remote to local path for android.
if environment.is_android():
remote_testcases_directory = android.constants.DEVICE_TESTCASES_DIR
local_testcases_directory = environment.get_value('FUZZ_INPUTS')
local_path = local_path.replace(remote_testcases_directory,
local_testcases_directory)
elif http_match:
relative_http_path = os.path.sep + http_match.group(2)
logs.log('Detected http dependency: %s.' % relative_http_path)
local_path = http_server.get_absolute_testcase_file(relative_http_path)
if not local_path:
# This needs to be a warning since in many cases, it is actually a
# non-existent path. For others, we need to add the directory aliases in
# file http_server.py.
logs.log_warn(
'Unable to find server resource %s, skipping.' % relative_http_path)
if local_path:
local_path = utils.normalize_path(local_path)
return local_path |
Returns the timestamp of a testcase. | def _get_testcase_time(testcase_path):
"""Returns the timestamp of a testcase."""
stats = fuzzer_stats.TestcaseRun.read_from_disk(testcase_path)
if stats:
return datetime.datetime.utcfromtimestamp(float(stats.timestamp))
return None |
Uploads testcase so that a log file can be matched with it. | def upload_testcase(testcase_path, log_time):
"""Uploads testcase so that a log file can be matched with it."""
fuzz_logs_bucket = environment.get_value('FUZZ_LOGS_BUCKET')
if not fuzz_logs_bucket:
return
if not os.path.exists(testcase_path):
return
with open(testcase_path, 'rb') as file_handle:
testcase_contents = file_handle.read()
fuzzer_logs.upload_to_logs(
fuzz_logs_bucket,
testcase_contents,
time=log_time,
file_extension='.testcase') |
Returns crash part of the output, excluding unrelated content (e.g. output
from corpus merge, etc). | def _get_crash_output(output):
"""Returns crash part of the output, excluding unrelated content (e.g. output
from corpus merge, etc)."""
if output is None:
return None
crash_stacktrace_end_marker_index = output.find(
data_types.CRASH_STACKTRACE_END_MARKER)
if crash_stacktrace_end_marker_index == -1:
return output
return output[:crash_stacktrace_end_marker_index] |
Run a single testcase and return crash results in the crash queue. | def run_testcase_and_return_result_in_queue(crash_queue,
thread_index,
file_path,
gestures,
env_copy,
upload_output=False):
"""Run a single testcase and return crash results in the crash queue."""
# Since this is running in its own process, initialize the log handler again.
# This is needed for Windows where instances are not shared across child
# processes. See:
# https://stackoverflow.com/questions/34724643/python-logging-with-multiprocessing-root-logger-different-in-windows
logs.configure('run_testcase', {
'testcase_path': file_path,
})
# Also reinitialize NDB context for the same reason as above.
with ndb_init.context():
_do_run_testcase_and_return_result_in_queue(
crash_queue,
thread_index,
file_path,
gestures,
env_copy,
upload_output=upload_output) |
Run a single testcase and return crash results in the crash queue. | def _do_run_testcase_and_return_result_in_queue(crash_queue,
thread_index,
file_path,
gestures,
env_copy,
upload_output=False):
"""Run a single testcase and return crash results in the crash queue."""
try:
# Run testcase and check whether a crash occurred or not.
return_code, crash_time, output = run_testcase(thread_index, file_path,
gestures, env_copy)
# Pull testcase directory to host to get any stats files.
if environment.is_trusted_host():
from clusterfuzz._internal.bot.untrusted_runner import file_host
file_host.pull_testcases_from_worker()
# Analyze the crash.
crash_output = _get_crash_output(output)
crash_result = CrashResult(return_code, crash_time, crash_output)
# To provide consistency between stats and logs, we use timestamp taken
# from stats when uploading logs and testcase.
if upload_output:
log_time = _get_testcase_time(file_path)
if crash_result.is_crash():
# Initialize resource list with the testcase path.
resource_list = [file_path]
resource_list += get_resource_paths(crash_output)
# Store the crash stack file in the crash stacktrace directory
# with filename as the hash of the testcase path.
crash_stacks_directory = environment.get_value('CRASH_STACKTRACES_DIR')
stack_file_path = os.path.join(crash_stacks_directory,
utils.string_hash(file_path))
utils.write_data_to_file(crash_output, stack_file_path)
# Put crash/no-crash results in the crash queue.
crash_queue.put(
Crash(
file_path=file_path,
crash_time=crash_time,
return_code=return_code,
resource_list=resource_list,
gestures=gestures,
stack_file_path=stack_file_path))
# Don't upload uninteresting testcases (no crash) or if there is no log to
# correlate it with (not upload_output).
if upload_output:
upload_testcase(file_path, log_time)
if upload_output:
# Include full output for uploaded logs (crash output, merge output, etc).
crash_result_full = CrashResult(return_code, crash_time, output)
log = prepare_log_for_upload(crash_result_full.get_stacktrace(),
return_code)
upload_log(log, log_time)
except Exception:
logs.log_error('Exception occurred while running '
'run_testcase_and_return_result_in_queue.') |
Do engine reproduction. | def engine_reproduce(engine_impl: engine.Engine, target_name, testcase_path,
arguments, timeout) -> engine.ReproduceResult:
"""Do engine reproduction."""
if environment.is_trusted_host():
from clusterfuzz._internal.bot.untrusted_runner import tasks_host
return tasks_host.engine_reproduce(engine_impl, target_name, testcase_path,
arguments, timeout)
build_dir = environment.get_value('BUILD_DIR')
target_path = engine_common.find_fuzzer_path(build_dir, target_name)
if not target_path:
raise TargetNotFoundError('Failed to find target ' + target_name)
result = engine_impl.reproduce(target_path, testcase_path, list(arguments),
timeout)
# This matches the check in process_handler.run_process.
if not result.return_code and \
(crash_analyzer.is_memory_tool_crash(result.output) or
crash_analyzer.is_check_failure_crash(result.output)):
result.return_code = 1
return result |
Test for a crash and return crash parameters like crash type, crash state,
crash stacktrace, etc. | def test_for_crash_with_retries(fuzz_target,
testcase,
testcase_path,
test_timeout,
http_flag=False,
use_gestures=True,
compare_crash=True,
crash_retries=None):
"""Test for a crash and return crash parameters like crash type, crash state,
crash stacktrace, etc."""
logs.log('Testing for crash.')
set_extra_sanitizers(testcase.crash_type)
gestures = testcase.gestures if use_gestures else None
runner = TestcaseRunner(fuzz_target, testcase_path, test_timeout, gestures,
http_flag)
if crash_retries is None:
crash_retries = environment.get_value('CRASH_RETRIES')
if compare_crash and testcase.crash_type not in IGNORE_STATE_CRASH_TYPES:
expected_state = testcase.crash_state
expected_security_flag = testcase.security_flag
else:
expected_state = None
expected_security_flag = None
try:
return runner.reproduce_with_retries(crash_retries, expected_state,
expected_security_flag,
testcase.flaky_stack)
except TargetNotFoundError:
# If a target isn't found, treat it as not crashing.
return CrashResult(return_code=0, crash_time=0, output='') |
Preprocess function for users of test_for_reproducibility. | def preprocess_testcase_manager(testcase, uworker_input):
"""Preprocess function for users of test_for_reproducibility."""
# TODO(metzman): Make this work for test_for_crash_with_retries.
fuzz_target = testcase.get_fuzz_target()
engine_obj = engine.get(testcase.fuzzer_name)
if engine_obj and not fuzz_target:
raise TargetNotFoundError
if not fuzz_target:
return
uworker_input.fuzz_target.CopyFrom(uworker_io.entity_to_protobuf(fuzz_target)) |
Test to see if a crash is fully reproducible or is a one-time crasher. | def test_for_reproducibility(fuzz_target,
testcase_path,
crash_type,
expected_state,
expected_security_flag,
test_timeout,
http_flag,
gestures,
arguments=None) -> bool:
"""Test to see if a crash is fully reproducible or is a one-time crasher."""
set_extra_sanitizers(crash_type)
runner = TestcaseRunner(
fuzz_target,
testcase_path,
test_timeout,
gestures,
http_flag,
arguments=arguments)
crash_retries = environment.get_value('CRASH_RETRIES')
return runner.test_reproduce_reliability(crash_retries, expected_state,
expected_security_flag) |
Prepare log for upload. | def prepare_log_for_upload(symbolized_output, return_code):
"""Prepare log for upload."""
# Add revision information to the logs.
app_revision = environment.get_value('APP_REVISION')
job_name = environment.get_value('JOB_NAME')
components = revisions.get_component_list(app_revision, job_name)
component_revisions = (
revisions.format_revision_list(components, use_html=False) or
'Not available.\n')
revisions_header =\
f'Component revisions (build r{app_revision}):\n{component_revisions}\n'
bot_name = environment.get_value('BOT_NAME')
bot_header = f'Bot name: {bot_name}\n'
if environment.is_android():
bot_header += f'Device serial: {environment.get_value("ANDROID_SERIAL")}\n'
return_code_header = "Return code: %s\n\n" % return_code
result = revisions_header + bot_header + return_code_header +\
symbolized_output
return result.encode('utf-8') |
Upload the output into corresponding GCS logs bucket. | def upload_log(log, log_time):
"""Upload the output into corresponding GCS logs bucket."""
fuzz_logs_bucket = environment.get_value('FUZZ_LOGS_BUCKET')
if not fuzz_logs_bucket:
return
fuzzer_logs.upload_to_logs(fuzz_logs_bucket, log, time=log_time) |
Returns a user profile directory from a directory index. | def get_user_profile_directory(user_profile_index):
"""Returns a user profile directory from a directory index."""
temp_directory = environment.get_value('BOT_TMPDIR')
user_profile_in_memory = environment.get_value('USER_PROFILE_IN_MEMORY')
user_profile_root_directory = (
temp_directory if user_profile_in_memory else
environment.get_value('USER_PROFILE_ROOT_DIR'))
# Create path to user profile directory.
user_profile_directory_name = 'user_profile_%d' % user_profile_index
user_profile_directory = os.path.join(user_profile_root_directory,
user_profile_directory_name)
return user_profile_directory |
Returns the complete command line required to execute application. | def get_command_line_for_application(file_to_run='',
user_profile_index=0,
app_path=None,
app_args=None,
needs_http=False,
write_command_line_file=False,
get_arguments_only=False):
"""Returns the complete command line required to execute application."""
if app_args is None:
app_args = environment.get_value('APP_ARGS')
if app_path is None:
app_path = environment.get_value('APP_PATH')
if not app_path:
# No APP_PATH is available for e.g. grey box fuzzers.
return ''
additional_command_line_flags = get_additional_command_line_flags(file_to_run)
app_args_append_testcase = environment.get_value('APP_ARGS_APPEND_TESTCASE')
app_directory = environment.get_value('APP_DIR')
app_name = environment.get_value('APP_NAME')
apps_argument = environment.get_value('APPS_ARG')
crash_stacks_directory = environment.get_value('CRASH_STACKTRACES_DIR')
debugger = environment.get_value('DEBUGGER_PATH')
device_testcases_directory = android.constants.DEVICE_TESTCASES_DIR
fuzzer_directory = environment.get_value('FUZZER_DIR')
extension_argument = environment.get_value('EXTENSION_ARG')
input_directory = environment.get_value('INPUT_DIR')
launcher = environment.get_value('LAUNCHER_PATH')
is_android = environment.is_android()
root_directory = environment.get_value('ROOT_DIR')
temp_directory = environment.get_value('BOT_TMPDIR')
user_profile_argument = environment.get_value('USER_PROFILE_ARG')
window_argument = environment.get_value('WINDOW_ARG')
user_profile_directory = get_user_profile_directory(user_profile_index)
# Create user profile directory and setup contents if needed.
setup_user_profile_directory_if_needed(user_profile_directory)
# Handle spaces in APP_PATH.
# If application path has spaces, then we need to quote it.
if ' ' in app_path:
app_path = '"%s"' % app_path
interpreter = shell.get_interpreter(app_name)
if get_arguments_only:
# If we are only returning the arguments, do not return the application
# path or anything else required to run it such as an interpreter.
app_path = ''
elif interpreter:
# Prepend command with interpreter if it is a script.
app_path = '%s %s' % (interpreter, app_path)
# Start creating the command line.
command = ''
# Rebase the file_to_run and launcher paths to the worker's root.
if environment.is_trusted_host():
from clusterfuzz._internal.bot.untrusted_runner import file_host
file_to_run = file_host.rebase_to_worker_root(file_to_run)
launcher = file_host.rebase_to_worker_root(launcher)
# Default case.
testcase_path = file_to_run
testcase_filename = os.path.basename(testcase_path)
testcase_directory = os.path.dirname(testcase_path)
testcase_file_url = utils.file_path_to_file_url(testcase_path)
testcase_http_url = ''
# Determine where |testcase_file_url| should point depending on platform and
# whether or not a launcher script is used.
if file_to_run:
if launcher:
# In the case of launcher scripts, the testcase file to be run resides on
# the host running the launcher script. Thus |testcase_file_url|, which
# may point to a location on the device for Android job types, does not
# apply. Instead, the launcher script should be passed the original file
# to run. By setting |testcase_file_url| to |file_to_run|, we avoid
# duplicating job definitions solely for supporting launcher scripts.
testcase_file_url = file_to_run
# Jobs that have a launcher script which needs to be run on the host will
# have app_name == launcher. In this case don't prepend launcher to
# command - just use app_name.
if os.path.basename(launcher) != app_name:
launcher_with_interpreter = shell.get_execute_command(launcher)
command += launcher_with_interpreter + ' '
elif is_android:
# Android-specific testcase path fixup for fuzzers that don't rely on
# launcher scripts.
local_testcases_directory = environment.get_value('FUZZ_INPUTS')
# Check if the file to run is in fuzzed testcases folder. If yes, then we
# can substitute with a local device path. Otherwise, it is part of some
# data bundle with resource dependencies and we just need to use http
# host forwarder for that.
if file_to_run.startswith(local_testcases_directory):
testcase_relative_path = (
file_to_run[len(local_testcases_directory) + 1:])
testcase_path = os.path.join(device_testcases_directory,
testcase_relative_path)
testcase_file_url = utils.file_path_to_file_url(testcase_path)
else:
# Force use of host_forwarder based on comment above.
needs_http = True
# Check if the testcase needs to be loaded over http.
# TODO(ochang): Make this work for trusted/untrusted.
http_ip = '127.0.0.1'
http_port_1 = environment.get_value('HTTP_PORT_1', 8000)
relative_testcase_path = file_to_run[len(input_directory + os.path.sep):]
relative_testcase_path = relative_testcase_path.replace('\\', '/')
testcase_http_url = 'http://%s:%d/%s' % (http_ip, http_port_1,
relative_testcase_path)
if needs_http:
# TODO(unassigned): Support https.
testcase_file_url = testcase_http_url
testcase_path = testcase_http_url
# Compose app arguments.
all_app_args = ''
if user_profile_argument:
all_app_args += ' %s=%s' % (user_profile_argument, user_profile_directory)
if extension_argument and EXTENSIONS_PREFIX in testcase_filename:
all_app_args += ' %s=%s' % (extension_argument, testcase_directory)
if apps_argument and APPS_PREFIX in testcase_filename:
all_app_args += ' %s=%s' % (apps_argument, testcase_directory)
if window_argument:
all_app_args += ' %s' % window_argument
if additional_command_line_flags:
all_app_args += ' %s' % additional_command_line_flags.strip()
if app_args:
all_app_args += ' %s' % app_args.strip()
# Append %TESTCASE% at end if no testcase pattern is found in app arguments.
if not utils.sub_string_exists_in(
['%TESTCASE%', '%TESTCASE_FILE_URL%', '%TESTCASE_HTTP_URL%'],
all_app_args) and app_args_append_testcase:
all_app_args += ' %TESTCASE%'
all_app_args = all_app_args.strip()
# Build the actual command to run now.
if debugger:
command += '%s ' % debugger
if app_path:
command += app_path
if all_app_args:
command += ' %s' % all_app_args
command = command.replace('%APP_DIR%', app_directory)
command = command.replace('%CRASH_STACKTRACES_DIR%', crash_stacks_directory)
command = command.replace('%DEVICE_TESTCASES_DIR%',
device_testcases_directory)
command = command.replace('%FUZZER_DIR%', fuzzer_directory)
command = command.replace('%INPUT_DIR%', input_directory)
command = command.replace('%ROOT_DIR%', root_directory)
command = command.replace('%TESTCASE%', testcase_path)
command = command.replace('%TESTCASE_FILE_URL%', testcase_file_url)
command = command.replace('%TESTCASE_HTTP_URL%', testcase_http_url)
command = command.replace('%TMP_DIR%', temp_directory)
command = command.replace('%USER_PROFILE_DIR%', user_profile_directory)
if is_android and not launcher:
# Initial setup phase for command line.
if write_command_line_file:
android.adb.write_command_line_file(command, app_path)
return android.app.get_launch_command(all_app_args, testcase_path,
testcase_file_url)
# Decide which directory we will run the application from.
# We are using |app_directory| since it helps to locate pdbs
# in same directory, other dependencies, etc.
if os.path.exists(app_directory):
os.chdir(app_directory)
return str(command) |
Set user profile directory if it does not exist. | def setup_user_profile_directory_if_needed(user_profile_directory):
"""Set user profile directory if it does not exist."""
if os.path.exists(user_profile_directory):
# User profile directory already exists. Bail out.
return
shell.create_directory(user_profile_directory)
# Create a file in user profile directory based on format:
# filename;base64 encoded zlib compressed file contents.
user_profile_file = environment.get_value('USER_PROFILE_FILE')
if user_profile_file and ';' in user_profile_file:
user_profile_filename, encoded_file_contents = (
user_profile_file.split(';', 1))
user_profile_file_contents = zlib.decompress(
base64.b64decode(encoded_file_contents))
user_profile_file_path = os.path.join(user_profile_directory,
user_profile_filename)
utils.write_data_to_file(user_profile_file_contents, user_profile_file_path)
# For Firefox, we need to install a special fuzzPriv extension that exposes
# special functions to javascript, e.g. gc(), etc.
app_name = environment.get_value('APP_NAME')
if app_name.startswith('firefox'):
# Create extensions directory.
extensions_directory = os.path.join(user_profile_directory, 'extensions')
shell.create_directory(extensions_directory)
# Unpack the fuzzPriv extension.
extension_archive = os.path.join(environment.get_resources_directory(),
'firefox', 'fuzzPriv-extension.zip')
with archive.open(extension_archive) as reader:
reader.extract_all(extensions_directory)
# Add this extension in the extensions configuration file.
extension_config_file_path = os.path.join(user_profile_directory,
'extensions.ini')
fuzz_extension_directory = os.path.join(extensions_directory,
'[email protected]')
extension_config_file_contents = (
'[ExtensionDirs]\r\n'
'Extension0=%s\r\n'
'\r\n'
'[ThemeDirs]\r\n' % fuzz_extension_directory)
utils.write_data_to_file(extension_config_file_contents,
extension_config_file_path) |
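The USER_PROFILE_FILE value parsed above uses the format filename;base64(zlib(contents)). A small sketch of producing and decoding such a value, using a hypothetical prefs file:

import base64
import zlib

# Hypothetical profile file; the encoding mirrors the parsing logic above.
filename = 'prefs.js'
contents = b'user_pref("security.sandbox.content.level", 0);\n'
user_profile_file = '%s;%s' % (
    filename, base64.b64encode(zlib.compress(contents)).decode('ascii'))

# Decoding, as done by setup_user_profile_directory_if_needed:
name, encoded = user_profile_file.split(';', 1)
assert name == 'prefs.js'
assert zlib.decompress(base64.b64decode(encoded)) == contents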
Checks whether the target binary fails to execute at the given revision.
Arguments:
job_type (str): The type of job we are executing on.
crash_revision (int): The revision at which the target was built.
Returns a uworker_msg_pb2.BuildData with the following attributes:
is_bad_build (bool): Whether the target build is bad. If True, the target
cannot be used for executing testcases.
should_ignore_crash_result (bool): True iff the target crashed, but we should
ignore it.
build_run_console_output (Optional[str]): The build run output, containing
crash stacktraces (if any). | def check_for_bad_build(job_type: str,
crash_revision: int) -> uworker_msg_pb2.BuildData:
"""
Checks whether the target binary fails to execute at the given revision.
Arguments:
job_type (str): The type of job we are executing on.
crash_revision (int): The revision at which the target was built.
Returns a uworker_msg_pb2.BuildData with the following attributes:
is_bad_build (bool): Whether the target build is bad. If True, the target
cannot be used for executing testcases.
should_ignore_crash_result (bool): True iff the target crashed, but we should
ignore it.
build_run_console_output (Optional[str]): The build run output, containing
crash stacktraces (if any).
"""
# Check the bad build check flag to see if we want to do this.
if not environment.get_value('BAD_BUILD_CHECK'):
# should_ignore_crash_result set to True because build metadata does not
# need to be updated in this case.
return uworker_msg_pb2.BuildData(
revision=crash_revision,
is_bad_build=False,
should_ignore_crash_result=True,
build_run_console_output='')
# Create a blank command line with no file to run and no http.
command = get_command_line_for_application(file_to_run='', needs_http=False)
# When checking for bad builds, we use the default window size.
# We don't want to pick a custom size since it can potentially cause a
# startup crash and cause a build to be detected incorrectly as bad.
default_window_argument = environment.get_value('WINDOW_ARG', '')
if default_window_argument:
command = command.replace(' %s' % default_window_argument, '')
# TSAN is slow and boots slowly on first startup. Increase the warmup
# timeout for this case.
if environment.tool_matches('TSAN', job_type):
fast_warmup_timeout = environment.get_value('WARMUP_TIMEOUT')
else:
fast_warmup_timeout = environment.get_value('FAST_WARMUP_TIMEOUT')
# Initialize helper variables.
is_bad_build = False
build_run_console_output = ''
app_directory = environment.get_value('APP_DIR')
# Exit all running instances.
process_handler.terminate_stale_application_instances()
# Check if the build is bad.
return_code, crash_time, output = process_handler.run_process(
command,
timeout=fast_warmup_timeout,
current_working_directory=app_directory)
crash_result = CrashResult(return_code, crash_time, output)
# 1. Need to account for startup crashes with no crash state. E.g. failed to
# load shared library. So, ignore state for comparison.
# 2. Ignore leaks as they don't block a build from reporting regular crashes
# and also don't impact regression range calculations.
if (crash_result.is_crash(ignore_state=True) and
not crash_result.should_ignore() and
crash_result.get_type() not in ['Direct-leak', 'Indirect-leak']):
is_bad_build = True
build_run_console_output = utils.get_crash_stacktrace_output(
command,
crash_result.get_stacktrace(symbolized=True),
crash_result.get_stacktrace(symbolized=False))
logs.log(
f'Bad build for {job_type} detected at r{crash_revision}: ' +
f'return code = {return_code}, crash type = {crash_result.get_type()}',
raw_output=output,
output=build_run_console_output)
# Exit all running instances.
process_handler.terminate_stale_application_instances()
# Any of the conditions below indicate that the bot is in a bad state and it is
# not caused by the build itself. In that case, just exit.
if is_bad_build and utils.sub_string_exists_in(BAD_STATE_HINTS, output):
logs.log_fatal_and_exit(
'Bad bot environment detected, exiting.',
output=build_run_console_output,
snapshot=process_handler.get_runtime_snapshot())
return uworker_msg_pb2.BuildData(
revision=crash_revision,
is_bad_build=is_bad_build,
should_ignore_crash_result=crash_result.should_ignore(),
build_run_console_output=build_run_console_output) |
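A hedged usage sketch of the caller side: run the check and inspect the returned BuildData. The job name and revision here are hypothetical; the function reads everything else (APP_DIR, timeouts, etc.) from the bot environment.

# Hypothetical job name and revision; everything else comes from the bot
# environment that was set up before this call.
build_data = check_for_bad_build('libfuzzer_asan_test_project', 1234)
if build_data.is_bad_build:
  # The plain startup run crashed, so this build should not be used for
  # running testcases; the stacktrace is in build_run_console_output.
  print(build_data.build_run_console_output)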
Updates the corresponding build metadata.
This method is intended to be called (from a trusted worker) on the result of
check_for_bad_build. It adds the corresponding build metadata if necessary.
Arguments:
job_type (str): The type of job we are executing on.
build_data (BuildData): the result of check_for_bad_build call. | def update_build_metadata(job_type: str, build_data: uworker_msg_pb2.BuildData):
"""
Updates the corresponding build metadata.
This method is intended to be called (from a trusted worker) on the result of
check_for_bad_build. It adds the corresponding build metadata if necessary.
Arguments:
job_type (str): The type of job we are executing on.
build_data (BuildData): the result of check_for_bad_build call.
"""
if build_data.should_ignore_crash_result:
return
build_state = data_handler.get_build_state(job_type, build_data.revision)
# If none of the other bots have added information about this build,
# then add it now.
if build_state == data_types.BuildState.UNMARKED:
data_handler.add_build_metadata(job_type, build_data.revision,
build_data.is_bad_build,
build_data.build_run_console_output) |
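Because BuildData is a protobuf message, the untrusted worker can serialize it and the trusted side can parse it again before recording build metadata. A minimal sketch of that hand-off, with the actual uworker transport elided:

# Sketch only: serialize on the untrusted side, parse and record on the
# trusted side. The real hand-off happens through the uworker I/O machinery.
build_data = check_for_bad_build('libfuzzer_asan_test_project', 1234)
payload = build_data.SerializeToString()

# ...payload crosses the trust boundary...

received = uworker_msg_pb2.BuildData.FromString(payload)
update_build_metadata('libfuzzer_asan_test_project', received)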
Get the corpus directory given a project qualified fuzz target name. | def get_corpus_directory(input_directory, project_qualified_name):
"""Get the corpus directory given a project qualified fuzz target name."""
corpus_directory = os.path.join(input_directory, project_qualified_name)
if environment.is_trusted_host():
from clusterfuzz._internal.bot.untrusted_runner import file_host
corpus_directory = file_host.rebase_to_worker_root(corpus_directory)
# Create corpus directory if it does not exist already.
if environment.is_trusted_host():
from clusterfuzz._internal.bot.untrusted_runner import file_host
file_host.create_directory(corpus_directory, create_intermediates=True)
else:
shell.create_directory(corpus_directory)
return corpus_directory |
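A minimal usage sketch with a hypothetical input directory and project-qualified target name, assuming an untrusted (non-trusted-host) Linux bot:

# Hypothetical paths; on a trusted host the directory would additionally be
# rebased to the worker root, as shown above.
corpus_dir = get_corpus_directory('/corpus', 'test_project_fuzz_target')
# corpus_dir == '/corpus/test_project_fuzz_target', created if missing.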
Return the full fuzzer path and actual binary name of |fuzzer_name|. | def _get_fuzzer_path(target_list, fuzzer_name):
"""Return the full fuzzer path and actual binary name of |fuzzer_name|."""
fuzzer_filename = environment.get_executable_filename(fuzzer_name)
for path in target_list:
if os.path.basename(path) == fuzzer_filename:
return path
raise BuiltinFuzzerError('Failed to find chosen target ' + fuzzer_name) |
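A small sketch of the lookup with a hypothetical target list; get_executable_filename may append a platform-specific suffix (e.g. '.exe') before the basename comparison:

# Hypothetical build output listing.
targets = ['/build/out/html_parser_fuzzer', '/build/out/json_fuzzer']
fuzzer_path = _get_fuzzer_path(targets, 'json_fuzzer')
# fuzzer_path == '/build/out/json_fuzzer'; an unknown name raises
# BuiltinFuzzerError.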
Extract a dictionary element from the given string. | def extract_dictionary_element(line):
"""Extract a dictionary element from the given string."""
# An element should start and end with a double-quote.
start_index = line.find('"')
end_index = line.rfind('"')
if start_index == -1 or end_index == -1 or start_index == end_index:
return None
element = line[start_index:end_index + 1]
return element |
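A few examples of what the extraction yields for typical libFuzzer-style dictionary lines:

assert extract_dictionary_element('kw1="foo"') == '"foo"'
assert extract_dictionary_element('"\\x00\\x01"') == '"\\x00\\x01"'
assert extract_dictionary_element('# just a comment') is None
assert extract_dictionary_element('no quotes here') is None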
Return default dictionary path. | def get_default_dictionary_path(fuzz_target_path):
"""Return default dictionary path."""
return fuzzer_utils.get_supporting_file(fuzz_target_path,
DICTIONARY_FILE_EXTENSION) |
Calculate number of dictionary elements in the given string. | def get_dictionary_size(dictionary_content):
"""Calculate number of dictionary elements in the given string."""
count = 0
for line in dictionary_content.splitlines():
if extract_dictionary_element(line):
count += 1
return count |
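For example, counting the elements in a small dictionary blob; comment lines and lines without a quoted token are not counted:

sample = (
    '# Sample dictionary.\n'
    'kw1="foo"\n'
    'kw2="bar"\n'
    'not-an-element\n')
assert get_dictionary_size(sample) == 2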
Calculate size of manual section of given dictionary. | def get_stats_for_dictionary_file(dictionary_path):
"""Calculate size of manual section of given dictionary."""
if not dictionary_path or not os.path.exists(dictionary_path):
return 0
dictionary_content = utils.read_data_from_file(
dictionary_path, eval_data=False).decode('utf-8')
dictionaries = dictionary_content.split(RECOMMENDED_DICTIONARY_HEADER)
# If there are any elements before RECOMMENDED_DICTIONARY_HEADER, those are
# from "manual" dictionary stored in the repository.
manual_dictionary_size = get_dictionary_size(dictionaries[0])
return manual_dictionary_size |
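The split relies on RECOMMENDED_DICTIONARY_HEADER (defined elsewhere in this module) marking where the recommended entries begin. A sketch of the idea with a placeholder header value:

# Placeholder header value for illustration only; the real constant is
# defined elsewhere in this module.
header = '# Recommended dictionary follows.'
content = 'kw1="manual"\n' + header + '\nkw2="recommended"\n'
manual_section = content.split(header)[0]
assert get_dictionary_size(manual_section) == 1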
Correct a single dictionary line. | def _fix_dictionary_line(line, dict_path):
"""Correct a single dictionary line."""
# Ignore blank and comment lines.
if not line or line.strip().startswith('#'):
return line
match = DICTIONARY_PART_PATTERN.match(line)
# We expect this pattern to match even invalid dictionary entries. Failures
# to match should be treated as bugs in this function.
if not match:
raise errors.BadStateError(
'Failed to correct dictionary line "{line}" in {path}.'.format(
line=line, path=dict_path))
name_part = match.group(1) or ''
entry = match.group(2)
# In some cases, we'll detect the user's intended entry as a token name. This
# can happen if the user included unquoted tokens such as "!=" or ">=".
if not entry and name_part:
entry = name_part
name_part = ''
# Handle quote entries as a special case. This simplifies later logic.
if entry == '"':
entry = '"\\\""'
if entry.startswith('"') and entry.endswith('"'):
return name_part + entry
# In this case, we know the entry is invalid. Escape any unescaped quotes
# within it, then append quotes to the front and back.
new_entry = ''
prev_character = ''
for character in entry:
if character == '"' and prev_character != '\\':
new_entry += '\\'
new_entry += character
prev_character = character
new_entry = '"{entry}"'.format(entry=new_entry)
return name_part + new_entry |
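A standalone sketch of just the quote-escaping step performed at the end of the function, applied to a hypothetical unquoted entry containing a stray quote:

# Standalone version of the escaping loop above, on a hypothetical entry.
entry = 'foo"bar'
fixed = ''
prev_character = ''
for character in entry:
  if character == '"' and prev_character != '\\':
    fixed += '\\'
  fixed += character
  prev_character = character
fixed = '"%s"' % fixed
assert fixed == '"foo\\"bar"'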