| response (string, 1–33.1k chars) | instruction (string, 22–582k chars) |
|---|---|
def unlock_screen():
  """Unlocks the screen if it is locked."""
  # Only act when the lock screen is actually showing.
  window_dump = adb.run_shell_command(['dumpsys', 'window'])
  if 'mShowingLockscreen=true' not in window_dump:
    # Screen is not locked, no work to do.
    return

  # A quick power toggle (two presses) makes the unlock more reliable.
  for _ in range(2):
    adb.run_shell_command(['input', 'keyevent', 'KEYCODE_POWER'])

  # MENU performs the actual unlock.
  adb.run_shell_command(['input', 'keyevent', 'KEYCODE_MENU'])

  # Artificial delay to let the unlock complete.
  time.sleep(1)
def get_device_path(local_path):
  """Returns device path for the given local path."""
  # Map the path relative to the bot root onto the on-device fuzzing dir.
  relative_path = os.path.relpath(local_path, environment.get_root_directory())
  return os.path.join(android.constants.DEVICE_FUZZING_DIR, relative_path)
def get_local_path(device_path):
  """Returns local path for the given device path, or None if the path is not
  under the on-device fuzzing directory."""
  device_prefix = android.constants.DEVICE_FUZZING_DIR + '/'
  if not device_path.startswith(device_prefix):
    logs.log_error('Bad device path: ' + device_path)
    return None

  relative_path = os.path.relpath(device_path,
                                  android.constants.DEVICE_FUZZING_DIR)
  return os.path.join(environment.get_root_directory(), relative_path)
def is_testcase_deprecated(platform_id=None):
  """Whether or not the Android device is deprecated.

  Args:
    platform_id: platform ID of the form
        |android:{codename}_{sanitizer}:{build_version}|, or None.

  Returns:
    True if the device codename or build branch is deprecated.
  """
  # Bug fix: the default platform_id=None used to crash on .split() with an
  # AttributeError; treat a missing/empty platform id as not deprecated.
  if not platform_id:
    return False

  platform_id_fields = platform_id.split(':')
  if len(platform_id_fields) != 3:
    return False

  # Check if device is deprecated.
  codename_fields = platform_id_fields[1].split('_')
  if codename_fields[0] in android.constants.DEPRECATED_DEVICE_LIST:
    return True

  # Check if branch is deprecated.
  # Currently only "main" or "m" is active.
  # All other branches including "master" have been deprecated.
  branch = platform_id_fields[2]
  # NOTE(review): `branch <= 'v'` is a lexicographic comparison that also
  # matches 'main', which seems to contradict the comment above — original
  # logic preserved; confirm intent.
  if (branch <= 'v' or branch == 'master') and branch != 'm':
    return True

  return False
def can_testcase_run_on_platform(testcase_platform_id, current_platform_id):
  """Whether or not the testcase can run on the current Android device."""
  del testcase_platform_id  # Unused argument for now

  # Platform ID for Android is of the form
  # |android:{codename}_{sanitizer}:{build_version}|.
  fields = current_platform_id.split(':')
  if len(fields) != 3:
    return False

  # Deprecated testcases should run on any latest device and on main, so the
  # device information is ignored and only the current version is checked.
  # If the current version is 'm' (main), run the test case.
  return fields[2] == 'm'
def disable():
  """Disable wifi."""
  # 'svc wifi disable' toggles the wifi service off on the device.
  adb.run_shell_command(['svc', 'wifi', 'disable'])
def enable():
  """Enable wifi."""
  # 'svc wifi enable' toggles the wifi service on on the device.
  adb.run_shell_command(['svc', 'wifi', 'enable'])
def disable_airplane_mode():
  """Disable airplane mode."""
  # Clear the global setting first, then broadcast the change so running
  # apps pick up the new state.
  adb.run_shell_command(['settings', 'put', 'global', 'airplane_mode_on', '0'])
  broadcast_command = [
      'am', 'broadcast', '-a', 'android.intent.action.AIRPLANE_MODE', '--ez',
      'state', 'false'
  ]
  adb.run_shell_command(broadcast_command)
def configure(force_enable=False):
  """Configure airplane mode and wifi on device.

  Args:
    force_enable: if True, enable wifi even when the WIFI environment value
        is unset/false.
  """
  # Airplane mode should be disabled in all cases. This can get inadvertently
  # turned on via gestures.
  disable_airplane_mode()

  # Need to disable wifi before changing configuration.
  disable()

  # Check if wifi needs to be enabled. If not, then no need to modify the
  # supplicant file.
  wifi_enabled = force_enable or environment.get_value('WIFI', True)
  if not wifi_enabled:
    # No more work to do, we already disabled it at start.
    return

  # Wait 2 seconds to allow the wifi to be enabled.
  enable()
  time.sleep(2)

  # Install helper apk to configure wifi.
  wifi_util_apk_path = os.path.join(
      environment.get_platform_resources_directory(), 'wifi_util.apk')
  if not app.is_installed(WIFI_UTIL_PACKAGE_NAME):
    app.install(wifi_util_apk_path)

  # Get ssid and password from admin configuration.
  if environment.is_android_cuttlefish():
    # Cuttlefish virtual devices expose a fixed open network.
    wifi_ssid = 'VirtWifi'
    wifi_password = ''
  else:
    config = db_config.get()
    if not config.wifi_ssid:
      logs.log('No wifi ssid is set, skipping wifi config.')
      return
    wifi_ssid = config.wifi_ssid
    wifi_password = config.wifi_password or ''

  # Build the instrumentation command; the psk argument is only passed when a
  # password is configured.
  connect_wifi_command = (
      'am instrument -e method connectToNetwork -e ssid {ssid} ')
  if wifi_password:
    connect_wifi_command += '-e psk {password} '
  connect_wifi_command += '-w {call_path}'

  output = adb.run_shell_command(
      connect_wifi_command.format(
          ssid=shlex.quote(wifi_ssid),
          password=shlex.quote(wifi_password),
          call_path=WIFI_UTIL_CALL_PATH))
  if 'result=true' not in output:
    logs.log_warn('Failed to connect to wifi.', output=output)
def add_running_handle(handle):
  """Record a handle as potentially needing to be cleaned up on restart."""
  handles = set(get_running_handles())
  handles.add(handle)
  persistent_cache.set_value(
      HANDLE_CACHE_KEY, list(handles), persist_across_reboots=True)
def remove_running_handle(handle):
  """Remove a handle from the tracked set."""
  handles = set(get_running_handles())
  handles.discard(handle)
  persistent_cache.set_value(
      HANDLE_CACHE_KEY, list(handles), persist_across_reboots=True)
def get_running_handles():
  """Get a list of potentially stale handles from previous runs.

  Returns:
    List of handle strings; empty list if none were recorded.
  """
  return persistent_cache.get_value(HANDLE_CACHE_KEY, default_value=[])
def get_temp_dir():
  """Define a tempdir for undercoat to store its data in.

  This tempdir needs to be of a scope that persists across invocations of the
  bot, to ensure proper cleanup of stale handles/data."""
  undercoat_dir = os.path.join(
      environment.get_value('ROOT_DIR'), 'bot', 'undercoat')
  os.makedirs(undercoat_dir, exist_ok=True)
  return undercoat_dir
def undercoat_api_command(*args, timeout=None):
  """Make an API call to the undercoat binary.

  Args:
    *args: command-line arguments passed through to the binary.
    timeout: optional timeout in seconds for the process.

  Returns:
    The process result, with output decoded to unicode.

  Raises:
    UndercoatError: if the binary exits with a non-zero return code.
  """
  logs.log(f'Running undercoat command {args}')
  bundle_dir = environment.get_value('FUCHSIA_RESOURCES_DIR')
  undercoat_path = os.path.join(bundle_dir, 'undercoat', 'undercoat')
  undercoat = new_process.ProcessRunner(undercoat_path, args)

  # The undercoat log is sent to stderr, which we capture to a tempfile.
  with tempfile.TemporaryFile() as undercoat_log:
    # TMPDIR is pointed at a bot-persistent directory so stale data can be
    # cleaned up across restarts.
    result = undercoat.run_and_wait(
        timeout=timeout,
        stderr=undercoat_log,
        extra_env={'TMPDIR': get_temp_dir()})
    result.output = utils.decode_to_unicode(result.output)

    if result.return_code != 0:
      # Dump (up to 1 MiB of) the undercoat log to assist in debugging.
      log_data = utils.read_from_handle_truncated(undercoat_log, 1024 * 1024)
      logs.log_warn('Log output from undercoat: ' +
                    utils.decode_to_unicode(log_data))

      # The API error message is returned on stdout.
      raise UndercoatError(
          'Error running undercoat command %s: %s' % (args, result.output))

  return result
def undercoat_instance_command(command,
                               handle,
                               *args,
                               timeout=None,
                               abort_on_error=True):
  """Helper for the subset of undercoat commands that operate on an instance.

  Args:
    command: undercoat API command name.
    handle: instance handle the command targets.
    *args: extra arguments appended to the command line.
    timeout: optional timeout in seconds.
    abort_on_error: when True, dump the instance logs and stop the instance
        before re-raising on failure.

  Raises:
    UndercoatError: always re-raised after optional cleanup.
  """
  try:
    return undercoat_api_command(
        command, '-handle', handle, *args, timeout=timeout)
  except UndercoatError:
    if abort_on_error:
      # Try to print extra logs and shut down.
      # TODO(eep): Should we be attempting to automatically restart?
      dump_instance_logs(handle)
      stop_instance(handle)
    raise
def get_version():
  """Get undercoat API version as (major, minor, patch) tuple."""
  version = undercoat_api_command('version', timeout=30).output

  # Expected format: v{major}.{minor}.{patch}
  if not version.startswith('v'):
    raise UndercoatError('Invalid version reported: %s' % version)

  parts = version[1:].split('.')
  if len(parts) != 3:
    raise UndercoatError('Invalid version reported: %s' % version)

  try:
    major, minor, patch = (int(part) for part in parts)
  except ValueError as e:
    raise UndercoatError('Invalid version reported: %s' % version) from e
  return (major, minor, patch)
def validate_api_version():
  """Check that the undercoat API version is supported. Raises an error if it
  is not."""
  version = get_version()
  # Only major version 0 is currently supported.
  if version[0] > 0:
    raise UndercoatError('Unsupported API version: %d.%d.%d' % version)
def dump_instance_logs(handle):
  """Dump logs from an undercoat instance.

  Only the trailing QEMU_LOG_LIMIT characters are logged to bound output size.
  """
  qemu_log = undercoat_instance_command(
      'get_logs', handle, abort_on_error=False).output
  logs.log_warn(qemu_log[-QEMU_LOG_LIMIT:])
def start_instance():
  """Start an instance via undercoat.

  Returns:
    The handle string identifying the started instance.
  """
  handle = undercoat_api_command('start_instance').output.strip()
  logs.log('Started undercoat instance with handle %s' % handle)

  # Immediately save the handle in case we crash before stop_instance()
  # is called.
  add_running_handle(handle)

  return handle
def stop_all():
  """Attempt to stop any running undercoat instances that may have not been
  cleanly shut down."""
  for handle in get_running_handles():
    try:
      undercoat_instance_command('stop_instance', handle, abort_on_error=False)
    except UndercoatError:
      # Best effort: the instance may already be gone.
      pass

    # Even if we failed to stop_instance above, there's no point in trying
    # again later.
    remove_running_handle(handle)

  # At this point, all handles/data should have been cleaned up, but if any is
  # remaining then we clear it out here.
  shutil.rmtree(get_temp_dir())
def stop_instance(handle):
  """Stop a running undercoat instance.

  Args:
    handle: instance handle to stop.

  Returns:
    The process result of the stop command.
  """
  result = undercoat_instance_command(
      'stop_instance', handle, abort_on_error=False)
  # Mark the corresponding handle as having been cleanly shut down.
  remove_running_handle(handle)
  return result
def list_fuzzers(handle):
  """List fuzzers available on an instance, via undercoat.

  Returns:
    List of fuzzer name strings (one per output line).
  """
  return undercoat_instance_command('list_fuzzers', handle).output.split('\n')
def prepare_fuzzer(handle, fuzzer):
  """Prepare a fuzzer of the given name for use, via undercoat."""
  return undercoat_instance_command('prepare_fuzzer', handle, '-fuzzer', fuzzer)
def run_fuzzer(handle, fuzzer, outdir, args, timeout=None):
  """Run a fuzzer of the given name, via undercoat."""
  # TODO(fxbug.dev/47490): Pass back raw return code from libFuzzer?
  undercoat_args = ['-fuzzer', fuzzer]
  if outdir:
    undercoat_args.extend(['-artifact-dir', outdir])
  # Everything after '--' is forwarded to the fuzzer itself.
  full_args = undercoat_args + ['--'] + args
  return undercoat_instance_command(
      'run_fuzzer', handle, *full_args, timeout=timeout)
def put_data(handle, fuzzer, src, dst):
  """Put files for a fuzzer onto an instance, via undercoat.

  If src is a directory, it will be copied recursively. Standard globs are
  supported."""
  return undercoat_instance_command('put_data', handle, '-fuzzer', fuzzer,
                                    '-src', src, '-dst', dst).output
def get_data(handle, fuzzer, src, dst):
  """Get files from a fuzzer on an instance, via undercoat.

  If src is a directory, it will be copied recursively. Standard globs are
  supported.

  Returns:
    Command output on success, or None if retrieval failed.
  """
  try:
    return undercoat_instance_command(
        'get_data',
        handle,
        '-fuzzer',
        fuzzer,
        '-src',
        src,
        '-dst',
        dst,
        abort_on_error=False).output
  except UndercoatError:
    # Best effort: a failed artifact fetch is not fatal to the caller.
    return None
def _xdotool_path():
  """Return full path to xdotool, or None if it is not on PATH."""
  return shutil.which('xdotool')
def find_windows_for_process(process_id):
  """Return visible windows belonging to a process."""
  pids = utils.get_process_ids(process_id)
  if not pids:
    return []

  xdotool_path = _xdotool_path()
  if not xdotool_path:
    logs.log_error('Xdotool not installed, cannot locate process windows.')
    return []

  visible_windows = []
  for pid in pids:
    # xdotool prints one numeric window id per line; ignore everything else.
    output = shell.execute_command(
        '%s search --all --pid %d --onlyvisible' % (xdotool_path, pid))
    visible_windows.extend(
        int(line) for line in output.splitlines() if line.isdigit())
  return visible_windows
def get_random_gestures(gesture_count):
  """Return list of random gesture command strings.

  Each returned string is of the form '{xdotool_command},{arguments}'; the
  uppercase placeholders in the templates below (TIMES, mbutton, Letter(s),
  Chars, x y, nx ny, P P, Function, Modifier) are substituted with random
  concrete values.
  """
  gesture_types = [
      'click --repeat TIMES,mbutton',
      'drag',
      'key,ctrl+minus',
      'key,ctrl+plus',
      'key,Function',
      'key,Letter',
      'key,Letters',
      'key,Modifier+Letter',
      'keydown,Letter',
      'keyup,Letter',
      'type,Chars',
      'type,Chars',
      'mousedown,mbutton',
      'mousemove --sync,x y',
      'mousemove_relative --sync,nx ny',
      'mouseup,mbutton',
  ]
  # Occasionally (1 in 4) also allow window resizes.
  if not random.randint(0, 3):
    gesture_types.append('windowsize,P P')

  gestures = []
  for _ in range(gesture_count):
    random_gesture = utils.random_element_from_list(gesture_types)

    if random_gesture == 'drag':
      # A drag expands into a fixed sequence: move, press, wiggle, move,
      # release.
      gestures.append('mousemove,%d %d' % (random.randint(0, SCREEN_WIDTH),
                                           random.randint(0, SCREEN_HEIGHT)))
      gestures.append('mousedown,1')
      gestures.append('mousemove_relative,0 1')
      # NOTE(review): '0 -' looks like a truncated relative offset (perhaps
      # '0 -1') — confirm against the original source.
      gestures.append('mousemove_relative,0 -')
      gestures.append('mousemove,%d %d' % (random.randint(0, SCREEN_WIDTH),
                                           random.randint(0, SCREEN_HEIGHT)))
      gestures.append('mouseup,1')
      continue

    if 'Function' in random_gesture:
      # Random function key F1..F12.
      random_gesture = (
          random_gesture.replace('Function', 'F%d' % random.randint(1, 12)))
    elif 'mbutton' in random_gesture:
      random_gesture = random_gesture.replace('TIMES', str(
          random.randint(1, 3)))
      # Bias towards the left button (1); otherwise pick another button.
      picked_button = 1 if random.randint(0, 4) else random.randint(2, 5)
      random_gesture = random_gesture.replace('mbutton', str(picked_button))
    elif ',x y' in random_gesture:
      # Absolute screen coordinates.
      random_gesture = random_gesture.replace(
          ',x y', ',%d %d' % (random.randint(0, SCREEN_WIDTH),
                              random.randint(0, SCREEN_HEIGHT)))
    elif ',nx ny' in random_gesture:
      # Relative mouse offsets.
      random_gesture = random_gesture.replace(
          ',nx ny', ',%d %d' %
          (random.randint(COORDINATE_DELTA_MIN, COORDINATE_DELTA_MAX),
           random.randint(COORDINATE_DELTA_MIN, COORDINATE_DELTA_MAX)))
    elif ',P P' in random_gesture:
      # Window size as percentages of the screen.
      random_gesture = random_gesture.replace(
          ',P P',
          ',%d%% %d%%' % (random.randint(10, 100), random.randint(10, 100)))
    elif 'Chars' in random_gesture:
      random_gesture = random_gesture.replace('Chars',
                                              "'%s'" % get_text_to_type())
    else:
      if 'Modifier' in random_gesture:
        random_gesture = random_gesture.replace(
            'Modifier',
            utils.random_element_from_list([
                'alt', 'ctrl', 'control', 'meta', 'super', 'shift', 'ctrl+shift'
            ]))
      if 'Letters' in random_gesture:
        # A short random sequence of named keys and printable characters.
        num_letters = random.randint(1, 10)
        letters = []
        for _ in range(num_letters):
          letters.append(
              utils.random_element_from_list([
                  'Escape', 'BackSpace', 'Delete', 'Tab', 'space', 'Down',
                  'Return', 'Up', 'Down', 'Left', 'Right',
                  chr(random.randint(48, 57)),
                  chr(random.randint(65, 90)),
                  chr(random.randint(97, 122))
              ]))
        random_gesture = random_gesture.replace('Letters', ' '.join(letters))
      elif 'Letter' in random_gesture:
        random_gesture = random_gesture.replace(
            'Letter',
            utils.random_element_from_list([
                'space',
                chr(random.randint(48, 57)),
                chr(random.randint(65, 90)),
                chr(random.randint(97, 122))
            ]))

    # Skip gestures that would interrupt the target (copy/interrupt combo).
    if 'ctrl+c' in random_gesture.lower():
      continue

    gestures.append(random_gesture)

  return gestures
def get_text_to_type():
  """Return text to type."""
  # Shell/meta characters are excluded so the typed text cannot escape the
  # intended input context.
  meta_chars = [
      '|', '&', ';', '(', ')', '<', '>', ' ', '\t', ',', '\'', '"', '`', '[',
      ']', '{', '}'
  ]
  chars_to_type_count = random.randint(1, MAX_CHARS_TO_TYPE)
  typed_chars = []
  for _ in range(chars_to_type_count):
    candidate = chr(random.randint(32, 126))
    if candidate in meta_chars:
      continue
    typed_chars.append(candidate)
  return ''.join(typed_chars)
def run_gestures(gestures, process_id, process_status, start_time, timeout,
                 windows):
  """Run the provided interaction gestures.

  Args:
    gestures: list of '{xdotool_command},{arguments}' strings.
    process_id: pid used to locate target windows when |windows| is empty.
    process_status: object whose |finished| flag aborts the run.
    start_time: epoch seconds when the overall run started.
    timeout: maximum seconds to keep running gestures.
    windows: list of window ids; populated in place if empty.
  """
  xdotool_path = _xdotool_path()
  if not xdotool_path:
    logs.log_error('Xdotool not installed, cannot emulate gestures.')
    return

  if not windows:
    # Extend in place so the caller's list is reused on subsequent calls.
    windows += find_windows_for_process(process_id)

  for window in windows:
    # Activate the window so that it can receive gestures.
    shell.execute_command(
        '%s windowactivate --sync %d' % (xdotool_path, window))

    for gesture in gestures:
      # If process had exited or our timeout interval has exceeded,
      # just bail out.
      if process_status.finished or time.time() - start_time >= timeout:
        return

      gesture_type, gesture_cmd = gesture.split(',')
      if gesture_type == 'windowsize':
        # windowsize needs the window id as an argument.
        shell.execute_command(
            '%s %s %d %s' % (xdotool_path, gesture_type, window, gesture_cmd))
      else:
        shell.execute_command(
            '%s %s -- %s' % (xdotool_path, gesture_type, gesture_cmd))
def _should_download_symbols():
  """Return True if we should continue to download symbols."""
  # For local testing, we do not have access to the cloud storage bucket with
  # the symbols. In this case, just bail out.
  return not environment.get_value('LOCAL_DEVELOPMENT')
def get_kernel_prefix_and_full_hash(build_id):
  """Download repo.prop and return the full hash and prefix."""
  repo_data = _get_repo_prop_data(build_id, constants.LKL_BUILD_TARGET)
  if not repo_data:
    return None, None

  for line in repo_data.splitlines():
    if not line.startswith(constants.LKL_REPO_KERNEL_PREFIX):
      continue
    # line is of form: prefix u'hash'
    kernel_hash = line.split(' ', 1)[1].strip('u\'')
    return constants.LKL_REPO_KERNEL_PREFIX, kernel_hash

  return None, None
def _get_repo_prop_data(build_id, fuzz_target):
  """Downloads repo.prop and returns the data based on build_id and target.

  Returns:
    The decoded file contents, or None if the file could not be found.
  """
  symbols_directory = os.path.join(
      environment.get_value('SYMBOLS_DIR'), fuzz_target)
  repro_filename = symbols_downloader.get_repo_prop_archive_filename(
      build_id, fuzz_target)

  # Grab repo.prop, it is not on the device nor in the build_dir.
  _download_kernel_repo_prop_if_needed(symbols_directory, build_id, fuzz_target)

  local_repo_path = utils.find_binary_path(symbols_directory, repro_filename)
  if local_repo_path and os.path.exists(local_repo_path):
    return utils.read_data_from_file(local_repo_path, eval_data=False).decode()

  return None
def _download_kernel_repo_prop_if_needed(symbols_directory, build_id,
                                         fuzz_target):
  """Downloads the repo.prop for an LKL fuzzer.

  No-op during local development (no access to the symbols bucket).
  """
  if not _should_download_symbols():
    return

  symbols_downloader.download_repo_prop_if_needed(
      symbols_directory, build_id, fuzz_target, [fuzz_target], 'lkl_fuzzer')
def get_lkl_binary_name(unsymbolized_crash_stacktrace_split):
  """Returns the lkl binary name from a stack trace, or None if absent."""
  matches = (
      constants.LINUX_KERNEL_LIBRARY_ASSERT_REGEX.match(line)
      for line in unsymbolized_crash_stacktrace_split)
  # First assert-line match wins.
  return next((match.group(1) for match in matches if match), None)
def is_lkl_stack_trace(unsymbolized_crash_stacktrace):
  """Is this an lkl stack trace?

  True only when running an LKL job and the kernel-module marker appears in
  the trace.
  """
  return (
      environment.is_lkl_job() and
      constants.LINUX_KERNEL_MODULE_STACK_TRACE in unsymbolized_crash_stacktrace
  )
def find_windows_for_process(process_id):
  """Return visible windows belonging to a process.

  Args:
    process_id: the pid whose windows should be located.

  Returns:
    List of window objects that accepted an empty keystroke probe.
  """
  pids = utils.get_process_ids(process_id)
  if not pids:
    return []

  visible_windows = []
  for pid in pids:
    app = application.Application()
    # Fix: the original bare `except:` clauses also swallowed
    # KeyboardInterrupt/SystemExit; narrow them to Exception.
    try:
      app.connect(process=pid)
    except Exception:
      logs.log_warn('Unable to connect to process.')
      continue

    try:
      windows = app.windows()
    except Exception:
      logs.log_warn('Unable to get application windows.')
      continue

    for window in windows:
      # Probe with an empty keystroke; windows that reject it (not visible,
      # not interactable, etc.) are skipped.
      try:
        window.type_keys('')
      except Exception:
        continue
      visible_windows.append(window)

  return visible_windows
def get_random_gestures(gesture_count):
  """Return list of random gesture command strings.

  Gestures use the format '{command},{arguments}'; the placeholders Letters,
  MA (mouse click), MB (mouse drag) and MC (mouse move) are substituted with
  random concrete values. 'key' gestures are weighted 4x.
  """
  gestures_types = [
      'key,Letters', 'key,Letters', 'key,Letters', 'key,Letters', 'mouse,MA',
      'mousedrag,MB', 'mousemove,MC'
  ]
  gestures = []
  for _ in range(gesture_count):
    random_gesture = utils.random_element_from_list(gestures_types)
    if 'Letters' in random_gesture:
      num_letters = random.randint(1, 10)
      letters = []
      for _ in range(num_letters):
        # 1 in 8 chance of a special (pywinauto key-code) token; otherwise a
        # mostly-printable key.
        if not random.randint(0, 7):
          letters.append(
              utils.random_element_from_list([
                  '{BACK}', '{BACKSPACE}', '{BKSP}', '{CAP}', '{DEL}',
                  '{DELETE}', '{DOWN}', '{DOWN}', '{END}', '{ENTER}', '{ENTER}',
                  '{ENTER}', 'A{ESC}', '{F1}', '{F2}', '{F3}', 'A{F4}', '{F5}',
                  '{F6}', '{F7}', '{F8}', '{F9}', '{F10}', '{F11}', '{F12}',
                  '{HOME}', '{INSERT}', '{LEFT}', '{PGDN}', '{PGUP}', '{RIGHT}',
                  '{SPACE}', '{TAB}', '{TAB}', '{TAB}', '{TAB}', '{UP}', '{UP}',
                  '+', '^'
              ]))
        else:
          letters.append(
              utils.random_element_from_list(
                  ['{TAB}', '^=', '^-',
                   '{%s}' % chr(random.randint(32, 126))]))
      random_gesture = random_gesture.replace('Letters', ''.join(letters))

    # Skip combinations that would interrupt or kill the target
    # (^c copy/interrupt, ^d EOF, ^z suspend/undo).
    if ('^c' in random_gesture.lower() or '^d' in random_gesture.lower() or
        '^z' in random_gesture.lower()):
      continue

    if ',MA' in random_gesture:
      # Click: button;coords;double-click flag.
      button = utils.random_element_from_list(['left', 'right', 'middle'])
      coords = '(%d,%d)' % (random.randint(0, 1000), random.randint(0, 1000))
      double = utils.random_element_from_list(['True', 'False'])
      random_gesture = random_gesture.replace(
          'MA', '%s;%s;%s' % (button, coords, double))
    if ',MB' in random_gesture:
      # Drag: button;press coords;release coords.
      button = utils.random_element_from_list(['left', 'right', 'middle'])
      coords1 = '(%d,%d)' % (random.randint(0, 1000), random.randint(0, 1000))
      coords2 = '(%d,%d)' % (random.randint(0, 1000), random.randint(0, 1000))
      random_gesture = random_gesture.replace(
          'MB', '%s;%s;%s' % (button, coords1, coords2))
    if ',MC' in random_gesture:
      # Move: button;coords.
      button = utils.random_element_from_list(['left', 'right', 'middle'])
      coords = '(%d,%d)' % (random.randint(0, 1000), random.randint(0, 1000))
      random_gesture = random_gesture.replace('MC', '%s;%s' % (button, coords))

    gestures.append(random_gesture)

  return gestures
def run_gestures(gestures, process_id, process_status, start_time, timeout,
                 windows):
  """Run the provided interaction gestures.

  Args:
    gestures: list of '{command},{arguments}' strings (see
        get_random_gestures for the format).
    process_id: pid used to locate target windows when |windows| is empty.
    process_status: object whose |finished| flag aborts the run.
    start_time: epoch seconds when the overall run started.
    timeout: maximum seconds to keep running gestures.
    windows: list of window objects; populated in place if empty.
  """
  if not windows:
    # Extend in place so the caller's list is reused on subsequent calls.
    windows += find_windows_for_process(process_id)

  for window in windows:
    for gesture in gestures:
      # If process had exited or our timeout interval has exceeded,
      # just bail out.
      if process_status.finished or time.time() - start_time >= timeout:
        return

      try:
        # Gesture format is 'command,arg1,...'; rejoin to preserve commas in
        # the argument portion.
        tokens = gesture.split(',')
        command = tokens.pop(0)
        value = ','.join(tokens)

        if command == 'key':
          window.type_keys(value)
        elif command == 'mouse':
          button, coords, double = value.split(';')
          window.click_input(
              button=button,
              coords=ast.literal_eval(coords),
              double=ast.literal_eval(double))
        elif command == 'mousedrag':
          button, coords1, coords2 = value.split(';')
          window.drag_mouse(
              button=button,
              press_coords=ast.literal_eval(coords1),
              release_coords=ast.literal_eval(coords2))
        elif command == 'mousemove':
          button, coords = value.split(';')
          window.move_mouse(pressed=button, coords=ast.literal_eval(coords))
      except Exception:
        # Several types of errors can happen. Just ignore them until a better
        # solution is available. E.g. controls not visible, gestures cannot be
        # run, invalid window handle, window failed to respond to gesture, etc.
        pass
def _run_command(command):
  """Runs a command and prints it, retrying on failure.

  Exits the whole process after RETRY_COUNT consecutive failures.

  Returns:
    The raw output of subprocess.check_output on success.
  """
  print(
      'Running command [{time}]:'.format(
          time=datetime.datetime.now().strftime('%H:%M:%S')),
      ' '.join(command))

  for _ in range(RETRY_COUNT):
    try:
      return subprocess.check_output(command, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
      print('Command failed with non-zero exit code. Output:\n%s' % e.output)
      print('Sleeping a few seconds before retrying.')
      # Randomized backoff to avoid retry storms.
      time.sleep(random.randint(0, SLEEP_WAIT))

  print('Failed to run command, exiting.')
  sys.exit(-1)
def _copy_corpus(source_bucket, source_project, target_bucket, target_project):
  """Copy corpus from a source bucket to target bucket, keeping their project
  names into account.

  Fuzz-target directories named '{source_project}_{target}' are renamed to
  '{target_project}_{target}' during the copy.
  """
  # Ensure that gsutil is installed.
  subprocess.check_call([GSUTIL_CMD, '-v'])

  source_urls_fetch_command = [
      GSUTIL_CMD, 'ls', 'gs://{bucket}/*/'.format(bucket=source_bucket)
  ]
  # NOTE(review): _run_command returns subprocess.check_output's value, which
  # is bytes; the str operations below assume decoded text — confirm that the
  # output is decoded (or Python 2 semantics were intended).
  source_urls = _run_command(source_urls_fetch_command).splitlines()
  # Drop blank lines and 'gs://...:' header lines from gsutil ls output.
  filtered_source_urls = [
      s.rstrip('/') for s in source_urls if s.strip() and not s.endswith(':')
  ]
  assert filtered_source_urls, 'No matching items found in source corpus.'

  for source_url in filtered_source_urls:
    url_part, fuzz_target = source_url.rsplit('/', 1)

    # Strip source project prefix and add target project prefix (if exists).
    if source_project and fuzz_target.startswith(source_project + '_'):
      fuzz_target = fuzz_target[len(source_project) + 1:]
    if target_project:
      fuzz_target = '%s_%s' % (target_project, fuzz_target)

    # Replace source bucket with target bucket for target url.
    url_part = url_part.replace('gs://%s' % source_bucket,
                                'gs://%s' % target_bucket)
    target_url = '%s/%s' % (url_part, fuzz_target)

    _run_command(
        [GSUTIL_CMD, '-m', 'rsync', '-d', '-r', source_url, target_url])

  print('Copy corpus finished successfully.')
def _is_attempting_path_traversal(archive_name: StrBytesPathLike,
                                  output_dir: StrBytesPathLike,
                                  filename: StrBytesPathLike) -> bool:
  """Detects whether there is a path traversal attempt.

  Args:
    archive_name: the name of the archive (used for logging only).
    output_dir: the output directory.
    filename: the name of the file being checked.

  Returns:
    Whether there is a path traversal attempt.
  """
  output_dir = os.path.realpath(output_dir)
  intended_path = os.path.join(output_dir, os.path.normpath(filename))
  resolved_path = os.path.realpath(intended_path)

  if resolved_path == output_dir:
    # Workaround for https://bugs.python.org/issue28488.
    # Ignore directories named '.'.
    return False

  if resolved_path != intended_path:
    # Resolution escaped the output directory (e.g. '..' components or
    # symlinks): refuse to extract.
    logs.log_error('Directory traversal attempted while unpacking archive %s '
                   '(file path=%s, actual file path=%s). Aborting.' %
                   (archive_name, intended_path, resolved_path))
    return True
  return False
def open(archive_path: str,
         file_obj: Optional[BinaryIO] = None) -> ArchiveReader:
  """Opens the archive and gets the appropriate archive reader based on the
  `archive_path`. If `file_obj` is not none, the binary file-like object will be
  used to read the archive instead of opening `archive_path`.

  Note: intentionally shadows the builtin `open` within this module.

  Args:
    archive_path: the path to the archive.
    file_obj: a file-like object containing the archive.

  Raises:
    If the file could not be opened or if the archive type cannot be handled.
    See `is_archive()` to check whether the archive type is handled.

  Returns:
    the archive reader.
  """
  archive_type = get_archive_type(archive_path)
  if archive_type == ArchiveType.ZIP:
    # The zip reader accepts either a path or a file object directly.
    return ZipArchiveReader(file_obj or archive_path)
  if archive_type in (ArchiveType.TAR_LZMA, ArchiveType.TAR):
    return TarArchiveReader(archive_path, file_obj=file_obj)
  raise ArchiveError('Unhandled archive type.')
def get_archive_type(archive_path: str) -> ArchiveType:
  """Get the type of the archive.

  Args:
    archive_path: the path to the archive.

  Returns:
    the type of the archive, or ArchiveType.UNKNOWN if unknown.
  """

  def matches_any(extensions):
    """Returns True if |archive_path| ends with an extension in |extensions|."""
    return any(archive_path.endswith(extension) for extension in extensions)

  # Zip and plain tar are checked before tar+lzma on purpose: extension lists
  # are assumed to be disjoint.
  if matches_any(ZIP_FILE_EXTENSIONS):
    return ArchiveType.ZIP
  if matches_any(TAR_FILE_EXTENSIONS):
    return ArchiveType.TAR
  if matches_any(LZMA_FILE_EXTENSIONS):
    return ArchiveType.TAR_LZMA
  return ArchiveType.UNKNOWN
def is_archive(filename: str) -> bool:
  """Return true if the file is an archive.

  Args:
    filename: the path to a file.

  Returns:
    whether the provided file is an archive (based on its extension).
  """
  return get_archive_type(filename) != ArchiveType.UNKNOWN
Returns evaluated value. | def _eval_value(value_string):
"""Returns evaluated value."""
try:
return ast.literal_eval(value_string)
except:
# String fallback.
return value_string |
def join_memory_tool_options(options):
  """Joins a dict holding memory tool options into a string that can be set in
  the environment."""
  # Sort for a deterministic option ordering.
  pairs = ['%s=%s' % (key, value) for key, value in sorted(options.items())]
  return ':'.join(pairs)
Returns the int representation contained by string |value| if it contains
one. Otherwise returns |value|. | def _maybe_convert_to_int(value):
"""Returns the int representation contained by string |value| if it contains
one. Otherwise returns |value|."""
try:
return int(value)
except ValueError:
return value |
def _parse_memory_tool_options(options_str):
  """Parses memory tool options into a dict."""
  parsed = {}

  for item in UNQUOTED_COLON_REGEX.split(options_str):
    # Regex split can give us empty strings at the beginning and the end, and
    # the ':' separators themselves. Skip both.
    if not item or item == ':':
      continue

    option_name, separator, option_value = item.partition('=')
    if not separator:
      # TODO(mbarbella): Factor this out of environment, and switch to logging
      # an error and continuing. This error should be recoverable.
      raise ValueError('Invalid memory tool option "%s"' % item)

    parsed[option_name] = _maybe_convert_to_int(option_value)

  return parsed
Quote environment value as needed for certain platforms like Windows. | def _quote_value_if_needed(value):
"""Quote environment value as needed for certain platforms like Windows."""
result = value
if ' ' in result or ':' in result:
result = '"%s"' % result
return result |
def copy():
  """Return a safe (detached) copy of the environment."""
  # os.environ.copy() returns a plain dict, so mutations do not touch the
  # real process environment.
  return os.environ.copy()
def disable_lsan():
  """Disable leak detection (if enabled)."""
  # Leak detection is an ASan feature; bail out for other sanitizers.
  if get_current_memory_tool_var() != 'ASAN_OPTIONS':
    return

  sanitizer_options = get_memory_tool_options('ASAN_OPTIONS', {})
  sanitizer_options['detect_leaks'] = 0
  set_memory_tool_options('ASAN_OPTIONS', sanitizer_options)
def get_asan_options(redzone_size, malloc_context_size, quarantine_size_mb,
                     bot_platform, leaks, disable_ubsan):
  """Generates default ASAN options.

  Args:
    redzone_size: redzone size in bytes, or falsy to use the default.
    malloc_context_size: max stack frames to report, or falsy for default.
    quarantine_size_mb: quarantine size in MB, or falsy for default.
    bot_platform: platform name (e.g. 'WINDOWS' or an Android platform).
    leaks: whether leak checking is requested.
    disable_ubsan: whether UBSan reports should be suppressed.

  Returns:
    Dict of ASAN option names to values. As side effects, also sets the
    REDZONE, LSAN_OPTIONS and UBSAN_OPTIONS environment values.
  """
  asan_options = {}

  # Default options needed for all cases.
  asan_options['alloc_dealloc_mismatch'] = 0
  asan_options['print_scariness'] = 1
  asan_options['strict_memcmp'] = 0

  # Set provided redzone size.
  if redzone_size:
    asan_options['redzone'] = redzone_size

    # This value is used in determining whether to report OOM crashes or not.
    set_value('REDZONE', redzone_size)

  # Set maximum number of stack frames to report.
  if malloc_context_size:
    asan_options['malloc_context_size'] = malloc_context_size

  # Set quarantine size.
  if quarantine_size_mb:
    asan_options['quarantine_size_mb'] = quarantine_size_mb

  # Test for leaks if this is an LSan-enabled job type.
  if get_value('LSAN') and leaks and not get_value('USE_EXTRA_SANITIZERS'):
    lsan_options = join_memory_tool_options(get_lsan_options())
    set_value('LSAN_OPTIONS', lsan_options)
    asan_options['detect_leaks'] = 1
  else:
    remove_key('LSAN_OPTIONS')
    asan_options['detect_leaks'] = 0

  # FIXME: Support container overflow on Android.
  if is_android(bot_platform):
    asan_options['detect_container_overflow'] = 0

  # Enable stack use-after-return.
  asan_options['detect_stack_use_after_return'] = 1

  # Other less important default options for all cases.
  asan_options.update({
      'allocator_may_return_null': 1,
      'allow_user_segv_handler': 0,
      'check_malloc_usable_size': 0,
      'detect_odr_violation': 0,
      'fast_unwind_on_fatal': 1,
      'print_suppressions': 0,
  })

  # Add common sanitizer options.
  asan_options.update(COMMON_SANITIZER_OPTIONS)

  # FIXME: For Windows, rely on online symbolization since llvm-symbolizer.exe
  # in build archive does not work.
  asan_options['symbolize'] = int(bot_platform == 'WINDOWS')

  # For Android, allow user defined segv handler to work.
  if is_android(bot_platform):
    asan_options['allow_user_segv_handler'] = 1

  # Check if UBSAN is enabled as well for this ASAN build.
  # If yes, set UBSAN_OPTIONS and enable suppressions.
  if get_value('UBSAN'):
    if disable_ubsan:
      ubsan_options = get_ubsan_disabled_options()
    else:
      ubsan_options = get_ubsan_options()

    # Remove |symbolize| explicitly to avoid overriding ASan defaults.
    ubsan_options.pop('symbolize', None)

    set_value('UBSAN_OPTIONS', join_memory_tool_options(ubsan_options))

  return asan_options
def get_cpu_arch():
  """Return cpu architecture, or None on non-Android platforms."""
  if is_android():
    # FIXME: Handle this import in a cleaner way.
    from clusterfuzz._internal.platforms import android
    return android.settings.get_cpu_arch()

  # FIXME: Add support for desktop architectures as needed.
  return None
def get_current_memory_tool_var():
  """Get the environment variable name for the current job type's sanitizer."""
  tool = get_memory_tool_name(get_value('JOB_NAME'))
  if tool:
    return tool + '_OPTIONS'
  return None
def get_memory_tool_options(env_var, default_value=None):
  """Get the current memory tool options as a dict. Returns |default_value| if
  |env_var| isn't set. Otherwise returns a dictionary containing the memory tool
  options and their values."""
  raw_value = get_value(env_var)
  if raw_value is None:
    return default_value
  return _parse_memory_tool_options(raw_value)
def get_instrumented_libraries_paths():
  """Get the instrumented libraries path for the current sanitizer."""
  tool = get_memory_tool_name(get_value('JOB_NAME'))
  if not tool:
    return None

  if tool == 'MSAN':
    # MSAN instrumented libraries come in two flavors depending on whether the
    # build tracks origins; pick the matching variable suffix.
    if 'no-origins' in get_value('BUILD_URL', ''):
      tool += '_NO_ORIGINS'
    else:
      tool += '_CHAINED'

  paths = get_value('INSTRUMENTED_LIBRARIES_PATHS_' + tool)
  return paths.split(':') if paths else None
def get_default_tool_path(tool_name):
  """Get the default tool for this platform (from scripts/ dir)."""
  # For android devices, we do symbolization on the host machine, which is
  # linux. So, we use the linux version of the tool; otherwise use the
  # current platform's default (no override).
  platform_override = 'linux' if is_android() else None
  return os.path.join(
      get_platform_resources_directory(platform_override),
      get_executable_filename(tool_name))
def get_environment_settings_as_string():
  """Return environment settings as a string. Includes settings for memory
  debugging tools (e.g. ASAN_OPTIONS for ASAN), application binary revision,
  application command line, etc."""
  parts = []

  # Add Android specific variables.
  if is_android():
    # FIXME: Handle this import in a cleaner way.
    from clusterfuzz._internal.platforms import android

    build_fingerprint = get_value(
        'BUILD_FINGERPRINT') or android.settings.get_build_fingerprint()
    parts.append('[Environment] Build fingerprint: %s\n' % build_fingerprint)

    security_patch_level = get_value(
        'SECURITY_PATCH_LEVEL') or android.settings.get_security_patch_level()
    parts.append('[Environment] Patch level: %s\n' % security_patch_level)

    parts.append(
        '[Environment] Local properties file "%s" with contents:\n%s\n' %
        (android.device.LOCAL_PROP_PATH,
         android.adb.read_data_from_file(android.device.LOCAL_PROP_PATH)))

    command_line = get_value('COMMAND_LINE_PATH')
    if command_line:
      parts.append(
          '[Environment] Command line file "%s" with contents:\n%s\n' %
          (command_line, android.adb.read_data_from_file(command_line)))

    asan_options = get_value('ASAN_OPTIONS')
    if asan_options:
      # FIXME: Need better documentation for Chrome builds. Chrome builds use
      # asan_device_setup.sh and we send this options file path as an include
      # to extra-options parameter.
      sanitizer_options_file_path = (
          android.sanitizer.get_options_file_path('ASAN'))
      parts.append(
          '[Environment] ASAN options file "%s" with contents:\n%s\n' %
          (sanitizer_options_file_path, asan_options))
  else:
    # For desktop platforms, add |*_OPTIONS| variables from environment.
    for sanitizer_option in get_sanitizer_options_for_display():
      parts.append('[Environment] %s\n' % sanitizer_option)

  return ''.join(parts)
def get_sanitizer_options_for_display():
  """Return a list of sanitizer options with quoted values."""
  display = []
  for tool in SUPPORTED_MEMORY_TOOLS_FOR_OPTIONS:
    variable = tool + '_OPTIONS'
    value = os.getenv(variable)
    if not value:
      continue
    display.append('%s=%s' % (variable, value))
  return display
def get_llvm_symbolizer_path():
  """Get the path of the llvm-symbolizer binary."""
  llvm_symbolizer_path = get_value('LLVM_SYMBOLIZER_PATH')
  if llvm_symbolizer_path and os.path.exists(llvm_symbolizer_path):
    # Make sure that llvm symbolizer binary is executable, then smoke-test it.
    os.chmod(llvm_symbolizer_path, 0o750)
    works = subprocess.call(
        [llvm_symbolizer_path, '--help'],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL) == 0
    if works:
      # llvm-symbolizer works, return it.
      return llvm_symbolizer_path

  # Either
  # 1. llvm-symbolizer was not found in build archive. OR
  # 2. llvm-symbolizer fails due to dependency issue, clang regression, etc.
  # So, use our own version of llvm-symbolizer.
  llvm_symbolizer_path = get_default_tool_path('llvm-symbolizer')

  # Make sure that we have a default llvm-symbolizer for this platform.
  if not os.path.exists(llvm_symbolizer_path):
    return None

  # Make sure that llvm symbolizer binary is executable.
  os.chmod(llvm_symbolizer_path, 0o750)
  return llvm_symbolizer_path
def get_root_directory():
  """Return the ClusterFuzz root directory (from ROOT_DIR)."""
  return get_value('ROOT_DIR')
def get_startup_scripts_directory():
  """Return the path to the bot startup scripts directory."""
  return os.path.join(
      get_value('ROOT_DIR'), 'src', 'python', 'bot', 'startup')
def get_config_directory():
  """Return the path to the configs directory."""
  override = get_value('CONFIG_DIR_OVERRIDE')
  if override:
    return override

  if is_running_on_app_engine():
    # Root is already src/appengine.
    return 'config'

  # Running on bot, give path to config folder inside appengine dir.
  return os.path.join(get_root_directory(), 'src', 'appengine', 'config')
def get_gae_config_directory():
  """Return the path to the google appengine configs directory."""
  return os.path.join(get_config_directory(), 'gae')
def get_gce_config_directory():
  """Return the path to the google compute engine configs directory."""
  return os.path.join(get_config_directory(), 'gce')
def get_resources_directory():
  """Return the path to the resources directory."""
  return os.path.join(get_root_directory(), 'resources')
def get_platform_resources_directory(platform_override=None):
  """Return the path to platform-specific resources directory."""
  current_platform = platform_override or platform()
  if is_android(current_platform):
    # Android resources share the same android directory.
    current_platform = 'ANDROID'
  return os.path.join(get_resources_directory(), 'platform',
                      current_platform.lower())
def get_suppressions_directory():
  """Return the path to the suppressions directory."""
  return os.path.join(get_config_directory(), 'suppressions')
def get_suppressions_file(sanitizer, suffix='suppressions'):
  """Return the path to sanitizer suppressions file, if exists."""
  file_path = os.path.join(get_suppressions_directory(),
                           '%s_%s.txt' % (sanitizer, suffix))
  # A missing or empty suppressions file is treated as "no suppressions".
  if not os.path.exists(file_path):
    return None
  if not os.path.getsize(file_path):
    return None
  return file_path
def get_lsan_options():
  """Generates default LSAN options."""
  options = {
      'print_suppressions': 0,
  }
  # Add common sanitizer options.
  options.update(COMMON_SANITIZER_OPTIONS)

  suppressions_path = get_suppressions_file('lsan')
  if suppressions_path:
    options['suppressions'] = suppressions_path
  return options
def get_kasan_options():
  """Generates default KASAN options."""
  # Symbolization is disabled; common sanitizer options are layered on top.
  options = {'symbolize': 0}
  options.update(COMMON_SANITIZER_OPTIONS)
  return options
def get_msan_options():
  """Generates default MSAN options."""
  # Symbolization is disabled; common sanitizer options are layered on top.
  options = {'symbolize': 0}
  options.update(COMMON_SANITIZER_OPTIONS)
  return options
def get_platform_id():
  """Return a platform id as a lowercase string."""
  bot_platform = platform()
  if is_android_cuttlefish():
    return bot_platform.lower()

  if is_android(bot_platform):
    # FIXME: Handle this import in a cleaner way.
    from clusterfuzz._internal.platforms import android
    platform_id = get_value('PLATFORM_ID', android.settings.get_platform_id())
    return platform_id.lower()

  return bot_platform.lower()
def get_platform_group():
  """Return the platform group (specified via QUEUE_OVERRIDE) if it
  exists, otherwise platform()."""
  return get_value('QUEUE_OVERRIDE') or platform()
def get_memory_tool_name(job_name):
  """Figures out name of memory debugging tool."""
  # If no tool matches, assume it is ASAN. Also takes care of LSAN job type.
  return next((tool for tool in SUPPORTED_MEMORY_TOOLS_FOR_OPTIONS
               if tool_matches(tool, job_name)), 'ASAN')
def get_memory_tool_display_string(job_name):
  """Return memory tool string for a testcase."""
  memory_tool_name = get_memory_tool_name(job_name)
  sanitizer_name = SANITIZER_NAME_MAP.get(memory_tool_name)
  if sanitizer_name:
    return 'Sanitizer: %s (%s)' % (sanitizer_name, memory_tool_name)
  # Tool has no friendly sanitizer name; fall back to the raw tool name.
  return 'Memory Tool: %s' % memory_tool_name
def get_executable_filename(executable_name):
  """Return the filename for the given executable."""
  # Only Windows needs an .exe extension, and only when it's not already there.
  if platform() != 'WINDOWS' or executable_name.endswith('.exe'):
    return executable_name
  return executable_name + '.exe'
def get_tsan_options():
  """Generates default TSAN options."""
  options = {
      'atexit_sleep_ms': 200,
      'flush_memory_ms': 2000,
      'history_size': 3,
      'print_suppressions': 0,
      'report_thread_leaks': 0,
      'report_signal_unsafe': 0,
      'stack_trace_format': 'DEFAULT',
      'symbolize': 1,
  }
  # Add common sanitizer options.
  options.update(COMMON_SANITIZER_OPTIONS)

  suppressions_path = get_suppressions_file('tsan')
  if suppressions_path:
    options['suppressions'] = suppressions_path
  return options
def get_ubsan_options():
  """Generates default UBSAN options."""
  # Note that UBSAN can work together with ASAN as well.
  options = {
      'halt_on_error': 1,
      'print_stacktrace': 1,
      'print_suppressions': 0,

      # We use -fsanitize=unsigned-integer-overflow as an additional coverage
      # signal and do not want those errors to be reported by UBSan as bugs.
      # See https://github.com/google/oss-fuzz/issues/910 for additional info.
      'silence_unsigned_overflow': 1,
      'symbolize': 1,
  }
  # Add common sanitizer options.
  options.update(COMMON_SANITIZER_OPTIONS)

  # TODO(crbug.com/877070): Make this code configurable on a per job basis.
  suppressions_path = get_suppressions_file('ubsan')
  if suppressions_path and not is_chromeos_system_job():
    options['suppressions'] = suppressions_path
  return options
def get_ubsan_disabled_options():
  """Generates UBSAN options with error reporting turned off."""
  return {
      'halt_on_error': 0,
      'print_stacktrace': 0,
      'print_suppressions': 0,
  }
def get_value_string(environment_variable, default_value=None):
  """Get environment variable (as a string, without evaluation)."""
  return os.environ.get(environment_variable, default_value)
def get_value(environment_variable, default_value=None, env=None):
  """Return an environment variable value."""
  lookup = os.environ if env is None else env

  raw_value = lookup.get(environment_variable)
  # raw_value will be None if the variable is not defined.
  if raw_value is None:
    return default_value

  # Exception for ANDROID_SERIAL. Sometimes serial can be just numbers,
  # so we don't want to eval it.
  if environment_variable == 'ANDROID_SERIAL':
    return raw_value

  # Evaluate the value of the environment variable with string fallback.
  return _eval_value(raw_value)
Return a bool on whether a string exists in a provided job name or
use from environment if available (case insensitive). | def _job_substring_match(search_string, job_name):
"""Return a bool on whether a string exists in a provided job name or
use from environment if available (case insensitive)."""
job_name = job_name or get_value('JOB_NAME')
if not job_name:
return False
return search_string in job_name.lower() |
def is_afl_job(job_name=None):
  """Return True if the current job uses AFL."""
  engine = get_engine_for_job(job_name)
  return engine == 'afl'
def is_ios_job(job_name=None):
  """Return True if the current job is for iOS."""
  # iOS jobs are identified by an 'ios_' prefix/substring in the job name.
  return _job_substring_match('ios_', job_name)
def is_chromeos_job(job_name=None):
  """Return True if the current job is for ChromeOS."""
  # ChromeOS jobs carry 'chromeos' somewhere in the job name.
  return _job_substring_match('chromeos', job_name)
def is_lkl_job(job_name=None):
  """Return True if the current job is for LKL (Linux Kernel Library).

  Note: the previous docstring incorrectly said "ChromeOS" (copy-paste from
  is_chromeos_job); the function has always matched 'lkl' in the job name.
  """
  return _job_substring_match('lkl', job_name)
def is_chromeos_system_job(job_name=None):
  """Return True if the current job is for ChromeOS system (i.e. not libFuzzer
  or entire Chrome browser for Chrome on ChromeOS)."""
  # Both the job-name check and the CHROMEOS_SYSTEM env flag must hold.
  return is_chromeos_job(job_name) and get_value('CHROMEOS_SYSTEM')
def is_libfuzzer_job(job_name=None):
  """Return True if the current job uses libFuzzer."""
  engine = get_engine_for_job(job_name)
  return engine == 'libFuzzer'
def is_honggfuzz_job(job_name=None):
  """Return True if the current job uses honggfuzz."""
  engine = get_engine_for_job(job_name)
  return engine == 'honggfuzz'
def is_kernel_fuzzer_job(job_name=None):
  """Return True if the current job uses syzkaller."""
  engine = get_engine_for_job(job_name)
  return engine == 'syzkaller'
def is_centipede_fuzzer_job(job_name=None):
  """Return True if the current job uses Centipede."""
  engine = get_engine_for_job(job_name)
  return engine == 'centipede'
def is_engine_fuzzer_job(job_name=None):
  """Return True if this is an engine fuzzer."""
  # Any recognized engine (afl, libFuzzer, etc.) makes this an engine job.
  return bool(get_engine_for_job(job_name))
def get_engine_for_job(job_name=None):
  """Get the engine for the given job.

  Args:
    job_name: Job name to inspect. Defaults to the JOB_NAME environment value.

  Returns:
    The matching engine name from fuzzing.ENGINES, or None if no engine
    substring is found or no job name is available.
  """
  if not job_name:
    job_name = get_value('JOB_NAME')
  if not job_name:
    # JOB_NAME may be unset (e.g. outside a task context). Without this guard
    # the substring check below raises TypeError on None.
    return None

  for engine in fuzzing.ENGINES:
    if engine.lower() in job_name:
      return engine

  return None
def is_minimization_supported():
  """Return True if the current job supports minimization.

  Currently blackbox-fuzzer jobs or libfuzzer support minimization.
  """
  if is_libfuzzer_job():
    return True
  # All non-engine (blackbox) jobs support minimization.
  return not is_engine_fuzzer_job()
def is_posix():
  """Return True if we are on a posix platform (linux/unix and mac os)."""
  return os.name == 'posix'
def is_trusted_host(ensure_connected=True):
  """Return whether or not the current bot is a trusted host."""
  trusted = get_value('TRUSTED_HOST')
  if not trusted:
    # Not a trusted host at all; propagate the falsy value unchanged.
    return trusted
  if not ensure_connected:
    return True
  # A trusted host only counts as connected once a worker bot is assigned.
  return get_value('WORKER_BOT_NAME')
def is_untrusted_worker():
  """Return whether or not the current bot is an untrusted worker."""
  # Truthiness of the UNTRUSTED_WORKER env value decides the answer.
  return get_value('UNTRUSTED_WORKER')
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.