response
stringlengths 1
33.1k
| instruction
stringlengths 22
582k
|
---|---|
Rename a DLL to allow for updates. | def _rename_dll_for_update(absolute_filepath):
"""Rename a DLL to allow for updates."""
backup_filepath = absolute_filepath + '.bak.' + str(int(time.time()))
os.rename(absolute_filepath, backup_filepath) |
Return the platform deployment filename. | def _platform_deployment_filename():
"""Return the platform deployment filename."""
platform_mappings = {
'Linux': 'linux',
'Windows': 'windows',
'Darwin': 'macos'
}
base_filename = platform_mappings[platform.system()]
if sys.version_info.major == 3:
base_filename += '-3'
return base_filename + '.zip' |
def _deployment_file_url(filename):
  """Helper to return deployment file url.

  Args:
    filename: Basename of the deployment file (e.g. a platform zip or the
      manifest file).

  Returns:
    A gs:// URL for |filename| in the configured deployment bucket, or None
    if no deployment bucket is configured.
  """
  deployment_bucket = local_config.ProjectConfig().get('deployment.bucket')
  if not deployment_bucket:
    return None

  # Fix: interpolate the requested filename into the URL. Previously the
  # f-strings contained a literal placeholder and |filename| was ignored,
  # so every caller received the same bogus URL.
  if environment.get_value('USE_TEST_DEPLOYMENT'):
    return f'gs://{deployment_bucket}/test-deployment/{filename}'

  return f'gs://{deployment_bucket}/{filename}'
def get_source_url():
  """Return the source URL for this platform's deployment archive."""
  archive_filename = _platform_deployment_filename()
  return _deployment_file_url(archive_filename)
def get_source_manifest_url():
  """Return the URL of the source manifest in the deployment bucket."""
  manifest_filename = MANIFEST_FILENAME
  return _deployment_file_url(manifest_filename)
def clear_old_files(directory, extracted_file_set):
  """Remove files from the directory that aren't in the given file list."""
  for root, _, names in shell.walk(directory):
    for name in names:
      path = os.path.join(root, name)
      if path in extracted_file_set:
        continue
      shell.remove_file(path)

  # Prune any directories left empty by the removals above.
  shell.remove_empty_directories(directory)
def clear_pyc_files(directory):
  """Recursively remove all .pyc files from the given directory."""
  for root, _, names in shell.walk(directory):
    for name in names:
      if name.endswith('.pyc'):
        shell.remove_file(os.path.join(root, name))
def track_revision():
  """Get the local revision and report it as a bot-count metric."""
  local_revision = get_local_source_revision()
  monitoring_metrics.BOT_COUNT.set(1, {'revision': local_revision or ''})
def get_local_source_revision():
  """Return the local source revision."""
  # Delegates to the shared utility. A falsy result means no local manifest
  # is present; callers (e.g. get_newer_source_revision) treat that as
  # "force an update".
  return utils.current_source_version()
def get_remote_source_revision(source_manifest_url):
  """Get remote revision. We refactor this method out, so that we can mock
  it."""
  # The manifest file contains just the revision string; strip surrounding
  # whitespace/newline after decoding.
  return storage.read_data(source_manifest_url).decode('utf-8').strip()
def get_newer_source_revision():
  """Returns the latest source revision if there is an update, or None if the
  current source is up to date."""
  if (environment.get_value('LOCAL_SRC') or
      environment.get_value('LOCAL_DEVELOPMENT')):
    logs.log('Using local source, skipping source code update.')
    return None

  root_directory = environment.get_value('ROOT_DIR')
  temp_directory = environment.get_value('BOT_TMPDIR')
  source_manifest_url = get_source_manifest_url()
  if (not get_source_url() or not source_manifest_url or not temp_directory or
      not root_directory):
    logs.log('Skipping source code update.')
    return None

  logs.log('Checking source code for updates.')
  try:
    source_version = get_remote_source_revision(source_manifest_url)
  except Exception:
    logs.log_error('Error occurred while checking source version.')
    return None

  local_source_version = get_local_source_revision()
  if not local_source_version:
    logs.log('No manifest found. Forcing an update.')
    return source_version

  logs.log('Local source code version: %s.' % local_source_version)
  logs.log('Remote source code version: %s.' % source_version)
  # NOTE(review): versions are compared lexicographically as strings; this
  # assumes revision strings are generated so that string ordering matches
  # chronological ordering — confirm against the manifest format.
  if local_source_version >= source_version:
    # Fixed typo in log message: 'souce' -> 'source'.
    logs.log('Remote source code <= local source code. No update.')
    # No source code update found. Source code is current, bail out.
    return None

  logs.log(f'New source code: {source_version}')
  return source_version
def run_platform_init_scripts():
  """Run platform specific initialization scripts."""
  logs.log('Running platform initialization scripts.')

  # Android is detected via the environment helper rather than the platform
  # name, so it is checked before the name-based dispatch below.
  if environment.is_android():
    android_init.run()
  else:
    init_modules = {
        'CHROMEOS': chromeos_init,
        'FUCHSIA': fuchsia_init,
        'LINUX': linux_init,
        'MAC': mac_init,
        'WINDOWS': windows_init,
    }
    init_module = init_modules.get(environment.platform())
    if init_module is None:
      raise RuntimeError('Unsupported platform')
    init_module.run()

  logs.log('Completed running platform initialization scripts.')
def update_source_code():
  """Updates source code files with latest version from appengine."""
  process_handler.cleanup_stale_processes()
  shell.clear_temp_directory()

  # ROOT_DIR just means the clusterfuzz directory.
  root_directory = environment.get_value('ROOT_DIR')
  cf_source_root_parent_dir = os.path.dirname(root_directory)
  temp_archive = os.path.join(cf_source_root_parent_dir,
                              'clusterfuzz-source.zip')
  try:
    storage.copy_file_from(get_source_url(), temp_archive)
  except Exception:
    logs.log_error('Could not retrieve source code archive from url.')
    return

  try:
    reader = archive.open(temp_archive)
  except Exception:
    logs.log_error('Bad zip file.')
    return

  src_directory = os.path.join(root_directory, 'src')
  error_occurred = False
  # Set of normalized absolute paths that exist in the new archive; used
  # afterwards to delete stale files no longer shipped.
  normalized_file_set = set()
  for file in reader.list_members():
    filename = os.path.basename(file.name)

    # This file cannot be updated on the fly since it is running as server.
    if filename == 'adb':
      continue

    absolute_filepath = os.path.join(cf_source_root_parent_dir, file.name)
    # Normalize alternate path separators (e.g. '/' on Windows) so that the
    # realpath comparison and set membership below are consistent.
    if os.path.altsep:
      absolute_filepath = absolute_filepath.replace(os.path.altsep,
                                                    os.path.sep)

    # Skip paths that resolve elsewhere (e.g. through symlinks) — presumably
    # to avoid writing outside the extraction root via linked paths.
    if os.path.realpath(absolute_filepath) != absolute_filepath:
      continue

    normalized_file_set.add(absolute_filepath)
    try:
      file_extension = os.path.splitext(filename)[1]

      # Remove any .so files first before overwriting, as they can be loaded
      # in the memory of existing processes. Overwriting them directly causes
      # segfaults in existing processes (e.g. run.py).
      if file_extension == '.so' and os.path.exists(absolute_filepath):
        os.remove(absolute_filepath)

      # On Windows, to update DLLs (and native .pyd extensions), we rename it
      # first so that we can install the new version.
      if (environment.platform() == 'WINDOWS' and
          file_extension in ['.dll', '.pyd'] and
          os.path.exists(absolute_filepath)):
        _rename_dll_for_update(absolute_filepath)
    except Exception:
      logs.log_error('Failed to remove or move %s before extracting new '
                     'version.' % absolute_filepath)

    try:
      extracted_path = reader.extract(
          file.name, cf_source_root_parent_dir, trusted=True)
      # Ensure the extracted file is at least readable (adds r-- r-- ---
      # on top of the archived mode).
      mode = file.mode
      mode |= 0o440
      os.chmod(extracted_path, mode)
    except:
      error_occurred = True
      logs.log_error(
          f'Failed to extract file {file.name} from source archive.')

  reader.close()

  if error_occurred:
    # Partial extraction: leave old manifest intact so the next update run
    # retries, rather than clearing files below.
    return

  clear_pyc_files(src_directory)
  clear_old_files(src_directory, normalized_file_set)

  local_manifest_path = os.path.join(root_directory,
                                     utils.LOCAL_SOURCE_MANIFEST)
  source_version = utils.read_data_from_file(
      local_manifest_path, eval_data=False).decode('utf-8').strip()
  os.remove(temp_archive)
  logs.log('Source code updated to %s.' % source_version)
def update_tests_if_needed():
  """Updates layout tests every day."""
  data_directory = environment.get_value('FUZZ_DATA')
  error_occured = False
  expected_task_duration = 60 * 60  # 1 hour.
  retry_limit = environment.get_value('FAIL_RETRIES')
  temp_archive = os.path.join(data_directory, 'temp.zip')
  tests_url = environment.get_value('WEB_TESTS_URL')

  # Check if we have a valid tests url.
  if not tests_url:
    return

  # Layout test updates are usually disabled to speedup local testing.
  if environment.get_value('LOCAL_DEVELOPMENT'):
    return

  # |UPDATE_WEB_TESTS| env variable can be used to control our update behavior.
  if not environment.get_value('UPDATE_WEB_TESTS'):
    return

  # Rate-limit: skip if the last successful update is within
  # TESTS_UPDATE_INTERVAL_DAYS.
  last_modified_time = persistent_cache.get_value(
      TESTS_LAST_UPDATE_KEY, constructor=datetime.datetime.utcfromtimestamp)
  if (last_modified_time is not None and not dates.time_has_expired(
      last_modified_time, days=TESTS_UPDATE_INTERVAL_DAYS)):
    return

  logs.log('Updating layout tests.')
  tasks.track_task_start(
      tasks.Task('update_tests', '', ''), expected_task_duration)

  # Download and unpack the tests archive.
  for _ in range(retry_limit):
    try:
      # Start from a clean data directory each attempt.
      shell.remove_directory(data_directory, recreate=True)
      storage.copy_file_from(tests_url, temp_archive)
      with archive.open(temp_archive) as reader:
        reader.extract_all(data_directory, trusted=True)
      shell.remove_file(temp_archive)
      error_occured = False
      break
    except:
      logs.log_error(
          'Could not retrieve and unpack layout tests archive. Retrying.')
      error_occured = True

  # Only record the update timestamp on success, so a failed run retries on
  # the next invocation instead of waiting a full interval.
  if not error_occured:
    persistent_cache.set_value(
        TESTS_LAST_UPDATE_KEY, time.time(), persist_across_reboots=True)

  tasks.track_task_end()
def run():
  """Run update task."""
  # Since this code is particularly sensitive for bot stability, continue
  # execution but store the exception if anything goes wrong during one of
  # these steps.
  try:
    # Update heartbeat with current time.
    data_handler.update_heartbeat()

    # Check overall free disk space. If we are running too low, clear all
    # data directories like builds, fuzzers, data bundles, etc.
    shell.clear_data_directories_on_low_disk_space()

    # Download new layout tests once per day.
    if not environment.is_uworker():
      update_tests_if_needed()
  except Exception:
    logs.log_error('Error occurred while running update task.')

  # Even if there is an exception in one of the other steps, we want to try to
  # update the source. If for some reason the source code update fails, it is
  # not necessary to run the init scripts.
  try:
    # If there is a newer revision, exit and let run.py update the source code.
    if get_newer_source_revision() is not None:
      if environment.is_trusted_host():
        from clusterfuzz._internal.bot.untrusted_runner import host
        host.update_worker()

      # Exit cleanly so the supervising process (run.py) performs the update.
      sys.exit(0)

    # Run platform specific initialization scripts.
    run_platform_init_scripts()
  except Exception:
    logs.log_error('Error occurred while running update task.')
def _add_default_issue_metadata(testcase, fuzz_target_metadata):
  """Adds the default issue metadata (e.g. components, labels) to testcase."""
  testcase_metadata = testcase.get_metadata()
  for key, default_value in fuzz_target_metadata.items():
    # Only string metadata are supported.
    if not isinstance(default_value, str):
      continue

    # Start from the fuzz-target defaults, then append uploader-specified
    # values that are not already present (uploader values go last, giving
    # them preference).
    merged = utils.parse_delimited(
        default_value, delimiter=',', strip=True, remove_empty=True)
    uploader_value = testcase_metadata.get(key, '')
    uploader_items = utils.parse_delimited(
        uploader_value, delimiter=',', strip=True, remove_empty=True)
    for item in uploader_items:
      if item not in merged:
        merged.append(item)

    new_value = ','.join(merged)
    if new_value == uploader_value:
      # Nothing changed; avoid a useless log line and metadata write.
      continue

    logs.log('Updating issue metadata for {} from {} to {}.'.format(
        key, uploader_value, new_value))
    testcase.set_metadata(key, new_value)
def setup_build(testcase: data_types.Testcase,
                bad_revisions) -> Optional[uworker_msg_pb2.Output]:
  """Set up a custom or regular build based on revision. For regular builds,
  if a provided revision is not found, set up a build with the
  closest revision <= provided revision.

  Returns an error Output proto on failure, or None on success.
  """
  revision = testcase.crash_revision

  if revision and not build_manager.is_custom_binary():
    build_bucket_path = build_manager.get_primary_bucket_path()
    # Exclude known-bad revisions; an empty result is surfaced to postprocess
    # as an error Output.
    revision_list = build_manager.get_revisions_list(
        build_bucket_path, bad_revisions, testcase=testcase)
    if not revision_list:
      return uworker_msg_pb2.Output(
          error_type=uworker_msg_pb2.ErrorType.ANALYZE_NO_REVISIONS_LIST)

    # Snap to the closest available revision <= the crash revision.
    revision_index = revisions.find_min_revision_index(revision_list, revision)
    if revision_index is None:
      return uworker_msg_pb2.Output(
          error_type=uworker_msg_pb2.ErrorType.ANALYZE_NO_REVISION_INDEX)
    revision = revision_list[revision_index]

  build_manager.setup_build(revision)
  # None signals success to the caller.
  return None
def prepare_env_for_main(testcase_upload_metadata):
  """Prepares the environment for execute_task."""
  # Reset redzones.
  environment.reset_current_memory_tool_options(redzone_size=128)

  # Unset window location size and position properties so as to use default.
  environment.set_value('WINDOW_ARG', '')

  # Honor uploader-provided overrides, when present.
  user_timeout = testcase_upload_metadata.timeout
  if user_timeout:
    environment.set_value('TEST_TIMEOUT', user_timeout)

  user_retries = testcase_upload_metadata.retries
  if user_retries is not None:
    environment.set_value('CRASH_RETRIES', user_retries)
def setup_testcase_and_build(
    testcase, job_type, setup_input,
    bad_revisions) -> (Optional[str], Optional[uworker_msg_pb2.Output]):
  """Sets up the |testcase| and builds. Returns the path to the testcase on
  success, None on error.

  Returns a (testcase_file_path, error_output) pair; exactly one of the two
  is None.
  """
  # Set up testcase and get absolute testcase path.
  _, testcase_file_path, error = setup.setup_testcase(testcase, job_type,
                                                      setup_input)
  if error:
    return None, error

  # Set up build.
  error = setup_build(testcase, bad_revisions)
  if error:
    return None, error

  # Check if we have an application path. If not, our build failed
  # to setup correctly.
  if not build_manager.check_app_path():
    # Let postprocess handle ANALYZE_BUILD_SETUP and restart tasks if needed.
    return None, uworker_msg_pb2.Output(
        error_type=uworker_msg_pb2.ErrorType.ANALYZE_BUILD_SETUP)

  # Pull build-derived values (e.g. APP_ARGS) into the testcase entity.
  update_testcase_after_build_setup(testcase)
  testcase.absolute_path = testcase_file_path
  return testcase_file_path, None
def update_testcase_after_build_setup(testcase):
  """Updates the testcase entity with values from global state that was set
  during build setup."""
  # NOTE: This must be done after setting up the build, which also sets
  # environment variables consumed by set_initial_testcase_metadata. See
  # https://crbug.com/1453576.
  # Set initial testcase metadata fields (e.g. build url, etc).
  data_handler.set_initial_testcase_metadata(testcase)

  # Update minimized arguments and use ones provided during user upload.
  if not testcase.minimized_arguments:
    minimized_arguments = environment.get_value('APP_ARGS') or ''
    additional_command_line_flags = testcase.get_metadata(
        'uploaded_additional_args')
    if additional_command_line_flags:
      minimized_arguments += ' %s' % additional_command_line_flags
    # Keep the environment and the testcase entity in sync.
    environment.set_value('APP_ARGS', minimized_arguments)
    testcase.minimized_arguments = minimized_arguments
def initialize_testcase_for_main(testcase, job_type):
  """Initializes a testcase for the crash testing phase."""
  # Update initial testcase information.
  testcase.job_type = job_type
  testcase.queue = tasks.default_queue()
  # Cleared so it is repopulated from the fresh crash result in utask_main.
  testcase.crash_state = ''
  testcase.put()
def test_for_crash_with_retries(fuzz_target, testcase, testcase_file_path,
                                test_timeout):
  """Tests for a crash with retries. Tries with HTTP (with retries) if initial
  attempts fail. Returns the most recent crash result and the possibly updated
  HTTP flag."""
  http_flag = testcase.http_flag
  result = testcase_manager.test_for_crash_with_retries(
      fuzz_target,
      testcase,
      testcase_file_path,
      test_timeout,
      http_flag=http_flag,
      compare_crash=False)

  # If we don't get a crash, try enabling http to see if we can get a crash.
  # Skip engine fuzzer jobs (e.g. libFuzzer, AFL) for which http testcase
  # paths are not applicable.
  should_retry_with_http = (not result.is_crash() and not http_flag and
                            not environment.is_engine_fuzzer_job())
  if should_retry_with_http:
    result_with_http = testcase_manager.test_for_crash_with_retries(
        fuzz_target,
        testcase,
        testcase_file_path,
        test_timeout,
        http_flag=True,
        compare_crash=False)
    if result_with_http.is_crash():
      logs.log('Testcase needs http flag for crash.')
      return result_with_http, True

  return result, http_flag
def handle_noncrash(output):
  """Handles a non-crashing testcase. Either deletes the testcase or schedules
  another, final analysis."""
  # Could not reproduce the crash.
  testcase = data_handler.get_testcase_by_id(output.uworker_input.testcase_id)
  log_message = (
      f'Testcase didn\'t crash in {output.test_timeout} seconds (with retries)')
  data_handler.update_testcase_comment(testcase, data_types.TaskState.FINISHED,
                                       log_message)

  # For an unreproducible testcase, retry once on another bot to confirm
  # our results and in case this bot is in a bad state which we didn't catch
  # through our usual means.
  if is_first_analyze_attempt(testcase):
    testcase.status = 'Unreproducible, retrying'
    testcase.put()
    tasks.add_task('analyze', output.uworker_input.testcase_id,
                   output.uworker_input.job_type)
    return

  # Not the first attempt: give up and mark the uploaded testcase invalid.
  testcase_upload_metadata = query_testcase_upload_metadata(
      output.uworker_input.testcase_id)
  data_handler.mark_invalid_uploaded_testcase(
      testcase, testcase_upload_metadata, 'Unreproducible')
def update_testcase_after_crash(testcase, state, job_type, http_flag,
                                analyze_task_output):
  """Updates |testcase| based on |state|."""
  testcase.crash_type = state.crash_type
  testcase.crash_address = state.crash_address
  testcase.crash_state = state.crash_state
  testcase.http_flag = http_flag

  testcase.security_flag = crash_analyzer.is_security_issue(
      state.crash_stacktrace, state.crash_type, state.crash_address)

  # These are passed back to postprocess to update the testcase.
  analyze_task_output.crash_info_set = True
  analyze_task_output.http_flag = http_flag
  analyze_task_output.crash_type = state.crash_type
  analyze_task_output.crash_address = state.crash_address
  analyze_task_output.crash_state = state.crash_state
  analyze_task_output.security_flag = testcase.security_flag

  # If it is, guess the severity.
  if testcase.security_flag:
    testcase.security_severity = severity_analyzer.get_security_severity(
        state.crash_type, state.crash_stacktrace, job_type,
        bool(testcase.gestures))
    # Only propagate a severity that was actually determined; the proto field
    # stays unset otherwise.
    if testcase.security_severity is not None:
      analyze_task_output.security_severity = testcase.security_severity
def utask_preprocess(testcase_id, job_type, uworker_env):
  """Runs preprocessing for analyze task.

  Returns the serialized uworker input, or None when the testcase has no
  upload metadata (in which case it is deleted).
  """
  # Get the testcase from the database and mark it as started.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)

  testcase_upload_metadata = query_testcase_upload_metadata(testcase_id)
  if not testcase_upload_metadata:
    # Orphaned testcase; delete rather than analyze.
    logs.log_error(
        'Testcase %s has no associated upload metadata.' % testcase_id)
    testcase.key.delete()
    return None

  # Store the bot name and timestamp in upload metadata.
  testcase_upload_metadata.bot_name = environment.get_value('BOT_NAME')
  testcase_upload_metadata.timestamp = datetime.datetime.utcnow()
  testcase_upload_metadata.put()

  initialize_testcase_for_main(testcase, job_type)

  setup_input = setup.preprocess_setup_testcase(testcase, uworker_env)
  analyze_task_input = get_analyze_task_input()

  # Serialize datastore entities into the proto shipped to the untrusted
  # worker (utask_main).
  uworker_input = uworker_msg_pb2.Input(
      testcase_upload_metadata=uworker_io.entity_to_protobuf(
          testcase_upload_metadata),
      testcase=uworker_io.entity_to_protobuf(testcase),
      testcase_id=testcase_id,
      uworker_env=uworker_env,
      setup_input=setup_input,
      job_type=job_type,
      analyze_task_input=analyze_task_input,
  )

  testcase_manager.preprocess_testcase_manager(testcase, uworker_input)
  return uworker_input
def _build_task_output(
    testcase: data_types.Testcase) -> uworker_msg_pb2.AnalyzeTaskOutput:
  """Copies the testcase updated fields to analyze_task_output to be updated in
  postprocess."""
  task_output = uworker_msg_pb2.AnalyzeTaskOutput()
  task_output.crash_revision = int(testcase.crash_revision)
  task_output.absolute_path = testcase.absolute_path
  task_output.minimized_arguments = testcase.minimized_arguments

  # Optional metadata fields: only set them on the proto when present, so
  # postprocess can distinguish "unset" from empty.
  for metadata_key in ('build_key', 'build_url', 'gn_args'):
    metadata_value = testcase.get_metadata(metadata_key)
    if metadata_value:
      setattr(task_output, metadata_key, metadata_value)

  if testcase.platform:
    task_output.platform = testcase.platform
  if testcase.platform_id:
    task_output.platform_id = testcase.platform_id
  return task_output
def utask_main(uworker_input):
  """Executes the untrusted part of analyze_task.

  Returns an Output proto; on error paths, error_type is set so postprocess
  can dispatch to the appropriate handler.
  """
  testcase_upload_metadata = uworker_io.entity_from_protobuf(
      uworker_input.testcase_upload_metadata,
      data_types.TestcaseUploadMetadata)
  testcase = uworker_io.entity_from_protobuf(uworker_input.testcase,
                                             data_types.Testcase)
  uworker_io.check_handling_testcase_safe(testcase)
  prepare_env_for_main(testcase_upload_metadata)

  is_lsan_enabled = environment.get_value('LSAN')
  if is_lsan_enabled:
    # Creates empty local blacklist so all leaks will be visible to uploader.
    leak_blacklist.create_empty_local_blacklist()

  testcase_file_path, output = setup_testcase_and_build(
      testcase, uworker_input.job_type, uworker_input.setup_input,
      uworker_input.analyze_task_input.bad_revisions)
  # Recorded before the failure check; APP_REVISION is set during build setup.
  testcase.crash_revision = environment.get_value('APP_REVISION')
  if not testcase_file_path:
    return output

  analyze_task_output = _build_task_output(testcase)

  # Initialize some variables.
  test_timeout = environment.get_value('TEST_TIMEOUT')

  fuzz_target = testcase_manager.get_fuzz_target_from_input(uworker_input)
  result, http_flag = test_for_crash_with_retries(
      fuzz_target, testcase, testcase_file_path, test_timeout)

  # Set application command line with the correct http flag.
  application_command_line = (
      testcase_manager.get_command_line_for_application(
          testcase_file_path, needs_http=http_flag))

  # Get the crash data.
  crashed = result.is_crash()
  crash_time = result.get_crash_time()
  state = result.get_symbolized_data()
  unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)

  # In the general case, we will not attempt to symbolize if we do not detect
  # a crash. For user uploads, we should symbolize anyway to provide more
  # information about what might be happening.
  crash_stacktrace_output = utils.get_crash_stacktrace_output(
      application_command_line, state.crash_stacktrace,
      unsymbolized_crash_stacktrace)
  testcase.crash_stacktrace = data_handler.filter_stacktrace(
      crash_stacktrace_output)
  analyze_task_output.crash_stacktrace = testcase.crash_stacktrace

  if not crashed:
    # Non-crash is reported as an error type so postprocess can decide
    # between retrying and marking the upload invalid.
    return uworker_msg_pb2.Output(
        analyze_task_output=analyze_task_output,
        error_type=uworker_msg_pb2.ErrorType.ANALYZE_NO_CRASH,
        test_timeout=test_timeout)

  # Update testcase crash parameters.
  update_testcase_after_crash(testcase, state, uworker_input.job_type,
                              http_flag, analyze_task_output)

  # See if we have to ignore this crash.
  if crash_analyzer.ignore_stacktrace(state.crash_stacktrace):
    # TODO(metzman): Handle this by closing the testcase on the trusted
    # worker. Also, deal with the other cases where we are updating testcase
    # comment in untrusted.
    data_handler.close_invalid_uploaded_testcase(
        testcase, testcase_upload_metadata, 'Irrelevant')
    return uworker_msg_pb2.Output(
        analyze_task_output=analyze_task_output,
        error_type=uworker_msg_pb2.ErrorType.UNHANDLED)

  # Sets testcase.one_time_crasher_flag as a side effect.
  test_for_reproducibility(fuzz_target, testcase, testcase_file_path, state,
                           test_timeout)
  analyze_task_output.one_time_crasher_flag = testcase.one_time_crasher_flag

  fuzz_target_metadata = engine_common.get_fuzz_target_issue_metadata(
      fuzz_target)
  return uworker_msg_pb2.Output(
      analyze_task_output=analyze_task_output,
      test_timeout=test_timeout,
      crash_time=crash_time,
      issue_metadata=fuzz_target_metadata)
def handle_build_setup_error(output):
  """Handles errors for scenarios where build setup fails."""
  testcase = data_handler.get_testcase_by_id(output.uworker_input.testcase_id)
  data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                       'Build setup failed')

  if is_first_analyze_attempt(testcase):
    # Retry the analyze task once after FAIL_WAIT, in case the build setup
    # failure was transient.
    task_name = 'analyze'
    testcase_fail_wait = environment.get_value('FAIL_WAIT')
    tasks.add_task(
        task_name,
        output.uworker_input.testcase_id,
        output.uworker_input.job_type,
        wait_time=testcase_fail_wait)
    return

  # Second failure: mark the uploaded testcase as invalid.
  testcase_upload_metadata = query_testcase_upload_metadata(
      output.uworker_input.testcase_id)
  data_handler.mark_invalid_uploaded_testcase(
      testcase, testcase_upload_metadata, 'Build setup failed')
def _update_testcase(output):
  """Updates the testcase using the info passed from utask_main."""
  if not output.HasField('analyze_task_output'):
    return

  testcase = data_handler.get_testcase_by_id(output.uworker_input.testcase_id)
  analyze_task_output = output.analyze_task_output

  testcase.crash_revision = analyze_task_output.crash_revision
  testcase.absolute_path = analyze_task_output.absolute_path
  testcase.minimized_arguments = analyze_task_output.minimized_arguments
  testcase.crash_stacktrace = analyze_task_output.crash_stacktrace

  # Crash-related fields are only valid when utask_main observed a crash
  # (crash_info_set is set by update_testcase_after_crash).
  if analyze_task_output.crash_info_set:
    testcase.http_flag = analyze_task_output.http_flag
    testcase.crash_type = analyze_task_output.crash_type
    testcase.crash_address = analyze_task_output.crash_address
    testcase.crash_state = analyze_task_output.crash_state
    testcase.security_flag = analyze_task_output.security_flag
    if testcase.security_flag:
      # security_severity is optional on the proto; unset means the severity
      # could not be determined.
      if analyze_task_output.HasField('security_severity'):
        testcase.security_severity = analyze_task_output.security_severity
      else:
        testcase.security_severity = None
    testcase.one_time_crasher_flag = analyze_task_output.one_time_crasher_flag

  # For the following fields, we are assuming an empty string/ None is
  # invalid.
  if analyze_task_output.build_key:
    testcase.set_metadata(
        'build_key', analyze_task_output.build_key, update_testcase=False)
  if analyze_task_output.build_url:
    testcase.set_metadata(
        'build_url', analyze_task_output.build_url, update_testcase=False)
  if analyze_task_output.gn_args:
    testcase.set_metadata(
        'gn_args', analyze_task_output.gn_args, update_testcase=False)

  if analyze_task_output.platform:
    testcase.platform = analyze_task_output.platform
  if analyze_task_output.platform_id:
    testcase.platform_id = analyze_task_output.platform_id

  testcase.put()
def utask_postprocess(output):
  """Trusted: Cleans up after a uworker execute_task, writing anything needed
  to the db."""
  _update_testcase(output)
  if output.error_type != uworker_msg_pb2.ErrorType.NO_ERROR:
    _ERROR_HANDLER.handle(output)
    return

  testcase = data_handler.get_testcase_by_id(output.uworker_input.testcase_id)
  testcase_upload_metadata = query_testcase_upload_metadata(
      output.uworker_input.testcase_id)

  log_message = (f'Testcase crashed in {output.test_timeout} seconds '
                 f'(r{testcase.crash_revision})')
  data_handler.update_testcase_comment(testcase, data_types.TaskState.FINISHED,
                                       log_message)

  # Check to see if this is a duplicate.
  data_handler.check_uploaded_testcase_duplicate(testcase,
                                                 testcase_upload_metadata)

  # Set testcase and metadata status if not set already.
  if testcase.status == 'Duplicate':
    # For testcase uploaded by bots (with quiet flag), don't create additional
    # tasks.
    if testcase_upload_metadata.quiet_flag:
      data_handler.close_invalid_uploaded_testcase(
          testcase, testcase_upload_metadata, 'Duplicate')
      return
  else:
    # New testcase.
    testcase.status = 'Processed'
    testcase_upload_metadata.status = 'Confirmed'

    # Reset the timestamp as well, to respect
    # data_types.MIN_ELAPSED_TIME_SINCE_REPORT. Otherwise it may get filed by
    # triage task prematurely without the grouper having a chance to run on
    # this testcase.
    testcase.timestamp = utils.utcnow()

  # Add new leaks to global blacklist to avoid detecting duplicates.
  # Only add if testcase has a direct leak crash and if it's reproducible.
  is_lsan_enabled = output.uworker_input.uworker_env.get('LSAN')
  if is_lsan_enabled:
    leak_blacklist.add_crash_to_global_blacklist_if_needed(testcase)

  # Update the testcase values.
  testcase.put()

  # Update the upload metadata.
  testcase_upload_metadata.security_flag = testcase.security_flag
  testcase_upload_metadata.put()

  _add_default_issue_metadata(testcase, output.issue_metadata)
  logs.log('Creating post-analyze tasks.')

  # Create tasks to
  # 1. Minimize testcase (minimize).
  # 2. Find regression range (regression).
  # 3. Find testcase impact on production branches (impact).
  # 4. Check whether testcase is fixed (progression).
  # 5. Get second stacktrace from another job in case of
  #    one-time crashes (stack).
  task_creation.create_tasks(testcase)
Return full paths to corpus files in |corpus_path|. | def _get_corpus_file_paths(corpus_path):
"""Return full paths to corpus files in |corpus_path|."""
return [
os.path.join(corpus_path, filename)
for filename in os.listdir(corpus_path)
] |
def _limit_corpus_size(corpus_url):
  """Limit number of files and size of a corpus."""
  corpus_count = 0
  corpus_size = 0
  deleted_corpus_count = 0
  bucket, _ = storage.get_bucket_name_and_path(corpus_url)
  # Files are kept in blob-listing order until either the file-count or the
  # cumulative-size limit is exceeded; everything after that is deleted.
  for corpus_file in storage.get_blobs(corpus_url):
    corpus_count += 1
    corpus_size += corpus_file['size']
    if (corpus_count > CORPUS_FILES_LIMIT_FOR_FAILURES or
        corpus_size > CORPUS_SIZE_LIMIT_FOR_FAILURES):
      path_to_delete = storage.get_cloud_storage_file_path(
          bucket, corpus_file['name'])
      storage.delete(path_to_delete)
      deleted_corpus_count += 1

  if deleted_corpus_count:
    logs.log('Removed %d files from oversized corpus: %s.' %
             (deleted_corpus_count, corpus_url))
def _get_time_remaining(start_time):
  """Return the number of seconds left before the pruning timeout elapses."""
  elapsed_seconds = (datetime.datetime.utcnow() - start_time).total_seconds()
  return CORPUS_PRUNING_TIMEOUT - int(elapsed_seconds)
def _fill_cross_pollination_stats(stats, output):
  """Fills the cross pollination statistics in the corpus pruning output."""
  # No stats are gathered when pruning timed out or there was no corpus;
  # leave the proto field unset in that case.
  if not stats:
    return

  statistics = uworker_msg_pb2.CrossPollinationStatistics(
      project_qualified_name=stats.project_qualified_name,
      sources=stats.sources,
      initial_corpus_size=stats.initial_corpus_size,
      corpus_size=stats.corpus_size,
      initial_edge_coverage=stats.initial_edge_coverage,
      edge_coverage=stats.edge_coverage,
      initial_feature_coverage=stats.initial_feature_coverage,
      feature_coverage=stats.feature_coverage)
  output.corpus_pruning_task_output.cross_pollination_stats.CopyFrom(
      statistics)
def _record_cross_pollination_stats(output):
  """Log stats about cross pollination in BigQuery."""
  # If no stats were gathered due to a timeout or lack of corpus, return.
  if not output.corpus_pruning_task_output.HasField('cross_pollination_stats'):
    return

  stats = output.corpus_pruning_task_output.cross_pollination_stats
  bigquery_row = {
      'project_qualified_name': stats.project_qualified_name,
      'sources': stats.sources,
      'initial_corpus_size': stats.initial_corpus_size,
      'corpus_size': stats.corpus_size,
      'initial_edge_coverage': stats.initial_edge_coverage,
      'edge_coverage': stats.edge_coverage,
      'initial_feature_coverage': stats.initial_feature_coverage,
      'feature_coverage': stats.feature_coverage
  }

  # BigQuery not available in local development. This is necessary because the
  # untrusted runner is in a separate process and can't be easily mocked.
  # Check here instead of earlier to test as much of the function as we can.
  if environment.get_value('LOCAL_DEVELOPMENT') or environment.get_value(
      'PY_UNITTESTS'):
    return

  client = big_query.Client(
      dataset_id='main', table_id='cross_pollination_statistics')
  client.insert([big_query.Insert(row=bigquery_row, insert_id=None)])
def do_corpus_pruning(context, revision):
  """Run corpus pruning.

  Syncs the corpus from GCS, restores a batch of quarantined units, shrinks
  the corpus via corpus merge, backs it up, then cross pollinates from other
  fuzzers' corpora if time remains.

  Args:
    context: pruning Context holding the fuzz target, corpus paths and GCS
      handles.
    revision: build revision to set up (0 means trunk).

  Returns:
    A CorpusPruningResult, or None if there was no time left for the shared
    corpus merging phase (the pruned corpus has already been synced by then).

  Raises:
    CorpusPruningError: if the build could not be set up.
  """
  # Set |FUZZ_TARGET| environment variable to help with unarchiving only fuzz
  # target and its related files.
  environment.set_value('FUZZ_TARGET', context.fuzz_target.binary)

  if environment.is_trusted_host():
    # On a trusted host, delegate the entire operation to the untrusted worker.
    from clusterfuzz._internal.bot.untrusted_runner import tasks_host
    return tasks_host.do_corpus_pruning(context, revision)

  if not build_manager.setup_build(revision=revision):
    raise CorpusPruningError('Failed to setup build.')

  build_directory = environment.get_value('BUILD_DIR')
  start_time = datetime.datetime.utcnow()
  runner = Runner(build_directory, context)
  pruner = CorpusPruner(runner)
  fuzzer_binary_name = os.path.basename(runner.target_path)

  # Get initial corpus to process from GCS.
  context.sync_to_disk()
  initial_corpus_size = shell.get_directory_file_count(
      context.initial_corpus_path)

  # Restore a small batch of quarantined units back to corpus.
  context.restore_quarantined_units()

  # Shrink to a minimized corpus using corpus merge.
  pruner_stats = pruner.run(context.initial_corpus_path,
                            context.minimized_corpus_path,
                            context.bad_units_path)

  # Sync minimized corpus back to GCS.
  context.sync_to_gcs()

  logs.log('Saved minimize corpus.')

  # Create corpus backup.
  # Temporarily copy the past crash regressions folder into the minimized corpus
  # so that corpus backup archive can have both.
  regressions_input_dir = os.path.join(context.initial_corpus_path,
                                       'regressions')
  regressions_output_dir = os.path.join(context.minimized_corpus_path,
                                        'regressions')
  if shell.get_directory_file_count(regressions_input_dir):
    shutil.copytree(regressions_input_dir, regressions_output_dir)
  backup_succeeded = corpus_manager.backup_corpus(
      context.dated_backup_signed_url, context.corpus,
      context.minimized_corpus_path)
  corpus_backup_location = (
      context.dated_backup_gcs_url if backup_succeeded else None)
  shell.remove_directory(regressions_output_dir)

  minimized_corpus_size_units = shell.get_directory_file_count(
      context.minimized_corpus_path)
  minimized_corpus_size_bytes = shell.get_directory_size(
      context.minimized_corpus_path)

  logs.log('Corpus pruned from %d to %d units.' % (initial_corpus_size,
                                                   minimized_corpus_size_units))

  # Process bad units found during merge.
  # Mapping of crash state -> CorpusCrash
  crashes = {}
  pruner.process_bad_units(context.bad_units_path,
                           context.quarantine_corpus_path, crashes)
  context.quarantine_corpus.rsync_from_disk(context.quarantine_corpus_path)

  # Store corpus stats into CoverageInformation entity.
  project_qualified_name = context.fuzz_target.project_qualified_name()
  today = datetime.datetime.utcnow()
  coverage_info = data_types.CoverageInformation(
      fuzzer=project_qualified_name, date=today)

  quarantine_corpus_size = shell.get_directory_file_count(
      context.quarantine_corpus_path)
  quarantine_corpus_dir_size = shell.get_directory_size(
      context.quarantine_corpus_path)

  # Save the minimize corpus size before cross pollination to put in BigQuery.
  pre_pollination_corpus_size = minimized_corpus_size_units

  # Populate coverage stats.
  coverage_info.corpus_size_units = minimized_corpus_size_units
  coverage_info.corpus_size_bytes = minimized_corpus_size_bytes
  coverage_info.quarantine_size_units = quarantine_corpus_size
  coverage_info.quarantine_size_bytes = quarantine_corpus_dir_size
  coverage_info.corpus_backup_location = corpus_backup_location
  coverage_info.corpus_location = context.corpus.get_gcs_url()
  coverage_info.quarantine_location = context.quarantine_corpus.get_gcs_url()

  # Calculate remaining time to use for shared corpus merging.
  time_remaining = _get_time_remaining(start_time)
  if time_remaining <= 0:
    # Pruned corpus was already synced above, so bailing out here is safe.
    logs.log_warn('Not enough time for shared corpus merging.')
    return None

  cross_pollinator = CrossPollinator(runner)
  pollinator_stats = cross_pollinator.run(time_remaining)

  context.sync_to_gcs()

  # Update corpus size stats (cross pollination may have added units).
  minimized_corpus_size_units = shell.get_directory_file_count(
      context.minimized_corpus_path)
  minimized_corpus_size_bytes = shell.get_directory_size(
      context.minimized_corpus_path)
  coverage_info.corpus_size_units = minimized_corpus_size_units
  coverage_info.corpus_size_bytes = minimized_corpus_size_bytes

  logs.log('Finished.')

  sources = ','.join([
      fuzzer.fuzz_target.project_qualified_name()
      for fuzzer in context.cross_pollinate_fuzzers
  ])

  cross_pollination_stats = None
  if pruner_stats and pollinator_stats:
    cross_pollination_stats = CrossPollinationStats(
        project_qualified_name, sources, initial_corpus_size,
        pre_pollination_corpus_size, pruner_stats['edge_coverage'],
        pollinator_stats['edge_coverage'], pruner_stats['feature_coverage'],
        pollinator_stats['feature_coverage'])

  return CorpusPruningResult(
      coverage_info=coverage_info,
      crashes=list(crashes.values()),
      fuzzer_binary_name=fuzzer_binary_name,
      revision=environment.get_value('APP_REVISION'),
      cross_pollination_stats=cross_pollination_stats)
def _update_crash_unit_path(context, crash):
  """If running on a trusted host, updates the crash unit_path after copying
  the file locally."""
  if not environment.is_trusted_host():
    return

  from clusterfuzz._internal.bot.untrusted_runner import file_host
  local_unit_path = os.path.join(context.bad_units_path,
                                 os.path.basename(crash.unit_path))
  # Prevent the worker from escaping out of |context.bad_units_path|.
  if not file_host.is_directory_parent(local_unit_path,
                                       context.bad_units_path):
    raise CorpusPruningError('Invalid units path from worker.')

  file_host.copy_file_from_worker(crash.unit_path, local_unit_path)
  crash.unit_path = local_unit_path
def _upload_corpus_crashes_zip(context, result, corpus_crashes_blob_name,
                               corpus_crashes_upload_url):
  """Packs the corpus crashes in a zip file. The file is then uploaded
  using the signed upload url from the input.

  Args:
    context: the pruning Context (used to localize crash unit files).
    result: CorpusPruningResult whose |crashes| are archived.
    corpus_crashes_blob_name: blob name, reused as the temp zip filename.
    corpus_crashes_upload_url: signed URL the zip is uploaded to.
  """
  temp_dir = environment.get_value('BOT_TMPDIR')
  zip_filename = os.path.join(temp_dir, corpus_crashes_blob_name)
  try:
    with zipfile.ZipFile(zip_filename, 'w') as zip_file:
      for crash in result.crashes:
        _update_crash_unit_path(context, crash)
        unit_name = os.path.basename(crash.unit_path)
        # Store each unit under its basename; postprocess extracts by name.
        zip_file.write(crash.unit_path, unit_name, zipfile.ZIP_DEFLATED)

    with open(zip_filename, 'rb') as fp:
      data = fp.read()
    storage.upload_signed_url(data, corpus_crashes_upload_url)
  finally:
    # Fix: previously the temp zip leaked if zipping or the upload raised;
    # always reclaim it.
    if os.path.exists(zip_filename):
      os.remove(zip_filename)
def _process_corpus_crashes(output: uworker_msg_pb2.Output):
  """Process crashes found in the corpus.

  Downloads the crashes zip uploaded by utask_main, and for each crash with
  no existing testcase: stores the unit as a blob, creates a Testcase entity,
  attaches metadata, and schedules follow-up tasks (minimization first).
  """
  if not output.corpus_pruning_task_output.crashes:
    return

  corpus_pruning_output = output.corpus_pruning_task_output
  crash_revision = corpus_pruning_output.crash_revision
  fuzz_target = data_handler.get_fuzz_target(output.uworker_input.fuzzer_name)
  job_type = environment.get_value('JOB_NAME')
  minimized_arguments = f'%TESTCASE% {fuzz_target.binary}'
  project_name = data_handler.get_project_name(job_type)

  comment = (f'Fuzzer {fuzz_target.project_qualified_name()} generated corpus'
             f' testcase crashed (r{crash_revision})')

  # Copy the crashes zip file from cloud storage into a temporary directory.
  temp_dir = environment.get_value('BOT_TMPDIR')
  corpus_crashes_blob_name = (
      output.uworker_input.corpus_pruning_task_input.corpus_crashes_blob_name)
  corpus_crashes_zip_local_path = os.path.join(
      temp_dir, f'{corpus_crashes_blob_name}.zip')
  storage.copy_file_from(
      blobs.get_gcs_path(corpus_crashes_blob_name),
      corpus_crashes_zip_local_path)
  with archive.open(corpus_crashes_zip_local_path) as zip_reader:
    for crash in corpus_pruning_output.crashes:
      # Skip crashes that already have a testcase (deduplication).
      existing_testcase = data_handler.find_testcase(
          project_name,
          crash.crash_type,
          crash.crash_state,
          crash.security_flag,
          fuzz_target=fuzz_target.project_qualified_name())
      if existing_testcase:
        continue

      crash_local_unit_path = os.path.join(temp_dir, crash.unit_name)
      # Extract the crash unit_path into crash_local_unit_path
      zip_reader.extract(member=crash.unit_name, path=temp_dir)
      # Upload/store testcase.
      with open(crash_local_unit_path, 'rb') as f:
        key = blobs.write_blob(f)

      # Set the absolute_path property of the Testcase to a file in FUZZ_INPUTS
      # instead of the local quarantine directory.
      absolute_testcase_path = os.path.join(
          environment.get_value('FUZZ_INPUTS'), 'testcase')
      # TODO(https://b.corp.google.com/issues/328691756): Set trusted based on
      # the job when we start doing untrusted fuzzing.
      testcase_id = data_handler.store_testcase(
          crash=crash,
          fuzzed_keys=key,
          minimized_keys='',
          regression='',
          fixed='',
          one_time_crasher_flag=False,
          crash_revision=crash_revision,
          comment=comment,
          absolute_path=absolute_testcase_path,
          fuzzer_name=fuzz_target.engine,
          fully_qualified_fuzzer_name=fuzz_target.fully_qualified_name(),
          job_type=job_type,
          archived=False,
          archive_filename='',
          http_flag=False,
          gestures=None,
          redzone=DEFAULT_REDZONE,
          disable_ubsan=False,
          window_argument=None,
          timeout_multiplier=1.0,
          minimized_arguments=minimized_arguments,
          trusted=True)

      # Set fuzzer_binary_name in testcase metadata.
      testcase = data_handler.get_testcase_by_id(testcase_id)
      testcase.set_metadata('fuzzer_binary_name',
                            corpus_pruning_output.fuzzer_binary_name)

      # NOTE(review): |key| below shadows the blob key written above; safe
      # only because the blob key is no longer used past store_testcase —
      # consider renaming.
      if output.issue_metadata:
        for key, value in output.issue_metadata.items():
          testcase.set_metadata(key, value, update_testcase=False)
        testcase.put()

      # Create additional tasks for testcase (starting with minimization).
      testcase = data_handler.get_testcase_by_id(testcase_id)
      task_creation.create_tasks(testcase)
  os.remove(corpus_crashes_zip_local_path)
  # Cleanup the uploaded zip file.
  blobs.delete_blob(corpus_crashes_blob_name)
def _select_targets_and_jobs_for_pollination(engine_name, current_fuzzer_name):
  """Select jobs to use for cross pollination."""
  target_jobs = list(fuzz_target_utils.get_fuzz_target_jobs(engine=engine_name))
  targets = fuzz_target_utils.get_fuzz_targets_for_target_jobs(target_jobs)

  # Pair each target with its job, excluding the fuzzer currently being pruned.
  candidates = []
  for target, target_job in zip(targets, target_jobs):
    if target_job.fuzz_target_name != current_fuzzer_name:
      candidates.append((target, target_job))

  # Sample without replacement using a cryptographically seeded RNG.
  sample_size = min(len(candidates), CROSS_POLLINATE_FUZZER_COUNT)
  return random.SystemRandom().sample(candidates, sample_size)
def _get_cross_pollinate_fuzzers(
    engine_name: str, current_fuzzer_name: str
) -> List[uworker_msg_pb2.CrossPollinateFuzzerProto]:
  """Return a list of fuzzer objects to use for cross pollination."""
  pollinate_fuzzers = []
  selected = _select_targets_and_jobs_for_pollination(engine_name,
                                                      current_fuzzer_name)

  default_backup_bucket = utils.default_backup_bucket()
  for target, target_job in selected:
    job = data_types.Job.query(data_types.Job.name == target_job.job).get()
    if not job:
      continue

    job_environment = job.get_environment()
    backup_bucket_name = job_environment.get('BACKUP_BUCKET',
                                             default_backup_bucket)
    if not backup_bucket_name:
      # Without a backup bucket there is no corpus to pollinate from.
      continue
    corpus_engine_name = job_environment.get('CORPUS_FUZZER_NAME_OVERRIDE',
                                             engine_name)

    pollinate_fuzzers.append(
        uworker_msg_pb2.CrossPollinateFuzzerProto(
            fuzz_target=uworker_io.entity_to_protobuf(target),
            backup_bucket_name=backup_bucket_name,
            corpus_engine_name=corpus_engine_name,
        ))
  return pollinate_fuzzers
def _save_coverage_information(output):
  """Saves coverage information in datastore using an atomic transaction.

  Merges corpus/quarantine size and location stats from the task output into
  the (possibly pre-existing) CoverageInformation entity for the project/date.

  Raises:
    CorpusPruningError: if the transaction ultimately fails after retries.
  """
  if not output.corpus_pruning_task_output.HasField('coverage_info'):
    return

  cov_info = output.corpus_pruning_task_output.coverage_info

  # Use ndb.transaction with retries below to mitigate risk of a race condition.
  def _try_save_coverage_information():
    """Implements save_coverage_information function."""
    coverage_info = data_handler.get_coverage_information(
        cov_info.project_name,
        cov_info.timestamp.ToDatetime().date(),
        create_if_needed=True)

    # Intentionally skip edge and function coverage values as those would come
    # from fuzzer coverage cron task (see src/go/server/cron/coverage.go).
    coverage_info.corpus_size_units = cov_info.corpus_size_units
    coverage_info.corpus_size_bytes = cov_info.corpus_size_bytes
    coverage_info.corpus_location = cov_info.corpus_location
    # Only overwrite the backup location when a backup actually happened.
    if cov_info.corpus_backup_location:
      coverage_info.corpus_backup_location = cov_info.corpus_backup_location
    coverage_info.quarantine_size_units = cov_info.quarantine_size_units
    coverage_info.quarantine_size_bytes = cov_info.quarantine_size_bytes
    coverage_info.quarantine_location = cov_info.quarantine_location
    coverage_info.put()

  try:
    ndb.transaction(
        _try_save_coverage_information,
        retries=data_handler.DEFAULT_FAIL_RETRIES)
  except Exception as e:
    # TODO(metzman): Don't catch every exception, it makes testing almost
    # impossible.
    raise CorpusPruningError(
        'Failed to save corpus pruning result: %s.' % repr(e))
def _extract_coverage_information(context, result):
  """Extracts and stores the coverage information in a proto."""
  info_proto = uworker_msg_pb2.CoverageInformation()
  info_proto.project_name = context.fuzz_target.project_qualified_name()

  proto_timestamp = timestamp_pb2.Timestamp()  # pylint: disable=no-member
  proto_timestamp.FromDatetime(result.coverage_info.date)
  info_proto.timestamp.CopyFrom(proto_timestamp)

  # Intentionally skip edge and function coverage values as those would come
  # from fuzzer coverage cron task.
  source = result.coverage_info
  for field_name in ('corpus_size_units', 'corpus_size_bytes',
                     'corpus_location', 'quarantine_size_units',
                     'quarantine_size_bytes', 'quarantine_location'):
    setattr(info_proto, field_name, getattr(source, field_name))

  # Backup location is optional; only set it when a backup was made.
  if source.corpus_backup_location:
    info_proto.corpus_backup_location = source.corpus_backup_location
  return info_proto
def _extract_corpus_crashes(result):
  """Extracts the corpus crashes as a list of CrashInfo from the result."""

  def _to_proto(crash):
    # Only the unit's basename is carried here; the unit file itself travels
    # separately inside the corpus crashes zip.
    return uworker_msg_pb2.CrashInfo(
        crash_type=crash.crash_type,
        crash_state=crash.crash_state,
        security_flag=crash.security_flag,
        crash_address=crash.crash_address,
        crash_stacktrace=crash.crash_stacktrace,
        unit_name=os.path.basename(crash.unit_path))

  return [_to_proto(crash) for crash in result.crashes]
def utask_main(uworker_input):
  """Execute corpus pruning task (untrusted part).

  Sets up the fuzzer, runs pruning and cross pollination, uploads crashing
  units, and returns a serialized Output for utask_postprocess.
  """
  fuzz_target = uworker_io.entity_from_protobuf(
      uworker_input.corpus_pruning_task_input.fuzz_target,
      data_types.FuzzTarget)

  revision = 0  # Trunk revision

  if not setup.update_fuzzer_and_data_bundles(uworker_input.setup_input):
    error_message = f'Failed to set up fuzzer {fuzz_target.engine}.'
    logs.log_error(error_message)
    return uworker_msg_pb2.Output(
        error_type=uworker_msg_pb2.ErrorType.CORPUS_PRUNING_FUZZER_SETUP_FAILED)

  cross_pollinate_fuzzers = _get_cross_pollinate_fuzzers_from_protos(
      uworker_input.corpus_pruning_task_input.cross_pollinate_fuzzers)
  context = Context(uworker_input, fuzz_target, cross_pollinate_fuzzers)

  # Fix: the guard and the use previously read two different fields
  # (|uworker_input.global_blacklisted_functions| vs.
  # |uworker_input.corpus_task_input....|). utask_preprocess stores the global
  # blacklist on |setup_input|, so read it from there consistently.
  if uworker_input.setup_input.global_blacklisted_functions:
    leak_blacklist.copy_global_to_local_blacklist(
        uworker_input.setup_input.global_blacklisted_functions)

  uworker_output = None
  try:
    result = do_corpus_pruning(context, revision)
    issue_metadata = engine_common.get_fuzz_target_issue_metadata(fuzz_target)
    issue_metadata = issue_metadata or {}
    _upload_corpus_crashes_zip(
        context, result,
        uworker_input.corpus_pruning_task_input.corpus_crashes_blob_name,
        uworker_input.corpus_pruning_task_input.corpus_crashes_upload_url)
    uworker_output = uworker_msg_pb2.Output(
        corpus_pruning_task_output=uworker_msg_pb2.CorpusPruningTaskOutput(
            coverage_info=_extract_coverage_information(context, result),
            fuzzer_binary_name=result.fuzzer_binary_name,
            crash_revision=result.revision,
            crashes=_extract_corpus_crashes(result),
            corpus_backup_uploaded=bool(result.coverage_info.corpus_location)),
        issue_metadata=issue_metadata)
    _fill_cross_pollination_stats(result.cross_pollination_stats,
                                  uworker_output)
  except Exception as e:
    # TODO(metzman): Don't catch every exception, it makes testing almost
    # impossible.
    logs.log_error(f'Corpus pruning failed: {e}')
    uworker_output = uworker_msg_pb2.Output(
        error_type=uworker_msg_pb2.CORPUS_PRUNING_ERROR)
  finally:
    context.cleanup()
  return uworker_output
def _create_backup_urls(fuzz_target: data_types.FuzzTarget,
                        corpus_pruning_task_input):
  """Creates the backup urls if a backup bucket is provided."""
  backup_bucket_name = environment.get_value('BACKUP_BUCKET')
  if not backup_bucket_name:
    logs.log('No backup bucket provided, corpus backup will be skipped.')
    return

  engine = fuzz_target.engine
  project_name = fuzz_target.project_qualified_name()
  timestamp = str(utils.utcnow().date())

  # One URL for today's dated backup, one for the rolling "latest" pointer.
  dated_backup_gcs_url = corpus_manager.gcs_url_for_backup_file(
      backup_bucket_name, engine, project_name, timestamp)
  latest_backup_gcs_url = corpus_manager.gcs_url_for_backup_file(
      backup_bucket_name, engine, project_name,
      corpus_manager.LATEST_BACKUP_TIMESTAMP)

  corpus_pruning_task_input.dated_backup_gcs_url = dated_backup_gcs_url
  corpus_pruning_task_input.latest_backup_gcs_url = latest_backup_gcs_url
  # Only the dated backup is uploaded from the untrusted worker, so only it
  # needs a signed upload URL.
  corpus_pruning_task_input.dated_backup_signed_url = (
      storage.get_signed_upload_url(dated_backup_gcs_url))
def utask_preprocess(fuzzer_name, job_type, uworker_env):
  """Runs preprocessing for corpus pruning task.

  Acquires the per-fuzzer/job task lock, optionally limits oversized corpora
  after a failed run, selects cross-pollination fuzzers, and builds the
  serialized Input for utask_main.

  Returns:
    A uworker_msg_pb2.Input, or None when another pruning task for the same
    fuzzer/job is still running.
  """
  fuzz_target = data_handler.get_fuzz_target(fuzzer_name)
  task_name = f'corpus_pruning_{fuzzer_name}_{job_type}'

  # Get status of last execution.
  last_execution_metadata = data_handler.get_task_status(task_name)
  last_execution_failed = bool(
      last_execution_metadata and
      last_execution_metadata.status == data_types.TaskState.ERROR)

  # Make sure we're the only instance running for the given fuzzer and
  # job_type.
  if not data_handler.update_task_status(task_name,
                                         data_types.TaskState.STARTED):
    logs.log('A previous corpus pruning task is still running, exiting.')
    return None

  setup_input = (
      setup.preprocess_update_fuzzer_and_data_bundles(fuzz_target.engine))

  # TODO(unassigned): Use coverage information for better selection here.
  cross_pollinate_fuzzers = _get_cross_pollinate_fuzzers(
      fuzz_target.engine, fuzzer_name)

  # If our last execution failed, shrink to a randomized corpus of usable size
  # to prevent corpus from growing unbounded and recurring failures when trying
  # to minimize it.
  if last_execution_failed:
    # TODO(metzman): Is this too expensive to do in preprocess?
    corpus_urls = corpus_manager.get_pruning_corpora_urls(
        fuzz_target.engine, fuzz_target.project_qualified_name())
    for corpus_url in corpus_urls:
      _limit_corpus_size(corpus_url)

  corpus, quarantine_corpus = corpus_manager.get_corpuses_for_pruning(
      fuzz_target.engine, fuzz_target.project_qualified_name())

  # Pre-sign an upload URL so the untrusted worker can ship crashing units.
  (corpus_crashes_blob_name,
   corpus_crashes_upload_url) = blobs.get_blob_signed_upload_url()
  corpus_pruning_task_input = uworker_msg_pb2.CorpusPruningTaskInput(
      fuzz_target=uworker_io.entity_to_protobuf(fuzz_target),
      last_execution_failed=last_execution_failed,
      cross_pollinate_fuzzers=cross_pollinate_fuzzers,
      corpus=corpus.proto_corpus,
      quarantine_corpus=quarantine_corpus.proto_corpus,
      corpus_crashes_blob_name=corpus_crashes_blob_name,
      corpus_crashes_upload_url=corpus_crashes_upload_url)
  _create_backup_urls(fuzz_target, corpus_pruning_task_input)

  if environment.get_value('LSAN'):
    # Copy global blacklist into local suppressions file if LSan is enabled.
    setup_input.global_blacklisted_functions.extend(
        leak_blacklist.get_global_blacklisted_functions())

  return uworker_msg_pb2.Input(
      job_type=job_type,
      fuzzer_name=fuzzer_name,
      uworker_env=uworker_env,
      setup_input=setup_input,
      corpus_pruning_task_input=corpus_pruning_task_input)
def _update_latest_backup(output):
  """Updates the latest_backup with the dated_backup uploaded in utask_main
  if any.

  Best-effort: failures are logged but never propagate, so postprocessing
  continues.
  """
  if not output.corpus_pruning_task_output.corpus_backup_uploaded:
    return

  task_input = output.uworker_input.corpus_pruning_task_input
  dated_backup_gcs_url = task_input.dated_backup_gcs_url
  latest_backup_gcs_url = task_input.latest_backup_gcs_url

  try:
    success = storage.copy_blob(dated_backup_gcs_url, latest_backup_gcs_url)
  except Exception:
    # Fix: was a bare `except:` (which also swallows SystemExit and
    # KeyboardInterrupt) with a duplicated log call; narrow it and log once.
    success = False
  if not success:
    logs.log_error('backup_corpus: Failed to update latest corpus backup at '
                   f'{latest_backup_gcs_url}.')
def utask_postprocess(output):
  """Trusted: Handles errors and writes anything needed to the db."""
  if output.error_type != uworker_msg_pb2.ErrorType.NO_ERROR:
    _ERROR_HANDLER.handle(output)
    return

  uworker_input = output.uworker_input
  task_name = (f'corpus_pruning_{uworker_input.fuzzer_name}_'
               f'{uworker_input.job_type}')

  # Persist everything produced by the untrusted run, then mark the task done.
  for step in (_update_latest_backup, _record_cross_pollination_stats,
               _save_coverage_information, _process_corpus_crashes):
    step(output)

  data_handler.update_task_status(task_name, data_types.TaskState.FINISHED)
def do_multiarmed_bandit_strategy_selection(uworker_env):
  """Set multi-armed bandit strategy selection during preprocessing. Set
  multi-armed bandit strategy selection distribution as an environment variable
  so we can access it in launcher."""
  # TODO: Remove environment variable once fuzzing engine refactor is
  # complete.
  if not environment.get_value(
      'USE_BANDIT_STRATEGY_SELECTION', env=uworker_env):
    return

  chosen_method = utils.random_weighted_choice(SELECTION_METHOD_DISTRIBUTION,
                                               'probability')
  environment.set_value('STRATEGY_SELECTION_METHOD',
                        chosen_method.method_name, uworker_env)

  distribution = get_strategy_distribution_from_ndb()
  if not distribution:
    # Without a stored distribution, the method name alone is enough.
    return
  environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution,
                        uworker_env)
def get_unsymbolized_crash_stacktrace(stack_file_path):
  """Read unsymbolized crash stacktrace."""
  # Read raw bytes; decoding is delegated to the shared utility so that
  # mixed/invalid encodings are handled uniformly.
  with open(stack_file_path, 'rb') as stack_file:
    raw_stacktrace = stack_file.read()
  return utils.decode_to_unicode(raw_stacktrace)
def find_main_crash(crashes, full_fuzzer_name, test_timeout):
  """Find the first reproducible crash or the first valid crash.
  And return the crash and the one_time_crasher_flag.

  Args:
    crashes: candidate crash objects, in discovery order.
    full_fuzzer_name: fully qualified fuzzer name used to look up the target.
    test_timeout: timeout for each reproduction attempt.

  Returns:
    (crash, one_time_crasher_flag): flag is False for a reproducible crash,
    True when only non-reproducible (but valid) crashes exist, and
    (None, None) when no crash is valid.
  """
  # The fuzz target is identical for every candidate; fetch it once instead of
  # doing a lookup per crash inside the loop (loop-invariant hoist).
  fuzz_target = data_handler.get_fuzz_target(full_fuzzer_name)
  for crash in crashes:
    # Archiving testcase to blobstore when we need to because it's expensive.
    crash.archive_testcase_in_blobstore()

    # We need to check again if the crash is valid. In other words, we check
    # if archiving to blobstore succeeded.
    if not crash.is_valid():
      continue

    # We pass an empty expected crash state since our initial stack from fuzzing
    # can be incomplete. So, make a judgement on reproducibility based on passed
    # security flag and crash state generated from re-running testcase in
    # test_for_reproducibility. Minimize task will later update the new crash
    # type and crash state parameters.
    if testcase_manager.test_for_reproducibility(
        fuzz_target,
        crash.file_path,
        crash.crash_type,
        None,
        crash.security_flag,
        test_timeout,
        crash.http_flag,
        crash.gestures,
        arguments=crash.arguments):
      return crash, False

  # All crashes are non-reproducible. Therefore, we get the first valid one.
  for crash in crashes:
    if crash.is_valid():
      return crash, True

  return None, None
def _track_fuzzer_run_result(fuzzer_name, generated_testcase_count,
                             expected_testcase_count, return_code):
  """Track fuzzer run result"""
  if expected_testcase_count > 0:
    ratio = float(generated_testcase_count) / expected_testcase_count
    monitoring_metrics.FUZZER_TESTCASE_COUNT_RATIO.add(ratio,
                                                       {'fuzzer': fuzzer_name})

  # Clamp return code to max, min int 32-bit, otherwise it can get detected as
  # type long and we will exception out in infra_libs parsing pipeline.
  min_int32 = -(2**31)
  max_int32 = 2**31 - 1
  clamped_code = int(min(max_int32, max(min_int32, return_code)))

  monitoring_metrics.FUZZER_RETURN_CODE_COUNT.increment({
      'fuzzer': fuzzer_name,
      'return_code': clamped_code,
  })
def _track_build_run_result(job_type, _, is_bad_build):
  """Track build run result."""
  # FIXME: Add support for |crash_revision| as part of state.
  labels = {'job': job_type, 'bad_build': is_bad_build}
  monitoring_metrics.JOB_BAD_BUILD_COUNT.increment(labels)
def _track_testcase_run_result(fuzzer, job_type, new_crash_count,
                               known_crash_count):
  """Track testcase run result."""
  # Emit per-fuzzer and per-job counters for both known and new crashes.
  counters = (
      (monitoring_metrics.FUZZER_KNOWN_CRASH_COUNT, known_crash_count, {
          'fuzzer': fuzzer,
      }),
      (monitoring_metrics.FUZZER_NEW_CRASH_COUNT, new_crash_count, {
          'fuzzer': fuzzer,
      }),
      (monitoring_metrics.JOB_KNOWN_CRASH_COUNT, known_crash_count, {
          'job': job_type,
      }),
      (monitoring_metrics.JOB_NEW_CRASH_COUNT, new_crash_count, {
          'job': job_type,
      }),
  )
  for metric, count, labels in counters:
    metric.increment_by(count, labels)
def _last_sync_time(sync_file_path):
  """Read and parse the last sync file for the GCS corpus."""
  if not os.path.exists(sync_file_path):
    return None

  contents = utils.read_data_from_file(sync_file_path, eval_data=False)
  if not contents:
    logs.log_warn('Empty last sync file.', path=sync_file_path)
    return None

  try:
    # The file holds a single POSIX timestamp.
    return datetime.datetime.utcfromtimestamp(float(contents))
  except Exception as e:
    logs.log_error(
        'Malformed last sync file: "%s".' % str(e),
        path=sync_file_path,
        contents=contents)
    return None
def upload_testcase_run_stats(testcase_run):
  """Upload TestcaseRun stats."""
  # Each TestcaseRun is shipped as a single-element batch.
  fuzzer_stats.upload_stats([testcase_run])
def add_additional_testcase_run_data(testcase_run, fully_qualified_fuzzer_name,
                                     job_type, revision):
  """Add additional testcase run data."""
  # Tag the stats row with identifying fields not emitted by the fuzzer itself.
  extra_fields = {
      'fuzzer': fully_qualified_fuzzer_name,
      'job': job_type,
      'build_revision': revision,
  }
  for field_name, value in extra_fields.items():
    testcase_run[field_name] = value
def get_fuzzer_metadata_from_output(fuzzer_output):
  """Extract metadata from fuzzer output."""
  # Each matching line contributes one key (group 1) / value (group 2) pair;
  # later duplicates overwrite earlier ones, as in a plain loop.
  return {
      match.group(1): match.group(2)
      for match in map(FUZZER_METADATA_REGEX.match, fuzzer_output.splitlines())
      if match
  }
def get_testcases(testcase_count, testcase_directory, data_directory):
  """Return fuzzed testcases from the data directories.

  Args:
    testcase_count: number of testcases the fuzzer was expected to generate.
    testcase_directory: primary directory to scan for testcase files.
    data_directory: secondary directory to scan (may also hold a bot-specific
      testcases list file).

  Returns:
    Tuple of (testcase_file_paths, generated_testcase_count,
    generated_testcase_string).
  """
  logs.log('Locating generated test cases.')

  # Get the list of testcase files.
  testcase_directories = [testcase_directory, data_directory]
  testcase_file_paths = testcase_manager.get_testcases_from_directories(
      testcase_directories)

  # If the fuzzer created a bot-specific files list, add those now.
  bot_testcases_file_path = utils.get_bot_testcases_file_path(data_directory)
  if os.path.exists(bot_testcases_file_path):
    bot_testcases_file_content = utils.read_data_from_file(
        bot_testcases_file_path, eval_data=False)
    # The list file is consumed exactly once and then deleted.
    shell.remove_file(bot_testcases_file_path)
    if bot_testcases_file_content:
      bot_file_paths = bot_testcases_file_content.splitlines()
      testcase_file_paths += [
          utils.normalize_path(path) for path in bot_file_paths
      ]

  generated_testcase_count = len(testcase_file_paths)

  # Create output strings.
  generated_testcase_string = (
      'Generated %d/%d testcases.' % (generated_testcase_count, testcase_count))

  # Log the number of testcases generated.
  logs.log(generated_testcase_string)

  # If we are running the same command (again and again) on this bot,
  # we want to be careful of scenarios when the fuzzer starts failing
  # or has nothing to do, causing no testcases to be generated. This
  # will put lot of burden on appengine remote api.
  if (environment.get_value('COMMAND_OVERRIDE') and
      generated_testcase_count == 0):
    logs.log('No testcases generated. Sleeping for ~30 minutes.')
    time.sleep(random.uniform(1800, 2100))

  return (testcase_file_paths, generated_testcase_count,
          generated_testcase_string)
def pick_gestures(test_timeout):
  """Return a list of random gestures."""
  if not environment.get_value('ENABLE_GESTURES', True):
    # Gestures are disabled for this job.
    return []

  # Most runs use no gestures at all.
  if utils.random_number(0, DEFAULT_CHOOSE_PROBABILITY):
    return []

  gesture_count = utils.random_number(1, MAX_GESTURES)
  chosen_gestures = gesture_handler.get_gestures(gesture_count)
  if not chosen_gestures:
    return []

  # Schedule the gestures at a random point within the test run, biased
  # towards starting at or after the middle of the timeout.
  start_fraction = utils.random_element_from_list([0.25, 0.50, 0.50, 0.50])
  earliest_time = int(start_fraction * test_timeout)
  latest_time = test_timeout - 1
  trigger_time = utils.random_number(earliest_time, latest_time)

  chosen_gestures.append('Trigger:%d' % trigger_time)
  return chosen_gestures
def pick_redzone():
  """Return a random size for redzone."""
  thread_multiplier = environment.get_value('THREAD_MULTIPLIER', 1)

  if thread_multiplier == 1:
    size_weights = [(16, 1.0), (32, 1.0), (64, 0.5), (128, 0.5), (256, 0.25),
                    (512, 0.25)]
  else:
    # For beefier boxes, prioritize using bigger redzones.
    size_weights = [(16, 0.25), (32, 0.25), (64, 0.50), (128, 0.50),
                    (256, 1.0), (512, 1.0)]

  redzone_list = [Redzone(size, weight) for size, weight in size_weights]
  return utils.random_weighted_choice(redzone_list).size
def pick_ubsan_disabled(job_type):
  """Choose whether to disable UBSan in an ASan+UBSan build."""
  # This is only applicable in an ASan build.
  if environment.get_memory_tool_name(job_type) not in ('ASAN', 'HWASAN'):
    return False

  # Check if UBSan is enabled in this ASan build. If not, can't disable it.
  if not environment.get_value('UBSAN'):
    return False

  # Disable only on the zero roll, i.e. with low probability.
  roll = utils.random_number(0, DEFAULT_CHOOSE_PROBABILITY)
  return not roll
def pick_timeout_multiplier():
  """Return a random testcase timeout multiplier and adjust timeout."""
  fuzz_test_timeout = environment.get_value('FUZZ_TEST_TIMEOUT')
  custom_timeout_multipliers = environment.get_value(
      'CUSTOM_TIMEOUT_MULTIPLIERS')

  # Most of the time, stick with the default multiplier.
  if utils.random_number(0, DEFAULT_CHOOSE_PROBABILITY):
    return 1.0

  if custom_timeout_multipliers:
    # Since they are explicitly set in the job definition, it is fine to use
    # custom timeout multipliers even in the case where FUZZ_TEST_TIMEOUT is
    # set.
    return utils.random_element_from_list(custom_timeout_multipliers)

  if not fuzz_test_timeout:
    return utils.random_element_from_list([0.5, 1.5, 2.0, 3.0])

  # FUZZ_TEST_TIMEOUT is set and there are no custom multipliers: no change.
  return 1.0
def set_test_timeout(timeout, multipler):
  """Set the test timeout based on a timeout value and multiplier."""
  # NOTE: the parameter is spelled 'multipler' upstream; kept as-is for
  # keyword-argument compatibility with existing callers.
  scaled_timeout = int(timeout * multipler)
  environment.set_value('TEST_TIMEOUT', scaled_timeout)
  return scaled_timeout
def pick_window_argument():
  """Return a window argument with random size and x,y position.

  Randomly substitutes $WIDTH/$HEIGHT/$LEFT/$TOP placeholders in the default
  WINDOW_ARG, optionally appends a random seed argument, and writes the result
  back to the WINDOW_ARG environment variable.
  """
  default_window_argument = environment.get_value('WINDOW_ARG', '')
  window_argument_change_chance = not utils.random_number(
      0, DEFAULT_CHOOSE_PROBABILITY)

  window_argument = ''
  if window_argument_change_chance:
    window_argument = default_window_argument
    if window_argument:
      # Random window geometry; position is constrained to fall within the
      # chosen size.
      width = utils.random_number(
          100, utils.random_element_from_list([256, 1280, 2048]))
      height = utils.random_number(
          100, utils.random_element_from_list([256, 1024, 1536]))
      left = utils.random_number(0, width)
      top = utils.random_number(0, height)

      window_argument = window_argument.replace('$WIDTH', str(width))
      window_argument = window_argument.replace('$HEIGHT', str(height))
      window_argument = window_argument.replace('$LEFT', str(left))
      window_argument = window_argument.replace('$TOP', str(top))

  # FIXME: Random seed is currently passed along to the next job
  # via WINDOW_ARG. Rename it without breaking existing tests.
  random_seed_argument = environment.get_value('RANDOM_SEED')
  if random_seed_argument:
    if window_argument:
      window_argument += ' '
    # Seed spans the full signed 32-bit integer range.
    seed = utils.random_number(-2147483648, 2147483647)
    window_argument += '%s=%d' % (random_seed_argument.strip(), seed)

  environment.set_value('WINDOW_ARG', window_argument)
  return window_argument
def truncate_fuzzer_output(output, limit):
  """Truncate output in the middle according to limit.

  Args:
    output: The string to truncate.
    limit: Maximum allowed length of the returned string. Must be larger
        than the length of the truncation separator.

  Returns:
    `output` unchanged if it already fits within `limit`, otherwise a
    string of exactly `limit` characters consisting of the head and tail
    of `output` joined by a '...truncated...' separator.
  """
  # Bug fix: use <= so an output that exactly fits the limit is returned
  # unchanged instead of being needlessly truncated.
  if len(output) <= limit:
    return output

  separator = '\n...truncated...\n'
  reduced_limit = limit - len(separator)
  assert reduced_limit > 0

  # Give the extra character (for odd budgets) to the head of the output.
  left = reduced_limit // 2 + reduced_limit % 2
  right = reduced_limit // 2
  return ''.join([output[:left], separator, output[-right:]])
def convert_groups_to_crashes(
    groups: List[CrashGroup]) -> List[uworker_msg_pb2.CrashInfo]:
  """Converts groups to crashes (in an array of uworker_msg_pb2.CrashInfo) for
  JobRun."""
  crash_infos = []
  for group in groups:
    # Each group is summarized by its main crash plus aggregate counts.
    crash_infos.append(
        uworker_msg_pb2.CrashInfo(
            is_new=group.is_new(),
            count=len(group.crashes),
            crash_type=group.main_crash.crash_type,
            crash_state=group.main_crash.crash_state,
            security_flag=group.main_crash.security_flag))
  return crash_infos
def convert_crashes_to_dicts(
    crashes: List[uworker_msg_pb2.CrashInfo]) -> List[Dict[str, Any]]:
  """Converts crashes to groups (in an array of dicts) for JobRun."""
  crash_dicts = []
  for crash_info in crashes:
    crash_dicts.append({
        'is_new': crash_info.is_new,
        'count': crash_info.count,
        'crash_type': crash_info.crash_type,
        'crash_state': crash_info.crash_state,
        'security_flag': crash_info.security_flag,
    })
  return crash_dicts
def upload_job_run_stats(fuzzer_name: str, job_type: str, revision: int,
                         timestamp: float, new_crash_count: int,
                         known_crash_count: int, testcases_executed: int,
                         groups: List[Dict[str, Any]]):
  """Upload job run stats.

  Builds a fuzzer_stats.JobRun record (the new stats format), uploads it,
  and records the testcase run result metrics.
  """
  job_run_record = fuzzer_stats.JobRun(
      fuzzer_name, job_type, revision, timestamp, testcases_executed,
      new_crash_count, known_crash_count, groups)
  fuzzer_stats.upload_stats([job_run_record])

  _track_testcase_run_result(fuzzer_name, job_type, new_crash_count,
                             known_crash_count)
def store_fuzzer_run_results(testcase_file_paths, fuzzer, fuzzer_command,
                             fuzzer_output, fuzzer_return_code,
                             generated_testcase_count, expected_testcase_count,
                             generated_testcase_string, fuzz_task_input):
  """Store fuzzer run results in database.

  Uploads the fuzzer script log, decides whether this run's results should
  replace the previously stored ones on the Fuzzer entity, and if so builds
  and returns a StoreFuzzerRunResultsOutput proto (persisted later by
  postprocess). Returns None when the stored results are still fresh.
  """
  # Upload fuzzer script output to bucket.
  fuzzer_logs.upload_script_log(
      fuzzer_output, signed_upload_url=fuzz_task_input.script_log_upload_url)
  # Save the test results for the following cases.
  # 1. There is no result yet.
  # 2. There is no timestamp associated with the result.
  # 3. Last update timestamp is more than a day old.
  # 4. Return code is non-zero and was not found before.
  # 5. Testcases generated were fewer than expected in this run and zero return
  #    code did occur before and zero generated testcases didn't occur before.
  # TODO(mbarbella): Break this up for readability.
  # pylint: disable=consider-using-in
  save_test_results = (
      not fuzzer.result or not fuzzer.result_timestamp or
      dates.time_has_expired(fuzzer.result_timestamp, days=1) or
      (fuzzer_return_code != 0 and fuzzer_return_code != fuzzer.return_code) or
      (generated_testcase_count != expected_testcase_count and
       fuzzer.return_code == 0 and ' 0/' not in fuzzer.result))
  # pylint: enable=consider-using-in
  if not save_test_results:
    return None
  logs.log('Started storing results from fuzzer run.')
  fuzzer_run_results_output = uworker_msg_pb2.StoreFuzzerRunResultsOutput()
  if testcase_file_paths:
    # Upload the first generated testcase as a sample via the signed URL
    # prepared in preprocess.
    with open(testcase_file_paths[0], 'rb') as sample_testcase_file_handle:
      sample_testcase_file = sample_testcase_file_handle.read()
    fuzzer_run_results_output.uploaded_sample_testcase = True
    storage.upload_signed_url(sample_testcase_file,
                              fuzz_task_input.sample_testcase_upload_url)
  # Store fuzzer console output.
  bot_name = environment.get_value('BOT_NAME')
  if fuzzer_return_code is not None:
    fuzzer_return_code_string = 'Return code (%d).' % fuzzer_return_code
  else:
    fuzzer_return_code_string = 'Fuzzer timed out.'
  # Truncate to fit within the datastore entity size limit.
  truncated_fuzzer_output = truncate_fuzzer_output(fuzzer_output,
                                                   data_types.ENTITY_SIZE_LIMIT)
  console_output = (f'{bot_name}: {fuzzer_return_code_string}\n{fuzzer_command}'
                    f'\n{truncated_fuzzer_output}')
  fuzzer_run_results_output.console_output = console_output
  fuzzer_run_results_output.generated_testcase_string = (
      generated_testcase_string)
  # NOTE(review): fuzzer_return_code can be None on timeout (handled above);
  # assigning None to a proto scalar field would raise — confirm callers never
  # reach this line with None.
  fuzzer_run_results_output.fuzzer_return_code = fuzzer_return_code
  return fuzzer_run_results_output
def preprocess_store_fuzzer_run_results(fuzz_task_input):
  """Does preprocessing for store_fuzzer_run_results. More specifically, gets
  URLs to upload a sample testcase and the logs."""
  # Blob for the sample testcase: the key is kept so postprocess can record
  # it on the Fuzzer entity.
  sample_blob_name = blobs.generate_new_blob_name()
  fuzz_task_input.sample_testcase_upload_key = sample_blob_name
  fuzz_task_input.sample_testcase_upload_url = blobs.get_signed_upload_url(
      sample_blob_name)

  # Blob for the fuzzer script log; only the signed URL is needed downstream.
  log_blob_name = blobs.generate_new_blob_name()
  fuzz_task_input.script_log_upload_url = blobs.get_signed_upload_url(
      log_blob_name)
def postprocess_store_fuzzer_run_results(output):
  """Postprocess store_fuzzer_run_results.

  Persists the run results produced by utask_main (sample testcase key,
  console output, result string, return code) onto the Fuzzer entity,
  unless the fuzzer revision changed in the meantime.
  """
  # NOTE(review): singular proto message fields are typically always truthy;
  # verify this guard actually fires for an unset field (HasField may be
  # intended here).
  if not output.fuzz_task_output.fuzzer_run_results:
    return
  uworker_input = output.uworker_input
  fuzzer = data_types.Fuzzer.query(
      data_types.Fuzzer.name == output.uworker_input.fuzzer_name).get()
  if not fuzzer:
    logs.log_fatal_and_exit('Fuzzer does not exist, exiting.')
  fuzzer_run_results = output.fuzz_task_output.fuzzer_run_results
  if fuzzer.revision != output.fuzz_task_output.fuzzer_revision:
    # Results are only meaningful for the fuzzer version that produced them.
    logs.log('Fuzzer was recently updated, skipping results from old version.')
    return
  fuzzer.sample_testcase = (
      uworker_input.fuzz_task_input.sample_testcase_upload_key)
  fuzzer.console_output = fuzzer_run_results.console_output
  fuzzer.result = fuzzer_run_results.generated_testcase_string
  fuzzer.result_timestamp = datetime.datetime.utcnow()
  fuzzer.return_code = fuzzer_run_results.fuzzer_return_code
  fuzzer.put()
  logs.log('Finished storing results from fuzzer run.')
def get_regression(one_time_crasher_flag):
  """Get the right regression value.

  Regression ranges are not applicable ('NA') for one-time crashers and for
  custom binaries; otherwise the empty string means 'not yet computed'.
  """
  not_applicable = one_time_crasher_flag or build_manager.is_custom_binary()
  return 'NA' if not_applicable else ''
def get_fixed_or_minimized_key(one_time_crasher_flag):
  """Get the right fixed value.

  One-time crashers can't be minimized or verified fixed, so 'NA'; otherwise
  the empty string means 'not yet computed'.
  """
  if one_time_crasher_flag:
    return 'NA'
  return ''
def get_testcase_timeout_multiplier(timeout_multiplier, crash, test_timeout,
                                    thread_wait_timeout):
  """Get testcase timeout multiplier.

  Drops an increased multiplier back to 1.0 when the crash reproduced well
  within the unscaled timeout, i.e. the longer timeout wasn't needed.
  """
  crash_fits_base_timeout = (
      (crash.crash_time + thread_wait_timeout) <
      (test_timeout / timeout_multiplier))
  if timeout_multiplier > 1 and crash_fits_base_timeout:
    return 1.0
  return timeout_multiplier
def create_testcase(group, context):
  """Create a testcase based on crash.

  Stores a new Testcase entity for the group's main crash, attaches fuzzer
  metadata, and schedules follow-up tasks. Returns the created Testcase.
  """
  crash = group.main_crash
  fully_qualified_fuzzer_name = get_fully_qualified_fuzzer_name(context)
  # TODO(https://b.corp.google.com/issues/328691756): Set trusted based on the
  # job when we start doing untrusted fuzzing.
  testcase_id = data_handler.store_testcase(
      crash=crash,
      fuzzed_keys=crash.fuzzed_key,
      minimized_keys=get_fixed_or_minimized_key(group.one_time_crasher_flag),
      regression=get_regression(group.one_time_crasher_flag),
      fixed=get_fixed_or_minimized_key(group.one_time_crasher_flag),
      one_time_crasher_flag=group.one_time_crasher_flag,
      crash_revision=context.crash_revision,
      comment='Fuzzer %s generated testcase crashed in %d seconds (r%d)' %
      (fully_qualified_fuzzer_name, crash.crash_time, context.crash_revision),
      absolute_path=crash.absolute_path,
      fuzzer_name=context.fuzzer_name,
      fully_qualified_fuzzer_name=fully_qualified_fuzzer_name,
      job_type=context.job_type,
      archived=crash.archived,
      archive_filename=crash.archive_filename,
      http_flag=crash.http_flag,
      gestures=crash.gestures,
      redzone=context.redzone,
      disable_ubsan=context.disable_ubsan,
      window_argument=context.window_argument,
      timeout_multiplier=get_testcase_timeout_multiplier(
          context.timeout_multiplier, crash, context.test_timeout,
          context.thread_wait_timeout),
      minimized_arguments=crash.arguments,
      trusted=True)
  testcase = data_handler.get_testcase_by_id(testcase_id)

  # Batch the metadata writes; a single put() below persists them all.
  if context.fuzzer_metadata:
    for key, value in context.fuzzer_metadata.items():
      testcase.set_metadata(key, value, update_testcase=False)
    testcase.put()
  if crash.fuzzing_strategies:
    testcase.set_metadata(
        'fuzzing_strategies', crash.fuzzing_strategies, update_testcase=True)
  # If there is one, record the original file this testcase was mutated from.
  if (crash.file_path in context.testcases_metadata and
      'original_file_path' in context.testcases_metadata[crash.file_path] and
      context.testcases_metadata[crash.file_path]['original_file_path']):
    testcase_relative_path = utils.get_normalized_relative_path(
        context.testcases_metadata[crash.file_path]['original_file_path'],
        context.data_directory)
    testcase.set_metadata('original_file_path', testcase_relative_path)
  # Track that app args appended by trials are required.
  trial_app_args = environment.get_value('TRIAL_APP_ARGS')
  if trial_app_args:
    testcase.set_metadata('additional_required_app_args', trial_app_args)
  # Create tasks to
  # 1. Minimize testcase (minimize).
  # 2. Find regression range (regression).
  # 3. Find testcase impact on production branches (impact).
  # 4. Check whether testcase is fixed (progression).
  # 5. Get second stacktrace from another job in case of
  #    one-time crashers (stack).
  task_creation.create_tasks(testcase)
  return testcase
def filter_crashes(crashes: List[CrashInfo]) -> List[CrashInfo]:
  """Filter crashes based on is_valid()."""
  valid_crashes = []
  for crash in crashes:
    if crash.is_valid():
      valid_crashes.append(crash)
      continue
    # Log invalid crashes (with their stacktraces) before dropping them.
    logs.log(
        (f'Ignore crash (reason={crash.get_error()}, '
         f'type={crash.crash_type}, state={crash.crash_state})'),
        stacktrace=crash.crash_stacktrace)
  return valid_crashes
def get_engine(context):
  """Get the fuzzing engine.

  Returns the fuzz target's engine name, or '' for non-engine jobs.
  """
  target = context.fuzz_target
  return target.engine if target else ''
def get_fully_qualified_fuzzer_name(context):
  """Get the fully qualified fuzzer name.

  For engine jobs this is the fuzz target's fully qualified name; otherwise
  it falls back to the plain fuzzer name.
  """
  target = context.fuzz_target
  if target:
    return target.fully_qualified_name()
  return context.fuzzer_name
def write_crashes_to_big_query(group, context):
  """Write a group of crashes to BigQuery.

  Streams one row per crash into the date-partitioned 'crashes' table and
  updates the BIG_QUERY_WRITE_COUNT success/failure metrics. All errors are
  logged and swallowed — stats writing must never fail the fuzz task.
  """
  created_at = int(time.time())

  # Many of ChromeOS fuzz targets run on Linux bots, so we incorrectly set the
  # linux platform for this. We cannot change platform_id in testcase as
  # otherwise linux bots can no longer lease those testcase. So, just change
  # this value in crash stats. This helps cleanup task put correct OS label.
  if environment.is_chromeos_job(context.job_type):
    actual_platform = 'chrome'
  else:
    actual_platform = context.platform_id

  # Write to a specific partition.
  table_id = ('crashes$%s' % (
      datetime.datetime.utcfromtimestamp(created_at).strftime('%Y%m%d')))

  client = big_query.Client(dataset_id='main', table_id=table_id)

  # Insert ids make the streaming insert idempotent on retry.
  insert_id_prefix = ':'.join(
      [group.crashes[0].key, context.bot_name,
       str(created_at)])

  rows = []
  for index, crash in enumerate(group.crashes):
    created_testcase_id = None
    if crash == group.main_crash and group.newly_created_testcase:
      created_testcase_id = str(group.newly_created_testcase.key.id())

    rows.append(
        big_query.Insert(
            row={
                'crash_type': crash.crash_type,
                'crash_state': crash.crash_state,
                'created_at': created_at,
                'platform': actual_platform,
                'crash_time_in_ms': int(crash.crash_time * 1000),
                'parent_fuzzer_name': get_engine(context),
                'fuzzer_name': get_fully_qualified_fuzzer_name(context),
                'job_type': context.job_type,
                'security_flag': crash.security_flag,
                'project': context.project_name,
                'reproducible_flag': not group.one_time_crasher_flag,
                'revision': str(context.crash_revision),
                'new_flag': group.is_new() and crash == group.main_crash,
                'testcase_id': created_testcase_id
            },
            insert_id='%s:%s' % (insert_id_prefix, index)))

  row_count = len(rows)

  try:
    result = client.insert(rows)
    if result is None:
      # Happens in case the big query function is disabled (local development).
      return

    errors = result.get('insertErrors', [])
    failed_count = len(errors)

    monitoring_metrics.BIG_QUERY_WRITE_COUNT.increment_by(
        row_count - failed_count, {'success': True})
    monitoring_metrics.BIG_QUERY_WRITE_COUNT.increment_by(
        failed_count, {'success': False})

    for error in errors:
      logs.log_error(
          ('Ignoring error writing the crash (%s) to BigQuery.' %
           group.crashes[error['index']].crash_type),
          exception=Exception(error))
  except Exception:
    logs.log_error('Ignoring error writing a group of crashes to BigQuery')
    monitoring_metrics.BIG_QUERY_WRITE_COUNT.increment_by(
        row_count, {'success': False})
def _update_testcase_variant_if_needed(group, context):
  """Update testcase variant if this is not already covered by existing testcase
  variant on this job.

  Called when the group duplicates an existing testcase: records this job's
  reproduction of that testcase as a variant, unless a similar reproducible
  variant already exists or variant analysis is still pending.
  """
  assert group.existing_testcase
  variant = data_handler.get_or_create_testcase_variant(
      group.existing_testcase.key.id(), context.job_type)
  if not variant or variant.status == data_types.TestcaseVariantStatus.PENDING:
    # Either no variant created yet since minimization hasn't finished OR
    # variant analysis is not yet finished. Wait in both cases, since we
    # prefer existing testcase over current one.
    return

  if (variant.status == data_types.TestcaseVariantStatus.REPRODUCIBLE and
      variant.is_similar):
    # Already have a similar reproducible variant, don't need to update.
    return

  variant.reproducer_key = group.main_crash.fuzzed_key
  if group.one_time_crasher_flag:
    variant.status = data_types.TestcaseVariantStatus.FLAKY
  else:
    variant.status = data_types.TestcaseVariantStatus.REPRODUCIBLE
  variant.revision = context.crash_revision
  variant.crash_type = group.main_crash.crash_type
  variant.crash_state = group.main_crash.crash_state
  variant.security_flag = group.main_crash.security_flag
  variant.is_similar = True
  variant.put()
def process_crashes(crashes, context):
  """Process a list of crashes.

  Groups valid crashes by their deduplication key, creates testcases (or
  updates variants) per group, writes crash stats to BigQuery, and returns
  (new_crash_count, known_crash_count, processed_groups).
  """
  processed_groups = []
  new_crash_count = 0
  known_crash_count = 0

  def key_fn(crash):
    return crash.key

  # Filter invalid crashes.
  crashes = filter_crashes(crashes)

  # groupby requires its input sorted by the same key to form complete groups.
  group_of_crashes = itertools.groupby(sorted(crashes, key=key_fn), key_fn)

  for _, grouped_crashes in group_of_crashes:
    group = CrashGroup(list(grouped_crashes), context)

    # Archiving testcase to blobstore might fail for all crashes within this
    # group.
    if not group.main_crash:
      logs.log('Unable to store testcase in blobstore: %s' %
               group.crashes[0].crash_state)
      continue

    logs.log(
        'Process the crash group (file=%s, '
        'fuzzed_key=%s, '
        'return code=%s, '
        'crash time=%d, '
        'crash type=%s, '
        'crash state=%s, '
        'security flag=%s, '
        'crash stacktrace=%s)' %
        (group.main_crash.filename, group.main_crash.fuzzed_key,
         group.main_crash.return_code, group.main_crash.crash_time,
         group.main_crash.crash_type, group.main_crash.crash_state,
         group.main_crash.security_flag, group.main_crash.crash_stacktrace))

    if group.should_create_testcase():
      group.newly_created_testcase = create_testcase(
          group=group, context=context)
    else:
      _update_testcase_variant_if_needed(group, context)

    write_crashes_to_big_query(group, context)

    # A new group counts once as new; its remaining members are known dupes.
    if group.is_new():
      new_crash_count += 1
      known_crash_count += len(group.crashes) - 1
    else:
      known_crash_count += len(group.crashes)

    processed_groups.append(group)

    # Artificial delay to throttle appengine updates.
    time.sleep(1)

  logs.log('Finished processing crashes.')
  logs.log(f'New crashes: {new_crash_count}, known crashes: {known_crash_count}'
           f', processed groups: {processed_groups}')

  return new_crash_count, known_crash_count, processed_groups
def get_strategy_distribution_from_ndb():
  """Queries and returns the distribution stored in the ndb table.

  Each entry is a dict with 'strategy_name', 'probability' and 'engine'.
  """
  query = data_types.FuzzStrategyProbability.query()
  return [{
      'strategy_name': entry.strategy_name,
      'probability': entry.probability,
      'engine': entry.engine,
  } for entry in ndb_utils.get_all_from_query(query)]
def _get_issue_metadata_from_environment(variable_name):
  """Get issue metadata from environment.

  Reads the comma-separated values of `variable_name` and of its '_1'
  variation (needed when the value is specified in both the job and the bot
  environment), returning the stripped, non-empty entries.
  """
  raw_values = []
  for name in (variable_name, variable_name + '_1'):
    raw_values.extend(str(environment.get_value_string(name, '')).split(','))
  return [value.strip() for value in raw_values if value.strip()]
def _add_issue_metadata_from_environment(metadata):
  """Add issue metadata from environment.

  Merges AUTOMATIC_COMPONENTS into metadata['issue_components'] and
  AUTOMATIC_LABELS into metadata['issue_labels'], appending to any existing
  comma-separated values. Mutates `metadata` in place.
  """

  def _merge(existing, additions):
    # Append to the existing comma-separated value, if any.
    if not existing:
      return ','.join(additions)
    return ','.join(existing.split(',') + additions)

  for env_var, metadata_key in (('AUTOMATIC_COMPONENTS', 'issue_components'),
                                ('AUTOMATIC_LABELS', 'issue_labels')):
    values = _get_issue_metadata_from_environment(env_var)
    if values:
      metadata[metadata_key] = _merge(metadata.get(metadata_key), values)
def run_engine_fuzzer(engine_impl, target_name, sync_corpus_directory,
                      testcase_directory):
  """Run engine for fuzzing.

  Returns a (result, fuzzer_metadata, strategies) tuple from one engine
  fuzzing session. On trusted hosts, delegates to the untrusted runner.
  """
  if environment.is_trusted_host():
    from clusterfuzz._internal.bot.untrusted_runner import tasks_host
    return tasks_host.engine_fuzz(engine_impl, target_name,
                                  sync_corpus_directory, testcase_directory)

  build_dir = environment.get_value('BUILD_DIR')
  target_path = engine_common.find_fuzzer_path(build_dir, target_name)
  options = engine_impl.prepare(sync_corpus_directory, target_path, build_dir)

  # Reserve time for the engine's post-fuzzing work (e.g. merges) within the
  # overall budget.
  # NOTE(review): assumes FUZZ_TEST_TIMEOUT is set for engine jobs; a missing
  # value would make the subtraction fail — confirm.
  fuzz_test_timeout = environment.get_value('FUZZ_TEST_TIMEOUT')
  additional_processing_time = engine_impl.fuzz_additional_processing_timeout(
      options)
  fuzz_test_timeout -= additional_processing_time
  if fuzz_test_timeout <= 0:
    raise FuzzTaskError(f'Invalid engine timeout: '
                        f'{fuzz_test_timeout} - {additional_processing_time}')

  result = engine_impl.fuzz(target_path, options, testcase_directory,
                            fuzz_test_timeout)

  logs.log('Used strategies.', strategies=options.strategies)
  for strategy, value in options.strategies.items():
    result.stats['strategy_' + strategy] = value

  # Format logs with header and strategy information.
  log_header = engine_common.get_log_header(result.command,
                                            result.time_executed)

  formatted_strategies = engine_common.format_fuzzing_strategies(
      options.strategies)

  result.logs = log_header + '\n' + result.logs + '\n' + formatted_strategies

  fuzzer_metadata = {
      'fuzzer_binary_name': target_name,
  }

  fuzzer_metadata.update(engine_common.get_all_issue_metadata(target_path))
  _add_issue_metadata_from_environment(fuzzer_metadata)

  # Cleanup fuzzer temporary artifacts (e.g. mutations dir, merge dirs. etc).
  fuzzer_utils.cleanup()

  return result, fuzzer_metadata, options.strategies
def utask_main(uworker_input):
  """Runs the given fuzzer for one round."""
  # The session encapsulates all per-round fuzzing state.
  return _make_session(uworker_input).run()
def _pick_fuzz_target():
  """Picks a random fuzz target from job_type for use in fuzzing.

  Returns None for non-engine jobs and for split builds, where target
  selection does not apply.
  """
  if not environment.is_engine_fuzzer_job():
    logs.log('Not engine fuzzer. Not picking fuzz target.')
    return None

  if not has_standard_build():
    logs.log('Split build. Not picking fuzz target.')
    return None

  logs.log('Picking fuzz target.')
  weights_by_target = fuzzer_selection.get_fuzz_target_weights()
  return build_manager.set_random_fuzz_target_for_fuzzing_if_needed(
      weights_by_target.keys(), weights_by_target)
def utask_preprocess(fuzzer_name, job_type, uworker_env):
  """Preprocess untrusted task.

  Prepares everything utask_main needs to fuzz off-trust: fuzzer/data-bundle
  setup input, strategy selection, chosen fuzz target, upload URLs and (when
  LSan is enabled) the global leak blacklist. Returns the uworker Input proto.
  """
  setup_input = setup.preprocess_update_fuzzer_and_data_bundles(fuzzer_name)

  # Select fuzzing strategies via the multi-armed bandit before handing off.
  do_multiarmed_bandit_strategy_selection(uworker_env)
  environment.set_value('PROJECT_NAME', data_handler.get_project_name(job_type),
                        uworker_env)

  fuzz_target = _preprocess_get_fuzz_target(fuzzer_name, job_type)
  fuzz_task_input = uworker_msg_pb2.FuzzTaskInput()
  if fuzz_target:
    fuzz_task_input.fuzz_target.CopyFrom(
        uworker_io.entity_to_protobuf(fuzz_target))

  # Pre-generate signed upload URLs for the sample testcase and script log.
  preprocess_store_fuzzer_run_results(fuzz_task_input)

  if environment.get_value('LSAN'):
    # Copy global blacklist into local suppressions file if LSan is enabled.
    fuzz_task_input.global_blacklisted_functions.extend(
        leak_blacklist.get_global_blacklisted_functions())

  return uworker_msg_pb2.Input(
      fuzz_task_input=fuzz_task_input,
      job_type=job_type,
      fuzzer_name=fuzzer_name,
      uworker_env=uworker_env,
      setup_input=setup_input,
  )
def save_fuzz_targets(output):
  """Saves fuzz targets that were seen in the build to the database."""
  fuzz_targets = output.fuzz_task_output.fuzz_targets
  if not fuzz_targets:
    return

  logs.log(f'Saving fuzz targets: {output.fuzz_task_output.fuzz_targets}.')
  data_handler.record_fuzz_targets(output.uworker_input.fuzzer_name,
                                   fuzz_targets,
                                   output.uworker_input.job_type)
def utask_preprocess(testcase_id, job_type, uworker_env):
  """Preprocess in a trusted bot.

  Builds the uworker Input for the minimize task: loads the testcase,
  prepares setup input (honoring MINIMIZE_FUZZER_OVERRIDE), and bails out
  early (returning None) when the engine does not support minimization.
  """
  # Locate the testcase associated with the id.
  testcase = data_handler.get_testcase_by_id(testcase_id)

  # Allow setting up a different fuzzer.
  minimize_fuzzer_override = environment.get_value('MINIMIZE_FUZZER_OVERRIDE')

  setup_input = setup.preprocess_setup_testcase(
      testcase, uworker_env, fuzzer_override=minimize_fuzzer_override)

  # TODO(metzman): This should be removed.
  if not environment.is_minimization_supported():
    # TODO(ochang): More robust check for engine minimization support.
    _skip_minimization(testcase, 'Engine does not support minimization.')
    return None

  # Update comments to reflect bot information.
  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)
  uworker_input = uworker_msg_pb2.Input(
      job_type=job_type,
      testcase_id=str(testcase_id),
      testcase=uworker_io.entity_to_protobuf(testcase),
      setup_input=setup_input,
      minimize_task_input=_get_minimize_task_input(testcase),
      uworker_env=uworker_env)
  testcase_manager.preprocess_testcase_manager(testcase, uworker_input)
  return uworker_input
def utask_main(uworker_input: uworker_msg_pb2.Input):
  """Attempt to minimize a given testcase.

  Runs the minimization state machine — gestures, main file, file list,
  resources, then arguments — checkpointing partial progress into
  MinimizeTaskOutput whenever the task deadline is hit, so a follow-up task
  can resume from the recorded phase. Returns a uworker Output proto (with
  an error_type on any early exit).
  """
  testcase = uworker_io.entity_from_protobuf(uworker_input.testcase,
                                             data_types.Testcase)
  uworker_io.check_handling_testcase_safe(testcase)
  minimize_task_input = uworker_input.minimize_task_input

  # Setup testcase and its dependencies.
  file_list, testcase_file_path, uworker_error_output = setup.setup_testcase(
      testcase, uworker_input.job_type, uworker_input.setup_input)
  if uworker_error_output:
    return uworker_error_output

  # Initialize variables.
  max_timeout = environment.get_value('TEST_TIMEOUT', 10)
  app_arguments = environment.get_value('APP_ARGS')

  # Set up a custom or regular build based on revision.
  last_tested_crash_revision = testcase.get_metadata(
      'last_tested_crash_revision')

  crash_revision = last_tested_crash_revision or testcase.crash_revision
  build_manager.setup_build(crash_revision)

  # Check if we have an application path. If not, our build failed
  # to setup correctly.
  if not build_manager.check_app_path():
    logs.log_error('Unable to setup build for minimization.')
    return uworker_msg_pb2.Output(
        error_type=uworker_msg_pb2.ErrorType.MINIMIZE_SETUP)

  if environment.is_libfuzzer_job():
    # libFuzzer has its own dedicated minimization flow.
    fuzz_target = testcase_manager.get_fuzz_target_from_input(uworker_input)
    return do_libfuzzer_minimization(fuzz_target, minimize_task_input, testcase,
                                     testcase_file_path)

  if environment.is_engine_fuzzer_job():
    logs.log_error(
        'Engine does not support minimization. Something went wrong as this'
        ' should have been detected in preprocess.')
    return None

  max_threads = utils.maximum_parallel_processes_allowed()

  # Prepare the test case runner.
  crash_retries = environment.get_value('CRASH_RETRIES')
  warmup_timeout = environment.get_value('WARMUP_TIMEOUT')
  required_arguments = environment.get_value('REQUIRED_APP_ARGS', '')

  # Add any testcase-specific required arguments if needed.
  additional_required_arguments = testcase.get_metadata(
      'additional_required_app_args')
  if additional_required_arguments:
    required_arguments = f'{required_arguments} {additional_required_arguments}'

  input_directory = environment.get_value('FUZZ_INPUTS')

  # Get deadline to finish this task.
  deadline = tasks.get_task_completion_deadline()

  test_runner = TestRunner(testcase, testcase_file_path, file_list,
                           input_directory, app_arguments, required_arguments,
                           max_threads, deadline)

  # Verify the crash with a long timeout.
  warmup_crash_occurred = False
  result = test_runner.run(timeout=warmup_timeout, log_command=True)
  if result.is_crash():
    warmup_crash_occurred = True
    logs.log(f'Warmup crash occurred in {result.crash_time} seconds.')

  saved_unsymbolized_crash_state, flaky_stack, crash_times = (
      check_for_initial_crash(test_runner, crash_retries, testcase))

  # If the warmup crash occurred but we couldn't reproduce this in with
  # multiple processes running in parallel, try to minimize single threaded.
  reproducible_crash_count = (
      testcase_manager.REPRODUCIBILITY_FACTOR * crash_retries)
  if (len(crash_times) < reproducible_crash_count and warmup_crash_occurred and
      max_threads > 1):
    logs.log('Attempting to continue single-threaded.')

    max_threads = 1
    test_runner = TestRunner(testcase, testcase_file_path, file_list,
                             input_directory, app_arguments, required_arguments,
                             max_threads, deadline)

    saved_unsymbolized_crash_state, flaky_stack, crash_times = (
        check_for_initial_crash(test_runner, crash_retries, testcase))

  if not crash_times:
    # We didn't crash at all. This might be a legitimately unreproducible
    # test case, so it will get marked as such after being retried on other
    # bots.
    return uworker_msg_pb2.Output(
        error_type=uworker_msg_pb2.ErrorType.MINIMIZE_UNREPRODUCIBLE_CRASH)
  minimize_task_output = uworker_msg_pb2.MinimizeTaskOutput()

  if flaky_stack:
    testcase.flaky_stack = flaky_stack
    minimize_task_output.flaky_stack = flaky_stack

  is_redo = testcase.get_metadata('redo_minimize')
  if not is_redo and len(crash_times) < reproducible_crash_count:
    # Too flaky to minimize reliably; give up with a descriptive error.
    error_message = (
        'Crash occurs, but not too consistently. Skipping minimization '
        f'(crashed {len(crash_times)}/{crash_retries})')
    return uworker_msg_pb2.Output(
        error_message=error_message,
        minimize_task_output=minimize_task_output,
        error_type=uworker_msg_pb2.ErrorType.MINIMIZE_CRASH_TOO_FLAKY)

  test_runner.set_test_expectations(testcase.security_flag, flaky_stack,
                                    saved_unsymbolized_crash_state)

  # Use the max crash time unless this would be greater than the max timeout.
  test_timeout = min(max(crash_times), max_timeout) + 1
  logs.log(f'Using timeout {test_timeout} (was {max_timeout})')
  test_runner.timeout = test_timeout

  logs.log('Starting minimization.')

  # Phase 1: gestures.
  if should_attempt_phase(testcase, MinimizationPhase.GESTURES):
    gestures = minimize_gestures(test_runner, testcase)

    # We can't call check_deadline_exceeded_and_store_partial_minimized_testcase
    # at this point because we do not have a test case to store.

    if testcase.security_flag and len(testcase.gestures) != len(gestures):
      # Re-run security severity analysis since gestures affect the severity.
      testcase.security_severity = severity_analyzer.get_security_severity(
          testcase.crash_type, data_handler.get_stacktrace(testcase),
          uworker_input.job_type, bool(gestures))
      minimize_task_output.security_severity_updated = True
      if testcase.security_severity is not None:
        minimize_task_output.security_severity = testcase.security_severity

    testcase.gestures = gestures
    del minimize_task_output.gestures[:]
    minimize_task_output.gestures.extend(gestures)
    testcase.set_metadata('minimization_phase', MinimizationPhase.MAIN_FILE,
                          False)
    minimize_task_output.minimization_phase = MinimizationPhase.MAIN_FILE

    if time.time() > test_runner.deadline:
      return uworker_msg_pb2.Output(
          minimize_task_output=minimize_task_output,
          error_type=uworker_msg_pb2.ErrorType.
          MINIMIZE_DEADLINE_EXCEEDED_IN_MAIN_FILE_PHASE)

  # Phase 2: minimize the main file.
  data = utils.get_file_contents_with_fatal_error_on_failure(testcase_file_path)
  if should_attempt_phase(testcase, MinimizationPhase.MAIN_FILE):
    data = minimize_main_file(test_runner, testcase_file_path, data)

    if check_deadline_exceeded_and_store_partial_minimized_testcase(
        deadline, testcase, input_directory, file_list, data,
        testcase_file_path, minimize_task_input, minimize_task_output):
      return uworker_msg_pb2.Output(
          error_type=uworker_msg_pb2.ErrorType.MINIMIZE_DEADLINE_EXCEEDED,
          minimize_task_output=minimize_task_output)

    testcase.set_metadata('minimization_phase', MinimizationPhase.FILE_LIST,
                          False)
    minimize_task_output.minimization_phase = MinimizationPhase.FILE_LIST

  # Phase 3: minimize the file list.
  if should_attempt_phase(testcase, MinimizationPhase.FILE_LIST):
    if environment.get_value('MINIMIZE_FILE_LIST', True):
      file_list = minimize_file_list(test_runner, file_list, input_directory,
                                     testcase_file_path)

      if check_deadline_exceeded_and_store_partial_minimized_testcase(
          deadline, testcase, input_directory, file_list, data,
          testcase_file_path, minimize_task_input, minimize_task_output):
        return uworker_msg_pb2.Output(
            error_type=uworker_msg_pb2.ErrorType.MINIMIZE_DEADLINE_EXCEEDED,
            minimize_task_output=minimize_task_output)
    else:
      logs.log('Skipping minimization of file list.')

    testcase.set_metadata('minimization_phase', MinimizationPhase.RESOURCES,
                          False)
    minimize_task_output.minimization_phase = MinimizationPhase.RESOURCES

  # Phase 4: minimize any files remaining in the file list.
  if should_attempt_phase(testcase, MinimizationPhase.RESOURCES):
    if environment.get_value('MINIMIZE_RESOURCES', True):
      for dependency in file_list:
        minimize_resource(test_runner, dependency, input_directory,
                          testcase_file_path)

      if check_deadline_exceeded_and_store_partial_minimized_testcase(
          deadline, testcase, input_directory, file_list, data,
          testcase_file_path, minimize_task_input, minimize_task_output):
        return uworker_msg_pb2.Output(
            error_type=uworker_msg_pb2.ErrorType.MINIMIZE_DEADLINE_EXCEEDED,
            minimize_task_output=minimize_task_output)
    else:
      logs.log('Skipping minimization of resources.')

    testcase.set_metadata('minimization_phase', MinimizationPhase.ARGUMENTS,
                          False)
    minimize_task_output.minimization_phase = MinimizationPhase.ARGUMENTS

  # Phase 5: minimize the application arguments.
  if should_attempt_phase(testcase, MinimizationPhase.ARGUMENTS):
    app_arguments = minimize_arguments(test_runner, app_arguments)

    # Arguments must be stored here in case we time out below.
    testcase.minimized_arguments = app_arguments
    minimize_task_output.minimized_arguments = app_arguments

    if check_deadline_exceeded_and_store_partial_minimized_testcase(
        deadline, testcase, input_directory, file_list, data,
        testcase_file_path, minimize_task_input, minimize_task_output):
      return uworker_msg_pb2.Output(
          error_type=uworker_msg_pb2.ErrorType.MINIMIZE_DEADLINE_EXCEEDED,
          minimize_task_output=minimize_task_output)

  command = testcase_manager.get_command_line_for_application(
      testcase_file_path, app_args=app_arguments, needs_http=testcase.http_flag)
  last_crash_result = test_runner.last_failing_result

  store_minimized_testcase(testcase, input_directory, file_list, data,
                           testcase_file_path, minimize_task_input,
                           minimize_task_output)

  minimize_task_output.last_crash_result_dict.clear()
  minimize_task_output.last_crash_result_dict.update(
      _extract_crash_result(last_crash_result, command, minimize_task_input))

  return uworker_msg_pb2.Output(minimize_task_output=minimize_task_output)
def _cleanup_unused_blobs_from_storage(output: uworker_msg_pb2.Output):
  """Delete the blobs reserved during preprocess that utask_main did not
  end up consuming."""
  keep_testcase_blob = False
  keep_stacktrace_blob = False

  if output.HasField('minimize_task_output'):
    minimize_output = output.minimize_task_output
    # A populated minimized_keys means the testcase blob was actually used.
    keep_testcase_blob = minimize_output.HasField('minimized_keys')
    # Likewise, a stacktrace key with the blobstore prefix means the
    # stacktrace blob was written to.
    stack_key = minimize_output.last_crash_result_dict['crash_stacktrace']
    keep_stacktrace_blob = stack_key.startswith(
        data_types.BLOBSTORE_STACK_PREFIX)

  minimize_input = output.uworker_input.minimize_task_input
  if not keep_testcase_blob:
    blobs.delete_blob(minimize_input.testcase_blob_name)
  if not keep_stacktrace_blob:
    blobs.delete_blob(minimize_input.stacktrace_blob_name)
def update_testcase(output: uworker_msg_pb2.Output):
  """Updates the testcase using the values passed from utask_main.

  Runs at the beginning of utask_postprocess, before error handling, so any
  partial minimization progress is persisted even if utask_main reported an
  error.

  Args:
    output: The uworker output message; only fields actually present in its
      minimize_task_output are applied to the testcase entity.
  """
  if not output.HasField('minimize_task_output'):
    return
  minimize_task_output = output.minimize_task_output
  testcase = data_handler.get_testcase_by_id(output.uworker_input.testcase_id)
  _update_testcase_memory_tool_options(testcase,
                                       minimize_task_output.memory_tool_options)
  if minimize_task_output.security_severity_updated:
    # An "updated" flag with no severity value explicitly clears the field.
    if minimize_task_output.HasField('security_severity'):
      testcase.security_severity = minimize_task_output.security_severity
    else:
      testcase.security_severity = None
  if minimize_task_output.HasField('minimization_phase'):
    testcase.set_metadata('minimization_phase',
                          minimize_task_output.minimization_phase)
  if minimize_task_output.flaky_stack:
    testcase.flaky_stack = minimize_task_output.flaky_stack
  if minimize_task_output.HasField('minimized_arguments'):
    testcase.minimized_arguments = minimize_task_output.minimized_arguments
  if minimize_task_output.HasField('archive_state'):
    testcase.archive_state = minimize_task_output.archive_state
  if minimize_task_output.HasField('absolute_path'):
    testcase.absolute_path = minimize_task_output.absolute_path
  if minimize_task_output.gestures:
    # One must convert repeated fields to lists in order to save them using
    # ndb.
    testcase.gestures = list(minimize_task_output.gestures)
  if minimize_task_output.HasField('minimized_keys'):
    testcase.minimized_keys = minimize_task_output.minimized_keys
  testcase.put()
def handle_minimize_setup_error(output):
  """Handles errors occurring during build setup.

  For an overridden job, minimization is skipped outright; otherwise the
  minimize task is re-queued after a delay so the build can be retried.

  Args:
    output: The uworker output message carrying the original task input.
  """
  if environment.get_value('ORIGINAL_JOB_NAME'):
    testcase = data_handler.get_testcase_by_id(output.uworker_input.testcase_id)
    _skip_minimization(testcase, 'Failed to setup build for overridden job.')
  else:
    # Only recreate the task if this isn't an overridden job. It's possible
    # that a revision exists for the original job, but doesn't exist for the
    # overridden job.
    build_fail_wait = environment.get_value('FAIL_WAIT')
    tasks.add_task(
        'minimize',
        output.uworker_input.testcase_id,
        output.uworker_input.job_type,
        wait_time=build_fail_wait)
def handle_minimize_unreproducible_crash(output):
  """Records that the crash could not be reproduced during minimization."""
  testcase_id = output.uworker_input.testcase_id
  testcase = data_handler.get_testcase_by_id(testcase_id)
  data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                       'Unable to reproduce crash')
  task_creation.mark_unreproducible_if_flaky(testcase, 'minimize', True)
def handle_minimize_crash_too_flaky(output):
  """Gives up minimizing a too-flaky crash and schedules the post-minimize
  tasks."""
  # The crash reproduced at least once but not reliably enough to minimize;
  # other jobs may still have more luck with it.
  testcase = data_handler.get_testcase_by_id(output.uworker_input.testcase_id)
  testcase.minimized_keys = 'NA'
  data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                       output.error_message)
  task_creation.create_postminimize_tasks(testcase)
def handle_minimize_deadline_exceeded_in_main_file_phase(output):
  """Re-queues the minimize task after the deadline expired just before the
  main file phase started."""
  uworker_input = output.uworker_input
  tasks.add_task('minimize', uworker_input.testcase_id, uworker_input.job_type)
def handle_minimize_deadline_exceeded(output: uworker_msg_pb2.Output):
  """Retries minimization after a deadline overrun, or skips it entirely once
  the retry budget is exhausted."""
  testcase = data_handler.get_testcase_by_id(output.uworker_input.testcase_id)
  previous_attempts = testcase.get_metadata(
      'minimization_deadline_exceeded_attempts', default=0)
  if previous_attempts < MAX_DEADLINE_EXCEEDED_ATTEMPTS:
    testcase.set_metadata('minimization_deadline_exceeded_attempts',
                          previous_attempts + 1)
    tasks.add_task('minimize', output.uworker_input.testcase_id,
                   output.uworker_input.job_type)
  else:
    _skip_minimization(testcase,
                       'Exceeded minimization deadline too many times.')
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.