Return the path of the Peach pit for the given grammar. Return None if the pit does not exist or the grammar is None.
def get_path(grammar):
  """Return the path of the Peach pit for the given grammar. Return None if
  the pit does not exist or the grammar is None."""
  if grammar is None:
    return None

  pit_dir = os.path.join(environment.get_platform_resources_directory(),
                         'peach', 'pits')
  pit_path = os.path.join(pit_dir, grammar + '.xml')
  if not os.path.exists(pit_path):
    logs.log_error(
        'Pit file for "%s" grammar is not found.' % grammar,
        pit_path=pit_path)
    return None

  return pit_path
Generates syzkaller config file. Args: serial: (str) serial number of the device being fuzzed. work_dir_path: (str) path to working directory of syzkaller. binary_path: (str) path to syzkaller binaries. vmlinux_path: (str) path to the vmlinux file. config_path: (str) path to write the syzkaller config to. kcov: (boolean) true if coverage is enabled. reproduce: (boolean) true if repro is enabled. syzhub_address: (str) ip:host of the syzhub to connect to. syzhub_client: (str) syzhub client name. syzhub_key: (str) syzhub key.
def generate(serial,
             work_dir_path,
             binary_path,
             vmlinux_path,
             config_path,
             kcov=True,
             reproduce=True,
             syzhub_address=None,
             syzhub_client=None,
             syzhub_key=None,
             on_cuttlefish=False):
  """Generates syzkaller config file.

  Args:
    serial: (str) serial number of the device being fuzzed.
    work_dir_path: (str) path to working directory of syzkaller.
    binary_path: (str) path to syzkaller binaries.
    vmlinux_path: (str) path to the vmlinux file.
    config_path: (str) path to write the syzkaller config to.
    kcov: (boolean) true if coverage is enabled.
    reproduce: (boolean) true if repro is enabled.
    syzhub_address: (str) ip:host of the syzhub to connect to.
    syzhub_client: (str) syzhub client name.
    syzhub_key: (str) syzhub key.
  """
  devices = {}
  devices['devices'] = [serial]

  data = {}
  data['target'] = 'linux/arm64'
  data['reproduce'] = reproduce
  data['workdir'] = work_dir_path
  data['http'] = 'localhost:0'
  data['syzkaller'] = binary_path
  # TODO(hzawawy): consider what suppressions are best for Android.
  data['suppressions'] = ['do_rt_sigqueueinfo', 'do_rt_tgsigqueueinfo']
  data['vm'] = devices
  data['kernel_obj'] = vmlinux_path
  data['sandbox'] = 'android'
  data['ignores'] = ['WARNING:', 'INFO:']
  data['type'] = 'adb'
  data['procs'] = 1
  data['cover'] = kcov

  if on_cuttlefish:
    data['target'] = 'linux/amd64'
    data['disable_syscalls'] = ['openat$vhost_vsock']
    data['sandbox'] = 'none'

    device = {}
    device['serial'] = serial
    # Syzkaller uses cuttlefish kernel.log as console instead of serial
    # console. kernel.log will be collected within the respective
    # cuttlefish_runtime folder. We only have one instance launched at a time.
    cvd_dir = environment.get_value('CVD_DIR')
    device['console'] = f'{cvd_dir}/cuttlefish_runtime/kernel.log'
    devices['devices'] = [device]

  if syzhub_address and syzhub_client and syzhub_key:
    data['hub_addr'] = syzhub_address
    data['hub_client'] = syzhub_client
    data['hub_key'] = syzhub_key
    data['name'] = '{}-{}'.format(syzhub_client, serial)

  ensure_dir(config_path)
  with open(config_path, 'w') as write_file:
    json.dump(data, write_file)
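For orientation, here is a hedged sketch of the JSON this emits for a physical (non-Cuttlefish) device; the serial and paths below are made up for illustration and do not come from the real environment:

import json

# Hypothetical values, for illustration only.
sketch = {
    'target': 'linux/arm64',
    'reproduce': False,
    'workdir': '/fuzz-inputs-disk/syzkaller',   # Hypothetical path.
    'http': 'localhost:0',
    'syzkaller': '/build/syzkaller',            # Hypothetical path.
    'suppressions': ['do_rt_sigqueueinfo', 'do_rt_tgsigqueueinfo'],
    'vm': {'devices': ['HT85X1A01234']},        # Hypothetical serial.
    'kernel_obj': '/tmp/HT85X1A01234/vmlinux',  # Hypothetical path.
    'sandbox': 'android',
    'ignores': ['WARNING:', 'INFO:'],
    'type': 'adb',
    'procs': 1,
    'cover': True,
}
print(json.dumps(sketch, indent=2))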
Return work directory for Syzkaller.
def get_work_dir():
  """Return work directory for Syzkaller."""
  work_dir = os.path.join(
      environment.get_value('FUZZ_INPUTS_DISK'), 'syzkaller')
  os.makedirs(work_dir, exist_ok=True)
  return work_dir
Get arguments for a given fuzz target.
def get_config():
  """Get arguments for a given fuzz target."""
  device_serial = environment.get_value('ANDROID_SERIAL')
  build_dir = environment.get_value('BUILD_DIR')
  temp_dir = fuzzer_utils.get_temp_dir()

  binary_path = os.path.join(build_dir, 'syzkaller')
  json_config_path = os.path.join(temp_dir, 'config.json')
  default_vmlinux_path = os.path.join('/tmp', device_serial, 'vmlinux')
  vmlinux_path = environment.get_value('VMLINUX_PATH', default_vmlinux_path)

  syzhub_address = environment.get_value('SYZHUB_ADDRESS')
  syzhub_client = environment.get_value('SYZHUB_CLIENT')
  syzhub_key = environment.get_value('SYZHUB_KEY')
  on_cuttlefish = environment.is_android_cuttlefish()

  config.generate(
      serial=device_serial,
      work_dir_path=get_work_dir(),
      binary_path=binary_path,
      vmlinux_path=vmlinux_path,
      config_path=json_config_path,
      kcov=True,
      reproduce=False,
      syzhub_address=syzhub_address,
      syzhub_client=syzhub_client,
      syzhub_key=syzhub_key,
      on_cuttlefish=on_cuttlefish)
  return ['-config', json_config_path]
Return location of coverage file for Syzkaller.
def get_cover_file_path():
  """Return location of coverage file for Syzkaller."""
  return os.path.join(get_work_dir(), 'coverfile')
Return a syzkaller runner object.
def get_runner(fuzzer_path):
  """Return a syzkaller runner object."""
  return AndroidSyzkallerRunner(fuzzer_path)
Upload kcov data to a cloud storage bucket.
def _upload_kernel_coverage_data(kcov_path, kernel_bid):
  """Upload kcov data to a cloud storage bucket."""
  bucket_name = local_config.ProjectConfig().get('coverage.reports.bucket')
  if not bucket_name:
    return

  formatted_date = str(utils.utcnow().date().isoformat())
  identifier = environment.get_value('BOT_NAME') + str(
      utils.utcnow().isoformat())
  gcs_url = (f'gs://{bucket_name}/syzkaller/{formatted_date}/{kernel_bid}/'
             f'{identifier}')
  if storage.copy_file_to(kcov_path, gcs_url):
    logs.log(f'Copied kcov data to {gcs_url}.')
Run Android initialization.
def run():
  """Run Android initialization."""
  init_runner.run()

  # Set cuttlefish device serial if needed.
  if environment.is_android_cuttlefish():
    logs.log('Running Android init script on Cuttlefish.')
    android.adb.set_cuttlefish_device_serial()
  else:
    logs.log('Running Android init script on non-Cuttlefish.')

  # Check if we need to reflash device to latest build.
  logs.log('Init: flash_to_latest_build_if_needed.')
  android.flash.flash_to_latest_build_if_needed()

  # Reconnect to cuttlefish device if connection is ever lost.
  if environment.is_android_cuttlefish():
    logs.log('Init: connect_to_cuttlefish_device.')
    android.adb.connect_to_cuttlefish_device()

  # Reboot to bring device in a good state if not done recently.
  if android.adb.time_since_last_reboot() > TIME_SINCE_REBOOT_MIN_THRESHOLD:
    logs.log('Init: reboot.')
    android.device.reboot()

  # Make sure that device is in a good condition before we move forward.
  logs.log('Init: wait_until_fully_booted.')
  android.adb.wait_until_fully_booted()

  # Wait until battery charges to a minimum level and temperature threshold.
  android.battery.wait_until_good_state()

  # Initialize environment settings.
  android.device.initialize_environment()
Run Chrome OS initialization.
def run():
  """Run Chrome OS initialization."""
  init_runner.run()
Initialize a device before running a task.
def run():
  """Initialize a device before running a task."""
  init_runner.run()
Get the init extension for a platform.
def _extension(platform):
  """Get the init extension for a platform."""
  if platform == 'windows':
    return '.ps1'

  return '.bash'
Run custom platform specific init scripts.
def run():
  """Run custom platform specific init scripts."""
  platform = environment.platform().lower()
  platform = environment.base_platform(platform)

  script_path = os.path.join(environment.get_config_directory(), SCRIPT_DIR,
                             platform + _extension(platform))
  if not os.path.exists(script_path):
    return

  os.chmod(script_path, 0o750)
  if script_path.endswith('.ps1'):
    cmd = 'powershell.exe ' + script_path
  else:
    cmd = script_path

  try:
    process_handler.run_process(
        cmd,
        timeout=1800,
        need_shell=True,
        testcase_run=False,
        ignore_children=True)
  except Exception:
    logs.log_error('Failed to execute platform initialization script.')
Run Linux initialization.
def run():
  """Run Linux initialization."""
  init_runner.run()
Execute command and return output as an iterator.
def _execute(cmd):
  """Execute command and return output as an iterator."""
  proc = subprocess.Popen(
      cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
  try:
    for line in iter(proc.stdout.readline, b''):
      yield line.decode('utf-8')
  finally:
    proc.kill()
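Because _execute is a generator, callers can stream the output of a long-running command line by line; a minimal usage sketch (the echo command is just an illustration):

# Stream the output of a short command, one decoded line at a time.
for line in _execute('echo hello'):
  print(line, end='')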
Get launch service path from lsregister.
def get_launch_service_path():
  """Get launch service path from lsregister."""
  for line in _execute(LSREGISTER_CMD):
    m = LAUNCH_SERVICE_PATH_REGEX.match(line)
    if not m:
      continue

    return '/'.join(m.group(1).split('/')[:5])

  return None
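The '/'.join(m.group(1).split('/')[:5]) expression trims the matched path to its first four components (the leading slash yields an empty first element); an illustration with a hypothetical matched path:

# Hypothetical path captured by LAUNCH_SERVICE_PATH_REGEX, for illustration.
matched = '/private/var/folders/ab/T/com.apple.LaunchServices/extra'
print('/'.join(matched.split('/')[:5]))  # -> /private/var/folders/ab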
See crbug.com/661221 for more info.
def clear_launch_service_data():
  """See crbug.com/661221 for more info."""
  path = get_launch_service_path()
  if not path or not os.path.exists(path):
    return

  # Best effort removal. We use shutil instead of shell.remove_directory since
  # it's too noisy and there are many files that cannot be removed.
  shutil.rmtree(os.path.join(path, '0'), ignore_errors=True)
  shutil.rmtree(os.path.join(path, 'T'), ignore_errors=True)
Run the initialization for Mac.
def run():
  """Run the initialization for Mac."""
  init_runner.run()
  clear_launch_service_data()
Clean temporary directories.
def clean_temp_directories():
  """Clean temporary directories."""
  for temp_directory in TEMP_DIRECTORIES:
    temp_directory_full_path = os.path.abspath(
        os.path.expandvars(temp_directory))
    shell.remove_directory(
        temp_directory_full_path, recreate=True, ignore_errors=True)
Run the initialization for Windows.
def run():
  """Run the initialization for Windows."""
  init_runner.run()
  clean_temp_directories()
Helper function. Decreases the index from cur_index while the condition is satisfied.
def step_back_while(cur_index, condition):
  """Helper function. Decreases the index from cur_index while the condition
  is satisfied."""
  while cur_index >= 0 and condition(cur_index):
    cur_index -= 1

  return cur_index
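A quick usage sketch: walking left from the end of a buffer until a non-space byte is found (the data and index here are illustrative):

data = b'abc   '
index = step_back_while(len(data) - 1, lambda i: data[i:i + 1] == b' ')
print(index)  # -> 2, the position of the last non-space byte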
Default string tokenizer which splits on newlines.
def _default_tokenizer(s):
  """Default string tokenizer which splits on newlines."""
  return s.split(b'\n')
Default token combiner which assumes each token is a line.
def _default_combiner(tokens):
  """Default token combiner which assumes each token is a line."""
  return b'\n'.join(tokens)
Minimize a file.
def main():
  """Minimize a file."""
  minimizers = {
      'chunk': chunk_minimizer.ChunkMinimizer,
      'html': html_minimizer.HTMLMinimizer,
      'js': js_minimizer.JSMinimizer,
      'line': delta_minimizer.DeltaMinimizer,
  }

  parser = argparse.ArgumentParser()
  parser.add_argument(
      '-t',
      '--threads',
      default=minimizer.DEFAULT_THREAD_COUNT,
      type=int,
      help='number of parallel instances')
  parser.add_argument(
      '-m',
      '--minimizer',
      choices=list(minimizers.keys()),
      default='line',
      help='minimization strategy')
  parser.add_argument(
      '-o', '--output-file', help='path to minimized output file')
  parser.add_argument(
      'COMMAND', help='command (quoted) to run for an individual test')
  parser.add_argument('FILE', help='file to minimize')
  args = vars(parser.parse_args(sys.argv[1:]))

  thread_count = args['threads']
  selected_minimizer = minimizers[args['minimizer']]
  command = args['COMMAND']
  file_path = args['FILE']
  file_extension = os.path.splitext(file_path)[1]
  output_file_path = args['output_file']
  if not output_file_path:
    output_file_path = '%s.min' % file_path

  utils.set_test_command(command)

  try:
    with open(file_path, 'rb') as file_handle:
      data = file_handle.read()
  except OSError:
    print('Unable to open input file %s.' % file_path)
    sys.exit(1)

  # Do not print an additional newline after minimization.
  minimized_output = selected_minimizer.run(
      data, thread_count=thread_count, file_extension=file_extension)

  print('Writing minimized output to %s.' % output_file_path)
  try:
    with open(output_file_path, 'wb') as file_handle:
      file_handle.write(minimized_output)
  except OSError:
    print('Unable to write output file %s.' % output_file_path)
    sys.exit(1)
Return string representation for size.
def get_size_string(size):
  """Return string representation for size."""
  if size < 1 << 10:
    return '%d B' % size
  if size < 1 << 20:
    return '%d KB' % (size >> 10)
  if size < 1 << 30:
    return '%d MB' % (size >> 20)
  return '%d GB' % (size >> 30)
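Sample outputs, for illustration:

print(get_size_string(512))      # -> 512 B
print(get_size_string(2048))     # -> 2 KB
print(get_size_string(5 << 20))  # -> 5 MB
print(get_size_string(3 << 30))  # -> 3 GB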
Return true if the stacktrace has at least one marker in the marker list.
def has_marker(stacktrace, marker_list):
  """Return true if the stacktrace has at least one marker in the marker
  list."""
  for marker in marker_list:
    if marker in stacktrace:
      return True

  return False
Set the command used for testing.
def set_test_command(new_test_command):
  """Set the command used for testing."""
  global test_command
  test_command = shlex.split(new_test_command)
Set the number of times to attempt the test.
def set_test_attempts(new_attempts):
  """Set the number of times to attempt the test."""
  global attempts
  attempts = new_attempts
Wrapper function to verify that a test does not fail for multiple runs.
def test(test_path):
  """Wrapper function to verify that a test does not fail for multiple runs."""
  for _ in range(attempts):
    if not single_test_run(test_path):
      return False

  return True
Hacky test function that checks for certain common errors.
def single_test_run(test_path):
  """Hacky test function that checks for certain common errors."""
  if not test_command:
    raise errors.NoCommandError

  args = test_command + [test_path]
  try:
    console_output = subprocess.check_output(args, stderr=subprocess.STDOUT)
  except subprocess.CalledProcessError as error:
    console_output = error.output

  # If we meet one of these conditions, assume we crashed.
  if ((has_marker(console_output, STACKTRACE_TOOL_MARKERS) and
       has_marker(console_output, STACKTRACE_END_MARKERS)) or
      has_marker(console_output, CHECK_FAILURE_MARKERS)):
    print('Crashed, current test size %s.' %
          get_size_string(os.path.getsize(test_path)))
    return False

  # No crash, test passed.
  print('Not crashed, current test size %s.' %
        get_size_string(os.path.getsize(test_path)))
  return True
HTML tokenizer.
def tokenize(data):
  """HTML tokenizer."""
  return AntlrTokenizer(HTMLLexer).tokenize(data)
Dummy token combiner.
def token_combiner(tokens):
  """Dummy token combiner."""
  return ''.join(tokens)
Compute rolls between the start and end revision.
def _compute_rolls(start_revisions_dict, end_revisions_dict):
  """Compute rolls between the start and end revision."""
  result = []
  for path, entry in end_revisions_dict.items():
    url, end_sha = _extract_url_and_sha_from_deps_entry(entry)

    start_sha = None
    if path in start_revisions_dict:
      start_entry = start_revisions_dict[path]
      _, start_sha = _extract_url_and_sha_from_deps_entry(start_entry)

    # Skip adding dependencies that were unchanged between the two DEPS files.
    if start_sha == end_sha:
      continue

    current_roll = {
        'dep_path': path,
        'repo_url': url,
        'new_revision': end_sha,
    }

    # Unless this is new code, include the earlier revision as well.
    if start_sha:
      current_roll['old_revision'] = start_sha

    result.append(current_roll)

  return result
Split a DEPS file entry into a URL and git sha.
def _extract_url_and_sha_from_deps_entry(entry):
  """Split a DEPS file entry into a URL and git sha."""
  assert 'url' in entry and 'rev' in entry, 'Unexpected format: %s' % entry
  url = entry['url']
  sha = entry['rev']

  # Strip unnecessary ".git" from the URL where applicable.
  if url.endswith('.git'):
    url = url[:-len('.git')]

  return url, sha
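A hedged illustration of the expected entry shape; the URL and sha below are made up:

entry = {
    'url': 'https://chromium.googlesource.com/v8/v8.git',  # Hypothetical.
    'rev': 'deadbeefcafe0123456789abcdef0123456789ab',     # Hypothetical.
}
url, sha = _extract_url_and_sha_from_deps_entry(entry)
print(url)  # -> https://chromium.googlesource.com/v8/v8 (".git" stripped)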
Convert a dict of dependency rolls to the format Predator expects.
def _format_component_revisions_for_predator(component_revisions):
  """Convert a dict of dependency rolls to the format Predator expects."""
  result = []
  for path, entry in component_revisions.items():
    url, sha = _extract_url_and_sha_from_deps_entry(entry)
    result.append({
        'dep_path': path,
        'repo_url': url,
        'revision': sha,
    })

  return result
Return a bool and error message indicating whether this testcase is applicable to Predator.
def _is_predator_testcase(testcase):
  """Return a bool and error message indicating whether this testcase is
  applicable to Predator."""
  if build_manager.is_custom_binary():
    return False, 'Not applicable to custom binaries.'

  if testcase.regression != 'NA':
    if not testcase.regression:
      return False, 'No regression range, wait for regression task to finish.'

    if ':' not in testcase.regression:
      return False, 'Invalid regression range %s.' % testcase.regression

  return True, None
Reduce noise from the stacktrace and limit its size to avoid the pub/sub request limit of one megabyte.
def _filter_stacktrace(stacktrace):
  """Reduce noise from the stacktrace and limit its size to avoid the pub/sub
  request limit of one megabyte."""
  filtered_stacktrace_size = 0
  filtered_stacktrace_lines = []

  for line in reversed(stacktrace.splitlines()):
    # Exclude uninteresting lines such as ones from verbose logging, info, etc.
    if UNINTERESTING_LINES_REGEX.match(line):
      continue

    new_size = filtered_stacktrace_size + len(line) + 1
    if new_size > data_types.PUBSUB_REQUEST_LIMIT:
      break

    filtered_stacktrace_lines.append(line)
    filtered_stacktrace_size = new_size

  return '\n'.join(reversed(filtered_stacktrace_lines))
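The function keeps the most recent frames by walking the trace bottom-up until the size budget is hit. A tiny standalone sketch of the same pattern, with a toy 16-byte budget and a toy noise filter (both hypothetical; the real limit is data_types.PUBSUB_REQUEST_LIMIT):

import re

LIMIT = 16  # Toy budget, for illustration only.
BORING = re.compile(r'^\[INFO\]')

def tail_under_limit(trace):
  size, kept = 0, []
  for line in reversed(trace.splitlines()):
    if BORING.match(line):
      continue
    if size + len(line) + 1 > LIMIT:
      break
    kept.append(line)
    size += len(line) + 1
  return '\n'.join(reversed(kept))

print(tail_under_limit('[INFO] noise\nframe1\nframe2\nframe3'))
# -> 'frame2\nframe3' (frame1 would push past the 16-byte budget)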
Get a component revisions dict and git sha for a revision and job type. Revision is expected to be a commit position.
def _prepare_component_revisions_dict(revision, job_type):
  """Get a component revisions dict and git sha for a revision and job type.
  Revision is expected to be a commit position."""
  revisions_dict = revisions.get_component_revisions_dict(revision, job_type)
  if not revisions_dict:
    return revisions_dict, None

  # Other code depends on the "/" prefix, but it doesn't match the DEPS format
  # that we would usually expect. Clean these values up before sending to
  # predator.
  revisions_dict['src'] = revisions_dict.pop('/src')
  return revisions_dict, revisions_dict['src']['rev']
Sets predator result with error.
def _set_predator_result_with_error(testcase, error_message):
  """Sets predator result with error."""
  predator_result = {
      'result': {
          'found': False,
          'project': '',
          'suspected_components': '',
          'suspected_cls': '',
          'feedback_url': '',
          'error_message': error_message,
      }
  }

  testcase = data_handler.get_testcase_by_id(testcase.key.id())
  testcase.set_metadata(
      'predator_result', predator_result, update_testcase=False)
  testcase.delete_metadata('blame_pending', update_testcase=False)
  testcase.put()
Prepare the json sent to the Predator service for the given test case.
def _prepare_predator_message(testcase):
  """Prepare the json sent to the Predator service for the given test case."""
  result, error_message = _is_predator_testcase(testcase)
  if not result:
    _set_predator_result_with_error(testcase, error_message)
    return None

  crash_revisions_dict, crash_revision_hash = (
      _prepare_component_revisions_dict(testcase.crash_revision,
                                        testcase.job_type))
  # Do a None check since we can return {} for revision = 0.
  if crash_revisions_dict is None:
    _set_predator_result_with_error(
        testcase, 'Failed to fetch component revisions for revision %s.' %
        testcase.crash_revision)
    return None

  dependency_rolls = []
  start_revision_hash = end_revision_hash = None
  if ':' in testcase.regression:
    regression_parts = testcase.regression.split(':', 1)
    start_revision = int(regression_parts[0])
    end_revision = int(regression_parts[1])

    start_revisions_dict, start_revision_hash = (
        _prepare_component_revisions_dict(start_revision, testcase.job_type))
    # Do a None check since we can return {} for revision = 0.
    if start_revisions_dict is None:
      _set_predator_result_with_error(
          testcase, 'Failed to fetch component revisions for revision %s.' %
          start_revision)
      return None

    end_revisions_dict, end_revision_hash = (
        _prepare_component_revisions_dict(end_revision, testcase.job_type))
    # Do a None check since we can return {} for revision = 0.
    if end_revisions_dict is None:
      _set_predator_result_with_error(
          testcase, 'Failed to fetch component revisions for revision %s.' %
          end_revision)
      return None

    if start_revision != 0:
      dependency_rolls = _compute_rolls(start_revisions_dict,
                                        end_revisions_dict)

  # Put the current revisions dictionary in the format predator expects.
  crash_revision_component_revisions_list = (
      _format_component_revisions_for_predator(crash_revisions_dict))

  # In addition to the start and end revisions, Predator expects the
  # regression range to include the dependency path and repository URL in the
  # same way that they would be included in the dependency rolls. Note that we
  # do not take this from the rolls dict directly as it may not be available.
  src_entry = [
      entry for entry in crash_revision_component_revisions_list
      if entry['dep_path'] == 'src'
  ][0]

  # TODO(mbarbella): This is a hack since ClusterFuzz relies on "src" as a
  # special-cased path, but this is only going to be the correct repository
  # root path some of the time. For certain cases, we must update it.
  repo_url = src_entry['repo_url']
  real_dep_path = SRC_COMPONENT_OVERRIDES.get(repo_url, 'src')
  if real_dep_path != 'src':
    for dependency_list in [
        dependency_rolls, crash_revision_component_revisions_list
    ]:
      for entry in dependency_list:
        if entry['dep_path'] == 'src':
          entry['dep_path'] = real_dep_path
          break

  regression_range = {
      'dep_path': real_dep_path,
      'repo_url': repo_url,
      'old_revision': start_revision_hash,
      'new_revision': end_revision_hash,
  }

  crash_stacktrace = _filter_stacktrace(data_handler.get_stacktrace(testcase))

  return pubsub.Message(
      data=json.dumps({
          'stack_trace': crash_stacktrace,
          'crash_revision': crash_revision_hash,
          'customized_data': {
              'regression_range': regression_range,
              'dependency_rolls': dependency_rolls,
              'dependencies': crash_revision_component_revisions_list,
              'crash_type': testcase.crash_type,
              'crash_address': testcase.crash_address,
              'sanitizer': environment.get_memory_tool_name(testcase.job_type),
              'security_flag': testcase.security_flag,
              'job_type': testcase.job_type,
              'testcase_id': testcase.key.id()
          },
          'platform': testcase.platform,
          'client_id': 'clusterfuzz',
          'signature': testcase.crash_state,
      }).encode('utf-8'))
Clear blame result and set pending bit.
def _clear_blame_result_and_set_pending_flag(testcase):
  """Clear blame result and set pending bit."""
  testcase.set_metadata('blame_pending', True, update_testcase=False)
  testcase.set_metadata('predator_result', None, update_testcase=False)
  testcase.put()
Attempt to find the CL introducing the bug associated with testcase_id.
def execute_task(testcase_id, _):
  """Attempt to find the CL introducing the bug associated with testcase_id."""
  # Locate the testcase associated with the id.
  testcase = data_handler.get_testcase_by_id(testcase_id)

  # Make sure that predator topic is configured. If not, nothing to do here.
  topic = db_config.get_value('predator_crash_topic')
  if not topic:
    logs.log('Predator is not configured, skipping blame task.')
    return

  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)

  # Prepare pubsub message to send to predator.
  message = _prepare_predator_message(testcase)
  if not message:
    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(
        testcase, data_types.TaskState.ERROR,
        'Failed to generate request for Predator')
    return

  # Clear existing results and mark blame result as pending.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  _clear_blame_result_and_set_pending_flag(testcase)

  # Post request to pub sub.
  client = pubsub.PubSubClient()
  message_ids = client.publish(topic, [message])
  logs.log('Successfully published testcase %s to Predator. Message IDs: %s.' %
           (testcase_id, message_ids))
  data_handler.update_testcase_comment(testcase, data_types.TaskState.FINISHED)
Cleans state before and after a task is executed.
def cleanup_task_state():
  """Cleans state before and after a task is executed."""
  # Cleanup stale processes.
  process_handler.cleanup_stale_processes()

  # Clear build urls, temp and testcase directories.
  shell.clear_build_urls_directory()
  shell.clear_crash_stacktraces_directory()
  shell.clear_testcase_directories()
  shell.clear_temp_directory()
  shell.clear_system_temp_directory()
  shell.clear_device_temp_directories()

  # Reset memory tool environment variables.
  environment.reset_current_memory_tool_options()

  # Call python's garbage collector.
  utils.python_gc()
Return true if the current cpu architecture can run this job.
def is_supported_cpu_arch_for_job():
  """Return true if the current cpu architecture can run this job."""
  cpu_arch = environment.get_cpu_arch()
  if not cpu_arch:
    # No cpu architecture check is defined for this platform, bail out.
    return True

  supported_cpu_arch = environment.get_value('CPU_ARCH')
  if not supported_cpu_arch:
    # No specific cpu architecture requirement specified in job, bail out.
    return True

  # Convert to a list in case anyone specifies the value as a single string.
  # Note that list() on a string would split it into characters, so wrap it
  # instead.
  if isinstance(supported_cpu_arch, str):
    supported_cpu_arch_list = [supported_cpu_arch]
  else:
    supported_cpu_arch_list = list(supported_cpu_arch)

  return cpu_arch in supported_cpu_arch_list
Process the environment variable string included with a job.
def update_environment_for_job(environment_string):
  """Process the environment variable string included with a job."""
  # Now parse the job's environment definition.
  env = environment.parse_environment_definition(environment_string)
  uworker_env = env.copy()

  for key, value in env.items():
    environment.set_value(key, value)

  # If we share the build with another job type, force us to be a custom
  # binary job type.
  if environment.get_value('SHARE_BUILD_WITH_JOB_TYPE'):
    environment.set_value('CUSTOM_BINARY', True)
    uworker_env['CUSTOM_BINARY'] = 'True'

  # Allow the default FUZZ_TEST_TIMEOUT and MAX_TESTCASES to be overridden on
  # machines that are preempted more often.
  fuzz_test_timeout_override = environment.get_value(
      'FUZZ_TEST_TIMEOUT_OVERRIDE')
  if fuzz_test_timeout_override:
    environment.set_value('FUZZ_TEST_TIMEOUT', fuzz_test_timeout_override)
    uworker_env['FUZZ_TEST_TIMEOUT'] = fuzz_test_timeout_override

  max_testcases_override = environment.get_value('MAX_TESTCASES_OVERRIDE')
  if max_testcases_override:
    environment.set_value('MAX_TESTCASES', max_testcases_override)
    uworker_env['MAX_TESTCASES'] = max_testcases_override

  uworker_env['JOB_NAME'] = environment.get_value('JOB_NAME')
  if environment.is_trusted_host():
    env['JOB_NAME'] = environment.get_value('JOB_NAME')
    from clusterfuzz._internal.bot.untrusted_runner import \
        environment as worker_environment
    worker_environment.update_environment(env)

  return uworker_env
Set TASK_PAYLOAD and unset it when the task completes.
def set_task_payload(func):
  """Set TASK_PAYLOAD and unset it when the task completes."""

  @functools.wraps(func)
  def wrapper(task_name, task_argument, job_name, *args, **kwargs):
    """Wrapper."""
    payload = tasks.construct_payload(task_name, task_argument, job_name)
    environment.set_value('TASK_PAYLOAD', payload)
    try:
      return func(task_name, task_argument, job_name, *args, **kwargs)
    except:  # Truly catch *all* exceptions.
      e = sys.exc_info()[1]
      e.extras = {'task_payload': environment.get_value('TASK_PAYLOAD')}
      raise
    finally:
      environment.remove_key('TASK_PAYLOAD')

  return wrapper
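As a usage sketch, the decorator wraps a task entry point; the handler name and body below are hypothetical placeholders:

@set_task_payload
def execute(task_name, task_argument, job_name):
  # While this runs, TASK_PAYLOAD is set in the environment; if it raises,
  # the exception carries the payload in e.extras for error reporting.
  logs.log('Payload: %s' % environment.get_value('TASK_PAYLOAD'))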
Whether the task status should be automatically handled.
def should_update_task_status(task_name):
  """Whether the task status should be automatically handled."""
  return task_name not in [
      # Multiple fuzz tasks are expected to run in parallel.
      'fuzz',

      # The task payload can't be used as-is for de-duplication purposes as it
      # includes revision. corpus_pruning_task calls update_task_status itself
      # to handle this.
      # TODO(ochang): This will be cleaned up as part of migration to Pub/Sub.
      'corpus_pruning',
  ]
Start web server for blackbox fuzzer jobs (non-engine fuzzer jobs).
def start_web_server_if_needed():
  """Start web server for blackbox fuzzer jobs (non-engine fuzzer jobs)."""
  if environment.is_engine_fuzzer_job():
    return

  try:
    http_server.start()
  except Exception:
    logs.log_error('Failed to start web server, skipping.')
Run the command.
def run_command(task_name, task_argument, job_name, uworker_env,
                preprocess=False):
  """Run the command."""
  task = COMMAND_MAP.get(task_name)
  if not task:
    logs.log_error("Unknown command '%s'" % task_name)
    return None

  # If applicable, ensure this is the only instance of the task running.
  task_state_name = ' '.join([task_name, task_argument, job_name])
  if should_update_task_status(task_name):
    if not data_handler.update_task_status(task_state_name,
                                           data_types.TaskState.STARTED):
      logs.log('Another instance of "{}" already '
               'running, exiting.'.format(task_state_name))
      raise AlreadyRunningError

  result = None
  try:
    if not preprocess:
      result = task.execute(task_argument, job_name, uworker_env)
    else:
      result = task.preprocess(task_argument, job_name, uworker_env)
  except errors.InvalidTestcaseError:
    # It is difficult to try to handle the case where a test case is deleted
    # during processing. Rather than trying to catch by checking every point
    # where a test case is reloaded from the datastore, just abort the task.
    logs.log_warn('Test case %s no longer exists.' % task_argument)
  except BaseException:
    # On any other exceptions, update state to reflect error and re-raise.
    if should_update_task_status(task_name):
      data_handler.update_task_status(task_state_name,
                                      data_types.TaskState.ERROR)
    raise

  # Task completed successfully.
  if should_update_task_status(task_name):
    data_handler.update_task_status(task_state_name,
                                    data_types.TaskState.FINISHED)

  return result
Figures out what to do with the given task and executes the command.
def process_command(task):
  """Figures out what to do with the given task and executes the command."""
  logs.log(f'Executing command "{task.payload()}"')
  if not task.payload().strip():
    logs.log_error('Empty task received.')
    return None

  return process_command_impl(task.command, task.argument, task.job,
                              task.high_end, task.is_command_override)
Implementation of process_command.
def process_command_impl(task_name, task_argument, job_name, high_end,
                         is_command_override, preprocess=False):
  """Implementation of process_command."""
  uworker_env = None
  environment.set_value('TASK_NAME', task_name)
  environment.set_value('TASK_ARGUMENT', task_argument)
  environment.set_value('JOB_NAME', job_name)
  if job_name != 'none':
    job = data_types.Job.query(data_types.Job.name == job_name).get()
    # Job might be removed. In that case, we don't want an exception
    # raised and causing this task to be retried by another bot.
    if not job:
      logs.log_error("Job '%s' not found." % job_name)
      return None

    if not job.platform:
      error_string = "No platform set for job '%s'" % job_name
      logs.log_error(error_string)
      raise errors.BadStateError(error_string)

    job_base_queue_suffix = tasks.queue_suffix_for_platform(
        environment.base_platform(job.platform))
    bot_platform = environment.platform().lower()
    bot_base_queue_suffix = tasks.queue_suffix_for_platform(
        environment.base_platform(bot_platform))

    # A misconfiguration led to this point. Clean up the job if necessary.
    if job_base_queue_suffix != bot_base_queue_suffix:
      # This happens rarely, store this as a hard exception.
      logs.log_error(
          'Wrong platform for job %s: job queue [%s], bot queue [%s].' %
          (job_name, job_base_queue_suffix, bot_base_queue_suffix))

      # Try to recreate the job in the correct task queue.
      new_queue = (
          tasks.high_end_queue() if high_end else tasks.regular_queue())
      new_queue += job_base_queue_suffix

      # Command override is continuously run by a bot. If we keep failing
      # and recreating the task, it will just DoS the entire task queue.
      # So, we don't create any new tasks in that case since it needs
      # manual intervention to fix the override anyway.
      if not is_command_override:
        try:
          tasks.add_task(task_name, task_argument, job_name, new_queue)
        except Exception:
          # This can happen on trying to publish on a non-existent topic, e.g.
          # a topic for a high-end bot on another platform. In this case, just
          # give up.
          logs.log_error('Failed to fix platform and re-add task.')

      # Add a wait interval to avoid overflowing task creation.
      failure_wait_interval = environment.get_value('FAIL_WAIT')
      time.sleep(failure_wait_interval)
      return None

    if task_name != 'fuzz':
      # Make sure that our platform id matches that of the testcase (for
      # non-fuzz tasks).
      testcase = data_handler.get_entity_by_type_and_id(
          data_types.Testcase, task_argument)
      if testcase:
        current_platform_id = environment.get_platform_id()
        testcase_platform_id = testcase.platform_id
        testcase_id = testcase.key.id()

        # This indicates we are trying to run this job on the wrong platform
        # and potentially blocks fuzzing. See the 'subqueues' feature for
        # more details: https://github.com/google/clusterfuzz/issues/3347
        if (task_name != 'variant' and testcase_platform_id and
            not utils.fields_match(testcase_platform_id,
                                   current_platform_id)):
          logs.log(f'Testcase {testcase_id} platform {testcase_platform_id} '
                   f'does not match with ours {current_platform_id}, '
                   'checking ...')

          # Check if the device or branch is deprecated.
          # If it is deprecated, try to execute on an updated platform.
          if not (environment.is_testcase_deprecated(testcase_platform_id) and
                  environment.can_testcase_run_on_platform(
                      testcase_platform_id, current_platform_id)):
            logs.log('Testcase %d platform (%s) does not match with ours '
                     '(%s), exiting' %
                     (testcase.key.id(), testcase_platform_id,
                      current_platform_id))
            tasks.add_task(
                task_name,
                task_argument,
                job_name,
                wait_time=utils.random_number(1, TASK_RETRY_WAIT_LIMIT))
            return None

          logs.log(f'Testcase {testcase_id} platform {testcase_platform_id} '
                   f'can run on current platform {current_platform_id}')

    # Some fuzzers contain additional environment variables that should be
    # set for them. Append these for tests generated by these fuzzers and for
    # the fuzz command itself.
    fuzzer_name = None
    if task_name == 'fuzz':
      fuzzer_name = task_argument
    elif testcase:
      fuzzer_name = testcase.fuzzer_name

    # Get job's environment string.
    environment_string = job.get_environment_string()

    if task_name == 'minimize':
      # Let jobs specify a different job and fuzzer to minimize with.
      job_environment = job.get_environment()
      minimize_job_override = job_environment.get('MINIMIZE_JOB_OVERRIDE')
      if minimize_job_override:
        minimize_job = data_types.Job.query(
            data_types.Job.name == minimize_job_override).get()
        if minimize_job:
          environment.set_value('JOB_NAME', minimize_job_override)
          environment_string = minimize_job.get_environment_string()
          environment_string += '\nORIGINAL_JOB_NAME = %s\n' % job_name
          job_name = minimize_job_override
        else:
          logs.log_error(
              'Job for minimization not found: %s.' % minimize_job_override)
          # Fallback to using own job for minimization.

      minimize_fuzzer_override = job_environment.get(
          'MINIMIZE_FUZZER_OVERRIDE')
      fuzzer_name = minimize_fuzzer_override or fuzzer_name

    if fuzzer_name and not environment.is_engine_fuzzer_job(job_name):
      fuzzer = data_types.Fuzzer.query(
          data_types.Fuzzer.name == fuzzer_name).get()
      additional_default_variables = ''
      additional_variables_for_job = ''
      if (fuzzer and hasattr(fuzzer, 'additional_environment_string') and
          fuzzer.additional_environment_string):
        for line in fuzzer.additional_environment_string.splitlines():
          # Job specific values may be defined in fuzzer additional
          # environment variable name strings in the form
          # job_name:VAR_NAME = VALUE.
          if '=' in line and ':' in line.split('=', 1)[0]:
            fuzzer_job_name, environment_definition = line.split(':', 1)
            if fuzzer_job_name == job_name:
              additional_variables_for_job += '\n%s' % environment_definition
            continue

          additional_default_variables += '\n%s' % line

      environment_string += additional_default_variables
      environment_string += additional_variables_for_job

    # Update environment for the job.
    uworker_env = update_environment_for_job(environment_string)
    uworker_env['TASK_NAME'] = task_name
    uworker_env['TASK_ARGUMENT'] = task_argument
    uworker_env['JOB_NAME'] = job_name

  # Match the cpu architecture with the ones required in the job definition.
  # If they don't match, then bail out and recreate task.
  if not is_supported_cpu_arch_for_job():
    logs.log(
        'Unsupported cpu architecture specified in job definition, exiting.')
    tasks.add_task(
        task_name,
        task_argument,
        job_name,
        wait_time=utils.random_number(1, TASK_RETRY_WAIT_LIMIT))
    return None

  # Initial cleanup.
  cleanup_task_state()
  start_web_server_if_needed()

  try:
    return run_command(task_name, task_argument, job_name, uworker_env,
                       preprocess)
  finally:
    # Final clean up.
    cleanup_task_state()
Get revisions from chromium component.
def get_chromium_component_start_and_end_revision(start_revision, end_revision,
                                                  job_type):
  """Get revisions from chromium component."""
  component_rev_list = revisions.get_component_range_list(
      start_revision, end_revision, job_type)

  for component_rev in component_rev_list:
    if component_rev['component'] == 'Chromium':
      start_revision, end_revision = (
          revisions.get_start_and_end_revision(component_rev['link_text']))

  return start_revision, end_revision
Get start and end revision.
def get_start_and_end_revision(regression_range, job_type):
  """Get start and end revision."""
  start_revision, end_revision = revisions.get_start_and_end_revision(
      regression_range)

  # FIXME: Hack to use chromium revision for android builds.
  if environment.is_android():
    return get_chromium_component_start_and_end_revision(
        start_revision, end_revision, job_type)

  return start_revision, end_revision
Return whether we have a valid regression range.
def is_valid_regression_range(regression_range, job_type):
  """Return whether we have a valid regression range."""
  start, end = get_start_and_end_revision(regression_range, job_type)
  return start != 0 or end != 0
Returns a dictionary with information about a component at a revision.
def get_component_information_by_name(chromium_revision,
                                      component_display_name):
  """Returns a dictionary with information about a component at a revision."""
  lower_name = component_display_name.lower()
  component_revisions = revisions.get_component_revisions_dict(
      chromium_revision, None)
  if component_revisions is None:
    return None

  all_details = []
  for value in component_revisions.values():
    if value and 'name' in value and value['name'].lower() == lower_name:
      all_details.append(value)

  # If we found several components with the same name, return nothing useful.
  if len(all_details) == 1:
    return all_details[0]

  return None
Gets component impact string using the build information url.
def get_component_impacts_from_url(component_name,
                                   regression_range,
                                   job_type,
                                   platform=None):
  """Gets component impact string using the build information url."""
  logs.log('Getting component impacts from URL. Component name %s, '
           'regression range %s, job type %s, platform %s.' %
           (component_name, regression_range, str(job_type), str(platform)))
  start_revision, end_revision = get_start_and_end_revision(
      regression_range, job_type)
  logs.log('Start and end revision %s, %s' % (start_revision, end_revision))
  if not end_revision:
    return Impacts()

  build_revision_mappings = build_info.get_build_to_revision_mappings(platform)
  if not build_revision_mappings:
    return Impacts()

  found_impacts = {}
  for build in ['extended_stable', 'stable', 'beta', 'canary']:
    mapping = build_revision_mappings.get(build)
    logs.log('Considering impacts for %s.' % build)
    # TODO(yuanjunh): bypass for now but remove it after ES is enabled.
    if build == 'extended_stable' and not mapping:
      found_impacts[build] = Impact()
      continue

    # Some platforms don't have canary, so use dev to represent
    # the affected head version.
    if build == 'canary' and not mapping:
      mapping = build_revision_mappings.get('dev')
    if not mapping:
      return Impacts()

    chromium_revision = mapping['revision']
    logs.log('Chromium revision is %s.' % chromium_revision)
    component_revision = get_component_information_by_name(
        chromium_revision, component_name)
    logs.log('Component revision is %s.' % component_revision)
    if not component_revision:
      return Impacts()

    branched_from = revisions.revision_to_branched_from(
        component_revision['url'], component_revision['rev'])
    logs.log('Branched from revision is %s.' % branched_from)
    if not branched_from:
      # This is a head revision, not branched.
      branched_from = component_revision['rev']

    impact = get_impact({
        'revision': branched_from,
        'version': mapping['version']
    }, start_revision, end_revision, build == 'canary')
    logs.log('Resulting impact is %s.' % str(impact))
    found_impacts[build] = impact

  return Impacts(found_impacts['stable'], found_impacts['beta'],
                 found_impacts['extended_stable'], found_impacts['canary'])
Gets impact string using the build information url.
def get_impacts_from_url(regression_range, job_type, platform=None):
  """Gets impact string using the build information url."""
  logs.log('Get component impacts from URL: range %s, '
           'job type %s.' % (regression_range, str(job_type)))
  component_name = data_handler.get_component_name(job_type)
  if component_name:
    return get_component_impacts_from_url(component_name, regression_range,
                                          job_type, platform)

  start_revision, end_revision = get_start_and_end_revision(
      regression_range, job_type)
  logs.log('Proceeding to calculate impacts as non-component based on '
           'range %s-%s' % (str(start_revision), str(end_revision)))
  if not end_revision:
    return Impacts()

  logs.log(f'Gathering build to revision mappings for {platform}')
  build_revision_mappings = build_info.get_build_to_revision_mappings(platform)
  if not build_revision_mappings:
    return Impacts()

  logs.log('Calculating impacts from URL')
  extended_stable = get_impact(
      build_revision_mappings.get('extended_stable'), start_revision,
      end_revision)
  stable = get_impact(
      build_revision_mappings.get('stable'), start_revision, end_revision)
  beta = get_impact(
      build_revision_mappings.get('beta'), start_revision, end_revision)
  head = get_head_impact(build_revision_mappings, start_revision, end_revision)

  return Impacts(stable, beta, extended_stable, head)
Return an Impact object representing the impact on a given build type.
def get_impact(build_revision,
               start_revision,
               end_revision,
               is_last_possible_build=False):
  """Return an Impact object representing the impact on a given build type."""
  if not build_revision:
    return Impact()

  revision = build_revision['revision']
  if not revision.isdigit():
    return Impact()

  revision = int(revision)
  version = build_revision['version']

  if start_revision > revision:
    if is_last_possible_build:
      # There are no further builds to be tested. We are probably testing
      # a revision of the code which hasn't yet made it into *any* build.
      # If that's the case, we'll say that this test case _probably_
      # impacts the milestone. We can't be sure, because the next build
      # might happen to gain a new milestone number, but it's unlikely.
      milestone = version.split('.')[0]
      return Impact(milestone, likely=True)
    return Impact()

  if end_revision < revision:
    return Impact(version, likely=False)

  # We can't figure out the impact, but it is likely.
  return Impact(version, likely=True)
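To make the branching concrete, a worked sketch with made-up revision numbers; the namedtuple is a minimal stand-in for the real Impact class, and get_impact above is assumed to be in scope:

from collections import namedtuple

# Minimal stand-in for the real Impact class, for illustration only.
Impact = namedtuple('Impact', ['version', 'likely'])
Impact.__new__.__defaults__ = ('', False)

build = {'revision': '1000', 'version': '99.0.1234.0'}  # Hypothetical build.
print(get_impact(build, 900, 950))
# -> Impact('99.0.1234.0', False): range ends before the build, so it is hit.
print(get_impact(build, 1100, 1200))
# -> Impact('', False): range starts after this (non-final) build.
print(get_impact(build, 1100, 1200, is_last_possible_build=True))
# -> Impact('99', True): probably impacts the milestone of the last build.
print(get_impact(build, 950, 1050))
# -> Impact('99.0.1234.0', True): range straddles the build, likely impacted.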
Return the impact on 'head', i.e. the latest build we can find.
def get_head_impact(build_revision_mappings, start_revision, end_revision):
  """Return the impact on 'head', i.e. the latest build we can find."""
  latest_build = build_revision_mappings.get('canary')
  if latest_build is None:
    latest_build = build_revision_mappings.get('dev')
  return get_impact(
      latest_build, start_revision, end_revision, is_last_possible_build=True)
Set testcase's impact-related fields given impacts.
def set_testcase_with_impacts(testcase, impacts):
  """Set testcase's impact-related fields given impacts."""
  testcase.impact_extended_stable_version = impacts.extended_stable.version
  testcase.impact_extended_stable_version_likely = (
      impacts.extended_stable.likely)
  testcase.impact_stable_version = impacts.stable.version
  testcase.impact_stable_version_likely = impacts.stable.likely
  testcase.impact_beta_version = impacts.beta.version
  testcase.impact_beta_version_likely = impacts.beta.likely
  testcase.impact_head_version = impacts.head.version
  testcase.impact_head_version_likely = impacts.head.likely
  testcase.is_impact_set_flag = True
Attempt to find if the testcase affects release branches on Chromium.
def execute_task(testcase_id, job_type):
  """Attempt to find if the testcase affects release branches on Chromium."""
  # We don't need job_type but it's supplied to all tasks.
  del job_type

  # This shouldn't ever get scheduled, but check just in case.
  if not utils.is_chromium():
    return

  # Locate the testcase associated with the id.
  testcase = data_handler.get_testcase_by_id(testcase_id)

  # If this testcase is fixed, we should no longer be doing impact testing.
  if testcase.fixed and testcase.is_impact_set_flag:
    return

  # For testcases with status unreproducible, we do impact analysis just once.
  if testcase.is_status_unreproducible() and testcase.is_impact_set_flag:
    return

  # Update comments only after checking the above bailout conditions.
  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)

  # This task is not applicable to unreproducible testcases.
  if testcase.one_time_crasher_flag:
    data_handler.update_testcase_comment(
        testcase, data_types.TaskState.ERROR,
        'Not applicable for unreproducible testcases')
    return

  # This task is not applicable for custom binaries. We cannot remove the
  # creation of such tasks specifically for custom binary testcase in cron,
  # so exit gracefully.
  if build_manager.is_custom_binary():
    data_handler.update_testcase_comment(
        testcase, data_types.TaskState.FINISHED,
        'Not applicable for custom binaries')
    return

  logs.log('Preparing to calculate impact.')

  # Formerly ClusterFuzz had buckets containing builds for stable,
  # beta and dev builds, and attempted reproduction on them. That had
  # the advantage that we would test against the exact thing shipped on each
  # channel, including any backported features. In practice, though, we
  # never noticed a difference from a bisection-based approach to determining
  # impacted builds, and those production build buckets disappeared, so we
  # have switched to a purely bisection-based approach.
  if not is_valid_regression_range(testcase.regression, testcase.job_type):
    data_handler.update_testcase_comment(
        testcase, data_types.TaskState.FINISHED,
        'Cannot run without regression range, will re-run once regression '
        'task finishes')
    return

  logs.log('Calculating impact from URL.')
  impacts = get_impacts_from_url(testcase.regression, testcase.job_type)

  testcase = data_handler.get_testcase_by_id(testcase_id)
  set_testcase_with_impacts(testcase, impacts)
  data_handler.update_testcase_comment(testcase, data_types.TaskState.FINISHED)
Get the timeout associated with this testcase.
def _set_timeout_value_from_user_upload(testcase_id, uworker_env):
  """Get the timeout associated with this testcase."""
  metadata = data_types.TestcaseUploadMetadata.query(
      data_types.TestcaseUploadMetadata.testcase_id == int(testcase_id)).get()
  if metadata and metadata.timeout:
    uworker_env['TEST_TIMEOUT'] = str(metadata.timeout)
Android specific setup steps for testcase.
def _copy_testcase_to_device_and_setup_environment(testcase,
                                                   testcase_file_path):
  """Android specific setup steps for testcase."""
  # Copy test(s) to device.
  android.device.push_testcases_to_device()

  # The following steps need privileged job access.
  job_type_has_privileged_access = environment.get_value('PRIVILEGED_ACCESS')
  if not job_type_has_privileged_access:
    return

  # Install testcase if it is an app.
  package_name = android.app.get_package_name(testcase_file_path)
  if package_name:
    # Set the package name for later use.
    environment.set_value('PKG_NAME', package_name)

    # Install the application apk.
    android.device.install_application_if_needed(
        testcase_file_path, force_update=True)

  # Set app launch command if available from upload.
  app_launch_command = testcase.get_metadata('app_launch_command')
  if app_launch_command:
    environment.set_value('APP_LAUNCH_COMMAND', app_launch_command)

  # Set executable bit on the testcase (to allow binary executable testcases
  # to work in app launch command, e.g. shell %TESTCASE%).
  local_testcases_directory = environment.get_value('FUZZ_INPUTS')
  if (testcase_file_path and
      testcase_file_path.startswith(local_testcases_directory)):
    relative_testcase_file_path = (
        testcase_file_path[len(local_testcases_directory) + 1:])
    device_testcase_file_path = os.path.join(
        android.constants.DEVICE_TESTCASES_DIR, relative_testcase_file_path)
    android.adb.run_shell_command(
        ['chmod', '0755', device_testcase_file_path])
Get application arguments to use for setting up |testcase|. Use minimized arguments if available. For variant task, where we run a testcase against another job type, use both minimized arguments and application arguments from job.
def _get_application_arguments(testcase, job_type, task_name):
  """Get application arguments to use for setting up |testcase|. Use minimized
  arguments if available. For variant task, where we run a testcase against
  another job type, use both minimized arguments and application arguments
  from job."""
  testcase_args = testcase.minimized_arguments
  if not testcase_args:
    return None

  if task_name != 'variant':
    return testcase_args

  # TODO(aarya): Use %TESTCASE% explicitly since it will not exist with new
  # engine impl libFuzzer testcases and AFL's launcher.py requires it as the
  # first argument. Remove once AFL is migrated to the new engine impl.
  if environment.is_afl_job(job_type):
    return '%TESTCASE%'

  job_args = data_handler.get_value_from_job_definition(
      job_type, 'APP_ARGS', default='')
  job_args_list = shlex.split(job_args)
  testcase_args_list = shlex.split(testcase_args)
  testcase_args_filtered_list = [
      arg for arg in testcase_args_list if arg not in job_args_list
  ]

  app_args = ' '.join(testcase_args_filtered_list)
  if job_args:
    if app_args:
      app_args += ' '
    app_args += job_args

  return app_args
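For the variant path, flags duplicated between the testcase and the job are dropped before the job arguments are appended; an illustrative run with made-up flags:

# Hypothetical values, for illustration only.
testcase_args = '--no-sandbox --enable-logging --foo'
job_args = '--no-sandbox --bar'
filtered = [a for a in testcase_args.split() if a not in job_args.split()]
print(' '.join(filtered) + ' ' + job_args)
# -> --enable-logging --foo --no-sandbox --bar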
Set up environment for various memory tools used.
def _setup_memory_tools_environment(testcase):
  """Set up environment for various memory tools used."""
  env = testcase.get_metadata('env')
  if not env:
    environment.reset_current_memory_tool_options(
        redzone_size=testcase.redzone, disable_ubsan=testcase.disable_ubsan)
    return

  for options_name, options_value in env.items():
    if not options_value:
      environment.remove_key(options_name)
      continue
    environment.set_memory_tool_options(options_name, options_value)
Set various environment variables based on the test case.
def prepare_environment_for_testcase(testcase):
  """Set various environment variables based on the test case."""
  _setup_memory_tools_environment(testcase)

  # Setup environment variable for windows size and location properties.
  # Explicit override to avoid using the default one from job definition since
  # that contains unsubstituted vars like $WIDTH, etc.
  environment.set_value('WINDOW_ARG', testcase.window_argument)

  # Adjust timeout based on the stored multiplier (if available).
  if testcase.timeout_multiplier:
    test_timeout = environment.get_value('TEST_TIMEOUT')
    environment.set_value('TEST_TIMEOUT',
                          int(test_timeout * testcase.timeout_multiplier))

  # Add FUZZ_TARGET to environment if this is a fuzz target testcase.
  fuzz_target = testcase.get_metadata('fuzzer_binary_name')
  if fuzz_target:
    environment.set_value('FUZZ_TARGET', fuzz_target)
Error handler for setup_testcase that is called by uworker_postprocess.
def handle_setup_testcase_error(uworker_output: uworker_msg_pb2.Output):
  """Error handler for setup_testcase that is called by uworker_postprocess."""
  # Get the testcase again because it is too hard to set the testcase for
  # partially migrated tasks.
  # TODO(metzman): Experiment with making this unnecessary.
  # First update comment.
  testcase = data_handler.get_testcase_by_id(
      uworker_output.uworker_input.testcase_id)
  data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                       uworker_output.error_message)

  # Then reschedule the task.
  command = task_utils.get_command_from_module(
      uworker_output.uworker_input.module_name)
  testcase_fail_wait = environment.get_value('FAIL_WAIT')
  tasks.add_task(
      command,
      uworker_output.uworker_input.testcase_id,
      uworker_output.uworker_input.job_type,
      wait_time=testcase_fail_wait)
Preprocessing for setup_testcase function.
def preprocess_setup_testcase(testcase,
                              uworker_env,
                              fuzzer_override=None,
                              with_deps=True):
  """Preprocessing for setup_testcase function."""
  fuzzer_name = fuzzer_override or testcase.fuzzer_name
  testcase_id = testcase.key.id()
  if fuzzer_name and not with_deps:
    logs.log(f'Skipping fuzzer preprocess: {fuzzer_name}.')

  if fuzzer_name and with_deps:
    # This branch is taken when we assume fuzzer needs to be set up for a
    # testcase to be executed (i.e. when a testcase was found by a fuzzer).
    # It's not the case for testcases uploaded by users.
    try:
      setup_input = preprocess_update_fuzzer_and_data_bundles(fuzzer_name)
    except errors.InvalidFuzzerError:
      # Close testcase and don't recreate tasks if this fuzzer is invalid.
      logs.log_error('Closed testcase %d with invalid fuzzer %s.' %
                     (testcase_id, fuzzer_name))
      error_message = f'Fuzzer {fuzzer_name} no longer exists.'
      # First update comment.
      testcase = data_handler.get_testcase_by_id(testcase_id)
      data_handler.update_testcase_comment(
          testcase, data_types.TaskState.ERROR, error_message)
      testcase.open = False
      testcase.fixed = 'NA'
      testcase.set_metadata('fuzzer_was_deleted', True)
      testcase.put()
      raise
  else:
    setup_input = uworker_msg_pb2.SetupInput()

  setup_input.testcase_download_url = get_signed_testcase_download_url(
      testcase)
  if environment.get_value('LSAN'):
    setup_input.global_blacklisted_functions.extend(
        leak_blacklist.get_global_blacklisted_functions())

  if testcase.uploader_email:
    _set_timeout_value_from_user_upload(testcase_id, uworker_env)

  # Override APP_ARGS with minimized arguments (if available). Don't do this
  # for variant task since other job types can have its own set of required
  # arguments, so use the full set of arguments of that job.
  app_args = _get_application_arguments(testcase, uworker_env['JOB_NAME'],
                                        uworker_env['TASK_NAME'])
  if app_args:
    environment.set_value('APP_ARGS', app_args, uworker_env)

  return setup_input
Sets up the testcase and needed dependencies like fuzzer, data bundle, etc.
def setup_testcase(testcase: data_types.Testcase, job_type: str,
                   setup_input: uworker_msg_pb2.SetupInput):
  """Sets up the testcase and needed dependencies like fuzzer, data bundle,
  etc."""
  testcase_id = testcase.key.id()

  # Prepare an error result to return in case of error.
  # Only include uworker_input for callers that aren't deserializing the
  # output and thus, uworker_io is not adding the input to.
  # TODO(metzman): Remove the input when the consolidation is complete.
  uworker_error_input = uworker_msg_pb2.Input(
      testcase_id=str(testcase_id), job_type=job_type)
  uworker_error_output = uworker_msg_pb2.Output(
      uworker_input=uworker_error_input,
      error_type=uworker_msg_pb2.ErrorType.TESTCASE_SETUP)
  testcase_setup_error_result = (None, None, uworker_error_output)

  # Clear testcase directories.
  shell.clear_testcase_directories()

  # Update the fuzzer if necessary in order to get the updated data bundle.
  if setup_input.fuzzer_name:
    update_successful = update_fuzzer_and_data_bundles(setup_input)
    if not update_successful:
      error_message = f'Unable to setup fuzzer {setup_input.fuzzer_name}'
      uworker_error_output.error_message = error_message
      return testcase_setup_error_result

  # Extract the testcase and any of its resources to the input directory.
  file_list, testcase_file_path = unpack_testcase(
      testcase, setup_input.testcase_download_url)
  if not file_list:
    error_message = f'Unable to setup testcase {testcase_file_path}'
    uworker_error_output.error_message = error_message
    return testcase_setup_error_result

  # For Android/Fuchsia, we need to sync our local testcases directory with
  # the one on the device.
  if environment.is_android():
    _copy_testcase_to_device_and_setup_environment(testcase,
                                                   testcase_file_path)

  # Push testcases to worker.
  if environment.is_trusted_host():
    from clusterfuzz._internal.bot.untrusted_runner import file_host
    file_host.push_testcases_to_worker()

  # Copy global blacklist into local blacklist.
  if setup_input.global_blacklisted_functions:
    # Get local blacklist without this testcase's entry.
    leak_blacklist.copy_global_to_local_blacklist(
        setup_input.global_blacklisted_functions, excluded_testcase=testcase)

  prepare_environment_for_testcase(testcase)

  return file_list, testcase_file_path, None
Figure out the relative path and input directory for this testcase.
def _get_testcase_file_and_path(testcase):
  """Figure out the relative path and input directory for this testcase."""
  testcase_absolute_path = testcase.absolute_path

  # This hack is needed so that we can run a testcase generated on windows, on
  # linux. os.path.isabs returns false on paths like c:\a\b\c.
  testcase_path_is_absolute = (
      testcase_absolute_path[1:3] == ':\\' or
      os.path.isabs(testcase_absolute_path))

  # Fix os.sep in testcase path if we are running this on non-windows
  # platform. It is unusual to have '\\' on linux paths, so substitution
  # should be safe.
  if environment.platform() != 'WINDOWS' and '\\' in testcase_absolute_path:
    testcase_absolute_path = testcase_absolute_path.replace('\\', os.sep)

  # Default directory for testcases.
  input_directory = environment.get_value('FUZZ_INPUTS')
  if not testcase_path_is_absolute:
    testcase_path = os.path.join(input_directory, testcase_absolute_path)
    return input_directory, testcase_path

  # Root directory can be different on bots. Fix the path to account for this.
  root_directory = environment.get_value('ROOT_DIR')
  search_string = '%s%s%s' % (os.sep, _BOT_DIR, os.sep)
  search_index = testcase_absolute_path.find(search_string)
  relative_path = testcase_absolute_path[search_index + len(search_string):]
  testcase_path = os.path.join(root_directory, _BOT_DIR, relative_path)

  return input_directory, testcase_path
Returns a signed download URL for the testcase.
def get_signed_testcase_download_url(testcase):
  """Returns a signed download URL for the testcase."""
  key, _ = _get_testcase_key_and_archive_status(testcase)
  return blobs.get_signed_download_url(key)
Returns the testcase's key and whether or not it is archived.
def _get_testcase_key_and_archive_status(testcase):
  """Returns the testcase's key and whether or not it is archived."""
  if _is_testcase_minimized(testcase):
    key = testcase.minimized_keys
    archived = bool(
        testcase.archive_state & data_types.ArchiveStatus.MINIMIZED)
    return key, archived

  key = testcase.fuzzed_keys
  archived = bool(testcase.archive_state & data_types.ArchiveStatus.FUZZED)
  return key, archived
Unpacks a testcase and returns all files it is composed of.
def unpack_testcase(testcase, testcase_download_url):
  """Unpacks a testcase and returns all files it is composed of."""
  # Figure out where the testcase file should be stored.
  input_directory, testcase_file_path = _get_testcase_file_and_path(testcase)
  key, archived = _get_testcase_key_and_archive_status(testcase)

  if _is_testcase_minimized(testcase) and archived:
    temp_filename = (
        os.path.join(input_directory,
                     str(testcase.key.id()) + _TESTCASE_ARCHIVE_EXTENSION))
  elif archived:
    temp_filename = os.path.join(input_directory, testcase.archive_filename)
  else:
    temp_filename = testcase_file_path

  if not download_testcase(testcase_download_url, temp_filename):
    logs.log(
        f'Couldn\'t download testcase {key} from {testcase_download_url}.')
    return None, testcase_file_path

  file_list = []
  if archived:
    with archive.open(temp_filename) as reader:
      reader.extract_all(input_directory)
      file_list = [f.name for f in reader.list_members()]
    shell.remove_file(temp_filename)

    file_exists = False
    for file_name in file_list:
      if os.path.basename(file_name) == os.path.basename(testcase_file_path):
        file_exists = True
        break

    if not file_exists:
      logs.log_error(
          'Expected file to run %s is not in archive. Base directory is %s '
          'and files in archive are [%s].' %
          (testcase_file_path, input_directory, ','.join(file_list)))
      return None, testcase_file_path
  else:
    file_list.append(testcase_file_path)

  return file_list, testcase_file_path
Return the lock key name for the given data bundle.
def _get_data_bundle_update_lock_name(data_bundle_name):
  """Return the lock key name for the given data bundle."""
  return f'update:data_bundle:{data_bundle_name}'
Return path to data bundle sync file.
def _get_data_bundle_sync_file_path(data_bundle_directory):
  """Return path to data bundle sync file."""
  return os.path.join(data_bundle_directory, _SYNC_FILENAME)
Clear old data bundles so as to keep the disk cache restricted to |_DATA_BUNDLE_CACHE_COUNT| data bundles and prevent running out of disk space.
def _clear_old_data_bundles_if_needed():
  """Clear old data bundles so as to keep the disk cache restricted to
  |_DATA_BUNDLE_CACHE_COUNT| data bundles and prevent running out of disk
  space."""
  data_bundles_directory = environment.get_value('DATA_BUNDLES_DIR')

  dirs = []
  for filename in os.listdir(data_bundles_directory):
    file_path = os.path.join(data_bundles_directory, filename)
    if not os.path.isdir(file_path):
      continue
    dirs.append(file_path)

  dirs_to_remove = sorted(
      dirs, key=os.path.getmtime, reverse=True)[_DATA_BUNDLE_CACHE_COUNT:]
  for dir_to_remove in dirs_to_remove:
    logs.log('Removing data bundle directory to keep disk cache small: %s' %
             dir_to_remove)
    shell.remove_directory(dir_to_remove)
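The eviction policy above is a simple LRU-by-mtime over top-level directories. A minimal standalone sketch of the same pattern (the count constant is an illustrative stand-in):

import os

CACHE_COUNT = 10  # Illustrative stand-in for _DATA_BUNDLE_CACHE_COUNT.

def dirs_to_evict(parent_dir):
  """Returns directories beyond the CACHE_COUNT most recently modified."""
  dirs = [
      os.path.join(parent_dir, name)
      for name in os.listdir(parent_dir)
      if os.path.isdir(os.path.join(parent_dir, name))
  ]
  # Sort newest first; everything past the cache limit is a candidate for
  # removal.
  return sorted(dirs, key=os.path.getmtime, reverse=True)[CACHE_COUNT:]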
Returns True if the data bundle should be updated because it is out of date.
def _should_update_data_bundle(data_bundle, data_bundle_directory):
  """Returns True if the data bundle should be updated because it is out of
  date."""
  # Check if the data bundle is up to date. If yes, skip the update.
  if _is_data_bundle_up_to_date(data_bundle, data_bundle_directory):
    logs.log('Data bundle was recently synced, skip.')
    return False

  # Re-check if another bot did the sync already. If yes, skip.
  # TODO(metzman): Figure out if this is even needed without NFS?
  if _is_data_bundle_up_to_date(data_bundle, data_bundle_directory):
    logs.log('Another bot finished the sync, skip.')
    return False

  return True
Create necessary directories to download the data bundle.
def _prepare_update_data_bundle(fuzzer, data_bundle):
  """Create necessary directories to download the data bundle."""
  data_bundle_directory = get_data_bundle_directory(fuzzer, data_bundle)
  if not data_bundle_directory:
    logs.log_error('Failed to setup data bundle %s.' % data_bundle.name)
    return None

  if not shell.create_directory(
      data_bundle_directory, create_intermediates=True):
    logs.log_error(
        'Failed to create data bundle %s directory.' % data_bundle.name)
    return None

  return data_bundle_directory
Updates a data bundle to the latest version.
def update_data_bundle(
    fuzzer: data_types.Fuzzer,
    data_bundle_corpus: uworker_msg_pb2.DataBundleCorpus) -> bool:
  """Updates a data bundle to the latest version."""
  data_bundle = uworker_io.entity_from_protobuf(data_bundle_corpus.data_bundle,
                                                data_types.DataBundle)
  logs.log('Setting up data bundle %s.' % data_bundle)
  data_bundle_directory = _prepare_update_data_bundle(fuzzer, data_bundle)
  if not data_bundle_directory:
    # Directory setup failed; nothing to sync into.
    return False

  if not _should_update_data_bundle(data_bundle, data_bundle_directory):
    return True

  time_before_sync_start = time.time()

  # No need to sync anything if this is a search index data bundle. In that
  # case, the fuzzer will generate testcases from a GCS bucket periodically.
  if not _is_search_index_data_bundle(data_bundle.name):
    if not (environment.is_trusted_host() and data_bundle.sync_to_worker):
      result = corpus_manager.sync_data_bundle_corpus_to_disk(
          data_bundle_corpus, data_bundle_directory)
    else:
      from clusterfuzz._internal.bot.untrusted_runner import \
          corpus_manager as untrusted_corpus_manager
      from clusterfuzz._internal.bot.untrusted_runner import file_host
      worker_data_bundle_directory = file_host.rebase_to_worker_root(
          data_bundle_directory)
      file_host.create_directory(
          worker_data_bundle_directory, create_intermediates=True)
      result = untrusted_corpus_manager.RemoteGSUtilRunner().rsync(
          data_bundle_corpus.gcs_url,
          worker_data_bundle_directory,
          delete=False)
      result = result.return_code == 0

    if not result:
      logs.log_error(f'Failed to sync data bundle {data_bundle.name}.')
      return False

  # Update the testcase list file.
  testcase_manager.create_testcase_list_file(data_bundle_directory)
  logs.log('Synced data bundle.')

  # Write the last synced time in the sync file.
  sync_file_path = _get_data_bundle_sync_file_path(data_bundle_directory)
  utils.write_data_to_file(time_before_sync_start, sync_file_path)
  if environment.is_trusted_host() and data_bundle.sync_to_worker:
    from clusterfuzz._internal.bot.untrusted_runner import file_host
    worker_sync_file_path = file_host.rebase_to_worker_root(sync_file_path)
    file_host.copy_file_to_worker(sync_file_path, worker_sync_file_path)

  return True
Sets fuzzer env vars for fuzzer set up.
def _set_fuzzer_env_vars(fuzzer):
  """Sets fuzzer env vars for fuzzer set up."""
  environment.set_value('UNTRUSTED_CONTENT', fuzzer.untrusted_content)
  # Adjust the test timeout, if user has provided one.
  if fuzzer.timeout:
    environment.set_value('TEST_TIMEOUT', fuzzer.timeout)

    # Increase fuzz test timeout if the fuzzer timeout is higher than its
    # current value.
    fuzz_test_timeout = environment.get_value('FUZZ_TEST_TIMEOUT')
    if fuzz_test_timeout and fuzz_test_timeout < fuzzer.timeout:
      environment.set_value('FUZZ_TEST_TIMEOUT', fuzzer.timeout)

  # Adjust the max testcases if this fuzzer has specified a lower limit.
  max_testcases = environment.get_value('MAX_TESTCASES')
  if fuzzer.max_testcases and fuzzer.max_testcases < max_testcases:
    environment.set_value('MAX_TESTCASES', fuzzer.max_testcases)

  # If the fuzzer generates large testcases or a large number of small ones
  # that don't fit on tmpfs, then use the larger disk directory.
  if fuzzer.has_large_testcases:
    testcase_disk_directory = environment.get_value('FUZZ_INPUTS_DISK')
    environment.set_value('FUZZ_INPUTS', testcase_disk_directory)
Does preprocessing for calls to update_fuzzer_and_data_bundles in uworker_main. Returns a SetupInput object.
def preprocess_update_fuzzer_and_data_bundles(
    fuzzer_name: str) -> uworker_msg_pb2.SetupInput:
  """Does preprocessing for calls to update_fuzzer_and_data_bundles in
  uworker_main. Returns a SetupInput object."""
  fuzzer = data_types.Fuzzer.query(data_types.Fuzzer.name == fuzzer_name).get()
  if not fuzzer:
    logs.log_error('No fuzzer exists with name %s.' % fuzzer_name)
    raise errors.InvalidFuzzerError

  update_input = uworker_msg_pb2.SetupInput(
      fuzzer_name=fuzzer_name, fuzzer=uworker_io.entity_to_protobuf(fuzzer))

  preprocess_get_data_bundles(fuzzer.data_bundle_name, update_input)

  update_input.fuzzer_log_upload_url = storage.get_signed_upload_url(
      fuzzer_logs.get_logs_gcs_path(fuzzer_name=fuzzer_name))
  if not fuzzer.builtin:
    update_input.fuzzer_download_url = blobs.get_signed_download_url(
        fuzzer.blobstore_key)

  # TODO(https://github.com/google/clusterfuzz/issues/3008): Finish migrating
  # update data bundles.
  return update_input
Updates the fuzzer. Helper for update_fuzzer_and_data_bundles.
def _update_fuzzer(update_input: uworker_msg_pb2.SetupInput,
                   fuzzer_directory: str, version_file: str) -> bool:
  """Updates the fuzzer. Helper for update_fuzzer_and_data_bundles."""
  fuzzer = uworker_io.entity_from_protobuf(update_input.fuzzer,
                                           data_types.Fuzzer)
  fuzzer_name = update_input.fuzzer_name
  if fuzzer.builtin:
    return True

  if not revisions.needs_update(version_file, fuzzer.revision):
    return True

  logs.log('Fuzzer update was found, updating.')

  # Clear the old fuzzer directory if it exists.
  if not shell.remove_directory(fuzzer_directory, recreate=True):
    logs.log_error('Failed to clear fuzzer directory.')
    return False

  # Copy the archive to local disk and unpack it.
  archive_path = os.path.join(fuzzer_directory, fuzzer.filename)
  if not storage.download_signed_url_to_file(update_input.fuzzer_download_url,
                                             archive_path):
    logs.log_error('Failed to copy fuzzer archive.')
    return False

  try:
    with archive.open(archive_path) as reader:
      reader.extract_all(fuzzer_directory)
  except Exception:
    error_message = (f'Failed to unpack fuzzer archive {fuzzer.filename} '
                     '(bad archive or unsupported format).')
    logs.log_error(error_message)
    fuzzer_logs.upload_script_log(
        'Fatal error: ' + error_message,
        signed_upload_url=update_input.fuzzer_log_upload_url)
    return False

  fuzzer_path = os.path.join(fuzzer_directory, fuzzer.executable_path)
  if not os.path.exists(fuzzer_path):
    error_message = ('Fuzzer executable %s not found. '
                     'Check fuzzer configuration.') % fuzzer.executable_path
    logs.log_error(error_message)
    fuzzer_logs.upload_script_log(
        'Fatal error: ' + error_message,
        fuzzer_name=fuzzer_name,
        signed_upload_url=update_input.fuzzer_log_upload_url)
    return False

  # Make fuzzer executable.
  os.chmod(fuzzer_path, 0o750)

  # Cleanup unneeded archive.
  shell.remove_file(archive_path)

  # Save the current revision of this fuzzer in a file for later checks.
  revisions.write_revision_to_revision_file(version_file, fuzzer.revision)
  logs.log('Updated fuzzer to revision %d.' % fuzzer.revision)
  return True
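For context, the version-file gate near the top of _update_fuzzer amounts to comparing a stored revision against the fuzzer entity's current one. A minimal sketch of that check, assuming the file stores a single integer revision (the actual format used by revisions.needs_update may differ):

import os

def needs_update_sketch(version_file, current_revision):
  # Assumption: the version file holds one integer revision, as written by
  # something like revisions.write_revision_to_revision_file().
  if not os.path.exists(version_file):
    return True  # Never downloaded; must update.
  with open(version_file) as f:
    try:
      return int(f.read().strip()) != current_revision
    except ValueError:
      return True  # Corrupt file; re-download to be safe.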
Sets up data bundles. Helper for update_fuzzer_and_data_bundles.
def _set_up_data_bundles(update_input: uworker_msg_pb2.SetupInput):
  """Sets up data bundles. Helper for update_fuzzer_and_data_bundles."""
  # Setup data bundles associated with this fuzzer.
  logs.log('Setting up data bundles.')
  fuzzer = uworker_io.entity_from_protobuf(update_input.fuzzer,
                                           data_types.Fuzzer)
  for data_bundle_corpus in update_input.data_bundle_corpuses:
    if not update_data_bundle(fuzzer, data_bundle_corpus):
      return False

  return True
Updates the fuzzer specified by |update_input| and its data bundles.
def update_fuzzer_and_data_bundles(
    update_input: uworker_msg_pb2.SetupInput) -> Optional[data_types.Fuzzer]:
  """Updates the fuzzer specified by |update_input| and its data bundles."""
  fuzzer = uworker_io.entity_from_protobuf(update_input.fuzzer,
                                           data_types.Fuzzer)
  _set_fuzzer_env_vars(fuzzer)

  # Set some helper environment variables.
  fuzzer_directory = get_fuzzer_directory(update_input.fuzzer_name)
  environment.set_value('FUZZER_DIR', fuzzer_directory)

  # Check for updates to this fuzzer.
  version_file = os.path.join(fuzzer_directory,
                              f'.{update_input.fuzzer_name}_version')
  if not _update_fuzzer(update_input, fuzzer_directory, version_file):
    return None

  _clear_old_data_bundles_if_needed()

  if not _set_up_data_bundles(update_input):
    return None

  # Setup environment variable for launcher script path.
  if fuzzer.launcher_script:
    fuzzer_launcher_path = os.path.join(fuzzer_directory,
                                        fuzzer.launcher_script)
    environment.set_value('LAUNCHER_PATH', fuzzer_launcher_path)

    # For the launcher script use case, we need the entire fuzzer directory on
    # the worker.
    if environment.is_trusted_host():
      from clusterfuzz._internal.bot.untrusted_runner import file_host
      worker_fuzzer_directory = file_host.rebase_to_worker_root(
          fuzzer_directory)
      file_host.copy_directory_to_worker(
          fuzzer_directory, worker_fuzzer_directory, replace=True)

  return fuzzer
Return True if this is a search index data bundle, False otherwise.
def _is_search_index_data_bundle(data_bundle_name):
  """Return True if this is a search index data bundle, False otherwise."""
  return data_bundle_name.startswith(
      testcase_manager.SEARCH_INDEX_BUNDLE_PREFIX)
Return true if the data bundle is up to date, false otherwise.
def _is_data_bundle_up_to_date(data_bundle, data_bundle_directory):
  """Return true if the data bundle is up to date, false otherwise."""
  sync_file_path = _get_data_bundle_sync_file_path(data_bundle_directory)

  if environment.is_trusted_host() and data_bundle.sync_to_worker:
    from clusterfuzz._internal.bot.untrusted_runner import file_host
    worker_sync_file_path = file_host.rebase_to_worker_root(sync_file_path)
    shell.remove_file(sync_file_path)
    file_host.copy_file_from_worker(worker_sync_file_path, sync_file_path)

  if not os.path.exists(sync_file_path):
    return False

  last_sync_time = datetime.datetime.utcfromtimestamp(
      utils.read_data_from_file(sync_file_path))

  # Check if we recently synced.
  if not dates.time_has_expired(
      last_sync_time, seconds=_DATA_BUNDLE_SYNC_INTERVAL_IN_SECONDS):
    return True

  # For search index data bundles, we don't sync them from the bucket.
  # Instead, we rely on the fuzzer to generate testcases periodically.
  if _is_search_index_data_bundle(data_bundle.name):
    return False

  # Check when the bucket URL was last updated. If there are no new updates,
  # there is no need to update the directory.
  bucket_url = data_handler.get_data_bundle_bucket_url(data_bundle.name)
  last_updated_time = storage.last_updated(bucket_url)
  if last_updated_time and last_sync_time > last_updated_time:
    logs.log('Data bundle %s has no new content from last sync.' %
             data_bundle.name)
    return True

  return False
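Stripped of the trusted-host and bucket logic, the freshness check above reduces to an age comparison against the timestamp written by update_data_bundle. A minimal sketch (the interval constant is illustrative, standing in for _DATA_BUNDLE_SYNC_INTERVAL_IN_SECONDS):

import datetime
import os

SYNC_INTERVAL_SECONDS = 6 * 60 * 60  # Illustrative interval value.

def recently_synced(sync_file_path):
  """True if the sync file exists and is younger than the sync interval."""
  if not os.path.exists(sync_file_path):
    return False
  with open(sync_file_path) as f:
    last_sync = datetime.datetime.utcfromtimestamp(float(f.read()))
  age = datetime.datetime.utcnow() - last_sync
  return age.total_seconds() < SYNC_INTERVAL_SECONDS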
For fuzz_task which doesn't get data bundles in an untrusted manner.
def trusted_get_data_bundle_directory(fuzzer):
  """For fuzz_task which doesn't get data bundles in an untrusted manner."""
  # TODO(metzman): Delete this when fuzz_task is migrated.
  # Check if we have a fuzzer-specific data bundle. Use it to calculate the
  # data directory we will fetch our testcases from.
  data_bundle = data_types.DataBundle.query(
      data_types.DataBundle.name == fuzzer.data_bundle_name).get()
  return get_data_bundle_directory(fuzzer, data_bundle)
Return data bundle data directory.
def get_data_bundle_directory(fuzzer, data_bundle):
  """Return data bundle data directory."""
  # Store corpora for built-in fuzzers like libFuzzer in the same directory
  # as other local data bundles. This makes it easy to clear them when we run
  # out of disk space.
  local_data_bundles_directory = environment.get_value('DATA_BUNDLES_DIR')
  if fuzzer.builtin:
    return local_data_bundles_directory

  if not data_bundle:
    # Generic data bundle directory. Available to all fuzzers if they don't
    # have their own data bundle.
    return environment.get_value('FUZZ_DATA')

  local_data_bundle_directory = os.path.join(local_data_bundles_directory,
                                             data_bundle.name)
  return local_data_bundle_directory
Return directory used by a fuzzer.
def get_fuzzer_directory(fuzzer_name):
  """Return directory used by a fuzzer."""
  fuzzer_directory = environment.get_value('FUZZERS_DIR')
  fuzzer_directory = os.path.join(fuzzer_directory, fuzzer_name)
  return fuzzer_directory
Archive testcase and its dependencies, and store in blobstore.
def archive_testcase_and_dependencies_in_gcs(resource_list, testcase_path):
  """Archive testcase and its dependencies, and store in blobstore."""
  if not os.path.exists(testcase_path):
    logs.log_error('Unable to find testcase %s.' % testcase_path)
    return None, None, None, None

  absolute_filename = testcase_path
  archived = False
  zip_filename = None
  zip_path = None

  if not resource_list:
    resource_list = []

  # Add resource dependencies based on testcase path. These include
  # stuff like extensions directory, dependency files, etc.
  resource_list.extend(
      testcase_manager.get_resource_dependencies(testcase_path))

  # Filter out duplicates, directories, and files that do not exist.
  resource_list = utils.filter_file_list(resource_list)

  logs.log('Testcase and related files:\n%s' % str(resource_list))

  if len(resource_list) <= 1:
    # If this does not have any resources, just save the testcase.
    # TODO(flowerhack): Update this when we teach CF how to download testcases.
    try:
      file_handle = open(testcase_path, 'rb')
    except OSError:
      logs.log_error('Unable to open testcase %s.' % testcase_path)
      return None, None, None, None
  else:
    # If there are resources, create an archive.

    # Find the common root directory for all of the resources.
    # Assumption: resource_list[0] is the testcase path.
    base_directory_list = resource_list[0].split(os.path.sep)
    for list_index in range(1, len(resource_list)):
      current_directory_list = resource_list[list_index].split(os.path.sep)
      length = min(len(base_directory_list), len(current_directory_list))
      for directory_index in range(length):
        if (current_directory_list[directory_index] !=
            base_directory_list[directory_index]):
          base_directory_list = base_directory_list[0:directory_index]
          break

    base_directory = os.path.sep.join(base_directory_list)
    logs.log('Subresource common base directory: %s' % base_directory)
    if base_directory:
      # Common parent directory, archive sub-paths only.
      base_len = len(base_directory) + len(os.path.sep)
    else:
      # No common parent directory, archive all paths as-is.
      base_len = 0

    # Prepare the filename for the archive.
    zip_filename, _ = os.path.splitext(os.path.basename(testcase_path))
    zip_filename += _TESTCASE_ARCHIVE_EXTENSION

    # Create the archive.
    zip_path = os.path.join(environment.get_value('INPUT_DIR'), zip_filename)
    with zipfile.ZipFile(zip_path, 'w') as zip_file:
      for file_name in resource_list:
        if os.path.exists(file_name):
          relative_filename = file_name[base_len:]
          zip_file.write(file_name, relative_filename, zipfile.ZIP_DEFLATED)

    try:
      file_handle = open(zip_path, 'rb')
    except OSError:
      logs.log_error('Unable to open testcase archive %s.' % zip_path)
      return None, None, None, None

    archived = True
    absolute_filename = testcase_path[base_len:]

  fuzzed_key = blobs.write_blob(file_handle)
  file_handle.close()

  # Don't need the archive after writing testcase to blobstore.
  if zip_path:
    shell.remove_file(zip_path)

  return fuzzed_key, archived, absolute_filename, zip_filename
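The hand-rolled common-prefix walk above computes the same thing as os.path.commonpath() does for the all-absolute (or all-relative) paths it is given; a tiny standalone check with hypothetical paths:

import os.path

# Hypothetical resource list; resource_list[0] is the testcase itself.
paths = ['/inputs/case/fuzz-0.html', '/inputs/case/resources/helper.js']

# os.path.commonpath() yields the longest common directory prefix, which the
# component-by-component loop above derives manually. (It raises ValueError
# when absolute and relative paths are mixed.)
assert os.path.commonpath(paths) == '/inputs/case'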
Check to see if a test case appears to be flaky.
def mark_unreproducible_if_flaky(testcase, task_name,
                                 potentially_flaky) -> None:
  """Check to see if a test case appears to be flaky."""
  # If this run does not suggest that we are flaky, clear the flag and assume
  # that we are reproducible.
  if not potentially_flaky:
    testcase.set_metadata('potentially_flaky', False)
    return

  # If we have not been marked as potentially flaky in the past, don't mark
  # the test case as unreproducible yet. It is now potentially flaky.
  if not testcase.get_metadata('potentially_flaky'):
    testcase.set_metadata('potentially_flaky', True)

    # In this case, the current task will usually be in a state where it
    # cannot be completed. Recreate it.
    tasks.add_task(task_name, testcase.key.id(), testcase.job_type)
    return

  # At this point, this test case has been flagged as potentially flaky twice.
  # It should be marked as unreproducible. Mark it as unreproducible, and set
  # fields that cannot be populated accordingly.
  if task_name == 'minimize' and not testcase.minimized_keys:
    testcase.minimized_keys = 'NA'

  if task_name in ['minimize', 'impact']:
    testcase.set_impacts_as_na()

  if task_name in ['minimize', 'regression']:
    testcase.regression = 'NA'

  if task_name in ['minimize', 'progression']:
    testcase.fixed = 'NA'

  testcase.one_time_crasher_flag = True
  data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                       'Testcase appears to be flaky')

  # The issue update to flip the reproducibility label is done in the App
  # Engine cleanup cron. This avoids calling the issue tracker APIs from GCE.

  # For unreproducible testcases, it is still beneficial to get component
  # information from the blame task.
  create_blame_task_if_needed(testcase)

  # Let the bisection service know about the flakiness.
  bisection.request_bisection(testcase)
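The flakiness handling above is effectively a two-strike policy: the first flaky observation sets a flag and retries the task, the second marks the testcase unreproducible. A condensed sketch of just that state machine (the dict stands in for the testcase metadata store):

def two_strike_policy(metadata, potentially_flaky):
  """Returns 'reproducible', 'retry', or 'unreproducible'."""
  if not potentially_flaky:
    metadata['potentially_flaky'] = False  # Strike count resets.
    return 'reproducible'
  if not metadata.get('potentially_flaky'):
    metadata['potentially_flaky'] = True  # First strike: retry the task.
    return 'retry'
  return 'unreproducible'  # Second strike: give up on reproducing.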
Creates a blame task if needed.
def create_blame_task_if_needed(testcase):
  """Creates a blame task if needed."""
  # Blame doesn't work for non-chromium projects.
  if not utils.is_chromium():
    return

  # Blame is only applicable to the chromium project; bail out otherwise.
  if not testcase.is_chromium():
    return

  # We cannot run the blame job for custom binaries since we don't have any
  # context on the crash revision and regression range.
  if build_manager.is_custom_binary():
    return

  # Don't send duplicate issues to Predator. This causes issues with metrics
  # tracking and wastes cycles.
  if testcase.status == 'Duplicate':
    return

  create_task = False
  if testcase.one_time_crasher_flag:
    # For unreproducible testcases, it is still beneficial to get component
    # information from the blame task.
    create_task = True
  else:
    # Reproducible testcase.
    # Step 1: Check if the regression task finished. If not, bail out.
    if not testcase.regression:
      return

    # Step 2: Check if the symbolize task is applicable and finished. If not,
    # bail out.
    if build_manager.has_symbolized_builds() and not testcase.symbolized:
      return

    create_task = True

  if create_task:
    tasks.add_task('blame', testcase.key.id(), testcase.job_type)
Creates an impact task if needed.
def create_impact_task_if_needed(testcase):
  """Creates an impact task if needed."""
  # Impact doesn't make sense for non-chromium projects.
  if not utils.is_chromium():
    return

  # Impact is only applicable to the chromium project; bail out otherwise.
  if testcase.project_name != 'chromium':
    return

  # We cannot run the impact job for custom binaries since we don't have any
  # archived production builds for these.
  if build_manager.is_custom_binary():
    return

  tasks.add_task('impact', testcase.key.id(), testcase.job_type)
Creates a minimize task if needed.
def create_minimize_task_if_needed(testcase):
  """Creates a minimize task if needed."""
  tasks.add_task('minimize', testcase.key.id(), testcase.job_type)
Creates a regression task if needed.
def create_regression_task_if_needed(testcase):
  """Creates a regression task if needed."""
  # We cannot run regression job for custom binaries since we don't have any
  # archived builds for previous revisions. We only track the last uploaded
  # custom build.
  if build_manager.is_custom_binary():
    return

  tasks.add_task('regression', testcase.key.id(), testcase.job_type)
Creates variant tasks if needed.
def create_variant_tasks_if_needed(testcase):
  """Creates variant tasks if needed."""
  # TODO(https://b.corp.google.com/issues/328691756): Allow untrusted
  # testcases to only run untrusted variants.
  if testcase.duplicate_of:
    # If another testcase exists with the same params, no need to spend cycles
    # on calculating variants again.
    return

  testcase_id = testcase.key.id()
  project = data_handler.get_project_name(testcase.job_type)
  jobs = data_types.Job.query(data_types.Job.project == project)
  testcase_job_is_engine = environment.is_engine_fuzzer_job(testcase.job_type)
  testcase_job_app_name = None
  if not testcase_job_is_engine:
    testcase_job = (
        data_types.Job.query(data_types.Job.name == testcase.job_type).get())
    testcase_job_environment = testcase_job.get_environment()
    testcase_job_app_name = testcase_job_environment.get('APP_NAME')

  num_variant_tasks = 0
  for job in jobs:
    # The variant needs to be tested in a different job type than ours.
    job_type = job.name
    if testcase.job_type == job_type:
      continue

    # Don't try to reproduce engine fuzzer testcases with blackbox fuzzer
    # testcases and vice versa.
    if testcase_job_is_engine != environment.is_engine_fuzzer_job(job_type):
      continue

    # Skip experimental jobs.
    job_environment = job.get_environment()
    if utils.string_is_true(job_environment.get('EXPERIMENTAL')):
      continue

    # Skip jobs for which variant tasks are disabled.
    if utils.string_is_true(job_environment.get('DISABLE_VARIANT')):
      continue

    if (not testcase_job_is_engine and
        job_environment.get('APP_NAME') != testcase_job_app_name):
      continue

    if not testcase.trusted:
      if (task_utils.is_remotely_executing_utasks() and
          not task_types.is_no_privilege_workload('variant', job_type)):
        continue

    queue = tasks.queue_for_platform(job.platform)
    tasks.add_task('variant', testcase_id, job_type, queue)

    variant = data_handler.get_or_create_testcase_variant(
        testcase_id, job_type)
    variant.status = data_types.TestcaseVariantStatus.PENDING
    variant.put()
    num_variant_tasks += 1
  logs.log(f'Number of variant tasks: {num_variant_tasks}.')
Creates a symbolize task if needed.
def create_symbolize_task_if_needed(testcase):
  """Creates a symbolize task if needed."""
  # We cannot run the symbolize job for custom binaries since we don't have
  # any archived symbolized builds.
  if build_manager.is_custom_binary():
    return

  # Make sure we have at least one symbolized URL pattern defined in the job
  # type.
  if not build_manager.has_symbolized_builds():
    return

  tasks.add_task('symbolize', testcase.key.id(), testcase.job_type)
Create tasks like minimization, regression, impact, progression and stack tasks for a newly generated testcase.
def create_tasks(testcase):
  """Create tasks like minimization, regression, impact, progression and
  stack tasks for a newly generated testcase."""
  # No need to create a progression task. It is automatically created by the
  # cron handler for reproducible testcases.

  # For a non-reproducible crash.
  if testcase.one_time_crasher_flag:
    # For unreproducible testcases, it is still beneficial to get component
    # information from the blame task.
    create_blame_task_if_needed(testcase)
    return

  # For a fully reproducible crash.

  # The MIN environment variable defined in a job definition indicates whether
  # we want to do the heavyweight tasks like minimization, regression, impact,
  # etc. on this testcase. These are usually skipped when we have a large
  # timeout and can't afford to waste more than a couple of hours on these
  # jobs.
  testcase_id = testcase.key.id()
  if environment.get_value('MIN') == 'No':
    testcase = data_handler.get_testcase_by_id(testcase_id)
    testcase.minimized_keys = 'NA'
    testcase.regression = 'NA'
    testcase.set_impacts_as_na()
    testcase.put()
    return

  if environment.is_minimization_supported():
    # For supported environments, just create the minimize task for now. Once
    # minimization is complete, it automatically creates the rest of the
    # needed tasks.
    create_minimize_task_if_needed(testcase)
  else:
    # Environments that don't support minimization skip directly to the other
    # tasks.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    testcase.minimized_keys = 'NA'
    testcase.put()
    create_postminimize_tasks(testcase)
Create assorted tasks needed after minimize task completes.
def create_postminimize_tasks(testcase):
  """Create assorted tasks needed after minimize task completes."""
  create_impact_task_if_needed(testcase)
  create_regression_task_if_needed(testcase)
  create_symbolize_task_if_needed(testcase)
  create_variant_tasks_if_needed(testcase)
This returns True if the uworker_main portion of this task is unprivileged.
def task_main_runs_on_uworker():
  """Returns True if the uworker_main portion of this task is unprivileged."""
  command = environment.get_value('TASK_NAME')
  job = environment.get_value('JOB_NAME')
  return is_remote_utask(command, job)
Unpack a bundled testcase archive and create analyze jobs for each item.
def execute_task(metadata_id, job_type):
  """Unpack a bundled testcase archive and create analyze jobs for each
  item."""
  metadata = ndb.Key(data_types.BundledArchiveMetadata,
                     int(metadata_id)).get()
  if not metadata:
    logs.log_error('Invalid bundle metadata id %s.' % metadata_id)
    return

  bot_name = environment.get_value('BOT_NAME')
  upload_metadata = data_types.TestcaseUploadMetadata.query(
      data_types.TestcaseUploadMetadata.blobstore_key ==
      metadata.blobstore_key).get()
  if not upload_metadata:
    logs.log_error('Invalid upload metadata key %s.' % metadata.blobstore_key)
    return

  job = data_types.Job.query(data_types.Job.name == metadata.job_type).get()
  if not job:
    logs.log_error('Invalid job_type %s.' % metadata.job_type)
    return

  # Update the upload metadata with this bot name.
  upload_metadata.bot_name = bot_name
  upload_metadata.put()

  # We can't use the FUZZ_INPUTS directory since it is constrained by tmpfs
  # limits.
  testcases_directory = environment.get_value('FUZZ_INPUTS_DISK')

  # Retrieve multi-testcase archive.
  archive_path = os.path.join(testcases_directory, metadata.archive_filename)
  if not blobs.read_blob_to_disk(metadata.blobstore_key, archive_path):
    logs.log_error('Could not retrieve archive for bundle %s.' % metadata_id)
    tasks.add_task('unpack', metadata_id, job_type)
    return

  try:
    with archive.open(archive_path) as reader:
      reader.extract_all(testcases_directory)
      # List members while the reader is still open; using it after the `with`
      # block would fail on a closed archive.
      archive_members = reader.list_members()
  except Exception:
    logs.log_error('Could not unpack archive for bundle %s.' % metadata_id)
    tasks.add_task('unpack', metadata_id, job_type)
    return

  # Get additional testcase metadata (if any).
  additional_metadata = None
  if upload_metadata.additional_metadata_string:
    additional_metadata = json.loads(
        upload_metadata.additional_metadata_string)

  archive_state = data_types.ArchiveStatus.NONE
  bundled = True

  for f in archive_members:
    absolute_file_path = os.path.join(testcases_directory, f.name)
    filename = os.path.basename(absolute_file_path)

    # Only files are actual testcases. Skip directories.
    if not os.path.isfile(absolute_file_path):
      continue

    try:
      file_handle = open(absolute_file_path, 'rb')
      blob_key = blobs.write_blob(file_handle)
      file_handle.close()
    except Exception:
      blob_key = None

    if not blob_key:
      logs.log_error(
          'Could not write testcase %s to blobstore.' % absolute_file_path)
      continue

    data_handler.create_user_uploaded_testcase(
        blob_key, metadata.blobstore_key, archive_state,
        metadata.archive_filename, filename, metadata.timeout, job,
        metadata.job_queue, metadata.http_flag, metadata.gestures,
        metadata.additional_arguments, metadata.bug_information,
        metadata.crash_revision, metadata.uploader_email, metadata.platform_id,
        metadata.app_launch_command, metadata.fuzzer_name,
        metadata.overridden_fuzzer_name, metadata.fuzzer_binary_name, bundled,
        upload_metadata.retries, upload_metadata.bug_summary_update_flag,
        upload_metadata.quiet_flag, additional_metadata)

  # The upload metadata for the archive is not needed anymore since we created
  # one for each testcase.
  upload_metadata.key.delete()

  shell.clear_testcase_directories()