Handles libfuzzer minimization task's failure to reproduce the issue.
def handle_libfuzzer_minimization_unreproducible(
    output: uworker_msg_pb2.Output):
  """Handles libfuzzer minimization task's failure to reproduce the issue."""
  testcase = data_handler.get_testcase_by_id(output.uworker_input.testcase_id)
  # Be more lenient with marking testcases as unreproducible when this is a
  # job override.
  is_overriden_job = bool(environment.get_value('ORIGINAL_JOB_NAME'))
  if is_overriden_job:
    _skip_minimization(testcase, 'Unreproducible on overridden job')
  else:
    task_creation.mark_unreproducible_if_flaky(testcase, 'minimize', True)
Handles libfuzzer minimization task failure.
def handle_libfuzzer_minimization_failed(output: uworker_msg_pb2.Output):
  """Handles libfuzzer minimization task failure."""
  testcase = data_handler.get_testcase_by_id(output.uworker_input.testcase_id)
  _skip_minimization(
      testcase,
      'LibFuzzer minimization failed',
      crash_result_dict=output.minimize_task_output.last_crash_result_dict)
Perform final updates on a test case and prepare it for other tasks.
def finalize_testcase(testcase_id, last_crash_result_dict, flaky_stack=False):
  """Perform final updates on a test case and prepare it for other tasks."""
  # Symbolize crash output if we have it.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  if last_crash_result_dict:
    _update_crash_result(testcase, last_crash_result_dict)
  testcase.delete_metadata('redo_minimize', update_testcase=False)

  # Update remaining test case information.
  testcase.flaky_stack = flaky_stack
  if build_manager.is_custom_binary():
    testcase.set_impacts_as_na()
    testcase.regression = 'NA'

  data_handler.update_testcase_comment(testcase, data_types.TaskState.FINISHED)

  # We might have updated the crash state. See if we need to mark this as a
  # duplicate based on other testcases.
  data_handler.handle_duplicate_entry(testcase)
  task_creation.create_postminimize_tasks(testcase)
Postprocess in a trusted bot.
def utask_postprocess(output):
  """Postprocess in a trusted bot."""
  update_testcase(output)
  _cleanup_unused_blobs_from_storage(output)
  if output.error_type != uworker_msg_pb2.ErrorType.NO_ERROR:
    _ERROR_HANDLER.handle(output)
    return

  finalize_testcase(
      output.uworker_input.testcase_id,
      output.minimize_task_output.last_crash_result_dict,
      flaky_stack=output.minimize_task_output.flaky_stack)
Return true if we should attempt a minimization phase.
def should_attempt_phase(testcase, phase):
  """Return true if we should attempt a minimization phase."""
  if (phase == MinimizationPhase.ARGUMENTS and
      environment.is_engine_fuzzer_job()):
    # Should not minimize arguments list for engine based fuzzer jobs.
    return False

  current_phase = testcase.get_metadata(
      'minimization_phase', default=MinimizationPhase.GESTURES)
  return phase >= current_phase
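The phase >= current_phase comparison assumes the phases are ordered values. A minimal stand-in consistent with that ordering (the real MinimizationPhase is defined elsewhere in this module):

import enum

class MinimizationPhase(enum.IntEnum):
  """Assumed ordering: earlier phases compare smaller."""
  GESTURES = 0
  MAIN_FILE = 1
  FILE_LIST = 2
  RESOURCES = 3
  ARGUMENTS = 4

# A testcase whose stored phase is RESOURCES will not redo MAIN_FILE:
assert not (MinimizationPhase.MAIN_FILE >= MinimizationPhase.RESOURCES)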
Minimize the gesture list for a test case.
def minimize_gestures(test_runner, testcase):
  """Minimize the gesture list for a test case."""
  gestures = testcase.gestures
  if gestures:
    gesture_minimizer = delta_minimizer.DeltaMinimizer(
        test_runner.test_with_gestures,
        max_threads=test_runner.threads,
        tokenize=False,
        deadline=test_runner.deadline,
        cleanup_function=process_handler.cleanup_stale_processes,
        single_thread_cleanup_interval=test_runner.cleanup_interval,
        progress_report_function=functools.partial(logs.log))
    gestures = gesture_minimizer.minimize(gestures)

  logs.log(f'Minimized gestures: {str(gestures)}')
  return gestures
Minimize the main test case file.
def minimize_main_file(test_runner, testcase_file_path, data):
  """Minimize the main test case file."""
  if not can_minimize_file(testcase_file_path):
    return data

  get_random_file = functools.partial(get_temporary_file, testcase_file_path)
  data = (
      minimize_file(testcase_file_path, test_runner.test_with_file,
                    get_random_file, data, test_runner.deadline,
                    test_runner.threads, test_runner.cleanup_interval))

  logs.log('Minimized main test file.')
  return data
Minimize the test case files.
def minimize_file_list(test_runner, file_list, input_directory, main_file):
  """Minimize the test case files."""
  if len(file_list) <= 1:
    return file_list

  # TODO(mbarbella): Simplify this with refactoring of setup_testcase.
  offset = len(input_directory) + len(os.path.sep)
  fixed_testcase_file_path = main_file[offset:]

  # As of now, this must be single-threaded.
  file_list_minimizer = basic_minimizers.SinglePassMinimizer(
      test_runner.test_with_files,
      tokenize=False,
      deadline=test_runner.deadline,
      cleanup_function=process_handler.cleanup_stale_processes,
      single_thread_cleanup_interval=test_runner.cleanup_interval,
      progress_report_function=functools.partial(logs.log))

  file_list = file_list_minimizer.minimize(file_list)

  if fixed_testcase_file_path not in file_list:
    file_list.append(fixed_testcase_file_path)

  logs.log(f'Minimized file list: {str(file_list)}')
  return file_list
Minimize a resource for the test case.
def minimize_resource(test_runner, dependency, input_directory, main_file):
  """Minimize a resource for the test case."""
  # TODO(mbarbella): Simplify this with refactoring of setup_testcase.
  offset = len(input_directory) + len(os.path.sep)
  fixed_testcase_file_path = main_file[offset:]

  dependency_absolute_path = os.path.join(input_directory, dependency)

  if (dependency == fixed_testcase_file_path or dependency == main_file or
      not can_minimize_file(dependency_absolute_path)):
    return

  get_temp_file = functools.partial(
      get_temporary_file, dependency_absolute_path, no_modifications=True)
  original_data = utils.get_file_contents_with_fatal_error_on_failure(
      dependency_absolute_path)
  dependency_data = (
      minimize_file(
          dependency,
          test_runner.test_with_defaults,
          get_temp_file,
          original_data,
          test_runner.deadline,
          1,
          test_runner.cleanup_interval,
          delete_temp_files=False))
  utils.write_data_to_file(dependency_data, dependency_absolute_path)
  logs.log(f'Minimized dependency file: {dependency}')
Minimize the argument list for a test case.
def minimize_arguments(test_runner, app_arguments):
  """Minimize the argument list for a test case."""
  argument_minimizer = delta_minimizer.DeltaMinimizer(
      test_runner.test_with_command_line_arguments,
      max_threads=test_runner.threads,
      tokenize=False,
      deadline=test_runner.deadline,
      cleanup_function=process_handler.cleanup_stale_processes,
      single_thread_cleanup_interval=test_runner.cleanup_interval,
      progress_report_function=functools.partial(logs.log))
  reduced_args = argument_minimizer.minimize(app_arguments.split())
  reduced_arg_string = test_runner.get_argument_string(reduced_args)

  return reduced_arg_string
Store all files that make up this testcase.
def store_minimized_testcase(
    testcase: data_types.Testcase, base_directory: str, file_list: List[str],
    file_to_run_data: str, file_to_run: str,
    minimize_task_input: uworker_msg_pb2.MinimizeTaskInput,
    minimize_task_output: uworker_msg_pb2.MinimizeTaskOutput):
  """Store all files that make up this testcase."""
  # Write the main file data.
  utils.write_data_to_file(file_to_run_data, file_to_run)

  # Prepare the file.
  zip_path = None
  if testcase.archive_state:
    if len(file_list) > 1:
      testcase.archive_state |= data_types.ArchiveStatus.MINIMIZED
      minimize_task_output.archive_state = testcase.archive_state
      zip_path = os.path.join(
          environment.get_value('INPUT_DIR'), '%d.zip' % testcase.key.id())
      zip_file = zipfile.ZipFile(zip_path, 'w')
      count = 0
      filtered_file_list = []

      for file_name in file_list:
        absolute_filename = os.path.join(base_directory, file_name)
        is_file = os.path.isfile(absolute_filename)
        if file_to_run_data and is_file and os.path.getsize(
            absolute_filename) == 0 and (os.path.basename(
                absolute_filename).encode('utf-8') not in file_to_run_data):
          continue
        if not os.path.exists(absolute_filename):
          continue
        zip_file.write(absolute_filename, file_name, zipfile.ZIP_DEFLATED)
        if is_file:
          count += 1
          filtered_file_list.append(absolute_filename)

      zip_file.close()
      try:
        if count > 1:
          file_handle = open(zip_path, 'rb')
        else:
          if not filtered_file_list:
            # We minimized everything. The only thing needed to reproduce is
            # the interaction gesture.
            file_path = file_list[0]
            file_handle = open(file_path, 'wb')
            file_handle.close()
          else:
            file_path = filtered_file_list[0]
            file_handle = open(file_path, 'rb')

          testcase.absolute_path = os.path.join(base_directory,
                                                os.path.basename(file_path))
          minimize_task_output.absolute_path = testcase.absolute_path
          testcase.archive_state &= ~data_types.ArchiveStatus.MINIMIZED
          minimize_task_output.archive_state = testcase.archive_state
      except OSError:
        logs.log_error('Unable to open archive for blobstore write.')
        return
    else:
      absolute_filename = os.path.join(base_directory, file_list[0])
      file_handle = open(absolute_filename, 'rb')
      testcase.archive_state &= ~data_types.ArchiveStatus.MINIMIZED
      minimize_task_output.archive_state = testcase.archive_state
  else:
    file_handle = open(file_list[0], 'rb')
    testcase.archive_state &= ~data_types.ArchiveStatus.MINIMIZED
    minimize_task_output.archive_state = testcase.archive_state

  # Store the testcase.
  data = file_handle.read()
  storage.upload_signed_url(data, minimize_task_input.testcase_upload_url)
  minimized_keys = minimize_task_input.testcase_blob_name
  file_handle.close()

  testcase.minimized_keys = minimized_keys
  minimize_task_output.minimized_keys = minimized_keys

  if zip_path:
    shell.remove_file(zip_path)
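The archive_state manipulation above is plain bit-flag arithmetic. A minimal sketch, assuming ArchiveStatus bit values like the following (the real constants live in data_types):

class ArchiveStatus:
  NONE = 0
  FUZZED = 1
  MINIMIZED = 2  # assumed bit value, for illustration only

state = ArchiveStatus.FUZZED
state |= ArchiveStatus.MINIMIZED   # multi-file result: archive is minimized
assert state & ArchiveStatus.MINIMIZED
state &= ~ArchiveStatus.MINIMIZED  # single-file result: clear the flag again
assert not state & ArchiveStatus.MINIMIZED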
Store the partially minimized test and check the deadline.
def check_deadline_exceeded_and_store_partial_minimized_testcase(
    deadline, testcase: data_types.Testcase, input_directory: str, file_list,
    file_to_run_data, main_file_path: str,
    minimize_task_input: uworker_msg_pb2.MinimizeTaskInput,
    minimize_task_output: uworker_msg_pb2.MinimizeTaskOutput) -> bool:
  """Store the partially minimized test and check the deadline."""
  store_minimized_testcase(testcase, input_directory, file_list,
                           file_to_run_data, main_file_path,
                           minimize_task_input, minimize_task_output)

  return time.time() > deadline
Initial check to see how long it takes to reproduce a crash.
def check_for_initial_crash(test_runner, crash_retries, testcase):
  """Initial check to see how long it takes to reproduce a crash."""
  crash_times = []
  flaky_stack = False
  saved_crash_state = None
  saved_security_flag = None
  saved_unsymbolized_crash_state = None

  results = test_runner.execute_parallel_runs(crash_retries)

  for result in results:
    if not result.is_crash():
      continue

    if result.should_ignore():
      continue

    crash_state = result.get_state(symbolized=True)
    security_flag = result.is_security_issue()
    unsymbolized_crash_state = result.get_state(symbolized=False)

    if not unsymbolized_crash_state:
      continue

    if security_flag != testcase.security_flag:
      continue

    crash_times.append(result.crash_time)

    if not saved_crash_state:
      saved_crash_state = crash_state
      saved_security_flag = security_flag
      saved_unsymbolized_crash_state = unsymbolized_crash_state
      continue

    crash_comparer = CrashComparer(crash_state, saved_crash_state)
    if not crash_comparer.is_similar():
      flaky_stack = True

  logs.log(f'Total crash count: {len(crash_times)}/{crash_retries}. '
           f'Flaky: {flaky_stack}. Security: {saved_security_flag}. '
           f'State:\n{saved_crash_state}')

  return saved_unsymbolized_crash_state, flaky_stack, crash_times
Generate a temporary file name in the same directory as |original_file|.
def get_temporary_file_name(original_file):
  """Generate a temporary file name in the same directory as
  |original_file|."""
  directory, basename = os.path.split(original_file)
  basename = basename[-MAX_TEMPORARY_FILE_BASENAME_LENGTH:]

  random_hex = binascii.b2a_hex(os.urandom(16)).decode('utf-8')
  new_file_path = os.path.join(directory, '%s%s' % (random_hex, basename))

  return new_file_path
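For example, with a hypothetical input path, the generated name keeps the directory (so relative resources still resolve) and prefixes 32 random hex characters:

original = '/tmp/inputs/crash.html'  # hypothetical path
temp_name = get_temporary_file_name(original)
# temp_name looks like '/tmp/inputs/<32 hex chars>crash.html'; the basename
# may first be truncated to MAX_TEMPORARY_FILE_BASENAME_LENGTH characters.
assert os.path.dirname(temp_name) == '/tmp/inputs'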
Get a temporary file handle with a name based on an original file name.
def get_temporary_file(original_file, no_modifications=False):
  """Get a temporary file handle with a name based on an original file name."""
  if no_modifications:
    handle = open(original_file, 'wb')
    return handle

  handle = open(get_temporary_file_name(original_file), 'wb')
  return handle
Return the ipc_message_util executable path for the current build.
def get_ipc_message_util_executable():
  """Return the ipc_message_util executable path for the current build."""
  app_directory = environment.get_value('APP_DIR')
  platform = environment.platform()

  try:
    executable = IPC_MESSAGE_UTIL_EXECUTABLE_FOR_PLATFORM[platform]
  except KeyError:
    # Current platform is not supported.
    return None

  return os.path.join(app_directory, executable)
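The lookup table consulted here maps environment.platform() values to executable names. A hypothetical stand-in for the constant (the real mapping is defined with this module's other constants and may differ):

# Hypothetical values for illustration only.
IPC_MESSAGE_UTIL_EXECUTABLE_FOR_PLATFORM = {
    'LINUX': 'ipc_message_util',
    'WINDOWS': 'ipc_message_util.exe',
}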
Use the ipc_message_util utility to create a file for up to |TOKENS_PER_IPCDUMP| tokens.
def create_partial_ipc_dump(tokens, original_file_path):
  """Use the ipc_message_util utility to create a file for up to
  |TOKENS_PER_IPCDUMP| tokens."""
  assert len(tokens) <= TOKENS_PER_IPCDUMP

  token_list = ','.join([str(token) for token in tokens])
  temp_file_path = get_temporary_file_name(original_file_path)

  executable = get_ipc_message_util_executable()
  command_line = shell.get_command_line_from_argument_list(
      [executable, '--in=%s' % token_list, original_file_path, temp_file_path])
  return_code, _, output = process_handler.run_process(
      command_line, testcase_run=False, timeout=IPCDUMP_TIMEOUT)
  if return_code or not os.path.exists(temp_file_path):
    # For some reason, generating the new file failed.
    logs.log_error('Failed to create ipc dump file %s.' % output)
    return None

  return temp_file_path
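Callers must keep each partial dump under |TOKENS_PER_IPCDUMP| tokens (see the assert above). The chunking pattern used by combine_tokens further down looks like this, with an assumed constant value of 10 for illustration:

TOKENS_PER_IPCDUMP = 10  # assumed value, for illustration only
tokens = list(range(23))
chunks = [
    tokens[i:i + TOKENS_PER_IPCDUMP]
    for i in range(0, len(tokens), TOKENS_PER_IPCDUMP)
]
assert [len(chunk) for chunk in chunks] == [10, 10, 3]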
Combines a list of ipcdump files into a single dump.
def combine_ipc_dumps(ipcdumps, original_file_path):
  """Combines a list of ipcdump files into a single dump."""
  input_file_string = ','.join(ipcdumps)
  executable = get_ipc_message_util_executable()
  output_file_path = get_temporary_file_name(original_file_path)
  command_line = shell.get_command_line_from_argument_list(
      [executable, input_file_string, output_file_path])
  return_code, _, output = process_handler.run_process(
      command_line, testcase_run=False, timeout=COMBINED_IPCDUMP_TIMEOUT)

  for ipcdump in ipcdumps:
    shell.remove_file(ipcdump)

  if return_code or not os.path.exists(output_file_path):
    logs.log_error('Failed to create ipc dump file %s.' % output)
    return None

  return output_file_path
Check to see if IPC minimization is supported for the current build.
def supports_ipc_minimization(file_path):
  """Check to see if IPC minimization is supported for the current build."""
  executable = get_ipc_message_util_executable()
  if not executable:
    # IPC fuzzer minimization is not supported on this platform.
    return False

  command_line = shell.get_command_line_from_argument_list(
      [executable, '--dump', '--in=0', file_path])
  return_code, _, output = process_handler.run_process(
      command_line, testcase_run=False, timeout=IPCDUMP_TIMEOUT)

  # If --in is not supported by this version of the ipc_message_util binary,
  # it will exit with a nonzero exit status. Also ensure that the first message
  # is printed in case the build is bad for some other reason.
  # Example output: 0. AutofillHostMsg_DidFillAutofillFormData
  if return_code or not output.startswith('0.'):
    return False

  supports_ipc_minimization.is_supported = True
  return True
Check to see if we support minimization for this file.
def can_minimize_file(file_path):
  """Check to see if we support minimization for this file."""
  # If this is not a binary file, we should be able to minimize it in some way.
  if not utils.is_binary_file(file_path):
    return True

  # Attempt to minimize IPC dumps.
  if file_path.endswith(testcase_manager.IPCDUMP_EXTENSION):
    return supports_ipc_minimization(file_path)

  # Other binary file formats are not supported.
  return False
IPC dump minimization strategy.
def do_ipc_dump_minimization(test_function, get_temp_file, file_path, deadline,
                             threads, cleanup_interval, delete_temp_files):
  """IPC dump minimization strategy."""

  def tokenize(current_file_path):
    """Generate a token list for an IPC fuzzer test case."""
    command_line = shell.get_command_line_from_argument_list(
        [get_ipc_message_util_executable(), '--dump', current_file_path])
    _, _, output = process_handler.run_process(
        command_line, testcase_run=False, timeout=IPCDUMP_TIMEOUT)
    output_lines = output.splitlines()
    if not output_lines:
      return []

    # Each output line starts with the message index followed by a ".", but
    # we are only interested in the total number of messages in the file. To
    # find this, we add one to the index of the final message.
    try:
      last_index = int(output_lines[-1].split('.')[0])
    except ValueError:
      return []

    return list(range(last_index + 1))

  def combine_tokens(tokens):
    """Use the ipc_message_util utility to create a file for these tokens."""
    partial_ipcdumps = []
    for start_index in range(0, len(tokens), TOKENS_PER_IPCDUMP):
      end_index = min(start_index + TOKENS_PER_IPCDUMP, len(tokens))
      current_tokens = tokens[start_index:end_index]
      partial_ipcdumps.append(
          create_partial_ipc_dump(current_tokens, file_path))

    combined_file_path = None
    if len(partial_ipcdumps) > 1:
      combined_file_path = combine_ipc_dumps(partial_ipcdumps, file_path)
    elif len(partial_ipcdumps) == 1:
      combined_file_path = partial_ipcdumps[0]

    if not combined_file_path:
      # This can happen in the case of a timeout or other error. The actual
      # error should already be logged, so no need to do it again here.
      return b''

    # TODO(mbarbella): Allow token combining functions to write files directly.
    handle = open(combined_file_path, 'rb')
    result = handle.read()
    handle.close()

    shell.remove_file(combined_file_path)
    return result

  current_minimizer = delta_minimizer.DeltaMinimizer(
      test_function,
      max_threads=threads,
      deadline=deadline,
      cleanup_function=process_handler.cleanup_stale_processes,
      single_thread_cleanup_interval=cleanup_interval,
      get_temp_file=get_temp_file,
      delete_temp_files=delete_temp_files,
      tokenizer=tokenize,
      token_combiner=combine_tokens,
      progress_report_function=functools.partial(logs.log))
  return current_minimizer.minimize(file_path)
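To make the tokenizer concrete: it derives the token list purely from the message indices in the dump output. A self-contained sketch of that parsing, using the one real message name from the comment in supports_ipc_minimization and one made-up one:

sample_output = ('0. AutofillHostMsg_DidFillAutofillFormData\n'
                 '1. ViewHostMsg_Hypothetical')  # second name is made up
last_index = int(sample_output.splitlines()[-1].split('.')[0])
tokens = list(range(last_index + 1))
assert tokens == [0, 1]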
Javascript minimization strategy.
def do_js_minimization(test_function, get_temp_file, data, deadline, threads,
                       cleanup_interval, delete_temp_files):
  """Javascript minimization strategy."""
  # Start by using a generic line minimizer on the test.
  # Do two line minimizations to make up for the fact that minimizations on
  # bots don't always minimize as much as they can.
  for _ in range(2):
    data = do_line_minimization(test_function, get_temp_file, data, deadline,
                                threads, cleanup_interval, delete_temp_files)

  tokenizer = AntlrTokenizer(JavaScriptLexer)

  current_minimizer = js_minimizer.JSMinimizer(
      test_function,
      max_threads=threads,
      deadline=deadline,
      cleanup_function=process_handler.cleanup_stale_processes,
      single_thread_cleanup_interval=cleanup_interval,
      get_temp_file=get_temp_file,
      delete_temp_files=delete_temp_files,
      tokenizer=tokenizer.tokenize,
      token_combiner=tokenizer.combine,
      progress_report_function=functools.partial(logs.log))

  # Some tokens can't be removed until others have been, so do two passes.
  try:
    for _ in range(2):
      data = current_minimizer.minimize(data)
  except minimizer_errors.AntlrDecodeError:
    data = do_line_minimization(test_function, get_temp_file, data, deadline,
                                threads, cleanup_interval, delete_temp_files)

  # FIXME(mbarbella): Improve the JS minimizer so that this is not necessary.
  # Sometimes, lines that could not have been removed on their own can now be
  # removed since they have already been partially cleaned up.
  return do_line_minimization(test_function, get_temp_file, data, deadline,
                              threads, cleanup_interval, delete_temp_files)
Run libFuzzer testcase, and return the CrashResult.
def _run_libfuzzer_testcase(fuzz_target,
                            testcase,
                            testcase_file_path,
                            crash_retries=1):
  """Run libFuzzer testcase, and return the CrashResult."""
  # Cleanup any existing application instances and temp directories.
  process_handler.cleanup_stale_processes()
  shell.clear_temp_directory()

  if environment.is_trusted_host():
    from clusterfuzz._internal.bot.untrusted_runner import file_host
    file_host.copy_file_to_worker(
        testcase_file_path,
        file_host.rebase_to_worker_root(testcase_file_path))

  test_timeout = environment.get_value('TEST_TIMEOUT',
                                       process_handler.DEFAULT_TEST_TIMEOUT)
  return testcase_manager.test_for_crash_with_retries(
      fuzz_target,
      testcase,
      testcase_file_path,
      test_timeout,
      compare_crash=False,
      crash_retries=crash_retries)
Run the libFuzzer engine.
def run_libfuzzer_engine(tool_name, target_name, arguments, testcase_path,
                         output_path, timeout):
  """Run the libFuzzer engine."""
  arguments = list(arguments)
  if environment.is_trusted_host():
    from clusterfuzz._internal.bot.untrusted_runner import tasks_host
    # TODO(ochang): Remove hardcode.
    return tasks_host.process_testcase('libFuzzer', tool_name, target_name,
                                       arguments, testcase_path, output_path,
                                       timeout)

  target_path = engine_common.find_fuzzer_path(
      environment.get_value('BUILD_DIR'), target_name)
  if not target_path:
    return engine.ReproduceResult([], 0, 0, '')

  engine_impl = libfuzzer_engine.Engine()
  if tool_name == 'minimize':
    func = engine_impl.minimize_testcase
  else:
    assert tool_name == 'cleanse'
    func = engine_impl.cleanse

  return func(target_path, arguments, testcase_path, output_path, timeout)
Run libFuzzer tool to either minimize or cleanse.
def _run_libfuzzer_tool(tool_name: str,
                        testcase: data_types.Testcase,
                        testcase_file_path: str,
                        timeout: int,
                        expected_crash_state: str,
                        minimize_task_input: uworker_msg_pb2.MinimizeTaskInput,
                        fuzz_target: Optional[data_types.FuzzTarget],
                        set_dedup_flags: bool = False):
  """Run libFuzzer tool to either minimize or cleanse."""
  memory_tool_options_var = environment.get_current_memory_tool_var()
  saved_memory_tool_options = environment.get_value(memory_tool_options_var)

  def _set_dedup_flags():
    """Allow libFuzzer to do its own crash comparison during minimization."""
    memory_tool_options = environment.get_memory_tool_options(
        memory_tool_options_var, default_value={})

    memory_tool_options['symbolize'] = 1
    memory_tool_options['dedup_token_length'] = 3

    environment.set_memory_tool_options(memory_tool_options_var,
                                        memory_tool_options)

  def _unset_dedup_flags():
    """Reset memory tool options."""
    # This is needed so that when we re-run, we can symbolize ourselves
    # (ignoring inline frames).
    if saved_memory_tool_options is not None:
      environment.set_value(memory_tool_options_var, saved_memory_tool_options)

  output_file_path = get_temporary_file_name(testcase_file_path)

  fuzzer_display = data_handler.get_fuzzer_display_unprivileged(
      testcase, fuzz_target)
  if set_dedup_flags:
    _set_dedup_flags()

  try:
    result = run_libfuzzer_engine(tool_name, fuzzer_display.target,
                                  minimize_task_input.arguments,
                                  testcase_file_path, output_file_path,
                                  timeout)
  except TimeoutError:
    logs.log_warn('LibFuzzer timed out.')
    return None, None, None

  if set_dedup_flags:
    _unset_dedup_flags()

  if not os.path.exists(output_file_path):
    logs.log_warn(f'LibFuzzer {tool_name} run failed.', output=result.output)
    return None, None, None

  # Ensure that the crash parameters match. It's possible that we will
  # minimize/cleanse to an unrelated bug, such as a timeout.
  crash_result = _run_libfuzzer_testcase(fuzz_target, testcase,
                                         output_file_path)
  state = crash_result.get_symbolized_data()
  security_flag = crash_result.is_security_issue()
  if (security_flag != testcase.security_flag or
      state.crash_state != expected_crash_state):
    logs.log_warn(
        'Ignoring unrelated crash.\n'
        f'State: {state.crash_state} (expected {expected_crash_state})\n'
        f'Security: {security_flag} (expected {testcase.security_flag})\n'
        f'Output: {state.crash_stacktrace}\n')
    return None, None, None

  with open(output_file_path, 'rb') as file_handle:
    data = file_handle.read()
    storage.upload_signed_url(data, minimize_task_input.testcase_upload_url)
    minimized_keys = minimize_task_input.testcase_blob_name

  testcase.minimized_keys = minimized_keys
  return output_file_path, crash_result, minimized_keys
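The dedup flags are ordinary sanitizer options. A sketch of what _set_dedup_flags effectively does, assuming the conventional colon-separated *_OPTIONS encoding (the real parsing and joining live in the environment module):

options = {'redzone': 64}          # pre-existing ASAN_OPTIONS, parsed
options['symbolize'] = 1           # let libFuzzer symbolize its own output
options['dedup_token_length'] = 3  # compare crashes by dedup token
encoded = ':'.join(f'{key}={value}' for key, value in options.items())
assert encoded == 'redzone=64:symbolize=1:dedup_token_length=3'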
Extract necessary data from CrashResult.
def _extract_crash_result(crash_result, command, minimize_task_input):
  """Extract necessary data from CrashResult."""
  if not crash_result:
    raise errors.BadStateError(
        'No crash result was provided to _extract_crash_result')
  min_state = crash_result.get_symbolized_data()
  min_unsymbolized_crash_stacktrace = crash_result.get_stacktrace(
      symbolized=False)
  min_crash_stacktrace = utils.get_crash_stacktrace_output(
      command, min_state.crash_stacktrace, min_unsymbolized_crash_stacktrace)
  return {
      'crash_type': min_state.crash_type,
      'crash_address': min_state.crash_address,
      'crash_state': min_state.crash_state,
      'crash_stacktrace': data_handler.filter_stacktrace(
          min_crash_stacktrace, minimize_task_input.stacktrace_blob_name,
          minimize_task_input.stacktrace_upload_url),
  }
Update testcase with crash result.
def _update_crash_result(testcase, crash_result_dict):
  """Update testcase with crash result."""
  testcase.crash_type = crash_result_dict['crash_type']
  testcase.crash_address = crash_result_dict['crash_address']
  testcase.crash_state = crash_result_dict['crash_state']
  testcase.crash_stacktrace = crash_result_dict['crash_stacktrace']
Skip minimization for a testcase; only called during postprocess.
def _skip_minimization(testcase: data_types.Testcase,
                       message: str,
                       crash_result_dict: Dict[str, str] = None):
  """Skip minimization for a testcase; only called during postprocess."""
  testcase.minimized_keys = testcase.fuzzed_keys
  if crash_result_dict:
    _update_crash_result(testcase, crash_result_dict)

  data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                       message)
  task_creation.create_postminimize_tasks(testcase)
Updates the testcase metadata env with the values set during utask_main.
def _update_testcase_memory_tool_options(testcase: data_types.Testcase,
                                         memory_tool_options: Dict[str, str]):
  """Updates the testcase metadata env with the values set during
  utask_main."""
  env = {}
  for key, value in memory_tool_options.items():
    environment.set_value(key, value)
    env[key] = environment.get_memory_tool_options(key)

  if env:
    testcase.set_metadata('env', env, False)
Use libFuzzer's built-in minimizer where appropriate.
def do_libfuzzer_minimization(
    fuzz_target: Optional[data_types.FuzzTarget],
    minimize_task_input: uworker_msg_pb2.MinimizeTaskInput,
    testcase: data_types.Testcase,
    testcase_file_path: str) -> uworker_msg_pb2.Output:
  """Use libFuzzer's built-in minimizer where appropriate."""
  timeout = environment.get_value('LIBFUZZER_MINIMIZATION_TIMEOUT', 600)
  rounds = environment.get_value('LIBFUZZER_MINIMIZATION_ROUNDS', 5)
  current_testcase_path = testcase_file_path
  last_crash_result = None

  # Get initial crash state.
  initial_crash_result = _run_libfuzzer_testcase(
      fuzz_target, testcase, testcase_file_path,
      crash_retries=None)  # Use default retries.
  if not initial_crash_result.is_crash():
    logs.log_warn('Did not crash. Output:\n' +
                  initial_crash_result.get_stacktrace(symbolized=True))
    return uworker_msg_pb2.Output(
        error_type=uworker_msg_pb2.ErrorType
        .LIBFUZZER_MINIMIZATION_UNREPRODUCIBLE)

  if testcase.security_flag != initial_crash_result.is_security_issue():
    logs.log_warn('Security flag does not match.')
    return uworker_msg_pb2.Output(
        error_type=uworker_msg_pb2.ErrorType
        .LIBFUZZER_MINIMIZATION_UNREPRODUCIBLE)

  expected_state = initial_crash_result.get_symbolized_data()
  logs.log(f'Initial crash state: {expected_state.crash_state}\n')

  # Minimize *_OPTIONS env variable first.
  env = {}
  # A Dict[str, str] potentially containing options_env_var strings and the
  # corresponding minimized_options_string to be parsed and set in testcase
  # metadata during postprocess.
  memory_tool_options = {}
  for tool in environment.SUPPORTED_MEMORY_TOOLS_FOR_OPTIONS:
    options_env_var = tool + '_OPTIONS'
    options = environment.get_memory_tool_options(options_env_var)
    if not options:
      continue

    minimized_options = options.copy()
    for options_name, options_value in options.items():
      if utils.is_oss_fuzz() and options_name in MANDATORY_OSS_FUZZ_OPTIONS:
        continue

      minimized_options.pop(options_name)
      environment.set_memory_tool_options(options_env_var, minimized_options)

      reproduced = False
      for _ in range(MINIMIZE_SANITIZER_OPTIONS_RETRIES):
        crash_result = _run_libfuzzer_testcase(fuzz_target, testcase,
                                               testcase_file_path)
        if (crash_result.is_crash() and crash_result.is_security_issue() ==
            initial_crash_result.is_security_issue() and
            crash_result.get_type() == initial_crash_result.get_type() and
            crash_result.get_state() == initial_crash_result.get_state()):
          reproduced = True
          break

      if reproduced:
        logs.log(
            'Removed unneeded {options_env_var} option: {options_name}'.format(
                options_env_var=options_env_var, options_name=options_name))
      else:
        minimized_options[options_name] = options_value
        logs.log(
            'Skipped needed {options_env_var} option: {options_name}'.format(
                options_env_var=options_env_var, options_name=options_name),
            crash_type=crash_result.get_type(),
            crash_state=crash_result.get_state(),
            security_flag=crash_result.is_security_issue())

    environment.set_memory_tool_options(options_env_var, minimized_options)
    env[options_env_var] = environment.get_memory_tool_options(options_env_var)
    memory_tool_options[options_env_var] = (
        environment.join_memory_tool_options(minimized_options))

  if env:
    testcase.set_metadata('env', env, False)

  minimized_keys = None
  # We attempt minimization multiple times in case one round results in an
  # incorrect state, or runs into another issue such as a slow unit.
  for round_number in range(1, rounds + 1):
    logs.log(f'Minimizing round {round_number}.')
    output_file_path, crash_result, minimized_keys = _run_libfuzzer_tool(
        'minimize',
        testcase,
        current_testcase_path,
        timeout,
        expected_state.crash_state,
        minimize_task_input,
        fuzz_target,
        set_dedup_flags=True)
    if output_file_path:
      last_crash_result = crash_result
      current_testcase_path = output_file_path

  if not last_crash_result:
    repro_command = testcase_manager.get_command_line_for_application(
        file_to_run=testcase_file_path, needs_http=testcase.http_flag)
    crash_result_dict = _extract_crash_result(initial_crash_result,
                                              repro_command,
                                              minimize_task_input)
    minimize_task_output = uworker_msg_pb2.MinimizeTaskOutput(
        last_crash_result_dict=crash_result_dict,
        memory_tool_options=memory_tool_options)
    if minimized_keys:
      minimize_task_output.minimized_keys = str(minimized_keys)
    return uworker_msg_pb2.Output(
        error_type=uworker_msg_pb2.ErrorType.LIBFUZZER_MINIMIZATION_FAILED,
        minimize_task_output=minimize_task_output)

  logs.log('LibFuzzer minimization succeeded.')

  if utils.is_oss_fuzz():
    # Scrub the testcase of non-essential data.
    cleansed_testcase_path, minimized_keys = do_libfuzzer_cleanse(
        fuzz_target, testcase, current_testcase_path,
        expected_state.crash_state, minimize_task_input)
    if cleansed_testcase_path:
      current_testcase_path = cleansed_testcase_path

  # Finalize the test case if we were able to reproduce it.
  repro_command = testcase_manager.get_command_line_for_application(
      file_to_run=current_testcase_path, needs_http=testcase.http_flag)
  last_crash_result_dict = _extract_crash_result(last_crash_result,
                                                 repro_command,
                                                 minimize_task_input)

  # Clean up after we're done.
  shell.clear_testcase_directories()
  minimize_task_output = uworker_msg_pb2.MinimizeTaskOutput(
      last_crash_result_dict=last_crash_result_dict,
      memory_tool_options=memory_tool_options)
  if minimized_keys:
    minimize_task_output.minimized_keys = str(minimized_keys)
  return uworker_msg_pb2.Output(minimize_task_output=minimize_task_output)
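The *_OPTIONS pass above is a greedy drop-and-retest loop: remove one option, re-run the testcase, and keep the removal only if the crash still reproduces. A self-contained sketch of the pattern, with a stubbed reproduction check standing in for _run_libfuzzer_testcase:

def greedy_minimize_options(options, still_reproduces):
  """Greedily drop options whose removal keeps the crash reproducible."""
  minimized = dict(options)
  for name, value in options.items():
    minimized.pop(name)
    if not still_reproduces(minimized):
      # The crash no longer reproduced, so this option was needed; restore it.
      minimized[name] = value
  return minimized

# Toy check: pretend only allocator_may_return_null matters for the repro.
needed = {'allocator_may_return_null'}
result = greedy_minimize_options(
    {'allocator_may_return_null': 1, 'symbolize': 1},
    lambda opts: needed.issubset(opts))
assert result == {'allocator_may_return_null': 1}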
Cleanse testcase using libFuzzer.
def do_libfuzzer_cleanse(fuzz_target: Optional[data_types.FuzzTarget],
                         testcase, testcase_file_path, expected_crash_state,
                         minimize_task_input):
  """Cleanse testcase using libFuzzer."""
  timeout = environment.get_value('LIBFUZZER_CLEANSE_TIMEOUT', 180)
  output_file_path, _, minimized_keys = _run_libfuzzer_tool(
      'cleanse', testcase, testcase_file_path, timeout, expected_crash_state,
      minimize_task_input, fuzz_target)

  if output_file_path:
    logs.log('LibFuzzer cleanse succeeded.')

  return output_file_path, minimized_keys
Line-by-line minimization strategy.
def do_line_minimization(test_function, get_temp_file, data, deadline, threads,
                         cleanup_interval, delete_temp_files):
  """Line-by-line minimization strategy."""
  current_minimizer = delta_minimizer.DeltaMinimizer(
      test_function,
      max_threads=threads,
      deadline=deadline,
      cleanup_function=process_handler.cleanup_stale_processes,
      single_thread_cleanup_interval=cleanup_interval,
      get_temp_file=get_temp_file,
      delete_temp_files=delete_temp_files,
      progress_report_function=functools.partial(logs.log))
  return current_minimizer.minimize(data)
HTML minimization strategy.
def do_html_minimization(test_function, get_temp_file, data, deadline, threads,
                         cleanup_interval, delete_temp_files):
  """HTML minimization strategy."""
  current_minimizer = html_minimizer.HTMLMinimizer(
      test_function,
      max_threads=threads,
      deadline=deadline,
      cleanup_function=process_handler.cleanup_stale_processes,
      single_thread_cleanup_interval=cleanup_interval,
      get_temp_file=get_temp_file,
      delete_temp_files=delete_temp_files,
      progress_report_function=functools.partial(logs.log))
  try:
    return current_minimizer.minimize(data)
  except minimizer_errors.AntlrDecodeError:
    return do_line_minimization(test_function, get_temp_file, data, deadline,
                                threads, cleanup_interval, delete_temp_files)
Attempt to minimize a single file.
def minimize_file(file_path,
                  test_function,
                  get_temp_file,
                  data,
                  deadline,
                  threads,
                  cleanup_interval,
                  delete_temp_files=True):
  """Attempt to minimize a single file."""
  # Specialized minimization strategy for IPC dumps.
  if file_path.endswith(testcase_manager.IPCDUMP_EXTENSION):
    return do_ipc_dump_minimization(test_function, get_temp_file, file_path,
                                    deadline, threads, cleanup_interval,
                                    delete_temp_files)

  # Specialized minimization strategy for javascript.
  if file_path.endswith('.js'):
    return do_js_minimization(test_function, get_temp_file, data, deadline,
                              threads, cleanup_interval, delete_temp_files)

  if file_path.endswith('.html'):
    return do_html_minimization(test_function, get_temp_file, data, deadline,
                                threads, cleanup_interval, delete_temp_files)

  # We could not identify another strategy for this file, so use the default.
  return do_line_minimization(test_function, get_temp_file, data, deadline,
                              threads, cleanup_interval, delete_temp_files)
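Usage-wise, callers of minimize_file only supply the callbacks and budget; the strategy is chosen by file extension. A hypothetical invocation (the two callbacks are placeholders for the TestRunner hooks used elsewhere in this file):

import time

def test_crash(*args, **kwargs):      # placeholder crash-test callback
  return False

def make_temp_file(*args, **kwargs):  # placeholder temporary-file factory
  return open(get_temporary_file_name('/tmp/testcase.js'), 'wb')

minimized = minimize_file(
    '/tmp/testcase.js',          # the .js suffix selects do_js_minimization
    test_crash,
    make_temp_file,
    b'original contents',
    deadline=time.time() + 600,  # ten-minute budget
    threads=2,
    cleanup_interval=30)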
Clears last_progression_min and last_progression_max when clear_min_max_metadata is set to True.
def _maybe_clear_progression_last_min_max_metadata(
    testcase: data_types.Testcase, uworker_output: uworker_msg_pb2.Output):
  """Clears last_progression_min and last_progression_max when
  clear_min_max_metadata is set to True."""
  if not uworker_output.HasField('progression_task_output'):
    return
  task_output = uworker_output.progression_task_output
  if task_output.clear_min_max_metadata:
    testcase.delete_metadata('last_progression_min', update_testcase=False)
    testcase.delete_metadata('last_progression_max', update_testcase=False)
    testcase.put()
A helper method to update the build metadata corresponding to a job_type.
def _update_build_metadata(job_type: str,
                           build_data_list: List[uworker_msg_pb2.BuildData]):
  """A helper method to update the build metadata corresponding to a
  job_type."""
  for build_data in build_data_list:
    testcase_manager.update_build_metadata(job_type, build_data)
Save current fixed range indices in case we die in the middle of the task.
def _save_current_fixed_range_indices(testcase, uworker_output):
  """Save current fixed range indices in case we die in the middle of the
  task."""
  task_output = uworker_output.progression_task_output
  last_progression_min = None
  last_progression_max = None
  if task_output.HasField('last_progression_min'):
    last_progression_min = task_output.last_progression_min
  if task_output.HasField('last_progression_max'):
    last_progression_max = task_output.last_progression_max

  testcase.set_metadata(
      'last_progression_min', last_progression_min, update_testcase=False)
  testcase.set_metadata(
      'last_progression_max', last_progression_max, update_testcase=False)
Job has exceeded the deadline. Recreate the task to pick up where we left off.
def handle_progression_timeout(uworker_output: uworker_msg_pb2.Output):
  """Job has exceeded the deadline. Recreate the task to pick up where we
  left off."""
  testcase_id = uworker_output.uworker_input.testcase_id
  job_type = uworker_output.uworker_input.job_type
  testcase = data_handler.get_testcase_by_id(testcase_id)
  _save_current_fixed_range_indices(testcase, uworker_output)
  data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                       uworker_output.error_message)
  tasks.add_task('progression', testcase_id, job_type)
Handles an expected build that no longer exists, in which case we can't continue. Also clears the progression_pending testcase metadata.
def handle_progression_build_not_found(uworker_output: uworker_msg_pb2.Output):
  """Handles an expected build that no longer exists, in which case we can't
  continue. Also clears the progression_pending testcase metadata."""
  testcase_id = uworker_output.uworker_input.testcase_id
  testcase = data_handler.get_testcase_by_id(testcase_id)
  testcase.fixed = 'NA'
  testcase.open = False
  data_handler.clear_progression_pending(testcase)
  data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                       uworker_output.error_message)
Handles revision list errors, in which case the testcase is closed with error.
def handle_progression_revision_list_error(
    uworker_output: uworker_msg_pb2.Output):
  """Handles revision list errors, in which case the testcase is closed with
  an error."""
  testcase_id = uworker_output.uworker_input.testcase_id
  testcase = data_handler.get_testcase_by_id(testcase_id)
  data_handler.close_testcase_with_error(testcase,
                                         'Failed to fetch revision list')
Cleans up the blob created in preprocess if it wasn't used to store the filtered stacktrace.
def _cleanup_stacktrace_blob_from_storage(output: uworker_msg_pb2.Output):
  """Cleans up the blob created in preprocess if it wasn't used to store the
  filtered stacktrace."""
  if output.HasField('progression_task_output'):
    stacktrace = output.progression_task_output.last_tested_crash_stacktrace
    if stacktrace.startswith(data_types.BLOBSTORE_STACK_PREFIX):
      return

  if not output.uworker_input.progression_task_input.blob_name:
    raise ValueError('blob_name should not be empty here.')

  blob_name = output.uworker_input.progression_task_input.blob_name
  blobs.delete_blob(blob_name)
Handles crash on latest revision, or custom binary crashes. Saves the crash info for non-custom binaries.
def crash_on_latest(uworker_output: uworker_msg_pb2.Output):
  """Handles crash on latest revision, or custom binary crashes. Saves the
  crash info for non-custom binaries."""
  testcase_id = uworker_output.uworker_input.testcase_id
  progression_task_output = uworker_output.progression_task_output
  testcase = data_handler.get_testcase_by_id(testcase_id)
  testcase.last_tested_crash_stacktrace = (
      progression_task_output.last_tested_crash_stacktrace)
  data_handler.update_progression_completion_metadata(
      testcase,
      progression_task_output.crash_revision,
      is_crash=True,
      message=progression_task_output.crash_on_latest_message)

  # This means we are in a custom binary crash; we do not upload crash info.
  if uworker_output.uworker_input.progression_task_input.custom_binary:
    return

  # Since we've verified that the test case is still crashing, clear out any
  # metadata indicating potential flake from previous runs.
  task_creation.mark_unreproducible_if_flaky(testcase, 'progression', False)
Handles the case where we end up with the same min and max revisions during a progression.
def handle_progression_bad_state_min_max(
    uworker_output: uworker_msg_pb2.Output):
  """Handles the case where we end up with the same min and max revisions
  during a progression."""
  testcase = data_handler.get_testcase_by_id(
      uworker_output.uworker_input.testcase_id)
  _save_current_fixed_range_indices(testcase, uworker_output)
  testcase.fixed = 'NA'
  testcase.open = False
  message = ('Fixed testing errored out (min and max revisions are both '
             f'{uworker_output.progression_task_output.min_revision})')
  data_handler.update_progression_completion_metadata(
      testcase,
      uworker_output.progression_task_output.max_revision,
      message=message)

  # Let the bisection service know about the NA status.
  bisection.request_bisection(testcase)
Expected crash revision doesn't crash. Retries once to confirm the result; otherwise marks the testcase unreproducible if it is flaky.
def handle_progression_no_crash(uworker_output: uworker_msg_pb2.Output):
  """Expected crash revision doesn't crash. Retries once to confirm the
  result; otherwise marks the testcase unreproducible if it is flaky."""
  testcase_id = uworker_output.uworker_input.testcase_id
  job_type = uworker_output.uworker_input.job_type
  testcase = data_handler.get_testcase_by_id(testcase_id)

  # Retry once on another bot to confirm our result.
  if data_handler.is_first_attempt_for_task(
      'progression', testcase, reset_after_retry=True):
    tasks.add_task('progression', testcase_id, job_type)
    error_message = (
        uworker_output.error_message +
        ', will retry on another bot to confirm result.')
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         error_message)
    data_handler.update_progression_completion_metadata(
        testcase, uworker_output.progression_task_output.crash_revision)
    return

  data_handler.clear_progression_pending(testcase)
  data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                       uworker_output.error_message)
  task_creation.mark_unreproducible_if_flaky(testcase, 'progression', True)
Handles errors for scenarios where build setup fails.
def handle_progression_build_setup_error(
    uworker_output: uworker_msg_pb2.Output):
  """Handles errors for scenarios where build setup fails."""
  # If we failed to setup a build, it is likely a bot error. We can retry
  # the task in this case.
  testcase_id = uworker_output.uworker_input.testcase_id
  job_type = uworker_output.uworker_input.job_type
  testcase = data_handler.get_testcase_by_id(testcase_id)
  data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                       uworker_output.error_message)
  build_fail_wait = environment.get_value('FAIL_WAIT')
  tasks.add_task(
      'progression', testcase_id, job_type, wait_time=build_fail_wait)
Handles unrecoverable bad build errors.
def handle_progression_bad_build(uworker_output: uworker_msg_pb2.Output):
  """Handles unrecoverable bad build errors."""
  # Though bad builds when narrowing the range are recoverable, certain builds
  # being marked as bad may be unrecoverable. Recoverable ones should not
  # reach this point.
  testcase_id = uworker_output.uworker_input.testcase_id
  testcase = data_handler.get_testcase_by_id(testcase_id)
  error_message = 'Unable to recover from bad build'
  data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                       error_message)
Write the fixed range to BigQuery.
def _write_to_bigquery(testcase, progression_range_start,
                       progression_range_end):
  """Write the fixed range to BigQuery."""
  big_query.write_range(
      table_id='fixeds',
      testcase=testcase,
      range_name='fixed',
      start=progression_range_start,
      end=progression_range_end)
Log process output.
def _log_output(revision, crash_result):
  """Log process output."""
  logs.log(
      f'Testing {revision}',
      revision=revision,
      output=crash_result.get_stacktrace(symbolized=True))
Simplified fixed check for test cases using custom binaries.
def _check_fixed_for_custom_binary(testcase: data_types.Testcase,
                                   testcase_file_path: str,
                                   uworker_input: uworker_msg_pb2.Input):
  """Simplified fixed check for test cases using custom binaries."""
  build_manager.setup_build()

  # 'APP_REVISION' is set during setup_build().
  revision = environment.get_value('APP_REVISION')
  if revision is None:
    logs.log_error('APP_REVISION is not set, setting revision to 0')
    revision = 0

  if not build_manager.check_app_path():
    return uworker_msg_pb2.Output(
        error_message='Build setup failed for custom binary',
        error_type=uworker_msg_pb2.ErrorType.PROGRESSION_BUILD_SETUP_ERROR)

  test_timeout = environment.get_value('TEST_TIMEOUT', 10)
  fuzz_target = testcase_manager.get_fuzz_target_from_input(uworker_input)
  result = testcase_manager.test_for_crash_with_retries(
      fuzz_target,
      testcase,
      testcase_file_path,
      test_timeout,
      http_flag=testcase.http_flag)
  _log_output(revision, result)

  # If this still crashes on the most recent build, it's not fixed. The task
  # will be rescheduled by a cron job and re-attempted eventually.
  if result.is_crash():
    app_path = environment.get_value('APP_PATH')
    command = testcase_manager.get_command_line_for_application(
        testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
    symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
    unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
    stacktrace = utils.get_crash_stacktrace_output(
        command, symbolized_crash_stacktrace, unsymbolized_crash_stacktrace)
    progression_task_input = uworker_input.progression_task_input
    last_tested_crash_stacktrace = data_handler.filter_stacktrace(
        stacktrace, progression_task_input.blob_name,
        progression_task_input.stacktrace_upload_url)
    progression_task_output = uworker_msg_pb2.ProgressionTaskOutput(
        crash_on_latest=True,
        crash_on_latest_message='Still crashes on latest custom build.',
        crash_revision=int(revision),
        last_tested_crash_stacktrace=last_tested_crash_stacktrace)
    return uworker_msg_pb2.Output(
        progression_task_output=progression_task_output)

  progression_task_output = uworker_msg_pb2.ProgressionTaskOutput(
      crash_revision=int(revision))
  return uworker_msg_pb2.Output(
      progression_task_output=progression_task_output)
Tests to see if a test case reproduces in the specified revision. Returns a (result, error) tuple; error is set when something went wrong.
def _testcase_reproduces_in_revision(
    testcase: data_types.Testcase, testcase_file_path: str, job_type: str,
    revision: int, fuzz_target: data_types.FuzzTarget,
    progression_task_output: uworker_msg_pb2.ProgressionTaskOutput):
  """Tests to see if a test case reproduces in the specified revision.

  Returns a (result, error) tuple; error is set when something went wrong."""
  build_manager.setup_build(revision)
  if not build_manager.check_app_path():
    # Let postprocess handle the failure and reschedule the task if needed.
    error_message = f'Build setup failed at r{revision}'
    return None, uworker_msg_pb2.Output(
        error_message=error_message,
        progression_task_output=progression_task_output,
        error_type=uworker_msg_pb2.ErrorType.PROGRESSION_BUILD_SETUP_ERROR)

  build_data = testcase_manager.check_for_bad_build(job_type, revision)
  progression_task_output.build_data_list.append(build_data)
  if build_data.is_bad_build:
    # TODO(alhijazi): This is not logged for recoverable builds.
    error_message = f'Bad build at r{revision}. Skipping'
    return None, uworker_msg_pb2.Output(
        progression_task_output=progression_task_output,
        error_message=error_message,
        error_type=uworker_msg_pb2.ErrorType.PROGRESSION_BAD_BUILD)

  test_timeout = environment.get_value('TEST_TIMEOUT', 10)
  result = testcase_manager.test_for_crash_with_retries(
      fuzz_target,
      testcase,
      testcase_file_path,
      test_timeout,
      http_flag=testcase.http_flag)
  _log_output(revision, result)
  return result, None
Update a test case and other metadata with a fixed range.
def _save_fixed_range(testcase_id, min_revision, max_revision):
  """Update a test case and other metadata with a fixed range."""
  testcase = data_handler.get_testcase_by_id(testcase_id)
  testcase.fixed = f'{min_revision}:{max_revision}'
  testcase.open = False

  data_handler.update_progression_completion_metadata(
      testcase, max_revision, message=f'fixed in range r{testcase.fixed}')
  _write_to_bigquery(testcase, min_revision, max_revision)
Stores reproduction testcase for future regression testing in corpus pruning task.
def _store_testcase_for_regression_testing(
    testcase: data_types.Testcase, testcase_file_path: str,
    progression_task_input: uworker_msg_pb2.ProgressionTaskInput):
  """Stores reproduction testcase for future regression testing in corpus
  pruning task."""
  if testcase.open:
    # Store testcase only after the crash is fixed.
    return

  if not testcase.bug_information:
    # Only store crashes with bugs associated with them.
    return

  if not progression_task_input.HasField('regression_testcase_url'):
    return

  regression_testcase_url = progression_task_input.regression_testcase_url
  with open(testcase_file_path, 'rb') as testcase_file_handle:
    testcase_file = testcase_file_handle.read()

  try:
    storage.upload_signed_url(testcase_file, regression_testcase_url)
    logs.log('Successfully stored testcase for regression testing: ' +
             regression_testcase_url)
  except:
    logs.log_error('Failed to store testcase for regression testing: ' +
                   regression_testcase_url)
Determines and sets the signed regression_testcase_url (if any) in the progression task input. Raises RuntimeError in case of UUID collision on the generated filename.
def _set_regression_testcase_upload_url(
    progression_input: uworker_msg_pb2.ProgressionTaskInput,
    testcase: data_types.Testcase):
  """Determines and sets the signed regression_testcase_url (if any) in the
  progression task input.

  Raises RuntimeError in case of UUID collision on the generated filename.
  """
  fuzz_target = data_handler.get_fuzz_target(testcase.overridden_fuzzer_name)
  if not fuzz_target:
    # No work to do, only applicable for engine fuzzers.
    return

  if not testcase.trusted:
    logs.log_warn('Not saving untrusted testcase to regression corpus.')
    return

  # We probably don't need these checks, but do them anyway since it is
  # important not to mess this up.
  if testcase.uploader_email:
    logs.log_error(
        'Not saving uploaded testcase to regression corpus (uploader set).')
    return

  upload_metadata = data_types.TestcaseUploadMetadata.query(
      data_types.TestcaseUploadMetadata.testcase_id == testcase.key.id()).get()
  if upload_metadata:
    logs.log_error('Not saving uploaded testcase to regression corpus '
                   '(upload metadata present).')
    return

  progression_input.regression_testcase_url = (
      corpus_manager.get_regressions_signed_upload_url(
          fuzz_target.engine, fuzz_target.project_qualified_name()))
Runs preprocessing for progression task.
def utask_preprocess(testcase_id, job_type, uworker_env):
  """Runs preprocessing for progression task."""
  testcase = data_handler.get_testcase_by_id(testcase_id)
  if not testcase:
    return None

  if testcase.fixed:
    logs.log_error(f'Fixed range is already set as {testcase.fixed}, skip.')
    return None

  # Set a flag to indicate we are running progression task. This shows pending
  # status on the testcase report page and avoids conflicting testcase updates
  # by the triage cron.
  testcase.set_metadata('progression_pending', True)
  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)
  blob_name, blob_upload_url = blobs.get_blob_signed_upload_url()
  progression_input = uworker_msg_pb2.ProgressionTaskInput(
      custom_binary=build_manager.is_custom_binary(),
      bad_revisions=build_manager.get_job_bad_revisions(),
      blob_name=blob_name,
      stacktrace_upload_url=blob_upload_url)

  # Setup testcase and its dependencies.
  setup_input = setup.preprocess_setup_testcase(testcase, uworker_env)
  _set_regression_testcase_upload_url(progression_input, testcase)
  uworker_input = uworker_msg_pb2.Input(
      job_type=job_type,
      testcase_id=str(testcase_id),
      uworker_env=uworker_env,
      progression_task_input=progression_input,
      testcase=uworker_io.entity_to_protobuf(testcase),
      setup_input=setup_input)
  testcase_manager.preprocess_testcase_manager(testcase, uworker_input)
  return uworker_input
Attempt to find the revision range where a testcase was fixed.
def find_fixed_range(uworker_input): """Attempt to find the revision range where a testcase was fixed.""" deadline = tasks.get_task_completion_deadline() testcase = uworker_io.entity_from_protobuf(uworker_input.testcase, data_types.Testcase) job_type = uworker_input.job_type setup_input = uworker_input.setup_input fuzz_target = testcase_manager.get_fuzz_target_from_input(uworker_input) _, testcase_file_path, error = setup.setup_testcase(testcase, job_type, setup_input) if error: return error # Custom binaries are handled as special cases. if build_manager.is_custom_binary(): return _check_fixed_for_custom_binary(testcase, testcase_file_path, uworker_input) build_bucket_path = build_manager.get_primary_bucket_path() bad_revisions = uworker_input.progression_task_input.bad_revisions revision_list = build_manager.get_revisions_list( build_bucket_path, bad_revisions, testcase=testcase) if not revision_list: return uworker_msg_pb2.Output( error_type=uworker_msg_pb2.ErrorType.PROGRESSION_REVISION_LIST_ERROR) # Use min, max_index to mark the start and end of revision list that is used # for bisecting the progression range. Set start to the revision where noticed # the crash. Set end to the trunk revision. Also, use min, max from past run # if it timed out. min_revision = testcase.get_metadata('last_progression_min') max_revision = testcase.get_metadata('last_progression_max') progression_task_output = uworker_msg_pb2.ProgressionTaskOutput( clear_min_max_metadata=False, build_data_list=[]) if min_revision or max_revision: # Clear these to avoid using them in next run. If this run fails, then we # should try next run without them to see it succeeds. If this run succeeds, # we should still clear them to avoid capping max revision in next run. progression_task_output.clear_min_max_metadata = True last_tested_revision = testcase.get_metadata('last_tested_crash_revision') known_crash_revision = last_tested_revision or testcase.crash_revision if not min_revision: min_revision = known_crash_revision if not max_revision: max_revision = revisions.get_last_revision_in_list(revision_list) min_index = revisions.find_min_revision_index(revision_list, min_revision) if min_index is None: error_message = f'Build {min_revision} no longer exists.' return uworker_msg_pb2.Output( error_message=error_message, progression_task_output=progression_task_output, error_type=uworker_msg_pb2.ErrorType.PROGRESSION_BUILD_NOT_FOUND) max_index = revisions.find_max_revision_index(revision_list, max_revision) if max_index is None: error_message = f'Build {max_revision} no longer exists.' return uworker_msg_pb2.Output( error_message=error_message, progression_task_output=progression_task_output, error_type=uworker_msg_pb2.ErrorType.PROGRESSION_BUILD_NOT_FOUND) # Check to see if this testcase is still crashing now. If it is, then just # bail out. 
result, error = _testcase_reproduces_in_revision( testcase, testcase_file_path, job_type, max_revision, fuzz_target, progression_task_output) issue_metadata = engine_common.get_fuzz_target_issue_metadata(fuzz_target) issue_metadata = issue_metadata or {} if error is not None: return error if result.is_crash(): logs.log(f'Found crash with same signature on latest' f' revision r{max_revision}.') app_path = environment.get_value('APP_PATH') command = testcase_manager.get_command_line_for_application( testcase_file_path, app_path=app_path, needs_http=testcase.http_flag) symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True) unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False) stacktrace = utils.get_crash_stacktrace_output( command, symbolized_crash_stacktrace, unsymbolized_crash_stacktrace) last_tested_crash_stacktrace = data_handler.filter_stacktrace( stacktrace, uworker_input.progression_task_input.blob_name, uworker_input.progression_task_input.stacktrace_upload_url) crash_on_latest_message = ('Still crashes on latest' f' revision r{max_revision}.') progression_task_output.crash_on_latest = True progression_task_output.crash_on_latest_message = crash_on_latest_message progression_task_output.crash_revision = int(max_revision) progression_task_output.last_tested_crash_stacktrace = ( last_tested_crash_stacktrace) return uworker_msg_pb2.Output( progression_task_output=progression_task_output, issue_metadata=issue_metadata) # Verify that we do crash in the min revision. This is assumed to be true # while we are doing the bisect. result, error = _testcase_reproduces_in_revision( testcase, testcase_file_path, job_type, min_revision, fuzz_target, progression_task_output) if error is not None: return error if result and not result.is_crash(): error_message = f'Minimum revision r{min_revision} did not crash.' progression_task_output.crash_revision = int(min_revision) return uworker_msg_pb2.Output( progression_task_output=progression_task_output, issue_metadata=issue_metadata, error_message=error_message, error_type=uworker_msg_pb2.ErrorType.PROGRESSION_NO_CRASH) last_progression_min = None last_progression_max = None # Start a binary search to find last non-crashing revision. At this point, we # know that we do crash in the min_revision, and do not crash in max_revision. while time.time() < deadline: min_revision = revision_list[min_index] max_revision = revision_list[max_index] # If the min and max revisions are one apart this is as much as we can # narrow the range. if max_index - min_index == 1: testcase.open = False _store_testcase_for_regression_testing( testcase, testcase_file_path, uworker_input.progression_task_input) progression_task_output.min_revision = int(min_revision) progression_task_output.max_revision = int(max_revision) return uworker_msg_pb2.Output( progression_task_output=progression_task_output, issue_metadata=issue_metadata) # Occasionally, we get into this bad state. It seems to be related to test # cases with flaky stacks, but the exact cause is unknown. if max_index - min_index < 1: progression_task_output.min_revision = int(min_revision) progression_task_output.max_revision = int(max_revision) # We could be in a bad state from the beginning of this loop. In that # case, both last_progression_min and last_progression_max would be None. 
      if last_progression_min:
        progression_task_output.last_progression_min = int(
            last_progression_min)
      if last_progression_max:
        progression_task_output.last_progression_max = int(
            last_progression_max)
      return uworker_msg_pb2.Output(
          progression_task_output=progression_task_output,
          issue_metadata=issue_metadata,
          error_type=uworker_msg_pb2.ErrorType.PROGRESSION_BAD_STATE_MIN_MAX)

    # Test the middle revision of our range.
    middle_index = (min_index + max_index) // 2
    middle_revision = revision_list[middle_index]

    result, error = _testcase_reproduces_in_revision(
        testcase, testcase_file_path, job_type, middle_revision, fuzz_target,
        progression_task_output)
    if error is not None:
      if error.error_type == uworker_msg_pb2.ErrorType.PROGRESSION_BAD_BUILD:
        # Skip this revision.
        del revision_list[middle_index]
        max_index -= 1
        continue

      # Only bad build errors are recoverable. Guard against the first
      # iteration, where no checkpoint has been recorded yet and both values
      # are still None.
      if last_progression_min is not None:
        progression_task_output.last_progression_min = int(
            last_progression_min)
      if last_progression_max is not None:
        progression_task_output.last_progression_max = int(
            last_progression_max)
      error.progression_task_output.CopyFrom(progression_task_output)
      return error

    if result.is_crash():
      min_index = middle_index
    else:
      max_index = middle_index

    last_progression_min = int(revision_list[min_index])
    last_progression_max = int(revision_list[max_index])

  # If we've broken out of the loop, we've exceeded the deadline. Recreate the
  # task to pick up where we left off.
  error_message = (f'Timed out, current range '
                   f'r{revision_list[min_index]}:r{revision_list[max_index]}')
  if last_progression_min is not None:
    progression_task_output.last_progression_min = last_progression_min
  if last_progression_max is not None:
    progression_task_output.last_progression_max = last_progression_max
  return uworker_msg_pb2.Output(
      error_message=error_message,
      issue_metadata=issue_metadata,
      progression_task_output=progression_task_output,
      error_type=uworker_msg_pb2.ErrorType.PROGRESSION_TIMEOUT)
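The loop above maintains a standard bisection invariant: the testcase crashes at revision_list[min_index] and no longer crashes at revision_list[max_index], so the fix must lie between them. A minimal, self-contained sketch of that invariant, with a hypothetical deterministic crashes() predicate standing in for _testcase_reproduces_in_revision (build setup, flaky results, and deadlines omitted):

def find_fix_range_sketch(revision_list, crashes):
  # Precondition: crashes(revision_list[0]) and not crashes(revision_list[-1]).
  # Returns (last_crashing_revision, first_fixed_revision).
  min_index, max_index = 0, len(revision_list) - 1
  while max_index - min_index > 1:
    middle_index = (min_index + max_index) // 2
    if crashes(revision_list[middle_index]):
      min_index = middle_index  # The fix landed after the middle revision.
    else:
      max_index = middle_index  # The fix landed at or before it.
  return revision_list[min_index], revision_list[max_index]

# Hypothetical example: the crash was fixed in revision 40.
assert find_fix_range_sketch([10, 20, 30, 40, 50], lambda r: r < 40) == (30, 40)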
Executes the untrusted part of progression_task.
def utask_main(uworker_input):
  """Executes the untrusted part of progression_task."""
  testcase = uworker_io.entity_from_protobuf(uworker_input.testcase,
                                             data_types.Testcase)
  uworker_io.check_handling_testcase_safe(testcase)
  return find_fixed_range(uworker_input)
Trusted: Cleans up after a uworker execute_task, writing anything needed to the db.
def utask_postprocess(output: uworker_msg_pb2.Output):
  """Trusted: Cleans up after a uworker execute_task, writing anything needed
  to the db."""
  testcase = data_handler.get_testcase_by_id(output.uworker_input.testcase_id)
  _maybe_clear_progression_last_min_max_metadata(testcase, output)
  _cleanup_stacktrace_blob_from_storage(output)
  task_output = None

  if output.issue_metadata:
    _update_issue_metadata(testcase, output.issue_metadata)

  if output.HasField('progression_task_output'):
    task_output = output.progression_task_output
    _update_build_metadata(output.uworker_input.job_type,
                           task_output.build_data_list)

  if output.error_type != uworker_msg_pb2.ErrorType.NO_ERROR:
    _ERROR_HANDLER.handle(output)
    return

  if task_output and task_output.crash_on_latest:
    crash_on_latest(output)
    return

  if output.uworker_input.progression_task_input.custom_binary:
    # Retry once on another bot to confirm our results and in case this bot is
    # in a bad state which we didn't catch through our usual means.
    if data_handler.is_first_attempt_for_task(
        'progression', testcase, reset_after_retry=True):
      tasks.add_task('progression', output.uworker_input.testcase_id,
                     output.uworker_input.job_type)
      data_handler.update_progression_completion_metadata(
          testcase, task_output.crash_revision)
      return

    # The bug is fixed.
    testcase.fixed = 'Yes'
    testcase.open = False
    data_handler.update_progression_completion_metadata(
        testcase,
        task_output.crash_revision,
        message='fixed on latest custom build')
    return

  testcase = data_handler.get_testcase_by_id(output.uworker_input.testcase_id)
  if task_output.HasField('min_revision'):
    _save_fixed_range(output.uworker_input.testcase_id,
                      task_output.min_revision, task_output.max_revision)

  # If there is a fine grained bisection service available, request it. Both
  # regression and fixed ranges are requested once. Regression is also
  # requested here as the bisection service may require details that are not
  # yet available (e.g. issue ID) at the time regress_task completes.
  bisection.request_bisection(testcase)
Write the regression range to BigQuery.
def write_to_big_query(testcase, regression_range_start, regression_range_end):
  """Write the regression range to BigQuery."""
  big_query.write_range(
      table_id='regressions',
      testcase=testcase,
      range_name='regression',
      start=regression_range_start,
      end=regression_range_end)
Save the current regression range indices in case we die in the middle of the task.
def _save_current_regression_range_indices(
    task_output: uworker_msg_pb2.RegressionTaskOutput, testcase_id: str):
  """Save the current regression range indices in case we die in the middle of
  the task."""
  if (not task_output.HasField('last_regression_min') or
      not task_output.HasField('last_regression_max')):
    return

  testcase = data_handler.get_testcase_by_id(testcase_id)
  testcase.set_metadata(
      'last_regression_min',
      task_output.last_regression_min,
      update_testcase=False)
  testcase.set_metadata(
      'last_regression_max',
      task_output.last_regression_max,
      update_testcase=False)
  testcase.put()
Saves the regression range and creates blame and impact task if needed.
def save_regression_range(output: uworker_msg_pb2.Output):
  """Saves the regression range and creates blame and impact task if needed."""
  testcase = data_handler.get_testcase_by_id(output.uworker_input.testcase_id)
  regression_range_start = output.regression_task_output.regression_range_start
  regression_range_end = output.regression_task_output.regression_range_end
  testcase.regression = '%d:%d' % (regression_range_start,
                                   regression_range_end)
  data_handler.update_testcase_comment(
      testcase, data_types.TaskState.FINISHED,
      'regressed in range %s' % testcase.regression)

  write_to_big_query(testcase, regression_range_start, regression_range_end)

  # Force impacts update after regression range is updated. In several cases,
  # we might not have a production build to test with, so regression range is
  # used to decide impacts.
  task_creation.create_impact_task_if_needed(testcase)

  # Get blame information using the regression range result.
  task_creation.create_blame_task_if_needed(testcase)
Test to see if a test case reproduces in the specified revision. Returns a (result, error) tuple, where exactly one element is set depending on whether an error occurred.
def _testcase_reproduces_in_revision(
    testcase: data_types.Testcase,
    testcase_file_path: str,
    job_type: str,
    revision: int,
    regression_task_output: uworker_msg_pb2.RegressionTaskOutput,
    fuzz_target: Optional[data_types.FuzzTarget],
    should_log: bool = True,
    min_revision: Optional[int] = None,
    max_revision: Optional[int] = None):
  """Test to see if a test case reproduces in the specified revision. Returns
  a (result, error) tuple, where exactly one element is set depending on
  whether an error occurred."""
  if should_log:
    log_message = 'Testing r%d' % revision
    if min_revision is not None and max_revision is not None:
      log_message += ' (current range %d:%d)' % (min_revision, max_revision)
    logs.log(log_message)

  build_manager.setup_build(revision)
  if not build_manager.check_app_path():
    error_message = f'Build setup failed r{revision}'
    return None, uworker_msg_pb2.Output(
        regression_task_output=regression_task_output,
        error_message=error_message,
        error_type=uworker_msg_pb2.ErrorType.REGRESSION_BUILD_SETUP_ERROR)

  build_data = testcase_manager.check_for_bad_build(job_type, revision)
  regression_task_output.build_data_list.append(build_data)
  if build_data.is_bad_build:
    error_message = f'Bad build at r{revision}. Skipping'
    logs.log_error(error_message)
    return None, uworker_msg_pb2.Output(
        regression_task_output=regression_task_output,
        error_type=uworker_msg_pb2.ErrorType.REGRESSION_BAD_BUILD_ERROR)

  test_timeout = environment.get_value('TEST_TIMEOUT', 10)
  result = testcase_manager.test_for_crash_with_retries(
      fuzz_target,
      testcase,
      testcase_file_path,
      test_timeout,
      http_flag=testcase.http_flag)
  return result.is_crash(), None
Test to see if we regressed near either the min or max revision. Returns a uworker_msg_pb2.Output or None. The uworker_msg_pb2.Output contains either: a. The regression range start/end in case these were correctly determined. b. An error-code in case of error.
def found_regression_near_extreme_revisions(
    testcase: data_types.Testcase,
    testcase_file_path: str,
    job_type: str,
    revision_list: List[int],
    min_index: int,
    max_index: int,
    fuzz_target: Optional[data_types.FuzzTarget],
    regression_task_output: uworker_msg_pb2.RegressionTaskOutput,
) -> Optional[uworker_msg_pb2.Output]:
  """Test to see if we regressed near either the min or max revision.

  Returns a uworker_msg_pb2.Output or None. The uworker_msg_pb2.Output
  contains either:
  a. The regression range start/end in case these were correctly determined.
  b. An error-code in case of error.
  """
  # Test a few of the most recent revisions.
  last_known_crashing_revision = revision_list[max_index]
  for offset in range(1, EXTREME_REVISIONS_TO_TEST + 1):
    current_index = max_index - offset
    if current_index < min_index:
      break

    # If we don't crash in a recent revision, we regressed in one of the
    # commits between the current revision and the one at the next index.
    is_crash, error = _testcase_reproduces_in_revision(
        testcase, testcase_file_path, job_type, revision_list[current_index],
        regression_task_output, fuzz_target)
    if error:
      # Skip this revision only on bad build errors.
      if error.error_type == uworker_msg_pb2.REGRESSION_BAD_BUILD_ERROR:
        continue
      return error

    if not is_crash:
      regression_task_output.regression_range_start = revision_list[
          current_index]
      regression_task_output.regression_range_end = (
          last_known_crashing_revision)
      return uworker_msg_pb2.Output(
          regression_task_output=regression_task_output)

    last_known_crashing_revision = revision_list[current_index]

  # Test to see if we crash in the oldest revision we can run. This is a
  # precondition for our binary search. If we do crash in that revision, it
  # implies that we regressed between the first commit and our first revision,
  # which we represent as 0:|min_revision|.
  for _ in range(EXTREME_REVISIONS_TO_TEST):
    min_revision = revision_list[min_index]
    crashes_in_min_revision, error = _testcase_reproduces_in_revision(
        testcase,
        testcase_file_path,
        job_type,
        min_revision,
        regression_task_output,
        fuzz_target,
        should_log=False)
    if error:
      if error.error_type == uworker_msg_pb2.REGRESSION_BAD_BUILD_ERROR:
        # If we find a bad build, potentially try another.
        if min_index + 1 >= max_index:
          break
        min_index += 1
        continue
      # Only bad build errors are skipped.
      return error

    if crashes_in_min_revision:
      regression_task_output.regression_range_start = 0
      regression_task_output.regression_range_end = min_revision
      return uworker_msg_pb2.Output(
          regression_task_output=regression_task_output)

    return None

  # We should have returned above. If we get here, it means we tried too many
  # builds near the min revision, and they were all bad.
  error_message = ('Tried too many builds near the min revision, and they were'
                   f' all bad. Bad build at r{revision_list[min_index]}')
  logs.log_error(error_message)
  return uworker_msg_pb2.Output(
      regression_task_output=regression_task_output,
      error_type=uworker_msg_pb2.REGRESSION_BAD_BUILD_ERROR)
Ensure that we found the correct min revision by testing earlier ones. Returns a uworker_msg_pb2.Output in case of error or crash, None otherwise.
def validate_regression_range(
    testcase: data_types.Testcase,
    testcase_file_path: str,
    job_type: str,
    revision_list: List[int],
    min_index: int,
    regression_task_output: uworker_msg_pb2.RegressionTaskOutput,
    fuzz_target: Optional[data_types.FuzzTarget],
) -> Optional[uworker_msg_pb2.Output]:
  """Ensure that we found the correct min revision by testing earlier ones.
  Returns a uworker_msg_pb2.Output in case of error or crash, None
  otherwise."""
  earlier_revisions = revision_list[
      min_index - EARLIER_REVISIONS_TO_CONSIDER_FOR_VALIDATION:min_index]
  revision_count = min(len(earlier_revisions), REVISIONS_TO_TEST_FOR_VALIDATION)

  revisions_to_test = random.sample(earlier_revisions, revision_count)
  for revision in revisions_to_test:
    is_crash, error = _testcase_reproduces_in_revision(
        testcase, testcase_file_path, job_type, revision,
        regression_task_output, fuzz_target)
    if error:
      if error.error_type == uworker_msg_pb2.REGRESSION_BAD_BUILD_ERROR:
        continue
      return error

    if is_crash:
      error_message = (
          'Low confidence in regression range. Test case crashes in '
          'revision r%d but not later revision r%d' %
          (revision, revision_list[min_index]))
      return uworker_msg_pb2.Output(
          error_message=error_message,
          error_type=uworker_msg_pb2.
          REGRESSION_LOW_CONFIDENCE_IN_REGRESSION_RANGE,
          regression_task_output=regression_task_output)
  return None
Attempt to find when the testcase regressed.
def find_regression_range(
    uworker_input: uworker_msg_pb2.Input) -> uworker_msg_pb2.Output:
  """Attempt to find when the testcase regressed."""
  testcase = uworker_io.entity_from_protobuf(uworker_input.testcase,
                                             data_types.Testcase)
  job_type = uworker_input.job_type

  deadline = tasks.get_task_completion_deadline()
  fuzz_target = testcase_manager.get_fuzz_target_from_input(uworker_input)

  # Setup testcase and its dependencies.
  _, testcase_file_path, error = setup.setup_testcase(
      testcase, job_type, uworker_input.setup_input)
  if error:
    return error

  build_bucket_path = build_manager.get_primary_bucket_path()
  revision_list = build_manager.get_revisions_list(
      build_bucket_path,
      uworker_input.regression_task_input.bad_revisions,
      testcase=testcase)
  if not revision_list:
    return uworker_msg_pb2.Output(
        error_type=uworker_msg_pb2.ErrorType.REGRESSION_REVISION_LIST_ERROR)

  # Pick up where we left off in a previous run if necessary.
  min_revision = testcase.get_metadata('last_regression_min')
  max_revision = testcase.get_metadata('last_regression_max')
  first_run = not min_revision and not max_revision
  if not min_revision:
    min_revision = revisions.get_first_revision_in_list(revision_list)
  if not max_revision:
    max_revision = testcase.crash_revision

  min_index = revisions.find_min_revision_index(revision_list, min_revision)
  if min_index is None:
    error_message = f'Could not find good min revision <= {min_revision}.'
    return uworker_msg_pb2.Output(
        error_type=uworker_msg_pb2.ErrorType.REGRESSION_BUILD_NOT_FOUND,
        error_message=error_message)
  max_index = revisions.find_max_revision_index(revision_list, max_revision)
  if max_index is None:
    error_message = f'Could not find good max revision >= {max_revision}.'
    return uworker_msg_pb2.Output(
        error_type=uworker_msg_pb2.ErrorType.REGRESSION_BUILD_NOT_FOUND,
        error_message=error_message)

  # Make sure that the revision where we noticed the crash still crashes at
  # that revision. Otherwise, our binary search algorithm won't work correctly.
  max_revision = revision_list[max_index]
  regression_task_output = uworker_msg_pb2.RegressionTaskOutput()
  crashes_in_max_revision, error = _testcase_reproduces_in_revision(
      testcase,
      testcase_file_path,
      job_type,
      max_revision,
      regression_task_output,
      fuzz_target,
      should_log=False)
  if error:
    return error
  if not crashes_in_max_revision:
    error_message = f'Known crash revision {max_revision} did not crash'
    return uworker_msg_pb2.Output(
        regression_task_output=regression_task_output,
        error_message=error_message,
        error_type=uworker_msg_pb2.ErrorType.REGRESSION_NO_CRASH)

  # If we've made it this far, the test case appears to be reproducible.
  regression_task_output.is_testcase_reproducible = True

  # On the first run, check to see if we regressed near either the min or max
  # revision.
  if first_run:
    result = found_regression_near_extreme_revisions(
        testcase, testcase_file_path, job_type, revision_list, min_index,
        max_index, fuzz_target, regression_task_output)
    if result:
      return result

  while time.time() < deadline:
    min_revision = revision_list[min_index]
    max_revision = revision_list[max_index]

    # If the min and max revisions are one apart (or the same, if we only have
    # one build), this is as much as we can narrow the range.
    if max_index - min_index <= 1:
      # Verify that the regression range seems correct, and save it if so.
      error = validate_regression_range(testcase, testcase_file_path, job_type,
                                        revision_list, min_index,
                                        regression_task_output, fuzz_target)
      if error:
        return error

      regression_task_output.regression_range_start = min_revision
      regression_task_output.regression_range_end = max_revision
      return uworker_msg_pb2.Output(
          regression_task_output=regression_task_output)

    middle_index = (min_index + max_index) // 2
    middle_revision = revision_list[middle_index]

    is_crash, error = _testcase_reproduces_in_revision(
        testcase,
        testcase_file_path,
        job_type,
        middle_revision,
        regression_task_output,
        fuzz_target,
        min_revision=min_revision,
        max_revision=max_revision)
    if error:
      if error.error_type == uworker_msg_pb2.REGRESSION_BAD_BUILD_ERROR:
        # Skip this revision.
        del revision_list[middle_index]
        max_index -= 1
        continue
      return error

    if is_crash:
      max_index = middle_index
    else:
      min_index = middle_index

    # Save the current regression range in case the task dies prematurely.
    regression_task_output.last_regression_min = revision_list[min_index]
    regression_task_output.last_regression_max = revision_list[max_index]

  # If we've broken out of the above loop, we timed out. We'll finish by
  # running another regression task and picking up from this point.
  # TODO: Error handling should be moved to postprocess.
  error_message = 'Timed out, current range r%d:r%d' % (
      revision_list[min_index], revision_list[max_index])
  regression_task_output.last_regression_min = revision_list[min_index]
  regression_task_output.last_regression_max = revision_list[max_index]
  return uworker_msg_pb2.Output(
      regression_task_output=regression_task_output,
      error_type=uworker_msg_pb2.REGRESSION_TIMEOUT_ERROR,
      error_message=error_message)
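One subtlety in the loop above is the bad-build skip: deleting revision_list[middle_index] shifts every later element left by one, so max_index must also shrink by one to keep pointing at the same known-crashing revision. A small illustration with hypothetical revision numbers:

revision_list = [10, 20, 30, 40, 50]
min_index, max_index = 0, 4                  # Bisecting the range r10:r50.
middle_index = (min_index + max_index) // 2  # Index 2, i.e. r30.
del revision_list[middle_index]              # r30 turned out to be a bad build.
max_index -= 1                               # r50 now lives at index 3.
assert revision_list[max_index] == 50        # Invariant preserved.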
Prepares inputs for `utask_main()` to run on an untrusted worker. Runs on a trusted worker.
def utask_preprocess(testcase_id: str, job_type: str,
                     uworker_env: Dict) -> Optional[uworker_msg_pb2.Input]:
  """Prepares inputs for `utask_main()` to run on an untrusted worker.

  Runs on a trusted worker.
  """
  testcase = data_handler.get_testcase_by_id(testcase_id)
  if testcase.regression:
    logs.log_error(
        f'Regression range is already set as {testcase.regression}, skip.')
    return None

  # This task is not applicable for custom binaries.
  if build_manager.is_custom_binary():
    testcase.regression = 'NA'
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         'Not applicable for custom binaries')
    return None

  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)

  setup_input = setup.preprocess_setup_testcase(testcase, uworker_env)

  task_input = uworker_msg_pb2.RegressionTaskInput(
      bad_revisions=build_manager.get_job_bad_revisions())

  uworker_input = uworker_msg_pb2.Input(
      testcase_id=testcase_id,
      testcase=uworker_io.entity_to_protobuf(testcase),
      job_type=job_type,
      uworker_env=uworker_env,
      setup_input=setup_input,
      regression_task_input=task_input,
  )
  testcase_manager.preprocess_testcase_manager(testcase, uworker_input)
  return uworker_input
Runs regression task and handles potential errors. Runs on an untrusted worker.
def utask_main(
    uworker_input: uworker_msg_pb2.Input) -> Optional[uworker_msg_pb2.Output]:
  """Runs regression task and handles potential errors.

  Runs on an untrusted worker.
  """
  testcase = uworker_io.entity_from_protobuf(uworker_input.testcase,
                                             data_types.Testcase)
  uworker_io.check_handling_testcase_safe(testcase)
  return find_regression_range(uworker_input)
Handles the output of `utask_main()` run on an untrusted worker. Runs on a trusted worker.
def utask_postprocess(output: uworker_msg_pb2.Output) -> None:
  """Handles the output of `utask_main()` run on an untrusted worker.

  Runs on a trusted worker.
  """
  if output.HasField('regression_task_output'):
    task_output = output.regression_task_output
    _update_build_metadata(output.uworker_input.job_type,
                           task_output.build_data_list)
    _save_current_regression_range_indices(task_output,
                                           output.uworker_input.testcase_id)
    if task_output.is_testcase_reproducible:
      # Clear metadata from previous runs in case the testcase had been marked
      # as potentially flaky.
      testcase = data_handler.get_testcase_by_id(
          output.uworker_input.testcase_id)
      task_creation.mark_unreproducible_if_flaky(testcase, 'regression', False)

  if output.error_type != uworker_msg_pb2.ErrorType.NO_ERROR:
    _ERROR_HANDLER.handle(output)
    return

  save_regression_range(output)
A helper method to update the build metadata corresponding to a job_type.
def _update_build_metadata(job_type: str,
                           build_data_list: List[uworker_msg_pb2.BuildData]):
  """A helper method to update the build metadata corresponding to a
  job_type."""
  for build_data in build_data_list:
    testcase_manager.update_build_metadata(job_type, build_data)
Runs preprocessing for symbolize task.
def utask_preprocess(testcase_id, job_type, uworker_env):
  """Runs preprocessing for symbolize task."""
  # Locate the testcase associated with the id.
  testcase = data_handler.get_testcase_by_id(testcase_id)

  # We should have at least a symbolized debug or release build.
  if not build_manager.has_symbolized_builds():
    return None

  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)

  # Setup testcase and its dependencies.
  setup_input = setup.preprocess_setup_testcase(testcase, uworker_env)
  old_crash_stacktrace = data_handler.get_stacktrace(testcase)
  return uworker_msg_pb2.Input(
      job_type=job_type,
      testcase_id=testcase_id,
      uworker_env=uworker_env,
      setup_input=setup_input,
      testcase=uworker_io.entity_to_protobuf(testcase),
      symbolize_task_input=uworker_msg_pb2.SymbolizeTaskInput(
          old_crash_stacktrace=old_crash_stacktrace))
Execute the untrusted part of a symbolize command.
def utask_main(uworker_input):
  """Execute the untrusted part of a symbolize command."""
  testcase = uworker_io.entity_from_protobuf(uworker_input.testcase,
                                             data_types.Testcase)
  uworker_io.check_handling_testcase_safe(testcase)
  job_type = uworker_input.job_type
  setup_input = uworker_input.setup_input
  _, testcase_file_path, error = setup.setup_testcase(testcase, job_type,
                                                      setup_input)
  if error:
    return error

  # Initialize variables.
  old_crash_stacktrace = (
      uworker_input.symbolize_task_input.old_crash_stacktrace)
  sym_crash_address = testcase.crash_address
  sym_crash_state = testcase.crash_state
  sym_redzone = DEFAULT_REDZONE
  warmup_timeout = environment.get_value('WARMUP_TIMEOUT')

  # Decide which build revision to use.
  if testcase.crash_stacktrace == 'Pending':
    # This usually happens when someone clicked the 'Update stacktrace from
    # trunk' button on the testcase details page. In this case, we are forced
    # to use trunk. No revision -> trunk build.
    build_revision = None
  else:
    build_revision = testcase.crash_revision

  # Set up a custom or regular build based on revision.
  build = build_manager.setup_build(build_revision)

  # Get crash revision used in setting up build.
  crash_revision = environment.get_value('APP_REVISION')

  if not build_manager.check_app_path():
    return uworker_msg_pb2.Output(
        error_message='Build setup failed',
        error_type=uworker_msg_pb2.ErrorType.SYMBOLIZE_BUILD_SETUP_ERROR)

  # ASAN tool settings (if the tool is used).
  # See if we can get better stacks with higher redzone sizes.
  # A UAF might actually turn out to be OOB read/write with a bigger redzone.
  if environment.tool_matches('ASAN', job_type) and testcase.security_flag:
    redzone = MAX_REDZONE
    while redzone >= MIN_REDZONE:
      logs.log(f'Trying to reproduce crash with ASAN redzone size {redzone}.')
      # Apply the redzone size currently being probed.
      environment.reset_current_memory_tool_options(
          redzone_size=redzone, disable_ubsan=testcase.disable_ubsan)

      process_handler.terminate_stale_application_instances()
      command = testcase_manager.get_command_line_for_application(
          testcase_file_path, needs_http=testcase.http_flag)
      return_code, crash_time, output = (
          process_handler.run_process(
              command, timeout=warmup_timeout, gestures=testcase.gestures))
      crash_result = CrashResult(return_code, crash_time, output)

      if crash_result.is_crash() and 'AddressSanitizer' in output:
        state = crash_result.get_symbolized_data()
        security_flag = crash_result.is_security_issue()

        if crash_analyzer.ignore_stacktrace(state.crash_stacktrace):
          logs.log(
              f'Skipping crash with ASAN redzone size {redzone}: ' +
              'stack trace should be ignored.',
              stacktrace=state.crash_stacktrace)
        elif security_flag != testcase.security_flag:
          logs.log(f'Skipping crash with ASAN redzone size {redzone}: ' +
                   f'mismatched security flag: old = {testcase.security_flag}, '
                   f'new = {security_flag}')
        elif state.crash_type != testcase.crash_type:
          logs.log(f'Skipping crash with ASAN redzone size {redzone}: ' +
                   f'mismatched crash type: old = {testcase.crash_type}, '
                   f'new = {state.crash_type}')
        elif state.crash_state == sym_crash_state:
          logs.log(f'Skipping crash with ASAN redzone size {redzone}: ' +
                   f'same crash state = {sym_crash_state}')
        else:
          logs.log(f'Using crash with larger ASAN redzone size {redzone}: ' +
                   f'old crash address = {sym_crash_address}, ' +
                   f'new crash address = {state.crash_address}, ' +
                   f'old crash state = {sym_crash_state}, ' +
                   f'new crash state = {state.crash_state}')
          sym_crash_address = state.crash_address
          sym_crash_state = state.crash_state
          sym_redzone = redzone
          old_crash_stacktrace = state.crash_stacktrace
          break
      redzone /= 2

  # We no longer need this build, delete it to save some disk space. We will
  # download a symbolized release build to perform the symbolization.
  build.delete()

  # We should have at least a symbolized debug or a release build.
  symbolized_builds = build_manager.setup_symbolized_builds(crash_revision)
  if (not symbolized_builds or
      (not build_manager.check_app_path() and
       not build_manager.check_app_path('APP_PATH_DEBUG'))):
    return uworker_msg_pb2.Output(
        error_message='Build setup failed',
        error_type=uworker_msg_pb2.ErrorType.SYMBOLIZE_BUILD_SETUP_ERROR)

  # Increase malloc_context_size to get all stack frames. Default is 30.
  environment.reset_current_memory_tool_options(
      redzone_size=sym_redzone,
      malloc_context_size=STACK_FRAME_COUNT,
      symbolize_inline_frames=True,
      disable_ubsan=testcase.disable_ubsan)

  # TSAN tool settings (if the tool is used).
  if environment.tool_matches('TSAN', job_type):
    environment.set_tsan_max_history_size()

  # Do the symbolization if supported by this application.
  result, sym_crash_stacktrace = (
      get_symbolized_stacktraces(testcase_file_path, testcase,
                                 old_crash_stacktrace, sym_crash_state))
  symbolize_task_output = uworker_msg_pb2.SymbolizeTaskOutput(
      crash_type=testcase.crash_type,
      crash_address=sym_crash_address,
      crash_state=sym_crash_state,
      crash_stacktrace=data_handler.filter_stacktrace(sym_crash_stacktrace),
      symbolized=result,
      crash_revision=int(crash_revision))

  if result:
    build_url = environment.get_value('BUILD_URL')
    if build_url:
      symbolize_task_output.build_url = str(build_url)

  # Switch current directory before builds cleanup.
  root_directory = environment.get_value('ROOT_DIR')
  os.chdir(root_directory)

  # Cleanup symbolized builds which are space-heavy.
  symbolized_builds.delete()

  return uworker_msg_pb2.Output(symbolize_task_output=symbolize_task_output)
Use the symbolized builds to generate an updated stacktrace.
def get_symbolized_stacktraces(testcase_file_path, testcase,
                               old_crash_stacktrace, expected_state):
  """Use the symbolized builds to generate an updated stacktrace."""
  # Initialize variables.
  app_path = environment.get_value('APP_PATH')
  app_path_debug = environment.get_value('APP_PATH_DEBUG')
  long_test_timeout = environment.get_value('WARMUP_TIMEOUT')
  retry_limit = environment.get_value('FAIL_RETRIES')
  symbolized = False

  debug_build_stacktrace = ''
  release_build_stacktrace = old_crash_stacktrace

  # Symbolize using the debug build first so that the debug build stacktrace
  # comes after the more important release build stacktrace.
  if app_path_debug:
    for _ in range(retry_limit):
      process_handler.terminate_stale_application_instances()
      command = testcase_manager.get_command_line_for_application(
          testcase_file_path,
          app_path=app_path_debug,
          needs_http=testcase.http_flag)
      return_code, crash_time, output = (
          process_handler.run_process(
              command, timeout=long_test_timeout, gestures=testcase.gestures))
      crash_result = CrashResult(return_code, crash_time, output)

      if crash_result.is_crash():
        state = crash_result.get_symbolized_data()

        if crash_analyzer.ignore_stacktrace(state.crash_stacktrace):
          continue

        unsymbolized_crash_stacktrace = crash_result.get_stacktrace(
            symbolized=False)
        debug_build_stacktrace = utils.get_crash_stacktrace_output(
            command,
            state.crash_stacktrace,
            unsymbolized_crash_stacktrace,
            build_type='debug')
        symbolized = True
        break

  # Symbolize using the release build.
  if app_path:
    for _ in range(retry_limit):
      process_handler.terminate_stale_application_instances()
      command = testcase_manager.get_command_line_for_application(
          testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
      return_code, crash_time, output = (
          process_handler.run_process(
              command, timeout=long_test_timeout, gestures=testcase.gestures))
      crash_result = CrashResult(return_code, crash_time, output)

      if crash_result.is_crash():
        state = crash_result.get_symbolized_data()

        if crash_analyzer.ignore_stacktrace(state.crash_stacktrace):
          continue

        if state.crash_state != expected_state:
          continue

        # Release stack's security flag has to match the symbolized release
        # stack's security flag.
        security_flag = crash_result.is_security_issue()
        if security_flag != testcase.security_flag:
          continue

        unsymbolized_crash_stacktrace = crash_result.get_stacktrace(
            symbolized=False)
        release_build_stacktrace = utils.get_crash_stacktrace_output(
            command,
            state.crash_stacktrace,
            unsymbolized_crash_stacktrace,
            build_type='release')
        symbolized = True
        break

  stacktrace = release_build_stacktrace
  if debug_build_stacktrace:
    stacktrace += '\n\n' + debug_build_stacktrace

  return symbolized, stacktrace
Handle the output from utask_main.
def utask_postprocess(output):
  """Handle the output from utask_main."""
  if output.error_type != uworker_msg_pb2.ErrorType.NO_ERROR:
    _ERROR_HANDLER.handle(output)
    return

  symbolize_task_output = output.symbolize_task_output

  # Update crash parameters.
  testcase = data_handler.get_testcase_by_id(output.uworker_input.testcase_id)
  testcase.crash_type = symbolize_task_output.crash_type
  testcase.crash_address = symbolize_task_output.crash_address
  testcase.crash_state = symbolize_task_output.crash_state
  testcase.crash_stacktrace = symbolize_task_output.crash_stacktrace

  if not symbolize_task_output.symbolized:
    data_handler.update_testcase_comment(
        testcase, data_types.TaskState.ERROR,
        'Unable to reproduce crash, skipping stacktrace update')
  else:
    # Switch build url to use the less-optimized symbolized build with better
    # stacktrace.
    if symbolize_task_output.build_url:
      testcase.set_metadata(
          'build_url', symbolize_task_output.build_url, update_testcase=False)

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.FINISHED)

  testcase.symbolized = True
  testcase.crash_revision = symbolize_task_output.crash_revision
  testcase.put()

  # We might have updated the crash state. See if we need to mark the testcase
  # as a duplicate based on other testcases.
  data_handler.handle_duplicate_entry(testcase)

  task_creation.create_blame_task_if_needed(testcase)
Generates a new input file name.
def generate_new_input_file_name() -> str:
  """Generates a new input file name."""
  return str(uuid.uuid4()).lower()
Returns a GCS path for uworker I/O.
def get_uworker_input_gcs_path() -> str:
  """Returns a GCS path for uworker I/O."""
  # Inspired by blobs.write_blob.
  io_bucket = storage.uworker_input_bucket()
  io_file_name = generate_new_input_file_name()
  if storage.get(storage.get_cloud_storage_file_path(io_bucket, io_file_name)):
    raise RuntimeError(f'UUID collision found: {io_file_name}.')
  return f'/{io_bucket}/{io_file_name}'
Returns a signed upload URL for the uworker to upload the output and a GCS url for the tworker to download the output. Make sure we can infer the actual input since the output is not trusted.
def get_uworker_output_urls(input_gcs_path: str) -> Tuple[str, str]:
  """Returns a signed upload URL for the uworker to upload the output and a
  GCS url for the tworker to download the output. Make sure we can infer the
  actual input since the output is not trusted."""
  gcs_path = uworker_input_path_to_output_path(input_gcs_path)
  # Note that the signed upload URL can't be directly downloaded from.
  return storage.get_signed_upload_url(gcs_path), gcs_path
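uworker_input_path_to_output_path is referenced here but not shown. A plausible sketch of the mapping, assuming (this is an assumption, not the actual implementation) that inputs and outputs live under the same object name in parallel buckets, which is what lets the trusted worker infer the input location from an untrusted output:

def uworker_input_path_to_output_path(input_gcs_path: str) -> str:
  # Hypothetical sketch: swap the input bucket for the output bucket, keeping
  # the object name, so output paths deterministically mirror input paths.
  # storage.uworker_output_bucket() is assumed to exist alongside
  # storage.uworker_input_bucket().
  return input_gcs_path.replace(storage.uworker_input_bucket(),
                                storage.uworker_output_bucket(), 1)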
Returns a signed download URL for the uworker to download the input and a GCS url for the tworker to upload it (this happens first).
def get_uworker_input_urls():
  """Returns a signed download URL for the uworker to download the input and a
  GCS url for the tworker to upload it (this happens first)."""
  gcs_path = get_uworker_input_gcs_path()
  return storage.get_signed_download_url(gcs_path), gcs_path
Uploads input for the untrusted portion of a task.
def upload_uworker_input(uworker_input: bytes, gcs_path: str):
  """Uploads input for the untrusted portion of a task."""
  storage.write_data(uworker_input, gcs_path)
Deserializes input for the untrusted part of a task.
def deserialize_uworker_input(
    serialized_uworker_input: bytes) -> uworker_msg_pb2.Input:
  """Deserializes input for the untrusted part of a task."""
  uworker_input_proto = uworker_msg_pb2.Input()
  try:
    uworker_input_proto.ParseFromString(serialized_uworker_input)
  except google.protobuf.message.DecodeError:
    logs.log_error('Cannot decode uworker msg.')
    raise task_utils.UworkerMsgParseError('Cannot decode uworker msg.')
  return uworker_input_proto
Serializes and returns |uworker_input| as a binary protobuf string.
def serialize_uworker_input(uworker_input: uworker_msg_pb2.Input) -> bytes:
  """Serializes and returns |uworker_input| as a binary protobuf string."""
  return uworker_input.SerializeToString()
Serializes input for the untrusted portion of a task.
def serialize_and_upload_uworker_input(
    uworker_input: uworker_msg_pb2.Input) -> Tuple[str, str]:
  """Serializes input for the untrusted portion of a task."""
  signed_input_download_url, input_gcs_url = get_uworker_input_urls()
  # Get URLs for the uworker's output. We need a signed upload URL so it can
  # write its output. Also get a download URL in case the caller wants to read
  # the output.
  signed_output_upload_url, output_gcs_url = get_uworker_output_urls(
      input_gcs_url)

  assert not uworker_input.HasField('uworker_output_upload_url')
  uworker_input.uworker_output_upload_url = signed_output_upload_url

  serialized_uworker_input = uworker_input.SerializeToString()
  upload_uworker_input(serialized_uworker_input, input_gcs_url)

  return signed_input_download_url, output_gcs_url
Downloads and deserializes the input to the uworker from the signed download URL.
def download_and_deserialize_uworker_input(
    uworker_input_download_url: str) -> uworker_msg_pb2.Input:
  """Downloads and deserializes the input to the uworker from the signed
  download URL."""
  data = storage.download_signed_url(uworker_input_download_url)
  return deserialize_uworker_input(data)
Serializes uworker's output for deserializing by deserialize_uworker_output and consumption by postprocess_task.
def serialize_uworker_output(
    uworker_output_obj: uworker_msg_pb2.Output) -> bytes:
  """Serializes uworker's output for deserializing by
  deserialize_uworker_output and consumption by postprocess_task."""
  return uworker_output_obj.SerializeToString()
Serializes |uworker_output| and uploads it to |upload_url|.
def serialize_and_upload_uworker_output(uworker_output: uworker_msg_pb2.Output,
                                        upload_url: str):
  """Serializes |uworker_output| and uploads it to |upload_url|."""
  serialized_uworker_output = uworker_output.SerializeToString()
  storage.upload_signed_url(serialized_uworker_output, upload_url)
Downloads and deserializes uworker output.
def download_and_deserialize_uworker_output(
    output_url: str) -> uworker_msg_pb2.Output:
  """Downloads and deserializes uworker output."""
  serialized_uworker_output = storage.read_data(output_url)
  uworker_output = deserialize_uworker_output(serialized_uworker_output)

  # Now download the input, which is stored securely so that the uworker
  # cannot tamper with it.
  uworker_input = download_input_based_on_output_url(output_url)
  uworker_output.uworker_input.CopyFrom(uworker_input)
  return uworker_output
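Putting the pieces together, a condensed sketch of the intended handshake between trusted and untrusted workers, using only the functions defined above (error handling omitted; uworker_input is any populated uworker_msg_pb2.Input):

# Trusted side: serialize and upload the input; keep the output GCS path.
input_download_url, output_gcs_url = serialize_and_upload_uworker_input(
    uworker_input)

# Untrusted side (conceptually on another machine): fetch the input, run the
# task, and upload the output to the signed URL embedded in the input.
fetched_input = download_and_deserialize_uworker_input(input_download_url)
# ... utask_main(fetched_input) runs, and its output is uploaded via
# serialize_and_upload_uworker_output(...).

# Trusted side again: collect the output; the trusted input copy is attached.
uworker_output = download_and_deserialize_uworker_output(output_gcs_url)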
Helper function to convert entity to protobuf format.
def entity_to_protobuf(entity: ndb.Model) -> entity_pb2.Entity:
  """Helper function to convert entity to protobuf format."""
  return model._entity_to_protobuf(entity)  # pylint: disable=protected-access
Converts `entity_proto` to the `ndb.Model` of type `model_type` it encodes. Raises: AssertionError: if `entity_proto` does not encode a model of type `model_type`
def entity_from_protobuf(entity_proto: entity_pb2.Entity,
                         model_type: Type[T]) -> T:
  """Converts `entity_proto` to the `ndb.Model` of type `model_type` it
  encodes.

  Raises:
    AssertionError: if `entity_proto` does not encode a model of type
      `model_type`
  """
  entity = model._entity_from_protobuf(entity_proto)  # pylint: disable=protected-access
  assert isinstance(entity, model_type)
  return entity
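A minimal usage sketch of the round trip, assuming a Testcase can be constructed in memory like any ndb model (values are hypothetical):

testcase = data_types.Testcase(crash_type='Heap-buffer-overflow')
proto = entity_to_protobuf(testcase)      # ndb.Model -> entity_pb2.Entity
restored = entity_from_protobuf(proto, data_types.Testcase)
assert restored.crash_type == 'Heap-buffer-overflow'

# Passing the wrong model type trips the isinstance assertion:
# entity_from_protobuf(proto, data_types.FuzzTarget)  # AssertionError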
Exits when the current task execution model is trusted but the testcase is untrusted. This will allow uploading testcases to trusted jobs (e.g. Mac) more safely.
def check_handling_testcase_safe(testcase):
  """Exits when the current task execution model is trusted but the testcase
  is untrusted. This will allow uploading testcases to trusted jobs (e.g. Mac)
  more safely."""
  if testcase.trusted:
    return
  if not environment.get_value('UNTRUSTED_UTASK'):
    # TODO(https://b.corp.google.com/issues/328691756): Change this to
    # log_fatal_and_exit once we are handling untrusted tasks properly.
    logs.log_warn(f'Cannot handle {testcase.key.id()} in trusted task.')
Return a testcase entity for variant task use. This changes the fuzz target params for a particular fuzzing engine.
def _get_variant_testcase_for_job(testcase, job_type):
  """Return a testcase entity for variant task use. This changes the fuzz
  target params for a particular fuzzing engine."""
  if testcase.job_type == job_type:
    # Update stack operation on same testcase.
    return testcase

  if not environment.is_engine_fuzzer_job(testcase.job_type):
    # For blackbox fuzzer testcases, there is no change of fuzzer required.
    return testcase

  engine_name = environment.get_engine_for_job(job_type)
  project = data_handler.get_project_name(job_type)
  binary_name = testcase.get_metadata('fuzzer_binary_name')
  fully_qualified_fuzzer_name = data_types.fuzz_target_fully_qualified_name(
      engine_name, project, binary_name)

  variant_testcase = data_types.clone_entity(testcase)
  variant_testcase.key = testcase.key
  variant_testcase.fuzzer_name = engine_name
  variant_testcase.overridden_fuzzer_name = fully_qualified_fuzzer_name
  variant_testcase.job_type = job_type

  # Remove put() method to avoid updates. DO NOT REMOVE THIS.
  variant_testcase.put = lambda: None

  return variant_testcase
Run a test case with a different job type to see if it reproduces.
def utask_preprocess(testcase_id, job_type, uworker_env):
  """Run a test case with a different job type to see if it reproduces."""
  testcase = data_handler.get_testcase_by_id(testcase_id)
  uworker_io.check_handling_testcase_safe(testcase)
  if (environment.is_engine_fuzzer_job(testcase.job_type) !=
      environment.is_engine_fuzzer_job(job_type)):
    # We should never reach here. But in case we do, we should bail out as
    # otherwise we will run into exceptions.
    return None

  # Use a cloned testcase entity with different fuzz target parameters for
  # a different fuzzing engine.
  original_job_type = testcase.job_type
  testcase = _get_variant_testcase_for_job(testcase, job_type)
  setup_input = setup.preprocess_setup_testcase(
      testcase, uworker_env, with_deps=False)
  variant_input = uworker_msg_pb2.VariantTaskInput(
      original_job_type=original_job_type)
  uworker_input = uworker_msg_pb2.Input(
      job_type=job_type,
      testcase=uworker_io.entity_to_protobuf(testcase),
      uworker_env=uworker_env,
      testcase_id=testcase_id,
      variant_task_input=variant_input,
      setup_input=setup_input,
  )
  testcase_manager.preprocess_testcase_manager(testcase, uworker_input)
  return uworker_input
The main part of the variant task. Downloads the testcase and build, and checks whether the build can reproduce the error.
def utask_main(uworker_input):
  """The main part of the variant task. Downloads the testcase and build, and
  checks whether the build can reproduce the error."""
  testcase = uworker_io.entity_from_protobuf(uworker_input.testcase,
                                             data_types.Testcase)
  if environment.is_engine_fuzzer_job(testcase.job_type):
    # Remove put() method to avoid updates. DO NOT REMOVE THIS.
    # Repeat this because the in-memory executor may allow puts.
    # TODO(metzman): Remove this when we use batch.
    testcase.put = lambda: None

  # Setup testcase and its dependencies.
  _, testcase_file_path, error = setup.setup_testcase(
      testcase, uworker_input.job_type, uworker_input.setup_input)
  if error:
    return error

  # Set up a custom or regular build. We explicitly omit the crash revision
  # since we want to test against the latest build here.
  try:
    build_manager.setup_build()
  except errors.BuildNotFoundError:
    logs.log_warn('Matching build not found.')
    return uworker_msg_pb2.Output(
        error_type=uworker_msg_pb2.ErrorType.UNHANDLED)

  # Check if we have an application path. If not, our build failed to set up
  # correctly.
  if not build_manager.check_app_path():
    return uworker_msg_pb2.Output(
        error_type=uworker_msg_pb2.ErrorType.VARIANT_BUILD_SETUP)

  # Disable gestures if we're running on a different platform from that of
  # the original test case.
  use_gestures = testcase.platform == environment.platform().lower()

  # Reproduce the crash.
  app_path = environment.get_value('APP_PATH')
  command = testcase_manager.get_command_line_for_application(
      testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
  test_timeout = environment.get_value('TEST_TIMEOUT', 10)
  revision = environment.get_value('APP_REVISION')
  fuzz_target = testcase_manager.get_fuzz_target_from_input(uworker_input)
  try:
    result = testcase_manager.test_for_crash_with_retries(
        fuzz_target,
        testcase,
        testcase_file_path,
        test_timeout,
        http_flag=testcase.http_flag,
        use_gestures=use_gestures,
        compare_crash=False)
  except testcase_manager.TargetNotFoundError:
    logs.log_warn('Could not find target in build, probably does not exist.')
    return uworker_msg_pb2.Output(
        error_type=uworker_msg_pb2.ErrorType.UNHANDLED)

  if result.is_crash() and not result.should_ignore():
    crash_state = result.get_state()
    crash_type = result.get_type()
    security_flag = result.is_security_issue()

    gestures = testcase.gestures if use_gestures else None
    one_time_crasher_flag = not testcase_manager.test_for_reproducibility(
        fuzz_target, testcase_file_path, crash_type, crash_state,
        security_flag, test_timeout, testcase.http_flag, gestures)
    if one_time_crasher_flag:
      status = data_types.TestcaseVariantStatus.FLAKY
    else:
      status = data_types.TestcaseVariantStatus.REPRODUCIBLE

    crash_comparer = CrashComparer(crash_state, testcase.crash_state)
    is_similar = (
        crash_comparer.is_similar() and
        security_flag == testcase.security_flag)

    unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
    symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
    crash_stacktrace_output = utils.get_crash_stacktrace_output(
        command, symbolized_crash_stacktrace, unsymbolized_crash_stacktrace)
  else:
    status = data_types.TestcaseVariantStatus.UNREPRODUCIBLE
    is_similar = False
    crash_type = None
    crash_state = None
    security_flag = False
    crash_stacktrace_output = 'No crash occurred.'

  # Regular case of variant analysis.
  variant_task_output = uworker_msg_pb2.VariantTaskOutput()
  variant_task_output.status = status
  variant_task_output.revision = int(revision)
  if crash_type is not None:
    variant_task_output.crash_type = crash_type
  if crash_state is not None:
    variant_task_output.crash_state = crash_state
  variant_task_output.security_flag = bool(security_flag)
  variant_task_output.is_similar = bool(is_similar)
  variant_task_output.platform = environment.platform().lower()

  return uworker_msg_pb2.Output(
      variant_task_output=variant_task_output,
      crash_stacktrace_output=crash_stacktrace_output)
Handle the output from utask_main.
def utask_postprocess(output):
  """Handle the output from utask_main."""
  if output.error_type != uworker_msg_pb2.ErrorType.NO_ERROR:
    _ERROR_HANDLER.handle(output)
    return

  testcase = data_handler.get_testcase_by_id(output.uworker_input.testcase_id)
  if environment.is_engine_fuzzer_job(output.uworker_input.job_type):
    # Remove put() method to avoid updates. DO NOT REMOVE THIS.
    testcase.put = lambda: None

  if (output.uworker_input.variant_task_input.original_job_type ==
      output.uworker_input.job_type):
    # This case happens when someone clicks the 'Update last tested stacktrace
    # using trunk build' button.
    testcase.last_tested_crash_stacktrace = (
        data_handler.filter_stacktrace(output.crash_stacktrace_output))
    testcase.set_metadata(
        'last_tested_crash_revision',
        output.variant_task_output.revision,
        update_testcase=True)
  else:
    # Explicitly skipping the crash stacktrace for now, as it makes entities
    # larger and we plan to use only crash parameters in the UI.
    variant = data_handler.get_or_create_testcase_variant(
        output.uworker_input.testcase_id, output.uworker_input.job_type)
    variant_task_output = output.variant_task_output
    variant.status = variant_task_output.status
    variant.revision = variant_task_output.revision
    if variant_task_output.HasField('crash_type'):
      variant.crash_type = variant_task_output.crash_type
    else:
      variant.crash_type = None
    if variant_task_output.HasField('crash_state'):
      variant.crash_state = variant_task_output.crash_state
    else:
      variant.crash_state = None
    variant.security_flag = variant_task_output.security_flag
    variant.is_similar = variant_task_output.is_similar
    variant.platform = variant_task_output.platform
    variant.put()
Converts all values in |uworker_env| to str types. ClusterFuzz parses env var values so that the type implied by the value (which in every OS I've seen is a string), is the Python type of the value. E.g. if "DO_BLAH=1" in the environment, environment.get_value('DO_BLAH') is 1, not '1'. This is dangerous when using protos because the environment is a proto map, and values in these can only have one type, which in this case is string. Therefore we must make sure values in uworker_envs are always strings so we don't try to save an int to a string map.
def ensure_uworker_env_type_safety(uworker_env):
  """Converts all values in |uworker_env| to str types.

  ClusterFuzz parses env var values so that the type implied by the value
  (which in every OS I've seen is a string), is the Python type of the value.
  E.g. if "DO_BLAH=1" in the environment, environment.get_value('DO_BLAH') is
  1, not '1'. This is dangerous when using protos because the environment is a
  proto map, and values in these can only have one type, which in this case is
  string. Therefore we must make sure values in uworker_envs are always
  strings so we don't try to save an int to a string map."""
  for k in uworker_env:
    uworker_env[k] = str(uworker_env[k])
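A short illustration of why this matters (values are hypothetical): ClusterFuzz may hold the parsed int 1 for an env var, and assigning that into a proto map<string, string> raises a TypeError, so everything is coerced to str up front:

uworker_env = {'DO_BLAH': 1, 'APP_ARGS': '--some-flag'}
ensure_uworker_env_type_safety(uworker_env)
assert uworker_env == {'DO_BLAH': '1', 'APP_ARGS': '--some-flag'}
# uworker_msg_pb2.Input(uworker_env=uworker_env) is now safe to construct.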
Shared logic for preprocessing between preprocess_no_io and the I/O tworker_preprocess.
def _preprocess(utask_module, task_argument, job_type, uworker_env,
                recorder: _MetricRecorder):
  """Shared logic for preprocessing between preprocess_no_io and the I/O
  tworker_preprocess."""
  ensure_uworker_env_type_safety(uworker_env)
  set_uworker_env(uworker_env)

  recorder.set_task_details(utask_module, job_type, environment.platform())

  logs.log('Starting utask_preprocess: %s.' % utask_module)
  uworker_input = utask_module.utask_preprocess(task_argument, job_type,
                                                uworker_env)
  if not uworker_input:
    logs.log_error('No uworker_input returned from preprocess')
    return None

  logs.log('Preprocess finished.')

  task_payload = environment.get_value('TASK_PAYLOAD')
  if task_payload:
    uworker_input.uworker_env['INITIAL_TASK_PAYLOAD'] = task_payload

  uworker_input.preprocess_start_time.FromNanoseconds(recorder.start_time_ns)

  assert not uworker_input.module_name
  uworker_input.module_name = utask_module.__name__

  return uworker_input
Executes the preprocessing step of the utask |utask_module| and returns the serialized output.
def tworker_preprocess_no_io(utask_module, task_argument, job_type,
                             uworker_env):
  """Executes the preprocessing step of the utask |utask_module| and returns
  the serialized output."""
  with _MetricRecorder(_Subtask.PREPROCESS, _Mode.QUEUE) as recorder:
    uworker_input = _preprocess(utask_module, task_argument, job_type,
                                uworker_env, recorder)
    if not uworker_input:
      return None

    return uworker_io.serialize_uworker_input(uworker_input)
Executes the main part of a utask on the uworker (locally if not using remote executor).
def uworker_main_no_io(utask_module, serialized_uworker_input):
  """Executes the main part of a utask on the uworker (locally if not using
  remote executor)."""
  with _MetricRecorder(_Subtask.UWORKER_MAIN, _Mode.QUEUE) as recorder:
    logs.log('Starting utask_main: %s.' % utask_module)
    uworker_input = uworker_io.deserialize_uworker_input(
        serialized_uworker_input)

    set_uworker_env(uworker_input.uworker_env)
    uworker_input.uworker_env.clear()

    recorder.set_task_details(utask_module, uworker_input.job_type,
                              environment.platform(),
                              uworker_input.preprocess_start_time)

    uworker_output = utask_module.utask_main(uworker_input)
    if uworker_output is None:
      return None

    return uworker_io.serialize_uworker_output(uworker_output)
Executes the postprocess step on the trusted (t)worker (in this case it is the same bot as the uworker).
def tworker_postprocess_no_io(utask_module, uworker_output, uworker_input):
  """Executes the postprocess step on the trusted (t)worker (in this case it
  is the same bot as the uworker)."""
  with _MetricRecorder(_Subtask.POSTPROCESS, _Mode.QUEUE) as recorder:
    uworker_output = uworker_io.deserialize_uworker_output(uworker_output)

    # Do this to simulate out-of-band tamper-proof storage of the input.
    uworker_input = uworker_io.deserialize_uworker_input(uworker_input)
    uworker_output.uworker_input.CopyFrom(uworker_input)

    set_uworker_env(uworker_output.uworker_input.uworker_env)

    recorder.set_task_details(utask_module, uworker_input.job_type,
                              environment.platform(),
                              uworker_input.preprocess_start_time)

    utask_module.utask_postprocess(uworker_output)
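Taken together, the three *_no_io functions let a single bot run a whole utask in process. A condensed sketch of the expected call order, where utask_module is any module exposing the utask_preprocess/utask_main/utask_postprocess hooks:

serialized_input = tworker_preprocess_no_io(utask_module, task_argument,
                                            job_type, uworker_env)
if serialized_input is not None:
  serialized_output = uworker_main_no_io(utask_module, serialized_input)
  if serialized_output is not None:
    # The raw input bytes stand in for tamper-proof out-of-band storage.
    tworker_postprocess_no_io(utask_module, serialized_output,
                              serialized_input)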
Executes the preprocessing step of the utask |utask_module| and returns the signed download URL for the uworker's input and the (unsigned) download URL for its output.
def tworker_preprocess(utask_module, task_argument, job_type, uworker_env):
  """Executes the preprocessing step of the utask |utask_module| and returns
  the signed download URL for the uworker's input and the (unsigned) download
  URL for its output."""
  with _MetricRecorder(_Subtask.PREPROCESS, _Mode.BATCH) as recorder:
    uworker_input = _preprocess(utask_module, task_argument, job_type,
                                uworker_env, recorder)
    if not uworker_input:
      # Bail if preprocessing failed since we can't proceed.
      return None

    # Write the uworker's input to GCS and get the URL to download the input
    # in case the caller needs it.
    # Return both the uworker input signed download URL for the remote
    # executor to pass to the batch job and for the local executor to download
    # locally, and the uworker output download URL for the local executor to
    # download the output after local execution of `utask_main`.
    return uworker_io.serialize_and_upload_uworker_input(uworker_input)
Sets all env vars in |uworker_env| in the actual environment.
def set_uworker_env(uworker_env: dict) -> None:
  """Sets all env vars in |uworker_env| in the actual environment."""
  for key, value in uworker_env.items():
    environment.set_value(key, value)
Executes the main part of a utask on the uworker (locally if not using remote executor).
def uworker_main(input_download_url) -> bool:
  """Executes the main part of a utask on the uworker (locally if not using
  remote executor)."""
  with _MetricRecorder(_Subtask.UWORKER_MAIN, _Mode.BATCH) as recorder:
    uworker_input = uworker_io.download_and_deserialize_uworker_input(
        input_download_url)
    uworker_output_upload_url = uworker_input.uworker_output_upload_url
    uworker_input.ClearField('uworker_output_upload_url')

    set_uworker_env(uworker_input.uworker_env)
    uworker_input.uworker_env.clear()

    utask_module = get_utask_module(uworker_input.module_name)
    recorder.set_task_details(utask_module, uworker_input.job_type,
                              environment.platform(),
                              uworker_input.preprocess_start_time)

    logs.log('Starting utask_main: %s.' % utask_module)
    uworker_output = utask_module.utask_main(uworker_input)

    uworker_io.serialize_and_upload_uworker_output(uworker_output,
                                                   uworker_output_upload_url)
    logs.log('Finished uworker_main.')
    return True
The entrypoint for a uworker.
def uworker_bot_main():
  """The entrypoint for a uworker."""
  logs.log('Starting utask_main on untrusted worker.')
  input_download_url = environment.get_value('UWORKER_INPUT_DOWNLOAD_URL')
  uworker_main(input_download_url)
  return 0