response
stringlengths
1
33.1k
instruction
stringlengths
22
582k
Corrects obvious errors such as missing quotes in a dictionary.
def correct_if_needed(dict_path):
  """Fix obvious mistakes (e.g. missing quotes) in a dictionary file, in place.

  No-ops when the path is empty or the file does not exist. The file is only
  rewritten when at least one line actually changed.
  """
  if not dict_path or not os.path.exists(dict_path):
    return

  original = utils.read_data_from_file(
      dict_path, eval_data=False).decode('utf-8')

  fixed_lines = (
      _fix_dictionary_line(line, dict_path) for line in original.splitlines())
  corrected = ''.join(line + '\n' for line in fixed_lines)

  # End of file newlines are inconsistent in dictionaries, so compare with
  # trailing newlines stripped to avoid spurious rewrites.
  if corrected.rstrip('\n') != original.rstrip('\n'):
    utils.write_data_to_file(corrected, dict_path)
Pick a generator to generate new testcases before fuzzing or return Generator.NONE if no generator selected.
def select_generator(strategy_pool, fuzzer_path):
  """Pick a generator to generate new testcases before fuzzing or return
  Generator.NONE if no generator selected."""
  # Generators are unsupported for library fuzzing and on Fuchsia.
  if environment.is_lib() or environment.platform() == 'FUCHSIA':
    return Generator.NONE

  # We can't use radamsa binary on Windows. Disable ML for now until we know it
  # works on Win.
  # These generators don't produce testcases that LPM fuzzers can use.
  on_windows = environment.platform() == 'WINDOWS'
  if on_windows or is_lpm_fuzz_target(fuzzer_path):
    return Generator.NONE

  if strategy_pool.do_strategy(strategy.CORPUS_MUTATION_RADAMSA_STRATEGY):
    return Generator.RADAMSA

  return Generator.NONE
Generate new testcase mutations, using existing corpus directory or other methods. Returns true if mutations are successfully generated using radamsa. A false return signifies either no generator use or unsuccessful generation of testcase mutations.
def generate_new_testcase_mutations(corpus_directory,
                                    new_testcase_mutations_directory,
                                    candidate_generator):
  """Generate new testcase mutations, using existing corpus directory or other
  methods.

  Returns true if mutations are successfully generated using radamsa. A false
  return signifies either no generator use or unsuccessful generation of
  testcase mutations."""
  generation_timeout = get_new_testcase_mutations_timeout()
  file_count_before = shell.get_directory_file_count(
      new_testcase_mutations_directory)

  # Radamsa is currently the only generator that produces mutations here.
  if candidate_generator == Generator.RADAMSA:
    generate_new_testcase_mutations_using_radamsa(
        corpus_directory, new_testcase_mutations_directory, generation_timeout)

  # Success iff at least one new file appeared in the output directory.
  file_count_after = shell.get_directory_file_count(
      new_testcase_mutations_directory)
  return file_count_after > file_count_before
Get the name of a file mutated by radamsa.
def get_radamsa_output_filename(initial_filename, i):
  """Get the name of a file mutated by radamsa."""
  # Don't add the radamsa prefix to a file that already has it to avoid hitting
  # filename/path length limits.
  match = RADAMSA_FILENAME_REGEX.search(initial_filename)
  base_filename = match.group(1) if match else initial_filename

  prefix = 'radamsa-%05d-' % (i + 1)

  # FIXME: AFL will still break if the filename is near 255 chars since it
  # naively appends. AFL needs to rename every file to a sensible length (not
  # just those created by radamsa).
  truncated = base_filename[:FILENAME_LENGTH_LIMIT - len(prefix)]
  return prefix + truncated
Generate new testcase mutations based on Radamsa.
def generate_new_testcase_mutations_using_radamsa(
    corpus_directory, new_testcase_mutations_directory, generation_timeout):
  """Generate new testcase mutations based on Radamsa.

  Runs the radamsa binary repeatedly over randomly chosen corpus files,
  writing mutated outputs into |new_testcase_mutations_directory| until
  either RADAMSA_MUTATIONS iterations complete or |generation_timeout|
  seconds elapse.
  """
  radamsa_path = get_radamsa_path()
  if not radamsa_path:
    # Mutations using radamsa are not supported on current platform, bail out.
    return

  radamsa_runner = new_process.ProcessRunner(radamsa_path)
  files_list = shell.get_files_list(corpus_directory)
  # Only mutate inputs within the size limit; oversized seeds are skipped.
  filtered_files_list = [
      f for f in files_list if os.path.getsize(f) <= CORPUS_INPUT_SIZE_LIMIT
  ]
  if not filtered_files_list:
    # No mutations to do on an empty corpus or one with very large files.
    return

  old_corpus_size = shell.get_directory_file_count(
      new_testcase_mutations_directory)
  # Soft deadline for the whole generation loop (checked after each mutation).
  expected_completion_time = time.time() + generation_timeout

  for i in range(RADAMSA_MUTATIONS):
    original_file_path = random_choice(filtered_files_list)
    original_filename = os.path.basename(original_file_path)
    output_path = os.path.join(
        new_testcase_mutations_directory,
        get_radamsa_output_filename(original_filename, i))

    result = radamsa_runner.run_and_wait(
        ['-o', output_path, original_file_path], timeout=RADAMSA_TIMEOUT)

    if (os.path.exists(output_path) and
        os.path.getsize(output_path) > CORPUS_INPUT_SIZE_LIMIT):
      # Skip large files to avoid further mutations and impact fuzzing
      # efficiency.
      shell.remove_file(output_path)
    elif result.return_code or result.timed_out:
      # Best-effort: a single failed mutation is logged, not fatal.
      logs.log_warn(
          'Radamsa failed to mutate or timed out.', output=result.output)

    # Check if we exceeded our timeout. If yes, do no more mutations and break.
    if time.time() > expected_completion_time:
      break

  new_corpus_size = shell.get_directory_file_count(
      new_testcase_mutations_directory)
  logs.log('Added %d tests using Radamsa mutations.' %
           (new_corpus_size - old_corpus_size))
Return path to radamsa binary for current platform.
def get_radamsa_path():
  """Return path to radamsa binary for current platform, or None when the
  platform has no bundled radamsa binary."""
  bin_directory_path = os.path.join(
      os.path.dirname(os.path.realpath(__file__)), 'bin')

  # Only Linux and Mac ship a radamsa binary.
  platform_subdirs = {'LINUX': 'linux', 'MAC': 'mac'}
  subdir = platform_subdirs.get(environment.platform())
  if subdir is None:
    return None

  return os.path.join(bin_directory_path, subdir, 'radamsa')
Get the timeout for new testcase mutations.
def get_new_testcase_mutations_timeout():
  """Get the timeout (in seconds) for new testcase mutations."""
  default_mutations_timeout = 10 * 60  # 10 minutes.
  return get_overridable_timeout(default_mutations_timeout,
                                 'MUTATIONS_TIMEOUT_OVERRIDE')
Returns the current timestamp. Needed for mocking.
def current_timestamp():
  """Returns the current timestamp. Needed for mocking."""
  now = time.time()
  return now
Returns a strategy weight based on env variable |FUZZING_STRATEGIES|
def get_strategy_probability(strategy_name, default):
  """Returns a strategy weight based on env variable |FUZZING_STRATEGIES|"""
  fuzzing_strategies = environment.get_value('FUZZING_STRATEGIES')

  # Unset or malformed (non-dict) value: fall back to the default weight.
  if not isinstance(fuzzing_strategies, dict):
    return default

  # A strategy absent from an explicit mapping is disabled (weight 0).
  return fuzzing_strategies.get(strategy_name, 0.0)
Decide if we want to do something with the given probability.
def decide_with_probability(probability):
  """Decide if we want to do something with the given probability."""
  rng = random.SystemRandom()
  return rng.random() < probability
Obtain a random number.
def get_probability():
  """Obtain a random number in [0, 1) from a cryptographic source."""
  rng = random.SystemRandom()
  return rng.random()
Get testcase run for stats.
def get_testcase_run(stats, fuzzer_command):
  """Build a TestcaseRun stats record for this fuzzing session."""
  revision = fuzzer_utils.get_build_revision()
  job_name = environment.get_value('JOB_NAME')

  # fuzzer name is filled by fuzz_task.
  testcase_run = fuzzer_stats.TestcaseRun(None, job_name, revision,
                                          current_timestamp())

  testcase_run['command'] = fuzzer_command
  if stats is not None:
    testcase_run.update(stats)
  return testcase_run
Dump BigQuery stats.
def dump_big_query_data(stats, testcase_file_path, fuzzer_command):
  """Dump BigQuery stats alongside the testcase on disk."""
  run_record = get_testcase_run(stats, fuzzer_command)
  fuzzer_stats.TestcaseRun.write_to_disk(run_record, testcase_file_path)
Find the fuzzer path with the given name.
def find_fuzzer_path(build_directory, fuzzer_name):
  """Find the fuzzer path with the given name.

  Returns None when the build directory is missing or the target is not found.
  """
  if not build_directory:
    # Grey-box fuzzers might not have the build directory for a particular job
    # configuration when doing variant task testing (e.g. Android on-device
    # fuzz target might not exist on host). In this case, treat it similar to
    # target not found by returning None.
    logs.log_warn('No build directory found for fuzzer: %s' % fuzzer_name)
    return None

  if environment.platform() == 'FUCHSIA':
    # Fuchsia targets are not on disk.
    return fuzzer_name

  if environment.is_android_kernel():
    return os.path.join(build_directory, 'syzkaller', 'bin', 'syz-manager')

  # TODO(ochang): This is necessary for legacy testcases, which include the
  # project prefix in arguments. Remove this in the near future.
  project_name = environment.get_value('PROJECT_NAME')
  legacy_name_prefix = project_name + '_' if project_name else ''

  fuzzer_filename = environment.get_executable_filename(fuzzer_name)
  matches = (
      os.path.join(root, filename)
      for root, _, files in shell.walk(build_directory)
      for filename in files
      if (legacy_name_prefix + filename == fuzzer_name or
          filename == fuzzer_filename))
  found = next(matches, None)
  if found is not None:
    return found

  # This is an expected case when doing regression testing with old builds
  # that do not have that fuzz target. It can also happen when a host sends a
  # message to an untrusted worker that just restarted and lost information on
  # build directory.
  logs.log_warn('Fuzzer: %s not found in build_directory: %s.' %
                (fuzzer_name, build_directory))
  return None
Return shell quoted command string.
def get_command_quoted(command):
  """Return shell quoted command string."""
  quoted_parts = [shlex.quote(token) for token in command]
  return ' '.join(quoted_parts)
Returns a timeout given a |default_timeout| and the environment variable, |override_env_var|, that overrides it. Returns the overridden value if |override_env_var| is set, otherwise returns default_timeout. Throws an assertion error if the return value is negative.
def get_overridable_timeout(default_timeout, override_env_var):
  """Returns a timeout given a |default_timeout| and the environment variable,
  |override_env_var|, that overrides it. Returns the overridden value if
  |override_env_var| is set, otherwise returns default_timeout. Throws an
  assertion error if the return value is negative."""
  override_value = environment.get_value(override_env_var)
  # A falsy override (unset, 0, empty string) falls back to the default.
  effective = override_value or default_timeout
  timeout = float(effective)
  assert timeout >= 0, timeout
  return timeout
Get the hard timeout for fuzzing.
def get_hard_timeout(total_timeout=None):
  """Get the hard timeout for fuzzing."""
  if total_timeout is None:
    # Fall back to the job's configured fuzz test timeout.
    total_timeout = environment.get_value('FUZZ_TEST_TIMEOUT')

  return get_overridable_timeout(total_timeout, 'HARD_TIMEOUT_OVERRIDE')
Get the maximum amount of time that should be spent merging a corpus.
def get_merge_timeout(default_merge_timeout):
  """Get the maximum amount of time that should be spent merging a corpus."""
  return get_overridable_timeout(default_merge_timeout,
                                 'MERGE_TIMEOUT_OVERRIDE')
Returns True if |fuzzer_path| is a libprotobuf-mutator based fuzz target.
def is_lpm_fuzz_target(fuzzer_path):
  """Returns True if |fuzzer_path| is a libprotobuf-mutator based fuzz target.

  Detection is done by scanning the binary for the LPM entry point symbol.
  """
  # TODO(metzman): Use this function to disable running LPM targets with AFL.
  with open(fuzzer_path, 'rb') as target_handle:
    return utils.search_bytes_in_file(b'TestOneProtoInput', target_handle)
Return list of owner emails given a fuzz target path. Format of an owners file is described at: https://cs.chromium.org/chromium/src/third_party/depot_tools/owners.py
def get_issue_owners(fuzz_target_path):
  """Return list of owner emails given a fuzz target path.

  Format of an owners file is described at:
  https://cs.chromium.org/chromium/src/third_party/depot_tools/owners.py
  """
  owners_file_path = fuzzer_utils.get_supporting_file(fuzz_target_path,
                                                      OWNERS_FILE_EXTENSION)

  if environment.is_trusted_host():
    owners_file_path = fuzzer_utils.get_file_from_untrusted_worker(
        owners_file_path)

  if not os.path.exists(owners_file_path):
    return []

  def _looks_like_owner_email(entry):
    """True for entries that are plausible email addresses."""
    if not entry:
      # Ignore empty lines.
      return False
    if entry.startswith('#'):
      # Ignore comment lines.
      return False
    if entry == '*':
      # Not of any use, we can't add everyone as owner with this.
      return False
    if entry.startswith('per-file') or entry.startswith('file:'):
      # Don't have a source checkout, so ignore.
      return False
    # Anything without '@' is a bad email address.
    return '@' in entry

  with open(owners_file_path) as owners_file_handle:
    raw_lines = owners_file_handle.read().splitlines()

  return [
      line.strip() for line in raw_lines if _looks_like_owner_email(line.strip())
  ]
Get issue metadata.
def get_issue_metadata(fuzz_target_path, extension):
  """Get issue metadata entries from the supporting file with |extension|.

  Returns an empty list when no such file exists.
  """
  metadata_file_path = fuzzer_utils.get_supporting_file(fuzz_target_path,
                                                        extension)

  if environment.is_trusted_host():
    metadata_file_path = fuzzer_utils.get_file_from_untrusted_worker(
        metadata_file_path)

  if not os.path.exists(metadata_file_path):
    return []

  # One entry per non-empty line, whitespace stripped.
  with open(metadata_file_path) as metadata_handle:
    return utils.parse_delimited(
        metadata_handle, delimiter='\n', strip=True, remove_empty=True)
Return list of issue labels given a fuzz target path.
def get_issue_labels(fuzz_target_path):
  """Return list of issue labels given a fuzz target path."""
  labels = get_issue_metadata(fuzz_target_path, LABELS_FILE_EXTENSION)
  return labels
Return list of issue components given a fuzz target path.
def get_issue_components(fuzz_target_path):
  """Return list of issue components given a fuzz target path."""
  components = get_issue_metadata(fuzz_target_path, COMPONENTS_FILE_EXTENSION)
  return components
Return the additional metadata fields given a fuzz target path. The data will be a JSON-formatted dictionary.
def get_additional_issue_metadata(fuzz_target_path):
  """Return the additional metadata fields given a fuzz target path. The data
  will be a JSON-formatted dictionary."""
  metadata_file_path = fuzzer_utils.get_supporting_file(
      fuzz_target_path, METADATA_FILE_EXTENSION)

  if environment.is_trusted_host():
    metadata_file_path = fuzzer_utils.get_file_from_untrusted_worker(
        metadata_file_path)

  if not os.path.exists(metadata_file_path):
    return {}

  try:
    with open(metadata_file_path) as json_handle:
      return json.load(json_handle)
  except (ValueError, TypeError):
    # Malformed JSON: log and fall back to no extra metadata.
    logs.log_error('Invalid metadata file format.', path=metadata_file_path)
    return {}
Get issue related metadata for a target.
def get_all_issue_metadata(fuzz_target_path):
  """Get issue related metadata for a target.

  Collects labels, components, owners (comma-joined strings) and any
  additional JSON metadata into a single dict; empty results are omitted.
  """
  metadata = {}

  # List-valued fields are comma-joined; order matches the legacy behavior.
  list_fields = (
      ('issue_labels', get_issue_labels),
      ('issue_components', get_issue_components),
      ('issue_owners', get_issue_owners),
  )
  for key, getter in list_fields:
    values = getter(fuzz_target_path)
    if values:
      metadata[key] = ','.join(values)

  extra_metadata = get_additional_issue_metadata(fuzz_target_path)
  if extra_metadata:
    metadata['issue_metadata'] = extra_metadata

  return metadata
Get issue related metadata given a testcase.
def get_fuzz_target_issue_metadata(fuzz_target):
  """Get issue related metadata given a testcase.

  Returns None when there is no fuzz target, on trusted hosts, or when the
  target binary cannot be located in the build.
  """
  if fuzz_target is None:
    return None

  if environment.is_trusted_host():
    # Not applicable.
    return None

  build_dir = environment.get_value('BUILD_DIR')
  target_path = find_fuzzer_path(build_dir, fuzz_target.binary)
  if not target_path:
    logs.log_error('Failed to find target path for ' + fuzz_target.binary)
    return None

  return get_all_issue_metadata(target_path)
Return project qualified fuzzer name for a given target path.
def get_project_qualified_fuzzer_name(target_path):
  """Return project qualified fuzzer name for a given target path."""
  project = utils.current_project()
  binary_name = os.path.basename(target_path)
  return data_types.fuzz_target_project_qualified_name(project, binary_name)
Format the strategies used for logging purposes.
def format_fuzzing_strategies(fuzzing_strategies):
  """Format the strategies used for logging purposes.

  Accepts either the legacy list form or the new dict form; returns '' when
  nothing was used.
  """
  if not fuzzing_strategies:
    return ''

  if isinstance(fuzzing_strategies, list):
    # Legacy format. TODO(ochang): Remove this once it's not used.
    parts = fuzzing_strategies
  else:
    # New format: mapping of strategy name to value.
    assert isinstance(fuzzing_strategies, dict)
    parts = [f'{key}:{val}' for key, val in fuzzing_strategies.items()]

  return 'cf::fuzzing_strategies: ' + ','.join(parts)
Return a random element from the non-empty sequence.
def random_choice(sequence):
  """Return a random element from the non-empty sequence."""
  rng = random.SystemRandom()
  return rng.choice(sequence)
Read data from file.
def read_data_from_file(file_path):
  """Read data from file and return it as raw bytes."""
  with open(file_path, 'rb') as input_handle:
    contents = input_handle.read()
  return contents
Delete directory if exists, create empty directory. Throw an exception if either fails.
def recreate_directory(directory_path):
  """Delete directory if exists, create empty directory. Throw an exception if
  either fails."""
  if shell.remove_directory(directory_path, recreate=True):
    return
  raise OSError('Failed to recreate directory: ' + directory_path)
Remove minijail arguments from a fuzzer command. Args: command: The command. fuzzer_path: Absolute path to the fuzzer. Returns: The stripped command.
def strip_minijail_command(command, fuzzer_path):
  """Remove minijail arguments from a fuzzer command.

  Args:
    command: The command.
    fuzzer_path: Absolute path to the fuzzer.

  Returns:
    The stripped command (unchanged if |fuzzer_path| is not present).
  """
  if fuzzer_path not in command:
    return command
  # Everything before the fuzzer binary is minijail wrapping; drop it.
  return command[command.index(fuzzer_path):]
Writes data to file.
def write_data_to_file(content, file_path):
  """Writes data to file.

  Note: |content| is passed through str() before encoding, matching the
  historical behavior of this helper.
  """
  encoded = str(content).encode('utf-8')
  with open(file_path, 'wb') as output_handle:
    output_handle.write(encoded)
Returns the path of the seed corpus if one exists. Otherwise returns None. Logs an error if multiple seed corpora exist for the same target.
def get_seed_corpus_path(fuzz_target_path):
  """Returns the path of the seed corpus if one exists. Otherwise returns None.
  Logs an error if multiple seed corpora exist for the same target."""
  base_path = fuzzer_utils.get_supporting_file(fuzz_target_path,
                                               SEED_CORPUS_ARCHIVE_SUFFIX)

  # Get all files that end with _seed_corpus.*
  candidates = set(glob.glob(base_path + '.*'))

  # Keep only those with a recognized archive extension.
  valid_names = {
      base_path + extension for extension in archive.ARCHIVE_FILE_EXTENSIONS
  }
  archive_paths = list(candidates & valid_names)

  if not archive_paths:
    return None

  if len(archive_paths) > 1:
    logs.log_error('Multiple seed corpuses exist for fuzz target %s: %s.' %
                   (fuzz_target_path, ', '.join(archive_paths)))

  return archive_paths[0]
Applies sanitizer option overrides from .options file.
def process_sanitizer_options_overrides(fuzzer_path):
  """Applies sanitizer option overrides from .options file.

  For each supported sanitizer, merges the target's .options overrides into
  the current environment options when both are non-empty.
  """
  fuzzer_options = options.get_fuzz_target_options(fuzzer_path)
  if not fuzzer_options:
    return

  # (env var, override getter) pairs, in the historical processing order.
  sanitizers = (
      ('ASAN_OPTIONS', fuzzer_options.get_asan_options),
      ('MSAN_OPTIONS', fuzzer_options.get_msan_options),
      ('UBSAN_OPTIONS', fuzzer_options.get_ubsan_options),
      ('HWASAN_OPTIONS', fuzzer_options.get_hwasan_options),
  )
  for env_var, get_overrides in sanitizers:
    current_options = environment.get_memory_tool_options(env_var, {})
    overrides = get_overrides()
    # Only write back when there are existing options AND overrides to merge.
    if current_options and overrides:
      current_options.update(overrides)
      environment.set_memory_tool_options(env_var, current_options)
If seed corpus available, unpack it into the corpus directory if needed, ie: if corpus exists and either |force_unpack| is True, or the number of files in corpus_directory is less than |max_files_for_unpack|. Uses |fuzz_target_path| to find the seed corpus. If max_bytes is specified, then seed corpus files larger than |max_bytes| will not be unpacked.
def unpack_seed_corpus_if_needed(fuzz_target_path,
                                 corpus_directory,
                                 max_bytes=float('inf'),
                                 force_unpack=False,
                                 max_files_for_unpack=MAX_FILES_FOR_UNPACK):
  """If seed corpus available, unpack it into the corpus directory if needed,
  ie: if corpus exists and either |force_unpack| is True, or the number of
  files in corpus_directory is less than |max_files_for_unpack|. Uses
  |fuzz_target_path| to find the seed corpus. If max_bytes is specified, then
  seed corpus files larger than |max_bytes| will not be unpacked.
  """
  seed_corpus_archive_path = get_seed_corpus_path(fuzz_target_path)
  if not seed_corpus_archive_path:
    return

  num_corpus_files = len(shell.get_files_list(corpus_directory))
  if not force_unpack and num_corpus_files > max_files_for_unpack:
    # Corpus is already populated; unpacking again is unnecessary.
    return

  if force_unpack:
    logs.log('Forced unpack: %s.' % seed_corpus_archive_path)

  try:
    reader = archive.open(seed_corpus_archive_path)
  except Exception:
    # Was a bare `except:` — narrowed so KeyboardInterrupt/SystemExit are not
    # swallowed. Unpacking is best-effort, so log and bail out.
    logs.log_error(f'Failed reading archive: {seed_corpus_archive_path}')
    return

  idx = 0
  with reader:
    for member in reader.list_members():
      if member.is_dir:
        continue
      if member.size_bytes > max_bytes:
        # Skip seed files that exceed the caller's size cap.
        continue

      # Flatten the archive structure into sequentially numbered files.
      output_filename = '%016d' % idx
      output_file_path = os.path.join(corpus_directory, output_filename)
      with open(output_file_path, 'wb') as dst_handle:
        # Renamed from `file`, which shadowed the builtin and was then
        # re-shadowed by the inner `with` target.
        with reader.open(member.name) as src_handle:
          shutil.copyfileobj(src_handle, dst_handle)
      idx += 1

  logs.log('Unarchiving %d files from seed corpus %s.' %
           (idx, seed_corpus_archive_path))
Get the log header.
def get_log_header(command, time_executed):
  """Get the log header describing the command that ran and when."""
  quoted = get_command_quoted(command)
  return f'Command: {quoted}\nTime ran: {time_executed}\n'
Initialise builtin fuzzing engines.
def run(include_private=True, include_lowercase=False):
  """Initialise builtin fuzzing engines.

  Imports each engine module and registers it; optionally also registers a
  lowercase alias for engine names containing uppercase characters.
  """
  engine_names = fuzzing.ENGINES if include_private else fuzzing.PUBLIC_ENGINES

  for engine_name in engine_names:
    mod = importlib.import_module(
        f'clusterfuzz._internal.bot.fuzzers.{engine_name}.engine')
    engine.register(engine_name, mod.Engine)

    lowered = engine_name.lower()
    if include_lowercase and lowered != engine_name:
      engine.register(lowered, mod.Engine)
Get a libfuzzer runner.
def get_runner(fuzzer_path, temp_dir=None, use_minijail=None):
  """Get a libfuzzer runner.

  Selects among minijail/ChromeOS-chroot, Fuchsia, Android and plain
  libFuzzer runners based on the environment; sets up the chroot
  (bindings, llvm-symbolizer, /bin/sh) when a jailed runner is used.
  """
  if use_minijail is None:
    use_minijail = environment.get_value('USE_MINIJAIL')

  if use_minijail is False:
    # If minijail is explicitly disabled, set the environment variable as well.
    environment.set_value('USE_MINIJAIL', False)

  if temp_dir is None:
    temp_dir = fuzzer_utils.get_temp_dir()

  build_dir = environment.get_value('BUILD_DIR')
  dataflow_build_dir = environment.get_value('DATAFLOW_BUILD_DIR')
  is_android = environment.is_android()
  is_fuchsia = environment.platform() == 'FUCHSIA'

  if not is_fuchsia:
    # To ensure that we can run the fuzz target.
    os.chmod(fuzzer_path, 0o755)

  is_chromeos_system_job = environment.is_chromeos_system_job()
  if is_chromeos_system_job:
    minijail_chroot = minijail.ChromeOSChroot(build_dir)
  elif use_minijail:
    minijail_chroot = minijail.MinijailChroot(base_dir=temp_dir)

  if use_minijail or is_chromeos_system_job:
    # While it's possible for dynamic binaries to run without this, they need
    # to be accessible for symbolization etc. For simplicity we bind BUILD_DIR
    # to the same location within the chroot, which leaks the directory
    # structure of CF but this shouldn't be a big deal.
    minijail_chroot.add_binding(
        minijail.ChrootBinding(build_dir, build_dir, writeable=False))
    if dataflow_build_dir:
      minijail_chroot.add_binding(
          minijail.ChrootBinding(
              dataflow_build_dir, dataflow_build_dir, writeable=False))

    # Also bind the build dir to /out to make it easier to hardcode references
    # to data files.
    minijail_chroot.add_binding(
        minijail.ChrootBinding(build_dir, '/out', writeable=False))

    minijail_bin = os.path.join(minijail_chroot.directory, 'bin')
    shell.create_directory(minijail_bin)

    # Set up /bin with llvm-symbolizer to allow symbolized stacktraces.
    # Don't copy if it already exists (e.g. ChromeOS chroot jail).
    llvm_symbolizer_source_path = environment.get_llvm_symbolizer_path()
    llvm_symbolizer_destination_path = os.path.join(minijail_bin,
                                                    'llvm-symbolizer')
    if not os.path.exists(llvm_symbolizer_destination_path):
      shutil.copy(llvm_symbolizer_source_path,
                  llvm_symbolizer_destination_path)

    # copy /bin/sh, necessary for system().
    if not environment.is_chromeos_system_job():
      # The chroot has its own shell we don't need to copy (and probably
      # shouldn't because of library differences).
      shutil.copy(os.path.realpath('/bin/sh'), os.path.join(minijail_bin, 'sh'))

    runner = MinijailLibFuzzerRunner(fuzzer_path, minijail_chroot)
  elif is_fuchsia:
    instance_handle = environment.get_value('FUCHSIA_INSTANCE_HANDLE')
    if not instance_handle:
      raise undercoat.UndercoatError('Instance handle not provided.')
    runner = FuchsiaUndercoatLibFuzzerRunner(fuzzer_path, instance_handle)
  elif is_android:
    runner = AndroidLibFuzzerRunner(fuzzer_path, build_dir)
  else:
    runner = LibFuzzerRunner(fuzzer_path)

  return runner
Create a corpus directory with a given name in the temp directory and return its full path.
def create_corpus_directory(name):
  """Create a corpus directory with a given name in temp directory and return
  its full path."""
  corpus_dir = os.path.join(fuzzer_utils.get_temp_dir(), name)
  engine_common.recreate_directory(corpus_dir)
  return corpus_dir
Choose |num_testcases| testcases from the src corpus directory (and its subdirectories) and copy it into the dest directory.
def copy_from_corpus(dest_corpus_path, src_corpus_path, num_testcases):
  """Choose |num_testcases| testcases from the src corpus directory (and its
  subdirectories) and copy them into the dest directory.

  If the source corpus has fewer than |num_testcases| files, all of them are
  copied (previously this raised ValueError from random.sample).
  """
  src_corpus_files = []
  for root, _, files in shell.walk(src_corpus_path):
    for f in files:
      src_corpus_files.append(os.path.join(root, f))

  # Clamp so random.sample never raises when the corpus is small.
  sample_size = min(num_testcases, len(src_corpus_files))

  # There is no reason to preserve structure of src_corpus_path directory.
  for i, to_copy in enumerate(random.sample(src_corpus_files, sample_size)):
    shutil.copy(to_copy, os.path.join(dest_corpus_path, str(i)))
Remove arguments used during fuzzing.
def strip_fuzzing_arguments(arguments, is_merge=False):
  """Remove arguments used during fuzzing."""
  args = fuzzer_options.FuzzerArguments.from_list(arguments)

  flags_to_remove = [
      constants.FORK_FLAGNAME,  # It overrides `-merge` argument.
      constants.MAX_LEN_FLAGNAME,  # This may shrink the testcases.
      constants.RUNS_FLAGNAME,  # We create our own `-runs` for reproduction.
      # These flags/arguments are only used for fuzzing.
      constants.DATA_FLOW_TRACE_FLAGNAME,
      constants.DICT_FLAGNAME,
      constants.FOCUS_FUNCTION_FLAGNAME,
  ]

  # Value profile is needed during corpus merge, so only strip it otherwise.
  if not is_merge:
    flags_to_remove.append(constants.VALUE_PROFILE_FLAGNAME)

  for flag in flags_to_remove:
    if flag in args:
      del args[flag]

  return args.list()
Changes timeout argument for reproduction. This is slightly less than the |TEST_TIMEOUT| value for the job.
def fix_timeout_argument_for_reproduction(arguments):
  """Changes timeout argument for reproduction. This is slightly less than the
  |TEST_TIMEOUT| value for the job."""
  args = fuzzer_options.FuzzerArguments.from_list(arguments)
  if constants.TIMEOUT_FLAGNAME in args:
    del args[constants.TIMEOUT_FLAGNAME]

  # Leave 5 sec buffer for report processing.
  test_timeout = environment.get_value('TEST_TIMEOUT',
                                       constants.DEFAULT_TIMEOUT_LIMIT)
  adjusted_test_timeout = max(
      1, test_timeout - constants.REPORT_PROCESSING_TIME)
  args[constants.TIMEOUT_FLAGNAME] = adjusted_test_timeout
  return args.list()
Parse libFuzzer log output.
def parse_log_stats(log_lines):
  """Parse libFuzzer log output.

  Extracts `stat::name: value` lines produced by `-print_final_stats=1` into a
  dict of int values; non-numeric stats are logged and skipped.
  """
  stats_regex = re.compile(r'stat::([A-Za-z_]+):\s*([^\s]+)')
  parsed_stats = {}

  for line in log_lines:
    match = stats_regex.match(line)
    if not match:
      continue

    key, raw_value = match.group(1), match.group(2)
    if not raw_value.isdigit():
      # We do not expect any non-numeric stats from libFuzzer, skip those.
      logs.log_error('Corrupted stats reported by libFuzzer: "%s".' % line)
      continue

    parsed_stats[key] = int(raw_value)

  if parsed_stats.get('new_units_added') is not None:
    # 'new_units_added' value will be overwritten after corpus merge step, but
    # the initial number of units generated is an interesting data as well.
    parsed_stats['new_units_generated'] = parsed_stats['new_units_added']

  return parsed_stats
Sets sanitizer options based on .options file overrides, FuzzOptions (if provided), and what this script requires.
def set_sanitizer_options(fuzzer_path, fuzz_options=None):
  """Sets sanitizer options based on .options file overrides, FuzzOptions (if
  provided), and what this script requires."""
  engine_common.process_sanitizer_options_overrides(fuzzer_path)

  options_env_var = environment.get_current_memory_tool_var()
  current_options = environment.get_memory_tool_options(options_env_var, {})
  current_options['exitcode'] = constants.TARGET_ERROR_EXITCODE

  if fuzz_options and fuzz_options.use_dataflow_tracing:
    # Focus function feature does not work without symbolization.
    current_options['symbolize'] = 1
    environment.update_symbolizer_options(current_options)

  environment.set_memory_tool_options(options_env_var, current_options)
Get the fuzz timeout.
def get_fuzz_timeout(is_mutations_run, total_timeout=None):
  """Get the fuzz timeout: the hard timeout minus time reserved for corpus
  merging and (when applicable) testcase mutation generation."""
  hard_timeout = engine_common.get_hard_timeout(total_timeout=total_timeout)
  fuzz_timeout = hard_timeout - engine_common.get_merge_timeout(
      DEFAULT_MERGE_TIMEOUT)

  if is_mutations_run:
    fuzz_timeout -= engine_common.get_new_testcase_mutations_timeout()

  return fuzz_timeout
Helper function. Returns whether or not the current environment is Linux ASan.
def is_linux_asan():
  """Helper function. Returns whether or not the current env is Linux ASan.

  The previous expression (`platform != 'LINUX' or memory tool != 'ASAN'`)
  returned the NEGATION of what the name and docstring promise; callers such
  as use_peach_mutator gate on `not is_linux_asan()` expecting a positive
  predicate.
  """
  return (environment.platform() == 'LINUX' and
          environment.get_value('MEMORY_TOOL') == 'ASAN')
Decide whether or not to use peach mutator, and set up all of the environment variables necessary to do so.
def use_peach_mutator(extra_env, grammar): """Decide whether or not to use peach mutator, and set up all of the environment variables necessary to do so.""" # TODO(mpherman): Include architecture info in job definition and exclude # i386. if environment.is_lib() or not is_linux_asan(): return False if not grammar: return False pit_path = pits.get_path(grammar) if not pit_path: return False # Set title and pit environment variables extra_env['PIT_FILENAME'] = pit_path extra_env['PIT_TITLE'] = grammar # Extract zip of peach mutator code. peach_dir = os.path.join(environment.get_platform_resources_directory(), 'peach') unzipped = os.path.join(peach_dir, 'mutator') source = os.path.join(peach_dir, 'peach_mutator.zip') with archive.open(source) as reader: reader.extract_all(unzipped, trusted=True) # Set LD_PRELOAD. peach_path = os.path.join(unzipped, 'peach_mutator', 'src', 'peach.so') extra_env['LD_PRELOAD'] = peach_path # Set Python path. new_path = [ os.path.join(unzipped, 'peach_mutator', 'src'), os.path.join(unzipped, 'peach_mutator', 'third_party', 'peach'), ] + sys.path extra_env['PYTHONPATH'] = os.pathsep.join(new_path) return True
Returns True if |possible_hash| looks like a valid sha1 hash.
def is_sha1_hash(possible_hash):
  """Returns True if |possible_hash| looks like a valid sha1 hash."""
  # A sha1 digest is exactly 40 hex characters.
  if len(possible_hash) != 40:
    return False

  hex_chars = set(string.hexdigits)
  return all(character in hex_chars for character in possible_hash)
Move new units in |merge_directory| into |corpus_directory|.
def move_mergeable_units(merge_directory, corpus_directory):
  """Move new units in |merge_directory| into |corpus_directory|."""
  existing_names = {
      os.path.basename(path)
      for path in shell.get_files_list(corpus_directory)
  }

  for unit_path in shell.get_files_list(merge_directory):
    unit_name = os.path.basename(unit_path)
    # Skip units already present in the corpus, but only when named by a
    # content hash (those are guaranteed identical).
    if unit_name in existing_names and is_sha1_hash(unit_name):
      continue
    shell.move(unit_path, os.path.join(corpus_directory, unit_name))
def pick_strategies(strategy_pool,
                    fuzzer_path,
                    corpus_directory,
                    existing_arguments,
                    grammar=None):
  """Pick strategies.

  Translates the pre-selected |strategy_pool| into concrete libFuzzer flags,
  extra corpus directories and environment variables for this run.

  Args:
    strategy_pool: Pool of candidate strategies (already probability-chosen).
    fuzzer_path: Path to the fuzz target binary.
    corpus_directory: Directory holding the existing corpus.
    existing_arguments: Arguments already specified by the user/options file;
      used to avoid overriding flags such as -max_len or -fork.
    grammar: Optional grammar name, used by the peach mutator strategy.

  Returns:
    A StrategyInfo tuple of (strategy names, extra arguments, additional
    corpus dirs, extra env vars, dataflow-tracing flag, mutations-run flag).
  """
  build_directory = environment.get_value('BUILD_DIR')
  fuzzing_strategies = []
  arguments = fuzzer_options.FuzzerArguments({})
  additional_corpus_dirs = []

  # Select a generator to attempt to use for existing testcase mutations.
  candidate_generator = engine_common.select_generator(strategy_pool,
                                                       fuzzer_path)
  # Mutations are skipped on ephemeral bots or when no generator was chosen.
  is_mutations_run = (not environment.is_ephemeral() and
                      candidate_generator != engine_common.Generator.NONE)

  # Depends on the presense of DFSan instrumented build.
  dataflow_build_dir = environment.get_value('DATAFLOW_BUILD_DIR')
  use_dataflow_tracing = (
      dataflow_build_dir and
      strategy_pool.do_strategy(strategy.DATAFLOW_TRACING_STRATEGY))
  if use_dataflow_tracing:
    # The DFT binary mirrors the target's relative path inside the dataflow
    # build directory.
    dataflow_binary_path = os.path.join(
        dataflow_build_dir, os.path.relpath(fuzzer_path, build_directory))
    dataflow_trace_dir = dataflow_binary_path + DATAFLOW_TRACE_DIR_SUFFIX
    if os.path.exists(dataflow_trace_dir):
      arguments[constants.DATA_FLOW_TRACE_FLAGNAME] = str(dataflow_trace_dir)
      arguments[constants.FOCUS_FUNCTION_FLAGNAME] = 'auto'
      fuzzing_strategies.append(strategy.DATAFLOW_TRACING_STRATEGY.name)
    else:
      logs.log_warn(
          'Dataflow trace is not found in dataflow build, skipping strategy.')
      use_dataflow_tracing = False

  # Generate new testcase mutations using radamsa, etc.
  if is_mutations_run:
    new_testcase_mutations_directory = create_corpus_directory('mutations')
    generator_used = engine_common.generate_new_testcase_mutations(
        corpus_directory, new_testcase_mutations_directory,
        candidate_generator)

    # Add the used generator strategy to our fuzzing strategies list.
    if (generator_used and
        candidate_generator == engine_common.Generator.RADAMSA):
      fuzzing_strategies.append(strategy.CORPUS_MUTATION_RADAMSA_STRATEGY.name)

    # The mutations directory is always fed to libFuzzer as an extra corpus,
    # even if generation reported no success (it may still contain units).
    additional_corpus_dirs.append(new_testcase_mutations_directory)

  if strategy_pool.do_strategy(strategy.RANDOM_MAX_LENGTH_STRATEGY):
    # Respect a -max_len that was already provided explicitly.
    if constants.MAX_LEN_FLAGNAME not in existing_arguments:
      max_length = random.SystemRandom().randint(1, MAX_VALUE_FOR_MAX_LENGTH)
      arguments[constants.MAX_LEN_FLAGNAME] = max_length
      fuzzing_strategies.append(strategy.RANDOM_MAX_LENGTH_STRATEGY.name)

  if strategy_pool.do_strategy(strategy.VALUE_PROFILE_STRATEGY):
    arguments[constants.VALUE_PROFILE_FLAGNAME] = 1
    fuzzing_strategies.append(strategy.VALUE_PROFILE_STRATEGY.name)

  # Fork mode is never combined with dataflow tracing (see
  # should_set_fork_flag: DFT logs must stay readable).
  if not use_dataflow_tracing and should_set_fork_flag(existing_arguments,
                                                      strategy_pool):
    max_fuzz_threads = environment.get_value('MAX_FUZZ_THREADS', 1)
    num_fuzz_processes = max(1, utils.cpu_count() // max_fuzz_threads)
    arguments[constants.FORK_FLAGNAME] = num_fuzz_processes
    fuzzing_strategies.append(
        '%s_%d' % (strategy.FORK_STRATEGY.name, num_fuzz_processes))

  extra_env = {}
  # Peach is only attempted when no other mutator strategy is already active.
  if (not has_existing_mutator_strategy(fuzzing_strategies) and
      strategy_pool.do_strategy(strategy.PEACH_GRAMMAR_MUTATION_STRATEGY) and
      use_peach_mutator(extra_env, grammar)):
    fuzzing_strategies.append(
        '%s_%s' % (strategy.PEACH_GRAMMAR_MUTATION_STRATEGY.name, grammar))

  if (environment.platform() == 'LINUX' and utils.is_oss_fuzz() and
      strategy_pool.do_strategy(strategy.USE_EXTRA_SANITIZERS_STRATEGY)):
    fuzzing_strategies.append(strategy.USE_EXTRA_SANITIZERS_STRATEGY.name)

  return StrategyInfo(fuzzing_strategies, arguments, additional_corpus_dirs,
                      extra_env, use_dataflow_tracing, is_mutations_run)
def should_set_fork_flag(existing_arguments, strategy_pool):
  """Returns True if libFuzzer's -fork flag should be set for this run."""
  # FIXME: Disabled on Android for now to avoid severe battery drainage
  # (stabilize and re-enable with a lower process count). Fork mode is also
  # unsupported on Fuchsia and disabled on ephemeral bots due to a platform
  # bug.
  if (environment.is_android() or environment.platform() == 'FUCHSIA' or
      environment.is_ephemeral()):
    return False

  # Do not use fork mode for DFT-based fuzzing (keeps target logs readable
  # and actionable), and respect a user-provided fork flag.
  if constants.FORK_FLAGNAME in existing_arguments.flags:
    return False

  return strategy_pool.do_strategy(strategy.FORK_STRATEGY)
def get_fuzz_target_options(fuzz_target_path):
  """Return a FuzzerOptions for the given target, or None if it does not
  exist."""
  options_file_path = fuzzer_utils.get_supporting_file(fuzz_target_path,
                                                       OPTIONS_FILE_EXTENSION)

  # On trusted hosts the .options file lives on the untrusted worker; fetch a
  # local copy first.
  if environment.is_trusted_host():
    options_file_path = fuzzer_utils.get_file_from_untrusted_worker(
        options_file_path)

  if not os.path.exists(options_file_path):
    return None

  try:
    return FuzzerOptions(
        options_file_path, cwd=os.path.dirname(options_file_path))
  except FuzzerOptionsError:
    logs.log_error('Invalid options file: %s.' % options_file_path)
    return None
def choose_generator(strategy_pool):
  """Chooses whether to use radamsa or no generator and updates the strategy
  pool."""
  radamsa_strategy = strategy.CORPUS_MUTATION_RADAMSA_STRATEGY
  # Allow the environment to override the strategy's default probability.
  probability = engine_common.get_strategy_probability(
      radamsa_strategy.name, default=radamsa_strategy.probability)
  if engine_common.decide_with_probability(probability):
    strategy_pool.add_strategy(radamsa_strategy)
def do_strategy(strategy_tuple):
  """Return whether or not to use a given strategy."""
  # Environment-configured probability wins over the strategy's default.
  probability = engine_common.get_strategy_probability(
      strategy_tuple.name, strategy_tuple.probability)
  return engine_common.decide_with_probability(probability)
def generate_default_strategy_pool(strategy_list, use_generator):
  """Build a StrategyPool using the default (per-strategy probability)
  selection method."""
  pool = StrategyPool()

  # Radamsa and "no generator" are mutually exclusive; a generator is only
  # considered when use_generator is enabled.
  if use_generator:
    choose_generator(pool)

  # Each non-generator strategy is added independently, according to its own
  # probability parameter.
  for entry in strategy_list:
    if entry in GENERATORS:
      continue
    if do_strategy(entry):
      pool.add_strategy(entry)

  logs.log('Strategy pool was generated according to default parameters. '
           'Chosen strategies: ' + ', '.join(pool.strategy_names))
  return pool
def generate_weighted_strategy_pool(strategy_list, use_generator, engine_name):
  """Build a StrategyPool from the multi-armed-bandit probability
  distribution stored in the environment, falling back to the default
  selection method when no usable distribution exists."""
  distribution = environment.get_value('STRATEGY_SELECTION_DISTRIBUTION')
  selection_method = environment.get_value(
      'STRATEGY_SELECTION_METHOD', default_value='default')

  # No stored distribution, or selection explicitly set to 'default': use the
  # plain probability-based pool over the full strategy list.
  if not distribution or selection_method == 'default':
    return generate_default_strategy_pool(strategy_list, use_generator)

  # Convert the stored dicts into named tuples for random_weighted_choice,
  # keeping only entries that belong to this fuzzing engine.
  distribution_tuples = [
      StrategyCombination(
          strategy_name=elem['strategy_name'],
          probability=elem['probability'])
      for elem in distribution
      if elem['engine'] == engine_name
  ]

  if not distribution_tuples:
    logs.log_warn('Tried to generate a weighted strategy pool, but do not have '
                  'strategy probabilities for %s fuzzing engine.' % engine_name)
    return generate_default_strategy_pool(strategy_list, use_generator)

  chosen = utils.random_weighted_choice(distribution_tuples, 'probability')
  chosen_strategies = chosen.strategy_name.split(',')

  pool = StrategyPool()
  for entry in strategy_list:
    if entry.name in chosen_strategies:
      pool.add_strategy(entry)

  # Manually-enabled strategies apply to few targets, so they are considered
  # separately on top of the bandit-chosen combination.
  for entry in strategy_list:
    if entry.manually_enable and do_strategy(entry):
      pool.add_strategy(entry)

  logs.log('Strategy pool was generated according to weighted distribution. '
           'Chosen strategies: ' + ', '.join(pool.strategy_names))
  return pool
def is_fuzz_target_local(file_path, file_handle=None):
  """Returns whether |file_path| is a fuzz target binary (local path).

  Args:
    file_path: Local path to the candidate binary.
    file_handle: Optional already-open binary handle for |file_path|. When
      provided it is used for the content search and left open (caller owns
      it); when absent, the file is opened and closed here.

  Returns:
    True if the file looks like a fuzz target, False otherwise.
  """
  # TODO(hzawawy): Handle syzkaller case.
  if '@' in file_path:
    # GFT targets often have periods in the name that get misinterpreted as an
    # extension.
    filename = os.path.basename(file_path)
    file_extension = ''
  else:
    filename, file_extension = os.path.splitext(os.path.basename(file_path))

  if not VALID_TARGET_NAME_REGEX.match(filename):
    # Check fuzz target has a valid name (without any special chars).
    return False

  if BLOCKLISTED_TARGET_NAME_REGEX.match(filename):
    # Check fuzz target an explicitly disallowed name (e.g. binaries used for
    # jazzer-based targets).
    return False

  if file_extension not in ALLOWED_FUZZ_TARGET_EXTENSIONS:
    # Ignore files with disallowed extensions (to prevent opening e.g. .zips).
    return False

  if not file_handle and not os.path.exists(file_path):
    # Ignore non-existent files for cases when we don't have a file handle.
    return False

  # Well-known target name suffixes are accepted without inspecting content.
  if (filename.endswith('_fuzzer') or filename.endswith('_fuzztest') or
      filename.endswith('_fuzz_test')):
    return True

  # TODO(aarya): Remove this optimization if it does not show up significant
  # savings in profiling results.
  fuzz_target_name_regex = environment.get_value('FUZZER_NAME_REGEX')
  if fuzz_target_name_regex:
    # When a name regex is configured, it fully decides the outcome.
    return bool(re.match(fuzz_target_name_regex, filename))

  if os.path.exists(file_path) and not stat.S_ISREG(os.stat(file_path).st_mode):
    # Don't read special files (eg: /dev/urandom).
    logs.log_warn('Tried to read from non-regular file: %s.' % file_path)
    return False

  # Use already provided file handle or open the file.
  local_file_handle = file_handle or open(file_path, 'rb')

  result = False
  for pattern in FUZZ_TARGET_SEARCH_BYTES:
    # TODO(metzman): Bound this call so we don't read forever if something went
    # wrong.
    # Rewind before each search so every pattern scans the whole file.
    local_file_handle.seek(0)
    result = utils.search_bytes_in_file(pattern, local_file_handle)
    if result:
      break

  if not file_handle:
    # If this local file handle is owned by our function, close it now.
    # Otherwise, it is caller's responsibility.
    local_file_handle.close()

  return result
def get_fuzz_targets_local(path):
  """Get list of fuzz targets paths (local)."""
  fuzz_target_paths = []

  for root, _, filenames in shell.walk(path):
    # Ignore extra binaries.
    if os.path.basename(root) == EXTRA_BUILD_DIR:
      continue
    for filename in filenames:
      candidate = os.path.join(root, filename)
      if is_fuzz_target_local(candidate):
        fuzz_target_paths.append(candidate)

  return fuzz_target_paths
def get_fuzz_targets(path):
  """Get list of fuzz targets paths."""
  if not environment.is_trusted_host():
    return get_fuzz_targets_local(path)
  # Trusted hosts delegate the scan to the untrusted worker.
  from clusterfuzz._internal.bot.untrusted_runner import file_host
  return file_host.get_fuzz_targets(path)
def extract_argument(arguments, prefix, remove=True):
  """Return the value of the first argument starting with |prefix|, or None.

  When |remove| is True (the default), the matched argument is also removed
  from |arguments| in place."""
  # Iterate a copy so in-place removal is safe.
  for candidate in list(arguments):
    if not candidate.startswith(prefix):
      continue
    if remove:
      arguments.remove(candidate)
    return candidate[len(prefix):]
  return None
def get_build_revision():
  """Get build revision."""
  revision_value = environment.get_value('APP_REVISION')
  try:
    return int(revision_value)
  except (ValueError, TypeError):
    # Missing or malformed revision defaults to -1.
    return -1
def get_supporting_file(fuzz_target_path, extension_or_suffix):
  """Get supporting file for a fuzz target with the provided extension."""
  base_path = fuzz_target_path
  # Strip the first known, non-empty extension that matches.
  for known_extension in ALLOWED_FUZZ_TARGET_EXTENSIONS:
    if known_extension and base_path.endswith(known_extension):
      base_path = base_path[:-len(known_extension)]
      break
  return base_path + extension_or_suffix
def get_temp_dir():
  """Return the temp dir."""
  base_dir = environment.get_value('FUZZ_INPUTS_DISK', tempfile.gettempdir())
  # One temp dir per process so parallel runs do not collide.
  temp_directory = os.path.join(base_dir, 'temp-' + str(os.getpid()))
  shell.create_directory(temp_directory)
  return temp_directory
def get_file_from_untrusted_worker(worker_file_path):
  """Gets file from an untrusted worker to local. Local file stays in the temp
  folder until the end of task or can be explicitly deleted by the caller."""
  from clusterfuzz._internal.bot.untrusted_runner import file_host

  # Reserve a unique local name inside the task temp dir.
  with tempfile.NamedTemporaryFile(delete=False, dir=get_temp_dir()) as f:
    local_file_path = f.name

  file_host.copy_file_from_worker(worker_file_path, local_file_path)
  return local_file_path
def cleanup():
  """Clean up temporary metadata."""
  # Removes this process's temp dir and everything inside it.
  shell.remove_directory(get_temp_dir())
def normalize_target_name(target_path):
  """Normalize target path, removing file extensions."""
  target_name = os.path.basename(target_path)
  # GFT target names often have periods in their name; keep them intact.
  if '@' in target_name:
    return target_name
  root, _ = os.path.splitext(target_name)
  return root
def _run_single_testcase(fuzzer_runner, testcase_file_path):
  """Run the fuzzer once against a single testcase and return the result."""
  # Make sure the fuzzer binary is executable before invoking it.
  run_permissions = stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP
  os.chmod(fuzzer_runner.executable_path, run_permissions)
  return fuzzer_runner.run_single_testcase(testcase_file_path)
def write_dummy_file(input_dir):
  """Afl will refuse to run if the corpus directory is empty or contains empty
  files. So write the bare minimum to get afl to run if there is no corpus
  yet."""
  dummy_input_path = os.path.join(input_dir, AFL_DUMMY_INPUT)

  if not environment.is_trusted_host():
    utils.write_data_to_file(' ', dummy_input_path)
    return

  # On trusted hosts the dummy input has to be written on the worker side.
  from clusterfuzz._internal.bot.untrusted_runner import file_host
  file_host.write_data_to_worker(b' ', dummy_input_path)
def _verify_system_config():
  """Verifies system settings required for AFL."""

  def _core_pattern_is_core():
    """True when the core pattern file exists and is set to 'core'."""
    if not os.path.exists(constants.CORE_PATTERN_FILE_PATH):
      return False
    with open(constants.CORE_PATTERN_FILE_PATH, 'rb') as f:
      return f.read().strip() == b'core'

  if _core_pattern_is_core():
    return

  # Try to fix the setting ourselves; requires passwordless sudo.
  return_code = subprocess.call(
      'sudo -n bash -c "echo core > {path}"'.format(
          path=constants.CORE_PATTERN_FILE_PATH),
      shell=True)
  if return_code or not _core_pattern_is_core():
    logs.log_fatal_and_exit(
        'Failed to set {path}. AFL needs {path} to be set to core.'.format(
            path=constants.CORE_PATTERN_FILE_PATH))
def load_testcase_if_exists(fuzzer_runner, testcase_file_path):
  """Run |fuzzer_runner| on a single testcase and print its stderr.

  Always returns True."""
  # Ensure the target binary is executable first.
  run_permissions = stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP
  os.chmod(fuzzer_runner.executable_path, run_permissions)
  fuzzer_runner.run_single_testcase(testcase_file_path)
  print(fuzzer_runner.fuzzer_stderr)
  return True
def set_additional_sanitizer_options_for_afl_fuzz():
  """Set *SAN_OPTIONS to afl's liking. If ASAN_OPTIONS or MSAN_OPTION is set,
  they must contain certain options or afl-fuzz will refuse to fuzz. See
  check_asan_opts() in afl-fuzz.c in afl for more details.
  """
  # We need to check if ASAN_OPTIONS and/or MSAN_OPTIONS contain symbolize=0
  # because ClusterFuzz sets all sanitizers options equal to an empty string
  # before adding symbolize=0 to *either* ASAN_OPTIONS or MSAN_OPTIONS. Because
  # they will both be set but one will be empty, afl will think the empty one
  # is incorrect and quit if we don't do this.
  required = (
      ('ASAN_OPTIONS', {'symbolize': 0, 'abort_on_error': 1}),
      ('MSAN_OPTIONS', {'symbolize': 0, 'exit_code': 86}),
  )

  for env_var, required_values in required:
    # Only patch variables that are actually present in the environment.
    if env_var not in os.environ:
      continue
    current_values = environment.get_memory_tool_options(env_var)
    current_values.update(required_values)
    environment.set_memory_tool_options(env_var, current_values)
def remove_path(path):
  """Remove |path| if it exists. Similar to running rm -rf |path|."""
  # Directories are removed recursively; files (and symlinks to files) are
  # unlinked. A nonexistent path is a no-op.
  if os.path.isdir(path):
    shutil.rmtree(path)
  elif os.path.isfile(path):
    os.remove(path)
def list_full_file_paths_device(directory):
  """List the absolute paths of files in |directory| on Android device."""
  device_directory = android.util.get_device_path(os.path.abspath(directory))
  listing = android.adb.run_command(['shell', 'ls', device_directory])

  paths = []
  for entry in listing.split():
    candidate = os.path.join(device_directory, entry)
    # `ls` output may contain entries that no longer exist; keep only the
    # paths adb confirms exist on the device.
    if android.adb.file_exists(candidate):
      paths.append(candidate)
  return paths
def list_full_file_paths(directory):
  """List the absolute paths of files in |directory|."""
  absolute_directory = os.path.abspath(directory)
  # Only return paths to files, not subdirectories.
  return [
      os.path.join(absolute_directory, entry)
      for entry in os.listdir(directory)
      if os.path.isfile(os.path.join(absolute_directory, entry))
  ]
def get_first_stacktrace(stderr_data):
  """If |stderr_data| contains stack traces, only returns the first one.
  Otherwise returns the entire string."""
  # Non-greedy '+?' so only the first ERROR...ABORTING span is matched.
  sanitizer_stacktrace_regex = (
      r'ERROR: [A-Za-z]+Sanitizer: .*\n(.|\n)+?ABORTING')
  match = re.search(sanitizer_stacktrace_regex, stderr_data)
  if not match:
    # No recognizable stacktrace; return everything.
    return stderr_data
  # Keep everything up to (and including) the end of the first stacktrace.
  return stderr_data[:match.end()]
def get_fuzz_timeout(is_mutations_run, full_timeout=None):
  """Get the maximum amount of time that should be spent fuzzing."""
  # TODO(mbarbella): Delete this check once non-engine support is removed. The
  # engine pipeline always specifies a timeout.
  if full_timeout is None:
    fuzz_timeout = engine_common.get_hard_timeout() - POSTPROCESSING_TIMEOUT
  else:
    fuzz_timeout = full_timeout

  # Budget for generating testcase mutations comes out of the fuzz time.
  if is_mutations_run:
    fuzz_timeout -= engine_common.get_new_testcase_mutations_timeout()

  # In the non-engine case, the merge step also comes out of the fuzz time.
  if not full_timeout:
    fuzz_timeout -= engine_common.get_merge_timeout(DEFAULT_MERGE_TIMEOUT)

  assert fuzz_timeout > 0
  return fuzz_timeout
def prepare_runner(fuzzer_path,
                   config,
                   testcase_file_path,
                   input_directory,
                   timeout=None,
                   strategy_dict=None):
  """Common initialization code shared by the new pipeline and main."""
  # Start from a clean per-task temp dir.
  engine_common.recreate_directory(fuzzer_utils.get_temp_dir())

  # Pick the runner flavor for the current environment; unshare takes
  # precedence, then Android, then the plain runner.
  if environment.get_value('USE_UNSHARE'):
    chosen_runner_class = UnshareAflRunner
  elif environment.is_android():
    chosen_runner_class = AflAndroidRunner
  else:
    chosen_runner_class = AflRunner

  afl_runner = chosen_runner_class(
      fuzzer_path,
      config,
      testcase_file_path,
      input_directory,
      timeout=timeout,
      strategy_dict=strategy_dict)

  # Make sure afl won't exit because of bad sanitizer options.
  set_additional_sanitizer_options_for_afl_fuzz()

  # Add *SAN_OPTIONS overrides from .options file.
  engine_common.process_sanitizer_options_overrides(fuzzer_path)

  return afl_runner
def rand_schedule(scheduler_probs):
  """Returns a random queue scheduler."""
  # Walk the cumulative probability table; fall back to 'fast' if the random
  # draw exceeds the table's total mass.
  rnd = engine_common.get_probability()
  for schedule_opt, prob in scheduler_probs:
    if rnd <= prob:
      return schedule_opt
    rnd -= prob
  return 'fast'
def rand_cmplog_level(strategies):
  """Returns a random CMPLOG intensity level."""
  # Base level via the cumulative probability table; default is '2'.
  cmplog_level = '2'
  rnd = engine_common.get_probability()
  for cmplog_level_opt, prob in strategies.CMPLOG_LEVEL_PROBS:
    if rnd <= prob:
      cmplog_level = cmplog_level_opt
      break
    rnd -= prob

  # Each modifier flag is appended independently with its own probability.
  modifier_probs = (
      (constants.CMPLOG_ARITH, strategies.CMPLOG_ARITH_PROB),
      (constants.CMPLOG_TRANS, strategies.CMPLOG_TRANS_PROB),
      (constants.CMPLOG_XTREME, strategies.CMPLOG_XTREME_PROB),
      (constants.CMPLOG_RAND, strategies.CMPLOG_RAND_PROB),
  )
  for modifier, prob in modifier_probs:
    if engine_common.decide_with_probability(prob):
      cmplog_level += modifier

  return cmplog_level
def main(argv):
  """Run afl as specified by argv.

  argv[1] is the testcase file path. The rest of the configuration (target
  name, corpus dir, build dir) comes from the environment. When no corpus
  directory is set, this is a single-testcase reproduction run rather than a
  fuzzing run.
  """
  atexit.register(fuzzer_utils.cleanup)

  # Initialize variables.
  testcase_file_path = argv[1]
  target_name = environment.get_value('FUZZ_TARGET')
  input_directory = environment.get_value('FUZZ_CORPUS_DIR')

  # FIXME: Remove this once AFL is migrated to the new engine impl and runs in
  # same python process.
  logs.configure('run_fuzzer')

  # Verifies the core_pattern setting AFL requires; exits fatally if it
  # cannot be satisfied.
  _verify_system_config()

  profiler.start_if_needed('afl_launcher')

  build_directory = environment.get_value('BUILD_DIR')
  fuzzer_path = engine_common.find_fuzzer_path(build_directory, target_name)
  if not fuzzer_path:
    # Target binary is missing from the build; nothing to do.
    return

  # Install signal handler.
  signal.signal(signal.SIGTERM, engine_common.signal_term_handler)

  config = AflConfig.from_target_path(fuzzer_path)

  runner = prepare_runner(fuzzer_path, config, testcase_file_path,
                          input_directory)

  # If we don't have a corpus, then that means this is not a fuzzing run.
  if not input_directory:
    load_testcase_if_exists(runner, testcase_file_path)
    return

  # Execute afl-fuzz on the fuzzing target.
  fuzz_result = runner.fuzz()

  # Print info for the fuzzer logs.
  print(
      engine_common.get_log_header(fuzz_result.command,
                                   fuzz_result.time_executed))
  print(fuzz_result.output)

  if fuzz_result.return_code:
    # If AFL returned a non-zero return code quit now without getting stats,
    # since they would be meaningless.
    print(runner.fuzzer_stderr)
    return

  stats_getter = stats.StatsGetter(runner.afl_output.stats_path,
                                   config.dict_path)
  try:
    # Convert AFL's output into a libFuzzer-style corpus and collect stats.
    new_units_generated, new_units_added, corpus_size = (
        runner.libfuzzerize_corpus())
    stats_getter.set_stats(fuzz_result.time_executed, new_units_generated,
                           new_units_added, corpus_size, runner.strategies,
                           runner.fuzzer_stderr, fuzz_result.output)

    engine_common.dump_big_query_data(stats_getter.stats, testcase_file_path,
                                      fuzz_result.command)

  finally:
    # Always surface the target's stderr, even if stats collection failed.
    print(runner.fuzzer_stderr)

  # Record the stats to make them easily searchable in stackdriver.
  # NOTE(review): this point is only reached when libfuzzerize_corpus()
  # succeeded (an exception would have propagated through the finally), so
  # new_units_added is guaranteed to be bound here.
  if new_units_added:
    logs.log(
        'New units added to corpus: %d.' % new_units_added,
        stats=stats_getter.stats)
  else:
    logs.log('No new units found.', stats=stats_getter.stats)
def _get_runner(target_path):
  """Gets the Centipede runner."""
  # The centipede binary ships next to the fuzz target in the build.
  centipede = pathlib.Path(target_path).parent / 'centipede'
  if not centipede.exists():
    raise CentipedeError('Centipede not found in build')

  centipede_str = str(centipede)
  if environment.get_value('USE_UNSHARE'):
    return new_process.UnicodeModifierRunner(centipede_str)
  return new_process.UnicodeProcessRunner(centipede_str)
def _get_reproducer_path(log, reproducers_dir):
  """Gets the reproducer path, if any."""
  crash_match = CRASH_REGEX.search(log)
  if not crash_match:
    return None
  # Copy the crash file out of its temporary location into |reproducers_dir|.
  source = pathlib.Path(crash_match.group(1))
  destination = pathlib.Path(reproducers_dir) / source.name
  shutil.copy(source, destination)
  return destination
def _set_sanitizer_options(fuzzer_path):
  """Sets sanitizer options based on .options file overrides."""
  engine_common.process_sanitizer_options_overrides(fuzzer_path)
  options_var = environment.get_current_memory_tool_var()
  # Re-serialize the (possibly overridden) options back into the environment.
  current_options = environment.get_memory_tool_options(options_var, {})
  environment.set_memory_tool_options(options_var, current_options)
def _get_reproducer_path(line):
  """Get the reproducer path, if any."""
  # The path is the first capture group of the crash line pattern.
  match = _CRASH_REGEX.match(line)
  return match.group(1) if match else None
def _get_runner():
  """Get the honggfuzz runner."""
  honggfuzz_path = os.path.join(environment.get_value('BUILD_DIR'),
                                'honggfuzz')
  if not os.path.exists(honggfuzz_path):
    raise HonggfuzzError('honggfuzz not found in build')

  # Make sure the binary is executable (rwxr-xr-x).
  os.chmod(honggfuzz_path, 0o755)

  runner_class = (new_process.UnicodeModifierRunner
                  if environment.get_value('USE_UNSHARE') else
                  new_process.UnicodeProcessRunner)
  return runner_class(honggfuzz_path)
def _find_sanitizer_stacktrace(reproducers_dir):
  """Find the sanitizer stacktrace from the reproducers dir."""
  log_glob = os.path.join(reproducers_dir, _HF_SANITIZER_LOG_PREFIX + '*')
  # Return the contents of the first matching sanitizer log, if any.
  for stacktrace_path in glob.glob(log_glob):
    with open(stacktrace_path, 'rb') as f:
      return utils.decode_to_unicode(f.read())
  return None
def _get_reproducer_path(line):
  """Get the reproducer path, if any."""
  match = _CRASH_REGEX.match(line)
  if match is None:
    # Not a crash line.
    return None
  return match.group(1)
def _get_stats(line):
  """Parse a honggfuzz statistics line into a dict, or return None.

  Args:
    line: A single line of honggfuzz output.

  Returns:
    A dict mapping stat names to int values if |line| is a stats line,
    otherwise None. Malformed parts are logged and skipped.
  """
  if not line.startswith(_STATS_PREFIX):
    return None

  parts = line[len(_STATS_PREFIX):].split()
  stats = {}

  for part in parts:
    if ':' not in part:
      logs.log_error('Invalid stat part.', value=part)
      # Bug fix: without this continue, the unpack below raised an uncaught
      # ValueError on a part with no ':'.
      continue

    # maxsplit=1 so a value containing ':' cannot break the 2-way unpack.
    key, value = part.split(':', 1)
    try:
      stats[key] = int(value)
    except (ValueError, TypeError):
      logs.log_error('Invalid stat value.', key=key, value=value)

  return stats
Returns whether |target_path| contains netdriver string.
def _contains_netdriver(target_path): """Returns whether |target_path| contains netdriver string.""" with open(target_path, 'rb') as file_handle: data = file_handle.read() return data.find(b'\x01_LIBHFUZZ_NETDRIVER_BINARY_SIGNATURE_\x02\xff') != -1
def _is_multistep_merge_supported(target_path):
  """Checks whether a particular binary support multistep merge."""
  # TODO(Dor1s): implementation below a temporary workaround, do not tell any
  # body that we are doing this. The real solution would be to execute a
  # fuzz target with '-help=1' and check the output for the presence of
  # multistep merge support added in https://reviews.llvm.org/D71423.
  # The temporary implementation checks that the version of libFuzzer is at
  # least https://github.com/llvm/llvm-project/commit/da3cf61, which supports
  # multi step merge: https://github.com/llvm/llvm-project/commit/f054067.
  if not os.path.exists(target_path):
    return False
  with open(target_path, 'rb') as file_handle:
    return utils.search_bytes_in_file(MULTISTEP_MERGE_SUPPORT_TOKEN,
                                      file_handle)
def get_grammar(fuzzer_path):
  """Get grammar for a given fuzz target. Return none if there isn't one."""
  fuzzer_options = options.get_fuzz_target_options(fuzzer_path)
  if not fuzzer_options:
    return None
  grammar_options = fuzzer_options.get_grammar_options()
  if not grammar_options:
    return None
  return grammar_options.get('grammar')
def get_extra_env(fuzzer_path):
  """Get environment variables for a given fuzz target if any (or None)."""
  fuzzer_options = options.get_fuzz_target_options(fuzzer_path)
  if not fuzzer_options:
    return None
  return fuzzer_options.get_env()
def get_arguments(fuzzer_path) -> options.FuzzerArguments:
  """Get arguments for a given fuzz target."""
  arguments = options.FuzzerArguments()
  rss_limit_mb = None
  timeout = None

  target_options = options.get_fuzz_target_options(fuzzer_path)
  if target_options:
    arguments = target_options.get_engine_arguments('libfuzzer')
    rss_limit_mb = arguments.get('rss_limit_mb', constructor=int)
    timeout = arguments.get('timeout', constructor=int)

  # Clamp a missing or overly large timeout to the default limit.
  if timeout is None or timeout > constants.DEFAULT_TIMEOUT_LIMIT:
    arguments[constants.TIMEOUT_FLAGNAME] = constants.DEFAULT_TIMEOUT_LIMIT

  if not rss_limit_mb:
    arguments[constants.RSS_LIMIT_FLAGNAME] = constants.DEFAULT_RSS_LIMIT_MB
  else:
    # psutil gives the total amount of memory in bytes, but we're only dealing
    # with options that are counting memory space in MB, so we need to do the
    # conversion first.
    machine_limit_mb = (psutil.virtual_memory().total //
                        (1 << 20)) - constants.MEMORY_OVERHEAD
    # Custom rss_limit_mb value shouldn't be greater than the actual memory
    # allocated on the machine.
    if rss_limit_mb > machine_limit_mb:
      arguments[constants.RSS_LIMIT_FLAGNAME] = machine_limit_mb

  return arguments
def calculate_log_lines(log_lines):
  """Calculate number of logs lines of different kind in the given log.

  Args:
    log_lines: Iterable of log lines from a libFuzzer run.

  Returns:
    A tuple (other_lines_count, libfuzzer_lines_count, ignored_lines_count).
  """
  # Counters to be returned.
  libfuzzer_lines_count = 0
  other_lines_count = 0
  ignored_lines_count = 0

  # "Other" lines seen since the last libFuzzer status line; used to
  # retroactively reclassify a crash block (and any trailing tail) as ignored.
  lines_after_last_libfuzzer_line_count = 0
  libfuzzer_inited = False
  found_libfuzzer_crash = False

  for line in log_lines:
    if not libfuzzer_inited:
      # Skip to start of libFuzzer log output.
      if LIBFUZZER_LOG_START_INITED_REGEX.match(line):
        libfuzzer_inited = True
      else:
        ignored_lines_count += 1
        continue

    if LIBFUZZER_LOG_IGNORE_REGEX.match(line):
      # We should ignore lines like sanitizer warnings, etc.
      ignored_lines_count += 1
      continue

    if LIBFUZZER_ANY_CRASH_TYPE_REGEX.match(line):
      # We should ignore whole block if a libfuzzer crash is found.
      # E.g. slow units.
      found_libfuzzer_crash = True
    elif LIBFUZZER_LOG_LINE_REGEX.match(line):
      if found_libfuzzer_crash:
        # Ignore previous block: move its lines from "other" to "ignored".
        other_lines_count -= lines_after_last_libfuzzer_line_count
        ignored_lines_count += lines_after_last_libfuzzer_line_count

      libfuzzer_lines_count += 1
      lines_after_last_libfuzzer_line_count = 0
      found_libfuzzer_crash = False
    elif LIBFUZZER_LOG_END_REGEX.match(line):
      # Final status line: count it and stop scanning.
      libfuzzer_lines_count += 1
      break
    else:
      other_lines_count += 1
      lines_after_last_libfuzzer_line_count += 1

  # Ignore the lines after the last libfuzzer line.
  other_lines_count -= lines_after_last_libfuzzer_line_count
  ignored_lines_count += lines_after_last_libfuzzer_line_count

  return other_lines_count, libfuzzer_lines_count, ignored_lines_count
def strategy_column_name(strategy_name):
  """Convert the strategy name into stats column name."""
  # Stats columns use a fixed 'strategy_' prefix.
  return f'strategy_{strategy_name}'
Extract stats for fuzzing strategies used.
def parse_fuzzing_strategies(log_lines, strategies):
  """Extract stats for fuzzing strategies used.

  If |strategies| is empty, the strategy list is recovered from the first
  matching line of the log instead.
  """
  if not strategies:
    # Fall back to the strategies banner printed in the log.
    first_match = next(
        (m for m in (LIBFUZZER_FUZZING_STRATEGIES.match(line)
                     for line in log_lines) if m), None)
    if first_match:
      strategies = first_match.group(1).split(',')

  return process_strategies(strategies)
Process strategies, parsing any stored values.
def process_strategies(strategies, name_modifier=strategy_column_name):
  """Process strategies, parsing any stored values.

  Returns a dict mapping |name_modifier(strategy_name)| to either the parsed
  prefix value (for strategies carried as "<name>_<value>" entries) or 1 (for
  boolean strategies that appear verbatim in |strategies|).
  """
  stats = {}

  def _record_prefixed_value(entry, strategy_name):
    """Record the value of a "<strategy_name>_<value>" entry, if it is one."""
    prefix = strategy_name + '_'
    if not entry.startswith(prefix):
      return

    value_type = strategy.LIBFUZZER_STRATEGIES_WITH_PREFIX_VALUE_TYPE[
        strategy_name]
    try:
      stats[name_modifier(strategy_name)] = value_type(entry[len(prefix):])
    except (IndexError, ValueError) as e:
      logs.log_error('Failed to parse strategy "%s":\n%s\n' % (entry, str(e)))

  # These strategies are used with different values specified in the prefix.
  for strategy_type in strategy.LIBFUZZER_STRATEGIES_WITH_PREFIX_VALUE:
    for entry in strategies:
      _record_prefixed_value(entry, strategy_type.name)

  # Other strategies are either ON or OFF, without arbitrary values.
  for strategy_type in strategy.LIBFUZZER_STRATEGIES_WITH_BOOLEAN_VALUE:
    if strategy_type.name in strategies:
      stats[name_modifier(strategy_type.name)] = 1

  return stats
Extract stats for performance analysis.
def parse_performance_features(log_lines, strategies, arguments):
  """Extract stats for performance analysis.

  Args:
    log_lines: Lines of a libFuzzer run log.
    strategies: List of fuzzing strategy entries used for this run (may be
      empty, in which case they are recovered from the log).
    arguments: List of command-line arguments passed to the fuzzer.

  Returns:
    A dict of stat name -> value covering crash/oom/leak/timeout flags,
    coverage counters, log line classification and strategy usage.
  """
  # TODO(ochang): Remove include_strategies once refactor is complete.
  # Initialize stats with default values. Note that |startup_crash_count|
  # defaults to 1 and is cleared below only if engine output is observed.
  stats = {
      'bad_instrumentation': 0,
      'corpus_crash_count': 0,
      'corpus_size': 0,
      'crash_count': 0,
      'dict_used': 0,
      'edge_coverage': 0,
      'edges_total': 0,
      'feature_coverage': 0,
      'initial_edge_coverage': 0,
      'initial_feature_coverage': 0,
      'leak_count': 0,
      'log_lines_unwanted': 0,
      'log_lines_from_engine': 0,
      'log_lines_ignored': 0,
      'max_len': 0,
      'manual_dict_size': 0,
      'merge_edge_coverage': 0,
      'new_edges': 0,
      'new_features': 0,
      'oom_count': 0,
      'slow_unit_count': 0,
      'slow_units_count': 0,
      'startup_crash_count': 1,
      'timeout_count': 0,
  }

  # Wrap raw argument list for typed flag lookups below.
  arguments = fuzzer_options.FuzzerArguments.from_list(arguments)

  # Extract strategy selection method.
  # TODO(ochang): Move to more general place?
  stats['strategy_selection_method'] = environment.get_value(
      'STRATEGY_SELECTION_METHOD', default_value='default')

  # Initialize all strategy stats as disabled by default (empty string for
  # string-valued strategies, 0 otherwise).
  for strategy_type in strategy.LIBFUZZER_STRATEGY_LIST:
    if strategy.LIBFUZZER_STRATEGIES_WITH_PREFIX_VALUE_TYPE.get(
        strategy_type.name) == str:
      stats[strategy_column_name(strategy_type.name)] = ''
    else:
      stats[strategy_column_name(strategy_type.name)] = 0

  # Process fuzzing strategies used.
  stats.update(parse_fuzzing_strategies(log_lines, strategies))

  (stats['log_lines_unwanted'], stats['log_lines_from_engine'],
   stats['log_lines_ignored']) = calculate_log_lines(log_lines)

  # Any engine output at all means the target did not crash on startup.
  if stats['log_lines_from_engine'] > 0:
    stats['startup_crash_count'] = 0

  # Extract '-max_len' value from arguments, if possible.
  max_len = arguments.get(
      constants.MAX_LEN_FLAGNAME, default=None, constructor=int)
  stats['max_len'] = max_len if max_len is not None else int(stats['max_len'])

  # Extract sizes of manual dictionary used for fuzzing.
  dictionary_path = arguments.get(constants.DICT_FLAGNAME, constructor=str)
  stats['manual_dict_size'] = dictionary_manager.get_stats_for_dictionary_file(
      dictionary_path)

  # Different crashes and other flags extracted via regexp match.
  has_corpus = False
  for line in log_lines:
    if LIBFUZZER_BAD_INSTRUMENTATION_REGEX.match(line):
      stats['bad_instrumentation'] = 1
      continue

    if LIBFUZZER_CRASH_TESTCASE_REGEX.match(line):
      stats['crash_count'] = 1
      continue

    if LIBFUZZER_LOG_DICTIONARY_REGEX.match(line):
      stats['dict_used'] = 1
      continue

    if LEAK_TESTCASE_REGEX.match(line):
      stats['leak_count'] = 1
      continue

    if (LIBFUZZER_OOM_TESTCASE_REGEX.match(line) or
        stacktrace_constants.OUT_OF_MEMORY_REGEX.match(line)):
      stats['oom_count'] = 1
      continue

    if LIBFUZZER_SLOW_UNIT_TESTCASE_REGEX.match(line):
      # Use |slow_unit_count| to track if this run had any slow units at all.
      # and use |slow_units_count| to track the actual number of slow units in
      # this run (used by performance analyzer).
      stats['slow_unit_count'] = 1
      stats['slow_units_count'] += 1
      continue

    # No |continue| here: a seed-corpus line may also match checks below.
    match = LIBFUZZER_LOG_SEED_CORPUS_INFO_REGEX.match(line)
    if match:
      has_corpus = True

    match = LIBFUZZER_MODULES_LOADED_REGEX.match(line)
    if match:
      stats['startup_crash_count'] = 0
      stats['edges_total'] = int(match.group(2))

    if (LIBFUZZER_TIMEOUT_TESTCASE_REGEX.match(line) or
        stacktrace_constants.LIBFUZZER_TIMEOUT_REGEX.match(line)):
      stats['timeout_count'] = 1
      continue

    if not stats['max_len']:
      # Get "max_len" value from the log, if it has not been found in arguments.
      match = LIBFUZZER_LOG_MAX_LEN_REGEX.match(line)
      if match:
        stats['max_len'] = int(match.group(1))
        continue

  # A seed corpus was loaded but the engine never produced output: the
  # target most likely crashed while reading the corpus.
  if has_corpus and not stats['log_lines_from_engine']:
    stats['corpus_crash_count'] = 1

  return stats
Extract stats from a log produced by libFuzzer run with -merge=1.
def parse_stats_from_merge_log(log_lines):
  """Extract stats from a log produced by libFuzzer run with -merge=1."""
  edge_coverage = 0
  feature_coverage = 0

  # Reverse the list as an optimization. The line of our interest is the last.
  for line in reversed(log_lines):
    match = LIBFUZZER_MERGE_LOG_STATS_REGEX.match(line)
    if not match:
      continue
    edge_coverage = int(match.group(2))
    feature_coverage = int(match.group(1))
    break

  return {
      'edge_coverage': edge_coverage,
      'feature_coverage': feature_coverage,
  }