Returns true if this is the first attempt at this task. Only applicable for analyze and progression tasks.
def is_first_attempt_for_task(task_name, testcase, reset_after_retry=False): """Returns true if this is the first attempt at this task. Only applicable for analyze and progression tasks.""" retry_key = f'{task_name}_retry' retry_flag = testcase.get_metadata(retry_key) if not retry_flag: # Update the metadata key since now we have tried it once. retry_value = True testcase.set_metadata(retry_key, retry_value) return True # Reset the metadata key so that tasks like progression task can be retried. if reset_after_retry: retry_value = False testcase.set_metadata(retry_key, retry_value) return False
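A minimal usage sketch of the retry gating above; the surrounding control flow and the tasks.add_task call are hypothetical and only illustrate that the first failed attempt is retried while the second gives up and resets the flag.

# Hypothetical error path in a progression-style task.
if is_first_attempt_for_task('progression', testcase):
    # First failure: requeue the task for one more try.
    tasks.add_task('progression', testcase.key.id(), testcase.job_type)
else:
    # Second failure: reset the flag for future runs and record the error.
    is_first_attempt_for_task('progression', testcase, reset_after_retry=True)
    update_testcase_comment(testcase, data_types.TaskState.ERROR,
                            'Progression failed on retry.')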
Return issue tracker name for a job type.
def get_issue_tracker_name(job_type=None): """Return issue tracker name for a job type.""" return get_value_from_job_definition_or_environment(job_type, 'ISSUE_TRACKER')
Return project name for a job type.
def get_project_name(job_type): """Return project name for a job type.""" default_project_name = utils.default_project_name() return get_value_from_job_definition(job_type, 'PROJECT_NAME', default_project_name)
Return the main repo for a job type.
def get_main_repo(job_type): """Return the main repo for a job type.""" return get_value_from_job_definition(job_type, 'MAIN_REPO')
Get security severity.
def _get_security_severity(crash, job_type, gestures): """Get security severity.""" if crash.security_flag: return severity_analyzer.get_security_severity( crash.crash_type, crash.crash_stacktrace, job_type, bool(gestures)) return None
Create a testcase and store it in the datastore using remote api.
def store_testcase(crash, fuzzed_keys, minimized_keys, regression, fixed, one_time_crasher_flag, crash_revision, comment, absolute_path, fuzzer_name, fully_qualified_fuzzer_name, job_type, archived, archive_filename, http_flag, gestures, redzone, disable_ubsan, window_argument, timeout_multiplier, minimized_arguments, trusted): """Create a testcase and store it in the datastore using remote api.""" # Initialize variable to prevent invalid values. if archived: archive_state = data_types.ArchiveStatus.FUZZED else: archive_state = 0 if not gestures: gestures = [] if not redzone: redzone = 128 # Create the testcase. testcase = data_types.Testcase() testcase.crash_type = crash.crash_type testcase.crash_address = crash.crash_address testcase.crash_state = utils.decode_to_unicode(crash.crash_state) testcase.crash_stacktrace = filter_stacktrace(crash.crash_stacktrace) testcase.fuzzed_keys = fuzzed_keys testcase.minimized_keys = minimized_keys testcase.bug_information = '' testcase.regression = regression testcase.fixed = fixed testcase.security_flag = crash.security_flag testcase.security_severity = _get_security_severity(crash, job_type, gestures) testcase.one_time_crasher_flag = one_time_crasher_flag testcase.crash_revision = crash_revision testcase.absolute_path = absolute_path testcase.fuzzer_name = fuzzer_name testcase.overridden_fuzzer_name = fully_qualified_fuzzer_name or fuzzer_name testcase.job_type = job_type testcase.queue = tasks.default_queue() testcase.archive_state = archive_state testcase.archive_filename = archive_filename testcase.http_flag = http_flag testcase.timestamp = datetime.datetime.utcnow() testcase.gestures = gestures testcase.redzone = redzone testcase.disable_ubsan = disable_ubsan testcase.window_argument = window_argument testcase.timeout_multiplier = float(timeout_multiplier) testcase.minimized_arguments = minimized_arguments testcase.project_name = get_project_name(job_type) testcase.trusted = trusted # Set metadata fields (e.g. build url, build key, platform string, etc). set_initial_testcase_metadata(testcase) # Set crash metadata. # TODO(https://github.com/google/clusterfuzz/pull/3333#discussion_r1369199761) if hasattr(crash, 'crash_categories') and crash.crash_categories: testcase.set_metadata('crash_categories', list(crash.crash_categories)) # Update the comment and save testcase. update_testcase_comment(testcase, data_types.TaskState.NA, comment) # Get testcase id from newly created testcase. testcase_id = testcase.key.id() logs.log(('Created new testcase %d (reproducible:%s, security:%s).\n' 'crash_type: %s\ncrash_state:\n%s\n') % (testcase_id, not testcase.one_time_crasher_flag, testcase.security_flag, testcase.crash_type, testcase.crash_state)) # Update global blacklist to avoid finding this leak again (if needed). is_lsan_enabled = environment.get_value('LSAN') if is_lsan_enabled: from clusterfuzz._internal.fuzzing import leak_blacklist leak_blacklist.add_crash_to_global_blacklist_if_needed(testcase) return testcase_id
Set various testcase metadata fields during testcase initialization.
def set_initial_testcase_metadata(testcase): """Set various testcase metadata fields during testcase initialization.""" build_key = environment.get_value('BUILD_KEY') if build_key: testcase.set_metadata('build_key', build_key, update_testcase=False) build_url = environment.get_value('BUILD_URL') if build_url: testcase.set_metadata('build_url', build_url, update_testcase=False) gn_args_path = environment.get_value('GN_ARGS_PATH', '') if gn_args_path: gn_args = utils.read_data_from_file( gn_args_path, eval_data=False, default='is_msan = true').decode('utf-8') # Remove goma_dir from gn args since it is only relevant to the machine that # did the build. filtered_gn_args_lines = [ line for line in gn_args.splitlines() if not GOMA_DIR_LINE_REGEX.match(line) ] filtered_gn_args = '\n'.join(filtered_gn_args_lines) testcase.set_metadata('gn_args', filtered_gn_args, update_testcase=False) testcase.platform = environment.platform().lower() testcase.platform_id = environment.get_platform_id()
Add task status and message to the test case's comment field.
def update_testcase_comment(testcase, task_state, message=None): """Add task status and message to the test case's comment field.""" bot_name = environment.get_value('BOT_NAME', 'Unknown') task_name = environment.get_value('TASK_NAME', 'Unknown') # Override in postprocess. task_name = environment.get_initial_task_name() or task_name task_string = '%s task' % task_name.capitalize() timestamp = utils.current_date_time() # For some tasks like blame, progression and impact, we need to delete lines # from old task executions to avoid clutter. if (task_name in ['blame', 'progression', 'impact'] and task_state == data_types.TaskState.STARTED): pattern = r'.*?: %s.*\n' % task_string testcase.comments = re.sub(pattern, '', testcase.comments) testcase.comments += '[%s] %s: %s %s' % (timestamp, bot_name, task_string, task_state) if message: testcase.comments += ': %s' % message.rstrip('.') testcase.comments += '.\n' # Truncate if too long. if len(testcase.comments) > data_types.TESTCASE_COMMENTS_LENGTH_LIMIT: logs.log_error( 'Testcase comments truncated (testcase {testcase_id}, job {job_type}).'. format(testcase_id=testcase.key.id(), job_type=testcase.job_type)) testcase.comments = testcase.comments[ -data_types.TESTCASE_COMMENTS_LENGTH_LIMIT:] testcase.put() # Log the message in stackdriver after the testcase.put() call as otherwise # the testcase key might not available yet (i.e. for new testcase). if message: log_func = ( logs.log_error if task_state == data_types.TaskState.ERROR else logs.log) log_func('{message} (testcase {testcase_id}, job {job_type}).'.format( message=message, testcase_id=testcase.key.id(), job_type=testcase.job_type))
Get an iterator for open testcase ids.
def get_open_testcase_id_iterator(): """Get an iterator for open testcase ids.""" keys = ndb_utils.get_all_from_query( data_types.Testcase.query( ndb_utils.is_true(data_types.Testcase.open), data_types.Testcase.status == 'Processed'), keys_only=True, batch_size=data_types.TESTCASE_ENTITY_QUERY_LIMIT) for key in keys: yield key.id()
Check to see if all critical tasks have finished running on a test case.
def critical_tasks_completed(testcase): """Check to see if all critical tasks have finished running on a test case.""" if testcase.status == 'Unreproducible': # These tasks don't apply to unreproducible testcases. return True if testcase.one_time_crasher_flag: # These tasks don't apply to flaky testcases. return True # For non-chromium projects, impact and blame tasks are not applicable. if not utils.is_chromium(): return testcase.minimized_keys and testcase.regression return bool(testcase.minimized_keys and testcase.regression and testcase.is_impact_set_flag)
Return whether a build is unmarked, good or bad.
def get_build_state(job_type, crash_revision): """Return whether a build is unmarked, good or bad.""" build = data_types.BuildMetadata.query( data_types.BuildMetadata.job_type == job_type, data_types.BuildMetadata.revision == crash_revision).get() if not build: return data_types.BuildState.UNMARKED if build.bad_build: return data_types.BuildState.BAD return data_types.BuildState.GOOD
Add build metadata.
def add_build_metadata(job_type, crash_revision, is_bad_build, console_output=None): """Add build metadata.""" build = data_types.BuildMetadata() build.bad_build = is_bad_build build.bot_name = environment.get_value('BOT_NAME') build.console_output = filter_stacktrace(console_output) build.job_type = job_type build.revision = crash_revision build.timestamp = datetime.datetime.utcnow() build.put() if is_bad_build: logs.log_error( 'Bad build %s.' % job_type, revision=crash_revision, job_type=job_type, output=console_output) else: logs.log( 'Good build %s.' % job_type, revision=crash_revision, job_type=job_type) return build
Creates a data bundle bucket and adds IAM bindings for access.
def create_data_bundle_bucket_and_iams(data_bundle_name, emails): """Creates a data bundle bucket and adds iams for access.""" bucket_name = get_data_bundle_bucket_name(data_bundle_name) if not storage.create_bucket_if_needed(bucket_name): return False client = storage.create_discovery_storage_client() iam_policy = storage.get_bucket_iam_policy(client, bucket_name) if not iam_policy: return False members = [] # Add access for the domains allowed in project. domains = local_config.AuthConfig().get('whitelisted_domains', default=[]) for domain in domains: members.append('domain:%s' % domain) # Add access for the emails provided in function arguments. for email in emails: members.append('user:%s' % email) if not members: # No members to add, bail out. return True binding = storage.get_bucket_iam_binding(iam_policy, DATA_BUNDLE_DEFAULT_BUCKET_IAM_ROLE) if binding: binding['members'] = members else: binding = { 'role': DATA_BUNDLE_DEFAULT_BUCKET_IAM_ROLE, 'members': members, } iam_policy['bindings'].append(binding) return bool(storage.set_bucket_iam_policy(client, bucket_name, iam_policy))
Return data bundle bucket name on GCS.
def get_data_bundle_bucket_name(data_bundle_name): """Return data bundle bucket name on GCS.""" domain = bucket_domain_suffix() return '%s-corpus.%s' % (data_bundle_name, domain)
Return data bundle bucket url on GCS.
def get_data_bundle_bucket_url(data_bundle_name): """Return data bundle bucket url on GCS.""" return 'gs://%s' % get_data_bundle_bucket_name(data_bundle_name)
Get a specific environment variable's value for a fuzzer.
def get_value_from_fuzzer_environment_string(fuzzer_name, variable_pattern, default=None): """Get a specific environment variable's value for a fuzzer.""" fuzzer = data_types.Fuzzer.query(data_types.Fuzzer.name == fuzzer_name).get() if not fuzzer or not fuzzer.additional_environment_string: return default return get_value_from_environment_string( fuzzer.additional_environment_string, variable_pattern, default=default)
Return the TaskStatus object with the given name.
def get_task_status(name, create_if_needed=False): """Return the TaskStatus object with the given name.""" metadata = ndb.Key(data_types.TaskStatus, name).get() if not metadata and create_if_needed: metadata = data_types.TaskStatus(id=name) return metadata
Updates status for a task. Used to ensure that a single instance of a task is running at any given time.
def update_task_status(task_name, status, expiry_interval=None): """Updates status for a task. Used to ensure that a single instance of a task is running at any given time.""" bot_name = environment.get_value('BOT_NAME') failure_wait_interval = environment.get_value('FAIL_WAIT') # If we didn't get an expiry interval, default to our task lease interval. if expiry_interval is None: expiry_interval = environment.get_value('TASK_LEASE_SECONDS') if expiry_interval is None: logs.log_error('expiry_interval is None and TASK_LEASE_SECONDS not set.') def _try_update_status(): """Try update metadata.""" task_status = get_task_status(task_name, create_if_needed=True) # If another bot is already working on this task, bail out with error. if (status == data_types.TaskState.STARTED and task_status.status == data_types.TaskState.STARTED and not dates.time_has_expired( task_status.time, seconds=expiry_interval - 1)): return False task_status.bot_name = bot_name task_status.status = status task_status.time = utils.utcnow() task_status.put() return True # It is important that we do not continue until the metadata is updated. # This can lead to task loss, or can cause issues with multiple bots # attempting to run the task at the same time. while True: try: return ndb.transaction(_try_update_status, retries=0) except Exception: # We need to update the status under all circumstances. # Failing to update 'completed' status causes another bot # that picked up this job to bail out. logs.log_error('Unable to update %s task metadata. Retrying.' % task_name) time.sleep(utils.random_number(1, failure_wait_interval))
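A hedged sketch of the single-instance pattern this enables; the task name and body are hypothetical.

# Hypothetical guard around a task that must not run on two bots at once.
if not update_task_status('corpus_pruning', data_types.TaskState.STARTED):
    return  # Another bot holds a fresh STARTED status; skip this run.
try:
    run_corpus_pruning()  # hypothetical task body
finally:
    update_task_status('corpus_pruning', data_types.TaskState.FINISHED)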
Updates heartbeat with current timestamp and log data.
def update_heartbeat(force_update=False): """Updates heartbeat with current timestamp and log data.""" if environment.is_uworker(): # Uworkers can't update heartbeats. return 0 # Check if the heartbeat was recently updated. If yes, bail out. last_modified_time = persistent_cache.get_value( HEARTBEAT_LAST_UPDATE_KEY, constructor=datetime.datetime.utcfromtimestamp) if (not force_update and last_modified_time and not dates.time_has_expired( last_modified_time, seconds=data_types.HEARTBEAT_WAIT_INTERVAL)): return 0 bot_name = environment.get_value('BOT_NAME') current_time = datetime.datetime.utcnow() try: heartbeat = ndb.Key(data_types.Heartbeat, bot_name).get() if not heartbeat: heartbeat = data_types.Heartbeat() heartbeat.bot_name = bot_name heartbeat.key = ndb.Key(data_types.Heartbeat, bot_name) heartbeat.task_payload = tasks.get_task_payload() heartbeat.task_end_time = tasks.get_task_end_time() heartbeat.last_beat_time = current_time heartbeat.source_version = utils.current_source_version() heartbeat.platform_id = environment.get_platform_id() heartbeat.put() persistent_cache.set_value( HEARTBEAT_LAST_UPDATE_KEY, time.time(), persist_across_reboots=True) except: logs.log_error('Unable to update heartbeat.') return 0 return 1
Return true if our run timed out.
def bot_run_timed_out(): """Return true if our run timed out.""" run_timeout = environment.get_value('RUN_TIMEOUT') if not run_timeout: return False start_time = environment.get_value('START_TIME') if not start_time: return False start_time = datetime.datetime.utcfromtimestamp(start_time) # Actual run timeout takes off the duration for one task. average_task_duration = environment.get_value('AVERAGE_TASK_DURATION', 0) actual_run_timeout = run_timeout - average_task_duration return dates.time_has_expired(start_time, seconds=actual_run_timeout)
Gets component name for a job type.
def get_component_name(job_type): """Gets component name for a job type.""" job = data_types.Job.query(data_types.Job.name == job_type).get() if not job: return '' match = re.match(r'.*BUCKET_PATH[^\r\n]*-([a-zA-Z0-9]+)-component', job.get_environment_string(), re.DOTALL) if not match: return '' component_name = match.group(1) return component_name
Get the repository based on component.
def get_repository_for_component(component): """Get the repository based on component.""" default_repository = '' repository = '' repository_mappings = db_config.get_value('component_repository_mappings') for line in repository_mappings.splitlines(): current_component, value = line.split(';', 1) if current_component == 'default': default_repository = value elif current_component == component: repository = value return repository or default_repository
Return the first value matching the pattern from the environment string.
def get_value_from_environment_string(environment_string, variable_pattern, default=None): """Return the first value matching the pattern from the environment string.""" pattern = r'%s\s*=\s*(.*)' % variable_pattern match = re.search(pattern, environment_string) if not match: return default return match.group(1).strip()
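A self-contained example of the line-oriented matching above; note that variable_pattern is used as a regular expression fragment, so literal names work directly but metacharacters would need escaping. The environment string is made up.

environment_string = 'APP_NAME = chrome\nISSUE_TRACKER = monorail\nPROJECT_NAME = demo'
get_value_from_environment_string(environment_string, 'ISSUE_TRACKER')  # -> 'monorail'
get_value_from_environment_string(environment_string, 'MISSING', default='none')  # -> 'none'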
Get a specific environment variable's value from a job definition.
def get_value_from_job_definition(job_type, variable_pattern, default=None): """Get a specific environment variable's value from a job definition.""" if not job_type: return default job = data_types.Job.query(data_types.Job.name == job_type).get() if not job: return default return job.get_environment().get(variable_pattern, default)
Gets a specific environment variable's value from a job definition. If not found, it returns the value from the current environment.
def get_value_from_job_definition_or_environment(job_type, variable_pattern, default=None): """Gets a specific environment variable's value from a job definition. If not found, it returns the value from the current environment.""" return get_value_from_job_definition( job_type, variable_pattern, default=environment.get_value(variable_pattern, default))
Helper function to read a list of additional items from a job definition and fuzzer's additional environment string.
def get_additional_values_for_variable(variable_name, job_type, fuzzer_name): """Helper function to read a list of additional items from a job definition and fuzzer's additional environment string.""" value_list_strings = [ get_value_from_job_definition(job_type, variable_name), get_value_from_fuzzer_environment_string(fuzzer_name, variable_name), ] additional_values = [] for value_list_string in value_list_strings: if value_list_string: # Ignore whitespace between commas. additional_values += [v.strip() for v in value_list_string.split(',')] return additional_values
Return true if this notification has already been sent.
def is_notification_sent(testcase_id, user_email): """Return true if this notification has already been sent.""" notification = data_types.Notification.query( data_types.Notification.testcase_id == testcase_id, data_types.Notification.user_email == user_email).get() return bool(notification)
Create an entry log for a sent notification.
def create_notification_entry(testcase_id, user_email): """Create an entry log for a sent notification.""" notification = data_types.Notification() notification.testcase_id = testcase_id notification.user_email = user_email notification.put()
Create a testcase object, metadata, and task for a user uploaded test.
def create_user_uploaded_testcase(key, original_key, archive_state, filename, file_path_input, timeout, job, queue, http_flag, gestures, additional_arguments, bug_information, crash_revision, uploader_email, platform_id, app_launch_command, fuzzer_name, fully_qualified_fuzzer_name, fuzzer_binary_name, bundled, retries, bug_summary_update_flag, quiet_flag, additional_metadata=None, crash_data=None): """Create a testcase object, metadata, and task for a user uploaded test.""" testcase = data_types.Testcase() if crash_data: # External job with provided stacktrace. testcase.crash_type = crash_data.crash_type testcase.crash_state = crash_data.crash_state testcase.crash_address = crash_data.crash_address testcase.crash_stacktrace = crash_data.crash_stacktrace testcase.status = 'Processed' testcase.security_flag = crash_analyzer.is_security_issue( testcase.crash_stacktrace, testcase.crash_type, testcase.crash_address) testcase.regression = 'NA' testcase.comments = '[%s] %s: External testcase upload.\n' % ( utils.current_date_time(), uploader_email) # External jobs never get minimized. testcase.minimized_keys = 'NA' # analyze_task sets this for non-external reproductions. testcase.platform = job.platform.lower() testcase.platform_id = testcase.platform else: testcase.crash_type = '' testcase.crash_state = 'Pending' testcase.crash_address = '' testcase.crash_stacktrace = '' testcase.status = 'Pending' testcase.security_flag = False testcase.regression = '' testcase.comments = '[%s] %s: Analyze task.\n' % (utils.current_date_time(), uploader_email) testcase.minimized_keys = '' testcase.fuzzed_keys = key testcase.bug_information = '' testcase.fixed = '' testcase.one_time_crasher_flag = False testcase.crash_revision = crash_revision testcase.fuzzer_name = fuzzer_name testcase.overridden_fuzzer_name = fully_qualified_fuzzer_name or fuzzer_name testcase.job_type = job.name testcase.http_flag = bool(http_flag) testcase.archive_state = archive_state testcase.project_name = get_project_name(job.name) testcase.trusted = False if archive_state or bundled: testcase.absolute_path = file_path_input testcase.archive_filename = filename else: testcase.absolute_path = filename testcase.gestures = gestures if bug_information and bug_information.isdigit() and int(bug_information): testcase.bug_information = bug_information if platform_id: testcase.platform_id = platform_id.strip().lower() if additional_arguments: testcase.set_metadata( 'uploaded_additional_args', additional_arguments, update_testcase=False) if app_launch_command: testcase.set_metadata( 'app_launch_command', app_launch_command, update_testcase=False) if fuzzer_binary_name: testcase.set_metadata( 'fuzzer_binary_name', fuzzer_binary_name, update_testcase=False) if additional_metadata: for metadata_key, metadata_value in additional_metadata.items(): testcase.set_metadata(metadata_key, metadata_value, update_testcase=False) testcase.timestamp = utils.utcnow() testcase.uploader_email = uploader_email testcase.put() # Store the testcase upload metadata. 
testcase_id = testcase.key.id() metadata = data_types.TestcaseUploadMetadata() metadata.security_flag = testcase.security_flag metadata.filename = filename if testcase.status == 'Processed': metadata.status = 'Confirmed' else: metadata.status = 'Pending' metadata.uploader_email = uploader_email metadata.testcase_id = testcase_id metadata.blobstore_key = key metadata.original_blobstore_key = original_key metadata.timeout = timeout metadata.bundled = bundled metadata.retries = retries if bundled: metadata.path_in_archive = file_path_input metadata.timestamp = testcase.timestamp metadata.bug_summary_update_flag = bug_summary_update_flag metadata.quiet_flag = quiet_flag metadata.bug_information = testcase.bug_information if crash_data: if crash_analyzer.ignore_stacktrace(testcase.crash_stacktrace): close_invalid_uploaded_testcase(testcase, metadata, 'Irrelevant') return testcase.key.id() if check_uploaded_testcase_duplicate(testcase, metadata): close_invalid_uploaded_testcase(testcase, metadata, 'Duplicate') return testcase.key.id() metadata.put() # Create the job to analyze the testcase. # Use wait_time=0 to execute the task ASAP, since it is user-facing. tasks.add_task('analyze', testcase_id, job.name, queue, wait_time=0) return testcase.key.id()
Check if the uploaded testcase is a duplicate.
def check_uploaded_testcase_duplicate(testcase, metadata): """Check if the uploaded testcase is a duplicate.""" existing_testcase = find_testcase(testcase.project_name, testcase.crash_type, testcase.crash_state, testcase.security_flag) if not existing_testcase or existing_testcase.key.id() == testcase.key.id(): return False # If the existing test case is unreproducible and we are, replace the # existing test case with this one. if (existing_testcase.one_time_crasher_flag and not testcase.one_time_crasher_flag): duplicate_testcase = existing_testcase original_testcase = testcase else: duplicate_testcase = testcase original_testcase = existing_testcase metadata.status = 'Duplicate' metadata.duplicate_of = existing_testcase.key.id() duplicate_testcase.status = 'Duplicate' duplicate_testcase.duplicate_of = original_testcase.key.id() duplicate_testcase.put() return duplicate_testcase.key.id() == testcase.key.id()
Closes an invalid testcase and updates metadata.
def mark_invalid_uploaded_testcase(testcase, metadata, status): """Closes an invalid testcase and updates metadata.""" testcase.status = status testcase.minimized_keys = 'NA' testcase.regression = 'NA' testcase.set_impacts_as_na() testcase.fixed = 'NA' testcase.triaged = True testcase.put() metadata.status = status metadata.put()
Delete the testcase group with the specified id if it exists.
def delete_group(group_id, update_testcases=True): """Delete the testcase group with the specified id if it exists.""" # Remove all testcases from the group. if update_testcases: testcases = get_testcases_in_group(group_id) for testcase in testcases: remove_testcase_from_group(testcase) # Delete the group itself. group = get_entity_by_type_and_id(data_types.TestcaseGroup, group_id) if group: group.key.delete()
Return all testcase ids in the specified group.
def get_testcase_ids_in_group(group_id): """Return all testcase ids in the specified group.""" if not group_id or not str(group_id).isdigit(): return [] query = ndb_utils.get_all_from_query( data_types.Testcase.query(data_types.Testcase.group_id == int(group_id)), keys_only=True) return [key.id() for key in query]
Return all testcases in the specified group.
def get_testcases_in_group(group_id): """Return all testcases in the specified group.""" # Fetch by keys (strongly consistent) to avoid stale results from query # (eventually consistent). testcases = [] for testcase_id in get_testcase_ids_in_group(group_id): try: testcases.append(get_testcase_by_id(testcase_id)) except errors.InvalidTestcaseError: # Already deleted. continue return testcases
Removes a testcase from group.
def remove_testcase_from_group(testcase): """Removes a testcase from group.""" if not testcase: return testcase.group_id = 0 testcase.group_bug_information = 0 testcase.put()
Update group bug information for a group.
def update_group_bug(group_id): """Update group bug information for a group.""" if not group_id: # No associated group, no work to do. Bail out. return testcases = get_testcases_in_group(group_id) if not testcases: # No group members found. Bail out. return group_bug_information = 0 for testcase in testcases: if not testcase.bug_information: continue issue_id = int(testcase.bug_information) if not group_bug_information: group_bug_information = issue_id else: group_bug_information = min(group_bug_information, issue_id) for testcase in testcases: testcase.group_bug_information = group_bug_information ndb_utils.put_multi(testcases)
Return the datastore object with the given type and id if it exists.
def get_entity_by_type_and_id(entity_type, entity_id): """Return the datastore object with the given type and id if it exists.""" if not entity_id or not str(entity_id).isdigit() or int(entity_id) == 0: return None return entity_type.get_by_id(int(entity_id))
Get a testcase variant entity, and create if needed.
def get_or_create_testcase_variant(testcase_id, job_type): """Get a testcase variant entity, and create if needed.""" testcase_id = int(testcase_id) variant = data_types.TestcaseVariant.query( data_types.TestcaseVariant.testcase_id == testcase_id, data_types.TestcaseVariant.job_type == job_type).get() if not variant: variant = data_types.TestcaseVariant( testcase_id=testcase_id, job_type=job_type) return variant
Get a testcase variant entity
def get_testcase_variant(testcase_id, job_type): """Get a testcase variant entity""" testcase_id = int(testcase_id) return data_types.TestcaseVariant.query( data_types.TestcaseVariant.testcase_id == testcase_id, data_types.TestcaseVariant.job_type == job_type).get()
Records the existence of a fuzz target in the DB.
def record_fuzz_target(engine_name, binary_name, job_type): """Records the existence of a fuzz target in the DB.""" result = record_fuzz_targets(engine_name, [binary_name], job_type)[0] project = get_project_name(job_type) key_name = data_types.fuzz_target_fully_qualified_name( engine_name, project, binary_name) logs.log( 'Recorded use of fuzz target %s.' % key_name, project=project, engine=engine_name, binary_name=binary_name, job_type=job_type) return result
Gets or creates multiple db entities.
def get_or_create_multi_entities_from_keys(mapping): """Gets or creates multiple db entities.""" keys = list(mapping.keys()) entities = ndb_utils.get_multi( [ndb.Key(value.__class__, key) for key, value in mapping.items()]) entities = dict(zip(keys, entities)) new_entities = [ mapping[key] for key, entity in entities.items() if not entity ] new_entities = ndb_utils.get_multi(ndb_utils.put_multi(new_entities)) all_entities = [entity for entity in entities.values() if entity] + ( new_entities) return all_entities
Record existence of fuzz targets to the DB.
def record_fuzz_targets(engine_name, binaries, job_type): """Record existence of fuzz targets to the DB.""" # TODO(metzman): All of this code assumes that fuzzing jobs are behaving # reasonably and won't try to DoS us by putting bogus fuzzers in the db. # This should be changed by limiting the number of fuzz targets saved and # putting an expiration on them. binaries = [binary for binary in binaries if binary] if not binaries: logs.log_error('Expected binaries.') return None project = get_project_name(job_type) fuzz_target_mapping = { data_types.fuzz_target_fully_qualified_name(engine_name, project, binary): data_types.FuzzTarget(engine=engine_name, project=project, binary=binary) for binary in binaries } fuzz_targets = get_or_create_multi_entities_from_keys(fuzz_target_mapping) ndb_utils.put_multi(fuzz_targets) time_now = utils.utcnow() job_mapping = { data_types.fuzz_target_job_key(key_name, job_type): data_types.FuzzTargetJob( fuzz_target_name=key_name, job=job_type, engine=engine_name, last_run=time_now) for key_name in fuzz_target_mapping } jobs = get_or_create_multi_entities_from_keys(job_mapping) for job in jobs: # TODO(metzman): Decide if we want to handle unused fuzzers differentlyo. job.last_run = utils.utcnow() ndb_utils.put_multi(jobs) return fuzz_targets
Get FuzzTarget by fully qualified name.
def get_fuzz_target(name): """Get FuzzTarget by fully qualified name.""" if not name: return None return ndb.Key(data_types.FuzzTarget, name).get()
Get FuzzTargetJob by fully qualified name and job.
def get_fuzz_target_job(fuzz_target_name, job): """Get FuzzTargetJob by fully qualified name and job.""" return ndb.Key(data_types.FuzzTargetJob, data_types.fuzz_target_job_key(fuzz_target_name, job)).get()
Return a Datastore query for fuzz targets.
def get_fuzz_targets(engine=None, project=None, binary=None): """Return a Datastore query for fuzz targets.""" query = data_types.FuzzTarget().query() if engine: query = query.filter(data_types.FuzzTarget.engine == engine) if project: query = query.filter(data_types.FuzzTarget.project == project) if binary: query = query.filter(data_types.FuzzTarget.binary == binary) return ndb_utils.get_all_from_query(query)
Return the fuzzing engines currently running.
def get_fuzzing_engines(): """Return the fuzzing engines currently running.""" query = data_types.FuzzTarget.query( projection=[data_types.FuzzTarget.engine], distinct=True) return [f.engine for f in ndb_utils.get_all_from_query(query)]
Return whether or not |name| is a fuzzing engine.
def is_fuzzing_engine(name): """Return whether or not |name| is a fuzzing engine.""" query = data_types.FuzzTarget.query(data_types.FuzzTarget.engine == name) return bool(query.count(limit=1))
Returns all fuzzer names, including expanded child fuzzers.
def get_all_fuzzer_names_including_children(include_parents=False, project=None): """Returns all fuzzer names, including expanded child fuzzers.""" all_fuzzers = set() engine_fuzzers = get_fuzzing_engines() fuzzers = data_types.Fuzzer.query(projection=['name']) for fuzzer in fuzzers: # Add this if we're including all parents or this is not an engine fuzzer # with fuzz targets. if include_parents or fuzzer.name not in engine_fuzzers: all_fuzzers.add(fuzzer.name) for fuzz_target in get_fuzz_targets(project=project): all_fuzzers.add(fuzz_target.fully_qualified_name()) return sorted(list(all_fuzzers))
Return all job type names.
def get_all_job_type_names(project=None): """Return all job type names.""" query = data_types.Job.query(projection=['name']) if project: query = query.filter(data_types.Job.project == project) return sorted([job.name for job in query])
Get coverage information, or create if it doesn't exist.
def get_coverage_information(fuzzer_name, date, create_if_needed=False): """Get coverage information, or create if it doesn't exist.""" coverage_info = ndb.Key( data_types.CoverageInformation, data_types.coverage_information_key(fuzzer_name, date)).get() if not coverage_info and create_if_needed: coverage_info = data_types.CoverageInformation( fuzzer=fuzzer_name, date=date) return coverage_info
Close testcase (fixed=NA) with an error message.
def close_testcase_with_error(testcase, error_message): """Close testcase (fixed=NA) with an error message.""" update_testcase_comment(testcase, data_types.TaskState.ERROR, error_message) testcase.fixed = 'NA' testcase.open = False testcase.put()
If we marked progression as pending for this testcase, clear that state.
def clear_progression_pending(testcase): """If we marked progression as pending for this testcase, clear that state.""" if not testcase.get_metadata('progression_pending'): return testcase.delete_metadata('progression_pending', update_testcase=False)
Update metadata when the progression task completes.
def update_progression_completion_metadata(testcase, revision, is_crash=False, message=None): """Update metadata when the progression task completes.""" clear_progression_pending(testcase) testcase.set_metadata('last_tested_revision', revision, update_testcase=False) if is_crash: testcase.set_metadata( 'last_tested_crash_revision', revision, update_testcase=False) testcase.set_metadata( 'last_tested_crash_time', utils.utcnow(), update_testcase=False) if not testcase.open: testcase.set_metadata('closed_time', utils.utcnow(), update_testcase=False) update_testcase_comment(testcase, data_types.TaskState.FINISHED, message)
Clones a DataStore entity and returns the clone.
def clone_entity(e, **extra_args): """Clones a DataStore entity and returns the clone.""" ent_class = e.__class__ # pylint: disable=protected-access,unnecessary-dunder-call props = { v._code_name: v.__get__(e, ent_class) for v in ent_class._properties.values() if not isinstance(v, ndb.ComputedProperty) } props.update(extra_args) return ent_class(**props)
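A hedged usage sketch: cloning an existing Testcase entity while overriding a few fields; the specific overrides are illustrative.

# Copy a testcase but detach the clone from its group and bug (illustrative overrides).
new_testcase = clone_entity(testcase, group_id=0, group_bug_information=0, bug_information='')
new_testcase.put()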
Get a fuzz target's fully qualified name.
def fuzz_target_fully_qualified_name(engine, project, binary): """Get a fuzz target's fully qualified name.""" return engine + '_' + fuzz_target_project_qualified_name(project, binary)
Return a normalized name with special chars like slash, colon, etc. replaced with a hyphen (-). This is important as otherwise these chars break local and cloud storage paths.
def normalized_name(name): """Return a normalized name with special chars like slash, colon, etc. replaced with a hyphen (-). This is important as otherwise these chars break local and cloud storage paths.""" return SPECIAL_CHARS_REGEX.sub('-', name).strip('-')
Get a fuzz target's project qualified name.
def fuzz_target_project_qualified_name(project, binary): """Get a fuzz target's project qualified name.""" binary = normalized_name(binary) if not project: return binary if project == utils.default_project_name(): # Don't prefix with project name if it's the default project. return binary normalized_project_prefix = normalized_name(project) + '_' if binary.startswith(normalized_project_prefix): return binary return normalized_project_prefix + binary
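Worked examples of the qualification rules above, assuming normalized_name leaves plain alphanumeric/underscore names unchanged:

fuzz_target_project_qualified_name('libpng', 'png_read_fuzzer')
# -> 'libpng_png_read_fuzzer'
fuzz_target_project_qualified_name('libpng', 'libpng_png_read_fuzzer')
# -> 'libpng_png_read_fuzzer' (prefix already present, not doubled)
fuzz_target_project_qualified_name(utils.default_project_name(), 'png_read_fuzzer')
# -> 'png_read_fuzzer' (the default project is never used as a prefix)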
Return the key for FuzzTargetJob.
def fuzz_target_job_key(fuzz_target_name, job): """Return the key for FuzzTargetJob.""" return f'{fuzz_target_name}/{job}'
Returns string representation of the date in a format used for coverage.
def coverage_information_date_to_string(date): """Returns string representation of the date in a format used for coverage.""" return date.strftime(COVERAGE_INFORMATION_DATE_FORMAT)
Constructs an ndb key for CoverageInformation entity.
def coverage_information_key(project_qualified_fuzzer_name, date): """Constructs an ndb key for CoverageInformation entity.""" date_string = coverage_information_date_to_string(date) return project_qualified_fuzzer_name + '-' + date_string
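Assuming COVERAGE_INFORMATION_DATE_FORMAT is a day-granularity format such as '%Y-%m-%d' (the actual constant may differ), the key looks like:

import datetime
coverage_information_key('libpng_png_read_fuzzer', datetime.date(2023, 10, 1))
# -> 'libpng_png_read_fuzzer-2023-10-01'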
Return corresponding FuzzTargets for the given FuzzTargetJobs.
def get_fuzz_targets_for_target_jobs(target_jobs): """Return corresponding FuzzTargets for the given FuzzTargetJobs.""" target_keys = [ ndb.Key(data_types.FuzzTarget, t.fuzz_target_name) for t in target_jobs ] return ndb_utils.get_multi(target_keys)
Return a Datastore query for fuzz target to job mappings.
def get_fuzz_target_jobs(fuzz_target_name=None, engine=None, job=None, limit=None): """Return a Datastore query for fuzz target to job mappings.""" query = data_types.FuzzTargetJob.query() if fuzz_target_name: query = query.filter( data_types.FuzzTargetJob.fuzz_target_name == fuzz_target_name) if job: query = query.filter(data_types.FuzzTargetJob.job == job) if engine: query = query.filter(data_types.FuzzTargetJob.engine == engine) if limit is not None: return query.iter(limit=limit) return ndb_utils.get_all_from_query(query)
Get the current zone for locking purposes.
def _get_current_lock_zone(): """Get the current zone for locking purposes.""" if environment.get_value('LOCAL_DEVELOPMENT', False): return 'local' platform = environment.get_platform_group().lower() platform_group_mappings = db_config.get_value('platform_group_mappings') for mapping in platform_group_mappings.splitlines(): if ';' not in mapping: continue platform_group, zone = mapping.split(';') if platform_group.strip() == platform: return zone # Default to per-platform separation. logs.log_warn('Platform group mapping not set in admin configuration, ' 'using default platform - %s.' % platform) return platform
Return the lock key name with the current lock zone.
def _get_key_name_with_lock_zone(key_name): """Return the lock key name with the current lock zone.""" current_zone = _get_current_lock_zone() if not current_zone: logs.log_error('Could not find zone.') return None return current_zone + ';' + key_name
Actual lock acquire that runs in a transaction.
def _try_acquire_lock(key_name, expiration_time, holder): """Actual lock acquire that runs in a transaction.""" lock_entity = ndb.Key(data_types.Lock, key_name).get() if lock_entity is None: # Lock wasn't held, try to acquire. lock_entity = data_types.Lock( id=key_name, expiration_time=expiration_time, holder=holder) lock_entity.put() return lock_entity if lock_entity.expiration_time <= datetime.datetime.utcnow(): # Lock was expired, try to take over the lock. lock_entity.expiration_time = expiration_time lock_entity.holder = holder lock_entity.put() return lock_entity
Acquire a lock for the given key name. Returns the expiration time if succeeded, otherwise None. The lock holder is responsible for making sure it doesn't assume the lock is still held after the expiration time.
def acquire_lock(key_name, max_hold_seconds=DEFAULT_MAX_HOLD_SECONDS, retries=None, by_zone=True): """Acquire a lock for the given key name. Returns the expiration time if succeeded, otherwise None. The lock holder is responsible for making sure it doesn't assume the lock is still held after the expiration time.""" logs.log('Acquiring lock for %s.' % key_name) failed_acquires = 0 total_wait = 0 wait_exponent = 1 if by_zone: key_name_with_zone = _get_key_name_with_lock_zone(key_name) if key_name_with_zone is None: logs.log_error('Failed to get zone while trying to lock %s.' % key_name) return None key_name = key_name_with_zone bot_name = environment.get_value('BOT_NAME') expiration_delta = datetime.timedelta(seconds=max_hold_seconds) while total_wait < LOCK_CHECK_TIMEOUT: try: lock_entity = ndb.transaction( lambda: _try_acquire_lock(key_name, expiration_time=datetime.datetime.utcnow() + expiration_delta, holder=bot_name), retries=TRANSACTION_RETRIES) if lock_entity.holder == bot_name: logs.log('Got the lock.') return lock_entity.expiration_time except exceptions.Error: pass failed_acquires += 1 if retries and retries >= failed_acquires: logs.log('Failed to acquire lock, exceeded max retries.') return None logs.log('Failed to acquire lock, waiting...') # Exponential backoff. max_sleep = (1 << wait_exponent) * LOCK_CHECK_SLEEP_MULTIPLIER sleep_time = random.uniform(1.0, max_sleep) time.sleep(sleep_time) total_wait += sleep_time wait_exponent = min(wait_exponent + 1, MAX_WAIT_EXPONENT) logs.log('Timeout exceeded while trying to acquire lock, bailing.') return None
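A hedged sketch of the expected acquire/release pattern around a critical section; the key name and the work done under the lock are hypothetical.

expiration = acquire_lock('update-fuzzer-my_fuzzer', max_hold_seconds=600)
if expiration is None:
    return  # Could not get the lock; try again later.
try:
    do_exclusive_work()  # hypothetical critical section; must finish before `expiration`
finally:
    release_lock('update-fuzzer-my_fuzzer')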
Release a lock for the given key name.
def release_lock(key_name, force_release=False, by_zone=True): """Release a lock for the given key name.""" logs.log('Releasing lock for %s.' % key_name) bot_name = environment.get_value('BOT_NAME') if by_zone: key_name_with_zone = _get_key_name_with_lock_zone(key_name) if key_name_with_zone is None: logs.log_error('Failed to get zone while releasing %s.' % key_name) return key_name = key_name_with_zone lock_entity = ndb.Key(data_types.Lock, key_name).get() if lock_entity and (force_release or lock_entity.holder == bot_name): lock_entity.key.delete()
Get or initialize the NDB client.
def _client(): """Get or initialize the NDB client.""" global _ndb_client global _initial_pid if not _ndb_client: with _ndb_client_lock: if not _ndb_client: _ndb_client = ndb.Client(project=utils.get_application_id()) _initial_pid = os.getpid() return _ndb_client
Get the NDB context.
def context(): """Get the NDB context.""" if _initial_pid != os.getpid(): # Forked, clear the existing context to avoid issues. # TODO(ochang): Remove this hack once on Python 3, where we can set # multiprocessing.set_start_method to not fork. context_module._state.context = None # pylint: disable=protected-access with _client().context() as ndb_context: # Disable NDB caching, as NDB on GCE VMs do not use memcache and therefore # can't invalidate the memcache cache. ndb_context.set_memcache_policy(False) # Disable the in-context cache, as it can use up a lot of memory for # longer running tasks such as cron jobs. ndb_context.set_cache_policy(False) yield ndb_context
Wrapper for thread targets to initialize an NDB context, since contexts are thread local.
def thread_wrapper(func): """Wrapper for thread targets to initialize an NDB context, since contexts are thread local.""" @functools.wraps(func) def _wrapper(*args, **kwargs): with context(): return func(*args, **kwargs) return _wrapper
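A usage sketch: wrapping a thread target so datastore calls inside it run with their own NDB context.

import threading

def _worker():
    update_heartbeat(force_update=True)  # safe: thread_wrapper set up an NDB context

thread = threading.Thread(target=thread_wrapper(_worker))
thread.start()
thread.join()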
Helper for boolean property filters to avoid lint errors.
def is_true(boolean_prop): """Helper for boolean property filters to avoid lint errors.""" return boolean_prop == True
Helper for boolean property filters to avoid lint errors.
def is_false(boolean_prop): """Helper for boolean property filters to avoid lint errors.""" return boolean_prop == False
Get all results from a ndb.Model.
def get_all_from_model(model): """Get all results from a ndb.Model.""" return get_all_from_query(model.query())
Return all entities based on the query by paging, to avoid query expirations on App Engine.
def get_all_from_query(query, **kwargs): """Return all entities based on the query by paging, to avoid query expirations on App Engine.""" # TODO(ochang): Queries no longer expire with new NDB. Remove this and fix up all callers. kwargs.pop('batch_size', None) # No longer supported. yield from query.iter(**kwargs)
Generate chunks of iterable.
def _gen_chunks(values, size): """Generate chunks of iterable.""" values = list(values) for i in range(0, len(values), size): yield values[i:i + size]
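For example:

list(_gen_chunks([1, 2, 3, 4, 5], size=2))
# -> [[1, 2], [3, 4], [5]]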
Get multiple entities, working around a limitation in the NDB library with the maximum number of keys allowed.
def get_multi(keys): """Get multiple entities, working around a limitation in the NDB library with the maximum number of keys allowed.""" result = [] for chunk in _gen_chunks(keys, _GET_BATCH_SIZE): result.extend(ndb.get_multi(chunk)) return result
Put multiple entities, working around a limitation in the NDB library with the maximum number of keys allowed.
def put_multi(entities): """Put multiple entities, working around a limitation in the NDB library with the maximum number of keys allowed.""" result = [] for chunk in _gen_chunks(entities, _MODIFY_BATCH_SIZE): result.extend(ndb.put_multi(chunk)) return result
Delete multiple entities, working around a limitation in the NDB library with the maximum number of keys allowed.
def delete_multi(keys): """Delete multiple entities, working around a limitation in the NDB library with the maximum number of keys allowed.""" for chunk in _gen_chunks(keys, _MODIFY_BATCH_SIZE): ndb.delete_multi(chunk)
Tokenize a string, by line, into atomic tokens and complex tokens.
def tokenize(s): """Tokenize a string, by line, into atomic tokens and complex tokens.""" if not s: s = '' s = '%s' % s tokens = set() lines = s.splitlines() for line in lines: line = line.strip() only_ascii = re.sub(r'\s*[^\x00-\x7F]+\s*', ' ', line) tokens |= _complex_tokenize(only_ascii, limit=10) tokens.add(line.lower()) return tokens
Tokenize bug information for searching.
def tokenize_bug_information(testcase): """Tokenize bug information for searching.""" bug_indices = [] if testcase.bug_information: bug_indices.append(testcase.bug_information.lower().strip()) if testcase.group_bug_information: bug_indices.append(str(testcase.group_bug_information)) return bug_indices
Tokenize impact.
def tokenize_impact_version(version): """Tokenize impact.""" if not version: return [] tokens = set() splitted = version.split('.') for index in range(len(splitted)): tokens.add('.'.join(splitted[0:(index + 1)])) return [t for t in tokens if t.strip()]
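For example (the helper builds a set internally, so sort for a stable view):

sorted(tokenize_impact_version('51.0.2704.28'))
# -> ['51', '51.0', '51.0.2704', '51.0.2704.28']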
Prepare the search keywords into the form that is appropriate for searching according to our tokenization algorithm.
def prepare_search_keyword(s): """Prepare the search keywords into the form that is appropriate for searching according to our tokenization algorithm.""" return s.lower().strip()
Determine if the index is at 'aB', which is the start of a camel token. For example, with 'workAt', this function detects 'kA'.
def _is_camel_case_ab(s, index): """Determine if the index is at 'aB', which is the start of a camel token. For example, with 'workAt', this function detects 'kA'.""" return index >= 1 and s[index - 1].islower() and s[index].isupper()
Determine if the index ends at 'ABb', which is the start of a camel token. For example, with 'HTMLParser', this function detects 'LPa'.
def _is_camel_case_abb(s, index): """Determine if the index ends at 'ABb', which is the start of a camel token. For example, with 'HTMLParser', this function detects 'LPa'.""" return (index >= 2 and s[index - 2].isupper() and s[index - 1].isupper() and s[index].islower())
Iterate through (end_current_token_index, start_next_token_index) of `s`, which is tokenized based on non-alphanumeric characters and camel casing. For example, 'aa:bbCC' has 3 tokens: 'aa', 'bb', 'CC'. This function iterates through (1,3), (4,5), and (6,7); they represent a[a]:[b]bCC, aa:b[b][C]C, and aa:bbC[C][], respectively.
def _token_indices(s): """Iterate through (end_current_token_index, start_next_token_index) of `s`, which is tokenized based on non-alphanumeric characters and camel casing. For example, 'aa:bbCC' has 3 tokens: 'aa', 'bb', 'CC'. This function iterates through (1,3), (4,5), and (6,7); they represent a[a]:[b]bCC, aa:b[b][C]C, and aa:bbC[C][], respectively.""" index = 0 length = len(s) while index < length: if not s[index].isalnum(): end_index = index - 1 while index < length and not s[index].isalnum(): index += 1 yield end_index, index elif _is_camel_case_ab(s, index): yield (index - 1), index index += 1 elif _is_camel_case_abb(s, index): yield (index - 2), (index - 1) index += 1 else: index += 1 yield (length - 1), length
Tokenize a string into complex tokens. For example, a:b:c is tokenized into ['a', 'b', 'c', 'a:b', 'a:b:c', 'b:c']. This method works recursively. It generates all possible complex tokens starting from the first token. Then, it cuts off the first token and calls _complex_tokenize(..) with the rest of `s`. `limit` restricts the number of atomic tokens.
def _complex_tokenize(s, limit): """Tokenize a string into complex tokens. For example, a:b:c is tokenized into ['a', 'b', 'c', 'a:b', 'a:b:c', 'b:c']. This method works recursively. It generates all possible complex tokens starting from the first token. Then, it cuts off the first token and calls _complex_tokenize(..) with the rest of `s`. `limit` restricts the number of atomic tokens.""" if not s: return set() tokens = [] second_token_index = len(s) count = 0 for end_index, next_start_index in _token_indices(s): tokens.append(s[0:(end_index + 1)]) count += 1 second_token_index = min(next_start_index, second_token_index) if count >= limit: break tokens = {t.lower() for t in tokens if t.strip()} tokens |= _complex_tokenize(s[second_token_index:], limit=limit) return tokens
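The docstring's example, worked through the implementation above:

sorted(_complex_tokenize('a:b:c', limit=10))
# -> ['a', 'a:b', 'a:b:c', 'b', 'b:c', 'c']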
Return the struct pack format and bit mask for the integer values of size |num_bytes|.
def get_pack_format_and_mask_for_num_bytes(num_bytes, signed=False, little_endian=True): """Return the struct pack format and bit mask for the integer values of size |num_bytes|.""" if num_bytes == 1: pack_fmt = 'B' mask = (1 << 8) - 1 elif num_bytes == 2: pack_fmt = 'H' mask = (1 << 16) - 1 elif num_bytes == 4: pack_fmt = 'I' mask = (1 << 32) - 1 elif num_bytes == 8: pack_fmt = 'Q' mask = (1 << 64) - 1 else: raise ValueError if signed: pack_fmt = pack_fmt.lower() if num_bytes > 1: if little_endian: pack_fmt = '<' + pack_fmt else: pack_fmt = '>' + pack_fmt return pack_fmt, mask
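A short example pairing the returned format with the struct module:

import struct

pack_fmt, mask = get_pack_format_and_mask_for_num_bytes(4)
# pack_fmt == '<I', mask == 0xffffffff
packed = struct.pack(pack_fmt, 0x1122334455 & mask)  # masking keeps the value in range
struct.unpack(pack_fmt, packed)  # -> (0x22334455,)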
Store the list of fuzzed testcases from the fuzzer in a bot-specific testcase list file.
def create_testcase_list_file(testcase_file_paths, input_directory): """Store the list of fuzzed testcases from the fuzzer in a bot-specific testcase list file.""" if not testcase_file_paths: logs.log_error('No testcases found, skipping list file.') return bot_testcases_file_path = utils.get_bot_testcases_file_path(input_directory) with open(bot_testcases_file_path, 'wb') as bot_testcases_file_handle: bot_testcases_file_handle.write('\n'.join(testcase_file_paths).encode('utf-8'))
Return true if the file looks like a testcase file.
def is_valid_testcase_file(file_path, check_if_exists=True, size_limit=None, allowed_extensions=None): """Return true if the file looks like a testcase file.""" return utils.is_valid_testcase_file(file_path, check_if_exists, size_limit, allowed_extensions)
Returns true if the data bundle is locked and unavailable for use.
def is_locked(input_directory): """Returns true if the data bundle is locked and unavailable for use.""" lock_file_path = os.path.join(input_directory, LOCK_FILENAME) return os.path.exists(lock_file_path)
Returns list of |max_testcases| testcases.
def get_random_testcases(input_directory, max_testcases): """Returns list of |max_testcases| testcases.""" testcases_list = get_testcases(input_directory) return random.SystemRandom().sample(testcases_list, max_testcases)
Returns list of testcase files.
def get_testcases(input_directory): """Returns list of testcase files.""" testcase_list_file_path = os.path.join( input_directory, testcase_manager.TESTCASE_LIST_FILENAME) if not os.path.exists(testcase_list_file_path): return [] with open(testcase_list_file_path, 'rb') as testcase_list_file_handle: testcase_relative_file_paths = testcase_list_file_handle.read().decode('utf-8').splitlines() testcase_file_paths = [] for testcase_relative_file_path in testcase_relative_file_paths: # Discard junk paths. if not testcase_relative_file_path.strip(): continue testcase_file_path = os.path.join( input_directory, testcase_relative_file_path.replace('/', os.sep)) testcase_file_paths.append(testcase_file_path) return testcase_file_paths
Check if the number of errors during rsync is lower than our threshold.
def _rsync_errors_below_threshold(gsutil_result, max_errors): """Check if the number of errors during rsync is lower than our threshold.""" match = re.search(RSYNC_ERROR_REGEX, gsutil_result.output, re.MULTILINE) if not match: return False error_count = int(match.group(1)) # Ignore NotFoundException(s) since they can happen when files can get deleted # e.g. when pruning task is updating corpus. error_count -= gsutil_result.output.count(b'NotFoundException') error_count -= gsutil_result.output.count(b'No such file or directory') return error_count <= max_errors
Handle rsync result.
def _handle_rsync_result(gsutil_result, max_errors): """Handle rsync result.""" if gsutil_result.return_code == 0: sync_succeeded = True else: logs.log_warn( 'gsutil rsync got non-zero:\n' 'Command: %s\n' 'Output: %s\n' % (gsutil_result.command, gsutil_result.output)) sync_succeeded = _rsync_errors_below_threshold(gsutil_result, max_errors) return sync_succeeded and not gsutil_result.timed_out
Count the number of corpus files.
def _count_corpus_files(directory): """Count the number of corpus files.""" return shell.get_directory_file_count(directory)
Convert the name of every file in |file_paths| to a name that is legal on Windows. Returns the list of legally named files.
def legalize_filenames(file_paths): """Convert the name of every file in |file_paths| a name that is legal on Windows. Returns list of legally named files.""" if environment.is_trusted_host(): return file_paths illegal_chars = {'<', '>', ':', '\\', '|', '?', '*'} failed_to_move_files = [] legally_named = [] for file_path in file_paths: file_dir_path, basename = os.path.split(file_path) if not any(char in illegal_chars for char in basename): legally_named.append(file_path) continue # Hash file to get new name since it also lets us get rid of duplicates, # will not cause collisions for different files and makes things more # consistent (since libFuzzer uses hashes). sha1sum = utils.file_hash(file_path) new_file_path = os.path.join(file_dir_path, sha1sum) try: shutil.move(file_path, new_file_path) legally_named.append(new_file_path) except OSError: failed_to_move_files.append((file_path, new_file_path)) if failed_to_move_files: logs.log_error( 'Failed to rename files.', failed_to_move_files=failed_to_move_files) return legally_named
Convert the name of every corpus file in |directory| to a name that is allowed on Windows.
def legalize_corpus_files(directory): """Convert the name of every corpus file in |directory| to a name that is allowed on Windows.""" # Iterate through return value of legalize_filenames to convert every # filename. files_list = shell.get_files_list(directory) legalize_filenames(files_list)
Build GCS URL for corpus backup file for the given date. Returns: A string giving the GCS url.
def gcs_url_for_backup_file(backup_bucket_name, fuzzer_name, project_qualified_target_name, date): """Build GCS URL for corpus backup file for the given date. Returns: A string giving the GCS url. """ backup_dir = gcs_url_for_backup_directory(backup_bucket_name, fuzzer_name, project_qualified_target_name) backup_file = str(date) + os.extsep + BACKUP_ARCHIVE_FORMAT return f'{backup_dir.rstrip("/")}/{backup_file}'
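For example, assuming BACKUP_ARCHIVE_FORMAT is 'zip' (the actual constant may differ):

import datetime
gcs_url_for_backup_file('my-backup-bucket', 'libFuzzer', 'libpng_png_read_fuzzer',
                        datetime.date(2023, 10, 1))
# -> 'gs://my-backup-bucket/corpus/libFuzzer/libpng_png_read_fuzzer/2023-10-01.zip'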
Archive and store corpus as a backup. Args: dated_backup_signed_url: Signed URL to upload the backup. corpus: uworker_msg.FuzzTargetCorpus. directory: Path to the directory to be archived and backed up. Returns: True on success, False on failure.
def backup_corpus(dated_backup_signed_url, corpus, directory): """Archive and store corpus as a backup. Args: dated_backup_signed_url: Signed url to upload the backup. corpus: uworker_msg.FuzzTargetCorpus. directory: Path to directory to be archived and backuped. Returns: The backup GCS url, or None on failure. """ logs.log(f'Backing up corpus {corpus} {directory}') if not dated_backup_signed_url: logs.log('No backup url provided, skipping corpus backup.') return False timestamp = str(utils.utcnow().date()) # The archive path for shutil.make_archive should be without an extension. backup_archive_path = os.path.join( os.path.dirname(os.path.normpath(directory)), timestamp) backup_succeeded = True try: backup_archive_path = shutil.make_archive(backup_archive_path, BACKUP_ARCHIVE_FORMAT, directory) with open(backup_archive_path, 'rb') as fp: data = fp.read() if not storage.upload_signed_url(data, dated_backup_signed_url): return False except Exception as ex: backup_succeeded = False logs.log_error( f'backup_corpus failed: {ex}\n', directory=directory, backup_archive_path=backup_archive_path) finally: # Remove backup archive. shell.remove_file(backup_archive_path) return backup_succeeded
Build GCS URL for corpus backup directory. Returns: A string giving the GCS URL.
def gcs_url_for_backup_directory(backup_bucket_name, fuzzer_name, project_qualified_target_name): """Build GCS URL for corpus backup directory. Returns: A string giving the GCS URL. """ return (f'gs://{backup_bucket_name}/corpus/{fuzzer_name}/' f'{project_qualified_target_name}/')