Create a new table if needed.
def _create_table_if_needed(bigquery, dataset_id, table_id, schema): """Create a new table if needed.""" project_id = utils.get_application_id() table_body = { 'tableReference': { 'datasetId': dataset_id, 'projectId': project_id, 'tableId': table_id, }, 'timePartitioning': { 'type': 'DAY', }, } if schema is not None: table_body['schema'] = schema table_insert = bigquery.tables().insert( projectId=project_id, datasetId=dataset_id, body=table_body) return _execute_insert_request(table_insert)
Poll for completion.
def _poll_completion(bigquery, project_id, job_id): """Poll for completion.""" response = bigquery.jobs().get( projectId=project_id, jobId=job_id).execute(num_retries=NUM_RETRIES) while response['status']['state'] == 'RUNNING': response = bigquery.jobs().get( projectId=project_id, jobId=job_id).execute(num_retries=NUM_RETRIES) time.sleep(POLL_INTERVAL) return response
Load yesterday's stats into BigQuery.
def _load_data(fuzzer): """Load yesterday's stats into BigQuery.""" bigquery = big_query.get_api_client() project_id = utils.get_application_id() yesterday = (_utc_now().date() - datetime.timedelta(days=1)) date_string = yesterday.strftime('%Y%m%d') timestamp = utils.utc_date_to_timestamp(yesterday) dataset_id = fuzzer_stats.dataset_name(fuzzer) if not _create_dataset_if_needed(bigquery, dataset_id): return for kind in STATS_KINDS: kind_name = kind.__name__ table_id = kind_name if kind == fuzzer_stats.TestcaseRun: schema = fuzzer_stats_schema.get(fuzzer) else: schema = kind.SCHEMA if not schema: continue if not _create_table_if_needed(bigquery, dataset_id, table_id, schema): continue gcs_path = fuzzer_stats.get_gcs_stats_path(kind_name, fuzzer, timestamp) # Shard loads by prefix to avoid causing BigQuery to run out of memory. first_write = True for prefix in _HEX_DIGITS: load = { 'destinationTable': { 'projectId': project_id, 'tableId': table_id + '$' + date_string, 'datasetId': dataset_id, }, 'schemaUpdateOptions': ['ALLOW_FIELD_ADDITION',], 'sourceFormat': 'NEWLINE_DELIMITED_JSON', 'sourceUris': ['gs:/' + gcs_path + prefix + '*.json'], # Truncate on the first shard, then append the rest. 'writeDisposition': 'WRITE_TRUNCATE' if first_write else 'WRITE_APPEND', 'schema': schema, } job_body = { 'configuration': { 'load': load, }, } try: logs.log("Uploading job to BigQuery.", job_body=job_body) request = bigquery.jobs().insert(projectId=project_id, body=job_body) # pylint: disable=no-member load_response = request.execute(num_retries=NUM_RETRIES) job_id = load_response['jobReference']['jobId'] logs.log(f'Load job id: {job_id}') response = _poll_completion(bigquery, project_id, job_id) logs.log('Completed load: %s' % response) errors = response['status'].get('errors') if errors: logs.log_error( f'Failed load for {job_id} with errors: {str(errors)})') else: # Successful write. Subsequent writes should be WRITE_APPEND. first_write = False except Exception as e: # Log exception here as otherwise it gets lost in the thread pool # worker. logs.log_error(f'Failed to load: {str(e)}')
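For illustration, a minimal standalone sketch of the shard-and-load pattern used above: the first hex-prefix shard truncates the day partition and the remaining shards append. The helper name shard_load_configs and the value of _HEX_DIGITS are assumptions made for this sketch, and only the per-shard source globs and write dispositions are shown, not the actual BigQuery calls.

_HEX_DIGITS = '0123456789abcdef'  # Assumed value of the module constant.

def shard_load_configs(gcs_path):
  """Yield (source URI glob, write disposition) pairs, one per shard."""
  first_write = True
  for prefix in _HEX_DIGITS:
    disposition = 'WRITE_TRUNCATE' if first_write else 'WRITE_APPEND'
    yield 'gs:/' + gcs_path + prefix + '*.json', disposition
    # In the real cron, first_write only flips after a successful load.
    first_write = False

if __name__ == '__main__':
  for uri, disposition in shard_load_configs('/bucket/TestcaseRun/fuzzer/123/'):
    print(disposition, uri)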
Load bigquery stats from GCS.
def main(): """Load bigquery stats from GCS.""" if not big_query.get_bucket(): logs.log_error('Loading stats to BigQuery failed: missing bucket name.') return False thread_pool = ThreadPoolExecutor(max_workers=NUM_THREADS) # Retrieve list of fuzzers before iterating them, since the query can expire # as we create the load jobs. for fuzzer in list(data_types.Fuzzer.query()): logs.log('Loading stats to BigQuery for %s.' % fuzzer.name) thread_pool.submit(_load_data, fuzzer.name) thread_pool.shutdown(wait=True) logs.log('Load big query task finished successfully.') return True
Return the GCE project IDs.
def _get_project_ids(): """Return the GCE project IDs.""" return list(local_config.Config(local_config.GCE_CLUSTERS_PATH).get().keys())
Extract instance name from url.
def _instance_name_from_url(instance_url): """Extract instance name from url.""" return instance_url.split('/')[-1]
Get a name that can be used for GCE resources.
def get_resource_name(prefix, project_name): """Get a name that can be used for GCE resources.""" # https://cloud.google.com/compute/docs/reference/latest/instanceGroupManagers max_name_length = 58 project_name = project_name.lower().replace('_', '-') name = prefix + '-' + project_name return name[:max_name_length]
Return the instance template body.
def get_template_body(gce_project, template_name, task_tag=None, disk_size_gb=None, service_account=None, tls_cert=None): """Return the instance template body.""" template_body = copy.deepcopy( gce_project.get_instance_template(template_name)) if task_tag: template_body['properties']['metadata']['items'].append({ 'key': 'task-tag', 'value': task_tag, }) if disk_size_gb: disk = template_body['properties']['disks'][0] disk['initializeParams']['diskSizeGb'] = disk_size_gb if service_account: template_body['properties']['serviceAccounts'][0]['email'] = service_account if tls_cert: template_body['properties']['metadata']['items'].extend([{ 'key': 'tls-cert', 'value': tls_cert.cert_contents.decode('utf-8'), }, { 'key': 'tls-key', 'value': tls_cert.key_contents.decode('utf-8'), }]) return template_body
Get disk size from template.
def _get_template_disk_size(template): """Get disk size from template.""" return int( template['properties']['disks'][0]['initializeParams']['diskSizeGb'])
Get service account from template.
def _get_template_service_account(template): """Get service account from template.""" return template['properties']['serviceAccounts'][0]['email']
Return whether or not the template needs an update.
def _template_needs_update(current_template, new_template, resource_name): """Return whether or not the template needs an update.""" current_version = json.loads(current_template['description'])['version'] new_version = json.loads(new_template['description'])['version'] if current_version != new_version: logging.info( 'Instance template version out of date ' '(current=%s, new=%s): %s', current_version, new_version, resource_name) return True current_disk_size_gb = _get_template_disk_size(current_template) new_disk_size_gb = _get_template_disk_size(new_template) if current_disk_size_gb != new_disk_size_gb: logging.info( 'Instance template disk size changed ' '(current=%d, new=%d): %s', current_disk_size_gb, new_disk_size_gb, resource_name) return True current_service_account = _get_template_service_account(current_template) new_service_account = _get_template_service_account(new_template) if current_service_account != new_service_account: logging.info('Service account changed ' '(current=%s, new=%s): %s', current_service_account, new_service_account, resource_name) return True current_tls_cert = _get_metadata_value( current_template['properties']['metadata']['items'], 'tls-cert') new_tls_cert = _get_metadata_value( new_template['properties']['metadata']['items'], 'tls-cert') if current_tls_cert != new_tls_cert: logging.info('TLS cert changed.') return True return False
CPU distributor for OSS-Fuzz projects.
def main(): """CPU distributor for OSS-Fuzz projects.""" if utils.is_oss_fuzz(): manager_class = OssFuzzClustersManager else: manager_class = ClustersManager for project_id in _get_project_ids(): manager = manager_class(project_id) manager.update_clusters() logging.info('Mange VMs succeeded.') return True
Return iterator to open testcases with bugs.
def get_open_testcases_with_bugs(): """Return iterator to open testcases with bugs.""" return data_types.Testcase.query( ndb_utils.is_true(data_types.Testcase.open), data_types.Testcase.status == 'Processed', data_types.Testcase.bug_information != '').order( # pylint: disable=g-explicit-bool-comparison data_types.Testcase.bug_information, data_types.Testcase.key)
Return users to CC for a job.
def cc_users_for_job(job_type, security_flag): """Return users to CC for a job.""" # Memoized per cron run. return external_users.cc_users_for_job(job_type, security_flag)
Cron handler for adding new CC's to oss-fuzz bugs.
def main(): """Cron handler for adding new CC's to oss-fuzz bugs..""" for testcase in get_open_testcases_with_bugs(): issue_tracker = issue_tracker_utils.get_issue_tracker_for_testcase(testcase) if not issue_tracker: logging.error('Failed to get issue tracker manager for %s', testcase.key.id()) continue policy = issue_tracker_policy.get(issue_tracker.project) reported_label = policy.label('reported') if not reported_label: logging.info('No reported label.') return True reported_pattern = issue_filer.get_label_pattern(reported_label) try: issue = issue_tracker.get_original_issue(testcase.bug_information) except: logging.error('Error occurred when fetching issue %s.', testcase.bug_information) continue if not issue or not issue.is_open: continue ccs = cc_users_for_job(testcase.job_type, testcase.security_flag) new_ccs = [cc for cc in ccs if cc not in issue.ccs] if not new_ccs: # Nothing to do. continue for cc in new_ccs: logging.info('CCing %s on %s', cc, issue.id) issue.ccs.add(cc) comment = None if (not issue.labels.has_with_pattern(reported_pattern) and not data_handler.get_value_from_job_definition( testcase.job_type, 'DISABLE_DISCLOSURE', False)): # Add reported label and deadline comment if necessary. for result in issue_filer.apply_substitutions(policy, reported_label, testcase): issue.labels.add(result) if policy.label('restrict_view') in issue.labels: logging.info('Adding deadline comment on %s', issue.id) comment = policy.deadline_policy_message issue.save(new_comment=comment, notify=True) logging.info('OSS fuzz apply ccs succeeded.') return True
Return the issue body for filing new bugs.
def _get_issue_body(project_name, build_id, build_type): """Return the issue body for filing new bugs.""" template = ('The last {num_builds} builds for {project} have been failing.\n' '<b>Build log:</b> {log_link}\n' 'Build type: {build_type}\n\n' 'To reproduce locally, please see: ' 'https://google.github.io/oss-fuzz/advanced-topics/reproducing' '#reproducing-build-failures\n\n' '<b>This bug tracker is not being monitored by OSS-Fuzz team.</b>' ' If you have any questions, please create an issue at ' 'https://github.com/google/oss-fuzz/issues/new.\n\n' '**This bug will be automatically closed within a ' 'day once it is fixed.**') return template.format( num_builds=MIN_CONSECUTIVE_BUILD_FAILURES, project=project_name, log_link=_get_build_link(build_id), build_type=build_type)
Return the OssFuzzProject entity for the given project.
def _get_oss_fuzz_project(project_name): """Return the OssFuzzProject entity for the given project.""" return ndb.Key(data_types.OssFuzzProject, project_name).get()
Return a link to the build log.
def _get_build_link(build_id): """Return a link to the build log.""" return BUCKET_URL + '/log-' + build_id + '.txt'
Constructs a Key literal for build failure entities.
def _get_ndb_key(project_name, build_type):
  """Constructs a Key literal for build failure entities."""
  if build_type == MAIN_BUILD_TYPE:
    return project_name

  # Use build type suffix for the auxiliary build (e.g. coverage).
  return '%s-%s' % (project_name, build_type)
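As a quick usage sketch (MAIN_BUILD_TYPE is assumed to be 'main' here, purely for illustration), the key ID is the bare project name for the main build and gains a suffix for auxiliary builds:

MAIN_BUILD_TYPE = 'main'  # Assumed value for this sketch.

def _get_ndb_key(project_name, build_type):
  if build_type == MAIN_BUILD_TYPE:
    return project_name
  return '%s-%s' % (project_name, build_type)

print(_get_ndb_key('libpng', 'main'))      # -> libpng
print(_get_ndb_key('libpng', 'coverage'))  # -> libpng-coverage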
Create new build failure.
def create_build_failure(project_name, failure, build_type): """Create new build failure.""" return data_types.OssFuzzBuildFailure( id=_get_ndb_key(project_name, build_type), project_name=project_name, last_checked_timestamp=get_build_time(failure), build_type=build_type)
Return the last build failure for the project.
def get_build_failure(project_name, build_type): """Return the last build failure for the project.""" key = ndb.Key(data_types.OssFuzzBuildFailure, _get_ndb_key(project_name, build_type)) return key.get()
Delete the build failure.
def close_build_failure(build_failure): """Delete the build failure.""" build_failure.key.delete()
Return a datetime for when the build was done.
def get_build_time(build):
  """Return a datetime for when the build was done."""
  # Strip the nanosecond precision from the timestamp, since it's not
  # supported by Python.
  stripped_timestamp = TIMESTAMP_PATTERN.match(build['finish_time'])
  if not stripped_timestamp:
    logs.log_error(
        'Invalid timestamp %s for %s.' % (build['finish_time'], build['name']))
    return None

  return datetime.datetime.strptime(stripped_timestamp.group(0),
                                    TIMESTAMP_FORMAT)
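Python's strptime only understands microsecond precision (%f), which is why the nanosecond digits have to be trimmed before parsing. A self-contained sketch follows, with assumed values for TIMESTAMP_PATTERN and TIMESTAMP_FORMAT (the real constants may differ):

import datetime
import re

# Assumed values of the module constants, for illustration only.
TIMESTAMP_PATTERN = re.compile(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}')
TIMESTAMP_FORMAT = '%Y-%m-%dT%H:%M:%S.%f'

def parse_build_time(finish_time):
  """Trim nanosecond digits that strptime's %f (microseconds) cannot parse."""
  stripped = TIMESTAMP_PATTERN.match(finish_time)
  if not stripped:
    return None
  return datetime.datetime.strptime(stripped.group(0), TIMESTAMP_FORMAT)

print(parse_build_time('2024-01-01T12:34:56.123456789Z'))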
File a new bug for a build failure.
def file_bug(issue_tracker, project_name, build_id, ccs, build_type): """File a new bug for a build failure.""" logs.log('Filing bug for new build failure (project=%s, build_type=%s, ' 'build_id=%s).' % (project_name, build_type, build_id)) issue = issue_tracker.new_issue() issue.title = '{project_name}: {build_type} build failure'.format( project_name=project_name, build_type=build_type.capitalize()) issue.body = _get_issue_body(project_name, build_id, build_type) issue.status = 'New' issue.labels.add('Type-Build-Failure') issue.labels.add('Proj-' + project_name) for cc in ccs: issue.ccs.add(cc) issue.save() return str(issue.id)
Close a build failure bug.
def close_bug(issue_tracker, issue_id, project_name): """Close a build failure bug.""" logs.log('Closing build failure bug (project=%s, issue_id=%s).' % (project_name, issue_id)) issue = issue_tracker.get_original_issue(issue_id) issue.status = 'Verified' issue.save( new_comment='The latest build has succeeded, closing this issue.', notify=True)
Send a reminder about the build still failing.
def send_reminder(issue_tracker, issue_id, build_id): """Send a reminder about the build still failing.""" issue = issue_tracker.get_original_issue(issue_id) comment = ('Friendly reminder that the build is still failing.\n' 'Please try to fix this failure to ensure that fuzzing ' 'remains productive.\n' 'Latest build log: {log_link}\n') comment = comment.format(log_link=_get_build_link(build_id)) issue.save(new_comment=comment, notify=True)
Close bugs for fixed builds.
def _close_fixed_builds(projects, build_type): """Close bugs for fixed builds.""" issue_tracker = issue_tracker_utils.get_issue_tracker() if not issue_tracker: raise OssFuzzBuildStatusError('Failed to get issue tracker.') for project in projects: project_name = project['name'] builds = project['history'] if not builds: continue build_failure = get_build_failure(project_name, build_type) if not build_failure: continue build = builds[0] if not build['success']: continue if build_failure.last_checked_timestamp >= get_build_time(build): logs.log_error('Latest successful build time for %s in %s config is ' 'older than or equal to last failure time.' % (project_name, build_type)) continue if build_failure.issue_id is not None: close_bug(issue_tracker, build_failure.issue_id, project_name) close_build_failure(build_failure)
Process failures.
def _process_failures(projects, build_type): """Process failures.""" issue_tracker = issue_tracker_utils.get_issue_tracker() if not issue_tracker: raise OssFuzzBuildStatusError('Failed to get issue tracker.') for project in projects: project_name = project['name'] builds = project['history'] if not builds: continue build = builds[0] if build['success']: continue project_name = project['name'] # Do not file an issue for non-main build types, if there is a main build # failure for the same project, as the root cause might be the same. if build_type != MAIN_BUILD_TYPE: build_failure = get_build_failure(project_name, MAIN_BUILD_TYPE) if build_failure: continue build_failure = get_build_failure(project_name, build_type) build_time = get_build_time(build) if build_failure: if build_time <= build_failure.last_checked_timestamp: # No updates. continue else: build_failure = create_build_failure(project_name, build, build_type) build_failure.last_checked_timestamp = build_time build_failure.consecutive_failures += 1 if build_failure.consecutive_failures >= MIN_CONSECUTIVE_BUILD_FAILURES: if build_failure.issue_id is None: oss_fuzz_project = _get_oss_fuzz_project(project_name) if not oss_fuzz_project: logs.log( 'Project %s is disabled, skipping bug filing.' % project_name) continue build_failure.issue_id = file_bug(issue_tracker, project_name, build['build_id'], oss_fuzz_project.ccs, build_type) elif (build_failure.consecutive_failures - MIN_CONSECUTIVE_BUILD_FAILURES) % REMINDER_INTERVAL == 0: send_reminder(issue_tracker, build_failure.issue_id, build['build_id']) build_failure.put()
Check that builds are up to date.
def _check_last_get_build_time(projects, build_type): """Check that builds are up to date.""" for project in projects: project_name = project['name'] builds = project['history'] if not builds: continue build = builds[0] time_since_last_build = utils.utcnow() - get_build_time(build) if time_since_last_build >= NO_BUILDS_THRESHOLD: # Something likely went wrong with the build infrastructure, log errors. logs.log_error('%s has not been built in %s config for %d days.' % (project_name, build_type, time_since_last_build.days))
Build status checker.
def main(): """Build status checker.""" for build_type, status_url in BUILD_STATUS_MAPPINGS: try: response = requests.get(status_url, timeout=HTTP_GET_TIMEOUT_SECS) response.raise_for_status() build_status = json.loads(response.text) except (requests.exceptions.RequestException, ValueError) as e: raise RuntimeError(str(e)) projects = build_status['projects'] _check_last_get_build_time(projects, build_type) _close_fixed_builds(projects, build_type) _process_failures(projects, build_type) logs.log('OSS fuzz apply ccs succeeded.') return True
Generate a self-signed certificate.
def generate_cert(project_name):
  """Generate a self-signed certificate."""
  # Defer imports to avoid issues on Python 2.
  from OpenSSL import crypto

  key = crypto.PKey()
  key.generate_key(crypto.TYPE_RSA, 2048)

  cert = crypto.X509()
  cert.get_subject().C = 'US'
  cert.get_subject().CN = '*' + untrusted.internal_network_domain()
  cert.get_subject().O = project_name
  cert.set_serial_number(9001)
  cert.set_notBefore(b'20000101000000Z')
  cert.set_notAfter(b'21000101000000Z')
  cert.set_issuer(cert.get_subject())
  cert.set_pubkey(key)
  cert.sign(key, 'sha256')

  cert_contents = crypto.dump_certificate(crypto.FILETYPE_PEM, cert)
  key_contents = crypto.dump_privatekey(crypto.FILETYPE_PEM, key)
  return cert_contents, key_contents
Generate OSS-Fuzz certs.
def main(): """Generate OSS-Fuzz certs.""" for project in data_types.OssFuzzProject.query(): tls_cert_key = ndb.Key(data_types.WorkerTlsCert, project.name) if tls_cert_key.get(): # Already generated. continue logs.log('Generating cert for %s.' % project.name) cert_contents, key_contents = generate_cert(project.name) tls_cert = data_types.WorkerTlsCert( id=project.name, cert_contents=cert_contents, key_contents=key_contents) tls_cert.put() logs.log('OSS fuzz generate certs succeeded.') return True
Periodically gathers new results from Predator requests.
def main(): """Periodically gathers new results from Predator requests.""" subscription = db_config.get_value('predator_result_topic') if not subscription: logs.log('No Predator subscription configured. Aborting.') return False client = pubsub.PubSubClient() messages = client.pull_from_subscription(subscription, acknowledge=True) for message in messages: message = json.loads(message.data) testcase_id = message['crash_identifiers'] try: testcase = data_handler.get_testcase_by_id(testcase_id) except errors.InvalidTestcaseError: logs.log('Testcase %s no longer exists.' % str(testcase_id)) continue testcase.set_metadata('predator_result', message, update_testcase=False) testcase.delete_metadata('blame_pending', update_testcase=False) testcase.put() logs.log('Set predator result for testcase %d.' % testcase.key.id()) logs.log('Finished processing predator results. %d total.' % len(messages)) return True
Return contents of URL.
def get_github_url(url): """Return contents of URL.""" github_credentials = db_config.get_value('github_credentials') if not github_credentials: raise ProjectSetupError('No github credentials.') client_id, client_secret = github_credentials.strip().split(';') response = requests.get( url, auth=(client_id, client_secret), timeout=HTTP_TIMEOUT_SECONDS) if response.status_code != 200: logs.log_error( f'Failed to get github url: {url}.', status_code=response.status_code) response.raise_for_status() return json.loads(response.text)
Get url of a blob/tree from a github json response.
def find_github_item_url(github_json, name): """Get url of a blob/tree from a github json response.""" for item in github_json['tree']: if item['path'] == name: return item['url'] return None
Return list of projects for oss-fuzz.
def get_oss_fuzz_projects(): """Return list of projects for oss-fuzz.""" ossfuzz_tree_url = ('https://api.github.com/repos/google/oss-fuzz/' 'git/trees/master') tree = get_github_url(ossfuzz_tree_url) projects = [] projects_url = find_github_item_url(tree, 'projects') if not projects_url: logs.log_error('No projects found.') return [] tree = get_github_url(projects_url) for item in tree['tree']: if item['type'] != 'tree': continue item_json = get_github_url(item['url']) project_yaml_url = find_github_item_url(item_json, 'project.yaml') if not project_yaml_url: continue projects_yaml = get_github_url(project_yaml_url) info = yaml.safe_load(base64.b64decode(projects_yaml['content'])) has_dockerfile = ( find_github_item_url(item_json, 'Dockerfile') or 'dockerfile' in info) if not has_dockerfile: continue projects.append((item['path'], info)) return projects
Get projects from GCS path.
def get_projects_from_gcs(gcs_url): """Get projects from GCS path.""" try: data = json.loads(storage.read_data(gcs_url)) except json.decoder.JSONDecodeError as e: raise ProjectSetupError(f'Error loading json file from {gcs_url}: {e}') return [(project['name'], project) for project in data['projects']]
Pre-process sanitizers field into a map from sanitizer name -> dict of options.
def _process_sanitizers_field(sanitizers):
  """Pre-process sanitizers field into a map from sanitizer name -> dict of
  options."""
  processed_sanitizers = {}
  if not isinstance(sanitizers, list):
    return None

  # each field can either be a Map or a String:
  # sanitizers:
  #   - undefined:
  #       experimental: true
  #   - address
  #   - memory
  for sanitizer in sanitizers:
    if isinstance(sanitizer, str):
      processed_sanitizers[sanitizer] = {}
    elif isinstance(sanitizer, dict):
      for key, value in sanitizer.items():
        processed_sanitizers[key] = value
    else:
      return None

  return processed_sanitizers
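A standalone run of the same normalization, showing how both forms accepted in project.yaml (a plain string and a map with options) end up in the result; the function body is repeated here only so the snippet runs on its own:

def _process_sanitizers_field(sanitizers):
  processed_sanitizers = {}
  if not isinstance(sanitizers, list):
    return None
  for sanitizer in sanitizers:
    if isinstance(sanitizer, str):
      processed_sanitizers[sanitizer] = {}
    elif isinstance(sanitizer, dict):
      for key, value in sanitizer.items():
        processed_sanitizers[key] = value
    else:
      return None
  return processed_sanitizers

# Mixed string/map entries, as they would appear after YAML parsing.
print(_process_sanitizers_field(
    ['address', {'undefined': {'experimental': True}}, 'memory']))
# -> {'address': {}, 'undefined': {'experimental': True}, 'memory': {}}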
Return jobs for the project.
def get_jobs_for_project(project, info): """Return jobs for the project.""" sanitizers = _process_sanitizers_field( info.get('sanitizers', DEFAULT_SANITIZERS)) if not sanitizers: logs.log_error(f'Invalid sanitizers field for {project}.') return [] engines = info.get('fuzzing_engines', DEFAULT_ENGINES) architectures = info.get('architectures', DEFAULT_ARCHITECTURES) jobs = [] for engine in engines: if engine not in JOB_MAP: continue for architecture in architectures: if architecture not in JOB_MAP[engine]: continue for sanitizer, options in sanitizers.items(): experimental = ( options.get('experimental', False) or info.get('experimental', False)) if sanitizer in JOB_MAP[engine][architecture]: job = JOB_MAP[engine][architecture][sanitizer] if experimental: job = _to_experimental_job(job) jobs.append(job) return jobs
Convert @googlemail.com to @gmail.com.
def convert_googlemail_to_gmail(email):
  """Convert @googlemail.com to @gmail.com."""
  # TODO(ochang): Investigate if we can/need to do this in general, and not
  # just for cloud storage bucket IAMs.
  if email.endswith('@googlemail.com'):
    return email.split('@')[0] + '@gmail.com'

  return email
Add user account to bucket.
def _add_users_to_bucket(info, client, bucket_name, iam_policy): """Add user account to bucket.""" ccs = sorted( ['user:' + convert_googlemail_to_gmail(cc) for cc in ccs_from_info(info)]) binding = storage.get_bucket_iam_binding(iam_policy, OBJECT_VIEWER_IAM_ROLE) if binding: # buckets.getIamPolicy can return duplicate members when we add a @gmail.com # as well as @googlemail.com address for the same account. binding['members'] = sorted(list(set(binding['members']))) if binding['members'] == ccs: return iam_policy filtered_members = [ member for member in binding['members'] if member in ccs ] if len(filtered_members) != len(binding['members']): # Remove old members. binding['members'] = filtered_members iam_policy = storage.set_bucket_iam_policy(client, bucket_name, iam_policy) # We might have no binding either from start or after filtering members above. # Create a new one in those cases. binding = storage.get_or_create_bucket_iam_binding(iam_policy, OBJECT_VIEWER_IAM_ROLE) for cc in ccs: if cc in binding['members']: continue logs.log(f'Adding {cc} to bucket IAM for {bucket_name}.') # Add CCs one at a time since the API does not work with invalid or # non-Google emails. modified_iam_policy = storage.add_single_bucket_iam( client, iam_policy, OBJECT_VIEWER_IAM_ROLE, bucket_name, cc) if modified_iam_policy: iam_policy = modified_iam_policy binding = storage.get_bucket_iam_binding(iam_policy, OBJECT_VIEWER_IAM_ROLE) if not binding['members']: # Check that the final binding has members. Empty bindings are not valid. storage.remove_bucket_iam_binding(iam_policy, OBJECT_VIEWER_IAM_ROLE) return iam_policy
Set service account for a bucket.
def _set_bucket_service_account(service_account, client, bucket_name, iam_policy): """Set service account for a bucket.""" # Add service account as objectAdmin. binding = storage.get_or_create_bucket_iam_binding(iam_policy, OBJECT_ADMIN_IAM_ROLE) members = ['serviceAccount:' + service_account['email']] if members == binding['members']: # No changes required. return iam_policy binding['members'] = members return storage.set_bucket_iam_policy(client, bucket_name, iam_policy)
Add CC'ed users to storage bucket IAM.
def add_bucket_iams(info, client, bucket_name, service_account): """Add CC'ed users to storage bucket IAM.""" iam_policy = storage.get_bucket_iam_policy(client, bucket_name) if not iam_policy: return iam_policy = _add_users_to_bucket(info, client, bucket_name, iam_policy) _set_bucket_service_account(service_account, client, bucket_name, iam_policy)
Add service account to the gcr.io images bucket.
def add_service_account_to_bucket(client, bucket_name, service_account, role): """Add service account to the gcr.io images bucket.""" iam_policy = storage.get_bucket_iam_policy(client, bucket_name) if not iam_policy: return binding = storage.get_or_create_bucket_iam_binding(iam_policy, role) member = 'serviceAccount:' + service_account['email'] if member in binding['members']: # No changes required. return binding['members'].append(member) storage.set_bucket_iam_policy(client, bucket_name, iam_policy)
Return whether or not a project has at least one maintainer.
def has_maintainer(info): """Return whether or not a project has at least one maintainer.""" return info.get('primary_contact') or info.get('auto_ccs')
Get list of CC's from project info.
def ccs_from_info(info): """Get list of CC's from project info.""" def _get_ccs(field_name, allow_list=True): """Return list of emails to cc given a field name.""" if field_name not in info: return [] field_value = info.get(field_name) if allow_list and isinstance(field_value, list): return field_value if isinstance(field_value, str): return [field_value] if field_value is None: return [] raise ProjectSetupError(f'Bad value for field {field_name}: {field_value}.') ccs = [] ccs.extend(_get_ccs('primary_contact', allow_list=False)) ccs.extend(_get_ccs('auto_ccs')) ccs.extend(_get_ccs('vendor_ccs')) return [utils.normalize_email(cc) for cc in ccs]
Update fuzzer job mappings.
def update_fuzzer_jobs(fuzzer_entities, job_names): """Update fuzzer job mappings.""" to_delete = {} for fuzzer_entity_key in fuzzer_entities: fuzzer_entity = fuzzer_entity_key.get() for job in data_types.Job.query(): if not job.environment_string: continue job_environment = job.get_environment() if not utils.string_is_true(job_environment.get('MANAGED', 'False')): continue if job.name in job_names: continue logs.log(f'Deleting job {job.name}') to_delete[job.name] = job.key try: fuzzer_entity.jobs.remove(job.name) except ValueError: pass fuzzer_entity.put() fuzzer_selection.update_mappings_for_fuzzer(fuzzer_entity) if to_delete: ndb_utils.delete_multi(to_delete.values())
Delete old projects that are no longer used or disabled.
def cleanup_old_projects_settings(project_names): """Delete old projects that are no longer used or disabled.""" to_delete = [] for project in data_types.OssFuzzProject.query(): if project.name not in project_names: logs.log(f'Deleting project {project.name}.') to_delete.append(project.key) if to_delete: ndb_utils.delete_multi(to_delete)
Setup settings for ClusterFuzz (such as CPU distribution).
def create_project_settings(project, info, service_account): """Setup settings for ClusterFuzz (such as CPU distribution).""" key = ndb.Key(data_types.OssFuzzProject, project) oss_fuzz_project = key.get() # Expecting to run a blackbox fuzzer, so use high end hosts. is_high_end = info.get('blackbox', False) ccs = ccs_from_info(info) language = info.get('language') if oss_fuzz_project: if oss_fuzz_project.service_account != service_account['email']: oss_fuzz_project.service_account = service_account['email'] oss_fuzz_project.put() if oss_fuzz_project.high_end != is_high_end: oss_fuzz_project.high_end = is_high_end oss_fuzz_project.put() if oss_fuzz_project.ccs != ccs: oss_fuzz_project.ccs = ccs oss_fuzz_project.put() else: if language in MEMORY_SAFE_LANGUAGES: cpu_weight = OSS_FUZZ_MEMORY_SAFE_LANGUAGE_PROJECT_WEIGHT else: cpu_weight = OSS_FUZZ_DEFAULT_PROJECT_CPU_WEIGHT data_types.OssFuzzProject( id=project, name=project, high_end=is_high_end, cpu_weight=cpu_weight, service_account=service_account['email'], ccs=ccs).put()
Create a pubsub topic and subscription if needed.
def _create_pubsub_topic(name, client): """Create a pubsub topic and subscription if needed.""" application_id = utils.get_application_id() topic_name = pubsub.topic_name(application_id, name) if client.get_topic(topic_name) is None: client.create_topic(topic_name) subscription_name = pubsub.subscription_name(application_id, name) if client.get_subscription(subscription_name) is None: client.create_subscription(subscription_name, topic_name)
Create pubsub topics from untrusted sources for tasks.
def create_pubsub_topics_for_untrusted(project): """Create pubsub topics from untrusted sources for tasks.""" client = pubsub.PubSubClient() for platform in PUBSUB_PLATFORMS: name = untrusted.queue_name(project, platform) _create_pubsub_topic(name, client)
Create pubsub topics from project configs for tasks.
def create_pubsub_topics_for_queue_id(platform): """Create pubsub topics from project configs for tasks.""" platform, queue_id = platform.split(tasks.SUBQUEUE_IDENTIFIER) name = untrusted.queue_name(platform, queue_id) client = pubsub.PubSubClient() _create_pubsub_topic(name, client)
Delete old pubsub topics and subscriptions.
def cleanup_pubsub_topics(project_names): """Delete old pubsub topics and subscriptions.""" client = pubsub.PubSubClient() application_id = utils.get_application_id() expected_topics = set() for platform in PUBSUB_PLATFORMS: expected_topics.update( [untrusted.queue_name(project, platform) for project in project_names]) pubsub_config = local_config.Config('pubsub.queues') unmanaged_queues = [queue['name'] for queue in pubsub_config.get('resources')] for topic in client.list_topics(pubsub.project_name(application_id)): _, name = pubsub.parse_name(topic) if (not name.startswith(tasks.JOBS_PREFIX) and not name.startswith(tasks.HIGH_END_JOBS_PREFIX)): # Some topic created by another service, ignore. continue if name in unmanaged_queues: continue if name in expected_topics: continue for subscription in client.list_topic_subscriptions(topic): client.delete_subscription(subscription) client.delete_topic(topic)
Clean up stale projects.
def cleanup_stale_projects(fuzzer_entities, project_names, job_names, segregate_projects): """Clean up stale projects.""" update_fuzzer_jobs(fuzzer_entities, job_names) cleanup_old_projects_settings(project_names) if segregate_projects: cleanup_pubsub_topics(project_names)
Setup ClusterFuzz jobs for projects.
def main(): """Setup ClusterFuzz jobs for projects.""" libfuzzer = data_types.Fuzzer.query( data_types.Fuzzer.name == 'libFuzzer').get() if not libfuzzer: logs.log_error('Failed to get libFuzzer Fuzzer entity.') return False afl = data_types.Fuzzer.query(data_types.Fuzzer.name == 'afl').get() if not afl: logs.log_error('Failed to get AFL Fuzzer entity.') return False honggfuzz = data_types.Fuzzer.query( data_types.Fuzzer.name == 'honggfuzz').get() if not honggfuzz: logs.log_error('Failed to get honggfuzz Fuzzer entity.') return False gft = data_types.Fuzzer.query( data_types.Fuzzer.name == 'googlefuzztest').get() if not gft: logs.log_error('Failed to get googlefuzztest Fuzzer entity.') return False centipede = data_types.Fuzzer.query( data_types.Fuzzer.name == 'centipede').get() if not centipede: logs.log_error('Failed to get Centipede Fuzzer entity.') return False project_config = local_config.ProjectConfig() segregate_projects = project_config.get('segregate_projects') project_setup_configs = project_config.get('project_setup') project_names = set() job_names = set() fuzzer_entities = { 'afl': afl.key, 'centipede': centipede.key, 'honggfuzz': honggfuzz.key, 'googlefuzztest': gft.key, 'libfuzzer': libfuzzer.key, } for setup_config in project_setup_configs: bucket_config = setup_config.get('build_buckets') if not bucket_config: raise ProjectSetupError('Project setup buckets not specified.') config = ProjectSetup( BUILD_BUCKET_PATH_TEMPLATE, REVISION_URL, setup_config.get('build_type'), config_suffix=setup_config.get('job_suffix', ''), external_config=setup_config.get('external_config', ''), segregate_projects=segregate_projects, experimental_sanitizers=setup_config.get('experimental_sanitizers', []), engine_build_buckets={ 'libfuzzer': bucket_config.get('libfuzzer'), 'libfuzzer-i386': bucket_config.get('libfuzzer_i386'), 'libfuzzer-arm': bucket_config.get('libfuzzer_arm'), 'afl': bucket_config.get('afl'), 'honggfuzz': bucket_config.get('honggfuzz'), 'googlefuzztest': bucket_config.get('googlefuzztest'), 'none': bucket_config.get('no_engine'), 'dataflow': bucket_config.get('dataflow'), 'centipede': bucket_config.get('centipede'), }, fuzzer_entities=fuzzer_entities, add_info_labels=setup_config.get('add_info_labels', False), add_revision_mappings=setup_config.get('add_revision_mappings', False), additional_vars=setup_config.get('additional_vars')) projects_source = setup_config.get('source') if projects_source == 'oss-fuzz': projects = get_oss_fuzz_projects() elif projects_source.startswith(storage.GS_PREFIX): projects = get_projects_from_gcs(projects_source) else: raise ProjectSetupError('Invalid projects source: ' + projects_source) if not projects: raise ProjectSetupError('Missing projects list.') result = config.set_up(projects) project_names.update(result.project_names) job_names.update(result.job_names) cleanup_stale_projects( list(fuzzer_entities.values()), project_names, job_names, segregate_projects) logs.log('Project setup succeeded.') return True
Return (task_target, job_name, queue_name) arguments to schedule a task.
def get_tasks_to_schedule(): """Return (task_target, job_name, queue_name) arguments to schedule a task.""" for job in data_types.Job.query(): if not utils.string_is_true(job.get_environment().get('CORPUS_PRUNE')): continue queue_name = tasks.queue_for_job(job.name) for target_job in fuzz_target_utils.get_fuzz_target_jobs(job=job.name): task_target = target_job.fuzz_target_name yield (task_target, job.name, queue_name)
Schedule corpus pruning tasks.
def main(): """Schedule corpus pruning tasks.""" for task_target, job_name, queue_name in get_tasks_to_schedule(): logs.log(f'Adding corpus pruning task {task_target}.') tasks.add_task('corpus_pruning', task_target, job_name, queue=queue_name) logs.log('Schedule corpus pruning task succeeded.') return True
Creates impact tasks.
def main(): """Creates impact tasks.""" task = 'impact' tasks_scheduler.schedule(task) return True
Creates progression tasks.
def main(): """Creates progression tasks.""" environment.set_bot_environment() task = 'progression' tasks_scheduler.schedule(task) return True
Create a googleapiclient client.
def _create_client(service_name, version='v1'): """Create a googleapiclient client.""" return googleapiclient.discovery.build(service_name, version)
Return full service account email.
def _service_account_email(project_id, service_account_id): """Return full service account email.""" return '%s@%s.iam.gserviceaccount.com' % (service_account_id, project_id)
Return service account ID for project.
def _service_account_id(project):
  """Return service account ID for project."""
  # From
  # cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts/create:
  #
  # The account id that is used to generate the service account email address
  # and a stable unique id. It is unique within a project, must be 6-30
  # characters long, and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])
  # to comply with RFC1035.
  account_id = _ACCOUNT_PREFIX + project.replace('_', '-')
  if not account_id[-1].isalnum():
    # Must end in '[a-z][0-9]'.
    account_id += '0'

  if len(account_id) < _MIN_LEN:
    # Must be at least |min_len| in length.
    account_id = account_id.ljust(_MIN_LEN, '0')

  # Use a hash prefix as the service account name if the project name is too
  # long.
  if len(account_id) > _MAX_LEN:
    account_id = _ACCOUNT_PREFIX + utils.string_hash(project)[:_HASH_PREFIX_LEN]

  assert len(account_id) >= _MIN_LEN and len(account_id) <= _MAX_LEN
  return account_id
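For illustration, a self-contained sketch of the same ID derivation. The constant values and the use of hashlib in place of utils.string_hash are assumptions made for this sketch, not the real definitions:

import hashlib

# Assumed values of the module constants, for illustration only.
_ACCOUNT_PREFIX = 'bot-'
_MIN_LEN = 6
_MAX_LEN = 30
_HASH_PREFIX_LEN = 26

def example_service_account_id(project):
  account_id = _ACCOUNT_PREFIX + project.replace('_', '-')
  if not account_id[-1].isalnum():
    account_id += '0'
  if len(account_id) < _MIN_LEN:
    account_id = account_id.ljust(_MIN_LEN, '0')
  if len(account_id) > _MAX_LEN:
    # Stand-in for utils.string_hash in this sketch.
    digest = hashlib.sha1(project.encode('utf-8')).hexdigest()
    account_id = _ACCOUNT_PREFIX + digest[:_HASH_PREFIX_LEN]
  return account_id

print(example_service_account_id('zlib'))
print(example_service_account_id('a_very_long_project_name_exceeding_the_limit'))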
Try to get a service account. Returns None if it does not exist.
def get_service_account(iam, project_id, service_account_id): """Try to get a service account. Returns None if it does not exist.""" try: request = iam.projects().serviceAccounts().get( name=(f'projects/{project_id}/serviceAccounts/' f'{_service_account_email(project_id, service_account_id)}')) return request.execute() except googleapiclient.errors.HttpError as e: if e.resp.status == 404: return None raise
Get or create service account for the project.
def get_or_create_service_account(project):
  """Get or create service account for the project."""
  iam = _create_client('iam')
  project_id = utils.get_application_id()

  service_account_id = _service_account_id(project)
  service_account = get_service_account(iam, project_id, service_account_id)
  if service_account:
    logging.info('Using existing service account for %s.', project)
    return service_account, True

  logging.info('Creating new service account for %s.', project)
  request = iam.projects().serviceAccounts().create(
      name='projects/' + project_id,
      body={
          'accountId': service_account_id,
          'serviceAccount': {
              'displayName': project,
          }
      })

  return request.execute(), False
Return the binding corresponding to the given role. Creates the binding if needed.
def _get_or_insert_iam_binding(policy, role): """Return the binding corresponding to the given role. Creates the binding if needed.""" existing_binding = next( (binding for binding in policy['bindings'] if binding['role'] == role), None) if existing_binding: return existing_binding new_binding = { 'role': role, 'members': [], } policy['bindings'].append(new_binding) return new_binding
Add a role to a service account. Returns whether or not changes were made.
def _add_service_account_role(policy, role, service_account): """Add a role to a service account. Returns whether or not changes were made.""" binding = _get_or_insert_iam_binding(policy, role) service_account_member = 'serviceAccount:' + service_account if service_account_member not in binding['members']: binding['members'].append(service_account_member) return True return False
Set roles for service account.
def set_service_account_roles(service_account): """Set roles for service account.""" project_id = utils.get_application_id() resource_manager = _create_client('cloudresourcemanager') request = resource_manager.projects().getIamPolicy( resource=project_id, body={}) policy = request.execute() # Set logging and metrics permissions. policy_changed = False policy_changed |= _add_service_account_role(policy, 'roles/logging.logWriter', service_account['email']) policy_changed |= _add_service_account_role( policy, 'roles/monitoring.metricWriter', service_account['email']) if not policy_changed: return request = resource_manager.projects().setIamPolicy( resource=project_id, body={ 'policy': policy, }) request.execute()
Gets a list of admins from the IAM policy.
def admins_from_iam_policy(iam_policy): """Gets a list of admins from the IAM policy.""" # Per # https://cloud.google.com/appengine/docs/standard/python/users/adminusers, An # administrator is a user who has the Viewer, Editor, or Owner primitive role, # or the App Engine App Admin predefined role roles = [ 'roles/editor', 'roles/owner', 'roles/viewer', 'roles/appengine.appAdmin', ] admins = [] for binding in iam_policy['bindings']: if binding['role'] not in roles: continue for member in binding['members']: user_type, email = member.split(':', 2) if user_type == 'user': admins.append(email) return admins
Update list of admins.
def update_admins(new_admins): """Update list of admins.""" existing_admins = ndb_utils.get_all_from_model(data_types.Admin) to_remove = [] existing_admin_emails = set() for admin in existing_admins: if admin.email not in new_admins: logs.log('Removing admin ' + admin.email) to_remove.append(admin.key) existing_admin_emails.add(admin.email) ndb_utils.delete_multi(to_remove) to_add = [] for admin in new_admins: if admin not in existing_admin_emails: to_add.append(data_types.Admin(id=admin, email=admin)) logs.log('Adding admin ' + admin) ndb_utils.put_multi(to_add)
Admin user syncing cron.
def main(): """Admin user syncing cron.""" resource_manager = discovery.build('cloudresourcemanager', 'v1') project_id = utils.get_application_id() # pylint: disable=no-member policy = resource_manager.projects().getIamPolicy( resource=project_id, body={}).execute() admins = admins_from_iam_policy(policy) update_admins(admins) logs.log('Sync admins succeeded.') return True
Add a triage message.
def _add_triage_message(testcase, message): """Add a triage message.""" if testcase.get_metadata(TRIAGE_MESSAGE_KEY) == message: # Message already exists, skip update. return # Re-fetch testcase to get latest entity and avoid race condition in updates. testcase = data_handler.get_testcase_by_id(testcase.key.id()) testcase.set_metadata(TRIAGE_MESSAGE_KEY, message)
Create a dummy bug entry for a test case.
def _create_filed_bug_metadata(testcase): """Create a dummy bug entry for a test case.""" metadata = data_types.FiledBug() metadata.timestamp = datetime.datetime.utcnow() metadata.testcase_id = testcase.key.id() metadata.bug_information = int(testcase.bug_information) metadata.group_id = testcase.group_id metadata.crash_type = testcase.crash_type metadata.crash_state = testcase.crash_state metadata.security_flag = testcase.security_flag metadata.platform_id = testcase.platform_id metadata.project_name = testcase.project_name metadata.job_type = testcase.job_type metadata.put()
Return list of jobs excluded from bug filing.
def _get_excluded_jobs(): """Return list of jobs excluded from bug filing.""" excluded_jobs = [] jobs = ndb_utils.get_all_from_model(data_types.Job) for job in jobs: job_environment = job.get_environment() # Exclude experimental jobs. if utils.string_is_true(job_environment.get('EXPERIMENTAL')): excluded_jobs.append(job.name) return excluded_jobs
Indicate if the bug is already filed.
def _is_bug_filed(testcase): """Indicate if the bug is already filed.""" # Check if the testcase is already associated with a bug. if testcase.bug_information: return True # Re-check our stored metadata so that we don't file the same testcase twice. is_bug_filed_for_testcase = data_types.FiledBug.query( data_types.FiledBug.testcase_id == testcase.key.id()).get() if is_bug_filed_for_testcase: return True return False
Indicate if the crash is important to file.
def _is_crash_important(testcase):
  """Indicate if the crash is important to file."""
  if not testcase.one_time_crasher_flag:
    # A reproducible crash is an important crash.
    return True

  if testcase.status != 'Processed':
    # A duplicate or unreproducible crash is not an important crash.
    return False

  # Testcase is unreproducible. Only those crashes that are crashing
  # frequently are important.
  if testcase.crash_type in UNREPRODUCIBLE_CRASH_IGNORE_CRASH_TYPES:
    return False

  # Ensure that there is no reproducible testcase in our group.
  if testcase.group_id:
    other_reproducible_testcase = data_types.Testcase.query(
        data_types.Testcase.group_id == testcase.group_id,
        ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get()
    if other_reproducible_testcase:
      # There is another reproducible testcase in our group. So, this crash is
      # not important.
      return False

  # Get crash statistics data on this unreproducible crash for last X days.
  last_hour = crash_stats.get_last_successful_hour()
  if not last_hour:
    # No crash stats available, skip.
    return False

  _, rows = crash_stats.get(
      end=last_hour,
      block='day',
      days=data_types.FILE_CONSISTENT_UNREPRODUCIBLE_TESTCASE_DEADLINE,
      group_by='reproducible_flag',
      where_clause=(
          'crash_type = %s AND crash_state = %s AND security_flag = %s' %
          (json.dumps(testcase.crash_type), json.dumps(testcase.crash_state),
           json.dumps(testcase.security_flag))),
      group_having_clause='',
      sort_by='total_count',
      offset=0,
      limit=1)

  # Calculate total crash count and crash days count.
  crash_days_indices = set()
  total_crash_count = 0
  for row in rows:
    if 'groups' not in row:
      continue

    total_crash_count += row['totalCount']
    for group in row['groups']:
      for index in group['indices']:
        crash_days_indices.add(index['hour'])

  crash_days_count = len(crash_days_indices)

  # Unreproducible testcases are only important if they happened at least once
  # every day for the last X days and the total crash count exceeded our
  # threshold limit.
  return (crash_days_count ==
          data_types.FILE_CONSISTENT_UNREPRODUCIBLE_TESTCASE_DEADLINE and
          total_crash_count >=
          data_types.FILE_UNREPRODUCIBLE_TESTCASE_MIN_CRASH_THRESHOLD)
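The day-counting step above can be exercised on its own. Here is a small standalone extract; the helper name count_crash_days and the sample row data are made up for illustration, but the row shape (row['groups'][i]['indices'][j]['hour']) mirrors what the crash stats query returns in the code above:

def count_crash_days(rows):
  """Count distinct days on which the crash was seen, plus the total count."""
  crash_days_indices = set()
  total_crash_count = 0
  for row in rows:
    if 'groups' not in row:
      continue
    total_crash_count += row['totalCount']
    for group in row['groups']:
      for index in group['indices']:
        crash_days_indices.add(index['hour'])
  return len(crash_days_indices), total_crash_count

rows = [{'totalCount': 12,
         'groups': [{'indices': [{'hour': 1}, {'hour': 2}, {'hour': 2}]}]}]
print(count_crash_days(rows))  # -> (2, 12): two distinct days, 12 crashes.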
Get list of similar open issues and ones that were recently closed.
def _check_and_update_similar_bug(testcase, issue_tracker): """Get list of similar open issues and ones that were recently closed.""" # Get similar testcases from the same group. similar_testcases_from_group = [] if testcase.group_id: group_query = data_types.Testcase.query( data_types.Testcase.group_id == testcase.group_id) similar_testcases_from_group = ndb_utils.get_all_from_query( group_query, batch_size=data_types.TESTCASE_ENTITY_QUERY_LIMIT // 2) # Get testcases with the same crash params. These might not be in the a group # if they were just fixed. same_crash_params_query = data_types.Testcase.query( data_types.Testcase.crash_type == testcase.crash_type, data_types.Testcase.crash_state == testcase.crash_state, data_types.Testcase.security_flag == testcase.security_flag, data_types.Testcase.project_name == testcase.project_name, data_types.Testcase.status == 'Processed') similar_testcases_from_query = ndb_utils.get_all_from_query( same_crash_params_query, batch_size=data_types.TESTCASE_ENTITY_QUERY_LIMIT // 2) for similar_testcase in itertools.chain(similar_testcases_from_group, similar_testcases_from_query): # Exclude ourself from comparison. if similar_testcase.key.id() == testcase.key.id(): continue # Exclude similar testcases without bug information. if not similar_testcase.bug_information: continue # Get the issue object given its ID. issue = issue_tracker.get_issue(similar_testcase.bug_information) if not issue: continue # If the reproducible issue is not verified yet, bug is still valid and # might be caused by non-availability of latest builds. In that case, # don't file a new bug yet. if similar_testcase.open and not similar_testcase.one_time_crasher_flag: return True # If the issue is still open, no need to file a duplicate bug. if issue.is_open: return True # If the issue indicates that this crash needs to be ignored, no need to # file another one. policy = issue_tracker_policy.get(issue_tracker.project) ignore_label = policy.label('ignore') if ignore_label in issue.labels: _add_triage_message( testcase, ('Skipping filing a bug since similar testcase ({testcase_id}) in ' 'issue ({issue_id}) is blacklisted with {ignore_label} label.' ).format( testcase_id=similar_testcase.key.id(), issue_id=issue.id, ignore_label=ignore_label)) return True # If this testcase is not reproducible, and a previous similar # non-reproducible bug was previously filed, don't file it again to avoid # spam. if (testcase.one_time_crasher_flag and similar_testcase.one_time_crasher_flag): _add_triage_message( testcase, 'Skipping filing unreproducible bug since one was already filed ' f'({similar_testcase.key.id()}).') return True # If the issue is recently closed, wait certain time period to make sure # our fixed verification has completed. if (issue.closed_time and not dates.time_has_expired( issue.closed_time, hours=data_types.MIN_ELAPSED_TIME_SINCE_FIXED)): _add_triage_message( testcase, ('Delaying filing a bug since similar testcase ' '({testcase_id}) in issue ({issue_id}) was just fixed.').format( testcase_id=similar_testcase.key.id(), issue_id=issue.id)) return True return False
File an issue for the testcase.
def _file_issue(testcase, issue_tracker, throttler): """File an issue for the testcase.""" logs.log(f'_file_issue for {testcase.key.id()}') filed = False file_exception = None if throttler.should_throttle(testcase): _add_triage_message(testcase, 'Skipping filing as it is throttled.') return False if crash_analyzer.is_experimental_crash(testcase.crash_type): logs.log(f'Skipping bug filing for {testcase.key.id()} as it ' 'has an experimental crash type.') _add_triage_message( testcase, 'Skipping filing as this is an experimental crash type.') return False try: _, file_exception = issue_filer.file_issue(testcase, issue_tracker) filed = True except Exception as e: file_exception = e if file_exception: logs.log_error(f'Failed to file issue for testcase {testcase.key.id()}.') _add_triage_message( testcase, f'Failed to file issue due to exception: {str(file_exception)}') return filed
Files bugs.
def main(): """Files bugs.""" try: logs.log('Grouping testcases.') grouper.group_testcases() logs.log('Grouping done.') except: logs.log_error('Error occurred while grouping test cases.') return False # Free up memory after group task run. utils.python_gc() # Get a list of jobs excluded from bug filing. excluded_jobs = _get_excluded_jobs() # Get a list of all jobs. This is used to filter testcases whose jobs have # been removed. all_jobs = data_handler.get_all_job_type_names() throttler = Throttler() for testcase_id in data_handler.get_open_testcase_id_iterator(): logs.log(f'Triaging {testcase_id}') try: testcase = data_handler.get_testcase_by_id(testcase_id) except errors.InvalidTestcaseError: # Already deleted. continue # Skip if testcase's job is removed. if testcase.job_type not in all_jobs: continue # Skip if testcase's job is in exclusions list. if testcase.job_type in excluded_jobs: continue # Skip if we are running progression task at this time. if testcase.get_metadata('progression_pending'): continue # If the testcase has a bug filed already, no triage is needed. if _is_bug_filed(testcase): continue # Check if the crash is important, i.e. it is either a reproducible crash # or an unreproducible crash happening frequently. if not _is_crash_important(testcase): continue # Require that all tasks like minimizaton, regression testing, etc have # finished. if not data_handler.critical_tasks_completed(testcase): continue # For testcases that are not part of a group, wait an additional time to # make sure it is grouped. # The grouper runs prior to this step in the same cron, but there is a # window of time where new testcases can come in after the grouper starts. # This delay needs to be longer than the maximum time the grouper can take # to account for that. # FIXME: In future, grouping might be dependent on regression range, so we # would have to add an additional wait time. # TODO(ochang): Remove this after verifying that the `ran_grouper` # metadata works well. if not testcase.group_id and not dates.time_has_expired( testcase.timestamp, hours=data_types.MIN_ELAPSED_TIME_SINCE_REPORT): continue if not testcase.get_metadata('ran_grouper'): # Testcase should be considered by the grouper first before filing. continue # If this project does not have an associated issue tracker, we cannot # file this crash anywhere. issue_tracker = issue_tracker_utils.get_issue_tracker_for_testcase(testcase) if not issue_tracker: issue_filer.notify_issue_update(testcase, 'new') continue # If there are similar issues to this test case already filed or recently # closed, skip filing a duplicate bug. if _check_and_update_similar_bug(testcase, issue_tracker): continue # Clean up old triage messages that would be not applicable now. testcase.delete_metadata(TRIAGE_MESSAGE_KEY, update_testcase=False) # File the bug first and then create filed bug metadata. if not _file_issue(testcase, issue_tracker, throttler): continue _create_filed_bug_metadata(testcase) issue_filer.notify_issue_update(testcase, 'new') logs.log('Filed new issue %s for testcase %d.' % (testcase.bug_information, testcase_id)) logs.log('Triage testcases succeeded.') return True
Creates tasks for open reproducible testcases.
def schedule(task): """Creates tasks for open reproducible testcases.""" testcase_ids = [] for status in ['Processed', 'Duplicate']: for testcase in data_types.Testcase.query( ndb_utils.is_true(data_types.Testcase.open), ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag), data_types.Testcase.status == status): testcase_id = testcase.key.id() try: tasks.add_task( task, testcase_id, testcase.job_type, queue=tasks.queue_for_testcase(testcase)) testcase_ids.append(testcase_id) except Exception: logs.log_error(f'Failed to create task for {testcase_id}') logs.log( 'Created progression tasks for testcases.', testcase_ids=testcase_ids)
Send email.
def send(to_email, subject, html_content): """Send email.""" sendgrid_api_key = db_config.get_value('sendgrid_api_key') if not sendgrid_api_key: logs.log_warn('Skipping email as SendGrid API key is not set in config.') return from_email = db_config.get_value('sendgrid_sender') if not from_email: logs.log_warn('Skipping email as SendGrid sender is not set in config.') return message = Mail( from_email=From(str(from_email)), to_emails=To(str(to_email)), subject=Subject(subject), html_content=HtmlContent(str(html_content))) try: sg = SendGridAPIClient(sendgrid_api_key) response = sg.send(message) logs.log( 'Sent email to %s.' % to_email, status_code=response.status_code, body=response.body, headers=response.headers) except Exception: logs.log_error('Failed to send email to %s.' % to_email)
Return all project names.
def get_all_project_names(): """Return all project names.""" query = data_types.Job.query( projection=[data_types.Job.project], distinct=True) return sorted([job.project for job in query])
Get current domain.
def get_domain(): """Get current domain.""" default_domain = '{app_id}.appspot.com'.format( app_id=utils.get_application_id()) return local_config.GAEConfig().get('domains.main', default=default_domain)
Return the testcase with the given id. Raises InvalidTestcaseError if no such testcase exists.
def get_testcase_by_id(testcase_id): """Return the testcase with the given id. Raises InvalidTestcaseError if no such testcase exists. """ try: parsed_id = int(testcase_id) except ValueError: raise errors.InvalidTestcaseError(testcase_id) if parsed_id == 0: raise errors.InvalidTestcaseError(0) testcase = ndb.Key(data_types.Testcase, parsed_id).get() if not testcase: raise errors.InvalidTestcaseError(parsed_id) return testcase
Find an open test case matching certain parameters.
def find_testcase(project_name, crash_type, crash_state, security_flag, testcase_to_exclude=None, fuzz_target=None): """Find an open test case matching certain parameters.""" # Prepare the query. query_args = [ data_types.Testcase.project_name == project_name, data_types.Testcase.crash_type == crash_type, data_types.Testcase.crash_state == crash_state, data_types.Testcase.security_flag == security_flag, data_types.Testcase.status == 'Processed', ndb_utils.is_true(data_types.Testcase.open) ] if fuzz_target and environment.get_value('DEDUP_ONLY_SAME_TARGET'): culprit_engine = None target_without_engine = None for engine in fuzzing.PUBLIC_ENGINES: if fuzz_target.startswith(f'{engine}_'): culprit_engine = engine target_without_engine = fuzz_target[len(culprit_engine) + 1:] break target_with_different_engines = [] assert culprit_engine target_with_different_engines = [ f'{engine}_{target_without_engine}' for engine in fuzzing.PUBLIC_ENGINES ] query_args.append( data_types.Testcase.overridden_fuzzer_name.IN( target_with_different_engines)) query = data_types.Testcase.query(*query_args) # Return any open (not fixed) test cases if they exist. testcases = ndb_utils.get_all_from_query(query) testcase = None testcase_quality = -1 for current_testcase in testcases: if (testcase_to_exclude and current_testcase.key.id() == testcase_to_exclude.key.id()): continue if current_testcase.duplicate_of: continue # Replace the current test case in various situations where we have found # a better one to use. Testcase quality is based on the following factors: # - Is this test case reproducible? Reproducible tests are preferred. # - Is there a bug for this? We prefer showing tests with bugs to point # users to existing bugs. # - Is this test case minimized ? Minimization confirms that testcase is # reproducible and more usable for reproduction. current_testcase_quality = 0 if not current_testcase.one_time_crasher_flag: current_testcase_quality |= 2**2 if current_testcase.bug_information: current_testcase_quality |= 2**1 if current_testcase.minimized_keys: current_testcase_quality |= 2**0 if current_testcase_quality > testcase_quality: testcase = current_testcase testcase_quality = current_testcase_quality if testcase_quality == MAX_TESTCASE_QUALITY: # Already found the best testcase possible, no more work to do. Bail out. break return testcase
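A small standalone sketch of the three-bit quality ranking used above to choose among duplicate candidates (reproducible beats filed-bug beats minimized). The Case tuple is a hypothetical stand-in for the Testcase entity, and the MAX_TESTCASE_QUALITY value is an assumption here:

from collections import namedtuple

# Hypothetical stand-in for the Testcase entity, for illustration only.
Case = namedtuple('Case', 'one_time_crasher_flag bug_information minimized_keys')

MAX_TESTCASE_QUALITY = 2**3 - 1  # Assumed: all three quality bits set.

def quality(case):
  """Reproducible > has a bug filed > minimized, encoded as a bitmask."""
  score = 0
  if not case.one_time_crasher_flag:
    score |= 2**2
  if case.bug_information:
    score |= 2**1
  if case.minimized_keys:
    score |= 2**0
  return score

cases = [Case(True, '123', ''), Case(False, '', 'keys'), Case(False, '456', 'keys')]
best = max(cases, key=quality)
print(quality(best), best)  # -> 7 for the reproducible, filed, minimized case.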
Return a crash type string for a testcase.
def get_crash_type_string(testcase):
  """Return a crash type string for a testcase."""
  crash_type = ' '.join(testcase.crash_type.splitlines())
  if crash_type not in CRASH_TYPE_VALUE_REGEX_MAP:
    return crash_type

  crash_stacktrace = get_stacktrace(testcase)
  match = re.match(CRASH_TYPE_VALUE_REGEX_MAP[crash_type], crash_stacktrace,
                   re.DOTALL)
  if not match:
    return crash_type

  return '%s (exceeds %s %s)' % (crash_type, match.group(1),
                                 CRASH_TYPE_DIMENSION_MAP[crash_type])
Filters stacktrace and returns content appropriate for storage as an appengine entity.
def filter_stacktrace(stacktrace, blob_name=None, signed_upload_url=None):
  """Filters stacktrace and returns content appropriate for storage as an
  appengine entity."""
  unicode_stacktrace = utils.decode_to_unicode(stacktrace)
  if len(unicode_stacktrace) <= data_types.STACKTRACE_LENGTH_LIMIT:
    return unicode_stacktrace

  # TODO(alhijazi): Once the migration is done, callers are expected to
  # always pass a `blob_name` and a `signed_upload_url`.
  if signed_upload_url:
    try:
      storage.upload_signed_url(
          unicode_stacktrace.encode('utf-8'), signed_upload_url)
      logs.log('Uploaded stacktrace using signed url.')
    except Exception:
      logs.log_error('Unable to upload crash stacktrace to signed url.')
      return unicode_stacktrace[(-1 * data_types.STACKTRACE_LENGTH_LIMIT):]
    return '%s%s' % (data_types.BLOBSTORE_STACK_PREFIX, blob_name)

  tmpdir = environment.get_value('BOT_TMPDIR')
  tmp_stacktrace_file = os.path.join(tmpdir, 'stacktrace.tmp')

  try:
    with open(tmp_stacktrace_file, 'wb') as handle:
      handle.write(unicode_stacktrace.encode('utf-8'))
    with open(tmp_stacktrace_file, 'rb') as handle:
      key = blobs.write_blob(handle)
  except Exception:
    logs.log_error('Unable to write crash stacktrace to temporary file.')
    shell.remove_file(tmp_stacktrace_file)
    return unicode_stacktrace[(-1 * data_types.STACKTRACE_LENGTH_LIMIT):]

  shell.remove_file(tmp_stacktrace_file)
  return '%s%s' % (data_types.BLOBSTORE_STACK_PREFIX, key)
Gets an issue summary string for a testcase.
def get_issue_summary(testcase):
  """Gets an issue summary string for a testcase."""
  # Get summary prefix. Note that values for fuzzers take priority over those
  # from job definitions.
  fuzzer_summary_prefix = get_value_from_fuzzer_environment_string(
      testcase.fuzzer_name, 'SUMMARY_PREFIX')
  job_summary_prefix = get_value_from_job_definition(testcase.job_type,
                                                     'SUMMARY_PREFIX')
  summary_prefix = fuzzer_summary_prefix or job_summary_prefix or ''

  issue_summary = summary_prefix
  binary_name = testcase.get_metadata('fuzzer_binary_name')
  if binary_name:
    if summary_prefix:
      issue_summary += ':'
    issue_summary += binary_name
  if issue_summary:
    issue_summary += ': '

  # For ASSERTs and CHECK failures, we should just use the crash type and the
  # first line of the crash state as titles. Note that ASSERT_NOT_REACHED
  # should be handled by the general case.
  if testcase.crash_type in [
      'ASSERT', 'CHECK failure', 'Security CHECK failure',
      'Security DCHECK failure'
  ]:
    issue_summary += (
        testcase.crash_type + ': ' + testcase.crash_state.splitlines()[0])
    return issue_summary

  # Special case for bad-cast style testcases.
  if testcase.crash_type == 'Bad-cast':
    filtered_crash_state_lines = testcase.crash_state.splitlines()

    # Add the to/from line (this should always exist).
    issue_summary += filtered_crash_state_lines[0]

    # Add the crash function if available.
    if len(filtered_crash_state_lines) > 1:
      issue_summary += ' in ' + filtered_crash_state_lines[1]

    return issue_summary

  # Add first lines from crash type and crash_state.
  if testcase.crash_type:
    filtered_crash_type = re.sub(r'UNKNOWN( READ| WRITE)?', 'Crash',
                                 testcase.crash_type.splitlines()[0])
    issue_summary += filtered_crash_type
  else:
    issue_summary += 'Unknown error'

  if testcase.crash_state == 'NULL' or not testcase.crash_state:
    # Special case for empty stacktrace.
    issue_summary += ' with empty stacktrace'
  else:
    issue_summary += ' in ' + testcase.crash_state.splitlines()[0]

  return issue_summary
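As a rough illustration of the general case above, a prefix, binary name, crash type, and top crash-state frame compose into a summary like the following; all values here are invented, not real testcase data.

# Invented values showing the summary composition only.
summary_prefix = 'libxml2'
binary_name = 'xml_parse_fuzzer'
crash_type = 'Heap-buffer-overflow READ 4'
crash_state_top_frame = 'xmlParseElement'
issue_summary = (f'{summary_prefix}:{binary_name}: '
                 f'{crash_type} in {crash_state_top_frame}')
print(issue_summary)
# libxml2:xml_parse_fuzzer: Heap-buffer-overflow READ 4 in xmlParseElement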
Return url to reproduce the bug.
def get_reproduction_help_url(testcase, config):
  """Return url to reproduce the bug."""
  return get_value_from_job_definition_or_environment(
      testcase.job_type, 'HELP_URL', default=config.reproduction_help_url)
Return FuzzerDisplay tuple.
def get_fuzzer_display_unprivileged(testcase, fuzz_target):
  """Return FuzzerDisplay tuple."""
  if (testcase.overridden_fuzzer_name == testcase.fuzzer_name or
      not testcase.overridden_fuzzer_name):
    return FuzzerDisplay(
        engine=None,
        target=None,
        name=testcase.fuzzer_name,
        fully_qualified_name=testcase.fuzzer_name)

  if not fuzz_target:
    # Legacy testcases.
    return FuzzerDisplay(
        engine=testcase.fuzzer_name,
        target=testcase.get_metadata('fuzzer_binary_name'),
        name=testcase.fuzzer_name,
        fully_qualified_name=testcase.overridden_fuzzer_name)

  return FuzzerDisplay(
      engine=fuzz_target.engine,
      target=fuzz_target.binary,
      name=fuzz_target.engine,
      fully_qualified_name=fuzz_target.fully_qualified_name())
Filter arguments, removing testcase argument and fuzz target binary names.
def filter_arguments(arguments, fuzz_target_name=None):
  """Filter arguments, removing testcase argument and fuzz target binary
  names."""
  # Filter out %TESTCASE*% argument.
  arguments = re.sub(r'[^\s]*%TESTCASE(|_FILE_URL|_HTTP_URL)%', '', arguments)
  if fuzz_target_name:
    arguments = arguments.replace(fuzz_target_name, '')

  return arguments.strip()
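A quick illustrative use of filter_arguments; the argument string and target name below are made up, not taken from a real job definition.

# Hypothetical inputs showing what gets stripped.
args = '-timeout=25 -runs=100 %TESTCASE_FILE_URL% ./my_fuzz_target'
print(filter_arguments(args, fuzz_target_name='./my_fuzz_target'))
# Expected output: -timeout=25 -runs=100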
Return minimized arguments, without testcase argument and fuzz target binary itself (for engine fuzzers).
def get_arguments(testcase):
  """Return minimized arguments, without testcase argument and fuzz target
  binary itself (for engine fuzzers)."""
  arguments = (
      testcase.minimized_arguments or
      get_value_from_job_definition(testcase.job_type, 'APP_ARGS', default=''))

  # Filter out fuzz target argument. We shouldn't have any case for this other
  # than what is needed by launcher.py for engine based fuzzers.
  fuzzer_display = get_fuzzer_display(testcase)
  fuzz_target = fuzzer_display.target
  return filter_arguments(arguments, fuzz_target)
Return memory tool options as a list of strings to pass on the command line.
def _get_memory_tool_options(testcase):
  """Return memory tool options as a list of strings to pass on the command
  line."""
  env = testcase.get_metadata('env')
  if not env:
    return []

  result = []
  for options_name, options_value in sorted(env.items()):
    # Strip symbolize flag, use default symbolize=1.
    options_value.pop('symbolize', None)
    if not options_value:
      continue

    options_string = environment.join_memory_tool_options(options_value)
    result.append('{options_name}="{options_string}"'.format(
        options_name=options_name,
        options_string=shlex.quote(options_string)))

  return result
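A rough sketch of the expected output shape, assuming a testcase whose 'env' metadata carries ASAN options; the values are invented for illustration.

# Hypothetical metadata: {'ASAN_OPTIONS': {'symbolize': 1, 'redzone': 256}}
# After the 'symbolize' key is stripped, the returned list would look roughly
# like:
#   ['ASAN_OPTIONS="redzone=256"']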
Return arguments to pass to a bazel test.
def _get_bazel_test_args(arguments, sanitizer_options):
  """Return arguments to pass to a bazel test."""
  result = []
  for sanitizer_option in sanitizer_options:
    result.append(f'--test_env={sanitizer_option}')

  for argument in shlex.split(arguments):
    result.append(f'--test_arg={shlex.quote(argument)}')

  return ' '.join(result)
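An illustrative call to _get_bazel_test_args; the sanitizer options and argument string are invented for this example.

# Hypothetical values, for illustration only.
sanitizer_options = ['ASAN_OPTIONS="redzone=256"']
print(_get_bazel_test_args('-timeout=25 -rss_limit_mb=2560', sanitizer_options))
# Expected output:
# --test_env=ASAN_OPTIONS="redzone=256" --test_arg=-timeout=25 --test_arg=-rss_limit_mb=2560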
Format a string with information from the testcase.
def format_issue_information(testcase, format_string):
  """Format a string with information from the testcase."""
  arguments = get_arguments(testcase)
  fuzzer_display = get_fuzzer_display(testcase)
  fuzzer_name = fuzzer_display.name or 'NA'
  fuzz_target = fuzzer_display.target or 'NA'
  engine = fuzzer_display.engine or 'NA'
  last_tested_crash_revision = str(
      testcase.get_metadata('last_tested_crash_revision') or
      testcase.crash_revision)
  project_name = get_project_name(testcase.job_type)
  testcase_id = str(testcase.key.id())
  sanitizer = environment.get_memory_tool_name(testcase.job_type)
  sanitizer_options = _get_memory_tool_options(testcase)
  sanitizer_options_string = ' '.join(sanitizer_options)
  bazel_test_args = _get_bazel_test_args(arguments, sanitizer_options)

  # Multi-target binaries.
  fuzz_target_parts = fuzz_target.split('@')
  base_fuzz_target = fuzz_target_parts[0]
  if len(fuzz_target_parts) == 2:
    fuzz_test_name = fuzz_target_parts[1]
  else:
    fuzz_test_name = ''

  result = format_string.replace('%TESTCASE%', testcase_id)
  result = result.replace('%PROJECT%', project_name)
  result = result.replace('%REVISION%', last_tested_crash_revision)
  result = result.replace('%FUZZER_NAME%', fuzzer_name)
  result = result.replace('%FUZZ_TARGET%', fuzz_target)
  result = result.replace('%BASE_FUZZ_TARGET%', base_fuzz_target)
  result = result.replace('%FUZZ_TEST_NAME%', fuzz_test_name)
  result = result.replace('%ENGINE%', engine)
  result = result.replace('%SANITIZER%', sanitizer)
  result = result.replace('%SANITIZER_OPTIONS%', sanitizer_options_string)
  result = result.replace('%ARGS%', arguments)
  result = result.replace('%BAZEL_TEST_ARGS%', bazel_test_args)
  return result
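A minimal standalone sketch of the placeholder substitution step, using invented values and no ClusterFuzz dependencies.

# Stand-alone illustration of the substitution loop; values are invented.
substitutions = {
    '%TESTCASE%': '12345',
    '%PROJECT%': 'example-project',
    '%FUZZ_TARGET%': 'parser_fuzzer',
    '%SANITIZER%': 'address (ASAN)',
}
help_format = ('Reproduce testcase %TESTCASE% for %FUZZ_TARGET% '
               '(%SANITIZER%) in %PROJECT%.')
for placeholder, value in substitutions.items():
  help_format = help_format.replace(placeholder, value)
print(help_format)
# Reproduce testcase 12345 for parser_fuzzer (address (ASAN)) in example-project.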
Return formatted help text for reproducing the bug.
def get_formatted_reproduction_help(testcase):
  """Return formatted help text for reproducing the bug."""
  help_format = get_value_from_job_definition_or_environment(
      testcase.job_type, 'HELP_FORMAT')
  if not help_format:
    return None

  # Since this value may be in a job definition, it's non-trivial for it to
  # include newlines. Instead, it will contain backslash-escaped characters
  # that must be converted here (e.g. \n).
  help_format = help_format.encode().decode('unicode-escape')
  return format_issue_information(testcase, help_format)
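A small demonstration of the backslash-escape conversion noted above; the HELP_FORMAT value here is invented.

# Job definitions store literal backslash-escapes; this shows the conversion.
raw_value = 'Run the fuzzer locally:\\n  ./run_fuzzer %FUZZ_TARGET%'
decoded = raw_value.encode().decode('unicode-escape')
print(decoded)
# Run the fuzzer locally:
#   ./run_fuzzer %FUZZ_TARGET%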
Get the help text for this testcase for display in issue descriptions.
def get_plaintext_help_text(testcase, config):
  """Get the help text for this testcase for display in issue descriptions."""
  # Prioritize a HELP_FORMAT message if available.
  formatted_help = get_formatted_reproduction_help(testcase)
  if formatted_help:
    return formatted_help

  # Show a default message and HELP_URL if only it has been supplied.
  help_url = get_reproduction_help_url(testcase, config)
  if help_url:
    return 'See %s for instructions to reproduce this bug locally.' % help_url

  return ''
Return url to testcase fixed range.
def get_fixed_range_url(testcase):
  """Return url to testcase fixed range."""
  # Testcase is not fixed yet.
  if not testcase.fixed:
    return None

  # Testcase is unreproducible or coming from a custom binary.
  if testcase.fixed in ('NA', 'Yes'):
    return None

  return TESTCASE_REVISION_RANGE_URL.format(
      domain=get_domain(),
      job_type=testcase.job_type,
      revision_range=testcase.fixed)
Returns testcase as string.
def get_issue_description(testcase,
                          reporter=None,
                          show_reporter=False,
                          hide_crash_state=False):
  """Returns testcase as string."""
  # Get issue tracker configuration parameters.
  config = db_config.get()
  domain = get_domain()
  testcase_id = testcase.key.id()
  download_url = TESTCASE_DOWNLOAD_URL.format(
      domain=domain, testcase_id=testcase_id)
  report_url = TESTCASE_REPORT_URL.format(
      domain=domain, testcase_id=testcase_id)
  regressed_revision_range_url = TESTCASE_REVISION_RANGE_URL.format(
      domain=domain,
      job_type=testcase.job_type,
      revision_range=testcase.regression)
  revision_range_url = TESTCASE_REVISION_URL.format(
      domain=domain,
      job_type=testcase.job_type,
      revision=testcase.crash_revision)
  fixed_revision_range_url = TESTCASE_REVISION_RANGE_URL.format(
      domain=domain, job_type=testcase.job_type, revision_range=testcase.fixed)

  if testcase.status == 'Unreproducible':
    return ('Testcase {testcase_id} failed to reproduce the crash. '
            'Please inspect the program output at {report_url}.'.format(
                testcase_id=testcase_id, report_url=report_url))

  # Now create the content string.
  content_string = 'Detailed Report: %s\n\n' % report_url

  project_name = get_project_name(testcase.job_type)
  if project_name and project_name != utils.default_project_name():
    content_string += 'Project: %s\n' % project_name

  fuzzer_display = get_fuzzer_display(testcase)
  if fuzzer_display.engine:
    content_string += 'Fuzzing Engine: %s\n' % fuzzer_display.engine
    content_string += 'Fuzz Target: %s\n' % fuzzer_display.target
  else:
    content_string += 'Fuzzer: %s\n' % fuzzer_display.name

  content_string += 'Job Type: %s\n' % testcase.job_type

  # Add platform id if other than default ones. Only applicable to Android.
  # e.g. android:shamu_asan
  if testcase.platform_id:
    content_string += 'Platform Id: %s\n\n' % testcase.platform_id

  content_string += 'Crash Type: %s\n' % get_crash_type_string(testcase)
  content_string += 'Crash Address: %s\n' % testcase.crash_address

  if hide_crash_state:
    crash_state = '...see report...'
  else:
    crash_state = testcase.crash_state
  content_string += 'Crash State:\n%s\n' % (
      utils.indent_string(crash_state + '\n', 2))

  content_string += '%s\n\n' % environment.get_memory_tool_display_string(
      testcase.job_type)

  if data_types.SecuritySeverity.is_valid(testcase.security_severity):
    content_string += (
        'Recommended Security Severity: %s\n\n' %
        severity_analyzer.severity_to_string(testcase.security_severity))

  if (testcase.regression and testcase.regression != 'NA' and
      not testcase.regression.startswith('0:') and
      not testcase.regression.endswith('!')):
    content_string += 'Regressed: %s\n' % regressed_revision_range_url
  else:
    content_string += 'Crash Revision: %s\n' % revision_range_url

  if (testcase.fixed and testcase.fixed != 'NA' and testcase.fixed != 'Yes' and
      not testcase.fixed.endswith('!')):
    content_string += 'Fixed: %s\n' % fixed_revision_range_url

  if not content_string.endswith('\n\n'):
    content_string += '\n'

  content_string += 'Reproducer Testcase: %s\n\n' % download_url

  if testcase.gestures:
    content_string += 'Additional requirements: Requires Gestures\n\n'

  if testcase.http_flag:
    content_string += 'Additional requirements: Requires HTTP\n\n'

  if show_reporter:
    if reporter:
      content_string += (
          'Issue manually filed by: %s\n\n' % reporter.split('@')[0])
    else:
      content_string += 'Issue filed automatically.\n\n'

  # Jobs can override the help url.
  content_string += get_plaintext_help_text(testcase, config)

  # Unreproducible crash text is only applicable when we are consistently
  # seeing it happening, and hence the reason for auto-filing it. Otherwise,
  # someone filed it manually, so skip the text in that case.
  if not reporter and testcase.one_time_crasher_flag:
    content_string += '\n\n' + FILE_UNREPRODUCIBLE_TESTCASE_TEXT

  # Add additional body text from metadata.
  issue_metadata = testcase.get_metadata('issue_metadata', {})
  additional_fields = issue_metadata.get('additional_fields', {})
  additional_fields_strs = []
  for key, value in additional_fields.items():
    additional_fields_strs.append(f'{key}: {value}')
  if additional_fields_strs:
    content_string += '\n\n' + '\n'.join(additional_fields_strs)

  return content_string
Returns the stacktrace for a test case. This may require a blobstore read.
def get_stacktrace(testcase, stack_attribute='crash_stacktrace'):
  """Returns the stacktrace for a test case.

  This may require a blobstore read.
  """
  result = getattr(testcase, stack_attribute)
  if not result or not result.startswith(data_types.BLOBSTORE_STACK_PREFIX):
    return result

  # For App Engine, we can't write to a local file, so use blobs.read_key
  # instead.
  if environment.is_running_on_app_engine():
    key = result[len(data_types.BLOBSTORE_STACK_PREFIX):]
    return str(blobs.read_key(key), 'utf-8', errors='replace')

  key = result[len(data_types.BLOBSTORE_STACK_PREFIX):]
  tmpdir = environment.get_value('BOT_TMPDIR')
  tmp_stacktrace_file = os.path.join(tmpdir, 'stacktrace.tmp')
  blobs.read_blob_to_disk(key, tmp_stacktrace_file)

  try:
    with open(tmp_stacktrace_file) as handle:
      result = handle.read()
  except Exception:
    logs.log_error(
        'Unable to read stacktrace for testcase %d.' % testcase.key.id())
    result = ''

  shell.remove_file(tmp_stacktrace_file)
  return result
Handles duplicates and deletes unreproducible one.
def handle_duplicate_entry(testcase):
  """Handles duplicates and deletes unreproducible one."""
  # Caller ensures that our testcase object is up-to-date. If someone else
  # already marked us as a duplicate, no more work to do.
  if testcase.duplicate_of:
    return

  existing_testcase = find_testcase(
      testcase.project_name,
      testcase.crash_type,
      testcase.crash_state,
      testcase.security_flag,
      testcase_to_exclude=testcase,
      fuzz_target=testcase.fuzzer_name)
  if not existing_testcase:
    return

  # If the existing testcase's minimization has not completed yet, we shouldn't
  # be doing the next step. The testcase might turn out to be a non-reproducible
  # bug and we don't want to delete the other testcase which could be a fully
  # minimized and reproducible bug.
  if not existing_testcase.minimized_keys:
    return

  testcase_id = testcase.key.id()
  existing_testcase_id = existing_testcase.key.id()
  if (not testcase.bug_information and
      not existing_testcase.one_time_crasher_flag):
    metadata = data_types.TestcaseUploadMetadata.query(
        data_types.TestcaseUploadMetadata.testcase_id == testcase_id).get()
    if metadata:
      metadata.status = 'Duplicate'
      metadata.duplicate_of = existing_testcase_id
      metadata.security_flag = existing_testcase.security_flag
      metadata.put()

    testcase.status = 'Duplicate'
    testcase.duplicate_of = existing_testcase_id
    testcase.put()
    logs.log('Marking testcase %d as duplicate of testcase %d.' %
             (testcase_id, existing_testcase_id))

  elif (not existing_testcase.bug_information and
        not testcase.one_time_crasher_flag):
    metadata = data_types.TestcaseUploadMetadata.query(
        data_types.TestcaseUploadMetadata.testcase_id == testcase_id).get()
    if metadata:
      metadata.status = 'Duplicate'
      metadata.duplicate_of = testcase_id
      metadata.security_flag = testcase.security_flag
      metadata.put()

    existing_testcase.status = 'Duplicate'
    existing_testcase.duplicate_of = testcase_id
    existing_testcase.put()
    logs.log('Marking testcase %d as duplicate of testcase %d.' %
             (existing_testcase_id, testcase_id))