Get bucket IAM policy.
def get_bucket_iam_policy(storage, bucket_name):
  """Get bucket IAM policy."""
  try:
    iam_policy = storage.buckets().getIamPolicy(bucket=bucket_name).execute()
  except HttpError as e:
    logs.log_error('Failed to get IAM policies for %s: %s' % (bucket_name, e))
    return None

  return iam_policy
Set bucket IAM policy.
def set_bucket_iam_policy(client, bucket_name, iam_policy):
  """Set bucket IAM policy."""
  filtered_iam_policy = copy.deepcopy(iam_policy)

  # Bindings returned by getIamPolicy can have duplicates. Remove them,
  # otherwise the setIamPolicy operation fails.
  for binding in filtered_iam_policy['bindings']:
    binding['members'] = sorted(list(set(binding['members'])))

  # Filtering members can cause a binding to have no members. Remove such
  # bindings, otherwise the setIamPolicy operation fails.
  filtered_iam_policy['bindings'] = [
      b for b in filtered_iam_policy['bindings'] if b['members']
  ]

  try:
    return client.buckets().setIamPolicy(
        bucket=bucket_name, body=filtered_iam_policy).execute()
  except HttpError as e:
    error_reason = _get_error_reason(e)
    if error_reason == 'Invalid argument':
      # Expected error for non-Google emails or groups. Warn about these.
      logs.log_warn('Invalid Google email or group being added to bucket %s.' %
                    bucket_name)
    elif error_reason and 'is of type "group"' in error_reason:
      logs.log_warn('Failed to set IAM policy for %s bucket for a group: %s.' %
                    (bucket_name, error_reason))
    else:
      logs.log_error('Failed to set IAM policies for bucket %s.' % bucket_name)

  return None
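To illustrate the two filtering steps above on a minimal, hypothetical policy dict (the shape matches what getIamPolicy returns):

# A hypothetical policy with a duplicate member and a binding that ends up
# empty after member filtering.
policy = {
    'bindings': [
        {
            'role': 'roles/storage.objectViewer',
            'members': ['user:a@example.com', 'user:a@example.com'],
        },
        {
            'role': 'roles/storage.admin',
            'members': [],
        },
    ]
}
for binding in policy['bindings']:
  binding['members'] = sorted(set(binding['members']))
policy['bindings'] = [b for b in policy['bindings'] if b['members']]
# policy['bindings'] now holds a single de-duplicated binding, which
# setIamPolicy accepts.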
Creates a GCS bucket.
def create_bucket_if_needed(bucket_name, object_lifecycle=None, cors=None):
  """Creates a GCS bucket."""
  provider = _provider()
  if provider.get_bucket(bucket_name):
    return True

  if not provider.create_bucket(bucket_name, object_lifecycle, cors):
    return False

  time.sleep(CREATE_BUCKET_DELAY)
  return True
Create a storage client using discovery APIs.
def create_discovery_storage_client():
  """Create a storage client using discovery APIs."""
  return build('storage', 'v1', cache_discovery=False)
Generate GCS lifecycle management config. For reference, see https://cloud.google.com/storage/docs/lifecycle and https://cloud.google.com/storage/docs/managing-lifecycles.
def generate_life_cycle_config(action, age=None, num_newer_versions=None):
  """Generate GCS lifecycle management config.

  For reference, see https://cloud.google.com/storage/docs/lifecycle and
  https://cloud.google.com/storage/docs/managing-lifecycles.
  """
  rule = {}
  rule['action'] = {'type': action}
  rule['condition'] = {}
  if age is not None:
    rule['condition']['age'] = age
  if num_newer_versions is not None:
    rule['condition']['numNewerVersions'] = num_newer_versions

  config = {'rule': [rule]}
  return config
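For example, a config that deletes objects older than 90 days comes out as the nested dict below; the value shown follows directly from the function body:

# Build a lifecycle rule that deletes objects older than 90 days.
config = generate_life_cycle_config('Delete', age=90)
# config == {'rule': [{'action': {'type': 'Delete'},
#                      'condition': {'age': 90}}]}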
Saves a cloud storage file locally.
def copy_file_from(cloud_storage_file_path, local_file_path):
  """Saves a cloud storage file locally."""
  if not _provider().copy_file_from(cloud_storage_file_path, local_file_path):
    return False

  return True
Copy local file to a cloud storage path.
def copy_file_to(local_file_path_or_handle,
                 cloud_storage_file_path,
                 metadata=None):
  """Copy local file to a cloud storage path."""
  if (isinstance(local_file_path_or_handle, str) and
      not os.path.exists(local_file_path_or_handle)):
    logs.log_error('Local file %s not found.' % local_file_path_or_handle)
    return False

  return _provider().copy_file_to(
      local_file_path_or_handle, cloud_storage_file_path, metadata=metadata)
Copy a blob to another GCS path 'in the cloud' without touching local disk.
def copy_blob(cloud_storage_source_path, cloud_storage_target_path):
  """Copy a blob to another GCS path 'in the cloud' without touching local
  disk."""
  return _provider().copy_blob(cloud_storage_source_path,
                               cloud_storage_target_path)
Delete a cloud storage file given its path.
def delete(cloud_storage_file_path):
  """Delete a cloud storage file given its path."""
  return _provider().delete(cloud_storage_file_path)
Return whether a cloud storage file exists.
def exists(cloud_storage_file_path, ignore_errors=False):
  """Return whether a cloud storage file exists."""
  try:
    return bool(_provider().get(cloud_storage_file_path))
  except HttpError:
    if not ignore_errors:
      logs.log_error('Failed when trying to find cloud storage file %s.' %
                     cloud_storage_file_path)

    return False
Return last updated value by parsing stats for all blobs under a cloud storage path.
def last_updated(cloud_storage_file_path):
  """Return last updated value by parsing stats for all blobs under a cloud
  storage path."""
  last_update = None
  for blob in _provider().list_blobs(cloud_storage_file_path):
    if not last_update or blob['updated'] > last_update:
      last_update = blob['updated']

  if last_update:
    # Remove UTC tzinfo to make these comparable.
    last_update = last_update.replace(tzinfo=None)

  return last_update
Return content of a cloud storage file.
def read_data(cloud_storage_file_path):
  """Return content of a cloud storage file."""
  return _provider().read_data(cloud_storage_file_path)
Write data to a cloud storage file.
def write_data(data_or_fileobj, cloud_storage_file_path, metadata=None):
  """Write data to a cloud storage file."""
  return _provider().write_data(
      data_or_fileobj, cloud_storage_file_path, metadata=metadata)
Write a stream to a cloud storage file.
def write_stream(stream, cloud_storage_file_path, metadata=None):
  """Write a stream to a cloud storage file."""
  return _provider().write_stream(
      stream, cloud_storage_file_path, metadata=metadata)
Return blobs under the given cloud storage path.
def get_blobs(cloud_storage_path, recursive=True):
  """Return blobs under the given cloud storage path."""
  yield from _provider().list_blobs(cloud_storage_path, recursive=recursive)
Return blob names under the given cloud storage path.
def list_blobs(cloud_storage_path, recursive=True):
  """Return blob names under the given cloud storage path."""
  for blob in _provider().list_blobs(cloud_storage_path, recursive=recursive):
    yield blob['name']
Get GCS object data.
def get(cloud_storage_file_path):
  """Get GCS object data."""
  return _provider().get(cloud_storage_file_path)
Get the access control for a file.
def get_acl(cloud_storage_file_path, entity):
  """Get the access control for a file."""
  client = create_discovery_storage_client()
  bucket, path = get_bucket_name_and_path(cloud_storage_file_path)

  try:
    return client.objectAccessControls().get(
        bucket=bucket, object=path, entity=entity).execute()
  except HttpError as e:
    if e.resp.status == 404:
      return None

    raise
Set the access control for a file.
def set_acl(cloud_storage_file_path, entity, role='READER'):
  """Set the access control for a file."""
  client = create_discovery_storage_client()
  bucket, path = get_bucket_name_and_path(cloud_storage_file_path)

  try:
    return client.objectAccessControls().insert(
        bucket=bucket, object=path, body={
            'entity': entity,
            'role': role
        }).execute()
  except HttpError as e:
    if e.resp.status == 404:
      return None

    raise
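A usage sketch of the two ACL helpers above; the bucket path and email are hypothetical, and entity strings follow the GCS objectAccessControls convention (e.g. 'user-<email>' or 'allUsers'):

# Grant a user read access; returns the ACL resource, or None on a 404.
set_acl('gs://my-bucket/corpus/input.bin', 'user-dev@example.com')
# Read the ACL back; same None-on-404 behavior.
acl = get_acl('gs://my-bucket/corpus/input.bin', 'user-dev@example.com')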
Get the size of a file.
def get_object_size(cloud_storage_file_path):
  """Get the size of a file."""
  gcs_object = get(cloud_storage_file_path)
  if not gcs_object:
    return gcs_object

  return int(gcs_object['size'])
Get the blobs bucket name.
def blobs_bucket():
  """Get the blobs bucket name."""
  # Allow tests to override blobs bucket name safely.
  test_blobs_bucket = environment.get_value('TEST_BLOBS_BUCKET')
  if test_blobs_bucket:
    return test_blobs_bucket

  assert not environment.get_value('PY_UNITTESTS')
  return local_config.ProjectConfig().get('blobs.bucket')
Returns the bucket where uworker input is stored.
def uworker_input_bucket():
  """Returns the bucket where uworker input is stored."""
  test_uworker_input_bucket = environment.get_value('TEST_UWORKER_INPUT_BUCKET')
  if test_uworker_input_bucket:
    return test_uworker_input_bucket

  assert not environment.get_value('PY_UNITTESTS')
  # TODO(metzman): Use local config.
  bucket = environment.get_value('UWORKER_INPUT_BUCKET')
  if not bucket:
    logs.log_error('UWORKER_INPUT_BUCKET is not defined.')
  return bucket
Returns the bucket where uworker output is stored.
def uworker_output_bucket():
  """Returns the bucket where uworker output is stored."""
  test_uworker_output_bucket = environment.get_value(
      'TEST_UWORKER_OUTPUT_BUCKET')
  if test_uworker_output_bucket:
    return test_uworker_output_bucket

  assert not environment.get_value('PY_UNITTESTS')
  # TODO(metzman): Use local config.
  bucket = environment.get_value('UWORKER_OUTPUT_BUCKET')
  if not bucket:
    logs.log_error('UWORKER_OUTPUT_BUCKET is not defined.')
  return bucket
Downloads |url| and returns the contents.
def _download_url(url):
  """Downloads |url| and returns the contents."""
  if _integration_test_env_doesnt_support_signed_urls():
    return read_data(url)

  request = requests.get(url, timeout=HTTP_TIMEOUT_SECONDS)
  if not request.ok:
    raise RuntimeError('Request to %s failed. Code: %d. Reason: %s' %
                       (url, request.status_code, request.reason))

  return request.content
Uploads data to the |signed_url|.
def upload_signed_url(data_or_fileobj, url):
  """Uploads data to the signed |url|."""
  return _provider().upload_signed_url(str_to_bytes(data_or_fileobj), url)
Returns contents of |url|.
def download_signed_url(url):
  """Returns contents of |url|."""
  return _provider().download_signed_url(url)
Returns a signed upload URL for |remote_path|. Does not upload the contents.
def get_signed_upload_url(remote_path, minutes=SIGNED_URL_EXPIRATION_MINUTES):
  """Returns a signed upload URL for |remote_path|. Does not upload the
  contents."""
  provider = _provider()
  return provider.sign_upload_url(remote_path, minutes=minutes)
Returns a signed download URL for |remote_path|. Does not download the contents.
def get_signed_download_url(remote_path, minutes=SIGNED_URL_EXPIRATION_MINUTES):
  """Returns a signed download URL for |remote_path|. Does not download the
  contents."""
  provider = _provider()
  return provider.sign_download_url(remote_path, minutes=minutes)
Makes a DELETE HTTP request to |url|.
def delete_signed_url(url):
  """Makes a DELETE HTTP request to |url|."""
  _provider().delete_signed_url(url)
Download |signed_urls| to |directory|.
def download_signed_urls(signed_urls, directory):
  """Download |signed_urls| to |directory|."""
  # TODO(metzman): Use the actual names of the files stored on GCS instead of
  # renaming them.
  if not signed_urls:
    return []

  basename = uuid.uuid4().hex
  filepaths = [
      os.path.join(directory, f'{basename}-{idx}')
      for idx in range(len(signed_urls))
  ]
  logs.log('Downloading URLs.')
  with _pool() as pool:
    result = list(
        pool.map(_error_tolerant_download_signed_url_to_file,
                 zip(signed_urls, filepaths)))
  logs.log('Done downloading URLs.')
  return result
Returns |num_uploads| signed upload URLs for uploading files with unique arbitrary names to |remote_directory|.
def get_arbitrary_signed_upload_urls(remote_directory, num_uploads):
  """Returns |num_uploads| signed upload URLs for uploading files with unique
  arbitrary names to |remote_directory|."""
  # We verify there are no collisions for uuid4s in CF because a collision
  # would be bad, and in most cases the check is cheap (and because we
  # probably didn't understand the likelihood of this happening when we
  # started, see https://stackoverflow.com/a/24876263). It is not cheap if we
  # have to do this 10,000 times. Instead, create a prefix filename and check
  # that no file has that name. Then the arbitrary names will all use that
  # prefix.
  unique_id = uuid.uuid4()
  base_name = unique_id.hex
  if not remote_directory.endswith('/'):
    remote_directory = remote_directory + '/'
  # |remote_directory| is guaranteed to end with a slash at this point, so do
  # not add another one.
  base_path = f'{remote_directory}{base_name}'
  base_search_path = f'{base_path}*'
  if exists(base_search_path):
    # Raise the error and let retry go again. There is a vanishingly small
    # chance that we get more collisions. This is vulnerable to races, but is
    # probably unneeded anyway.
    raise ValueError(f'UUID collision found {str(unique_id)}')

  urls = (f'{base_path}-{idx}' for idx in range(num_uploads))
  logs.log('Signing URLs for arbitrary uploads.')
  result = [get_signed_upload_url(url) for url in urls]
  logs.log('Done signing URLs for arbitrary uploads.')
  return result
Platform substitution.
def platform_substitution(label, testcase, _):
  """Platform substitution."""
  platform = None
  if environment.is_chromeos_job(testcase.job_type):
    # ChromeOS fuzzers run on Linux platform, so use correct OS-Chrome for
    # tracking.
    platform = 'Chrome'
  elif environment.is_ios_job(testcase.job_type):
    # iOS fuzzers run on macOS platform, so use correct OS-iOS for tracking.
    platform = 'iOS'
  elif testcase.platform_id:
    platform = testcase.platform_id.split(':')[0].capitalize()

  if not platform:
    return []

  return [label.replace('%PLATFORM%', platform)]
Return the current date in ISO format.
def current_date():
  """Return the current date in ISO format."""
  return utils.utcnow().date().isoformat()
Date substitution.
def date_substitution(label, *_):
  """Date substitution."""
  return [label.replace('%YYYY-MM-DD%', current_date())]
Sanitizer substitution.
def sanitizer_substitution(label, testcase, _):
  """Sanitizer substitution."""
  stacktrace = data_handler.get_stacktrace(testcase)
  memory_tool_labels = get_memory_tool_labels(stacktrace)
  return [
      label.replace('%SANITIZER%', memory_tool)
      for memory_tool in memory_tool_labels
  ]
Severity substitution.
def severity_substitution(label, testcase, security_severity):
  """Severity substitution."""
  # Use severity from testcase if one is not provided.
  if security_severity is None:
    security_severity = testcase.security_severity

  # Set to default high severity if we can't determine it automatically.
  if not data_types.SecuritySeverity.is_valid(security_severity):
    security_severity = data_types.SecuritySeverity.HIGH

  security_severity_string = severity_analyzer.severity_to_string(
      security_severity)
  return [label.replace('%SEVERITY%', security_severity_string)]
Convert an impact value to a human-readable string.
def impact_to_string(impact):
  """Convert an impact value to a human-readable string."""
  impact_map = {
      data_types.SecurityImpact.EXTENDED_STABLE: 'Extended',
      data_types.SecurityImpact.STABLE: 'Stable',
      data_types.SecurityImpact.BETA: 'Beta',
      data_types.SecurityImpact.HEAD: 'Head',
      data_types.SecurityImpact.NONE: 'None',
      data_types.SecurityImpact.MISSING: data_types.MISSING_VALUE_STRING,
  }

  return impact_map[impact]
Get the impact from the label list.
def _get_impact_from_labels(labels):
  """Get the impact from the label list."""
  labels = [label.lower() for label in labels]
  if 'security_impact-extended' in labels:
    return data_types.SecurityImpact.EXTENDED_STABLE
  if 'security_impact-stable' in labels:
    return data_types.SecurityImpact.STABLE
  if 'security_impact-beta' in labels:
    return data_types.SecurityImpact.BETA
  if 'security_impact-head' in labels:
    return data_types.SecurityImpact.HEAD
  if 'security_impact-none' in labels:
    return data_types.SecurityImpact.NONE
  return data_types.SecurityImpact.MISSING
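Note the checks run in a fixed precedence order, so a lower-impact label is ignored when a higher-impact one is present, and matching is case-insensitive; this follows directly from the code above:

labels = ['Security_Impact-Beta', 'security_impact-stable']
# STABLE wins because it is checked before BETA, regardless of list order.
assert _get_impact_from_labels(labels) == data_types.SecurityImpact.STABLE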
Update impact labels on issue.
def update_issue_impact_labels(testcase, issue, policy):
  """Update impact labels on issue."""
  if testcase.one_time_crasher_flag:
    return

  existing_impact = _get_impact_from_labels(issue.labels)

  if testcase.regression.startswith('0:'):
    # If the regression range starts from the start of time, then we assume
    # that the bug impacts extended stable.
    new_impact = data_types.SecurityImpact.EXTENDED_STABLE
  elif testcase.is_impact_set_flag:
    # Add impact label based on testcase's impact value.
    if testcase.impact_extended_stable_version:
      new_impact = data_types.SecurityImpact.EXTENDED_STABLE
    elif testcase.impact_stable_version:
      new_impact = data_types.SecurityImpact.STABLE
    elif testcase.impact_beta_version:
      new_impact = data_types.SecurityImpact.BETA
    elif testcase.is_crash():
      new_impact = data_types.SecurityImpact.HEAD
    else:
      # Testcase is unreproducible and does not impact extended stable, stable
      # and beta branches. In this case, there is no impact information.
      return
  else:
    # No impact information.
    return

  update_issue_foundin_labels(testcase, issue)

  if existing_impact == new_impact:
    # Correct impact already set.
    return

  if existing_impact != data_types.SecurityImpact.MISSING:
    issue.labels.remove(
        policy.substitution_mapping('Security_Impact-' +
                                    impact_to_string(existing_impact)))

  issue.labels.add(
      policy.substitution_mapping('Security_Impact-' +
                                  impact_to_string(new_impact)))
Updates FoundIn- labels on issue.
def update_issue_foundin_labels(testcase, issue):
  """Updates FoundIn- labels on issue."""
  if not testcase.is_impact_set_flag:
    return

  versions_foundin = [
      x for x in [
          testcase.impact_beta_version, testcase.impact_stable_version,
          testcase.impact_extended_stable_version, testcase.impact_head_version
      ] if x
  ]
  milestones_foundin = {x.split('.')[0] for x in versions_foundin}

  if milestones_foundin:
    # Only cleanup previous FoundIn labels if we have new ones to add.
    issue.labels.remove_by_prefix('FoundIn-')
    for found_milestone in milestones_foundin:
      issue.labels.add('FoundIn-' + found_milestone)
Apply label substitutions.
def apply_substitutions(policy, label, testcase, security_severity=None):
  """Apply label substitutions."""
  if label is None:
    # If the label is not configured, then there is nothing to substitute.
    return []

  label_substitutions = (
      ('%PLATFORM%', platform_substitution),
      ('%YYYY-MM-DD%', date_substitution),
      ('%SANITIZER%', sanitizer_substitution),
      ('%SEVERITY%', severity_substitution),
  )

  for marker, handler in label_substitutions:
    if marker in label:
      return [
          policy.substitution_mapping(label)
          for label in handler(label, testcase, security_severity)
      ]

  # No match found. Return the mapped value if it exists, else the original
  # label.
  return [policy.substitution_mapping(label)]
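A worked example of the dispatch above, assuming a hypothetical testcase whose platform_id is 'linux:trusty' and a policy whose substitution_mapping returns labels unchanged:

# platform_substitution() capitalizes the first platform_id component,
# so 'linux:trusty' expands the marker to 'Linux'.
labels = apply_substitutions(policy, 'OS-%PLATFORM%', testcase)
# labels == ['OS-Linux']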
Get the label pattern regex.
def get_label_pattern(label):
  """Get the label pattern regex."""
  return re.compile('^' + re.sub(r'%.*?%', r'(.*)', label) + '$', re.IGNORECASE)
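For instance, each %...% marker becomes a capture group, and the pattern matches case-insensitively:

pattern = get_label_pattern('Security_Severity-%SEVERITY%')
match = pattern.match('security_severity-high')
# match.group(1) == 'high'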
Distinguish memory tools used and return corresponding labels.
def get_memory_tool_labels(stacktrace):
  """Distinguish memory tools used and return corresponding labels."""
  # Remove stack frames and paths to source code files. This helps to avoid
  # confusion when function names or source paths contain a memory tool token.
  data = ''
  for line in stacktrace.split('\n'):
    if STACKFRAME_LINE_REGEX.match(line):
      continue
    data += line + '\n'

  labels = [t['label'] for t in MEMORY_TOOLS_LABELS if t['token'] in data]
  return labels
Get values from testcase metadata.
def _get_from_metadata(testcase, name):
  """Get values from testcase metadata."""
  return utils.parse_delimited(
      testcase.get_metadata(name, ''),
      delimiter=',',
      strip=True,
      remove_empty=True)
Notify that an issue update occurred (i.e. issue was filed or closed).
def notify_issue_update(testcase, status):
  """Notify that an issue update occurred (i.e. issue was filed or closed)."""
  topic = local_config.ProjectConfig().get('issue_updates.pubsub_topic')
  if not topic:
    return

  pubsub_client = pubsub.PubSubClient()
  pubsub_client.publish(
      topic,
      [
          pubsub.Message(
              attributes={
                  'crash_address': testcase.crash_address,
                  'crash_state': testcase.crash_state,
                  'crash_type': testcase.crash_type,
                  'issue_id': testcase.bug_information or '',
                  'security': str(testcase.security_flag).lower(),
                  'status': status,
                  'testcase_id': str(testcase.key.id()),
              })
      ])

  if status in ('verified', 'wontfix'):
    logs.log(f'Closing issue {testcase.github_issue_num} '
             f'in GitHub repo {testcase.github_repo_id}: '
             f'Testcase {testcase.key.id()} is marked as {status}.')
    oss_fuzz_github.close_issue(testcase)
Look for MiraclePtr status string and return the appropriate label.
def check_miracleptr_status(testcase):
  """Look for MiraclePtr status string and return the appropriate label."""
  stacktrace = data_handler.get_stacktrace(testcase)
  for line in stacktrace.split('\n'):
    if CHROMIUM_MIRACLEPTR_REGEX.match(line):
      status = line.split(':')[-1].strip()
      try:
        return MIRACLEPTR_STATUS[status]
      except KeyError:
        logs.log_error(f'Unknown MiraclePtr status: {line}')
        break

  return None
File an issue for the given test case.
def file_issue(testcase,
               issue_tracker,
               security_severity=None,
               user_email=None,
               additional_ccs=None):
  """File an issue for the given test case."""
  logs.log(f'Filing new issue for testcase: {testcase.key.id()}.')

  policy = issue_tracker_policy.get(issue_tracker.project)

  is_crash = not utils.sub_string_exists_in(NON_CRASH_TYPES,
                                            testcase.crash_type)
  properties = policy.get_new_issue_properties(
      is_security=testcase.security_flag, is_crash=is_crash)

  issue = issue_tracker.new_issue()
  issue.title = data_handler.get_issue_summary(testcase)
  issue.body = data_handler.get_issue_description(
      testcase, reporter=user_email, show_reporter=True)

  # Add reproducibility flag label.
  if testcase.one_time_crasher_flag:
    issue.labels.add(policy.label('unreproducible'))
  else:
    issue.labels.add(policy.label('reproducible'))

  # Chromium-specific labels.
  if issue_tracker.project in ('chromium', 'chromium-testing'):
    if testcase.security_flag:
      # Add reward labels if this is from an external fuzzer contribution.
      fuzzer = data_types.Fuzzer.query(
          data_types.Fuzzer.name == testcase.fuzzer_name).get()
      if fuzzer and fuzzer.external_contribution:
        issue.labels.add(policy.substitution_mapping('reward-topanel'))
        issue.labels.add(
            policy.substitution_mapping('External-Fuzzer-Contribution'))

    update_issue_impact_labels(testcase, issue, policy)

    # Check for MiraclePtr in stacktrace.
    miracle_label = check_miracleptr_status(testcase)
    if miracle_label:
      issue.labels.add(policy.substitution_mapping(miracle_label))

  # Add additional labels from the job definition and fuzzer.
  additional_labels = data_handler.get_additional_values_for_variable(
      'AUTOMATIC_LABELS', testcase.job_type, testcase.fuzzer_name)
  for label in additional_labels:
    issue.labels.add(policy.substitution_mapping(label))

  # Add crash-type-specific label.
  crash_type_label = policy.label_for_crash_type(testcase.crash_type)
  if crash_type_label:
    issue.labels.add(policy.substitution_mapping(crash_type_label))

  # Add labels from crash metadata.
  for crash_category in testcase.get_metadata('crash_categories', []):
    crash_category_label = policy.label_for_crash_category(crash_category)
    if crash_category_label:
      issue.labels.add(policy.substitution_mapping(crash_category_label))

  # Add additional components from the job definition and fuzzer.
  automatic_components = data_handler.get_additional_values_for_variable(
      'AUTOMATIC_COMPONENTS', testcase.job_type, testcase.fuzzer_name)
  for component in automatic_components:
    issue.components.add(component)

  # Add issue assignee from the job definition and fuzzer.
  automatic_assignee = data_handler.get_additional_values_for_variable(
      'AUTOMATIC_ASSIGNEE', testcase.job_type, testcase.fuzzer_name)
  if automatic_assignee:
    issue.status = policy.status('assigned')
    issue.assignee = automatic_assignee[0]
  else:
    issue.status = properties.status

  fuzzer_metadata = testcase.get_metadata('issue_metadata')
  if fuzzer_metadata and 'assignee' in fuzzer_metadata:
    issue.status = policy.status('assigned')
    issue.assignee = fuzzer_metadata['assignee']
    logs.log('Testcase has assignee metadata %s' % issue.assignee)

  # Add additional ccs from the job definition and fuzzer.
  ccs = data_handler.get_additional_values_for_variable(
      'AUTOMATIC_CCS', testcase.job_type, testcase.fuzzer_name)

  # For externally contributed fuzzers, potentially cc the author.
  # Use fully qualified fuzzer name if one is available.
  fully_qualified_fuzzer_name = (
      testcase.overridden_fuzzer_name or testcase.fuzzer_name)
  ccs += external_users.cc_users_for_fuzzer(fully_qualified_fuzzer_name,
                                            testcase.security_flag)
  ccs += external_users.cc_users_for_job(testcase.job_type,
                                         testcase.security_flag)

  # Add the user as a cc if requested, and any default ccs for this job.
  # Check for additional ccs or labels from the job definition.
  if additional_ccs:
    ccs += [cc for cc in additional_ccs if cc not in ccs]

  # For user uploads, we assume the uploader is interested in the issue.
  if testcase.uploader_email and testcase.uploader_email not in ccs:
    ccs.append(testcase.uploader_email)

  ccs.extend(properties.ccs)

  # Get view restriction rules for the job.
  issue_restrictions = data_handler.get_value_from_job_definition(
      testcase.job_type, 'ISSUE_VIEW_RESTRICTIONS', 'security')
  should_restrict_issue = (
      issue_restrictions == 'all' or
      (issue_restrictions == 'security' and testcase.security_flag))

  has_accountable_people = (
      bool(ccs) and not data_handler.get_value_from_job_definition(
          testcase.job_type, 'DISABLE_DISCLOSURE', False))

  # Check for labels with special logic.
  additional_labels = []
  if should_restrict_issue:
    additional_labels.append(policy.label('restrict_view'))

  if has_accountable_people:
    additional_labels.append(policy.label('reported'))

  if testcase.security_flag:
    additional_labels.append(policy.label('security_severity'))

  additional_labels.append(policy.label('os'))

  # Apply label substitutions.
  for label in itertools.chain(properties.labels, additional_labels):
    for result in apply_substitutions(policy, label, testcase,
                                      security_severity):
      issue.labels.add(result)

  issue.body += data_handler.format_issue_information(
      testcase, properties.issue_body_footer)
  if (should_restrict_issue and has_accountable_people and
      policy.deadline_policy_message):
    issue.body += '\n\n' + policy.deadline_policy_message

  for cc in ccs:
    issue.ccs.add(cc)

  # Apply extension fields.
  issue.apply_extension_fields(properties.extension_fields)

  # Add additional labels and components from testcase metadata.
  metadata_labels = _get_from_metadata(testcase, 'issue_labels')
  for label in metadata_labels:
    issue.labels.add(policy.substitution_mapping(label))

  metadata_components = _get_from_metadata(testcase, 'issue_components')
  for component in metadata_components:
    issue.components.add(component)

  if testcase.one_time_crasher_flag and policy.unreproducible_component:
    issue.components.add(policy.unreproducible_component)

  issue.reporter = user_email

  if issue_tracker.project in ('chromium', 'chromium-testing'):
    logs.log(
        'google_issue_tracker labels before saving: %s' % list(issue.labels))

  recovered_exception = None
  try:
    issue.save()
  except Exception as e:
    if policy.fallback_component:
      # If a fallback component is set, try clearing the existing components
      # and filing again. Also save the exception we recovered from.
      recovered_exception = e
      issue.components.clear()
      issue.components.add(policy.fallback_component)
      if policy.fallback_policy_message:
        message = policy.fallback_policy_message.replace(
            '%COMPONENTS%', ' '.join(metadata_components))
        issue.body += '\n\n' + message
      issue.save()
    else:
      raise

  # Update the testcase with this newly created issue.
  testcase.bug_information = str(issue.id)

  oss_fuzz_github.file_issue(testcase)
  testcase.put()

  data_handler.update_group_bug(testcase.group_id)
  return issue.id, recovered_exception
Convert a list to a list of strs.
def _to_str_list(values):
  """Convert a list to a list of strs."""
  return [str(value) for value in values]
Get policy.
def get(project_name):
  """Get policy."""
  issue_tracker_config = local_config.IssueTrackerConfig()
  project_config = issue_tracker_config.get(project_name)
  if not project_config:
    raise ConfigurationError(
        'Issue tracker for {} does not exist'.format(project_name))

  if 'policies' not in project_config:
    raise ConfigurationError(
        'Policies for {} do not exist'.format(project_name))

  return IssueTrackerPolicy(project_config['policies'])
Get an empty policy.
def get_empty():
  """Get an empty policy."""
  return IssueTrackerPolicy({
      'status': {
          'assigned': 'unused',
          'duplicate': 'unused',
          'wontfix': 'unused',
          'fixed': 'unused',
          'verified': 'unused',
          'new': 'unused',
      },
      'labels': {},
  })
Register an issue tracker implementation.
def register_issue_tracker(tracker_type, constructor):
  """Register an issue tracker implementation."""
  if tracker_type in _ISSUE_TRACKER_CONSTRUCTORS:
    raise ValueError(
        'Tracker type {type} is already registered.'.format(type=tracker_type))
  _ISSUE_TRACKER_CONSTRUCTORS[tracker_type] = constructor
Return issue tracker project name given a testcase or default.
def _get_issue_tracker_project_name(testcase=None):
  """Return issue tracker project name given a testcase or default."""
  from clusterfuzz._internal.datastore import data_handler
  job_type = testcase.job_type if testcase else None
  return data_handler.get_issue_tracker_name(job_type)
Cache that lasts for a bot task's lifetime, or an App Engine request lifetime.
def request_or_task_cache(func):
  """Cache that lasts for a bot task's lifetime, or an App Engine request
  lifetime."""
  if environment.is_running_on_app_engine():
    from libs import request_cache
    return request_cache.wrap(capacity=_ISSUE_TRACKER_CACHE_CAPACITY)(func)

  return memoize.wrap(memoize.FifoInMemory(256))(func)
Get the issue tracker with the given type and name.
def get_issue_tracker(project_name=None):
  """Get the issue tracker with the given type and name."""
  issue_tracker_config = local_config.IssueTrackerConfig()
  if not project_name:
    from clusterfuzz._internal.datastore import data_handler
    project_name = data_handler.get_issue_tracker_name()

  issue_project_config = issue_tracker_config.get(project_name)
  if not issue_project_config:
    raise ValueError(
        'Issue tracker for {} does not exist'.format(project_name))

  constructor = _ISSUE_TRACKER_CONSTRUCTORS.get(issue_project_config['type'])
  if not constructor:
    raise ValueError('Invalid issue tracker type: ' +
                     issue_project_config['type'])

  return constructor(project_name, issue_project_config)
Get the issue tracker for the given testcase.
def get_issue_tracker_for_testcase(testcase):
  """Get the issue tracker for the given testcase."""
  issue_tracker_project_name = _get_issue_tracker_project_name(testcase)
  if not issue_tracker_project_name or issue_tracker_project_name == 'disabled':
    return None

  return get_issue_tracker(issue_tracker_project_name)
Get the issue tracker policy for the given testcase.
def get_issue_tracker_policy_for_testcase(testcase):
  """Get the issue tracker policy for the given testcase."""
  issue_tracker_project_name = _get_issue_tracker_project_name(testcase)
  if not issue_tracker_project_name or issue_tracker_project_name == 'disabled':
    return None

  return issue_tracker_policy.get(issue_tracker_project_name)
Return issue object associated with testcase.
def get_issue_for_testcase(testcase):
  """Return issue object associated with testcase."""
  if not testcase.bug_information:
    # Do not check |testcase.group_bug_information| as we look for an issue
    # associated with the testcase directly, not through a group of testcases.
    return None

  issue_tracker = get_issue_tracker_for_testcase(testcase)
  if not issue_tracker:
    return None

  issue_id = testcase.bug_information
  return issue_tracker.get_original_issue(issue_id)
Get search keywords for a testcase.
def get_search_keywords(testcase):
  """Get search keywords for a testcase."""
  crash_state_lines = testcase.crash_state.splitlines()

  # Use top 2 frames for searching.
  return crash_state_lines[:2]
Get issue objects that seem to be related to a particular test case.
def get_similar_issues(issue_tracker, testcase, only_open=True):
  """Get issue objects that seem to be related to a particular test case."""
  # Get list of issues using the search query.
  keywords = get_search_keywords(testcase)

  issues = issue_tracker.find_issues(keywords=keywords, only_open=only_open)
  if issues:
    issues = list(issues)
  else:
    issues = []

  issue_ids = [issue.id for issue in issues]

  # Add issues from similar testcases sharing the same group id.
  if testcase.group_id:
    group_query = data_types.Testcase.query(
        data_types.Testcase.group_id == testcase.group_id)
    similar_testcases = ndb_utils.get_all_from_query(group_query)
    for similar_testcase in similar_testcases:
      if not similar_testcase.bug_information:
        continue

      # Exclude issues already added above from search terms.
      issue_id = int(similar_testcase.bug_information)
      if issue_id in issue_ids:
        continue

      # Get issue object using ID.
      issue = issue_tracker.get_issue(issue_id)
      if not issue:
        continue

      # If our search criteria allows open bugs only, then check issue and
      # testcase status so as to exclude closed ones.
      if (only_open and (not issue.is_open or not testcase.open)):
        continue

      issues.append(issue)
      issue_ids.append(issue_id)

  return issues
Get similar issues web URL.
def get_similar_issues_url(issue_tracker, testcase, only_open=True):
  """Get similar issues web URL."""
  keywords = get_search_keywords(testcase)
  return issue_tracker.find_issues_url(keywords=keywords, only_open=only_open)
Return issue url for a testcase. This is used when rendering a testcase details page, therefore it accounts for |group_bug_information| as well.
def get_issue_url(testcase):
  """Return issue url for a testcase. This is used when rendering a testcase
  details page, therefore it accounts for |group_bug_information| as well."""
  issue_tracker = get_issue_tracker_for_testcase(testcase)
  if not issue_tracker:
    return None

  issue_id = (
      testcase.bug_information
      if testcase.bug_information else testcase.group_bug_information)
  if not issue_id:
    return None

  # Use str(issue_id) as |group_bug_information| might be an integer.
  return issue_tracker.issue_url(str(issue_id))
Check if a label was ever added to an issue.
def was_label_added(issue, label):
  """Check if a label was ever added to an issue."""
  if not label:
    return False

  # Optimization that does not require pulling in issue's actions.
  if any(label.lower() == l.lower() for l in issue.labels):
    return True

  for action in issue.actions:
    for added in action.labels.added:
      if label.lower() == added.lower():
        return True

  return False
Generate the title of the issue.
def get_issue_title(testcase):
  """Generate the title of the issue."""
  return ISSUE_TITTLE_TEXT.format(
      issue_title_prefix=ISSUE_TITTLE_TEXT_PREFIX,
      bug_information=testcase.bug_information)
Generate the body of the issue.
def get_issue_body(testcase):
  """Generate the body of the issue."""
  return ISSUE_CONTENT_TEXT.format(
      domain=data_handler.get_domain(),
      testcase_id=testcase.key.id(),
      bug_information=testcase.bug_information)
Generate the closing comment of the issue.
def get_issue_close_comment(testcase):
  """Generate the closing comment of the issue."""
  return ISSUE_ClOSE_COMMENT_TEXT.format(
      bug_information=testcase.bug_information)
Get access to GitHub with the oss-fuzz personal access token.
def _get_access_token():
  """Get access to GitHub with the oss-fuzz personal access token."""
  token = db_config.get_value('oss_fuzz_robot_github_personal_access_token')
  if not token:
    raise RuntimeError('Unable to access GitHub account to '
                       'file/close the issue.')
  return github.Github(token)
Check if the project YAML file requires filing a GitHub issue.
def _filing_enabled(testcase):
  """Check if the project YAML file requires filing a GitHub issue."""
  require_issue = data_handler.get_value_from_job_definition(
      testcase.job_type, 'FILE_GITHUB_ISSUE', default='False')
  return utils.string_is_true(require_issue)
Get the GitHub repository to file the issue in.
def _get_repo(testcase, access):
  """Get the GitHub repository to file the issue in."""
  repo_url = data_handler.get_value_from_job_definition(
      testcase.job_type, 'MAIN_REPO', '')
  if not repo_url:
    logs.log('Unable to fetch the MAIN_REPO URL from job definition.')
    return None
  if not repo_url.startswith(GITHUB_PREFIX):
    logs.log(f'MAIN_REPO is not a GitHub url: {repo_url}.')
    return None

  repo_name = repo_url[len(GITHUB_PREFIX):]
  if repo_url.endswith(GIT_SUFFIX):
    repo_name = repo_name[:-len(GIT_SUFFIX)]

  try:
    target_repo = access.get_repo(repo_name)
  except github.UnknownObjectException:
    logs.log_error(f'Unable to locate GitHub repository '
                   f'named {repo_name} from URL: {repo_url}.')
    target_repo = None
  return target_repo
Check whether an open issue with the same title already exists.
def _find_existing_issue(repo, issue_title):
  """Check whether an open issue with the same title already exists."""
  for issue in repo.get_issues():
    if issue.title == issue_title:
      logs.log(f'Issue ({issue_title}) already exists in Repo ({repo.id}).')
      return issue
  return None
Post the issue to the GitHub repo of the project.
def _post_issue(repo, testcase):
  """Post the issue to the GitHub repo of the project."""
  issue_title = get_issue_title(testcase)
  issue_body = get_issue_body(testcase)

  return (_find_existing_issue(repo, issue_title) or
          repo.create_issue(title=issue_title, body=issue_body))
Update the GitHub-related properties in the FiledBug entity.
def update_testcase_properties(testcase, repo, issue):
  """Update the GitHub-related properties in the FiledBug entity."""
  testcase.github_repo_id = repo.id
  testcase.github_issue_num = issue.number
File an issue to the GitHub repo of the project.
def file_issue(testcase):
  """File an issue to the GitHub repo of the project."""
  if not _filing_enabled(testcase):
    return

  if testcase.github_repo_id and testcase.github_issue_num:
    logs.log('Issue already filed under '
             f'issue number {testcase.github_issue_num} in '
             f'Repo {testcase.github_repo_id}.')
    return

  access_token = _get_access_token()

  repo = _get_repo(testcase, access_token)
  if not repo:
    logs.log('Unable to file issues to the main repo of the project.')
    return
  if not repo.has_issues:
    logs.log_warn('Unable to file issues to the main repo: '
                  'Repo has disabled issues.')
    return

  issue = _post_issue(repo, testcase)
  update_testcase_properties(testcase, repo, issue)
Verify the issue has been filed.
def _issue_recorded(testcase):
  """Verify the issue has been filed."""
  return testcase.github_repo_id and testcase.github_issue_num
Locate the issue of the testcase.
def _get_issue(testcase, access):
  """Locate the issue of the testcase."""
  repo_id = testcase.github_repo_id
  issue_num = testcase.github_issue_num
  try:
    repo = access.get_repo(repo_id)
  except github.UnknownObjectException:
    logs.log_error(f'Unable to locate the GitHub repository id {repo_id}.')
    return None

  if not repo.has_issues:
    logs.log_warn('Unable to close issues of the main repo: '
                  'Repo has disabled issues.')
    return None

  try:
    target_issue = repo.get_issue(issue_num)
  except github.UnknownObjectException:
    logs.log_error(f'Unable to locate the GitHub issue number {issue_num}.')
    target_issue = None
  return target_issue
Post a closing comment and close the GitHub issue.
def _close_issue_with_comment(testcase, issue):
  """Post a closing comment and close the GitHub issue."""
  issue_close_comment = get_issue_close_comment(testcase)
  issue.create_comment(issue_close_comment)
  issue.edit(state='closed')
Close the issue on GitHub, when the same issue is closed on Monorail.
def close_issue(testcase):
  """Close the issue on GitHub, when the same issue is closed on Monorail."""
  if not _issue_recorded(testcase):
    return

  access_token = _get_access_token()

  issue = _get_issue(testcase, access_token)
  if not issue:
    logs.log_error('Unable to locate and close the issue.')
    return
  if issue.state == 'closed':
    logs.log(f'Issue number {testcase.github_issue_num} '
             f'in GitHub repository {testcase.github_repo_id} '
             'is already closed.')
    return

  _close_issue_with_comment(testcase, issue)
  logs.log(f'Closed issue number {testcase.github_issue_num} '
           f'in GitHub repository {testcase.github_repo_id}.')
Get all issues filed by oss-fuzz-robot.
def get_my_issues():
  """Get all issues filed by oss-fuzz-robot."""
  access_token = _get_access_token()
  return access_token.search_issues(f'author:{GITHUB_HANDLE}')
Builds an httplib2.Http.
def build_http():
  """Builds an httplib2.Http."""
  creds, _ = google.auth.default()
  if creds.requires_scopes:
    creds = creds.with_scopes([_SCOPE])

  return google_auth_httplib2.AuthorizedHttp(
      creds, http=httplib2.Http(timeout=_REQUEST_TIMEOUT))
Calls the discovery service. Retries up to twice if there are any UnknownApiNameOrVersion errors.
def _call_discovery(api, http):
  """Calls the discovery service.

  Retries up to twice if there are any UnknownApiNameOrVersion errors.
  """
  return discovery.build(
      api,
      'v1',
      discoveryServiceUrl=_DISCOVERY_URL,
      http=http,
      static_discovery=False)
Builds a google api client for buganizer.
def build(api='issuetracker', http=None):
  """Builds a google api client for buganizer."""
  if not http:
    http = build_http()
  return _call_discovery(api, http)
Extract all label values.
def _extract_all_labels(labels: issue_tracker.LabelStore,
                        prefix: str) -> List[str]:
  """Extract all label values."""
  results = []
  labels_to_remove = []
  for label in labels:
    if not label.startswith(prefix):
      continue

    results.append(label[len(prefix):])
    labels_to_remove.append(label)

  for label in labels_to_remove:
    labels.remove(label)

  return results
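A small sketch of the extract-and-remove behavior, assuming a LabelStore can be constructed from a list of labels (as in the upstream issue_tracker module):

labels = issue_tracker.LabelStore(['OS-Linux', 'OS-Mac', 'Pri-1'])
oses = _extract_all_labels(labels, 'OS-')
# oses == ['Linux', 'Mac']; only 'Pri-1' remains in the store.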
Sanitize the OS custom field values. The OS custom field no longer has the 'Chrome' value. It was replaced by 'ChromeOS'.
def _sanitize_oses(oses: List[str]):
  """Sanitize the OS custom field values.

  The OS custom field no longer has the 'Chrome' value. It was replaced by
  'ChromeOS'.
  """
  for i, os_field in enumerate(oses):
    if os_field == 'Chrome':
      oses[i] = 'ChromeOS'
Extract a label value.
def _extract_label(labels: issue_tracker.LabelStore,
                   prefix: str) -> Optional[str]:
  """Extract a label value."""
  for label in labels:
    if not label.startswith(prefix):
      continue

    result = label[len(prefix):]
    labels.remove(label)
    return result

  return None
Return the values of all labels with the given prefix.
def _get_labels(labels: Sequence[str], prefix: str) -> List[str]:
  """Return the values of all labels with the given prefix."""
  results = []
  for label in labels:
    if not label.startswith(prefix):
      continue

    results.append(label[len(prefix):])

  return results
Return the value of the first severity label, if any.
def _get_severity_from_labels(labels: Sequence[str]) -> Optional[str]:
  """Return the value of the first severity label, if any."""
  # Ignore case to match `issue_tracker.LabelStore.remove_by_prefix()`.
  values = _get_labels((l.lower() for l in labels),
                       _SEVERITY_LABEL_PREFIX.lower())
  if not values:
    return None

  if len(values) > 1:
    extra = ','.join(values[1:])
    logs.log_error(
        f'google_issue_tracker: ignoring additional severity labels: [{extra}]')

  value = values[0]
  severity = _get_severity_from_label_value(value)
  logs.log(
      f'google_issue_tracker: severity label = {value}, field = {severity}')
  return severity
Convert a severity label value into a Google issue tracker severity.
def _get_severity_from_label_value(value):
  """Convert a severity label value into a Google issue tracker severity."""
  value = value.lower()
  if value == 'critical':
    return 'S0'
  if value == 'high':
    return 'S1'
  if value == 'medium':
    return 'S2'
  if value == 'low':
    return 'S3'
  # Default case.
  return _DEFAULT_SEVERITY
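Exercising the mapping end to end; values are lowercased first, so label casing does not matter:

assert _get_severity_from_label_value('High') == 'S1'
assert _get_severity_from_label_value('LOW') == 'S3'
# Any unrecognized value falls back to _DEFAULT_SEVERITY.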
Makes a User.
def _make_user(email):
  """Makes a User."""
  return {
      'emailAddress': email,
  }
Makes Users.
def _make_users(emails):
  """Makes Users."""
  return [_make_user(email) for email in emails]
Parses a datetime.
def _parse_datetime(date_string):
  """Parses a datetime."""
  datetime_obj, _, microseconds_string = date_string.rstrip('Z').partition('.')
  datetime_obj = datetime.datetime.strptime(datetime_obj, '%Y-%m-%dT%H:%M:%S')
  if microseconds_string:
    microseconds = int(microseconds_string, 10)
    return datetime_obj + datetime.timedelta(microseconds=microseconds)

  return datetime_obj
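Timestamps with or without fractional seconds both parse, since the trailing 'Z' is stripped before the split on '.':

print(_parse_datetime('2023-05-01T12:30:45Z'))
# -> 2023-05-01 12:30:45
print(_parse_datetime('2023-05-01T12:30:45.123456Z'))
# -> 2023-05-01 12:30:45.123456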
Gets a search query.
def _get_query(keywords, only_open):
  """Gets a search query."""
  query = ' '.join('"{}"'.format(keyword) for keyword in keywords)
  if only_open:
    query += ' status:open'
  return query
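For example, keywords are quoted and space-joined, and open-only searches get a status filter appended:

query = _get_query(['heap-buffer-overflow', 'libfoo'], only_open=True)
# query == '"heap-buffer-overflow" "libfoo" status:open'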
Gets an IssueTracker for the project.
def get_issue_tracker(project, config, issue_tracker_client=None):
  """Gets an IssueTracker for the project."""
  return IssueTracker(project, issue_tracker_client, config)
Return jira issue tracker manager for the given project.
def _get_issue_tracker_manager_for_project(project_name):
  """Return jira issue tracker manager for the given project."""
  # If there is no issue tracker set, bail out.
  if not project_name or project_name == 'disabled':
    return None

  return IssueTrackerManager(project_name=project_name)
Get the issue tracker for the project name.
def get_issue_tracker(project_name, config):  # pylint: disable=unused-argument
  """Get the issue tracker for the project name."""
  itm = _get_issue_tracker_manager_for_project(project_name)
  if itm is None:
    return None

  return IssueTracker(itm)
Get search text.
def _get_search_text(keywords):
  """Get search text."""
  jira_special_characters = '+-&|!(){}[]^~*?\\:'
  search_text = ''
  for keyword in keywords:
    # Replace special characters with whitespace as they are not allowed and
    # can't be searched for.
    stripped_keyword = keyword
    for special_character in jira_special_characters:
      stripped_keyword = stripped_keyword.replace(special_character, ' ')
    # Coalesce multiple spaces into one.
    stripped_keyword = ' '.join(stripped_keyword.split())
    search_text += f' AND text ~ "{stripped_keyword}"'

  return search_text
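For example, JQL special characters in a keyword are replaced with spaces and then coalesced before the clause is built; note the result starts with a leading ' AND':

text = _get_search_text(['foo::bar', 'baz'])
# text == ' AND text ~ "foo bar" AND text ~ "baz"'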
Convert an issue entry object into a comment object.
def convert_entry_to_comment(entry):
  """Convert an issue entry object into a comment object."""
  comment = Comment()
  comment.author = entry['author']['name'] if 'author' in entry else None
  comment.comment = entry['content']
  comment.created = parse_datetime(entry['published'])
  comment.id = entry['id']

  if 'updates' in entry and entry['updates']:
    comment.cc = ChangeList(entry['updates'].get('cc', []))
    comment.components = ChangeList(entry['updates'].get('components', []))
    comment.labels = ChangeList(entry['updates'].get('labels', []))
    comment.owner = entry['updates'].get('owner', None)
    comment.status = entry['updates'].get('status', None)
    comment.summary = entry['updates'].get('summary', None)

  return comment
Convert an issue entry object into an issue object.
def convert_entry_to_issue(entry, itm, old_issue=None):
  """Convert an issue entry object into an issue object."""
  if old_issue:
    issue = old_issue
  else:
    issue = Issue()

  issue.blocked_on = [e['issueId'] for e in entry.get('blockedOn', [])]
  issue.blocking = [e['issueId'] for e in entry.get('blocking', [])]
  issue.cc = ChangeList([e['name'] for e in entry.get('cc', [])])
  issue.comments = None
  issue.components = ChangeList(entry.get('components', []))
  issue.created = parse_datetime(entry['published'])
  issue.id = entry['id']
  issue.itm = itm
  issue.labels = ChangeList(entry.get('labels', []))
  issue.new = False
  issue.open = entry['state'] == 'open'
  issue.reporter = entry['author']['name'] if 'author' in entry else None
  issue.stars = entry['stars']
  issue.summary = entry['summary']
  issue.updated = parse_datetime(entry['updated'])

  if entry.get('closed', []):
    issue.closed = parse_datetime(entry.get('closed', []))

  if entry.get('mergedInto'):
    issue.merged_into = entry['mergedInto'].get('issueId')
    issue.merged_into_project = entry['mergedInto'].get('projectId')

  if entry.get('owner', []):
    issue.owner = entry['owner']['name']

  if entry.get('status', []):
    issue.status = entry['status']

  # The issue will be flagged as dirty when most of the above fields are set,
  # so this must be set last.
  issue.dirty = False
  return issue
Parse a datetime string into a datetime object.
def parse_datetime(date_string):
  """Parse a datetime string into a datetime object."""
  datetime_obj, _, microseconds_string = date_string.partition('.')
  datetime_obj = datetime.datetime.strptime(datetime_obj, '%Y-%m-%dT%H:%M:%S')
  if microseconds_string:
    microseconds = int(microseconds_string.rstrip('Z'), 10)
    return datetime_obj + datetime.timedelta(microseconds=microseconds)

  return datetime_obj
Convert a list of changed items to an issue_tracker.ChangeList.
def _to_change_list(monorail_list):
  """Convert a list of changed items to an issue_tracker.ChangeList."""
  change_list = issue_tracker.ChangeList()
  if not monorail_list:
    return change_list

  for item in monorail_list:
    if item.startswith('-'):
      change_list.removed.append(item[1:])
    else:
      change_list.added.append(item)

  return change_list
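Monorail encodes removals with a '-' prefix, so a mixed change list splits cleanly into the two buckets:

changes = _to_change_list(['-Pri-0', 'Pri-1'])
# changes.removed == ['Pri-0'], changes.added == ['Pri-1']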
Return monorail issue tracker manager for the given project.
def _get_issue_tracker_manager_for_project(project_name):
  """Return monorail issue tracker manager for the given project."""
  # If there is no issue tracker set, bail out.
  if not project_name or project_name == 'disabled':
    return None

  return IssueTrackerManager(project_name=project_name)
Get search text.
def _get_search_text(keywords):
  """Get search text."""
  search_text = ' '.join(['"{}"'.format(keyword) for keyword in keywords])
  search_text = search_text.replace(':', ' ')
  search_text = search_text.replace('=', ' ')
  return search_text