response | instruction
---|---|
Clean up function name. | def fix_function_name(function_name):
"""Clean up function name."""
if function_name.startswith('??'):
return ''
return function_name |
Return a stack frame entry. | def get_stack_frame(binary, addr, function_name, file_name):
"""Return a stack frame entry."""
# Cleanup file and function name.
file_name = fix_filename(file_name)
function_name = fix_function_name(function_name)
# Check if we don't have any symbols at all. If yes, this is probably
# a system library. In this case, just return the binary name.
if not function_name and not file_name:
return '%s in %s' % (addr, os.path.basename(binary))
# We just have a file name. Probably running in global context.
if not function_name:
# Filter the filename to act as a function name.
filtered_file_name = os.path.basename(file_name)
return '%s in %s %s' % (addr, filtered_file_name, file_name)
# Regular stack frame.
return '%s in %s %s' % (addr, function_name, file_name) |
Check if this is a valid supported architecture. | def is_valid_arch(s):
"""Check if this is a valid supported architecture."""
return s in [
"i386", "x86_64", "x86_64h", "arm", "armv6", "armv7", "armv7s", "armv7k",
"arm64", "powerpc64", "powerpc64le", "s390x", "s390"
] |
Guess which architecture we're running on (32/64).
10 = len('0x') + 8 hex digits. | def guess_arch(address):
"""Guess which architecture we're running on (32/64).
10 = len('0x') + 8 hex digits."""
if len(address) > 10:
return 'x86_64'
else:
return 'i386' |
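The cutoff in guess_arch is len('0x') plus 8 hex digits, so any address string longer than 10 characters is treated as 64-bit. A minimal standalone sketch of that rule (the sample addresses are made up for illustration):

def guess_arch_demo(address):
  # Same rule as above: '0x' + 8 hex digits fits in exactly 10 characters.
  return 'x86_64' if len(address) > 10 else 'i386'

assert guess_arch_demo('0x7fff5fbff8c0') == 'x86_64'  # 14 characters.
assert guess_arch_demo('0xbffff8c0') == 'i386'        # exactly 10 characters.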
Filters binary path to provide a local copy. | def filter_binary_path(binary_path):
"""Filters binary path to provide a local copy."""
if environment.is_android() or environment.is_lkl_job():
return symbols_downloader.filter_binary_path(binary_path)
if environment.platform() == 'CHROMEOS':
# FIXME: Add code to pull binaries from ChromeOS device.
return binary_path
if environment.is_chromeos_system_job():
# This conditional is True for ChromeOS system fuzzers that are running on
# Linux. Ensure that the binary is always looked for in the chroot and not
# in system directories.
build_dir = environment.get_value('BUILD_DIR')
if not binary_path.startswith(build_dir):
# Fixup path so |binary_path| points to a binary in the chroot (probably
# a system library).
return os.path.join(build_dir, binary_path[1:])
# For Linux and Mac, the binary exists locally. No work to do,
# just return the same binary path.
return binary_path |
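In the ChromeOS system-job branch above, stripping the leading '/' before joining is what maps an absolute system path into the chroot; os.path.join would otherwise discard |build_dir| entirely when handed an absolute second argument. A small sketch with hypothetical paths (POSIX semantics):

import os

build_dir = '/build/chroot'            # hypothetical BUILD_DIR value
binary_path = '/usr/lib64/libc.so.6'   # system library outside the chroot
# An absolute second argument makes os.path.join drop build_dir entirely.
assert os.path.join(build_dir, binary_path) == '/usr/lib64/libc.so.6'
# Stripping the leading '/' remaps the path under the chroot.
assert os.path.join(build_dir, binary_path[1:]) == '/build/chroot/usr/lib64/libc.so.6'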
Symbolize a crash stacktrace. | def symbolize_stacktrace(unsymbolized_crash_stacktrace,
enable_inline_frames=True):
"""Symbolize a crash stacktrace."""
if environment.is_trusted_host():
from clusterfuzz._internal.bot.untrusted_runner import symbolize_host
return symbolize_host.symbolize_stacktrace(unsymbolized_crash_stacktrace,
enable_inline_frames)
platform = environment.platform()
if platform == 'WINDOWS':
# Windows Clang ASAN provides symbolized stacktraces anyway.
return unsymbolized_crash_stacktrace
if platform == 'FUCHSIA':
# Fuchsia Clang ASAN provides symbolized stacktraces anyway.
return unsymbolized_crash_stacktrace
# FIXME: Support symbolization on ChromeOS device.
if platform == 'CHROMEOS':
return unsymbolized_crash_stacktrace
# Initialize variables.
global llvm_symbolizer_path
global pipes
global stack_inlining
global symbolizers
pipes = []
stack_inlining = str(enable_inline_frames).lower()
symbolizers = {}
# Make sure we have a llvm symbolizer for this platform.
llvm_symbolizer_path = environment.get_llvm_symbolizer_path()
if not llvm_symbolizer_path:
return unsymbolized_crash_stacktrace
# Disable buffering for stdout.
disable_buffering()
loop = SymbolizationLoop(
binary_path_filter=filter_binary_path,
dsym_hint_producer=chrome_dsym_hints)
if environment.is_android_emulator():
symbolized_crash_stacktrace = loop.process_trusty_stacktrace(
unsymbolized_crash_stacktrace)
symbolized_crash_stacktrace = loop.process_stacktrace(
unsymbolized_crash_stacktrace)
return symbolized_crash_stacktrace |
Returns an api client for datastore. | def _datastore_client():
"""Returns an api client for datastore."""
return discovery.build('datastore', 'v1') |
Backs up all datastore entities to a GCS backup bucket. | def main():
  """Backs up all datastore entities to a GCS backup bucket."""
backup_bucket = local_config.Config(
local_config.PROJECT_PATH).get('backup.bucket')
if not backup_bucket:
logs.log_error('No backup bucket is set, skipping.')
return False
kinds = [
kind for kind in ndb.Model._kind_map # pylint: disable=protected-access
if (not kind.startswith('_') and kind not in EXCLUDED_MODELS)
]
app_id = utils.get_application_id()
timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%d-%H:%M:%S')
output_url_prefix = f'gs://{backup_bucket}/datastore-backups/{timestamp}'
body = {
'output_url_prefix': output_url_prefix,
'entity_filter': {
'kinds': kinds
}
}
try:
# pylint: disable=no-member
request = _datastore_client().projects().export(projectId=app_id, body=body)
response = request.execute()
message = 'Datastore export succeeded.'
logs.log(message, response=response)
return True
except errors.HttpError as e:
status_code = e.resp.status
message = f'Datastore export failed. Status code: {status_code}'
logs.log_error(message)
return False |
Batches FuzzerJobs for reduced Datastore read ops by bots. | def batch_fuzzer_jobs():
"""Batches FuzzerJobs for reduced Datastore read ops by bots."""
platforms = [
item.platform for item in data_types.FuzzerJob.query(
projection=[data_types.FuzzerJob.platform], distinct=True)
]
for platform in platforms:
fuzzer_jobs = list(
data_types.FuzzerJob.query(data_types.FuzzerJob.platform == platform))
fuzzer_jobs.sort(key=lambda item: item.job)
batches_to_remove = {
b.key for b in data_types.FuzzerJobs.query(
data_types.FuzzerJobs.platform == platform)
}
batch_count = 0
for i in range(0, len(fuzzer_jobs), FUZZER_JOB_BATCH_SIZE):
key_id = platform + '-' + str(batch_count)
end = min(i + FUZZER_JOB_BATCH_SIZE, len(fuzzer_jobs))
batched = data_types.FuzzerJobs(id=key_id, platform=platform)
batched.platform = platform
batched.fuzzer_jobs = fuzzer_jobs[i:end]
batched.put()
batch_count += 1
batches_to_remove.discard(batched.key)
    # Remove leftover batches if the number of batches has decreased.
if batches_to_remove:
ndb.delete_multi(batches_to_remove) |
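The batching above chunks the sorted FuzzerJob list with range(0, len(...), FUZZER_JOB_BATCH_SIZE) and keys each FuzzerJobs batch as '<platform>-<batch_count>'. A standalone sketch of that arithmetic with a hypothetical batch size of 3; note that list slicing already clamps at the end of the list, so the explicit min() above is purely defensive:

items = list(range(8))   # stand-ins for the sorted FuzzerJob entities
size = 3                 # hypothetical FUZZER_JOB_BATCH_SIZE
batches = {
    f'LINUX-{count}': items[i:i + size]
    for count, i in enumerate(range(0, len(items), size))
}
assert batches == {'LINUX-0': [0, 1, 2], 'LINUX-1': [3, 4, 5], 'LINUX-2': [6, 7]}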
Batches FuzzerJobs. | def main():
"""Batches FuzzerJobs."""
batch_fuzzer_jobs()
logs.log('Batch fuzzer jobs succeeded.')
return True |
Gets the start hour from the first crash. | def get_start_hour():
"""Gets the start hour from the first crash."""
client = big_query.Client()
sql = """
SELECT min(CAST(FLOOR(UNIX_SECONDS(created_at) / 3600) AS INT64)) as min_hour
FROM main.crashes
"""
result = client.query(query=sql)
if result and result.rows:
return result.rows[0]['min_hour']
return 0 |
Gets the last hour that ran successfully or the start hour. | def get_last_successful_hour_or_start_hour():
"""Gets the last hour that ran successfully or the start hour."""
last_hour = crash_stats.get_last_successful_hour()
if last_hour:
return last_hour
return get_start_hour() |
Gets the next end hour. If it's too early to compute data for the next end
hour, return None. | def get_next_end_hour():
"""Gets the next end hour. If it's too early to compute data for the next end
hour, return None."""
last_successful_hour = get_last_successful_hour_or_start_hour()
if not last_successful_hour:
# No crashes seen, too early to start building stats.
raise TooEarlyError()
next_end_hour = last_successful_hour + 1
next_datetime = crash_stats.get_datetime(next_end_hour)
if (utils.utcnow() - next_datetime) <= BIGQUERY_INSERTION_DELAY:
raise TooEarlyError()
return next_end_hour |
Makes a request to BigQuery to build crash stats. | def make_request(client, job_id, end_hour):
"""Makes a request to BigQuery to build crash stats."""
table_id = (
'crash_stats$%s' % crash_stats.get_datetime(end_hour).strftime('%Y%m%d'))
sql = SQL.format(
end_hour=end_hour,
end_date=(crash_stats.get_datetime(end_hour).strftime('%Y-%m-%d')))
logging.info('TableID: %s\nJobID: %s\nSQL: %s', table_id, job_id, sql)
client.insert_from_query(
dataset_id='main', table_id=table_id, job_id=job_id, query=sql) |
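The table_id above uses BigQuery's partition decorator syntax, 'table$YYYYMMDD', so the insert targets a single daily partition. A sketch of the strings that get built, assuming crash_stats.get_datetime simply maps an end hour to a UTC datetime (the date below is made up):

import datetime

end_datetime = datetime.datetime(2023, 5, 17, 13)  # hypothetical end hour as a datetime
table_id = 'crash_stats$%s' % end_datetime.strftime('%Y%m%d')
end_date = end_datetime.strftime('%Y-%m-%d')
assert table_id == 'crash_stats$20230517'
assert end_date == '2023-05-17'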
Builds crash stats for the end hour. | def build(end_hour):
"""Builds crash stats for the end hour."""
logging.info('Started building crash stats for %s.',
crash_stats.get_datetime(end_hour))
job_id = JOB_ID_TEMPLATE.format(unique_number=int(time.time()))
client = big_query.Client()
make_request(client, job_id, end_hour)
start_time = time.time()
while (time.time() - start_time) < TIMEOUT:
time.sleep(10)
result = client.get_job(job_id)
logging.info('Checking %s', json.dumps(result))
if result['status']['state'] == 'DONE':
if result['status'].get('errors'):
raise Exception(json.dumps(result)) # pylint: disable=broad-exception-raised
return
raise Exception('Building crash stats exceeded %d seconds.' % TIMEOUT) |
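build() above polls the BigQuery job every 10 seconds until its state is DONE or TIMEOUT elapses, and raises if the finished job reports errors. A stripped-down sketch of just that control flow with a caller-supplied get_job callable; the helper name and exceptions here are hypothetical, not part of the module above:

import time

def wait_for_done(get_job, job_id, timeout, poll_interval=10):
  # Poll until the job reports DONE, surfacing any errors it carries.
  start = time.time()
  while time.time() - start < timeout:
    time.sleep(poll_interval)
    result = get_job(job_id)
    if result['status']['state'] == 'DONE':
      if result['status'].get('errors'):
        raise RuntimeError(result['status']['errors'])
      return result
  raise TimeoutError(f'Job {job_id} did not finish within {timeout} seconds.')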
Gets the next end hour and decides whether to execute build(). If build()
succeeds, records the next end hour. | def build_if_needed():
  """Gets the next end hour and decides whether to execute build(). If build()
  succeeds, records the next end hour."""
try:
end_hour = get_next_end_hour()
build(end_hour)
job_history = data_types.BuildCrashStatsJobHistory()
job_history.end_time_in_hours = end_hour
job_history.put()
logging.info('CrashStatistics for end_hour=%s is built successfully',
crash_stats.get_datetime(end_hour))
return end_hour
except TooEarlyError:
logging.info("Skip building crash stats because it's too early.")
return None |
Builds crash stats from data_types.CrashsStats2. | def main():
"""Builds crash stats from data_types.CrashsStats2."""
end_hour = build_if_needed()
logging.info('OK (end_hour=%s)', end_hour)
return True |
Return an item from the Predator result metadata for a test case. | def _get_predator_result_item(testcase, key, default=None):
  """Return an item from the Predator result metadata for a test case."""
predator_result = testcase.get_metadata('predator_result')
if not predator_result:
return default
return predator_result['result'].get(key, default) |
Append the generic incorrect comment to the given comment. | def _append_generic_incorrect_comment(comment, policy, issue, suffix):
  """Append the generic incorrect comment to the given comment."""
wrong_label = policy.label('wrong')
if not wrong_label:
return comment
return comment + GENERIC_INCORRECT_COMMENT.format(
label_text=issue.issue_tracker.label_text(wrong_label)) + suffix |
Get real platform from job platform. | def job_platform_to_real_platform(job_platform):
"""Get real platform from job platform."""
for platform in data_types.PLATFORMS:
if platform in job_platform:
return platform
raise ValueError('Unknown platform: ' + job_platform) |
Delete ReportMetadata for uploaded reports. | def cleanup_reports_metadata():
"""Delete ReportMetadata for uploaded reports."""
uploaded_reports = ndb_utils.get_all_from_query(
data_types.ReportMetadata.query(
ndb_utils.is_true(data_types.ReportMetadata.is_uploaded)),
keys_only=True)
ndb_utils.delete_multi(uploaded_reports) |
Clean up unneeded open testcases and their associated issues. | def cleanup_testcases_and_issues():
"""Clean up unneeded open testcases and their associated issues."""
logs.log('Getting all job type names.')
jobs = data_handler.get_all_job_type_names()
logs.log('Getting test case keys from query.')
testcase_keys = ndb_utils.get_all_from_query(
data_types.Testcase.query(
ndb_utils.is_false(data_types.Testcase.triaged)),
keys_only=True)
logs.log('Getting top crashes for all projects and platforms.')
top_crashes_by_project_and_platform_map = (
get_top_crashes_for_all_projects_and_platforms())
utils.python_gc()
testcases_processed = 0
empty_issue_tracker_policy = issue_tracker_policy.get_empty()
for testcase_key in testcase_keys:
testcase_id = testcase_key.id()
try:
testcase = data_handler.get_testcase_by_id(testcase_id)
except errors.InvalidTestcaseError:
# Already deleted.
continue
logs.log(f'Processing testcase {testcase_id}.')
try:
issue = issue_tracker_utils.get_issue_for_testcase(testcase)
policy = issue_tracker_utils.get_issue_tracker_policy_for_testcase(
testcase)
if not policy:
logs.log('No policy')
policy = empty_issue_tracker_policy
# Issue updates.
update_os_labels(policy, testcase, issue)
logs.log('maybe updated os')
update_fuzz_blocker_label(policy, testcase, issue,
top_crashes_by_project_and_platform_map)
logs.log('maybe updated fuzz blocker')
update_component_labels(policy, testcase, issue)
logs.log('maybe updated component labels')
update_issue_ccs_from_owners_file(policy, testcase, issue)
logs.log('maybe updated issueccs')
update_issue_owner_and_ccs_from_predator_results(policy, testcase, issue)
logs.log('maybe updated update_issue_owner_and_ccs_from_predator_results')
update_issue_labels_for_flaky_testcase(policy, testcase, issue)
# Testcase marking rules.
mark_duplicate_testcase_as_closed_with_no_issue(testcase)
mark_issue_as_closed_if_testcase_is_fixed(policy, testcase, issue)
mark_testcase_as_closed_if_issue_is_closed(policy, testcase, issue)
mark_testcase_as_closed_if_job_is_invalid(testcase, jobs)
mark_unreproducible_testcase_as_fixed_if_issue_is_closed(testcase, issue)
mark_unreproducible_testcase_and_issue_as_closed_after_deadline(
policy, testcase, issue)
mark_na_testcase_issues_as_wontfix(policy, testcase, issue)
# Notification, to be done at end after testcase state is updated from
# previous rules.
notify_closed_issue_if_testcase_is_open(policy, testcase, issue)
notify_issue_if_testcase_is_invalid(policy, testcase, issue)
notify_uploader_when_testcase_is_processed(policy, testcase, issue)
# Mark testcase as triage complete if both testcase and associated issue
      # are closed. This also needs to be done before the deletion rules.
mark_testcase_as_triaged_if_needed(testcase, issue)
# Testcase deletion rules.
delete_unreproducible_testcase_with_no_issue(testcase)
except Exception:
logs.log_error(f'Failed to process testcase {testcase_id}.')
testcases_processed += 1
if testcases_processed % 100 == 0:
utils.python_gc() |
Clean up unused FuzzTarget and FuzzTargetJob entities. | def cleanup_unused_fuzz_targets_and_jobs():
"""Clean up unused FuzzTarget and FuzzTargetJob entities."""
last_run_cutoff = utils.utcnow() - datetime.timedelta(
days=FUZZ_TARGET_UNUSED_THRESHOLD)
unused_target_jobs = data_types.FuzzTargetJob.query(
data_types.FuzzTargetJob.last_run < last_run_cutoff)
valid_target_jobs = data_types.FuzzTargetJob.query(
data_types.FuzzTargetJob.last_run >= last_run_cutoff)
to_delete = [t.key for t in unused_target_jobs]
valid_fuzz_targets = {t.fuzz_target_name for t in valid_target_jobs}
for fuzz_target in ndb_utils.get_all_from_model(data_types.FuzzTarget):
if fuzz_target.fully_qualified_name() not in valid_fuzz_targets:
to_delete.append(fuzz_target.key)
ndb_utils.delete_multi(to_delete) |
Return a map from each project to the jobs and platforms to use for picking
top crashes. | def get_jobs_and_platforms_for_project():
  """Return a map from each project to the jobs and platforms to use for
  picking top crashes."""
all_jobs = ndb_utils.get_all_from_model(data_types.Job)
projects_to_jobs_and_platforms = {}
for job in all_jobs:
job_environment = job.get_environment()
# Skip experimental jobs.
if utils.string_is_true(job_environment.get('EXPERIMENTAL')):
continue
# Skip custom binary jobs.
if (utils.string_is_true(job_environment.get('CUSTOM_BINARY')) or
job_environment.get('SYSTEM_BINARY_DIR')):
continue
# Skip if explicitly excluded using flag.
if utils.string_is_true(job_environment.get('EXCLUDE_FROM_TOP_CRASHES')):
continue
if job.project not in projects_to_jobs_and_platforms:
projects_to_jobs_and_platforms[job.project] = ProjectMap(set(), set())
projects_to_jobs_and_platforms[job.project].jobs.add(job.name)
projects_to_jobs_and_platforms[job.project].platforms.add(
job_platform_to_real_platform(job.platform))
return projects_to_jobs_and_platforms |
Get platforms from crash stats based on crash parameters. | def _get_crash_occurrence_platforms_from_crash_parameters(
crash_type, crash_state, security_flag, project_name, lookbehind_days):
"""Get platforms from crash stats based on crash parameters."""
last_hour = crash_stats.get_last_successful_hour()
if not last_hour:
# No crash stats available, skip.
return []
where_clause = (f'crash_type = {json.dumps(crash_type)} AND '
f'crash_state = {json.dumps(crash_state)} AND '
f'security_flag = {json.dumps(security_flag)} AND '
f'project = {json.dumps(project_name)}')
_, rows = crash_stats.get(
end=last_hour,
block='day',
days=lookbehind_days,
group_by='platform',
where_clause=where_clause,
group_having_clause='',
sort_by='total_count',
offset=0,
limit=1)
platforms = set()
for row in rows:
for group in row['groups']:
platform = group['name'].split(':')[0]
platforms.add(platform.lower())
return list(platforms) |
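The where clause above leans on json.dumps to quote and escape the crash parameters, which keeps multi-line crash states and embedded quotes from breaking the generated SQL. A small illustration with a made-up crash state:

import json

crash_state = 'frame_one\nframe_two'
clause = f'crash_state = {json.dumps(crash_state)}'
# The value ends up double-quoted with the newline escaped inside the SQL text.
assert clause == 'crash_state = "frame_one\\nframe_two"'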
Get platforms from similar testcase variants. | def get_platforms_from_testcase_variants(testcase):
  """Get platforms from similar testcase variants."""
variant_query = data_types.TestcaseVariant.query(
data_types.TestcaseVariant.testcase_id == testcase.key.id())
platforms = {
variant.platform
for variant in variant_query
if variant.is_similar and variant.platform
}
return platforms |
Get platforms from crash stats for a testcase. | def get_crash_occurrence_platforms(testcase, lookbehind_days=1):
"""Get platforms from crash stats for a testcase."""
return set(
_get_crash_occurrence_platforms_from_crash_parameters(
testcase.crash_type, testcase.crash_state, testcase.security_flag,
testcase.project_name, lookbehind_days)) |
Return top crashes for all projects and platforms. | def get_top_crashes_for_all_projects_and_platforms(limit=TOP_CRASHES_LIMIT):
"""Return top crashes for all projects and platforms."""
last_hour = crash_stats.get_last_successful_hour()
if not last_hour:
# No crash stats available, skip.
return {}
projects_to_jobs_and_platforms = get_jobs_and_platforms_for_project()
top_crashes_by_project_and_platform_map = {}
for project_name, project_map in projects_to_jobs_and_platforms.items():
top_crashes_by_project_and_platform_map[project_name] = {}
for platform in project_map.platforms:
where_clause = (
'crash_type NOT IN UNNEST'
f'({json.dumps(TOP_CRASHES_IGNORE_CRASH_TYPES)}) AND '
'crash_state NOT IN UNNEST'
f'({json.dumps(TOP_CRASHES_IGNORE_CRASH_STATES)}) AND '
f'job_type IN UNNEST({json.dumps(list(project_map.jobs))}) AND '
f'platform LIKE {json.dumps(platform.lower() + "%")} AND '
f'project = {json.dumps(project_name)}')
_, rows = crash_stats.get(
end=last_hour,
block='day',
days=TOP_CRASHES_DAYS_LOOKBEHIND,
group_by='platform',
where_clause=where_clause,
group_having_clause='',
sort_by='total_count',
offset=0,
limit=limit)
if not rows:
continue
top_crashes_by_project_and_platform_map[project_name][platform] = [{
'crashState': row['crashState'],
'crashType': row['crashType'],
'isSecurity': row['isSecurity'],
'totalCount': row['totalCount'],
} for row in rows if row['totalCount'] >= TOP_CRASHES_MIN_THRESHOLD]
return top_crashes_by_project_and_platform_map |
Return list of platforms where this testcase is a top crasher. | def get_top_crash_platforms(testcase, top_crashes_by_project_and_platform_map):
"""Return list of platforms where this testcase is a top crasher."""
if testcase.project_name not in top_crashes_by_project_and_platform_map:
return []
top_crashes_by_platform_map = top_crashes_by_project_and_platform_map[
testcase.project_name]
top_crash_platforms = set()
for platform in list(top_crashes_by_platform_map.keys()):
top_crashes = top_crashes_by_platform_map[platform]
if not top_crashes:
continue
for top_crash in top_crashes:
crash_state_comparer = crash_comparer.CrashComparer(
top_crash['crashState'], testcase.crash_state)
crash_type_comparer = crash_comparer.CrashComparer(
top_crash['crashType'], testcase.crash_type)
if (crash_state_comparer.is_similar() and
top_crash['isSecurity'] == testcase.security_flag and
(top_crash['isSecurity'] or crash_type_comparer.is_similar())):
top_crash_platforms.add(platform.lower())
return sorted(list(top_crash_platforms)) |
Delete an unreproducible testcase if it has no associated issue and has
been open for a certain time interval. | def delete_unreproducible_testcase_with_no_issue(testcase):
"""Delete an unreproducible testcase if it has no associated issue and has
been open for a certain time interval."""
# Make sure that this testcase is an unreproducible bug. If not, bail out.
if not testcase.one_time_crasher_flag:
return
# Make sure that this testcase has no associated bug. If not, bail out.
if testcase.bug_information:
return
  # Make sure that the testcase is at least
  # |UNREPRODUCIBLE_TESTCASE_NO_BUG_DEADLINE| days old, otherwise it will be
  # seen in crash stats anyway.
if (testcase.timestamp and not dates.time_has_expired(
testcase.timestamp,
days=data_types.UNREPRODUCIBLE_TESTCASE_NO_BUG_DEADLINE)):
return
# Make sure that testcase is not seen in crash stats for a certain time
# interval.
if get_crash_occurrence_platforms(
testcase, data_types.UNREPRODUCIBLE_TESTCASE_NO_BUG_DEADLINE):
return
testcase.key.delete()
logs.log(
f'Deleted unreproducible testcase {testcase.key.id()} with no issue.') |
Closes a duplicate testcase if it has no associated issue and has been open
for a certain time interval. | def mark_duplicate_testcase_as_closed_with_no_issue(testcase):
"""Closes a duplicate testcase if it has no associated issue and has been open
for a certain time interval."""
# Make sure that this testcase is a duplicate bug. If not, bail out.
if testcase.status != 'Duplicate':
return
# Make sure that this testcase has no associated bug. If not, bail out.
if testcase.bug_information:
return
# Make sure that testcase has been open for a certain time interval. We do
# a null timestamp check since some older testcases could be missing it.
if (testcase.timestamp and not dates.time_has_expired(
testcase.timestamp, days=data_types.DUPLICATE_TESTCASE_NO_BUG_DEADLINE)):
return
testcase.fixed = 'NA'
testcase.open = False
testcase.put()
logs.log(f'Closed duplicate testcase {testcase.key.id()} with no issue.') |
Mark an issue as fixed if all of its associated reproducible testcases are
fixed. | def mark_issue_as_closed_if_testcase_is_fixed(policy, testcase, issue):
  """Mark an issue as fixed if all of its associated reproducible testcases are
  fixed."""
verified_label = policy.label('verified')
if not verified_label:
return
# If there is no associated issue, then bail out.
if not issue or not testcase.bug_information:
return
# If the issue is closed in a status other than Fixed, like Duplicate, WontFix
# or Archived, we shouldn't change it. Bail out.
if not issue.is_open and issue.status != policy.status('fixed'):
return
# Check testcase status, so as to skip unreproducible uploads.
if testcase.status not in ['Processed', 'Duplicate']:
return
# If the testcase is still open, no work needs to be done. Bail out.
if testcase.open:
return
  # FIXME: Find a better solution to skip over reproducible tests that are now
  # showing up as flaky (especially when we are unable to reproduce the crash
  # in the original crash revision).
if testcase.fixed == 'NA':
return
# We can only verify fixed issues for reproducible testcases. If the testcase
# is unreproducible, bail out. Exception is if we explicitly marked this as
# fixed.
if testcase.one_time_crasher_flag and testcase.fixed != 'Yes':
return
# Make sure that no other testcases associated with this issue are open.
similar_testcase = data_types.Testcase.query(
data_types.Testcase.bug_information == testcase.bug_information,
ndb_utils.is_true(data_types.Testcase.open),
ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get()
if similar_testcase:
return
# As a last check, do the expensive call of actually checking all issue
# comments to make sure we didn't do the verification already and we didn't
# get called out on issue mistriage.
# If a "good" label was set, we ignore past "verified" flipping.
good_label = policy.label('good')
if good_label and good_label in issue.labels:
was_verified_added = verified_label in issue.labels
else:
was_verified_added = issue_tracker_utils.was_label_added(
issue, verified_label)
if (was_verified_added or
issue_tracker_utils.was_label_added(issue, policy.label('wrong'))):
return
issue.labels.add(verified_label)
comment = f'ClusterFuzz testcase {testcase.key.id()} is verified as fixed'
fixed_range_url = data_handler.get_fixed_range_url(testcase)
if fixed_range_url:
comment += ' in ' + fixed_range_url
else:
comment += '.'
if utils.is_oss_fuzz():
comment += OSS_FUZZ_INCORRECT_COMMENT
else:
comment = _append_generic_incorrect_comment(comment, policy, issue,
' and re-open the issue.')
skip_auto_close = data_handler.get_value_from_job_definition(
testcase.job_type, 'SKIP_AUTO_CLOSE_ISSUE')
if not skip_auto_close:
issue.status = policy.status('verified')
issue.save(new_comment=comment, notify=True)
logs.log(f'Mark issue {issue.id} as verified for '
f'fixed testcase {testcase.key.id()}.')
issue_filer.notify_issue_update(testcase, 'verified') |
Mark an unreproducible testcase as fixed if the associated issue is
closed. | def mark_unreproducible_testcase_as_fixed_if_issue_is_closed(testcase, issue):
"""Mark an unreproducible testcase as fixed if the associated issue is
closed."""
# If the testcase is already closed, no more work to do.
if not testcase.open:
return
# Make sure that this testcase is an unreproducible bug. If not, bail out.
if not testcase.one_time_crasher_flag:
return
# Make sure that this testcase has an associated bug. If not, bail out.
if not testcase.bug_information:
return
# Make sure that there is an associated bug and it is in closed state.
if not issue or issue.is_open:
return
testcase.fixed = 'NA'
testcase.open = False
testcase.put()
logs.log(f'Closed unreproducible testcase {testcase.key.id()} '
'with issue closed.') |
Closes an unreproducible testcase and its associated issue after a certain
time period. | def mark_unreproducible_testcase_and_issue_as_closed_after_deadline(
policy, testcase, issue):
"""Closes an unreproducible testcase and its associated issue after a certain
time period."""
# If the testcase is already closed, no more work to do.
if not testcase.open:
return
# Check testcase status, so as to skip unreproducible uploads.
if testcase.status not in ['Processed', 'Duplicate']:
return
# Make sure that this testcase is an unreproducible bug. If not, bail out.
if not testcase.one_time_crasher_flag:
return
# Make sure that this testcase has an associated bug. If not, bail out.
if not testcase.bug_information:
return
# If this testcase was manually uploaded, don't change issue state as our
# reproduction result might be incorrect.
if testcase.uploader_email:
return
# Make sure that there is an associated bug and it is in open state.
if not issue or not issue.is_open:
return
# Skip closing if flag is set.
skip_auto_close = data_handler.get_value_from_job_definition(
testcase.job_type, 'SKIP_AUTO_CLOSE_ISSUE')
if skip_auto_close:
return
  # Check if any reproducible open testcases are associated with this bug.
  # If yes, return.
similar_testcase = data_types.Testcase.query(
data_types.Testcase.bug_information == testcase.bug_information,
ndb_utils.is_true(data_types.Testcase.open),
ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get()
if similar_testcase:
return
  # Make sure that the testcase is at least
  # |UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE| days old, otherwise it will be
  # seen in crash stats anyway.
if (testcase.timestamp and not dates.time_has_expired(
testcase.timestamp,
days=data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE)):
return
# Handle testcase that turned from reproducible to unreproducible. Account
# for the recent progression task run time.
last_tested_crash_time = testcase.get_metadata('last_tested_crash_time')
if (last_tested_crash_time and not dates.time_has_expired(
last_tested_crash_time,
days=data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE)):
return
  # Make sure that no crash was seen in the deadline period.
if get_crash_occurrence_platforms(
testcase, data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE):
return
# As a last check, do the expensive call of actually checking all issue
  # comments to make sure we didn't get called out on issue mistriage.
if issue_tracker_utils.was_label_added(issue, policy.label('wrong')):
return
# Close associated issue and testcase.
comment = (f'ClusterFuzz testcase {testcase.key.id()} '
'is flaky and no longer crashes, so closing issue.')
if utils.is_oss_fuzz():
comment += OSS_FUZZ_INCORRECT_COMMENT
else:
comment = _append_generic_incorrect_comment(comment, policy, issue,
' and re-open the issue.')
issue.status = policy.status('wontfix')
issue.save(new_comment=comment, notify=True)
testcase.fixed = 'NA'
testcase.open = False
testcase.put()
issue_filer.notify_issue_update(testcase, 'wontfix')
logs.log(f'Closed unreproducible testcase {testcase.key.id()} '
'and associated issue.') |
Mark issues for testcases with fixed == 'NA' as WontFix. | def mark_na_testcase_issues_as_wontfix(policy, testcase, issue):
  """Mark issues for testcases with fixed == 'NA' as WontFix."""
  # Check for closed, NA testcases.
if testcase.open or testcase.fixed != 'NA':
return
# Nothing to be done if no issue is attached, or if issue is already closed.
if not issue or not issue.is_open:
return
# Make sure that no other testcases associated with this issue are open.
similar_testcase = data_types.Testcase.query(
data_types.Testcase.bug_information == testcase.bug_information,
ndb_utils.is_true(data_types.Testcase.open),
ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get()
if similar_testcase:
return
  # Make sure that no crash was seen in the deadline period.
if get_crash_occurrence_platforms(
testcase, data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE):
return
# As a last check, do the expensive call of actually checking all issue
  # comments to make sure we didn't get called out on issue mistriage.
if issue_tracker_utils.was_label_added(issue, policy.label('wrong')):
return
skip_auto_close = data_handler.get_value_from_job_definition(
testcase.job_type, 'SKIP_AUTO_CLOSE_ISSUE')
if skip_auto_close:
return
comment = (f'ClusterFuzz testcase {testcase.key.id()} is closed as invalid, '
'so closing issue.')
issue.status = policy.status('wontfix')
issue.save(new_comment=comment, notify=True)
issue_filer.notify_issue_update(testcase, 'wontfix')
logs.log(
f'Closing issue {issue.id} for invalid testcase {testcase.key.id()}.') |
Mark testcase as triage complete if both testcase and associated issue
are closed. | def mark_testcase_as_triaged_if_needed(testcase, issue):
"""Mark testcase as triage complete if both testcase and associated issue
are closed."""
# Check if testcase is open. If yes, bail out.
if testcase.open:
return
# Check if there is an associated bug in open state. If yes, bail out.
if issue:
# Get latest issue object to ensure our update went through.
issue = issue_tracker_utils.get_issue_for_testcase(testcase)
if issue.is_open:
return
testcase.triaged = True
testcase.put() |
Mark testcase as closed if the associated issue is closed. | def mark_testcase_as_closed_if_issue_is_closed(policy, testcase, issue):
"""Mark testcase as closed if the associated issue is closed."""
# If the testcase is already closed, no more work to do.
if not testcase.open:
return
# If there is no associated issue, then bail out.
if not issue or not testcase.bug_information:
return
# If the issue is still open, no work needs to be done. Bail out.
if issue.is_open:
return
# Make sure we passed our deadline based on issue closed timestamp.
if (issue.closed_time and not dates.time_has_expired(
issue.closed_time,
days=data_types.CLOSE_TESTCASE_WITH_CLOSED_BUG_DEADLINE)):
return
# If the issue has an ignore label, don't close the testcase and bail out.
# This helps to prevent new bugs from getting filed for legit WontFix cases.
if issue_tracker_utils.was_label_added(issue, policy.label('ignore')):
return
testcase.open = False
testcase.fixed = 'NA'
testcase.put()
logs.log(f'Closed testcase {testcase.key.id()} with issue closed.') |
Mark testcase as closed if the associated job type does not exist. | def mark_testcase_as_closed_if_job_is_invalid(testcase, jobs):
"""Mark testcase as closed if the associated job type does not exist."""
# If the testcase is already closed, no more work to do.
if not testcase.open:
return
# Check if the testcase job name is in the list of jobs.
if testcase.job_type in jobs:
return
testcase.open = False
testcase.fixed = 'NA'
testcase.put()
logs.log(f'Closed testcase {testcase.key.id()} with invalid job.') |
Notify closed issue if associated testcase is still open after a certain
time period. | def notify_closed_issue_if_testcase_is_open(policy, testcase, issue):
"""Notify closed issue if associated testcase is still open after a certain
time period."""
needs_feedback_label = policy.label('needs_feedback')
if not needs_feedback_label:
return
# If the testcase is already closed, no more work to do.
if not testcase.open:
return
# Check testcase status, so as to skip unreproducible uploads.
if testcase.status not in ['Processed', 'Duplicate']:
return
# If there is no associated issue, then bail out.
if not issue or not testcase.bug_information:
return
# If the issue is still open, no work needs to be done. Bail out.
if issue.is_open:
return
# If we have already passed our deadline based on issue closed timestamp,
# no need to notify. We will close the testcase instead.
if (issue.closed_time and not dates.time_has_expired(
issue.closed_time,
days=data_types.NOTIFY_CLOSED_BUG_WITH_OPEN_TESTCASE_DEADLINE)):
return
# Check if there is ignore label on issue already. If yes, bail out.
if issue_tracker_utils.was_label_added(issue, policy.label('ignore')):
return
# Check if we did add the notification comment already. If yes, bail out.
if issue_tracker_utils.was_label_added(issue, needs_feedback_label):
return
issue.labels.add(needs_feedback_label)
last_tested_revision = testcase.get_metadata('last_tested_crash_revision')
crash_revision = ''
if last_tested_revision:
crash_revision = f' r{last_tested_revision}'
if issue.status in [policy.status('fixed'), policy.status('verified')]:
issue_comment = (
f'ClusterFuzz testcase {testcase.key.id()} is still reproducing '
        f'on the latest available build{crash_revision}.'
'\n\nPlease re-test your fix against this testcase and if the '
'fix was incorrect or incomplete, please re-open the bug.')
wrong_label = policy.label('wrong')
if wrong_label:
issue_comment += (' Otherwise, ignore this notification and add the '
f'{issue.issue_tracker.label_text(wrong_label)}.')
else:
# Covers WontFix, Archived cases.
issue_comment = (
f'ClusterFuzz testcase {testcase.key.id()} '
        f'is still reproducing on the latest available build{crash_revision}.'
'\n\nIf this testcase was not reproducible locally or '
'unworkable, ignore this notification and we will file another '
'bug soon with hopefully a better and workable testcase.\n\n')
ignore_label = policy.label('ignore')
if ignore_label:
issue_comment += (
'Otherwise, if this is not intended to be fixed (e.g. this is an '
'intentional crash), please add the '
f'{issue.issue_tracker.label_text(ignore_label)} to '
'prevent future bug filing with similar crash stacktrace.')
issue.save(new_comment=issue_comment, notify=True)
logs.log(f'Notified closed issue for open testcase {testcase.key.id()}.') |
Leave comments on associated issues when test cases are no longer valid. | def notify_issue_if_testcase_is_invalid(policy, testcase, issue):
"""Leave comments on associated issues when test cases are no longer valid."""
invalid_fuzzer_label = policy.label('invalid_fuzzer')
if not invalid_fuzzer_label:
return
if not issue or not testcase.bug_information:
return
# If the issue is closed, there's no work to do.
if not issue.is_open:
return
# Currently, this only happens if a test case relies on a fuzzer that has
# been deleted. This can be modified if more cases are needed in the future.
if not testcase.get_metadata('fuzzer_was_deleted'):
return
# Check if we added this message once. If yes, bail out.
if issue_tracker_utils.was_label_added(issue, invalid_fuzzer_label):
return
issue_comment = (
      f'ClusterFuzz testcase {testcase.key.id()} '
'is associated with an obsolete fuzzer and can '
'no longer be processed. Please close the issue if it is no longer '
'actionable.')
issue.labels.add(invalid_fuzzer_label)
issue.save(new_comment=issue_comment, notify=True)
logs.log(f'Closed issue {issue.id} for '
f'invalid testcase {testcase.key.id()}.') |
Send email to uploader when all the testcase tasks are finished. | def _send_email_to_uploader(testcase_id, to_email, content):
"""Send email to uploader when all the testcase tasks are finished."""
subject = f'Your testcase upload {testcase_id} analysis is complete.'
content_with_footer = (f'{content.strip()}\n\n'
'If you suspect that the result above is incorrect, '
'try re-doing that job on the testcase report page.')
html_content = content_with_footer.replace('\n', '<br>')
mail.send(to_email, subject, html_content) |
Get the severity from the label list. | def _get_severity_from_labels(security_severity_label, labels):
"""Get the severity from the label list."""
pattern = issue_filer.get_label_pattern(security_severity_label)
for label in labels:
match = pattern.match(label)
if match:
return severity_analyzer.string_to_severity(match.group(1))
return data_types.SecuritySeverity.MISSING |
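A minimal sketch of the label scan above, assuming issue_filer.get_label_pattern compiles the policy label into a case-insensitive regex with a single capture group; the regex and label names here are hypothetical stand-ins:

import re

pattern = re.compile(r'^Security_Severity-(.+)$', re.IGNORECASE)
labels = ['Pri-1', 'Security_Severity-High', 'OS-Linux']
# Only labels matching the pattern contribute a captured severity string.
severities = [m.group(1) for m in (pattern.match(label) for label in labels) if m]
assert severities == ['High']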
Apply a new security severity label if none exists on issue already
and return a comment on this addition. If a label already exists and does
not match security severity label on issue, then just return a comment on
what the recommended severity is. | def _update_issue_security_severity_and_get_comment(policy, testcase, issue):
"""Apply a new security severity label if none exists on issue already
and return a comment on this addition. If a label already exists and does
not match security severity label on issue, then just return a comment on
what the recommended severity is."""
security_severity_label = policy.label('security_severity')
if not security_severity_label:
return ''
if not data_types.SecuritySeverity.is_valid(testcase.security_severity):
return ''
issue_severity = _get_severity_from_labels(security_severity_label,
issue.labels)
recommended_severity = issue_filer.apply_substitutions(
policy, security_severity_label, testcase)
if not recommended_severity:
return ''
recommended_severity = recommended_severity[0]
if issue_severity == data_types.SecuritySeverity.MISSING:
issue.labels.add(recommended_severity)
return ('\n\nA recommended severity was added to this bug. '
'Please change the severity if it is inaccurate.')
if issue_severity != testcase.security_severity:
return ('\n\nThe recommended severity '
f'({recommended_severity}) is different from what was assigned '
'to the bug. Please double check the accuracy of the assigned '
'severity.')
return '' |
Add issue comment when uploaded testcase is processed. | def _update_issue_when_uploaded_testcase_is_processed(
policy, testcase, issue, description, update_bug_summary, notify):
"""Add issue comment when uploaded testcase is processed."""
if update_bug_summary and testcase.is_crash():
issue.title = data_handler.get_issue_summary(testcase)
# Impact labels like impacting head/beta/stable only apply for Chromium.
if testcase.project_name in ('chromium', 'chromium-testing'):
issue_filer.update_issue_impact_labels(testcase, issue, policy)
# Add severity labels for all project types.
comment = description + _update_issue_security_severity_and_get_comment(
policy, testcase, issue)
issue.save(new_comment=comment, notify=notify) |
Notify uploader by email when all the testcase tasks are finished. | def notify_uploader_when_testcase_is_processed(policy, testcase, issue):
"""Notify uploader by email when all the testcase tasks are finished."""
testcase_id = testcase.key.id()
# Check if this is a user upload. If not, bail out.
upload_metadata = data_types.TestcaseUploadMetadata.query(
data_types.TestcaseUploadMetadata.testcase_id == testcase_id).get()
if not upload_metadata:
return
# Check that we have a valid email to send the notification. If not, bail out.
to_email = upload_metadata.uploader_email
if not to_email:
return
# If this is a bundled archive with multiple testcases, then don't send email
# for individual testcases.
if upload_metadata.bundled:
return
# Check if the notification is already sent once. If yes, bail out.
if data_handler.is_notification_sent(testcase_id, to_email):
return
  # Make sure all testcase tasks are done (e.g. minimization, regression, etc).
if not data_handler.critical_tasks_completed(testcase):
return
notify = not upload_metadata.quiet_flag
# If the same issue was specified at time of upload, update it.
if (issue and str(issue.id) == upload_metadata.bug_information and
not testcase.duplicate_of):
issue_description = data_handler.get_issue_description(testcase)
_update_issue_when_uploaded_testcase_is_processed(
policy, testcase, issue, issue_description,
upload_metadata.bug_summary_update_flag, notify)
if notify:
issue_description_without_crash_state = data_handler.get_issue_description(
testcase, hide_crash_state=True)
_send_email_to_uploader(testcase_id, to_email,
issue_description_without_crash_state)
# Make sure to create notification entry, as we use this to update bug.
data_handler.create_notification_entry(testcase_id, to_email) |
Add OS labels to issue. | def update_os_labels(policy, testcase, issue):
"""Add OS labels to issue."""
os_label = policy.label('os')
if not os_label:
return
if not issue:
return
platforms = get_crash_occurrence_platforms(testcase)
platforms = platforms.union(get_platforms_from_testcase_variants(testcase))
logs.log(
f'Found {len(platforms)} platforms for the testcase {testcase.key.id()}.',
platforms=platforms)
for platform in platforms:
label = os_label.replace('%PLATFORM%', platform.capitalize())
if not issue_tracker_utils.was_label_added(issue, label):
issue.labels.add(label)
issue.save(notify=False)
logs.log(f'Updated labels of issue {issue.id}.', labels=issue.labels) |
Add top crash label to issue. | def update_fuzz_blocker_label(policy, testcase, issue,
top_crashes_by_project_and_platform_map):
"""Add top crash label to issue."""
fuzz_blocker_label = policy.label('fuzz_blocker')
if not fuzz_blocker_label:
return
if not issue:
return
if not testcase.open:
return
top_crash_platforms = get_top_crash_platforms(
testcase, top_crashes_by_project_and_platform_map)
if not top_crash_platforms:
# Not a top crasher, bail out.
return
if issue_tracker_utils.was_label_added(issue, fuzz_blocker_label):
# Issue was already marked a top crasher, bail out.
return
if len(top_crash_platforms) == 1:
platform_message = f'{top_crash_platforms[0]} platform'
else:
platform_message = f'{", ".join(top_crash_platforms[:-1])} and ' \
f'{top_crash_platforms[-1]} platforms'
fuzzer_name = (
testcase.get_metadata('fuzzer_binary_name') or testcase.fuzzer_name)
update_message = (
f'This crash occurs very frequently on {platform_message} and '
f'is likely preventing the fuzzer {fuzzer_name} '
'from making much progress. Fixing this will allow more bugs '
'to be found.')
if utils.is_oss_fuzz():
update_message += OSS_FUZZ_INCORRECT_COMMENT
elif utils.is_chromium():
label_text = issue.issue_tracker.label_text(
data_types.CHROMIUM_ISSUE_RELEASEBLOCK_BETA_LABEL)
update_message += '\n\nMarking this bug as a blocker for next Beta release.'
update_message = _append_generic_incorrect_comment(
update_message, policy, issue, f' and remove the {label_text}.')
issue.labels.add(data_types.CHROMIUM_ISSUE_RELEASEBLOCK_BETA_LABEL)
# Update with the next beta for trunk, and remove existing milestone label.
beta_milestone_label = (
f'M-{build_info.get_release_milestone("head", testcase.platform)}')
if beta_milestone_label not in issue.labels:
issue.labels.remove_by_prefix('M-')
issue.labels.add(beta_milestone_label)
logs.log(update_message)
issue.labels.add(fuzz_blocker_label)
issue.save(new_comment=update_message, notify=True) |
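The platform phrasing above renders one platform as 'X platform' and several as 'A, B and C platforms'. A standalone sketch of just that formatting step, with made-up platform names:

def platform_message(platforms):
  # Mirrors the branch above: single platform vs. comma-joined list with 'and'.
  if len(platforms) == 1:
    return f'{platforms[0]} platform'
  return f'{", ".join(platforms[:-1])} and {platforms[-1]} platforms'

assert platform_message(['linux']) == 'linux platform'
assert platform_message(['linux', 'mac', 'windows']) == 'linux, mac and windows platforms'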
Add components to the issue if needed. | def update_component_labels(policy, testcase, issue):
"""Add components to the issue if needed."""
if not issue:
return
components = _get_predator_result_item(
testcase, 'suspected_components', default=[])
# Remove components already in issue or whose more specific variants exist.
filtered_components = []
for component in components:
found_component_in_issue = any(
component == issue_component or issue_component.startswith(component +
'>')
for issue_component in issue.components)
if not found_component_in_issue:
filtered_components.append(component)
if not filtered_components:
# If there are no new components to add, then we shouldn't make any changes
# to issue.
return
# Don't run on issues we've already applied automatic components to in case
# labels are removed manually. This may cause issues in the event that we
# rerun a test case, but it seems like a reasonable tradeoff to avoid spam.
logs.log(
'google_issue_tracker: Checking if auto_components_label %s (policy %s) '
'is in %s. Result: %s' %
(data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_COMPONENTS_LABEL,
policy.substitution_mapping(
data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_COMPONENTS_LABEL),
list(issue.labels),
issue_tracker_utils.was_label_added(
issue,
policy.substitution_mapping(
data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_COMPONENTS_LABEL))))
if issue_tracker_utils.was_label_added(
issue,
policy.substitution_mapping(
data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_COMPONENTS_LABEL)):
return
for filtered_component in filtered_components:
issue.components.add(filtered_component)
issue.labels.add(
policy.substitution_mapping(
data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_COMPONENTS_LABEL))
label_text = issue.issue_tracker.label_text(
policy.substitution_mapping(
data_types.CHROMIUM_ISSUE_PREDATOR_WRONG_COMPONENTS_LABEL))
issue_comment = (
'Automatically applying components based on crash stacktrace and '
'information from OWNERS files.\n\n'
f'If this is incorrect, please apply the {label_text}.')
issue.save(new_comment=issue_comment, notify=True) |
Remove and log all entries with trailing comments.
Eg: Do not add "[email protected] #{LAST_RESORT_SUGGESTION}". | def _sanitize_ccs_list(ccs_list):
"""Remove and log all entries with trailing comments.
Eg: Do not add "[email protected] #{LAST_RESORT_SUGGESTION}".
"""
ret_list = []
for cc in ccs_list:
if len(cc.split()) == 1:
ret_list.append(cc)
else:
logs.log(f'google_issue_tracker: Filtering out CC "{cc}"')
return ret_list |
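A usage sketch for the sanitizer above: any entry with more than one whitespace-separated token (i.e. a trailing comment) is dropped instead of being cced. The addresses below are placeholders:

ccs_list = ['dev1@example.com', 'dev2@example.com  #{LAST_RESORT_SUGGESTION}']
kept = [cc for cc in ccs_list if len(cc.split()) == 1]
assert kept == ['dev1@example.com']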
Add cc to an issue based on owners list from owners file. This is
currently applicable to fuzz targets only. | def update_issue_ccs_from_owners_file(policy, testcase, issue):
"""Add cc to an issue based on owners list from owners file. This is
currently applicable to fuzz targets only."""
auto_cc_label = policy.label('auto_cc_from_owners')
if not auto_cc_label:
return
if not issue or not issue.is_open:
return
if testcase.get_metadata('has_issue_ccs_from_owners_file'):
return
ccs_list = utils.parse_delimited(
testcase.get_metadata('issue_owners', ''),
delimiter=',',
strip=True,
remove_empty=True)
if not ccs_list:
return
# Remove unsupported entries.
ccs_list = _sanitize_ccs_list(ccs_list)
# If we've assigned the ccs before, it likely means we were incorrect.
# Don't try again for this particular issue.
logs.log(
'google_issue_tracker: Checking if auto_cc_label %s (policy: %s) is in '
'%s. Result: %s' %
(auto_cc_label, policy.label(auto_cc_label), list(issue.labels),
issue_tracker_utils.was_label_added(issue, auto_cc_label)))
if issue_tracker_utils.was_label_added(issue, auto_cc_label):
return
ccs_added = False
actions = list(issue.actions)
for cc in ccs_list:
if cc in issue.ccs:
continue
# If cc was previously manually removed from the cc list, we assume that
# they were incorrectly added. Don't try to add them again.
cc_was_removed = any(cc in action.ccs.removed for action in actions)
if cc_was_removed:
continue
issue.ccs.add(cc)
ccs_added = True
if not ccs_added:
# Everyone we'd expect to see has already been cced on the issue. No need
# to spam it with another comment. Also, set the metadata to avoid doing
# this again.
testcase.set_metadata('has_issue_ccs_from_owners_file', True)
return
issue_comment = (
'Automatically adding ccs based on OWNERS file / target commit history.')
if utils.is_oss_fuzz():
issue_comment += OSS_FUZZ_INCORRECT_COMMENT + '.'
else:
issue_comment = _append_generic_incorrect_comment(issue_comment, policy,
issue, '.')
issue.labels.add(auto_cc_label)
issue.save(new_comment=issue_comment, notify=True) |
Update issue reproducibility label when testcase becomes flaky or
unreproducible. | def update_issue_labels_for_flaky_testcase(policy, testcase, issue):
"""Update issue reproducibility label when testcase becomes flaky or
unreproducible."""
if not issue or not issue.is_open:
return
# If the testcase is reproducible, then no change is needed. Bail out.
if not testcase.one_time_crasher_flag:
return
# Make sure that no other reproducible testcases associated with this issue
# are open. If yes, no need to update label.
similar_reproducible_testcase = data_types.Testcase.query(
data_types.Testcase.bug_information == testcase.bug_information,
ndb_utils.is_true(data_types.Testcase.open),
ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get()
if similar_reproducible_testcase:
return
reproducible_label = policy.label('reproducible')
unreproducible_label = policy.label('unreproducible')
if not reproducible_label or not unreproducible_label:
return
# Make sure that this issue is not already marked Unreproducible.
if unreproducible_label in issue.labels:
return
issue.labels.remove(reproducible_label)
issue.labels.add(unreproducible_label)
comment = (f'ClusterFuzz testcase {testcase.key.id()} appears to be flaky, '
f'updating reproducibility {issue.issue_tracker.label_type}.')
issue.save(new_comment=comment) |
Assign the issue to an appropriate owner if possible. | def update_issue_owner_and_ccs_from_predator_results(policy,
testcase,
issue,
only_allow_ccs=False):
"""Assign the issue to an appropriate owner if possible."""
logs.log(f'{update_issue_owner_and_ccs_from_predator_results}')
if not issue or not issue.is_open:
return
logs.log('is_open')
# If the issue already has an owner, we don't need to update the bug.
if issue.assignee:
return
logs.log('noassignee')
# If there are more than 3 suspected CLs, we can't be confident in the
# results. Just skip any sort of notification to CL authors in this case.
suspected_cls = _get_predator_result_item(testcase, 'suspected_cls')
logs.log(f'suspected_cls {suspected_cls}')
if not suspected_cls or len(suspected_cls) > 3:
return
logs.log('suspected_cls2')
# If we've assigned an owner or cc once before, it likely means we were
# incorrect. Don't try again for this particular issue.
if (issue_tracker_utils.was_label_added(
issue,
policy.substitution_mapping(
data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_OWNER_LABEL)) or
issue_tracker_utils.was_label_added(
issue,
policy.substitution_mapping(
data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_CC_LABEL))):
return
logs.log('never assigned')
# Validate that the suspected CLs have all of the information we need before
# continuing. This allows us to assume that they are well-formed later,
# avoiding any potential exceptions that would interrupt this task.
for suspected_cl in suspected_cls:
url = suspected_cl.get('url')
description = suspected_cl.get('description')
author = suspected_cl.get('author')
if not url or not description or not author:
logs.log_error(f'Suspected CL for testcase {testcase.key.id()} '
'is missing required information.')
return
if len(suspected_cls) == 1 and not only_allow_ccs:
logs.log('only 1 CL')
suspected_cl = suspected_cls[0]
# If this owner has already been assigned before but has since been removed,
# don't assign it to them again.
for action in issue.actions:
if action.assignee == suspected_cls[0]['author']:
logs.log('already assigned')
return
# We have high confidence for the single-CL case, so we assign the owner.
logs.log('Updating issue')
issue.labels.add(
policy.substitution_mapping(
data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_OWNER_LABEL))
issue.assignee = suspected_cl['author']
issue.status = policy.status('assigned')
label_text = issue.issue_tracker.label_text(
policy.substitution_mapping(
data_types.CHROMIUM_ISSUE_PREDATOR_WRONG_CL_LABEL))
issue_comment = (
'Automatically assigning owner based on suspected regression '
f'changelist {suspected_cl["url"]} ({suspected_cl["description"]}).\n\n'
'If this is incorrect, please let us know why and apply the '
f'{label_text}. If you aren\'t the correct owner for this issue, '
'please unassign yourself as soon as possible so it can be re-triaged.')
else:
if testcase.get_metadata('has_issue_ccs_from_predator_results'):
logs.log('has_issue_ccs_from_predator_results')
return
issue_comment = (
'Automatically adding ccs based on suspected regression changelists:'
'\n\n')
ccs_added = False
for suspected_cl in suspected_cls:
# Update the comment with the suspected CL, regardless of whether or not
# we're ccing the author. This might, for example, catch the attention of
# someone who has already been cced.
author = suspected_cl['author']
issue_comment += f'{suspected_cl["description"]} by ' \
f'{author} - {suspected_cl["url"]}\n\n'
logs.log('Suspected')
if author in issue.ccs:
        logs.log('Author CCed')
continue
# If an author has previously been manually removed from the cc list,
# we assume they were incorrectly added. Don't try to add them again.
author_was_removed = False
for action in issue.actions:
if author in action.ccs.removed:
author_was_removed = True
logs.log('Breaking')
break
if author_was_removed:
logs.log('Author removed')
continue
issue.ccs.add(author)
ccs_added = True
if not ccs_added:
# Everyone we'd expect to see has already been cced on the issue. No need
# to spam it with another comment. Also, set the metadata to avoid doing
# this again.
      testcase.set_metadata('has_issue_ccs_from_predator_results', True)
logs.log('not ccs_added')
return
label_text = issue.issue_tracker.label_text(
policy.substitution_mapping(
data_types.CHROMIUM_ISSUE_PREDATOR_WRONG_CL_LABEL))
issue.labels.add(
policy.substitution_mapping(
data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_CC_LABEL))
issue_comment += (
'If this is incorrect, please let us know why and apply the '
f'{label_text}.')
try:
issue.save(new_comment=issue_comment, notify=True)
except HttpError:
# If we see such an error when we aren't setting an owner, it's unexpected.
if only_allow_ccs or not issue.assignee:
logs.log_error(
f'Unable to update issue for test case {testcase.key.id()}.')
return
# Retry without setting the owner. They may not be a chromium project
# member, in which case we can try falling back to cc.
issue = issue_tracker_utils.get_issue_for_testcase(testcase)
update_issue_owner_and_ccs_from_predator_results(
policy, testcase, issue, only_allow_ccs=True) |
Clean up unused heartbeat entities. | def cleanup_unused_heartbeats():
"""Clean up unused heartbeat entities."""
cutoff_time = utils.utcnow() - datetime.timedelta(
days=UNUSED_HEARTBEAT_THRESHOLD)
unused_heartbeats = ndb_utils.get_all_from_query(
data_types.Heartbeat.query(
data_types.Heartbeat.last_beat_time < cutoff_time),
keys_only=True)
ndb_utils.delete_multi(unused_heartbeats) |
Clean up unneeded testcases and other unused entities. | def main():
  """Clean up unneeded testcases and other unused entities."""
cleanup_testcases_and_issues()
cleanup_reports_metadata()
leak_blacklist.cleanup_global_blacklist()
cleanup_unused_fuzz_targets_and_jobs()
cleanup_unused_heartbeats()
logs.log('Cleanup task finished successfully.')
return True |
Sets public ACL on the object with given URL, if it's not public yet. | def _set_public_acl_if_needed(url):
"""Sets public ACL on the object with given URL, if it's not public yet."""
if storage.get_acl(url, 'allUsers'):
logs.log('%s is already marked public, skipping.' % url)
return True
if not storage.set_acl(url, 'allUsers'):
logs.log_error('Failed to mark %s public.' % url)
return False
return True |
Identifies old corpus backups and makes them public. | def _make_corpus_backup_public(target, corpus_fuzzer_name_override,
corpus_backup_bucket_name):
"""Identifies old corpus backups and makes them public."""
corpus_backup_date = utils.utcnow().date() - datetime.timedelta(
days=data_types.CORPUS_BACKUP_PUBLIC_LOOKBACK_DAYS)
corpus_backup_url = corpus_manager.gcs_url_for_backup_file(
corpus_backup_bucket_name, corpus_fuzzer_name_override or target.engine,
target.project_qualified_name(), corpus_backup_date)
if not storage.get(corpus_backup_url):
logs.log_warn('Failed to find corpus backup %s.' % corpus_backup_url)
return
if not _set_public_acl_if_needed(corpus_backup_url):
return
filename = (
corpus_manager.PUBLIC_BACKUP_TIMESTAMP + os.extsep +
corpus_manager.BACKUP_ARCHIVE_FORMAT)
public_url = os.path.join(os.path.dirname(corpus_backup_url), filename)
if not storage.copy_blob(corpus_backup_url, public_url):
logs.log_error(
'Failed to overwrite %s with the latest public corpus backup.' %
public_url)
return
if not _set_public_acl_if_needed(public_url):
return
logs.log('Corpus backup %s is now marked public.' % corpus_backup_url) |
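# --- Illustrative sketch (not part of the original module) ---
# Shows how the public backup URL is derived from a dated backup URL. The
# timestamp and archive format values below are hypothetical stand-ins for
# the corpus_manager constants used above; paths are joined POSIX-style.
import posixpath

PUBLIC_BACKUP_TIMESTAMP = 'latest'  # assumed placeholder
BACKUP_ARCHIVE_FORMAT = 'zip'       # assumed placeholder

backup_url = 'gs://backup-bucket/corpus/libFuzzer/my_target/2024-01-01.zip'
filename = PUBLIC_BACKUP_TIMESTAMP + '.' + BACKUP_ARCHIVE_FORMAT
public_url = posixpath.join(posixpath.dirname(backup_url), filename)
assert public_url == 'gs://backup-bucket/corpus/libFuzzer/my_target/latest.zip'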
Makes corpuses older than 90 days public. | def main():
"""Makes corpuses older than 90 days public."""
jobs = ndb_utils.get_all_from_model(data_types.Job)
default_backup_bucket = utils.default_backup_bucket()
for job in jobs:
job_environment = job.get_environment()
if utils.string_is_true(job_environment.get('EXPERIMENTAL')):
# Don't use corpus backups from experimental jobs. Skip.
continue
if not utils.string_is_true(job_environment.get('CORPUS_PRUNE')):
# There won't be any corpus backups for these jobs. Skip.
continue
corpus_backup_bucket_name = job_environment.get('BACKUP_BUCKET',
default_backup_bucket)
if not corpus_backup_bucket_name:
# No backup bucket found. Skip.
continue
corpus_fuzzer_name_override = job_environment.get(
'CORPUS_FUZZER_NAME_OVERRIDE')
target_jobs = list(fuzz_target_utils.get_fuzz_target_jobs(job=job.name))
fuzz_targets = fuzz_target_utils.get_fuzz_targets_for_target_jobs(
target_jobs)
for target in fuzz_targets:
if not target:
# This is expected if any fuzzer/job combinations become outdated.
continue
try:
_make_corpus_backup_public(target, corpus_fuzzer_name_override,
corpus_backup_bucket_name)
except:
logs.log_error(f'Failed to make {target} corpus backup public.')
logs.log('Corpus backup succeeded.')
return True |
Simple formatter to get stats for the past day. | def _past_day_formatter(query_format, dataset):
"""Simple formatter to get stats for the past day."""
end_time = utils.utcnow().date()
start_time = end_time - datetime.timedelta(days=1)
return query_format.format(
dataset=dataset, start_time=start_time, end_time=end_time) |
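# --- Illustrative sketch (not part of the original module) ---
# Demonstrates how a query template is filled by the formatter above. The
# template text is hypothetical; the real query formats are defined with the
# specifications elsewhere in this module.
import datetime

QUERY_FORMAT = ('SELECT fuzzer, job FROM {dataset}.TestcaseRun '
                "WHERE _PARTITIONTIME BETWEEN TIMESTAMP('{start_time}') "
                "AND TIMESTAMP('{end_time}')")

end_time = datetime.date(2024, 1, 2)
start_time = end_time - datetime.timedelta(days=1)
print(QUERY_FORMAT.format(
    dataset='libFuzzer_stats', start_time=start_time, end_time=end_time))
# SELECT fuzzer, job FROM libFuzzer_stats.TestcaseRun WHERE _PARTITIONTIME
# BETWEEN TIMESTAMP('2024-01-01') AND TIMESTAMP('2024-01-02')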
Prepare a query to check for new fuzzers from the past week. | def _new_fuzzer_formatter(query_format, dataset):
"""Prepare a query to check for new fuzzers from the past week."""
now = utils.utcnow().date()
cutoff_time = now - datetime.timedelta(days=7)
return query_format.format(dataset=dataset, cutoff_time=cutoff_time) |
Prepare a query to check for changes in coverage week over week. | def _coverage_formatter(query_format, dataset):
"""Prepare a query to check for changes in coverage week over week."""
end_date = utils.utcnow().date() - datetime.timedelta(days=1)
middle_date = end_date - datetime.timedelta(days=7)
start_date = end_date - datetime.timedelta(days=14)
return query_format.format(
dataset=dataset,
start_date=start_date,
middle_date=middle_date,
end_date=end_date) |
Helper function to get fuzzer stats. | def _query_helper(client, query):
"""Helper function to get fuzzer stats."""
return client.query(query=query).rows |
Update the weight for a fuzzer/job. | def _update_match(matches, fuzzer, job, match):
"""Update the weight for a fuzzer/job."""
key = (fuzzer, job)
old_match = matches.get(key, RESTORE_DEFAULT_MATCH)
new_weight = match.new_weight
old_weight = old_match.new_weight
# Rules that increase weights are expected to take precedence over any that
# lower the weight. Issues with new fuzzers may be fixed intraday and other
# issues like crashes shouldn't be penalized for them.
if old_weight > 1.0:
return
# Always update the weight if the previous value is the default. This is
# required to deal with specifications that are meant to set the weight above
# 1.0. Otherwise, prioritize only the most penalizing match for this pairing.
if old_match == RESTORE_DEFAULT_MATCH or new_weight < old_weight:
matches[key] = match |
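# --- Illustrative sketch (not part of the original module) ---
# Self-contained trace of the precedence rules in _update_match, assuming the
# function above is in scope together with these stand-ins. The namedtuple and
# the default-match value are assumptions; the real SpecificationMatch and
# RESTORE_DEFAULT_MATCH are defined elsewhere in this module.
import collections

SpecificationMatch = collections.namedtuple('SpecificationMatch',
                                            ['new_weight', 'reason'])
RESTORE_DEFAULT_MATCH = SpecificationMatch(1.0, 'Restore to default weight.')

matches = {}
_update_match(matches, 'my_fuzzer', 'my_job',
              SpecificationMatch(0.50, 'Frequent crasher.'))
_update_match(matches, 'my_fuzzer', 'my_job',
              SpecificationMatch(0.25, 'Startup crashes.'))
# The most penalizing match wins for this fuzzer/job pair.
assert matches[('my_fuzzer', 'my_job')].new_weight == 0.25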
Set the weight for a particular target. | def update_weight_for_target(fuzz_target_name, job, match):
"""Set the weight for a particular target."""
target_job = data_handler.get_fuzz_target_job(fuzz_target_name, job)
if not target_job:
# Bail out. This is expected if any fuzzer/job combinations become outdated.
return
weight = match.new_weight
logs.log('Adjusted weight to %f for target %s and job %s (%s).' %
(weight, fuzz_target_name, job, match.reason))
target_job.weight = weight
target_job.put() |
Run a query and adjust weights based on a given query specification. | def update_matches_for_specification(specification, client, engine, matches,
run_set):
"""Run a query and adjust weights based on a given query specification."""
query = specification.formatter(specification.query_format,
fuzzer_stats.dataset_name(engine))
results = _query_helper(client, query)
for result in results:
fuzzer = result['fuzzer']
job = result['job']
new_weight = result['new_weight']
if new_weight is None:
continue
run_set.add((fuzzer, job))
if new_weight != 1.0:
match = SpecificationMatch(
new_weight=new_weight, reason=specification.reason)
_update_match(matches, fuzzer, job, match) |
Update all fuzz target weights for the specified engine. | def update_target_weights_for_engine(client, engine, specifications):
"""Update all fuzz target weights for the specified engine."""
matches = {}
run_set = set()
# All fuzzers with non-default weights must be tracked with a special
# specification. This ensures that they will be restored to normal weight
# once conditions causing adjustments are no longer met.
target_jobs = data_types.FuzzTargetJob.query(
data_types.FuzzTarget.engine == engine).filter(
data_types.FuzzTargetJob.weight != 1.0)
for target_job in target_jobs:
matches[(target_job.fuzz_target_name,
target_job.job)] = RESTORE_DEFAULT_MATCH
for match in specifications:
update_matches_for_specification(match, client, engine, matches, run_set)
for (fuzzer, job), match in matches.items():
if (fuzzer, job) not in run_set:
# This ensures that we don't reset weights for fuzzers with problems if
# they didn't run in the time covered by our queries.
continue
update_weight_for_target(fuzzer, job, match)
logs.log('Weight adjustments complete for engine %s.' % engine) |
Update a bigquery table containing the daily stats. | def store_current_weights_in_bigquery():
"""Update a bigquery table containing the daily stats."""
rows = []
target_jobs = ndb_utils.get_all_from_model(data_types.FuzzTargetJob)
for target_job in target_jobs:
row = {
'fuzzer': target_job.fuzz_target_name,
'job': target_job.job,
'weight': target_job.weight
}
rows.append(big_query.Insert(row=row, insert_id=None))
client = big_query.Client(dataset_id='main', table_id='fuzzer_weights')
client.insert(rows) |
Update a job weight. | def update_job_weight(job_name, multiplier):
"""Update a job weight."""
tool_name = environment.get_memory_tool_name(job_name)
multiplier *= SANITIZER_WEIGHTS.get(tool_name, DEFAULT_SANITIZER_WEIGHT)
engine = environment.get_engine_for_job(job_name)
multiplier *= ENGINE_WEIGHTS.get(engine, DEFAULT_ENGINE_WEIGHT)
query = data_types.FuzzerJob.query(data_types.FuzzerJob.job == job_name)
changed_weights = []
for fuzzer_job in query:
if fuzzer_job.multiplier != multiplier:
fuzzer_job.multiplier = multiplier
changed_weights.append(fuzzer_job)
if changed_weights:
ndb_utils.put_multi(changed_weights) |
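# --- Illustrative sketch (not part of the original module) ---
# Arithmetic behind the multiplier adjustment above, with hypothetical weight
# tables; the real SANITIZER_WEIGHTS and ENGINE_WEIGHTS values are defined
# elsewhere in this module.
SANITIZER_WEIGHTS = {'ASAN': 1.0, 'MSAN': 0.25}   # assumed values
ENGINE_WEIGHTS = {'libFuzzer': 1.0, 'afl': 0.5}   # assumed values
DEFAULT_SANITIZER_WEIGHT = 0.1                    # assumed value
DEFAULT_ENGINE_WEIGHT = 1.0                       # assumed value

multiplier = 25.0  # e.g. a capped fuzz target count for the job
multiplier *= SANITIZER_WEIGHTS.get('MSAN', DEFAULT_SANITIZER_WEIGHT)
multiplier *= ENGINE_WEIGHTS.get('libFuzzer', DEFAULT_ENGINE_WEIGHT)
assert multiplier == 6.25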
Update job weights. | def update_job_weights():
"""Update job weights."""
for job in data_types.Job.query():
multiplier = DEFAULT_MULTIPLIER
if environment.is_engine_fuzzer_job(job.name):
targets_count = ndb.Key(data_types.FuzzTargetsCount, job.name).get()
# If the count is 0, it may be due to a bad build or some other issue. Use
# the default weight in that case to allow for recovery.
if targets_count and targets_count.count:
multiplier = targets_count.count
multiplier = min(multiplier, TARGET_COUNT_WEIGHT_CAP)
update_job_weight(job.name, multiplier) |
Periodically update fuzz target weights based on performance. | def main():
"""Periodically update fuzz target weights based on performance."""
client = big_query.Client()
update_target_weights_for_engine(client, 'libFuzzer',
LIBFUZZER_SPECIFICATIONS)
update_target_weights_for_engine(client, 'afl', AFL_SPECIFICATIONS)
update_job_weights()
store_current_weights_in_bigquery()
logs.log('Fuzzer and job weights succeeded.')
return True |
Returns a GCS URL to the latest report info for the given bucket. | def _latest_report_info_dir(bucket):
"""Returns a GCS URL to the latest report info for the given bucket."""
return f'gs://{bucket}/latest_report_info/' |
Returns the basename for the given path without file extension. | def _basename(gcs_path):
"""Returns the basename for the given path without file extension."""
return os.path.splitext(os.path.basename(gcs_path))[0] |
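# --- Illustrative sketch (not part of the original module) ---
# Example inputs and outputs for the two small helpers above, assuming they
# are in scope.
assert _latest_report_info_dir('my-coverage-bucket') == (
    'gs://my-coverage-bucket/latest_report_info/')
assert _basename('gs://bucket/latest_report_info/some_project.json') == (
    'some_project')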
Returns a JSON object loaded from the given GCS url. | def _read_json(url):
"""Returns a JSON object loaded from the given GCS url."""
data = storage.read_data(url)
result = None
try:
result = json.loads(data)
except Exception as e:
logs.log_warn(
'Empty or malformed code coverage JSON (%s): %s.' % (url, str(e)))
return result |
Returns a CoverageInformation entity with coverage stats populated. | def _coverage_information(summary_path, name, report_info):
"""Returns a CoverageInformation entity with coverage stats populated."""
date = datetime.datetime.strptime(
report_info['report_date'],
data_types.COVERAGE_INFORMATION_DATE_FORMAT).date()
# |name| can be either a project qualified fuzz target name or a project name.
cov_info = data_handler.get_coverage_information(
name, date, create_if_needed=True)
cov_info.fuzzer = name
cov_info.date = date
# Link to a per project report as long as we don't have per fuzzer reports.
cov_info.html_report_url = report_info['html_report_url']
summary = _read_json(summary_path)
if not summary:
# We can encounter empty JSON files for broken fuzz targets.
return cov_info
try:
# Don't rely on the coverage data being well-formatted. Otherwise new
# languages can break everything else.
total_stats = summary['data'][0]['totals']
cov_info.functions_covered = total_stats['functions']['covered']
cov_info.functions_total = total_stats['functions']['count']
cov_info.edges_covered = total_stats['regions']['covered']
cov_info.edges_total = total_stats['regions']['count']
return cov_info
except KeyError:
logs.log_error('Malformed code coverage for %s.' % name)
return None |
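# --- Illustrative sketch (not part of the original module) ---
# Expected shape of the coverage summary consumed above (an llvm-cov style
# export); the numbers are made up for illustration.
summary = {
    'data': [{
        'totals': {
            'functions': {'count': 120, 'covered': 90},
            'regions': {'count': 4000, 'covered': 3100},
        },
    }],
}
total_stats = summary['data'][0]['totals']
assert total_stats['functions']['covered'] == 90
assert total_stats['regions']['count'] == 4000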
Processes coverage stats for a single fuzz target. | def _process_fuzzer_stats(fuzzer, project_info, project_name, bucket):
"""Processes coverage stats for a single fuzz target."""
fuzzer_name = data_types.fuzz_target_project_qualified_name(
project_name, _basename(fuzzer))
fuzzer_info_path = storage.get_cloud_storage_file_path(bucket, fuzzer)
logs.log(
'Processing fuzzer stats for %s (%s).' % (fuzzer_name, fuzzer_info_path))
return _coverage_information(fuzzer_info_path, fuzzer_name, project_info) |
Processes coverage stats for a single project. | def _process_project_stats(project_info, project_name):
"""Processes coverage stats for a single project."""
summary_path = project_info['report_summary_path']
logs.log('Processing total stats for %s project (%s).' % (project_name,
summary_path))
return _coverage_information(summary_path, project_name, project_info) |
Collects coverage information for all fuzz targets in the given project and
the total stats for the project. | def _process_project(project_name, latest_project_info_url, bucket):
"""Collects coverage information for all fuzz targets in the given project and
the total stats for the project."""
logs.log('Processing coverage for %s project.' % project_name)
report_info = _read_json(latest_project_info_url)
if not report_info:
logs.log_warn('Skipping code coverage for %s project.' % project_name)
return
# Iterate through report_info['fuzzer_stats_dir'] and prepare
# CoverageInformation entities for individual fuzz targets.
entities = []
for fuzzer in storage.list_blobs(
report_info['fuzzer_stats_dir'], recursive=False):
fuzzer_stats = _process_fuzzer_stats(fuzzer, report_info, project_name,
bucket)
if fuzzer_stats:
entities.append(fuzzer_stats)
logs.log('Processed coverage for %d targets in %s project.' % (len(entities),
project_name))
# Prepare CoverageInformation entity for the total project stats.
project_stats = _process_project_stats(report_info, project_name)
if project_stats:
entities.append(project_stats)
ndb_utils.put_multi(entities) |
Actual implementation of the fuzzer coverage task. | def collect_fuzzer_coverage(bucket):
"""Actual implementation of the fuzzer coverage task."""
url = _latest_report_info_dir(bucket)
for latest_project_report_info_path in storage.list_blobs(
url, recursive=False):
project = _basename(latest_project_report_info_path)
latest_project_info_url = storage.get_cloud_storage_file_path(
bucket,
latest_project_report_info_path) # Path is relative to the bucket.
_process_project(project, latest_project_info_url, bucket) |
Collects the latest code coverage stats and links to reports. | def main():
"""Collects the latest code coverage stats and links to reports."""
# The task is supposed to be super reliable and never fail. If anything goes
# wrong, we just fail with the exception going straight into StackDriver.
logs.log('FuzzerCoverage task started.')
bucket = local_config.ProjectConfig().get('coverage.reports.bucket')
if not bucket:
logs.log_error(
'Coverage bucket is not specified. Skipping FuzzerCoverage task.')
return False
collect_fuzzer_coverage(bucket)
logs.log('FuzzerCoverage task finished successfully.')
return True |
Get query results.
Runs BANDIT_PROBABILITY_QUERY (defined above) and yields the results
from BigQuery. The query results are sorted by strategy. | def _query_multi_armed_bandit_probabilities(engine):
"""Get query results.
Runs BANDIT_PROBABILITY_QUERY (defined above) and yields the results
from BigQuery. The query results are sorted by strategy."""
strategy_names_list = [
strategy_entry.name for strategy_entry in engine.query_strategy_list
]
strategies_subquery = '\n'.join([
STRATEGY_SUBQUERY_FORMAT.format(strategy_name=strategy_name)
for strategy_name in strategy_names_list
])
client = big_query.Client()
strategies = ','.join(
['strategy_' + strategy_name for strategy_name in strategy_names_list])
formatted_query = BANDIT_PROBABILITY_QUERY_FORMAT.format(
performance_metric=engine.performance_metric,
temperature_value=TEMPERATURE_PARAMETER,
strategies=strategies,
strategies_subquery=strategies_subquery,
engine=engine.name)
return client.query(query=formatted_query).rows |
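# --- Illustrative sketch (not part of the original module) ---
# How the per-strategy pieces are assembled before formatting the main query.
# The subquery template and strategy names below are hypothetical, not the
# real STRATEGY_SUBQUERY_FORMAT or engine strategy list.
STRATEGY_SUBQUERY_FORMAT = (
    'strategy_{strategy_name} AS (SELECT /* ... */ '
    "WHERE strategy = '{strategy_name}')")

strategy_names_list = ['corpus_subset', 'value_profile']
strategies_subquery = '\n'.join(
    STRATEGY_SUBQUERY_FORMAT.format(strategy_name=name)
    for name in strategy_names_list)
strategies = ','.join('strategy_' + name for name in strategy_names_list)
assert strategies == 'strategy_corpus_subset,strategy_value_profile'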
Update a bigquery table containing the daily updated
probability distribution over strategies. | def _store_probabilities_in_bigquery(engine, data):
"""Update a bigquery table containing the daily updated
probability distribution over strategies."""
bigquery_data = []
# TODO(mukundv): Update once we choose a temperature parameter for final
# implementation.
for row in data:
bigquery_row = {
'strategy_name': row['strategy'],
'probability': row['bandit_weight'],
'engine': engine.name
}
bigquery_data.append(big_query.Insert(row=bigquery_row, insert_id=None))
if bigquery_data:
client = big_query.Client(
dataset_id='main', table_id='fuzz_strategy_probability')
client.insert(bigquery_data)
else:
logs.log('No fuzz strategy distribution data was found to upload to '
'BigQuery.') |
Uploads queried data into datastore.
Calls query functions and uploads query results
to datastore to use as new probabilities. Probabilities
are based on new_edges feature. | def _query_and_upload_strategy_probabilities(engine):
"""Uploads queried data into datastore.
Calls query functions and uploads query results
to datastore to use as new probabilities. Probabilities
are based on new_edges feature."""
strategy_data = []
data = _query_multi_armed_bandit_probabilities(engine)
logs.log('Queried distribution for {}.'.format(engine.name))
# TODO(mukundv): Update once we choose a temperature parameter for final
# implementation.
for row in data:
curr_strategy = data_types.FuzzStrategyProbability()
curr_strategy.strategy_name = str(row['strategy'])
curr_strategy.probability = float(row['bandit_weight'])
curr_strategy.engine = engine.name
strategy_data.append(curr_strategy)
query = data_types.FuzzStrategyProbability.query(
data_types.FuzzStrategyProbability.engine == engine.name)
ndb_utils.delete_multi(
[entity.key for entity in ndb_utils.get_all_from_query(query)])
ndb_utils.put_multi(strategy_data)
logs.log('Uploaded queried distribution to ndb for {}'.format(engine.name))
_store_probabilities_in_bigquery(engine, data)
logs.log('Uploaded queried distribution to BigQuery for {}'.format(
engine.name)) |
Periodically update fuzz strategy bandit probabilities
based on a performance metric (currently based on new_edges). | def main():
"""Periodically update fuzz strategy bandit probabilities
based on a performance metric (currently based on new_edges)."""
for engine in ENGINE_LIST:
_query_and_upload_strategy_probabilities(engine)
logs.log('Fuzz strategy selection succeeded.')
return True |
Combine two testcases into a group. | def combine_testcases_into_group(testcase_1, testcase_2, testcase_map):
"""Combine two testcases into a group."""
logs.log(
'Grouping testcase 1 '
'(crash_type=%s, crash_state=%s, security_flag=%s, group=%s) '
'and testcase 2 '
'(crash_type=%s, crash_state=%s, security_flag=%s, group=%s).' %
(testcase_1.crash_type, testcase_1.crash_state, testcase_1.security_flag,
testcase_1.group_id, testcase_2.crash_type, testcase_2.crash_state,
testcase_2.security_flag, testcase_2.group_id))
# If neither of the two testcases has a group id, just assign a new group id
# to both.
if not testcase_1.group_id and not testcase_2.group_id:
new_group_id = _get_new_group_id()
testcase_1.group_id = new_group_id
testcase_2.group_id = new_group_id
return
# If one of the testcases has a group id, then assign the same group id to
# the other.
if testcase_1.group_id and not testcase_2.group_id:
testcase_2.group_id = testcase_1.group_id
return
if testcase_2.group_id and not testcase_1.group_id:
testcase_1.group_id = testcase_2.group_id
return
# If both testcases have their own groups, then just merge the two groups
# together and reuse one of their group ids.
group_id_to_reuse = testcase_1.group_id
group_id_to_move = testcase_2.group_id
for testcase in testcase_map.values():
if testcase.group_id == group_id_to_move:
testcase.group_id = group_id_to_reuse |
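# --- Illustrative sketch (not part of the original module) ---
# Behavior of the final merge rule above, using a tiny stand-in object instead
# of the real testcase attributes: every member of the group being moved ends
# up with the reused group id.
class _FakeTestcase:
  def __init__(self, group_id=0):
    self.group_id = group_id

t1, t2, t3 = _FakeTestcase(3), _FakeTestcase(7), _FakeTestcase(7)
testcase_map = {1: t1, 2: t2, 3: t3}
group_id_to_reuse, group_id_to_move = t1.group_id, t2.group_id
for testcase in testcase_map.values():
  if testcase.group_id == group_id_to_move:
    testcase.group_id = group_id_to_reuse
assert t2.group_id == t3.group_id == 3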
Get a new group id for testcase grouping. | def _get_new_group_id():
"""Get a new group id for testcase grouping."""
new_group = data_types.TestcaseGroup()
new_group.put()
return new_group.key.id() |
Checks for the testcase variants equality. | def is_same_variant(variant1, variant2):
"""Checks for the testcase variants equality."""
return (variant1.crash_type == variant2.crash_type and
variant1.crash_state == variant2.crash_state and
variant1.security_flag == variant2.security_flag) |
Returns whether or not a testcase is a top crash. | def matches_top_crash(testcase, top_crashes_by_project_and_platform):
"""Returns whether or not a testcase is a top crash."""
if testcase.project_name not in top_crashes_by_project_and_platform:
return False
crashes_by_platform = top_crashes_by_project_and_platform[
testcase.project_name]
for crashes in crashes_by_platform.values():
for crash in crashes:
if (crash['crashState'] == testcase.crash_state and
crash['crashType'] == testcase.crash_type and
crash['isSecurity'] == testcase.security_flag):
return True
return False |
Group testcases that are associated based on variant analysis. | def _group_testcases_based_on_variants(testcase_map):
"""Group testcases that are associated based on variant analysis."""
# Skip this if the project is configured so (like Google3).
enable = local_config.ProjectConfig().get('deduplication.variant', True)
if not enable:
return
logs.log('Grouping based on variant analysis.')
grouping_candidates = collections.defaultdict(list)
project_num_testcases = collections.defaultdict(int)
# Phase 1: collect all grouping candidates.
for testcase_1_id, testcase_1 in testcase_map.items():
# Count the number of testcases for each project.
project_num_testcases[testcase_1.project_name] += 1
for testcase_2_id, testcase_2 in testcase_map.items():
# Rule: Don't compare a testcase against itself, and check each pair of
# testcases only once.
if testcase_1_id <= testcase_2_id:
continue
# Rule: If both testcases have the same group id, then there is no work to do.
if testcase_1.group_id == testcase_2.group_id and testcase_1.group_id:
continue
# Rule: Check that both testcases are under the same project.
if testcase_1.project_name != testcase_2.project_name:
continue
# Rule: If both testcases have the same job_type, then skip variant analysis.
if testcase_1.job_type == testcase_2.job_type:
continue
# Rule: Skip variant analysis if either testcase is a timeout or OOM crash.
if (VARIANT_CRASHES_IGNORE.match(testcase_1.crash_type) or
VARIANT_CRASHES_IGNORE.match(testcase_2.crash_type)):
continue
# Rule: Skip variant analysis if either testcase's crash state is NULL.
if (VARIANT_STATES_IGNORE.match(testcase_1.crash_state) or
VARIANT_STATES_IGNORE.match(testcase_2.crash_state)):
continue
# Rule: Skip variant analysis if either testcase is not reproducible.
if testcase_1.one_time_crasher_flag or testcase_2.one_time_crasher_flag:
continue
# Rule: Group testcases with similar variants.
# For each testcase_2, get testcase_1's variant on testcase_2's job and check
# for equivalence.
candidate_variant = data_handler.get_testcase_variant(
testcase_1_id, testcase_2.job_type)
if (not candidate_variant or
not is_same_variant(candidate_variant, testcase_2)):
continue
current_project = testcase_1.project_name
grouping_candidates[current_project].append((testcase_1_id,
testcase_2_id))
# Top crashes are usually startup crashes, so don't group them.
top_crashes_by_project_and_platform = (
cleanup.get_top_crashes_for_all_projects_and_platforms(
limit=TOP_CRASHES_LIMIT))
# Phase 2: check for the anomalous candidates
# i.e. candidates matched with many testcases.
for project, candidate_list in grouping_candidates.items():
project_ignore_testcases = set()
# Count the number of times a testcase is matched for grouping.
project_counter = collections.defaultdict(int)
for candidate_tuple in candidate_list:
for testcase_id in candidate_tuple:
project_counter[testcase_id] += 1
# Determine anomalous candidates.
threshold = VARIANT_THRESHOLD_PERCENTAGE * project_num_testcases[project]
threshold = min(threshold, VARIANT_MAX_THRESHOLD)
threshold = max(threshold, VARIANT_MIN_THRESHOLD)
# Check threshold to be above a minimum, to avoid unnecessary filtering.
for testcase_id, count in project_counter.items():
if count >= threshold:
project_ignore_testcases.add(testcase_id)
for (testcase_1_id, testcase_2_id) in candidate_list:
if (testcase_1_id in project_ignore_testcases or
testcase_2_id in project_ignore_testcases):
logs.log('VARIANT ANALYSIS (Pruning): Anomalous match: (id1=%s, '
'matched_count1=%d) matched with (id2=%d, matched_count2=%d), '
'threshold=%.2f.' %
(testcase_1_id, project_counter[testcase_1_id], testcase_2_id,
project_counter[testcase_2_id], threshold))
continue
testcase_1 = testcase_map[testcase_1_id]
testcase_2 = testcase_map[testcase_2_id]
if (matches_top_crash(testcase_1, top_crashes_by_project_and_platform) or
matches_top_crash(testcase_2, top_crashes_by_project_and_platform)):
logs.log(f'VARIANT ANALYSIS: {testcase_1_id} or {testcase_2_id} '
'is a top crash, skipping.')
continue
logs.log(
'VARIANT ANALYSIS: Grouping testcase 1 '
'(id=%s, '
'crash_type=%s, crash_state=%s, security_flag=%s, job=%s, group=%s) '
'and testcase 2 (id=%s, '
'crash_type=%s, crash_state=%s, security_flag=%s, job=%s, group=%s).'
%
(testcase_1.id, testcase_1.crash_type, testcase_1.crash_state,
testcase_1.security_flag, testcase_1.job_type, testcase_1.group_id,
testcase_2.id, testcase_2.crash_type, testcase_2.crash_state,
testcase_2.security_flag, testcase_2.job_type, testcase_2.group_id))
combine_testcases_into_group(testcase_1, testcase_2, testcase_map) |
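# --- Illustrative sketch (not part of the original module) ---
# The anomaly threshold above is a percentage of the project's testcase count
# clamped to a [min, max] band; the constants below are hypothetical, not the
# real VARIANT_* values.
VARIANT_THRESHOLD_PERCENTAGE = 0.2  # assumed value
VARIANT_MIN_THRESHOLD = 5           # assumed value
VARIANT_MAX_THRESHOLD = 50          # assumed value

def _anomaly_threshold(num_testcases):
  threshold = VARIANT_THRESHOLD_PERCENTAGE * num_testcases
  threshold = min(threshold, VARIANT_MAX_THRESHOLD)
  threshold = max(threshold, VARIANT_MIN_THRESHOLD)
  return threshold

assert _anomaly_threshold(10) == 5      # clamped up to the minimum
assert _anomaly_threshold(1000) == 50   # clamped down to the maximum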
Group testcases that are associated with same underlying issue. | def _group_testcases_with_same_issues(testcase_map):
"""Group testcases that are associated with same underlying issue."""
logs.log('Grouping based on same issues.')
for testcase_1_id, testcase_1 in testcase_map.items():
for testcase_2_id, testcase_2 in testcase_map.items():
# Rule: Don't compare a testcase against itself, and check each pair of
# testcases only once.
if testcase_1_id <= testcase_2_id:
continue
# Rule: If both testcases have the same group id, then there is no work to do.
if testcase_1.group_id == testcase_2.group_id and testcase_1.group_id:
continue
# Rule: Check that both testcases have an associated issue id.
if testcase_1.issue_id is None or testcase_2.issue_id is None:
continue
# Rule: Check that both testcases are under the same project.
if testcase_1.project_name != testcase_2.project_name:
continue
# Rule: Group testcase with same underlying issue.
if testcase_1.issue_id != testcase_2.issue_id:
continue
combine_testcases_into_group(testcase_1, testcase_2, testcase_map) |
Group testcases with similar looking crash states. | def _group_testcases_with_similar_states(testcase_map):
"""Group testcases with similar looking crash states."""
logs.log('Grouping based on similar states.')
for testcase_1_id, testcase_1 in testcase_map.items():
for testcase_2_id, testcase_2 in testcase_map.items():
# Rule: Don't compare a testcase against itself, and check each pair of
# testcases only once.
if testcase_1_id <= testcase_2_id:
continue
# If both testcases have the same group id, then there is no work to do.
if testcase_1.group_id == testcase_2.group_id and testcase_1.group_id:
continue
# Rule: Check that both testcases are under the same project.
if testcase_1.project_name != testcase_2.project_name:
continue
# Rule: Security bugs should never be grouped with functional bugs.
if testcase_1.security_flag != testcase_2.security_flag:
continue
# Rule: Follow different comparison rules when the crash type is one of the
# types that have a unique crash state (custom ones specifically).
if (testcase_1.crash_type in data_types.CRASH_TYPES_WITH_UNIQUE_STATE or
testcase_2.crash_type in data_types.CRASH_TYPES_WITH_UNIQUE_STATE):
# For grouping, make sure that both crash type and state match.
if (testcase_1.crash_type != testcase_2.crash_type or
testcase_1.crash_state != testcase_2.crash_state):
continue
else:
# Rule: For functional bugs, compare for similar crash types.
if not testcase_1.security_flag:
crash_comparer = CrashComparer(testcase_1.crash_type,
testcase_2.crash_type)
if not crash_comparer.is_similar():
continue
# Rule: Check for crash state similarity.
crash_comparer = CrashComparer(testcase_1.crash_state,
testcase_2.crash_state)
if not crash_comparer.is_similar():
continue
combine_testcases_into_group(testcase_1, testcase_2, testcase_map) |
Return a bool whether there is another testcase with same params. | def _has_testcase_with_same_params(testcase, testcase_map):
"""Return a bool whether there is another testcase with same params."""
for other_testcase_id in testcase_map:
# yapf: disable
if (testcase.project_name ==
testcase_map[other_testcase_id].project_name and
testcase.crash_state ==
testcase_map[other_testcase_id].crash_state and
testcase.crash_type ==
testcase_map[other_testcase_id].crash_type and
testcase.security_flag ==
testcase_map[other_testcase_id].security_flag and
testcase.one_time_crasher_flag ==
testcase_map[other_testcase_id].one_time_crasher_flag):
return True
# yapf: enable
return False |
Shrinks groups that exceed a particular limit. | def _shrink_large_groups_if_needed(testcase_map):
"""Shrinks groups that exceed a particular limit."""
def _key_func(testcase):
weight = 0
if not testcase.one_time_crasher_flag:
weight |= 2**1
if testcase.issue_id:
weight |= 2**2
return weight
group_id_with_testcases_map = {}
for testcase in testcase_map.values():
if not testcase.group_id:
continue
if testcase.group_id not in group_id_with_testcases_map:
group_id_with_testcases_map[testcase.group_id] = [testcase]
else:
group_id_with_testcases_map[testcase.group_id].append(testcase)
for testcases_in_group in group_id_with_testcases_map.values():
if len(testcases_in_group) <= GROUP_MAX_TESTCASE_LIMIT:
continue
testcases_in_group = sorted(testcases_in_group, key=_key_func)
for testcase in testcases_in_group[:-GROUP_MAX_TESTCASE_LIMIT]:
try:
testcase_entity = data_handler.get_testcase_by_id(testcase.id)
except errors.InvalidTestcaseError:
# Already deleted.
continue
if testcase_entity.bug_information:
continue
logs.log_warn(('Deleting testcase {testcase_id} due to overflowing group '
'{group_id}.').format(
testcase_id=testcase.id, group_id=testcase.group_id))
testcase_entity.key.delete() |
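# --- Illustrative sketch (not part of the original module) ---
# The sort key above ranks testcases so that unreproducible testcases without
# an issue sort first (and are trimmed first), while reproducible testcases
# with an issue sort last (and are kept).
def _example_key(one_time_crasher_flag, issue_id):
  weight = 0
  if not one_time_crasher_flag:
    weight |= 2**1
  if issue_id:
    weight |= 2**2
  return weight

assert _example_key(True, None) == 0    # unreproducible, no issue
assert _example_key(False, None) == 2   # reproducible, no issue
assert _example_key(False, 1234) == 6   # reproducible with an issue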
Group testcases based on rules like same bug numbers, similar crash
states, etc. | def group_testcases():
"""Group testcases based on rules like same bug numbers, similar crash
states, etc."""
testcase_map = {}
cached_issue_map = {}
for testcase_id in data_handler.get_open_testcase_id_iterator():
try:
testcase = data_handler.get_testcase_by_id(testcase_id)
except errors.InvalidTestcaseError:
# Already deleted.
continue
# Remove duplicates early on to avoid large groups.
if (not testcase.bug_information and not testcase.uploader_email and
_has_testcase_with_same_params(testcase, testcase_map)):
logs.log('Deleting duplicate testcase %d.' % testcase_id)
testcase.key.delete()
continue
# Wait for minimization to finish as this might change crash params such
# as type and may mark it as duplicate / closed.
if not testcase.minimized_keys:
continue
# Store needed testcase attributes into |testcase_map|.
testcase_map[testcase_id] = TestcaseAttributes(testcase_id)
testcase_attributes = testcase_map[testcase_id]
for attribute_name in FORWARDED_ATTRIBUTES:
setattr(testcase_attributes, attribute_name,
getattr(testcase, attribute_name))
# Store original issue mappings in the testcase attributes.
if testcase.bug_information:
issue_id = int(testcase.bug_information)
project_name = testcase.project_name
if (project_name in cached_issue_map and
issue_id in cached_issue_map[project_name]):
testcase_attributes.issue_id = (
cached_issue_map[project_name][issue_id])
else:
try:
issue_tracker = issue_tracker_utils.get_issue_tracker_for_testcase(
testcase)
except ValueError:
logs.log_error('Couldn\'t get issue tracker for issue.')
del testcase_map[testcase_id]
continue
if not issue_tracker:
logs.log_error(
'Unable to access issue tracker for issue %d.' % issue_id)
testcase_attributes.issue_id = issue_id
continue
# Determine the original issue id traversing the list of duplicates.
try:
issue = issue_tracker.get_original_issue(issue_id)
original_issue_id = int(issue.id)
except:
# If we are unable to access the issue, then we can't determine
# the original issue id. Assume that it is the same as issue id.
logs.log_error(
'Unable to determine original issue for issue %d.' % issue_id)
testcase_attributes.issue_id = issue_id
continue
if project_name not in cached_issue_map:
cached_issue_map[project_name] = {}
cached_issue_map[project_name][issue_id] = original_issue_id
cached_issue_map[project_name][original_issue_id] = original_issue_id
testcase_attributes.issue_id = original_issue_id
# No longer needed. Free up some memory.
cached_issue_map.clear()
_group_testcases_with_similar_states(testcase_map)
_group_testcases_with_same_issues(testcase_map)
_group_testcases_based_on_variants(testcase_map)
_shrink_large_groups_if_needed(testcase_map)
group_leader.choose(testcase_map)
# TODO(aarya): Replace with an optimized implementation using dirty flag.
# Update the group mapping in testcase object.
for testcase_id in data_handler.get_open_testcase_id_iterator():
if testcase_id not in testcase_map:
# A new testcase that was just created. Skip for now, will be grouped in
# next iteration of group task.
continue
# If we are part of a group, then calculate the number of testcases in that
# group and lowest issue id of issues associated with testcases in that
# group.
updated_group_id = testcase_map[testcase_id].group_id
updated_is_leader = testcase_map[testcase_id].is_leader
updated_group_id_count = 0
updated_group_bug_information = 0
if updated_group_id:
for other_testcase in testcase_map.values():
if other_testcase.group_id != updated_group_id:
continue
updated_group_id_count += 1
# Update group issue id to be lowest issue id in the entire group.
if other_testcase.issue_id is None:
continue
if (not updated_group_bug_information or
updated_group_bug_information > other_testcase.issue_id):
updated_group_bug_information = other_testcase.issue_id
# If this group id is used by only one testcase, then remove it.
if updated_group_id_count == 1:
data_handler.delete_group(updated_group_id, update_testcases=False)
updated_group_id = 0
updated_group_bug_information = 0
updated_is_leader = True
try:
testcase = data_handler.get_testcase_by_id(testcase_id)
except errors.InvalidTestcaseError:
# Already deleted.
continue
is_changed = (
(testcase.group_id != updated_group_id) or
(testcase.group_bug_information != updated_group_bug_information) or
(testcase.is_leader != updated_is_leader))
if not testcase.get_metadata('ran_grouper'):
testcase.set_metadata('ran_grouper', True, update_testcase=not is_changed)
if not is_changed:
continue
testcase.group_bug_information = updated_group_bug_information
testcase.group_id = updated_group_id
testcase.is_leader = updated_is_leader
testcase.put()
logs.log(
'Updated testcase %d group to %d.' % (testcase_id, updated_group_id)) |
Return the index of the first item whose condition_fn is True. | def find_index(items, condition_fn):
"""Return the index of the first item whose condition_fn is True."""
for index, item in enumerate(items):
if condition_fn(item):
return index
return None |
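# --- Illustrative sketch (not part of the original module) ---
# Usage example for the helper above, assuming it is in scope.
assert find_index([3, 8, 15, 4], lambda x: x > 10) == 2
assert find_index([3, 8], lambda x: x > 10) is None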
Return True if the testcase is reproducible by checking the
one_time_crasher_flag. | def is_reproducible(item):
"""Return True if the testcase is reproducible by checking the
one_time_crasher_flag."""
return not item.one_time_crasher_flag |
Return True if the testcase has an issue. | def has_issue(item):
"""Return True if the testcase has an issue."""
return bool(item.issue_id) |
Return True if the testcase is reproducible and has an issue. | def is_reproducible_and_has_issue(item):
"""Return True if the testcase is reproducible and has an issue."""
return is_reproducible(item) and has_issue(item) |
Choose one leader for each group. We choose the highest quality testcase to
be the leader.
Args:
testcase_map: a dict of (testcase_id, testcase). A dict contains testcases
from multiple groups. | def choose(testcase_map):
"""Choose one leader for each group. We choose the highest quality testcase to
be the leader.
Args:
testcase_map: a dict of (testcase_id, testcase). A dict contains testcases
from multiple groups.
"""
scores = {}
def _key_func(testcase):
return testcase.group_id
def _get_score(testcase_id):
return scores[testcase_id]
testcases = sorted([v for _, v in testcase_map.items()], key=_key_func)
for group_id, items in itertools.groupby(testcases, _key_func):
if group_id == 0: # group_id=0 means there's no group.
continue
items = sorted(items, reverse=True, key=lambda t: t.timestamp)
for item in items:
item.is_leader = False
item_score = 0
if item.security_flag:
item_score += 1
if item.job_type and '_asan_' in item.job_type:
item_score += 1
if item.job_type and not environment.is_i386(item.job_type):
item_score += 1
scores[item.id] = item_score
items = sorted(items, reverse=True, key=lambda t: _get_score(t.id))
leader_index = find_index(items, is_reproducible_and_has_issue)
if leader_index is None:
leader_index = find_index(items, has_issue)
if leader_index is None:
leader_index = find_index(items, is_reproducible)
if leader_index is None:
leader_index = 0
items[leader_index].is_leader = True |
Return datetime.datetime.utcnow(). | def _utc_now():
"""Return datetime.datetime.utcnow()."""
return datetime.datetime.utcnow() |
Executes a table/dataset insert request, retrying on transport errors. | def _execute_insert_request(request):
"""Executes a table/dataset insert request, retrying on transport errors."""
for i in range(NUM_RETRIES + 1):
try:
request.execute()
return True
except HttpError as e:
if e.resp.status == 409:
# Already exists.
return True
logs.log_error('Failed to insert table/dataset.')
return False
except httplib2.HttpLib2Error:
# Transport error.
time.sleep(random.uniform(0, (1 << i) * RETRY_SLEEP_TIME))
continue
logs.log_error('Failed to insert table/dataset.')
return False |
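# --- Illustrative sketch (not part of the original module) ---
# Upper bound of the randomized exponential backoff used above, assuming a
# hypothetical RETRY_SLEEP_TIME of 1 second.
RETRY_SLEEP_TIME = 1  # assumed value, in seconds

for i in range(4):
  # Attempt i sleeps a uniform random amount in [0, (1 << i) * RETRY_SLEEP_TIME).
  print('attempt %d: backoff upper bound = %d s' % (i, (1 << i) * RETRY_SLEEP_TIME))
# attempt 0: 1 s, attempt 1: 2 s, attempt 2: 4 s, attempt 3: 8 s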
Create a new dataset if necessary. | def _create_dataset_if_needed(bigquery, dataset_id):
"""Create a new dataset if necessary."""
project_id = utils.get_application_id()
dataset_body = {
'datasetReference': {
'datasetId': dataset_id,
'projectId': project_id,
},
}
dataset_insert = bigquery.datasets().insert(
projectId=project_id, body=dataset_body)
return _execute_insert_request(dataset_insert) |