Get the current request. | def get_current_request():
"""Get the current request."""
return flask.request |
Get the cache backing for saving current context data. | def get_cache_backing():
"""Get the cache backing for saving current context data."""
return flask.g |
Wraps a function to use the per-request cache. | def wrap(capacity):
"""Wraps a function to use the per-request cache."""
def decorator(func):
"""Decorator function."""
engine = _FifoRequestCache(id(func), capacity)
return memoize.wrap(engine)(func)
return decorator |
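A minimal usage sketch of this decorator; the capacity, function, and _fetch_settings helper below are hypothetical:

@wrap(capacity=32)
def get_project_settings(project_name):
  # Computed at most once per request for each distinct project_name; the
  # FIFO engine evicts the oldest entries beyond the given capacity.
  return _fetch_settings(project_name)  # hypothetical expensive lookup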
Return a function that gets the named attribute of an item, for use as a sort key. | def _get_key_fn(attribute_name):
"""Return a function that gets the named attribute of an item, for use as a sort key."""
def get_key(item):
return getattr(item, attribute_name)
return get_key |
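For a single attribute name this is functionally equivalent to operator.attrgetter, so a sort like the following behaves the same either way (items is a hypothetical list of objects):

import operator

sorted(items, key=_get_key_fn('timestamp'))
# Behaves the same as:
sorted(items, key=operator.attrgetter('timestamp'))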
Compute the projection, ensuring the order property is included. | def compute_projection(projection, order_property):
"""Compute the projection, ensuring the order property is included."""
if projection is None:
return None
combined_projection = set(projection)
combined_projection.add(order_property)
return list(combined_projection) |
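A quick sketch of the behavior; since a set is used internally, the result order is unspecified and duplicates collapse:

compute_projection(['name', 'status'], 'timestamp')
# -> ['name', 'status', 'timestamp'] (in some order)
compute_projection(None, 'timestamp')
# -> None (no projection requested)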
Combine KeyQuery q1 and q2. We ignore or_filters because we assume q1 and
q2 are flat. In other words, they are results of _KeyQuery.flatten(..). | def _combine(q1, q2):
"""Combine KeyQuery q1 and q2. We ignore or_filters because we assume q1 and
q2 are flat. In other words, they are results of _KeyQuery.flatten(..)."""
assert not q1.or_filters
assert not q2.or_filters
assert q1.order_property == q2.order_property
assert q1.order_desc == q2.order_desc
result = _KeyQuery(q1.model)
result.filters = q1.filters + q2.filters
result.order_property = q1.order_property
result.order_desc = q1.order_desc
return result |
Register a fuzzing engine. | def register(name, engine_class):
"""Register a fuzzing engine."""
if name in _ENGINES:
raise ValueError('Engine {name} is already registered'.format(name=name))
_ENGINES[name] = engine_class |
Gets an implementation of a fuzzing engine, or None if one does not
exist. | def get(name):
"""Gets an implementation of a fuzzing engine, or None if one does not
exist."""
engine_class = _ENGINES.get(name)
if engine_class:
return engine_class()
return None |
Initialize the engine implementations. | def _initialize():
"""Initialize the engine implementations."""
global _initialized
init.run(include_private=False, include_lowercase=True)
_initialized = True |
Get the engine with the given name. | def get_engine(name):
"""Get the engine with the given name."""
if not _initialized:
_initialize()
engine_impl = engine.get(name)
engine_impl.do_strategies = False
return engine_impl |
Returns whether |file_path| is a fuzz target. | def is_fuzz_target(file_path, file_handle=None):
"""Returns whether |file_path| is a fuzz target."""
return utils.is_fuzz_target_local(file_path, file_handle) |
Returns the list of fuzz targets in |directory|. | def get_fuzz_targets(directory):
"""Returns the list of fuzz targets in |directory|."""
return utils.get_fuzz_targets_local(directory) |
Filter stack frame. | def _filter_stack_frame(stack_frame):
"""Filter stack frame."""
# Filter out anonymous namespaces.
anonymous_namespaces = [
'non-virtual thunk to ',
'(anonymous namespace)::',
'`anonymous namespace\'::',
]
for ns in anonymous_namespaces:
stack_frame = stack_frame.replace(ns, '')
# Rsplit around '!'.
stack_frame = stack_frame.split('!')[-1]
# Lsplit around '(', '['.
m = re.match(r'(.*?)[\(\[].*', stack_frame)
if m and m.group(1):
return m.group(1).strip()
# Lsplit around ' '.
stack_frame = stack_frame.strip().split(' ')[0]
return stack_frame |
Return a normalized string without unique addresses and numbers. | def filter_addresses_and_numbers(stack_frame):
"""Return a normalized string without unique addresses and numbers."""
# Remove offset part from end of every line. Note that |re.DOTALL| must be
# passed via |flags|; passed positionally it would silently set |count|.
result = re.sub(r'\+0x[0-9a-fA-F]+\n', '\n', stack_frame, flags=re.DOTALL)
# Replace sections that appear to be addresses with the string "ADDRESS".
address_expression = r'0x[a-fA-F0-9]{4,}[U]*'
address_replacement = r'ADDRESS'
result = re.sub(address_expression, address_replacement, result)
# Replace sections that appear to be numbers with the string "NUMBER".
# Cases that we are avoiding:
# - source.cc:1234
# - libsomething-1.0.so (to avoid things like NUMBERso in replacements)
number_expression = r'(^|[^:0-9.])[0-9.]{4,}($|[^A-Za-z0-9.])'
number_replacement = r'\1NUMBER\2'
return re.sub(number_expression, number_replacement, result) |
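Some hypothetical inputs illustrating the normalization performed by the regexes above:

filter_addresses_and_numbers('READ of size 4 at 0xdeadbeef thread T0')
# -> 'READ of size 4 at ADDRESS thread T0' (short numbers like 4 are kept)
filter_addresses_and_numbers('value was 121212 here')
# -> 'value was NUMBER here'
filter_addresses_and_numbers('source.cc:1234')
# -> 'source.cc:1234' (line numbers after a colon are deliberately kept)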
Check to see if a line should be displayed in a report, but ignored when
processing crashes. | def should_ignore_line_for_crash_processing(line, state):
"""Check to see if a line should be displayed in a report, but ignored when
processing crashes."""
# If we detected that the process had died, we won't use any further stack
# frames to make decision on crash parameters.
if state.process_died:
return True
# Ignore console information messages, as they are not relevant to crash
# parameters parsing.
if ':INFO:CONSOLE' in line:
return True
# Ignore summary lines.
if 'SUMMARY:' in line:
return True
# Ignore warnings from ASan, but not other sanitizer tools.
if 'WARNING: AddressSanitizer' in line:
return True
# Exclusion for mprotect warning on address 0x00010000. This is a harmless
# coverage buffer size warning, and is fixed in clang r234602.
if 'failed to mprotect 0x00010000' in line:
return True
# Ignore certain lines printed by dump render tree.
if 'text run at (' in line:
return True
# Ignore this unneeded JNI abort error message since it will be followed by
# the needed stacktrace later.
if 'Please include Java exception stack in crash report' in line:
return True
# Ignore DEADLYSIGNAL lines from sanitizers.
if SAN_DEADLYSIGNAL_REGEX.match(line):
return True
return False |
Ensure that Sanitizer crashes use generic formats. | def fix_sanitizer_crash_type(crash_type):
"""Ensure that Sanitizer crashes use generic formats."""
# General normalization.
crash_type = crash_type.lower().replace('_', '-').capitalize()
# Use more generic types for certain Sanitizer ones.
crash_type = crash_type.replace('Int-divide-by-zero', 'Divide-by-zero')
return crash_type |
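For example:

fix_sanitizer_crash_type('HEAP_BUFFER_OVERFLOW')  # -> 'Heap-buffer-overflow'
fix_sanitizer_crash_type('INT_DIVIDE_BY_ZERO')    # -> 'Divide-by-zero'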
Convert a Windows CDB crash type into ASAN like format. | def fix_win_cdb_crash_type(crash_type):
"""Convert a Windows CDB crash type into ASAN like format."""
# Strip application verifier string from crash type suffix.
crash_type = utils.strip_from_right(crash_type, '_AVRF')
# Standardize crash type with lowercase, hyphens and capitalization.
crash_type = crash_type.replace('_', '-').lower().capitalize()
# Change crash type to other common types.
crash_type = crash_type.replace('Status-integer-overflow', 'Integer-overflow')
crash_type = crash_type.replace('Status-integer-divide-by-zero',
'Divide-by-zero')
return crash_type |
Clean up values that should not be included in CHECK failure strings. | def fix_check_failure_string(failure_string):
"""Clean up values that should not be included in CHECK failure strings."""
# Remove |CHECK_FAILURE_PATTERN| from start of failure string.
failure_string = utils.strip_from_left(failure_string, CHECK_FAILURE_PATTERN)
# Handle cases like "CHECK_EQ( (unsigned)ptr[0],1u) failed: 25 vs. 1".
# This only happens on Android, where we cannot strip the
# CHECK_FAILURE_PATTERN, so we look for "failed:" as the preceding string.
failure_string = re.sub(r'(?<=failed): .*\svs\.\s.*$', r'', failure_string)
# Handle cases like "len > 0 (-1 vs. 0)".
failure_string = re.sub(r' \(.*\s+vs\.\s+.*', r'', failure_string)
# Handle cases like ": '....'", '= "..."', etc.
failure_string = re.sub(r'\s*[:=]\s*([\'"]).*\1$', r'', failure_string)
# Strip unneeded chars at end.
return failure_string.strip(' .\'"[]') |
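Hypothetical examples, assuming CHECK_FAILURE_PATTERN has already been stripped from the input:

fix_check_failure_string('len > 0 (-1 vs. 0)')     # -> 'len > 0'
fix_check_failure_string("ptr != nullptr: 'abc'")  # -> 'ptr != nullptr'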
Fix filename string to remove line number, path and other invalid chars. | def fix_filename_string(filename_string):
"""Fix filename string to remove line number, path and other invalid chars."""
# Remove invalid chars at ends first.
filename_string = filename_string.strip(' .\'"[]')
# Remove the source line number information.
filename_string = filename_string.split(':')[0].split('(')[0]
# Replace backslashes with forward slashes for platform consistency.
filename_string = filename_string.replace('\\', '/')
# Remove the path information.
filename_string = os.path.basename(filename_string)
return filename_string |
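A couple of hypothetical inputs showing the effect:

fix_filename_string("'src/foo/bar.cc:123'")  # -> 'bar.cc'
fix_filename_string('src\\foo\\baz.cc(42)')  # -> 'baz.cc'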
Return human readable fault description based on numeric FSR value. | def get_fault_description_for_android_kernel(code):
"""Return human readable fault description based on numeric FSR value."""
# Convert code from string to number.
try:
code = int(code, 16)
except (ValueError, TypeError):
return 'BUG'
# Figure out whether this is an out-of-bounds read or write.
if code & 0x800 == 0:
fault = 'READ'
else:
fault = 'WRITE'
fault += ' '
# The full status code is bits 12, 10, and 0-3, but we're ignoring 12 and 10.
status = code & 0b1111
try:
fault += ANDROID_KERNEL_STATUS_TO_STRING[status]
except KeyError:
fault += 'Unknown'
fault += ' (%s)' % str(code)
return 'Kernel failure\n' + fault |
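A worked example of the bit arithmetic, using a hypothetical FSR value of '805':

code = int('805', 16)                        # 0x805
fault = 'WRITE' if code & 0x800 else 'READ'  # bit 11 set -> 'WRITE'
status = code & 0b1111                       # low four bits -> 5
# status is then looked up in ANDROID_KERNEL_STATUS_TO_STRING, falling back
# to 'Unknown' for unmapped values.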
Filter a KASan crash type. | def filter_kasan_crash_type(crash_type):
"""Filter a KASan crash type."""
return 'Kernel failure\n%s' % crash_type.replace(' ', '-').capitalize() |
Filter a kernel panic crash type. | def filter_kernel_panic_crash_type(crash_type):
"""Filter a kernel panic crash type."""
return 'Kernel failure\n%s' % crash_type.replace(' ', '-') |
For stack-overflow bugs, updates crash state based on cycle detected. | def update_crash_state_for_stack_overflow_if_needed(state):
"""For stack-overflow bugs, updates crash state based on cycle detected."""
if state.crash_type != 'Stack-overflow':
return
num_frames = len(state.raw_frames)
for frame_index in range(num_frames):
for cycle_length in range(1, MAX_CYCLE_LENGTH + 1):
# Create frame potential cycles of a given length starting from
# |frame_index|.
frame_potential_cycles = []
end_reached = False
for i in range(0, REPEATED_CYCLE_COUNT):
start_index = frame_index + i * cycle_length
end_index = frame_index + (i + 1) * cycle_length
if end_index >= num_frames:
end_reached = True
break
frame_potential_cycles.append(state.raw_frames[start_index:end_index])
if end_reached:
# Reached end while trying to find cycle, skip iteration.
continue
# Check if all the potential_cycles are equal. If yes, we found a cycle.
potential_cycles_are_equal = all(
frame_potential_cycle == frame_potential_cycles[0]
for frame_potential_cycle in frame_potential_cycles)
# Update crash state based on cycle detected.
if potential_cycles_are_equal:
state.crash_state = '\n'.join(
frame_potential_cycles[0][:MAX_CRASH_STATE_FRAMES]) + '\n'
return |
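A sketch of the cycle detection on hypothetical frames, assuming REPEATED_CYCLE_COUNT is 3:

state.raw_frames = ['main', 'parse', 'eval', 'parse', 'eval', 'parse', 'eval', 'gc']
# At frame_index=1 with cycle_length=2, the three candidate cycles are all
# ['parse', 'eval'], so they compare equal and the crash state becomes
# 'parse\neval\n' (truncated to MAX_CRASH_STATE_FRAMES frames).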
Override frame matching for LLVMFuzzerTestOneInput frames. | def llvm_test_one_input_override(frame, frame_struct):
"""Override frame matching for LLVMFuzzerTestOneInput frames."""
if not frame.startswith('LLVMFuzzerTestOneInput'):
return frame
if frame_struct and frame_struct.filename:
# Use the filename as the frame instead.
return frame.replace(
'LLVMFuzzerTestOneInput',
os.path.basename(frame_struct.filename.replace('\\', '/')))
return frame |
Extract a Python stacktrace.
Python stacktraces are a bit special: they are reversed,
and followed by a sanitizer one, so we need to extract them, reverse them,
and put their "title" back on top. | def reverse_python_stacktrace(stacktrace):
"""Extract a Python stacktrace.
Python stacktraces are a bit special: they are reversed,
and followed by a sanitizer one, so we need to extract them, reverse them,
and put their "title" back on top."""
python_stacktrace_split = []
in_python_stacktrace = False
for line in stacktrace:
# Locate the beginning of the python stacktrace.
if not in_python_stacktrace:
for regex, _ in PYTHON_CRASH_TYPES_MAP:
if regex.match(line):
in_python_stacktrace = True
python_stacktrace_split = [line] # Add the "title" of the stacktrace
break
else:
# Locate beginning of the sanitizer stacktrace.
if '=========' in line or '== ERROR: ' in line:
break
python_stacktrace_split.insert(1, line)
return python_stacktrace_split |
For KASan crashes, additional information about a bad access may exist. | def update_kasan_crash_details(state, line):
"""For KASan crashes, additional information about a bad access may exist."""
if state.crash_type.startswith('Kernel failure'):
kasan_access_match = KASAN_ACCESS_TYPE_ADDRESS_REGEX.match(line)
if kasan_access_match:
if not state.crash_address:
state.crash_address = '0x%s' % kasan_access_match.group(4)
else:
kasan_access_match = KASAN_ACCESS_TYPE_REGEX.match(line)
if kasan_access_match:
state.crash_type += '\n%s %s' % (kasan_access_match.group(1).upper(),
kasan_access_match.group(2)) |
Load test data. | def _load_test_data(path):
"""Load test data."""
with open(os.path.join(TEST_DATA_DIR, path)) as f:
return f.read() |
Get the Pub/Sub topic for publishing tasks. | def _get_topic():
"""Get the Pub/Sub topic for publishing tasks."""
return local_config.ProjectConfig().get('bisect_service.pubsub_topic') |
Notify the bisection infrastructure of a testcase getting into invalid
state. | def notify_bisection_invalid(testcase):
"""Notify the bisection infrastructure of a testcase getting into invalid
state."""
pubsub_topic = _get_topic()
if not pubsub_topic:
return
pubsub_client = pubsub.PubSubClient()
pubsub_client.publish(pubsub_topic, [
pubsub.Message(b'', {
'type': 'invalid',
'testcase_id': str(testcase.key.id()),
})
]) |
Request precise bisection. | def request_bisection(testcase):
"""Request precise bisection."""
pubsub_topic = _get_topic()
if not pubsub_topic:
return
# Only request bisects for reproducible security bugs with a bug filed, found
# by engine fuzzers.
if not testcase.security_flag:
return
if testcase.fixed == 'NA':
# Testcase got into an invalid state.
notify_bisection_invalid(testcase)
return
if testcase.one_time_crasher_flag:
return
if not testcase.bug_information:
return
target = testcase.get_fuzz_target()
if not target:
return
# Only make 1 request of each type per testcase.
if (not testcase.get_metadata('requested_regressed_bisect') and
_make_bisection_request(pubsub_topic, testcase, target, 'regressed')):
testcase.set_metadata('requested_regressed_bisect', True)
if (not testcase.get_metadata('requested_fixed_bisect') and
_make_bisection_request(pubsub_topic, testcase, target, 'fixed')):
testcase.set_metadata('requested_fixed_bisect', True) |
Check old and new commit validity. | def _check_commits(testcase, bisect_type, old_commit, new_commit):
"""Check old and new commit validity."""
if old_commit != new_commit or build_manager.is_custom_binary():
return old_commit, new_commit
# Something went wrong during bisection if the same commit was chosen for
# both the start and the end of the range.
# Get the bisection infrastructure to re-bisect.
if environment.is_running_on_app_engine():
bucket_path = data_handler.get_value_from_job_definition(
testcase.job_type, 'RELEASE_BUILD_BUCKET_PATH')
else:
bucket_path = build_manager.get_primary_bucket_path()
# TODO(https://github.com/google/clusterfuzz/issues/3008): This cannot be done
# on a uworker, move it to preprocess.
bad_revisions = build_manager.get_job_bad_revisions()
revision_list = build_manager.get_revisions_list(bucket_path, bad_revisions)
last_tested_revision = testcase.get_metadata('last_tested_crash_revision')
known_crash_revision = last_tested_revision or testcase.crash_revision
if bisect_type == 'fixed':
# Narrowest range: last crashing revision up to the latest build.
return _get_commits(
str(known_crash_revision) + ':' + str(revision_list[-1]),
testcase.job_type)
if bisect_type == 'regressed':
# Narrowest range: first build to the first crashing revision.
return _get_commits(
str(revision_list[0]) + ':' + str(testcase.crash_revision),
testcase.job_type)
raise ValueError('Invalid bisection type: ' + bisect_type) |
Make a bisection request to the external bisection service. Returns whether
or not a request was actually made. | def _make_bisection_request(pubsub_topic, testcase, target, bisect_type):
"""Make a bisection request to the external bisection service. Returns whether
or not a request was actually made."""
if bisect_type == 'fixed':
old_commit, new_commit = _get_commits(testcase.fixed, testcase.job_type)
elif bisect_type == 'regressed':
old_commit, new_commit = _get_commits(testcase.regression,
testcase.job_type)
else:
raise ValueError('Invalid bisection type: ' + bisect_type)
if not new_commit:
# old_commit can be empty (i.e. '0' case), but new_commit should never be.
return False
old_commit, new_commit = _check_commits(testcase, bisect_type, old_commit,
new_commit)
repo_url = data_handler.get_main_repo(testcase.job_type) or ''
reproducer = blobs.read_key(testcase.minimized_keys or testcase.fuzzed_keys)
pubsub_client = pubsub.PubSubClient()
pubsub_client.publish(pubsub_topic, [
pubsub.Message(
reproducer, {
'type':
bisect_type,
'project_name':
target.project,
'sanitizer':
environment.SANITIZER_NAME_MAP[
environment.get_memory_tool_name(testcase.job_type)
],
'fuzz_target':
target.binary,
'old_commit':
old_commit,
'new_commit':
new_commit,
'testcase_id':
str(testcase.key.id()),
'issue_id':
testcase.bug_information,
'crash_type':
testcase.crash_type,
'crash_state':
testcase.crash_state,
'security':
str(testcase.security_flag),
'severity':
severity_analyzer.severity_to_string(
testcase.security_severity),
'timestamp':
testcase.timestamp.isoformat(),
'repo_url':
repo_url,
})
])
return True |
Get commits from range. | def _get_commits(commit_range, job_type):
"""Get commits from range."""
if not commit_range or commit_range == 'NA':
return None, None
start, end = revisions.get_start_and_end_revision(commit_range)
components = revisions.get_component_range_list(start, end, job_type)
if not components:
return None, None
commits = components[0]['link_text']
if ':' not in commits:
return commits, commits
old_commit, new_commit = commits.split(':')
if old_commit == '0':
old_commit = ''
return old_commit, new_commit |
Initializes timezone for date functions based on environment. | def initialize_timezone_from_environment():
"""Initializes timezone for date functions based on environment."""
plt = environment.platform()
if plt == 'WINDOWS':
return
# Only available on Unix platforms.
time.tzset() |
Checks to see if a timestamp is older than another by a certain amount. | def time_has_expired(timestamp,
compare_to=None,
days=0,
hours=0,
minutes=0,
seconds=0):
"""Checks to see if a timestamp is older than another by a certain amount."""
if compare_to is None:
compare_to = utils.utcnow()
total_time = days * 3600 * 24 + hours * 3600 + minutes * 60 + seconds
return (compare_to - timestamp).total_seconds() > total_time |
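A quick usage sketch:

two_hours_ago = utils.utcnow() - datetime.timedelta(hours=2)
time_has_expired(two_hours_ago, minutes=90)  # True: older than 90 minutes
time_has_expired(two_hours_ago, days=1)      # False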
Returns whether the error exists in the error list. | def error_in_list(error_stacktrace, error_list):
"""Returns whether the error exists in the error list."""
# Change all strings to lowercase for comparison.
error_stacktrace = error_stacktrace.lower()
error_list = [error.lower() for error in error_list]
for error in error_list:
if error in error_stacktrace:
return True
return False |
Add external task. | def add_external_task(command, testcase_id, job):
"""Add external task."""
if command != 'progression':
# Only progression is supported.
return
pubsub_client = pubsub.PubSubClient()
topic_name = job.external_reproduction_topic
assert topic_name is not None
testcase = data_handler.get_testcase_by_id(testcase_id)
fuzz_target = testcase.get_fuzz_target()
memory_tool_name = environment.get_memory_tool_name(job.name)
sanitizer = environment.SANITIZER_NAME_MAP.get(memory_tool_name)
job_environment = job.get_environment()
if job_environment.get('CUSTOM_BINARY'):
raise RuntimeError('External jobs should never have custom binaries.')
build_path = (
job_environment.get('RELEASE_BUILD_BUCKET_PATH') or
job_environment.get('FUZZ_TARGET_BUILD_BUCKET_PATH'))
if build_path is None:
raise RuntimeError(f'{job.name} has no build path defined.')
min_revision = (
testcase.get_metadata('last_tested_revision') or testcase.crash_revision)
logs.log(f'Publishing external reproduction task for {testcase_id}.')
attributes = {
'project': job.project,
'target': fuzz_target.binary,
'fuzzer': testcase.fuzzer_name,
'sanitizer': sanitizer,
'job': job.name,
'testcaseId': str(testcase_id),
'buildPath': build_path,
'minRevisionAbove': str(min_revision),
'numTrials': str(_NUM_TRIALS),
}
reproducer = blobs.read_key(testcase.fuzzed_keys)
message = pubsub.Message(data=reproducer, attributes=attributes)
pubsub_client.publish(topic_name, [message]) |
Return all fuzzers that have the job associated.
Args:
job_type: The job type.
include_parents: Include the parent fuzzer.
Returns:
A list of fuzzer names. | def _fuzzers_for_job(job_type, include_parents):
"""Return all fuzzers that have the job associated.
Args:
job_type: The job type.
include_parents: Include the parent fuzzer.
Returns:
A list of fuzzer names.
"""
fuzzers = []
engine_fuzzers = data_handler.get_fuzzing_engines()
for fuzzer in data_types.Fuzzer.query():
if job_type not in fuzzer.jobs:
continue
# Add this if we're including all parents or this is not an engine fuzzer
# with fuzz targets.
if include_parents or fuzzer.name not in engine_fuzzers:
fuzzers.append(fuzzer.name)
for target_job in fuzz_target_utils.get_fuzz_target_jobs(job=job_type):
fuzzers.append(target_job.fuzz_target_name)
return sorted(fuzzers) |
Expand the given prefix into real entity names.
Args:
all_names: A list of all entity names.
prefix: A prefix string.
Returns:
A list of entity names that the pattern expands to. | def _expand_prefix(all_names, prefix):
"""Expand the given prefix into real entity names.
Args:
all_names: A list of all entity names.
prefix: A prefix string.
Returns:
A list of entity names that the pattern expands to.
"""
return [name for name in all_names if name.startswith(prefix)] |
Get a permissions query for a given user.
Args:
user_email: The email of the user.
entity_kind: The type (data_types.PermissionEntityKind) of the permission to
filter by, or None.
Returns:
A ndb.Query giving the permissions for the given parameters. | def _get_permissions_query_for_user(user_email, entity_kind=None):
"""Get a permissions query for a given user.
Args:
user_email: The email of the user.
entity_kind: The type (data_types.PermissionEntityKind) of the permission to
filter by, or None.
Returns:
A ndb.Query giving the permissions for the given parameters.
"""
permissions_for_user = data_types.ExternalUserPermission.query(
data_types.ExternalUserPermission.email == utils.normalize_email(
user_email))
if entity_kind is not None:
permissions_for_user = permissions_for_user.filter(
data_types.ExternalUserPermission.entity_kind == entity_kind)
return permissions_for_user |
Return the entity names that the given user can access.
Args:
user_email: The email of the user.
entity_kind: The type (data_types.PermissionEntityKind) of the entity.
Returns:
A list of entity names that the user has access to. | def _allowed_entities_for_user(user_email, entity_kind):
"""Return the entity names that the given user can access.
Args:
user_email: The email of the user.
entity_kind: The type (data_types.PermissionEntityKind) of the entity.
Returns:
A list of entity names that the user has access to.
"""
if not user_email:
return []
allowed = []
permissions = _get_permissions_query_for_user(user_email, entity_kind)
if entity_kind == data_types.PermissionEntityKind.FUZZER:
all_names = data_handler.get_all_fuzzer_names_including_children()
else:
all_names = data_handler.get_all_job_type_names()
for permission in permissions:
if permission.is_prefix:
allowed.extend(_expand_prefix(all_names, permission.entity_name))
elif permission.entity_name in all_names:
allowed.append(permission.entity_name)
return sorted(allowed) |
Return whether the given user has access to the entity.
Args:
user_email: The email of the user.
name: The name of the entity.
entity_kind: The type of the entity.
Returns:
A bool indicating whether the given user has access to the entity. | def _is_entity_allowed_for_user(user_email, name, entity_kind):
"""Return whether if the given user has access to the entity.
Args:
user_email: The email of the user.
name: The name of the entity.
entity_kind: The type of the entity.
Returns:
A bool indicating whether the given user has access to the entity.
"""
if not user_email or not name:
return False
permissions = _get_permissions_query_for_user(user_email, entity_kind)
for permission in permissions:
if permission.is_prefix:
if name.startswith(permission.entity_name):
return True
elif permission.entity_name == name:
return True
return False |
Return a list of users that have permissions for the given entity.
Args:
name: The name of the entity.
entity_kind: The type (data_types.PermissionEntityKind) of the entity.
auto_cc: The Auto CC type (data_types.AutoCCType) to filter on, or None.
Returns:
A list of user emails that have permission to access the given entity. | def _allowed_users_for_entity(name, entity_kind, auto_cc=None):
"""Return a list of users that have permissions for the given entity.
Args:
name: The name of the entity.
entity_kind: The type (data_types.PermissionEntityKind) of the entity.
auto_cc: The Auto CC type (data_types.AutoCCType) to filter on, or None.
Returns:
A list of user emails that have permission to access the given entity.
"""
if not name:
return []
# Easy case: direct matches.
direct_match_permissions = data_types.ExternalUserPermission.query(
data_types.ExternalUserPermission.entity_kind == entity_kind,
data_types.ExternalUserPermission.entity_name == name,
ndb_utils.is_false(data_types.ExternalUserPermission.is_prefix),
projection=[data_types.ExternalUserPermission.email])
if auto_cc is not None:
direct_match_permissions = direct_match_permissions.filter(
data_types.ExternalUserPermission.auto_cc == auto_cc)
allowed_users = [permission.email for permission in direct_match_permissions]
# Find all permissions where the prefix matches the entity name.
# Unfortunately, Datastore doesn't give us an easy way of doing so. To
# iterate through a smaller set than every single permission, get all
# permissions whose prefix string is <= the actual entity name and >= its
# first character.
prefix_match_permissions = data_types.ExternalUserPermission.query(
data_types.ExternalUserPermission.entity_kind == entity_kind,
data_types.ExternalUserPermission.entity_name <= name,
data_types.ExternalUserPermission.entity_name >= name[0],
ndb_utils.is_true(data_types.ExternalUserPermission.is_prefix),
projection=[
data_types.ExternalUserPermission.email,
data_types.ExternalUserPermission.entity_name
])
if auto_cc is not None:
prefix_match_permissions = prefix_match_permissions.filter(
data_types.ExternalUserPermission.auto_cc == auto_cc)
for permission in prefix_match_permissions:
if not permission.entity_name:
# No external user should have an empty prefix (access to all
# fuzzers/jobs).
continue
if name.startswith(permission.entity_name):
allowed_users.append(permission.email)
return sorted(allowed_users) |
Return CC users for entity. | def _cc_users_for_entity(name, entity_type, security_flag):
"""Return CC users for entity."""
users = _allowed_users_for_entity(name, entity_type,
data_types.AutoCCType.ALL)
if security_flag:
users.extend(
_allowed_users_for_entity(name, entity_type,
data_types.AutoCCType.SECURITY))
return sorted(users) |
Return allowed fuzzers for the given user.
Args:
user_email: The email of the user.
include_from_jobs: Include all fuzzers for the allowed jobs of the user.
include_parents: Include parent fuzzers when there is no explicit permission
for the parent fuzzer, but there are permissions for its children as a
result of the user's job permissions. Only applies when
include_from_jobs is set.
Returns:
A list of fuzzer names for which this user is allowed to view information
about. | def allowed_fuzzers_for_user(user_email,
include_from_jobs=False,
include_parents=False):
"""Return allowed fuzzers for the given user.
Args:
user_email: The email of the user.
include_from_jobs: Include all fuzzers for the allowed jobs of the user.
include_parents: Include parent fuzzers when there is no explicit permission
for the parent fuzzer, but there are permissions for its children as a
result of the user's job permissions. Only applies when
include_from_jobs is set.
Returns:
A list of fuzzer names for which this user is allowed to view information
about.
"""
allowed_fuzzers = _allowed_entities_for_user(
user_email, data_types.PermissionEntityKind.FUZZER)
if include_from_jobs:
allowed_jobs = allowed_jobs_for_user(user_email)
for allowed_job in allowed_jobs:
allowed_fuzzers.extend(_fuzzers_for_job(allowed_job, include_parents))
allowed_fuzzers = list(set(allowed_fuzzers))
return sorted(allowed_fuzzers) |
Return allowed jobs for the given user.
Args:
user_email: The email of the user.
Returns:
A list of job names for which this user is allowed to view information
about. | def allowed_jobs_for_user(user_email):
"""Return allowed jobs for the given user.
Args:
user_email: The email of the user.
Returns:
A list of job names for which this user is allowed to view information
about.
"""
return _allowed_entities_for_user(user_email,
data_types.PermissionEntityKind.JOB) |
Return allowed external users for the given fuzzer.
Args:
fuzzer_name: The name of the fuzzer.
Returns:
A list of user emails that are allowed to view information relating to this
fuzzer. | def allowed_users_for_fuzzer(fuzzer_name):
"""Return allowed external users for the given fuzzer.
Args:
fuzzer_name: The name of the fuzzer.
Returns:
A list of user emails that are allowed to view information relating to this
fuzzer.
"""
# TODO(ochang): Once we support jobs, take that into account.
return _allowed_users_for_entity(fuzzer_name,
data_types.PermissionEntityKind.FUZZER) |
Return external users that should be CC'ed according to the given rule.
Args:
fuzzer_name: The name of the fuzzer.
security_flag: Whether or not the CC is for a security issue.
Returns:
A list of user emails that should be CC'ed. | def cc_users_for_fuzzer(fuzzer_name, security_flag):
"""Return external users that should be CC'ed according to the given rule.
Args:
fuzzer_name: The name of the fuzzer.
security_flag: Whether or not the CC is for a security issue.
Returns:
A list of user emails that should be CC'ed.
"""
return _cc_users_for_entity(
fuzzer_name, data_types.PermissionEntityKind.FUZZER, security_flag) |
Return whether the given user has access to the fuzzer.
Args:
user_email: The email of the user.
fuzzer_name: The name of the fuzzer.
include_from_jobs: Include all fuzzers for the allowed jobs of the user.
Returns:
A bool indicating whether the given user has access to the fuzzer. | def is_fuzzer_allowed_for_user(user_email, fuzzer_name,
include_from_jobs=False):
"""Return whether if the given user has access to the fuzzer.
Args:
user_email: The email of the user.
fuzzer_name: The name of the fuzzer.
include_from_jobs: Include all fuzzers for the allowed jobs of the user.
Returns:
A bool indicating whether the given user has access to the fuzzer.
"""
is_allowed = _is_entity_allowed_for_user(
user_email, fuzzer_name, data_types.PermissionEntityKind.FUZZER)
if not is_allowed and include_from_jobs:
is_allowed = fuzzer_name in allowed_fuzzers_for_user(
user_email, include_from_jobs=True)
return is_allowed |
Return whether the given user has access to the job.
Args:
user_email: The email of the user.
job_type: The name of the job.
Returns:
A bool indicating whether the given user has access to the job. | def is_job_allowed_for_user(user_email, job_type):
"""Return whether if the given user has access to the job.
Args:
user_email: The email of the user.
job_type: The name of the job.
Returns:
A bool indicating whether the given user has access to the job.
"""
return _is_entity_allowed_for_user(user_email, job_type,
data_types.PermissionEntityKind.JOB) |
Return whether the given user has upload permissions.
Args:
user_email: The email of the user.
Returns:
A bool indicating whether the given user has upload permissions. | def is_upload_allowed_for_user(user_email):
"""Return whether if the given user has upload permissions.
Args:
user_email: The email of the user.
Returns:
A bool indicating whether the given user has upload permissions.
"""
permissions = _get_permissions_query_for_user(
user_email, data_types.PermissionEntityKind.UPLOADER)
return bool(permissions.get()) |
Return external users that should be CC'ed according to the given rule.
Args:
job_type: The name of the job
security_flag: Whether or not the CC is for a security issue.
Returns:
A list of user emails that should be CC'ed. | def cc_users_for_job(job_type, security_flag):
"""Return external users that should be CC'ed according to the given rule.
Args:
job_type: The name of the job
security_flag: Whether or not the CC is for a security issue.
Returns:
A list of user emails that should be CC'ed.
"""
return _cc_users_for_entity(job_type, data_types.PermissionEntityKind.JOB,
security_flag) |
Custom json.dumps using custom encoder JSONEncoder defined in this file. | def dumps(obj, *args, **kwargs):
"""Custom json.dumps using custom encoder JSONEncoder defined in this file."""
kwargs['cls'] = JSONEncoder
kwargs['sort_keys'] = True
return json.dumps(obj, *args, **kwargs) |
Custom json.loads using custom decoder JSONDecoder defined in this file. | def loads(obj, *args, **kwargs):
"""Custom json.loads using custom decoder JSONDecoder defined in this file."""
kwargs['cls'] = JSONDecoder
return json.loads(obj, *args, **kwargs) |
Get the redis client. | def _redis_client():
"""Get the redis client."""
import redis
if hasattr(_local, 'redis'):
return _local.redis
host = environment.get_value('REDIS_HOST', _DEFAULT_REDIS_HOST)
port = environment.get_value('REDIS_PORT', _DEFAULT_REDIS_PORT)
_local.redis = redis.Redis(host=host, port=port)
return _local.redis |
Get a key name based on function, arguments and keyword arguments. | def _default_key(func, args, kwargs):
"""Get a key name based on function, arguments and keyword arguments."""
# Normalize string arguments to a canonical form. This makes it less likely
# to have false cache misses.
args = tuple(arg if not isinstance(arg, str) else str(arg) for arg in args)
kwargs = {
key: value if not isinstance(value, str) else str(value)
for key, value in kwargs.items()
}
return 'memoize:%s' % [func.__name__, args, sorted(kwargs.items())] |
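For a hypothetical function lookup(a, b=1), the generated key looks like:

_default_key(lookup, ('x',), {'b': 2})
# -> "memoize:['lookup', ('x',), [('b', 2)]]"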
Decorator for caching the result of method calls. Arguments must
be hashable. None is not cached because we can't tell the difference
between having None and not having a key. | def wrap(engine):
"""Decorator for caching the result of method calls. Arguments must
be hashable. None is not cached because we can't tell the difference
between having None and not having a key."""
def decorator(func):
"""Decorator function."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
"""Wrapper function."""
force_update = kwargs.pop('__memoize_force__', False)
key = engine.get_key(func, args, kwargs)
result = engine.get(key)
if result is not None and not force_update:
return result
result = func(*args, **kwargs)
engine.put(key, result)
return result
return wrapper
return decorator |
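A usage sketch with a hypothetical engine implementing get_key/get/put; the special __memoize_force__ keyword recomputes and overwrites the cached entry:

@wrap(SomeCacheEngine())  # hypothetical engine
def get_build_metadata(build_id):
  ...

get_build_metadata('1234')                          # cached after first call
get_build_metadata('1234', __memoize_force__=True)  # bypasses the cached value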
Get the config modules directory. | def _config_modules_directory(root_directory):
"""Get the config modules directory."""
config_dir = os.getenv('CONFIG_DIR_OVERRIDE')
if not config_dir:
config_dir = os.path.join(root_directory, 'src', 'appengine', 'config')
return os.path.join(config_dir, 'modules') |
Patch out App Engine reliant behaviour from bots. | def _patch_appengine_modules_for_bots():
"""Patch out App Engine reliant behaviour from bots."""
if os.getenv('SERVER_SOFTWARE'):
# Not applicable on App Engine.
return
# google.auth uses App Engine credentials based on importability of
# google.appengine.api.app_identity.
try:
from google.auth import app_engine as auth_app_engine
if auth_app_engine.app_identity:
auth_app_engine.app_identity = None
except ImportError:
pass |
Add directories that we must be able to import from to path. | def fix_module_search_paths():
"""Add directories that we must be able to import from to path."""
root_directory = os.environ['ROOT_DIR']
source_directory = os.path.join(root_directory, 'src')
python_path = os.getenv('PYTHONPATH', '').split(os.pathsep)
third_party_libraries_directory = os.path.join(source_directory,
'third_party')
config_modules_directory = _config_modules_directory(root_directory)
if (os.path.exists(config_modules_directory) and
config_modules_directory not in sys.path):
sys.path.insert(0, config_modules_directory)
python_path.insert(0, config_modules_directory)
if third_party_libraries_directory not in sys.path:
sys.path.insert(0, third_party_libraries_directory)
python_path.insert(0, third_party_libraries_directory)
if source_directory not in sys.path:
sys.path.insert(0, source_directory)
python_path.insert(0, source_directory)
os.environ['PYTHONPATH'] = os.pathsep.join(python_path)
# Add site directory to make from imports work in google namespace.
site.addsitedir(third_party_libraries_directory)
# TODO(ochang): Remove this once SDK is removed from images.
_patch_appengine_modules_for_bots() |
Initialize the persistent cache, creating the directory used to store the
values. | def initialize():
"""Initialize the persistent cache, creating the directory used to store the
values."""
cache_directory_path = environment.get_value('CACHE_DIR')
if os.path.exists(cache_directory_path):
clear_values()
else:
os.makedirs(cache_directory_path) |
Remove all non-persistent values, or all values if |clear_all| is set. | def clear_values(clear_all=False):
"""Remove all non-persistent values, or all values if |clear_all| is set."""
cache_directory_path = environment.get_value('CACHE_DIR')
if not os.path.exists(cache_directory_path):
return
for root_directory, _, filenames in os.walk(cache_directory_path):
for filename in filenames:
if filename.endswith(PERSIST_FILE_EXTENSION) and not clear_all:
continue
file_path = os.path.join(root_directory, filename)
persist_file_path = file_path + PERSIST_FILE_EXTENSION
if os.path.exists(persist_file_path) and not clear_all:
continue
os.remove(file_path) |
Removes the value for a key. | def delete_value(key):
"""Removes the value for a key."""
value_path = get_value_file_path(key)
if os.path.exists(value_path):
os.remove(value_path) |
Get the value for a key. | def get_value(key, default_value=None, constructor=None):
"""Get the value for a key."""
value_path = get_value_file_path(key)
if not os.path.exists(value_path):
return default_value
try:
with open(value_path, 'rb') as f:
value_str = f.read()
except OSError:
logs.log_error('Failed to read %s from persistent cache.' % key)
return default_value
try:
value = json_utils.loads(value_str)
except Exception:
logs.log_warn('Non-serializable value read from cache key %s: "%s"' %
(key, value_str))
return default_value
if constructor:
try:
value = constructor(value)
except Exception:
logs.log_warn('Failed to construct value "%s" using %s '
'and key "%s" in persistent cache. Using default value %s.'
% (value, constructor, key, default_value))
return default_value
return value |
Return the full path to the value file for the given key. | def get_value_file_path(key):
"""Return the full path to the value file for the given key."""
# Not using utils.string_hash here to avoid a circular dependency.
# TODO(mbarbella): Avoid this once utils.py is broken into multiple files.
key_filename = 'cache-%s.json' % hashlib.sha1(str(key).encode()).hexdigest()
cache_directory_path = environment.get_value('CACHE_DIR')
return os.path.join(cache_directory_path, key_filename) |
Set the value for a key. If |persist_across_reboots| is set, the key
won't be deleted even if run.py is restarted. | def set_value(key, value, persist_across_reboots=False):
"""Set the value for a key. If |persist_across_reboots| is set, the key
won't be deleted even if run.py is restarted."""
value_path = get_value_file_path(key)
try:
value_str = json_utils.dumps(value)
except Exception:
logs.log_error(
'Non-serializable value stored to cache key %s: "%s"' % (key, value))
return
try:
with open(value_path, 'wb') as f:
f.write(value_str.encode())
except OSError:
logs.log_error('Failed to write %s to persistent cache.' % key)
if not persist_across_reboots:
return
persist_value_path = value_path + PERSIST_FILE_EXTENSION
if os.path.exists(persist_value_path):
return
try:
open(persist_value_path, 'wb').close()
except OSError:
logs.log_error(
'Failed to write persistent metadata file for cache key %s' % key) |
Invoke time.sleep through a wrapper; this avoids the flakiness of calling
time.sleep directly. See: crbug.com/770375 | def sleep(seconds):
"""Invoke time.sleep through a wrapper; this avoids the flakiness of calling
time.sleep directly. See: crbug.com/770375"""
time.sleep(seconds) |
Compute backoff delay. | def get_delay(num_try, delay, backoff):
"""Compute backoff delay."""
delay = delay * (backoff**(num_try - 1))
if _should_ignore_delay_for_testing():
# Don't sleep for long during tests. Flake is better.
return min(delay, 3)
return delay |
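Worked examples of the exponential backoff:

get_delay(1, delay=2, backoff=2)  # 2 * 2**0 == 2 seconds
get_delay(3, delay=2, backoff=2)  # 2 * 2**2 == 8 seconds
# During tests, any computed delay is capped at 3 seconds.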
Retry decorator for a function. | def wrap(retries,
delay,
function,
backoff=2,
exception_types=None,
retry_on_false=False):
"""Retry decorator for a function."""
assert delay > 0
assert backoff >= 1
assert retries >= 0
if exception_types is None:
exception_types = [Exception]
def is_exception_type(exception):
return any(
isinstance(exception, exception_type)
for exception_type in exception_types)
def decorator(func):
"""Decorator for the given function."""
tries = retries + 1
is_generator = inspect.isgeneratorfunction(func)
function_with_type = function
if is_generator:
function_with_type += ' (generator)'
def handle_retry(num_try, exception=None):
"""Handle retry."""
from clusterfuzz._internal.metrics import monitoring_metrics
if (exception is None or
is_exception_type(exception)) and num_try < tries:
logs.log(
'Retrying on %s failed with %s. Retrying again.' %
(function_with_type, sys.exc_info()[1]),
num=num_try,
total=tries)
sleep(get_delay(num_try, delay, backoff))
return True
monitoring_metrics.TRY_COUNT.increment({
'function': function,
'is_succeeded': False
})
logs.log_error(
'Retrying on %s failed with %s. Raise.' % (function_with_type,
sys.exc_info()[1]),
total=tries)
return False
@functools.wraps(func)
def _wrapper(*args, **kwargs):
"""Regular function wrapper."""
from clusterfuzz._internal.metrics import monitoring_metrics
for num_try in range(1, tries + 1):
try:
result = func(*args, **kwargs)
if retry_on_false and not result:
if not handle_retry(num_try):
return result
continue
monitoring_metrics.TRY_COUNT.increment({
'function': function,
'is_succeeded': True
})
return result
except Exception as e:
if not handle_retry(num_try, exception=e):
raise
return None
@functools.wraps(func)
def _generator_wrapper(*args, **kwargs):
"""Generator function wrapper."""
# This argument is not applicable for generator functions.
assert not retry_on_false
from clusterfuzz._internal.metrics import monitoring_metrics
already_yielded_element_count = 0
for num_try in range(1, tries + 1):
try:
for index, result in enumerate(func(*args, **kwargs)):
if index >= already_yielded_element_count:
yield result
already_yielded_element_count += 1
monitoring_metrics.TRY_COUNT.increment({
'function': function,
'is_succeeded': True
})
break
except Exception as e:
if not handle_retry(num_try, exception=e):
raise
if is_generator:
return _generator_wrapper
return _wrapper
return decorator |
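A usage sketch (fetch_url is hypothetical; |function| is the label used in log messages and the TRY_COUNT metric):

@wrap(retries=3, delay=2, function='fetch_url', exception_types=[IOError])
def fetch_url(url):
  ...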
Get the queue suffix for a platform. | def queue_suffix_for_platform(platform):
"""Get the queue suffix for a platform."""
# Handle the case where a subqueue is used.
platform = platform.lower().replace(SUBQUEUE_IDENTIFIER, '-')
return '-' + platform.replace('_', '-') |
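For example, assuming SUBQUEUE_IDENTIFIER does not occur in these names:

queue_suffix_for_platform('LINUX')        # -> '-linux'
queue_suffix_for_platform('ANDROID_X86')  # -> '-android-x86'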
Get the queue suffix for the current platform. | def default_queue_suffix():
"""Get the queue suffix for the current platform."""
queue_override = environment.get_value('QUEUE_OVERRIDE')
if queue_override:
return queue_suffix_for_platform(queue_override)
return queue_suffix_for_platform(environment.platform()) |
Get the regular jobs queue. | def regular_queue(prefix=JOBS_PREFIX):
"""Get the regular jobs queue."""
return prefix + default_queue_suffix() |
Get the high end jobs queue. | def high_end_queue():
"""Get the high end jobs queue."""
return regular_queue(prefix=HIGH_END_JOBS_PREFIX) |
Get the default jobs queue. | def default_queue():
"""Get the default jobs queue."""
thread_multiplier = environment.get_value('THREAD_MULTIPLIER')
if thread_multiplier and thread_multiplier > 1:
return high_end_queue()
return regular_queue() |
Get command override task. | def get_command_override():
"""Get command override task."""
command_override = environment.get_value('COMMAND_OVERRIDE', '').strip()
if not command_override:
return None
parts = command_override.split()
if len(parts) != 3:
raise ValueError('Command override should have 3 components.')
return Task(*parts, is_command_override=True) |
Try to get a fuzz task. | def get_fuzz_task():
"""Try to get a fuzz task."""
argument, job = fuzzer_selection.get_fuzz_task_payload()
if not argument:
return None
return Task('fuzz', argument, job) |
Get a high end task. | def get_high_end_task():
"""Get a high end task."""
task = get_regular_task(queue=high_end_queue())
if not task:
return None
task.high_end = True
return task |
Get a regular task. | def get_regular_task(queue=None):
"""Get a regular task."""
if not queue:
queue = regular_queue()
pubsub_puller = PubSubPuller(queue)
while True:
messages = pubsub_puller.get_messages(max_messages=1)
if not messages:
return None
task = get_task_from_message(messages[0])
if task:
return task |
Gets the machine template for the instance used to execute a task from
|queue_name|. This will be used by tworkers to schedule the appropriate
machine using batch to execute the utask_main part of a utask. | def get_machine_template_for_queue(queue_name):
"""Gets the machine template for the instance used to execute a task from
|queue_name|. This will be used by tworkers to schedule the appropriate
machine using batch to execute the utask_main part of a utask."""
initial_queue_name = queue_name
# Handle it being high-end (preemptible) or not.
if queue_name.startswith(JOBS_PREFIX):
is_high_end = False
prefix = JOBS_PREFIX
else:
assert queue_name.startswith(HIGH_END_JOBS_PREFIX)
is_high_end = True
prefix = HIGH_END_JOBS_PREFIX
# Add 1 for hyphen.
queue_name = queue_name[len(prefix) + 1:]
template_name = f'clusterfuzz-{queue_name}'
if not is_high_end:
template_name = f'{template_name}-pre'
templates = get_machine_templates()
for template in templates:
if template['name'] == template_name:
logs.log(
f'Found machine template for {initial_queue_name}',
machine_template=template)
return template
return None |
Returns machine templates. | def get_machine_templates():
"""Returns machine templates."""
# TODO(metzman): Cache this.
clusters_config = local_config.Config(local_config.GCE_CLUSTERS_PATH).get()
project = utils.get_application_id()
conf = clusters_config[project]
return conf['instance_templates'] |
Gets a postprocess task if one exists. | def get_postprocess_task():
"""Gets a postprocess task if one exists."""
# This should only be run on non-preemptible bots.
if not task_utils.is_remotely_executing_utasks():
return None
# Postprocess is platform-agnostic, so we run all such tasks on our
# most generic and plentiful bots only. In other words, we avoid
# wasting our precious non-linux bots on generic postprocess tasks.
if environment.platform().lower() != 'linux':
return None
pubsub_puller = PubSubPuller(POSTPROCESS_QUEUE)
logs.log('Pulling from postprocess queue')
messages = pubsub_puller.get_messages(max_messages=1)
if not messages:
return None
task = get_task_from_message(messages[0])
if task:
logs.log('Pulled from postprocess queue.')
return task |
Returns an ordinary (non-postprocess, non-utask_main) task that is pulled
from a ClusterFuzz task queue. | def get_task():
"""Returns an ordinary (non-postprocess, non-utask_main) task that is pulled
from a ClusterFuzz task queue."""
task = get_command_override()
if task:
return task
if allow_all_tasks():
# Postprocess tasks need to be executed on a non-preemptible bot, otherwise
# we can lose the output of a task.
# Postprocess tasks get priority because they are so quick. They typically
# only involve a few DB writes and never run user code.
task = get_postprocess_task()
if task:
return task
# Check the high-end jobs queue for bots with multiplier greater than 1.
thread_multiplier = environment.get_value('THREAD_MULTIPLIER')
if thread_multiplier and thread_multiplier > 1:
task = get_high_end_task()
if task:
return task
task = get_regular_task()
if task:
return task
task = get_fuzz_task()
if not task:
logs.log_error('Failed to get any fuzzing tasks. This should not happen.')
time.sleep(TASK_EXCEPTION_WAIT_INTERVAL)
return task |
Constructs payload for task, a standard description of tasks. | def construct_payload(command, argument, job):
"""Constructs payload for task, a standard description of tasks."""
return ' '.join([command, str(argument), str(job)]) |
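For example, with hypothetical values:

construct_payload('fuzz', 'libFuzzer_foo', 'libfuzzer_asan_job')
# -> 'fuzz libFuzzer_foo libfuzzer_asan_job'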
Returns a task constructed from |message| if possible. | def get_task_from_message(message) -> Optional[PubSubTask]:
"""Returns a task constructed from |message| if possible."""
if message is None:
return None
try:
task = initialize_task(message)
except KeyError:
logs.log_error('Received an invalid task, discarding...')
message.ack()
return None
# Check that this task should be run now (past the ETA). Otherwise we defer
# its execution.
if task.defer():
return None
return task |
Returns a list of tasks for preprocessing many utasks on this bot and then
running the uworker_mains in the same batch job. | def get_utask_mains() -> List[PubSubTask]:
"""Returns a list of tasks for preprocessing many utasks on this bot and then
running the uworker_mains in the same batch job."""
if not task_utils.is_remotely_executing_utasks():
# Return an empty list so the return value matches the List annotation.
return []
pubsub_puller = PubSubPuller(UTASK_MAINS_QUEUE)
messages = pubsub_puller.get_messages_time_limited(MAX_UTASKS,
UTASK_QUEUE_PULL_SECONDS)
return handle_multiple_utask_main_messages(messages) |
Merges tasks specified in |messages| into a list for processing on this
bot. | def handle_multiple_utask_main_messages(messages) -> List[PubSubTask]:
"""Merges tasks specified in |messages| into a list for processing on this
bot."""
tasks = []
for message in messages:
task = get_task_from_message(message)
if task is None:
continue
tasks.append(task)
logs.log(
'Got utask_mains.',
tasks_extras_info=[task.extra_info for task in tasks if task])
return tasks |
Creates a task from |message|. | def initialize_task(message) -> PubSubTask:
"""Creates a task from |message|."""
if message.attributes.get('eventType') != 'OBJECT_FINALIZE':
return PubSubTask(message)
# Handle postprocess task.
# The GCS API for pub/sub notifications uses the data field unlike
# ClusterFuzz which uses attributes more.
data = json.loads(message.data)
name = data['name']
bucket = data['bucket']
output_url_argument = storage.get_cloud_storage_file_path(bucket, name)
return PostprocessPubSubTask(output_url_argument, message) |
Adds the utask_main portion of a utask to the utasks queue for scheduling
on batch. This should only be done after preprocessing. | def add_utask_main(command, input_url, job_type, wait_time=None):
"""Adds the utask_main portion of a utask to the utasks queue for scheduling
on batch. This should only be done after preprocessing."""
initial_command = environment.get_value('TASK_PAYLOAD')
add_task(
command,
input_url,
job_type,
queue=UTASK_MAINS_QUEUE,
wait_time=wait_time,
extra_info={'initial_command': initial_command}) |
Add a new task to the job queue. | def add_task(command,
argument,
job_type,
queue=None,
wait_time=None,
extra_info=None):
"""Add a new task to the job queue."""
# Old testcases may pass in queue=None explicitly,
# so we must check this here.
if not queue:
queue = default_queue()
if wait_time is None:
wait_time = random.randint(1, TASK_CREATION_WAIT_INTERVAL)
if job_type != 'none':
job = data_types.Job.query(data_types.Job.name == job_type).get()
if not job:
raise Error(f'Job {job_type} not found.')
if job.is_external():
external_tasks.add_external_task(command, argument, job)
return
# Add the task.
eta = utils.utcnow() + datetime.timedelta(seconds=wait_time)
task = Task(command, argument, job_type, eta=eta, extra_info=extra_info)
pubsub_client = pubsub.PubSubClient()
pubsub_client.publish(
pubsub.topic_name(utils.get_application_id(), queue),
[task.to_pubsub_message()]) |
Return the task lease timeout. | def get_task_lease_timeout():
"""Return the task lease timeout."""
return environment.get_value('TASK_LEASE_SECONDS', TASK_LEASE_SECONDS) |
Return task completion deadline. This gives an additional buffer over the
task lease deadline. | def get_task_completion_deadline():
"""Return task completion deadline. This gives an additional buffer over the
task lease deadline."""
start_time = time.time()
task_lease_timeout = get_task_lease_timeout()
return start_time + task_lease_timeout - TASK_COMPLETION_BUFFER |
Return the queue for the platform. | def queue_for_platform(platform, is_high_end=False):
"""Return the queue for the platform."""
prefix = HIGH_END_JOBS_PREFIX if is_high_end else JOBS_PREFIX
return prefix + queue_suffix_for_platform(platform) |
Return the right queue for the testcase. | def queue_for_testcase(testcase):
"""Return the right queue for the testcase."""
is_high_end = (
testcase.queue and testcase.queue.startswith(HIGH_END_JOBS_PREFIX))
return queue_for_job(testcase.job_type, is_high_end=is_high_end) |
Queue for job. | def queue_for_job(job_name, is_high_end=False):
"""Queue for job."""
job = data_types.Job.query(data_types.Job.name == job_name).get()
if not job:
raise Error('Job {} not found.'.format(job_name))
return queue_for_platform(job.platform, is_high_end) |
Redo specific tasks for a testcase. This is requested by the user from the
web interface. | def redo_testcase(testcase, tasks, user_email):
"""Redo specific tasks for a testcase. This is requested by the user from the
web interface."""
for task in tasks:
if task not in VALID_REDO_TASKS:
raise InvalidRedoTask(task)
minimize = 'minimize' in tasks
regression = 'regression' in tasks
progression = 'progression' in tasks
impact = 'impact' in tasks
blame = 'blame' in tasks
task_list = []
testcase_id = testcase.key.id()
# Metadata keys to clear based on which redo tasks were selected.
metadata_keys_to_clear = ['potentially_flaky']
if minimize:
task_list.append('minimize')
testcase.minimized_keys = ''
testcase.set_metadata('redo_minimize', True, update_testcase=False)
metadata_keys_to_clear += [
'env', 'current_minimization_phase_attempts', 'minimization_phase'
]
# If this testcase was archived during minimization, update the state.
testcase.archive_state &= ~data_types.ArchiveStatus.MINIMIZED
if regression:
task_list.append('regression')
testcase.regression = ''
metadata_keys_to_clear += ['last_regression_min', 'last_regression_max']
if progression:
task_list.append('progression')
testcase.fixed = ''
testcase.open = True
testcase.last_tested_crash_stacktrace = None
testcase.triaged = False
testcase.set_metadata('progression_pending', True, update_testcase=False)
metadata_keys_to_clear += [
'last_progression_min', 'last_progression_max', 'last_tested_revision'
]
if impact:
task_list.append('impact')
testcase.is_impact_set_flag = False
if blame:
task_list.append('blame')
testcase.set_metadata('blame_pending', True, update_testcase=False)
testcase.set_metadata('predator_result', None, update_testcase=False)
for key in metadata_keys_to_clear:
testcase.delete_metadata(key, update_testcase=False)
testcase.comments += '[%s] %s: Redo task(s): %s\n' % (
utils.current_date_time(), user_email, ', '.join(sorted(task_list)))
testcase.one_time_crasher_flag = False
testcase.put()
# Allow new notifications to be sent for this testcase.
notifications = ndb_utils.get_all_from_query(
data_types.Notification.query(
data_types.Notification.testcase_id == testcase.key.id()),
keys_only=True)
ndb_utils.delete_multi(notifications)
# Use wait_time=0 to execute the task ASAP, since it is user-facing.
wait_time = 0
# If we are re-doing minimization, other tasks will be done automatically
# after minimization completes. So, don't add those tasks.
if minimize:
add_task(
'minimize',
testcase_id,
testcase.job_type,
queue_for_testcase(testcase),
wait_time=wait_time)
return
if regression:
add_task(
'regression',
testcase_id,
testcase.job_type,
queue_for_testcase(testcase),
wait_time=wait_time)
if progression:
add_task(
'progression',
testcase_id,
testcase.job_type,
queue_for_testcase(testcase),
wait_time=wait_time)
if impact:
add_task(
'impact',
testcase_id,
testcase.job_type,
queue_for_testcase(testcase),
wait_time=wait_time)
if blame:
add_task(
'blame',
testcase_id,
testcase.job_type,
queue_for_testcase(testcase),
wait_time=wait_time) |
Return current task payload. | def get_task_payload():
"""Return current task payload."""
return persistent_cache.get_value(TASK_PAYLOAD_KEY) |
Return current task end time. | def get_task_end_time():
"""Return current task end time."""
return persistent_cache.get_value(
TASK_END_TIME_KEY, constructor=datetime.datetime.utcfromtimestamp) |
Cache task information. | def track_task_start(task, task_duration):
"""Cache task information."""
persistent_cache.set_value(TASK_PAYLOAD_KEY, task.payload())
persistent_cache.set_value(TASK_END_TIME_KEY, time.time() + task_duration)
# Don't wait on |run_heartbeat|, update task information as soon as it starts.
from clusterfuzz._internal.datastore import data_handler
data_handler.update_heartbeat(force_update=True) |
Remove cached task information. | def track_task_end():
"""Remove cached task information."""
persistent_cache.delete_value(TASK_PAYLOAD_KEY)
persistent_cache.delete_value(TASK_END_TIME_KEY)
# Don't wait on |run_heartbeat|, remove task information as soon as it ends.
from clusterfuzz._internal.datastore import data_handler
data_handler.update_heartbeat(force_update=True) |
Returns True if the utask_main portions of utasks are being remotely
executed on Google cloud batch. | def is_remotely_executing_utasks() -> bool:
"""Returns True if the utask_main portions of utasks are being remotely
executed on Google cloud batch."""
return bool(environment.is_production() and
environment.get_value('REMOTE_UTASK_EXECUTION')) |