Get the target list for a given fuzz target bucket path. This is done by reading the targets.list file, which contains a list of the currently active fuzz targets.
def _get_targets_list(bucket_path): """Get the target list for a given fuzz target bucket path. This is done by reading the targets.list file, which contains a list of the currently active fuzz targets.""" bucket_dir_path = os.path.dirname(os.path.dirname(bucket_path)) targets_list_path = os.path.join(bucket_dir_path, TARGETS_LIST_FILENAME) data = storage.read_data(targets_list_path) if not data: return None # Filter out targets which are not yet built. targets = data.decode('utf-8').splitlines() listed_targets = { os.path.basename(path.rstrip('/')) for path in storage.list_blobs(bucket_dir_path, recursive=False) } return [t for t in targets if _base_fuzz_target_name(t) in listed_targets]
Get the full fuzz target bucket path.
def _full_fuzz_target_path(bucket_path, fuzz_target):
  """Get the full fuzz target bucket path."""
  return bucket_path.replace('%TARGET%', _base_fuzz_target_name(fuzz_target))
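For illustration, a minimal standalone sketch of the placeholder substitution this helper performs. The bucket path template and target name below are invented, and the simple replace stands in for the full helper (which also normalizes the target name via _base_fuzz_target_name).

# Standalone sketch (not the ClusterFuzz implementation): a '%TARGET%'
# placeholder in a bucket path template is replaced with a fuzz target name.
# The template and target name are made up for illustration.
def full_fuzz_target_path(bucket_path_template, base_target_name):
  return bucket_path_template.replace('%TARGET%', base_target_name)

template = 'gs://example-bucket/libfuzzer/%TARGET%/([0-9]+).zip'
print(full_fuzz_target_path(template, 'parser_fuzzer'))
# -> gs://example-bucket/libfuzzer/parser_fuzzer/([0-9]+).zip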
Set up a split-targets build for a chosen fuzz target.
def _setup_split_targets_build(bucket_path, target_weights, revision=None):
  """Set up a split-targets build for a chosen fuzz target."""
  targets_list = _get_targets_list(bucket_path)
  if not targets_list:
    raise BuildManagerError(
        'No targets found in targets.list (path=%s).' % bucket_path)

  fuzz_target = set_random_fuzz_target_for_fuzzing_if_needed(
      targets_list, target_weights)
  if not fuzz_target:
    raise BuildManagerError(
        'Failed to choose a fuzz target (path=%s).' % bucket_path)

  if fuzz_target not in targets_list:
    raise errors.BuildNotFoundError(revision, environment.get_value('JOB_NAME'))

  fuzz_target_bucket_path = _full_fuzz_target_path(bucket_path, fuzz_target)
  if not revision:
    revision = _get_latest_revision([fuzz_target_bucket_path])

  return setup_regular_build(
      revision, bucket_path=fuzz_target_bucket_path, fuzz_targets=targets_list)
Get the latest revision.
def _get_latest_revision(bucket_paths): """Get the latest revision.""" build_urls = [] for bucket_path in bucket_paths: urls_list = get_build_urls_list(bucket_path) if not urls_list: logs.log_error('Error getting list of build urls from %s.' % bucket_path) return None build_urls.append(BuildUrls(bucket_path=bucket_path, urls_list=urls_list)) if len(build_urls) == 0: logs.log_error( 'Attempted to get latest revision, but no build urls were found.') return None main_build_urls = build_urls[0] other_build_urls = build_urls[1:] revision_pattern = revisions.revision_pattern_from_build_bucket_path( main_build_urls.bucket_path) for build_url in main_build_urls.urls_list: match = re.match(revision_pattern, build_url) if not match: continue revision = revisions.convert_revision_to_integer(match.group(1)) if (not other_build_urls or all( revisions.find_build_url(url.bucket_path, url.urls_list, revision) for url in other_build_urls)): return revision return None
Sets up latest trunk build.
def setup_trunk_build(bucket_paths, build_prefix=None, target_weights=None): """Sets up latest trunk build.""" latest_revision = _get_latest_revision(bucket_paths) if latest_revision is None: logs.log_error('Unable to find a matching revision.') return None build = setup_regular_build( latest_revision, bucket_path=bucket_paths[0], build_prefix=build_prefix, target_weights=target_weights) if not build: logs.log_error('Failed to set up a build.') return None return build
Sets up build with a particular revision.
def setup_regular_build(revision, bucket_path=None, build_prefix='', target_weights=None, fuzz_targets=None) -> RegularBuild: """Sets up build with a particular revision.""" if not bucket_path: # Bucket path can be customized, otherwise get it from the default env var. bucket_path = get_bucket_path('RELEASE_BUILD_BUCKET_PATH') build_urls = get_build_urls_list(bucket_path) job_type = environment.get_value('JOB_NAME') if not build_urls: logs.log_error('Error getting build urls for job %s.' % job_type) return None build_url = revisions.find_build_url(bucket_path, build_urls, revision) if not build_url: logs.log_error( 'Error getting build url for job %s (r%d).' % (job_type, revision)) return None base_build_dir = _base_build_dir(bucket_path) build_class = RegularBuild if environment.is_trusted_host(): from clusterfuzz._internal.bot.untrusted_runner import build_setup_host build_class = build_setup_host.RemoteRegularBuild elif environment.platform() == 'FUCHSIA': build_class = FuchsiaBuild elif (environment.is_android_cuttlefish() and environment.is_kernel_fuzzer_job()): build_class = CuttlefishKernelBuild result = None build = build_class( base_build_dir, revision, build_url, target_weights=target_weights, build_prefix=build_prefix, fuzz_targets=fuzz_targets) if build.setup(): result = build else: return None # Additional binaries to pull (for fuzzing engines such as Centipede). extra_bucket_path = get_bucket_path('EXTRA_BUILD_BUCKET_PATH') if extra_bucket_path: # Import here as this path is not available in App Engine context. from clusterfuzz._internal.bot.fuzzers import utils as fuzzer_utils extra_build_urls = get_build_urls_list(extra_bucket_path) extra_build_url = revisions.find_build_url(extra_bucket_path, extra_build_urls, revision) if not extra_build_url: logs.log_error('Error getting extra build url for job %s (r%d).' % (job_type, revision)) return None build = build_class( build.build_dir, # Store inside the main build. revision, extra_build_url, target_weights=target_weights, build_prefix=fuzzer_utils.EXTRA_BUILD_DIR) if not build.setup(): return None return result
Set up symbolized release and debug build.
def setup_symbolized_builds(revision): """Set up symbolized release and debug build.""" sym_release_build_bucket_path = environment.get_value( 'SYM_RELEASE_BUILD_BUCKET_PATH') sym_debug_build_bucket_path = environment.get_value( 'SYM_DEBUG_BUILD_BUCKET_PATH') sym_release_build_urls = get_build_urls_list(sym_release_build_bucket_path) sym_debug_build_urls = get_build_urls_list(sym_debug_build_bucket_path) # We should at least have a symbolized debug or release build. if not sym_release_build_urls and not sym_debug_build_urls: logs.log_error( 'Error getting list of symbolized build urls from (%s, %s).' % (sym_release_build_bucket_path, sym_debug_build_bucket_path)) return None sym_release_build_url = revisions.find_build_url( sym_release_build_bucket_path, sym_release_build_urls, revision) sym_debug_build_url = revisions.find_build_url(sym_debug_build_bucket_path, sym_debug_build_urls, revision) base_build_dir = _base_build_dir(sym_release_build_bucket_path) build_class = SymbolizedBuild if environment.is_trusted_host(): from clusterfuzz._internal.bot.untrusted_runner import build_setup_host build_class = build_setup_host.RemoteSymbolizedBuild # pylint: disable=no-member build = build_class(base_build_dir, revision, sym_release_build_url, sym_debug_build_url) if build.setup(): return build return None
Set up the custom binary for a particular job.
def setup_custom_binary(target_weights=None): """Set up the custom binary for a particular job.""" # Check if this build is dependent on any other custom job. If yes, # then fake out our job name for setting up the build. old_job_name = '' share_build_job_type = environment.get_value('SHARE_BUILD_WITH_JOB_TYPE') if share_build_job_type: job_name = share_build_job_type old_job_name = environment.get_value('JOB_NAME', '') environment.set_value('JOB_NAME', job_name) else: job_name = environment.get_value('JOB_NAME', '') # Verify that this is really a custom binary job. job = data_types.Job.query(data_types.Job.name == job_name).get() if not job or not job.custom_binary_key or not job.custom_binary_filename: logs.log_error( 'Job does not have a custom binary, even though CUSTOM_BINARY is set.') return False base_build_dir = _base_build_dir('') build = CustomBuild( base_build_dir, job.custom_binary_key, job.custom_binary_filename, job.custom_binary_revision, target_weights=target_weights) # Revert back the actual job name. if share_build_job_type: environment.set_value('JOB_NAME', old_job_name) if build.setup(): return build return None
Set up a build that we assume is already installed on the system.
def setup_system_binary():
  """Set up a build that we assume is already installed on the system."""
  system_binary_directory = environment.get_value('SYSTEM_BINARY_DIR', '')
  build = SystemBuild(system_binary_directory)
  if build.setup():
    return build
  return None
Set up a custom or regular build based on revision.
def setup_build(revision=0, target_weights=None): """Set up a custom or regular build based on revision.""" # For custom binaries we always use the latest version. Revision is ignored. custom_binary = environment.get_value('CUSTOM_BINARY') if custom_binary: return setup_custom_binary(target_weights=target_weights) # In this case, we assume the build is already installed on the system. system_binary = environment.get_value('SYSTEM_BINARY_DIR') if system_binary: return setup_system_binary() fuzz_target_build_bucket_path = get_bucket_path( 'FUZZ_TARGET_BUILD_BUCKET_PATH') if fuzz_target_build_bucket_path: # Split fuzz target build. return _setup_split_targets_build( fuzz_target_build_bucket_path, target_weights, revision=revision) if revision: # Setup regular build with revision. return setup_regular_build(revision, target_weights=target_weights) # If no revision is provided, we default to a trunk build. bucket_paths = [] for env_var in DEFAULT_BUILD_BUCKET_PATH_ENV_VARS: bucket_path = get_bucket_path(env_var) if bucket_path: bucket_paths.append(bucket_path) else: logs.log('Bucket path not found for %s' % env_var) if len(bucket_paths) == 0: logs.log_error('Attempted a trunk build, but no bucket paths were found.') return None return setup_trunk_build(bucket_paths, target_weights=target_weights)
Determine if this is a custom or preinstalled system binary.
def is_custom_binary():
  """Determine if this is a custom or preinstalled system binary."""
  return bool(
      environment.get_value('CUSTOM_BINARY') or
      environment.get_value('SYSTEM_BINARY_DIR'))
Return whether the job type has a release or debug build available for stack symbolization.
def has_symbolized_builds():
  """Return whether the job type has a release or debug build available for
  stack symbolization."""
  return (environment.get_value('SYM_RELEASE_BUILD_BUCKET_PATH') or
          environment.get_value('SYM_DEBUG_BUILD_BUCKET_PATH'))
Set rpaths using chrpath.
def _set_rpaths_chrpath(binary_path, rpaths):
  """Set rpaths using chrpath."""
  chrpath = environment.get_default_tool_path('chrpath')
  if not chrpath:
    raise BuildManagerError('Failed to find chrpath')

  subprocess.check_output(
      [chrpath, '-r', ':'.join(rpaths), binary_path], stderr=subprocess.PIPE)
Set rpaths using patchelf.
def _set_rpaths_patchelf(binary_path, rpaths):
  """Set rpaths using patchelf."""
  patchelf = shutil.which('patchelf')
  if not patchelf:
    raise BuildManagerError('Failed to find patchelf')

  subprocess.check_output(
      [patchelf, '--force-rpath', '--set-rpath', ':'.join(rpaths), binary_path],
      stderr=subprocess.PIPE)
Set rpath of a binary.
def set_rpaths(binary_path, rpaths):
  """Set rpath of a binary."""
  # Patchelf handles rpath patching much better, and allows e.g. extending the
  # length of the rpath. However, it loads the entire binary into memory so
  # does not work for large binaries, so use chrpath for larger binaries.
  binary_size = os.path.getsize(binary_path)
  if binary_size >= PATCHELF_SIZE_LIMIT:
    _set_rpaths_chrpath(binary_path, rpaths)
  else:
    _set_rpaths_patchelf(binary_path, rpaths)
Get rpath of a binary.
def get_rpaths(binary_path): """Get rpath of a binary.""" chrpath = environment.get_default_tool_path('chrpath') if not chrpath: raise BuildManagerError('Failed to find chrpath') try: rpaths = subprocess.check_output( [chrpath, '-l', binary_path], stderr=subprocess.PIPE).strip().decode('utf-8') except subprocess.CalledProcessError as e: if b'no rpath or runpath tag found' in e.output: return [] raise if rpaths: search_marker = 'RPATH=' start_index = rpaths.index(search_marker) + len(search_marker) return rpaths[start_index:].split(':') return []
Check if APP_PATH is properly set.
def check_app_path(app_path='APP_PATH') -> bool:
  """Check if APP_PATH is properly set."""
  # If APP_NAME is not set (e.g. for grey box jobs), then we don't need
  # APP_PATH.
  if not environment.get_value('APP_NAME'):
    logs.log('APP_NAME is not set.')
    return True

  logs.log('APP_NAME is set.')
  app_path_value = environment.get_value(app_path)
  logs.log(f'app_path: {app_path} {app_path_value}')
  return bool(app_path_value)
Return build bucket path, applying any set overrides.
def get_bucket_path(name):
  """Return build bucket path, applying any set overrides."""
  bucket_path = environment.get_value(name)
  bucket_path = overrides.check_and_apply_overrides(
      bucket_path, overrides.PLATFORM_ID_TO_BUILD_PATH_KEY)
  return bucket_path
Check if the given path points to an override config file; if so, use it to override the given path.
def check_and_apply_overrides(curr_path, config_key, platform_id=None):
  """Check if the given path points to an override config file; if so, use it
  to override the given path."""
  if not curr_path:
    return curr_path

  if not platform_id:
    platform_id = environment.get_platform_id()
  if os.path.basename(curr_path) == PLATFORM_ID_URLS_FILENAME:
    curr_path = _apply_platform_id_overrides(platform_id, curr_path, config_key)
  return curr_path
Read the config at the given URL, parse it as JSON, and map based on platform_id.
def _apply_platform_id_overrides(platform_id, config_url, config_key):
  """Read the config at `config_url`, parse it as JSON, and map based on
  platform_id."""
  config_dict = _get_config_dict(config_url)
  path = _get_path_from_config(config_dict, config_key, platform_id)
  if not path:
    raise BuildOverrideError(
        OVERRIDE_PATH_NOT_FOUND_ERROR.format(config_key, config_url,
                                             platform_id))
  return path
Read configs from a JSON file and return them as a dict.
def _get_config_dict(url):
  """Read configs from a JSON file and return them as a dict."""
  url_data = storage.read_data(url)
  if not url_data:
    raise BuildOverrideError(OVERRIDE_CONFIG_NOT_READ_ERROR.format(url))
  return json.loads(url_data)
Return the path override for the given platform ID if one is present, or None otherwise.
def _get_path_from_config(config_dict, config_key, platform_id):
  """Return the path override for the given platform ID if one is present,
  or None otherwise."""
  if config_key not in config_dict:
    return None
  return config_dict[config_key].get(platform_id)
Add components from a dict representing a DEPS file.
def _add_components_from_dict(deps_dict, vars_dict, revisions_dict):
  """Add components from a dict representing a DEPS file."""
  if not deps_dict:
    # If the dictionary is None, bail out early.
    return

  for key, value in deps_dict.items():
    url = rev = None
    if isinstance(value, str):
      url, _, rev = value.partition('@')
    elif isinstance(value, dict):
      if 'revision' in value:
        url = value['url']
        rev = value['revision']
      elif 'url' in value and value['url'] is not None:
        url, _, rev = value['url'].partition('@')

    if url and rev:
      url = url.format(**vars_dict)
      rev = rev.format(**vars_dict)
      revisions_dict[key] = {
          'name': _get_component_display_name(key),
          'rev': rev,
          'url': url
      }
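As a rough illustration of the two deps entry shapes handled above (a plain "url@revision" string and a dict with explicit url/revision keys), here is a small self-contained sketch; the repository URLs and revisions are invented.

# Self-contained sketch of the deps entry shapes accepted above. URLs and
# revisions are invented for illustration only.
deps = {
    'src/third_party/libfoo':
        'https://example.googlesource.com/libfoo.git@{libfoo_revision}',
    'src/third_party/libbar': {
        'url': 'https://example.googlesource.com/libbar.git',
        'revision': 'deadbeefcafe',
    },
}
vars_dict = {'libfoo_revision': '0123456789ab'}

revisions = {}
for key, value in deps.items():
  if isinstance(value, str):
    url, _, rev = value.partition('@')
  else:
    url, rev = value['url'], value['revision']
  # Variable references like {libfoo_revision} are expanded from vars_dict.
  revisions[key] = {'url': url.format(**vars_dict), 'rev': rev.format(**vars_dict)}

print(revisions['src/third_party/libfoo']['rev'])  # -> 0123456789ab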
Parse Clank revision file and return revisions dict.
def _clank_revision_file_to_revisions_dict(content): """Parse Clank revision file and return revisions dict.""" component_revision_mappings = {} for line in content.splitlines(): match = CLANK_REVISION_FILE_COMPONENT_REGEX.match(line) if not match: continue component = match.group(1) revision = match.group(2) component_revision_mappings[component] = revision if not component_revision_mappings: logs.log_error('Failed to get component revision mappings for clank.') return None chromium_revision = component_revision_mappings['chromium_revision'] clank_revision = component_revision_mappings['clank_revision'] # Initialize revisions dictionary with chromium repo. revisions_dict = get_component_revisions_dict(chromium_revision, None) if revisions_dict is None: logs.log_error( 'Failed to get chromium component revisions.', chromium_revision=chromium_revision, clank_revision=clank_revision) return None # Add info on clank repo. revisions_dict['/src/clank'] = { 'name': 'Clank', 'url': CLANK_URL, 'rev': clank_revision } return revisions_dict
Display name for a component.
def _get_component_display_name(name, default=None):
  """Display name for a component."""
  if default and name in ['', 'default', '/src']:
    return default.capitalize()

  names = name.split('/')
  name_index = -1
  if len(names) > 1 and names[-1] in COMPONENT_NAMES_BLACKLIST:
    # Skip the blacklisted names from right.
    name_index -= 1

  return names[name_index].capitalize()
Return display revision for a component revision dict.
def _get_display_revision(component_revision_dict):
  """Return display revision for a component revision dict."""
  if 'commit_pos' in component_revision_dict:
    return component_revision_dict['commit_pos']

  return component_revision_dict['rev'] or '<empty>'
Return link text given a start and end revision. This is used in cases when revision url is not available.
def _get_link_text(start_component_revision_dict, end_component_revision_dict):
  """Return link text given a start and end revision. This is used in cases
  when revision url is not available."""
  start_revision = _get_display_revision(start_component_revision_dict)
  end_revision = _get_display_revision(end_component_revision_dict)

  if start_revision == end_revision:
    return str(start_revision)

  return '%s:%s' % (start_revision, end_revision)
Return a link URL given a start and end revision, or None if the revision URL's VCS is not supported.
def _get_link_url(start_component_revision_dict, end_component_revision_dict):
  """Return a link url given a start and end revision, or None if the
  revision url's VCS is not supported."""
  url = start_component_revision_dict['url']
  if not url:
    return None

  vcs_viewer = source_mapper.get_vcs_viewer_for_url(url)
  if not vcs_viewer:
    # If we don't support the vcs yet, bail out.
    return None

  start_revision = _get_revision(start_component_revision_dict)
  end_revision = _get_revision(end_component_revision_dict)
  if start_revision == end_revision:
    return vcs_viewer.get_source_url_for_revision(start_revision)

  return vcs_viewer.get_source_url_for_revision_diff(start_revision,
                                                     end_revision)
Return revision for a component revision dict.
def _get_revision(component_revision_dict):
  """Return revision for a component revision dict."""
  return component_revision_dict['rev']
Read a potentially base64-encoded resource from the given URL.
def _get_url_content(url): """Read a potentially base64-encoded resource from the given URL.""" if url.startswith(storage.GS_PREFIX): # Fetch a GCS path with authentication. url_data = storage.read_data(url) if url_data is None: return None url_content = url_data.decode('utf-8') else: # Fetch a regular url without authentication. url_content = utils.fetch_url(url) # Urls on googlesource.com return file data as base64 encoded to avoid # cross-site scripting attacks. If the requested url contains |format=text|, # then the output is base64 encoded. So, decode it first. if url_content and url.endswith('format=text'): url_content = base64.b64decode(url_content) return url_content
Return git url for a chromium repository.
def _git_url_for_chromium_repository(repository):
  """Return git url for a chromium repository."""
  return '%s/%s.git' % (CHROMIUM_GIT_ROOT_URL, repository)
Return whether or not this is a Clank URL.
def _is_clank(url):
  """Return whether or not this is a clank url."""
  # FIXME: Need a better way to check for this.
  return '/chrome-test-builds/android' in url
Return whether or not this is a DEPS URL.
def _is_deps(url):
  """Return whether or not this is a DEPS url."""
  return urllib.parse.urlparse(url).path.endswith('/DEPS')
Convert src map contents to revisions dict.
def _src_map_to_revisions_dict(src_map, project_name): """Convert src map contents to revisions dict.""" revisions_dict = {} for key in src_map: # Only add keys that have both url and rev attributes. if 'url' in src_map[key] and 'rev' in src_map[key]: revisions_dict[key] = { 'name': _get_component_display_name(key, project_name), 'rev': src_map[key]['rev'], 'url': src_map[key]['url'] } return revisions_dict
Return git hash for a git commit position using cr-rev.appspot.com.
def _git_commit_position_to_git_hash_for_chromium(revision, repository): """Return git hash for a git commit position using cr-rev.appspot.com.""" request_variables = { 'number': revision, 'numbering_identifier': 'refs/heads/main', 'numbering_type': 'COMMIT_POSITION', 'project': 'chromium', 'repo': repository, 'fields': 'git_sha', } query_string = urllib.parse.urlencode(request_variables) query_url = '%s?%s' % (CRREV_NUMBERING_URL, query_string) url_content = _get_url_content(query_url) if url_content is None: logs.log_error('Failed to fetch git hash from url: ' + query_url) return None result_dict = _to_dict(url_content) if result_dict is None: logs.log_error('Failed to parse git hash from url: ' + query_url) return None return result_dict['git_sha']
Parse |contents| as a dict, returning None on failure or if it's not a dict.
def _to_dict(contents):
  """Parse |contents| as a dict, returning None on failure or if it's not a
  dict."""
  try:
    result = ast.literal_eval(contents)
    if isinstance(result, dict):
      return result
  except (ValueError, TypeError):
    pass

  return None
Parses DEPS content and returns a dictionary of revision variables.
def deps_to_revisions_dict(content): """Parses DEPS content and returns a dictionary of revision variables.""" local_context = {} global_context = { 'Var': lambda x: local_context.get('vars', {}).get(x), 'Str': str, } # pylint: disable=exec-used exec(content, global_context, local_context) revisions_dict = {} vars_dict = local_context.get('vars', {}) deps_dict = local_context.get('deps') if not deps_dict: # |deps| variable is required. If it does not exist, we should raise an # exception. logs.log_error('Deps format has changed, code needs fixing.') return None _add_components_from_dict(deps_dict, vars_dict, revisions_dict) deps_os_dict = local_context.get('deps_os') if deps_os_dict: # |deps_os| variable is optional. for deps_os in list(deps_os_dict.values()): _add_components_from_dict(deps_os, vars_dict, revisions_dict) return revisions_dict
Return a prioritized order of components based on job type.
def get_components_list(component_revisions_dict, job_type): """Return a prioritized order of components based on job type.""" components = sorted(component_revisions_dict.keys()) if utils.is_chromium(): # Components prioritization only applies to non-chromium projects. return components project_name = data_handler.get_project_name(job_type) if not project_name: # No project name found in job environment, return list as-is. return components main_repo = data_handler.get_main_repo(job_type) project_src = '/src/' + project_name for component in components.copy(): if component_revisions_dict[component]['url'] == main_repo: # Matches recorded main repo. components.remove(component) components.insert(0, component) break if component == project_src: components.remove(component) components.insert(0, component) break if project_name.lower() in os.path.basename(component).lower(): components.remove(component) components.insert(0, component) # Keep trying in case an exact match is found later. return components
Return REVISION_VARS_URL from job environment if available. Otherwise, default to one set in project.yaml. For custom binary jobs, this is not applicable.
def _get_revision_vars_url_format(job_type, platform_id=None): """Return REVISION_VARS_URL from job environment if available. Otherwise, default to one set in project.yaml. For custom binary jobs, this is not applicable.""" if job_type is None: # Force it to use env attribute in project.yaml. return local_config.ProjectConfig().get('env.REVISION_VARS_URL') custom_binary = data_handler.get_value_from_job_definition( job_type, 'CUSTOM_BINARY') if utils.string_is_true(custom_binary): return None rev_path = data_handler.get_value_from_job_definition_or_environment( job_type, 'REVISION_VARS_URL') rev_path = overrides.check_and_apply_overrides( rev_path, overrides.PLATFORM_ID_TO_REV_PATH_KEY, platform_id=platform_id) return rev_path
Retrieve revision vars dict.
def get_component_revisions_dict(revision, job_type, platform_id=None): """Retrieve revision vars dict.""" if revision == 0 or revision == '0' or revision is None: # Return empty dict for zero start revision. return {} revision_vars_url_format = _get_revision_vars_url_format( job_type, platform_id=platform_id) if not revision_vars_url_format: return None project_name = data_handler.get_project_name(job_type) revisions_dict = {} if utils.is_chromium(): component = data_handler.get_component_name(job_type) repository = data_handler.get_repository_for_component(component) if repository and not _is_clank(revision_vars_url_format): revision_hash = _git_commit_position_to_git_hash_for_chromium( revision, repository) if revision_hash is None: return None # FIXME: While we check for this explicitly appended component in all # applicable cases that we know of within this codebase, if the dict # is shared with an external service (e.g. Predator) we may need to clean # this up beforehand. revisions_dict['/src'] = { 'name': _get_component_display_name(component, project_name), 'url': _git_url_for_chromium_repository(repository), 'rev': revision_hash, 'commit_pos': revision } # Use revision hash for info url later. revision = revision_hash revision_vars_url = revision_vars_url_format % revision url_content = _get_url_content(revision_vars_url) if not url_content: logs.log_error( 'Failed to get component revisions from %s.' % revision_vars_url) return None # Parse as per DEPS format. if _is_deps(revision_vars_url): deps_revisions_dict = deps_to_revisions_dict(url_content) if not deps_revisions_dict: return None revisions_dict.update(deps_revisions_dict) return revisions_dict # Parse as per Clank DEPS format. if _is_clank(revision_vars_url): return _clank_revision_file_to_revisions_dict(url_content) # Default case: parse content as yaml. revisions_dict = _to_dict(url_content) if revisions_dict is None: logs.log_error( 'Failed to parse component revisions from %s.' % revision_vars_url) return None # Parse as per source map format. if revision_vars_url.endswith(SOURCE_MAP_EXTENSION): revisions_dict = _src_map_to_revisions_dict(revisions_dict, project_name) return revisions_dict
Gets mapped revisions for a given revision.
def get_component_list(revision, job_type):
  """Gets mapped revisions for a given revision."""
  return get_component_range_list(revision, revision, job_type)
Gets revision variable ranges for a changeset range.
def get_component_range_list(start_revision, end_revision, job_type, platform_id=None): """Gets revision variable ranges for a changeset range.""" start_component_revisions_dict = get_component_revisions_dict( start_revision, job_type, platform_id=platform_id) if start_revision == end_revision: end_component_revisions_dict = start_component_revisions_dict else: end_component_revisions_dict = get_component_revisions_dict( end_revision, job_type, platform_id=platform_id) if (start_component_revisions_dict is None or end_component_revisions_dict is None): return [] component_revisions = [] keys = get_components_list(end_component_revisions_dict, job_type) for key in keys: if not start_component_revisions_dict: # 0 start revision, can only show link text. end_component_display_revision = _get_display_revision( end_component_revisions_dict[key]) component_name = end_component_revisions_dict[key]['name'] component_revisions.append({ 'component': component_name, 'link_text': '0:%s' % end_component_display_revision }) continue if key not in start_component_revisions_dict: logs.log_warn('Key %s not found in start revision %s for job %s.' % (key, start_revision, job_type)) continue start_component_revision_dict = start_component_revisions_dict[key] end_component_revision_dict = end_component_revisions_dict[key] component_revisions.append({ 'component': start_component_revision_dict['name'], 'link_text': _get_link_text(start_component_revision_dict, end_component_revision_dict), 'link_url': _get_link_url(start_component_revision_dict, end_component_revision_dict) }) return component_revisions
Return start and end revision for a regression range.
def get_start_and_end_revision(revision_range):
  """Return start and end revision for a regression range."""
  try:
    revision_range_list = revision_range.split(':')
    start_revision = int(revision_range_list[0])
    end_revision = int(revision_range_list[1])
  except:
    return [0, 0]

  return [start_revision, end_revision]
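A quick standalone sketch of the "start:end" range format this parser expects; the range values below are arbitrary.

# Illustrative only: the "start:end" regression range format parsed above.
def parse_range(revision_range):
  try:
    start, end = revision_range.split(':')
    return [int(start), int(end)]
  except (ValueError, AttributeError):
    return [0, 0]

print(parse_range('123456:123460'))  # -> [123456, 123460]
print(parse_range('garbage'))        # -> [0, 0]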
Converts component revision list to html.
def format_revision_list(revisions, use_html=True):
  """Converts component revision list to html."""
  result = ''
  for revision in revisions:
    if revision['component']:
      result += '%s: ' % revision['component']

    if 'link_url' in revision and revision['link_url'] and use_html:
      result += '<a target="_blank" href="{link_url}">{link_text}</a>'.format(
          link_url=revision['link_url'], link_text=revision['link_text'])
    else:
      result += revision['link_text']

    if use_html:
      result += '<br />'
    else:
      result += '\n'

  return result
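For illustration, the kind of input this formatter consumes and the HTML it emits, using made-up component names and URLs.

# Illustrative only: the component revision list shape consumed above, with
# invented component names and URLs.
revisions = [
    {
        'component': 'Chromium',
        'link_text': '123456:123460',
        'link_url': 'https://example.com/diff/123456..123460',
    },
    {'component': 'V8', 'link_text': 'abc123'},
]

lines = []
for revision in revisions:
  prefix = '%s: ' % revision['component'] if revision['component'] else ''
  if revision.get('link_url'):
    lines.append('%s<a target="_blank" href="%s">%s</a>' %
                 (prefix, revision['link_url'], revision['link_text']))
  else:
    lines.append(prefix + revision['link_text'])

print('<br />'.join(lines))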
Returns an integer that represents the given revision.
def convert_revision_to_integer(revision):
  """Returns an integer that represents the given revision."""
  # If revision is only decimal digits, like '249055', then do a simple
  # conversion.
  match = re.match(r'^\d+$', revision)
  if match:
    return int(revision)

  # If the revision has 4 parts separated by dots, like '34.0.1824.2', then do
  # the following conversion:
  # Pad the heads with up to 5 "0"s to allow them to be sorted properly, eg.:
  #   '34.0.1824.2'   -> 00034000000182400002
  #   '32.0.1700.107' -> 00032000000170000107
  # If neither of the two patterns matches, raise an error.
  match = re.match(r'^(\d{1,5})\.(\d{1,5})\.(\d{1,5})\.(\d{1,5})$', revision)
  if match:
    revision = '%s%s%s%s' % (match.group(1).zfill(5), match.group(2).zfill(5),
                             match.group(3).zfill(5), match.group(4).zfill(5))
    return int(revision)

  error = 'Unknown revision pattern: %s' % revision
  logs.log_error(error)
  raise ValueError(error)
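To make the zero-padding concrete, here is a minimal standalone sketch of the same conversion; the version strings are just examples.

import re

# Standalone sketch of the padding scheme described above: each of the four
# dotted components is zero-padded to 5 digits so versions compare numerically.
def to_comparable_int(revision):
  if re.match(r'^\d+$', revision):
    return int(revision)
  match = re.match(r'^(\d{1,5})\.(\d{1,5})\.(\d{1,5})\.(\d{1,5})$', revision)
  if match:
    return int(''.join(part.zfill(5) for part in match.groups()))
  raise ValueError('Unknown revision pattern: %s' % revision)

print(to_comparable_int('249055'))       # -> 249055
print(to_comparable_int('34.0.1824.2'))  # -> int('00034000000182400002')
print(to_comparable_int('34.0.1824.2') < to_comparable_int('34.0.1824.10'))  # -> True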
Returns the build url associated with a revision.
def find_build_url(bucket_path, build_url_list, revision):
  """Returns the build url associated with a revision."""
  if not build_url_list:
    return None

  revision_pattern = revision_pattern_from_build_bucket_path(bucket_path)
  for build_url in build_url_list:
    match = re.match(revision_pattern, build_url)
    if not match:
      continue

    current_revision = convert_revision_to_integer(match.group(1))
    if current_revision == revision:
      return build_url

  return None
Find the min index for bisection. Find largest revision <= the given revision.
def find_min_revision_index(revisions_list, revision):
  """Find the min index for bisection. Find largest revision <= the given
  revision."""
  # bisect_left partitions |revisions_list| into 2 such that:
  #   all(val < revision for val in a[:index])
  #   all(val >= revision for val in a[index:])
  index = bisect.bisect_left(revisions_list, revision)
  if index < len(revisions_list) and revisions_list[index] == revision:
    return index

  if index > 0:
    return index - 1

  # No revisions <= given revision.
  return None
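A small self-contained demonstration of how bisect_left yields the "largest revision <= given revision" index; the revision list is arbitrary.

import bisect

# Illustrative only: how bisect_left is used to find the largest revision
# that is <= the requested revision in a sorted list.
revisions = [100, 200, 300, 400]

def min_index(revision):
  index = bisect.bisect_left(revisions, revision)
  if index < len(revisions) and revisions[index] == revision:
    return index      # Exact match.
  if index > 0:
    return index - 1  # Largest revision below the requested one.
  return None         # Everything in the list is newer.

print(min_index(300))  # -> 2 (exact match)
print(min_index(250))  # -> 1 (falls back to revision 200)
print(min_index(50))   # -> None (no revision <= 50)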
Find the max index for bisection. Find smallest revision >= the given revision.
def find_max_revision_index(revisions_list, revision):
  """Find the max index for bisection. Find smallest revision >= the given
  revision."""
  index = bisect.bisect_left(revisions_list, revision)
  if index < len(revisions_list):
    return index

  # No revisions >= given revision.
  return None
Gets the first revision in list greater than or equal to MIN_REVISION.
def get_first_revision_in_list(revision_list): """Gets the first revision in list greater than or equal to MIN_REVISION.""" first_revision = revision_list[0] min_revision = environment.get_value('MIN_REVISION') if not min_revision: return first_revision for revision in revision_list: if revision >= min_revision: return revision # No revision >= |MIN_REVISION| was found, store the error and just return # first revision. logs.log_error('Unable to find a revision >= MIN_REVISION.') return first_revision
Gets the last revision in list.
def get_last_revision_in_list(revision_list):
  """Gets the last revision in list."""
  return revision_list[-1]
Convert the revision number into a real revision hash (e.g. git hash).
def get_real_revision(revision, job_type, display=False, platform_id=None): """Convert the revision number into a real revision hash (e.g. git hash).""" if revision is None: # Bail early when caller passes revision from a non-existent attribute. return None component_revisions_dict = get_component_revisions_dict( revision, job_type, platform_id=platform_id) if not component_revisions_dict: return str(revision) keys = list(component_revisions_dict.keys()) key = ('/src' if '/src' in keys else get_components_list( component_revisions_dict, job_type)[0]) helper = _get_display_revision if display else _get_revision return helper(component_revisions_dict[key])
Check a revision file against the provided revision to see if an update is required.
def needs_update(revision_file, revision): """Check a revision file against the provided revision to see if an update is required.""" failure_wait_interval = environment.get_value('FAIL_WAIT') file_exists = False retry_limit = environment.get_value('FAIL_RETRIES') # TODO(metzman): Delete this. for _ in range(retry_limit): # NFS can sometimes return a wrong result on file existence, so redo # this check a couple of times to be sure. if not os.path.exists(revision_file): file_exists = False time.sleep(15) continue # Found the file, now try to read its contents. file_exists = True try: with open(revision_file) as file_handle: current_revision = file_handle.read() except: logs.log_error( 'Error occurred while reading revision file %s.' % revision_file) time.sleep(utils.random_number(1, failure_wait_interval)) continue if current_revision.isdigit(): return int(revision) > int(current_revision) return str(revision) != str(current_revision) # If there is no revision file or if we have lost track of its revision, # then we do need to update the data bundle. if not file_exists: return True # An error has occurred and we have failed to read revision file # despite several retries. So, don't bother updating the data # bundle as it will probably fail as well. logs.log_error('Failed to read revision file, exiting.') return False
Writes a revision to the revision file.
def write_revision_to_revision_file(revision_file, revision):
  """Writes a revision to the revision file."""
  try:
    with open(revision_file, 'wb') as file_handle:
      file_handle.write(str(revision).encode('utf-8'))
  except:
    logs.log_error(
        "Could not save revision to revision file '%s'" % revision_file)
Get the revision pattern from a build bucket path.
def revision_pattern_from_build_bucket_path(bucket_path):
  """Get the revision pattern from a build bucket path."""
  return '.*?' + os.path.basename(bucket_path)
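For illustration, how the derived pattern is typically matched against a full build URL. The bucket path and URL below are invented and assume the path's basename embeds a capture group for the revision, as in the find_build_url usage above.

import re

# Illustrative only: made-up bucket path whose basename contains a capture
# group for the revision; the '.*?' prefix lets the pattern match the
# basename at the end of a complete build URL.
bucket_path = 'gs://example-bucket/linux-release/asan-linux-release-([0-9]+).zip'
pattern = '.*?' + bucket_path.rsplit('/', 1)[-1]

build_url = 'gs://example-bucket/linux-release/asan-linux-release-123456.zip'
match = re.match(pattern, build_url)
print(match.group(1))  # -> 123456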
Interrogates git code review server to find the branch-from revision of a component.
def revision_to_branched_from(uri, revision): """Interrogates git code review server to find the branch-from revision of a component.""" full_uri = "%s/+/%s?format=JSON" % (uri, revision) url_content = _get_url_content(full_uri) # gerrit intentionally returns nonsense in the first line. # See 'cross site script inclusion here: # https://gerrit-review.googlesource.com/Documentation/rest-api.html url_content = '\n'.join(url_content.splitlines()[1:]) result = _to_dict(url_content) if not result: logs.log_error("Unable to retrieve and parse url: %s" % full_uri) return None msg = result.get('message', None) if not msg: logs.log_error("%s JSON had no 'message'" % full_uri) return None m = FIND_BRANCHED_FROM.search(msg) if not m: logs.log_error("%s JSON message lacked Cr-Branched-From" % full_uri) return None return m.group(1)
Get component source and relative path given a revisions dictionary and path.
def get_component_source_and_relative_path(path, revisions_dict): """Get component source and relative path given a revisions dictionary and path.""" if not revisions_dict: return ComponentPath() normalized_path = normalize_source_path(path) if normalized_path is None: return ComponentPath() component_sources = sorted(list(revisions_dict.keys()), key=len, reverse=True) default_component_source = None for component_source in component_sources: # Trailing slash is important so that we match the exact component source. # E.g. without slash, we would match src/webrtc_overrides with src/webrtc # which is incorrect. stripped_component_source = ( SOURCE_STRIP_REGEX.sub('', component_source) + '/') if normalized_path.startswith(stripped_component_source): relative_path = utils.strip_from_left(normalized_path, stripped_component_source) return ComponentPath(component_source, relative_path, normalized_path) if stripped_component_source == '/': default_component_source = component_source if default_component_source is None: return ComponentPath() return ComponentPath(default_component_source, normalized_path, normalized_path)
Return a VCS instance given an input url.
def get_vcs_viewer_for_url(url):
  """Return a VCS instance given an input url."""
  for vcs in VCS_LIST:
    if vcs.VCS_URL_REGEX.match(url):
      return vcs(url)

  return None
Normalizes source path for comparison with component sources.
def normalize_source_path(path): """Normalizes source path for comparison with component sources.""" # Account for ../../ at start of path due to working directory # out/<build_dir>/ at time of build generation (chromium only). path = utils.remove_prefix(path, '../../') # Remove /proc/self/cwd prefix added by Bazel. path = utils.remove_prefix(path, '/proc/self/cwd/') # Cross-platform way to determine path absoluteness. is_path_absolute = path.startswith('/') or DRIVE_LETTER_REGEX.match(path) # Normalize backslashes into slashes. normalized_path = path.replace('\\', '/') if is_path_absolute: source_start_id_index = normalized_path.find(SOURCE_START_ID) if source_start_id_index == -1: # This absolute path does not have source start id, so we cannot # figure out a relative path. Bail out. return None return normalized_path[source_start_id_index + len(SOURCE_START_ID):] return normalized_path
Converts platform to Chromium Dash platform. Note that Windows in Chromium Dash is win64 and we only want win32.
def _convert_platform_to_chromiumdash_platform(platform):
  """Converts platform to Chromium Dash platform. Note that Windows in
  Chromium Dash is win64 and we only want win32."""
  platform_lower = platform.lower()
  if platform_lower == 'windows':
    return 'Win32'
  return platform_lower.capitalize()
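A tiny standalone sketch of the mapping; the inputs are examples.

# Illustrative only: maps ClusterFuzz platform names to the form Chromium
# Dash expects; 'windows' is special-cased to 'Win32' rather than 'Win64'.
def to_chromiumdash_platform(platform):
  platform_lower = platform.lower()
  if platform_lower == 'windows':
    return 'Win32'
  return platform_lower.capitalize()

print(to_chromiumdash_platform('WINDOWS'))  # -> Win32
print(to_chromiumdash_platform('LINUX'))    # -> Linux
print(to_chromiumdash_platform('MAC'))      # -> Mac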
Makes a Call to chromiumdash's fetch_releases api, and returns its json array response.
def _fetch_releases_from_chromiumdash(platform, channel=None): """Makes a Call to chromiumdash's fetch_releases api, and returns its json array response.""" chromiumdash_platform = _convert_platform_to_chromiumdash_platform(platform) query_url = BUILD_INFO_URL_CD.format(platform=chromiumdash_platform) if channel: query_url = query_url + '&channel=' + channel build_info = utils.fetch_url(query_url) if not build_info: logs.log_error('Failed to fetch build info from %s' % query_url) return [] try: build_info_json = json.loads(build_info) if not build_info_json: logs.log_error('Empty response from %s' % query_url) return [] except Exception: logs.log_error('Malformed response from %s' % query_url) return [] return build_info_json
Gets the build information from Chromium Dash for production builds. Omits platforms containing digits, namely, win64. Omits channels containing underscore, namely, canary_asan. Platform is e.g. ANDROID, LINUX, MAC, WINDOWS.
def get_production_builds_info_from_cd(platform): """Gets the build information from Chromium Dash for production builds. Omits platforms containing digits, namely, win64. Omits channels containing underscore, namely, canary_asan. Platform is e.g. ANDROID, LINUX, MAC, WINDOWS. """ builds_metadata = [] build_info_json = _fetch_releases_from_chromiumdash(platform) for info in build_info_json: build_type = info['channel'].lower() if build_type == 'extended': build_type = 'extended_stable' version = info['version'] revision = info['hashes']['chromium'] builds_metadata.append(BuildInfo(platform, build_type, version, revision)) # Hack: pretend Windows extended stable info to be Linux extended stable info. # Because Linux doesn't have extended stable channel. if platform.lower() == 'linux': es_info = _fetch_releases_from_chromiumdash( 'WINDOWS', channel='Extended')[0] builds_metadata.append( BuildInfo(platform, 'extended_stable', es_info['version'], es_info['hashes']['chromium'])) return builds_metadata
Return milestone for a particular release.
def get_release_milestone(build_type, platform): """Return milestone for a particular release.""" if build_type == 'head': actual_build_type = 'canary' else: actual_build_type = build_type builds_metadata = get_production_builds_info_from_cd(platform) for build_metadata in builds_metadata: if build_metadata.build_type == actual_build_type: version_parts = build_metadata.version.split('.') milestone = version_parts[0] if milestone and milestone.isdigit(): return int(milestone) if actual_build_type == 'canary': # If there is no canary for that platform, just return canary from windows. return get_release_milestone('canary', 'windows') return None
Gets the build information.
def get_build_to_revision_mappings(platform=None): """Gets the build information.""" if not platform: platform = environment.platform() result = {} build_info_json = _fetch_releases_from_chromiumdash(platform) for info in build_info_json: build_type = info['channel'].lower() if build_type == 'extended': build_type = 'extended_stable' version = info['version'] revision = str(info['chromium_main_branch_position']) result[build_type] = {'revision': revision, 'version': version} # Hack: pretend Windows extended stable info to be Linux extended stable info. # Because Linux doesn't have extended stable channel. if platform.lower() == 'linux': es_info = _fetch_releases_from_chromiumdash( 'WINDOWS', channel='Extended')[0] result['extended_stable'] = { 'revision': str(es_info['chromium_main_branch_position']), 'version': es_info['version'] } return result
Return configuration data.
def get():
  """Return configuration data."""
  return data_types.Config.query().get()
Return a configuration key value.
def get_value(key): """Return a configuration key value.""" config = get() if not config: return None value = config.__getattribute__(key) # Decode if the value is base64 encoded. if value.startswith(BASE64_MARKER): return base64.b64decode(value[len(BASE64_MARKER):]) return value
Parses a value for a particular job type. If job type is not found, return the default value.
def get_value_for_job(data, target_job_type):
  """Parses a value for a particular job type. If job type is not found,
  return the default value."""
  # All data is in a single line, just return that.
  if ';' not in data:
    return data

  result = ''
  for line in data.splitlines():
    job_type, value = (line.strip()).split(';')
    if job_type == target_job_type or (job_type == 'default' and not result):
      result = value

  return result
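To show the ';'-separated, per-job format this parser assumes, here is a self-contained sketch with made-up job names and values.

# Illustrative only: the per-job configuration format parsed above. Each line
# is "<job_type>;<value>", with an optional "default" fallback line.
data = """default;--default-flag
libfuzzer_asan_example;--asan-flag
libfuzzer_msan_example;--msan-flag"""

def value_for_job(data, target_job_type):
  if ';' not in data:
    return data  # Single plain value, no per-job overrides.
  result = ''
  for line in data.splitlines():
    job_type, value = line.strip().split(';')
    if job_type == target_job_type or (job_type == 'default' and not result):
      result = value
  return result

print(value_for_job(data, 'libfuzzer_msan_example'))  # -> --msan-flag
print(value_for_job(data, 'unknown_job'))             # -> --default-flag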
Sets a configuration key value and commits change.
def set_value(key, value): """Sets a configuration key value and commits change.""" config = get() if not config: return try: config.__setattr__(key, value) except UnicodeDecodeError: value = '%s%s' % (BASE64_MARKER, base64.b64encode(value)) config.__setattr__(key, value) config.put()
Load yaml file and return parsed contents.
def _load_yaml_file(yaml_file_path):
  """Load yaml file and return parsed contents."""
  with open(yaml_file_path) as f:
    try:
      return yaml.safe_load(f.read())
    except Exception:
      raise errors.ConfigParseError(yaml_file_path)
Find a key in a yaml file.
def _find_key_in_yaml_file(yaml_file_path, search_keys, full_key_name, value_is_relative_path): """Find a key in a yaml file.""" if not os.path.isfile(yaml_file_path): return None result = _load_yaml_file(yaml_file_path) if not search_keys: # Give the entire yaml file contents. # |value_is_relative_path| is not applicable here. return result for search_key in search_keys: if not isinstance(result, dict): raise errors.InvalidConfigKey(full_key_name) if search_key not in result: return None result = result[search_key] if value_is_relative_path: yaml_directory = os.path.dirname(yaml_file_path) if isinstance(result, list): result = [os.path.join(yaml_directory, str(i)) for i in result] else: result = os.path.join(yaml_directory, str(result)) return result
Get the path of the yaml file and the key components given a full key name.
def _get_key_location(search_path, full_key_name):
  """Get the path of the yaml file and the key components given a full key
  name."""
  key_parts = full_key_name.split(SEPARATOR)
  dir_path = search_path

  # Find the directory components of the key path.
  for i, search_key in enumerate(key_parts):
    search_path = os.path.join(dir_path, search_key)
    if os.path.isdir(search_path):
      # Don't allow both a/b/... and a/b.yaml.
      if os.path.isfile(search_path + YAML_FILE_EXTENSION):
        raise errors.InvalidConfigKey(full_key_name)

      dir_path = search_path
    else:
      # The remainder of the key path is a yaml_filename.key1.key2...
      key_parts = key_parts[i:]
      break
  else:
    # The entirety of the key references a directory.
    key_parts = []

  if key_parts:
    return dir_path, key_parts[0] + YAML_FILE_EXTENSION, key_parts[1:]
  return dir_path, '', []
Validate that a root is valid.
def _validate_root(search_path, root): """Validate that a root is valid.""" if root is None: return True directory, filename, search_keys = _get_key_location(search_path, root) if not filename: # _get_key_location already validated that the directory exists, so the root # is valid. return True # Check that the yaml file and keys exist. yaml_path = os.path.join(directory, filename) return (_find_key_in_yaml_file( yaml_path, search_keys, root, value_is_relative_path=False) is not None)
Search the key in a search path.
def _search_key(search_path, full_key_name, value_is_relative_path):
  """Search the key in a search path."""
  directory, filename, search_keys = _get_key_location(search_path,
                                                       full_key_name)

  # Search in the yaml file.
  yaml_path = os.path.join(directory, filename)
  return _find_key_in_yaml_file(yaml_path, search_keys, full_key_name,
                                value_is_relative_path)
Attempt to convert an address from a string (hex) to an integer.
def address_to_integer(address):
  """Attempt to convert an address from a string (hex) to an integer."""
  try:
    return int(address, 16)
  except:
    return 0
Return true if the stacktrace has at least one marker in the marker list.
def has_marker(stacktrace, marker_list):
  """Return true if the stacktrace has at least one marker in the marker
  list."""
  for marker in marker_list:
    if marker in stacktrace:
      return True

  return False
Return whether the stacktrace needs to be ignored.
def ignore_stacktrace(crash_stacktrace): """Return whether the stacktrace needs to be ignored.""" # Filter crash based on search exclude pattern specified in job definition. search_excludes = environment.get_value('SEARCH_EXCLUDES') if search_excludes and re.search(search_excludes, crash_stacktrace): return True # Match stacktrace against custom defined blacklist regexes in project config. stack_blacklist_regexes = ( local_config.ProjectConfig().get('stacktrace.stack_blacklist_regexes')) if not stack_blacklist_regexes: return False stack_blacklist_regex = re.compile( r'(%s)' % '|'.join(stack_blacklist_regexes)) for line in crash_stacktrace.splitlines(): if stack_blacklist_regex.match(line): return True return False
Analyze the return code and console output to see if this was a crash.
def is_crash(return_code, console_output):
  """Analyze the return code and console output to see if this was a crash."""
  if not return_code:
    return False

  crash_signature = environment.get_value('CRASH_SIGNATURE')
  if crash_signature:
    return re.search(crash_signature, console_output)

  return True
Return true if it is a CHECK failure crash.
def is_check_failure_crash(stacktrace):
  """Return true if it is a CHECK failure crash."""
  # Android-specific exception patterns.
  if environment.is_android():
    if 'Device rebooted' in stacktrace:
      return True
    if 'JNI DETECTED ERROR IN APPLICATION:' in stacktrace:
      return True
    if re.match(r'.*FATAL EXCEPTION.*:', stacktrace, re.DOTALL):
      return True

    # FIXME: Analyze why this is not working with chrome.
    # If the process has died, it is worthwhile to catch this with even a
    # NULL stack.
    # process_died_regex = (r'.*Process %s.*\(pid [0-9]+\) has died' %
    #                       environment.get_value('PKG_NAME'))
    # if re.match(process_died_regex, stacktrace, re.DOTALL):
    #   return True

  # Application CHECK failure known patterns.
  if re.match(r'.*#\s*Fatal error in', stacktrace, re.DOTALL):
    return True
  if 'Check failed:' in stacktrace:
    return True

  # Memory debugging tool CHECK failure.
  if 'Sanitizer CHECK failed:' in stacktrace:
    return True

  return False
Return true if it is a memory debugging tool crash.
def is_memory_tool_crash(stacktrace): """Return true if it is a memory debugging tool crash.""" # Job-specific generic checks. crash_signature = environment.get_value('CRASH_SIGNATURE') if crash_signature and re.search(crash_signature, stacktrace): return True # Android specific check. # FIXME: Share this regex with stack_analyzer. if (environment.is_android() and re.match(r'.*signal.*\(SIG.*fault addr ([^ ]*)', stacktrace, re.DOTALL)): return True # Check if we have a complete stacktrace by location stacktrace end marker. # If not, bail out. if not has_marker(stacktrace, STACKTRACE_END_MARKERS): return False # Check if have a UBSan error. if has_ubsan_error(stacktrace): return True # Check if have a stacktrace start marker. if has_marker(stacktrace, STACKTRACE_TOOL_MARKERS): return True return False
Check to see if this is a null dereference crash address.
def is_null_dereference(int_address):
  """Check to see if this is a null dereference crash address."""
  return int_address < NULL_DEREFERENCE_BOUNDARY
Check to see if this is an ASSERT crash based on the address.
def is_assert_crash_address(int_address):
  """Check to see if this is an ASSERT crash based on the address."""
  return int_address in ASSERT_CRASH_ADDRESSES
Check whether the stacktrace contains any signal that indicates a non-security bug.
def has_signal_for_non_security_bug_type(stacktrace):
  """Check whether the stacktrace contains any signal that indicates a
  non-security bug."""
  if re.search(r'^[ \t]+#0[ \t]+0x[0-9a-f]+[ \t]+in gsignal ', stacktrace,
               re.MULTILINE):
    return True

  for signature in SIGNAL_SIGNATURES_NOT_SECURITY:
    if signature in stacktrace:
      return True

  return False
Based on unsymbolized crash parameters, determine whether it has security consequences or not.
def is_security_issue(crash_stacktrace, crash_type, crash_address): """Based on unsymbolized crash parameters, determine whether it has security consequences or not.""" # Stack traces of any type can be manually labelled as a security issue. if re.search('FuzzerSecurityIssue(Critical|High|Medium|Low)', crash_stacktrace): return True # eip == 0. if 'pc (nil) ' in crash_stacktrace: return True if 'pc 0x00000000 ' in crash_stacktrace: return True if 'pc 0x000000000000 ' in crash_stacktrace: return True if crash_type in CRASH_TYPES_NON_SECURITY: return False # JNI security crashes. if re.match( '.*JNI DETECTED ERROR[^\n]+(deleted|invalid|unexpected|unknown|wrong)', crash_stacktrace, re.DOTALL): return True if crash_type == 'CHECK failure': # TODO(ochang): Remove this once we pick up newer builds that distinguish # DCHECKs from CHECKs. checks_have_security_implication = environment.get_value( 'CHECKS_HAVE_SECURITY_IMPLICATION', False) return checks_have_security_implication # Debug CHECK failure should be marked with security implications. if crash_type in ('Security DCHECK failure', 'DCHECK failure'): return True # Hard crash, explicitly enforced in code. if (crash_type == 'Fatal error' or crash_type == 'Unreachable code' or crash_type.endswith('Exception') or crash_type.endswith('CHECK failure')): return False # LeakSanitizer, finds memory leaks. if '-leak' in crash_type: return False # ThreadSanitizer, finds data races. if 'Data race' in crash_type: return False # ThreadSanitizer, finds lock order issues. if 'Lock-order-inversion' in crash_type: return False if crash_type in UBSAN_CRASH_TYPES_SECURITY: return True if crash_type in UBSAN_CRASH_TYPES_NON_SECURITY: return False if crash_type in GOLANG_CRASH_TYPES_NON_SECURITY: return False # By default, any assert crash is a security crash. # This behavior can be changed by defining # |ASSERTS_HAVE_SECURITY_IMPLICATION| in job definition. if crash_type == 'ASSERT' or 'ASSERTION FAILED' in crash_stacktrace: asserts_have_security_implication = environment.get_value( 'ASSERTS_HAVE_SECURITY_IMPLICATION', True) return asserts_have_security_implication # Kernel Failures are security bugs if crash_type.startswith('Kernel failure'): return True if crash_type in EXTRA_SANITIZERS_SECURITY: return True if crash_type in EXTERNAL_TOOL_SECURITY: return True # No crash type, can't process. if not crash_type: return False if has_signal_for_non_security_bug_type(crash_stacktrace): return False # Anything we don't understand will be marked as security. if crash_type not in GENERIC_CRASH_TYPES: return True # Crash on an unknown address. if crash_type in GENERIC_CRASH_TYPES: # If the address is not near null, then we it is highly likely # to have security consequences. int_address = address_to_integer(crash_address) # This indicates that there was no assert, but a hard crash. # (as the assert would be caught by checks above). So, it # does have any security implication. if is_assert_crash_address(int_address): return False if not is_null_dereference(int_address): return True return False
Return a bool whether the process output contains UBSan errors that should be handled as crashes. Suppressions file alone does not provide granular control, e.g. to ignore left shift of negative value which can cause false positives in some projects e.g. Chromium.
def has_ubsan_error(stacktrace): """Return a bool whether the process output contains UBSan errors that should be handled as crashes. Suppressions file alone does not provide granular control, e.g. to ignore left shift of negative value which can cause false positives in some projects e.g. Chromium.""" if UBSAN_RUNTIME_ERROR not in stacktrace: return False # FIXME: Avoid opening this file on every single call. ubsan_ignores_file_path = environment.get_suppressions_file( 'ubsan', suffix='ignores') if not ubsan_ignores_file_path: # No ignore file exists or is empty, everything is allowed. return True with open(ubsan_ignores_file_path) as f: ubsan_ignore_signatures = f.read().splitlines() for line in stacktrace.splitlines(): ignore_line = False for signature in ubsan_ignore_signatures: if signature in line: ignore_line = True if ignore_line: continue if UBSAN_RUNTIME_ERROR in line: return True return False
Return whether or not the crash type is experimental.
def is_experimental_crash(crash_type):
  """Return whether or not the crash type is experimental."""
  return crash_type in EXPERIMENTAL_CRASH_TYPES

Levenshtein distance calculation: iterative with two matrix rows, based on the Wikipedia article and code by Christopher P. Matthews.
def _levenshtein_distance(string_1, string_2):
  """Levenshtein distance calculation: iterative with two matrix rows, based
  on the Wikipedia article and code by Christopher P. Matthews."""
  if string_1 == string_2:
    return 0
  if not string_1:
    return len(string_2)
  if not string_2:
    return len(string_1)

  v0 = list(range(len(string_2) + 1))
  v1 = [None] * (len(string_2) + 1)

  for i in range(len(string_1)):
    v1[0] = i + 1
    for j in range(len(string_2)):
      cost = 0 if string_1[i] == string_2[j] else 1
      v1[j + 1] = min(v1[j] + 1, v0[j + 1] + 1, v0[j] + cost)
    for j in range(len(v0)):
      v0[j] = v1[j]

  return v1[len(string_2)]
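A couple of example calls make the two-row formulation concrete; this is a standalone reimplementation with arbitrarily chosen inputs.

# Standalone two-row Levenshtein distance, mirroring the function above, with
# example inputs for illustration.
def levenshtein(a, b):
  if a == b:
    return 0
  if not a:
    return len(b)
  if not b:
    return len(a)
  v0 = list(range(len(b) + 1))
  v1 = [0] * (len(b) + 1)
  for i, ca in enumerate(a):
    v1[0] = i + 1
    for j, cb in enumerate(b):
      cost = 0 if ca == cb else 1
      v1[j + 1] = min(v1[j] + 1, v0[j + 1] + 1, v0[j] + cost)
    v0, v1 = v1, v0  # Reuse the two rows instead of copying.
  return v0[len(b)]

print(levenshtein('kitten', 'sitting'))  # -> 3
print(levenshtein('flaw', 'lawn'))       # -> 2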
Return a ratio on how similar two strings are.
def _similarity_ratio(string_1, string_2):
  """Return a ratio on how similar two strings are."""
  length_sum = len(string_1) + len(string_2)
  if length_sum == 0:
    return 1.0

  return (length_sum - _levenshtein_distance(string_1, string_2)) / (
      1.0 * length_sum)
Count number of frames which are the same (taking into account order).
def longest_common_subsequence(first_frames, second_frames):
  """Count number of frames which are the same (taking into account order)."""
  first_len = len(first_frames)
  second_len = len(second_frames)
  solution = [[0 for _ in range(second_len + 1)] for _ in range(first_len + 1)]

  for i in range(1, first_len + 1):
    for j in range(1, second_len + 1):
      if first_frames[i - 1] == second_frames[j - 1]:
        solution[i][j] = solution[i - 1][j - 1] + 1
      else:
        solution[i][j] = max(solution[i - 1][j], solution[i][j - 1])

  return solution[first_len][second_len]
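A short standalone demo of the frame-order-aware count; the frame names are made up.

# Illustrative only: counts stack frames shared between two crashes while
# preserving order, via the classic LCS dynamic program. Frame names are
# invented.
def lcs_length(first_frames, second_frames):
  m, n = len(first_frames), len(second_frames)
  table = [[0] * (n + 1) for _ in range(m + 1)]
  for i in range(1, m + 1):
    for j in range(1, n + 1):
      if first_frames[i - 1] == second_frames[j - 1]:
        table[i][j] = table[i - 1][j - 1] + 1
      else:
        table[i][j] = max(table[i - 1][j], table[i][j - 1])
  return table[m][n]

crash_a = ['malloc', 'ParseInput', 'HandleRequest', 'main']
crash_b = ['ParseInput', 'Validate', 'HandleRequest', 'main']
print(lcs_length(crash_a, crash_b))  # -> 3 (ParseInput, HandleRequest, main)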
Increase/decrease the given |severity| by |delta|.
def _modify_severity(severity, delta, min_severity=SecuritySeverity.LOW, max_severity=SecuritySeverity.CRITICAL): """Increase/decrease the given |severity| by |delta|.""" min_index = SEVERITY_ORDER.index(min_severity) max_index = SEVERITY_ORDER.index(max_severity) assert min_index != -1 and max_index != -1 severity_index = SEVERITY_ORDER.index(severity) assert severity_index != -1 max_index = min(len(SEVERITY_ORDER) - 1, max_index) severity_index += delta severity_index = min(severity_index, max_index) severity_index = max(severity_index, min_index) return SEVERITY_ORDER[severity_index]
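A usage sketch, assuming (as the defaults above suggest) that SEVERITY_ORDER runs LOW, MEDIUM, HIGH, CRITICAL:

# Bump MEDIUM up by one step.
assert _modify_severity(SecuritySeverity.MEDIUM, 1) == SecuritySeverity.HIGH
# Large deltas are clamped at the configured maximum...
assert _modify_severity(SecuritySeverity.HIGH, 5) == SecuritySeverity.CRITICAL
# ...and at the configured minimum.
assert _modify_severity(
    SecuritySeverity.MEDIUM, -1,
    min_severity=SecuritySeverity.MEDIUM) == SecuritySeverity.MEDIUM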
Return an analyzer for the given |name|.
def get_analyzer(name): """Return an analyzer for the given |name|.""" if name == 'sanitizer_generic': return SeverityAnalyzerSanitizer() if name == 'sanitizer_chrome': return SeverityAnalyzerSanitizerChrome(is_compromised_renderer=False) if name == 'sanitizer_chrome_compromised_renderer': return SeverityAnalyzerSanitizerChrome(is_compromised_renderer=True) return None
Convenience function to get the security severity of a crash.
def get_security_severity(crash_type, crash_output, job_name, requires_gestures): """Convenience function to get the security severity of a crash.""" analyzer = None severity_analyzer_name = environment.get_value('SECURITY_SEVERITY_ANALYZER') if severity_analyzer_name: analyzer = get_analyzer(severity_analyzer_name) else: is_chrome = 'chrome' in job_name or 'content_shell' in job_name is_sanitizer = ('_asan' in job_name or '_cfi' in job_name or '_lsan' in job_name or '_msan' in job_name or '_tsan' in job_name or '_ubsan' in job_name) if is_sanitizer: if is_chrome: analyzer = get_analyzer('sanitizer_chrome') else: analyzer = get_analyzer('sanitizer_generic') if not analyzer: return None return analyzer.analyze(crash_type, crash_output, requires_gestures)
Convert a severity value to a human-readable string.
def severity_to_string(severity): """Convert a severity value to a human-readable string.""" severity_map = { SecuritySeverity.CRITICAL: 'Critical', SecuritySeverity.HIGH: 'High', SecuritySeverity.MEDIUM: 'Medium', SecuritySeverity.LOW: 'Low', SecuritySeverity.MISSING: MISSING_VALUE_STRING, } return severity_map.get(severity, '')
Convert a string value to a severity value.
def string_to_severity(severity): """Convert a string value to a severity value.""" severity_map = { 'critical': SecuritySeverity.CRITICAL, 'high': SecuritySeverity.HIGH, 'medium': SecuritySeverity.MEDIUM, 'low': SecuritySeverity.LOW, } if severity.lower() in severity_map: return severity_map[severity.lower()] return SecuritySeverity.MISSING
Linkify Android Kernel or lkl stacktrace.
def linkify_kernel_or_lkl_stacktrace_if_needed(crash_info): """Linkify Android Kernel or lkl stacktrace.""" kernel_prefix = '' kernel_hash = '' if (environment.is_android_kernel() and not environment.is_android_cuttlefish() and (crash_info.found_android_kernel_crash or crash_info.is_kasan)): kernel_prefix, kernel_hash = \ android_kernel.get_kernel_prefix_and_full_hash() elif (environment.is_lkl_job() and crash_info.is_lkl and crash_info.lkl_kernel_build_id): kernel_prefix, kernel_hash = \ lkl_kernel.get_kernel_prefix_and_full_hash(crash_info.lkl_kernel_build_id) if kernel_prefix and kernel_hash: _linkify_android_kernel_stacktrace(crash_info, kernel_prefix, kernel_hash)
Linkify each stack frame of an Android kernel or lkl stacktrace using the given kernel prefix and hash.
def _linkify_android_kernel_stacktrace(crash_info, kernel_prefix, kernel_hash):
  """Linkify each stack frame of an Android kernel or lkl stacktrace using the
  given kernel prefix and hash."""
  temp_crash_stacktrace = ''
  for line in crash_info.crash_stacktrace.splitlines():
    temp_crash_stacktrace += android_kernel.get_kernel_stack_frame_link(
        line, kernel_prefix, kernel_hash) + '\n'

  crash_info.crash_stacktrace = temp_crash_stacktrace
Get crash parameters from crash data. Crash parameters include crash type, address, state and stacktrace. If the stacktrace is not already symbolized, we will try to symbolize it unless |symbolize_flag| is set to False. A symbolized stacktrace will contain inline frames, but we exclude them for purposes of crash state generation (this helps with testcase deduplication).
def get_crash_data(crash_data,
                   symbolize_flag=True,
                   fuzz_target=None,
                   already_symbolized=False,
                   detect_ooms_and_hangs=None) -> stacktraces.CrashInfo:
  """Get crash parameters from crash data.
  Crash parameters include crash type, address, state and stacktrace. If the
  stacktrace is not already symbolized, we will try to symbolize it unless
  |symbolize_flag| is set to False. A symbolized stacktrace will contain inline
  frames, but we exclude them for purposes of crash state generation (this
  helps with testcase deduplication)."""
  # Decide whether or not to symbolize the input stacktrace.
  # Note that Fuchsia logs are always symbolized.
  if symbolize_flag:
    # Defer imports since stack_symbolizer pulls in a lot of things.
    from clusterfuzz._internal.crash_analysis.stack_parsing import \
        stack_symbolizer
    crash_stacktrace_with_inlines = stack_symbolizer.symbolize_stacktrace(
        crash_data, enable_inline_frames=True)
    crash_stacktrace_without_inlines = stack_symbolizer.symbolize_stacktrace(
        crash_data, enable_inline_frames=False)
  else:
    # We were explicitly told not to symbolize via |symbolize_flag|. There is
    # no distinction between inline and non-inline frames for an unsymbolized
    # stacktrace.
    crash_stacktrace_with_inlines = crash_data
    crash_stacktrace_without_inlines = crash_data

  # Additional stack frame ignore regexes.
  custom_stack_frame_ignore_regexes = (
      local_config.ProjectConfig().get('stacktrace.stack_frame_ignore_regexes',
                                       []))

  if environment.get_value('TASK_NAME') == 'analyze':
    detect_v8_runtime_errors = True
  else:
    detect_v8_runtime_errors = environment.get_value('DETECT_V8_RUNTIME_ERRORS',
                                                     False)

  fuzz_target = fuzz_target or environment.get_value('FUZZ_TARGET')
  redzone_size = environment.get_value('REDZONE')
  if detect_ooms_and_hangs is None:
    detect_ooms_and_hangs = (
        environment.get_value('REPORT_OOMS_AND_HANGS') and
        (not redzone_size or
         redzone_size <= MAX_REDZONE_SIZE_FOR_OOMS_AND_HANGS))

  include_ubsan = 'halt_on_error=0' not in environment.get_value(
      'UBSAN_OPTIONS', '')

  stack_parser = stacktraces.StackParser(
      symbolized=symbolize_flag or already_symbolized,
      detect_ooms_and_hangs=detect_ooms_and_hangs,
      detect_v8_runtime_errors=detect_v8_runtime_errors,
      custom_stack_frame_ignore_regexes=custom_stack_frame_ignore_regexes,
      fuzz_target=fuzz_target,
      include_ubsan=include_ubsan)

  result = stack_parser.parse(crash_stacktrace_without_inlines)

  # Use the stacktrace with inlines for the result.
  if result.crash_stacktrace:
    result.crash_stacktrace = crash_stacktrace_with_inlines

  # Linkify Android Kernel or lkl stacktrace.
  linkify_kernel_or_lkl_stacktrace_if_needed(result)

  return result
Convert unsigned address to signed int64 (as defined in the proto).
def unsigned_to_signed(address): """Convert unsigned address to signed int64 (as defined in the proto).""" return (address - 2**64) if address >= 2**63 else address
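A quick sketch of the two's-complement wrap-around, with hypothetical values:

assert unsigned_to_signed(0) == 0
assert unsigned_to_signed(2**63 - 1) == 2**63 - 1  # Largest positive int64.
assert unsigned_to_signed(2**63) == -2**63         # Smallest negative int64.
assert unsigned_to_signed(2**64 - 1) == -1         # All 64 bits set.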
Addresses may be formatted as decimal, hex string with 0x or 0X prefix, or without any prefix. Convert to decimal int.
def format_address_to_dec(address, base=16): """Addresses may be formatted as decimal, hex string with 0x or 0X prefix, or without any prefix. Convert to decimal int.""" if address is None: return None address = str(address).replace('`', '').strip() if not address: return None # This is required for Chrome Win and Mac stacks, which mix decimal and hex. try_bases = [base, 16] if base != 16 else [base] for base_try in try_bases: try: address = int(address, base_try) return address except Exception: continue logs.log_warn('Error formatting address %s to decimal int64 in bases %s.' % (str(address), str(try_bases))) return None
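A usage sketch with hypothetical addresses, including the backtick-separated form seen in Windows debugger output:

assert format_address_to_dec('0xdeadbeef') == 0xdeadbeef
assert format_address_to_dec('deadbeef') == 0xdeadbeef   # No prefix, base 16.
assert format_address_to_dec('00007ff6`1234abcd') == 0x7ff61234abcd
assert format_address_to_dec('12345', base=10) == 12345  # Decimal input.
assert format_address_to_dec(None) is None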
Construct a path to the .dSYM bundle for the given binary. There are three possible cases for binary location in Chromium: 1. The binary is a standalone executable or dynamic library in the product dir, the debug info is in "binary.dSYM" in the product dir. 2. The binary is a standalone framework or .app bundle, the debug info is in "Framework.framework.dSYM" or "App.app.dSYM" in the product dir. 3. The binary is a framework or an .app bundle within another .app bundle (e.g. Outer.app/Contents/Versions/1.2.3.4/Inner.app), and the debug info is in Inner.app.dSYM in the product dir. The first case is handled by llvm-symbolizer, so we only need to construct .dSYM paths for .app bundles and frameworks.
def chrome_dsym_hints(binary): """Construct a path to the .dSYM bundle for the given binary. There are three possible cases for binary location in Chromium: 1. The binary is a standalone executable or dynamic library in the product dir, the debug info is in "binary.dSYM" in the product dir. 2. The binary is a standalone framework or .app bundle, the debug info is in "Framework.framework.dSYM" or "App.app.dSYM" in the product dir. 3. The binary is a framework or an .app bundle within another .app bundle (e.g. Outer.app/Contents/Versions/1.2.3.4/Inner.app), and the debug info is in Inner.app.dSYM in the product dir. The first case is handled by llvm-symbolizer, so we only need to construct .dSYM paths for .app bundles and frameworks.""" path_parts = binary.split(os.path.sep) app_positions = [] framework_positions = [] for index, part in enumerate(path_parts): if part.endswith('.app'): app_positions.append(index) elif part.endswith('.framework'): framework_positions.append(index) bundle_positions = app_positions + framework_positions if len(bundle_positions) == 0: # Case 1: this is a standalone executable or dylib. return [] # Cases 2 and 3. The outermost bundle (which is the only bundle in the case 2) # is located in the product dir. bundle_positions.sort() outermost_bundle = bundle_positions[0] product_dir = path_parts[:outermost_bundle] # In case 2 this is the same as |outermost_bundle|. innermost_bundle = bundle_positions[-1] innermost_bundle_dir = path_parts[innermost_bundle] innermost_bundle_dir = utils.strip_from_right(innermost_bundle_dir, '.app') innermost_bundle_dir = utils.strip_from_right(innermost_bundle_dir, '.framework') dsym_path = product_dir + [innermost_bundle_dir] result = '%s.dSYM' % os.path.sep.join(dsym_path) return [result]
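A sketch of the three cases with hypothetical Chromium-style paths, assuming a POSIX path separator and that utils.strip_from_right removes the given suffix when present:

# Case 1: standalone binary; llvm-symbolizer locates the .dSYM by itself.
assert chrome_dsym_hints('out/Release/unit_tests') == []
# Case 2: a standalone .app bundle in the product dir.
assert chrome_dsym_hints(
    'out/Release/Content Shell.app/Contents/MacOS/Content Shell') == [
        'out/Release/Content Shell.dSYM'
    ]
# Case 3: an inner .app bundle nested inside an outer one.
assert chrome_dsym_hints(
    'out/Release/Outer.app/Contents/Versions/1.2.3.4/Inner.app/'
    'Contents/MacOS/Inner') == ['out/Release/Inner.dSYM']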
Make this process and child processes stdout unbuffered.
def disable_buffering(): """Make this process and child processes stdout unbuffered.""" os.environ['PYTHONUNBUFFERED'] = '1' if not isinstance(sys.stdout, LineBuffered): # Don't wrap sys.stdout if it is already wrapped. # See https://github.com/google/clusterfuzz/issues/234 for why. # Since sys.stdout is a C++ object, it's impossible to do sys.stdout.write = # lambda... sys.stdout = LineBuffered(sys.stdout)
Clean up the filename, nulls out tool specific ones.
def fix_filename(file_name): """Clean up the filename, nulls out tool specific ones.""" file_name = re.sub('.*asan_[a-z_]*.cc:[0-9]*', '_asan_rtl_', file_name) file_name = re.sub('.*crtstuff.c:0', '', file_name) file_name = re.sub(':0$', '', file_name) # If we don't have a file name, just bail out. if not file_name or file_name.startswith('??'): return '' return os.path.normpath(file_name)
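A usage sketch with hypothetical file names, assuming POSIX path normalization:

# ASan runtime frames are collapsed into a sentinel value.
assert fix_filename('/src/llvm/asan/asan_interceptors.cc:345') == '_asan_rtl_'
# A trailing ':0' (no line info) is stripped and the path is normalized.
assert fix_filename('third_party//blink/renderer/core/dom/node.cc:0') == (
    'third_party/blink/renderer/core/dom/node.cc')
# Unknown files are nulled out.
assert fix_filename('??:0') == ''
assert fix_filename('') == ''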