response | instruction |
---|---|
Executes the postprocess step on the trusted (t)worker. | def tworker_postprocess(output_download_url) -> None:
"""Executes the postprocess step on the trusted (t)worker."""
with _MetricRecorder(_Subtask.POSTPROCESS, _Mode.BATCH) as recorder:
uworker_output = uworker_io.download_and_deserialize_uworker_output(
output_download_url)
set_uworker_env(uworker_output.uworker_input.uworker_env)
utask_module = get_utask_module(uworker_output.uworker_input.module_name)
recorder.set_task_details(
utask_module, uworker_output.uworker_input.job_type,
environment.platform(),
uworker_output.uworker_input.preprocess_start_time)
utask_module.utask_postprocess(uworker_output) |
Set up a regular build. | def setup_regular_build(request):
"""Set up a regular build."""
build = build_manager.RegularBuild(request.base_build_dir, request.revision,
request.build_url, request.target_weights,
request.build_prefix)
return _build_response(build.setup()) |
Clear build env vars. | def _clear_env():
"""Clear build env vars."""
environment.remove_key('APP_PATH')
environment.remove_key('APP_REVISION')
environment.remove_key('APP_PATH_DEBUG')
environment.remove_key('APP_DIR')
environment.remove_key('BUILD_DIR')
environment.remove_key('BUILD_URL')
environment.remove_key('FUZZ_TARGET') |
Handle build setup response. | def _handle_response(build, response):
"""Handle build setup response."""
if not response.result:
_clear_env()
return False
_update_env_from_response(response)
if not environment.get_value('APP_PATH'):
fuzzer_directory = environment.get_value('FUZZER_DIR')
if fuzzer_directory:
build_manager.set_environment_vars([fuzzer_directory])
environment.set_value('APP_REVISION', build.revision)
return True |
Update environment variables from response. | def _update_env_from_response(response):
"""Update environment variables from response."""
environment.set_value('APP_PATH', response.app_path)
environment.set_value('APP_PATH_DEBUG', response.app_path_debug)
environment.set_value('APP_DIR', response.app_dir)
environment.set_value('BUILD_DIR', response.build_dir)
environment.set_value('BUILD_URL', response.build_url)
environment.set_value('FUZZ_TARGET', response.fuzz_target)
environment.set_value('FUZZ_TARGET_COUNT', response.fuzz_target_count) |
Return whether or not |environment_variable| should be forwarded. | def is_forwarded_environment_variable(environment_variable):
"""Return whether or not |environment_variable| should be forwarded."""
return any(
pattern.match(environment_variable)
for pattern in FORWARDED_ENVIRONMENT_VARIABLES) |
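The pattern table itself is not part of this listing. Below is a minimal standalone sketch of what it could look like, assuming it is a list of compiled regexes (which the `pattern.match()` call above requires); the specific entries are made up:

```python
import re

# Hypothetical entries; the real FORWARDED_ENVIRONMENT_VARIABLES differs.
FORWARDED_ENVIRONMENT_VARIABLES = [
    re.compile(p) for p in (
        r'^ASAN_OPTIONS$',  # assumed entry
        r'^AFL_.*',         # assumed entry
    )
]

assert is_forwarded_environment_variable('AFL_SKIP_CPUFREQ')
assert not is_forwarded_environment_variable('HOME')
```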
Return whether or not |environment_variable|'s value should be rebased. | def should_rebase_environment_value(environment_variable):
"""Return whether or not |environment_variable|'s value should be rebased."""
return environment_variable in REBASED_ENVIRONMENT_VARIABLES |
Update worker's environment. | def update_environment(env):
"""Update worker's environment."""
processed_env = {}
for key, value in env.items():
if should_rebase_environment_value(key):
value = file_host.rebase_to_worker_root(value)
processed_env[key] = value
request = untrusted_runner_pb2.UpdateEnvironmentRequest(env=processed_env) # pylint: disable=no-member
host.stub().UpdateEnvironment(request) |
Copy allowed environment variables from |source_env|. | def set_environment_vars(env, source_env):
"""Copy allowed environment variables from |source_env|."""
if not source_env:
return
for name, value in source_env.items():
if is_forwarded_environment_variable(name):
# Avoid creating circular dependencies from importing environment by
# using os.getenv.
if os.getenv('TRUSTED_HOST') and should_rebase_environment_value(name):
value = file_host.rebase_to_worker_root(value)
env[name] = value |
Return environment for running an untrusted process. | def get_env_for_untrusted_process(overrides):
"""Return environment for running an untrusted process."""
env = {}
if overrides is not None:
set_environment_vars(env, overrides)
else:
set_environment_vars(env, os.environ)
return env |
Forward the environment variable if needed. | def forward_environment_variable(key, value):
"""Forward the environment variable if needed."""
if not host.is_initialized():
return
if is_forwarded_environment_variable(key):
update_environment({key: value}) |
Reset environment variables. | def reset_environment():
"""Reset environment variables."""
request = untrusted_runner_pb2.ResetEnvironmentRequest() # pylint: disable=no-member
host.stub().ResetEnvironment(request) |
Check whether |directory| is a parent of |path|. | def is_directory_parent(path, directory):
"""Check whether |directory| is a parent of |path|."""
path = os.path.abspath(path)
directory = os.path.abspath(directory)
path_components = path.split(os.sep)
directory_components = directory.split(os.sep)
if len(path_components) <= len(directory_components):
return False
return all(path_components[i] == directory_components[i]
for i in range(len(directory_components))) |
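A quick illustration (POSIX-style, made-up paths) of why the component-wise comparison is used instead of a plain prefix check:

```python
# Uses is_directory_parent() from above. A naive startswith() check would
# wrongly treat '/data/corpus-backup' as living inside '/data/corpus'.
print(is_directory_parent('/data/corpus/unit1', '/data/corpus'))         # True
print(is_directory_parent('/data/corpus-backup/unit1', '/data/corpus'))  # False
print('/data/corpus-backup/unit1'.startswith('/data/corpus'))            # True (the pitfall)
```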
Rebase a path. | def _rebase(path, target_base, cur_base):
"""Rebase a path."""
if not path:
# Don't rebase if the path is None or empty string (in case of default
# variable value).
return path
if os.path.abspath(path).startswith(target_base):
# Already rebased.
return path
rel_path = os.path.relpath(os.path.abspath(path), cur_base)
if rel_path == os.curdir:
return target_base
# Only paths relative to ROOT_DIR are supported.
assert not rel_path.startswith(os.pardir), 'Bad relative path %s' % rel_path
return os.path.join(target_base, rel_path) |
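To make the rebasing concrete, a small example with made-up POSIX-style host and worker roots (in production these come from the ROOT_DIR and WORKER_ROOT_DIR environment variables):

```python
# Hypothetical roots for illustration only.
host_root = '/home/host/clusterfuzz'
worker_root = '/home/worker/clusterfuzz'

host_path = '/home/host/clusterfuzz/bot/inputs/fuzzer-testcases/crash-1'
print(_rebase(host_path, worker_root, host_root))
# -> /home/worker/clusterfuzz/bot/inputs/fuzzer-testcases/crash-1

print(_rebase(worker_root, host_root, worker_root))
# -> /home/host/clusterfuzz  (the rel_path == os.curdir case)
```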
Return corresponding host root given a worker CF path. | def rebase_to_host_root(worker_path):
"""Return corresponding host root given a worker CF path."""
return _rebase(worker_path, environment.get_value('ROOT_DIR'),
environment.get_value('WORKER_ROOT_DIR')) |
Return corresponding worker path given a host CF path. | def rebase_to_worker_root(host_path):
"""Return corresponding worker path given a host CF path."""
return _rebase(host_path, environment.get_value('WORKER_ROOT_DIR'),
environment.get_value('ROOT_DIR')) |
Create a directory. | def create_directory(path, create_intermediates=False):
"""Create a directory."""
request = untrusted_runner_pb2.CreateDirectoryRequest( # pylint: disable=no-member
path=path,
create_intermediates=create_intermediates)
response = host.stub().CreateDirectory(request)
return response.result |
Remove a directory. If |recreate| is set, always creates the directory even
if it did not exist. | def remove_directory(path, recreate=False):
"""Remove a directory. If |recreate| is set, always creates the directory even
if it did not exist."""
request = untrusted_runner_pb2.RemoveDirectoryRequest( # pylint: disable=no-member
path=path, recreate=recreate)
response = host.stub().RemoveDirectory(request)
return response.result |
List files in the directory. Returns full file paths. | def list_files(path, recursive=False):
"""List files in the directory. Returns full file paths."""
request = untrusted_runner_pb2.ListFilesRequest( # pylint: disable=no-member
path=path, recursive=recursive)
response = host.stub().ListFiles(request)
return response.file_paths |
Copy file from host to worker. |worker_path| must be a full path (including
the filename). Any directories will be created if needed. | def copy_file_to_worker(host_path, worker_path):
"""Copy file from host to worker. |worker_path| must be a full path (including
the filename). Any directories will be created if needed."""
with open(host_path, 'rb') as f:
request_iterator = file_utils.file_chunk_generator(f)
metadata = [('path-bin', worker_path.encode('utf-8'))]
response = host.stub().CopyFileTo(request_iterator, metadata=metadata)
return response.result |
Write data to a file on the worker. | def write_data_to_worker(data, worker_path):
"""Write data to a file on the worker."""
request_iterator = file_utils.data_chunk_generator(data)
metadata = [('path-bin', worker_path.encode('utf-8'))]
response = host.stub().CopyFileTo(request_iterator, metadata=metadata)
return response.result |
Copy file from worker to host. | def copy_file_from_worker(worker_path, host_path):
"""Copy file from worker to host."""
request = untrusted_runner_pb2.CopyFileFromRequest(path=worker_path) # pylint: disable=no-member
response = host.stub().CopyFileFrom(request)
file_utils.write_chunks(host_path, response)
metadata = dict(response.trailing_metadata())
if metadata.get('result') != 'ok':
# file_utils.write_chunks always opens the file for writing, so remove it
# here.
os.remove(host_path)
return False
return True |
Recursively copy a directory to the worker. Directories are created as
needed. Unless |replace| is True, files already in |worker_directory| will remain
after this call. | def copy_directory_to_worker(host_directory, worker_directory, replace=False):
"""Recursively copy a directory to the worker. Directories are created as
needed. Unless |replace| is True, files already in |worker_directory| will remain
after this call."""
if replace:
remove_directory(worker_directory, recreate=True)
for root, _, files in shell.walk(host_directory):
for filename in files:
file_path = os.path.join(root, filename)
worker_file_path = os.path.join(
worker_directory, os.path.relpath(file_path, host_directory))
if not copy_file_to_worker(file_path, worker_file_path):
logs.log_warn('Failed to copy %s to worker.' % file_path)
return False
return True |
Recursively copy a directory from the worker. Directories are created as
needed. Unless |replace| is True, files already in |host_directory| will
remain after this call. | def copy_directory_from_worker(worker_directory, host_directory, replace=False):
"""Recursively copy a directory from the worker. Directories are created as
needed. Unless |replace| is True, files already in |host_directory| will
remain after this call."""
if replace and os.path.exists(host_directory):
shutil.rmtree(host_directory, ignore_errors=True)
os.mkdir(host_directory)
for worker_file_path in list_files(worker_directory, recursive=True):
relative_worker_file_path = os.path.relpath(worker_file_path,
worker_directory)
host_file_path = os.path.join(host_directory, relative_worker_file_path)
# Be careful with the path provided by the worker here. We want to make sure
# we're only writing files to |host_directory| and not outside it.
if not is_directory_parent(host_file_path, host_directory):
logs.log_warn('copy_directory_from_worker: Attempt to escape |host_dir|.')
return False
host_file_directory = os.path.dirname(host_file_path)
if not os.path.exists(host_file_directory):
os.makedirs(host_file_directory)
if not copy_file_from_worker(worker_file_path, host_file_path):
logs.log_warn('Failed to copy %s from worker.' % worker_file_path)
return False
return True |
stat() a path. | def stat(path):
"""stat() a path."""
request = untrusted_runner_pb2.StatRequest(path=path) # pylint: disable=no-member
response = host.stub().Stat(request)
if not response.result:
return None
return response |
Clear the testcase directories on the worker. | def clear_testcase_directories():
"""Clear the testcase directories on the worker."""
remove_directory(
rebase_to_worker_root(environment.get_value('FUZZ_INPUTS')),
recreate=True)
remove_directory(
rebase_to_worker_root(environment.get_value('FUZZ_INPUTS_DISK')),
recreate=True) |
Clear the build urls directory on the worker. | def clear_build_urls_directory():
"""Clear the build urls directory on the worker."""
remove_directory(
rebase_to_worker_root(environment.get_value('BUILD_URLS_DIR')),
recreate=True) |
Clear the temp directory on the worker. | def clear_temp_directory():
"""Clear the temp directory on the worker."""
remove_directory(environment.get_value('WORKER_BOT_TMPDIR'), recreate=True) |
Push all testcases to the worker. | def push_testcases_to_worker():
"""Push all testcases to the worker."""
local_testcases_directory = environment.get_value('FUZZ_INPUTS')
worker_testcases_directory = rebase_to_worker_root(local_testcases_directory)
return copy_directory_to_worker(
local_testcases_directory, worker_testcases_directory, replace=True) |
Pull all testcases from the worker. | def pull_testcases_from_worker():
"""Pull all testcases from the worker."""
local_testcases_directory = environment.get_value('FUZZ_INPUTS')
worker_testcases_directory = rebase_to_worker_root(local_testcases_directory)
return copy_directory_from_worker(
worker_testcases_directory, local_testcases_directory, replace=True) |
Get list of fuzz target paths. | def get_fuzz_targets(path):
"""Get list of fuzz target paths."""
request = untrusted_runner_pb2.GetFuzzTargetsRequest(path=path) # pylint: disable=no-member
response = host.stub().GetFuzzTargets(request)
return response.fuzz_target_paths |
Create a directory. | def create_directory(request, _):
"""Create a directory."""
result = shell.create_directory(request.path, request.create_intermediates)
return untrusted_runner_pb2.CreateDirectoryResponse(result=result) |
Remove a directory. | def remove_directory(request, _):
"""Remove a directory."""
result = shell.remove_directory(request.path, request.recreate)
return untrusted_runner_pb2.RemoveDirectoryResponse(result=result) |
List files. | def list_files(request, _):
"""List files."""
file_paths = []
if request.recursive:
for root, _, files in shell.walk(request.path):
for filename in files:
file_paths.append(os.path.join(root, filename))
else:
file_paths.extend(
os.path.join(request.path, path) for path in os.listdir(request.path))
return untrusted_runner_pb2.ListFilesResponse(file_paths=file_paths) |
Copy file from host to worker. | def copy_file_to_worker(request_iterator, context):
"""Copy file from host to worker."""
metadata = dict(context.invocation_metadata())
path = metadata['path-bin'].decode('utf-8')
# Create intermediate directories if needed.
directory = os.path.dirname(path)
if not os.path.exists(directory):
try:
os.makedirs(directory)
except Exception:
pass
if not os.path.isdir(directory):
# Failed to create intermediate directories.
return untrusted_runner_pb2.CopyFileToResponse(result=False)
file_utils.write_chunks(path, request_iterator)
return untrusted_runner_pb2.CopyFileToResponse(result=True) |
Copy file from worker to host. | def copy_file_from_worker(request, context):
"""Copy file from worker to host."""
path = request.path
if not os.path.isfile(path):
context.set_trailing_metadata([('result', 'invalid-path')])
return
with open(path, 'rb') as f:
yield from file_utils.file_chunk_generator(f)
context.set_trailing_metadata([('result', 'ok')]) |
Stat a path. | def stat(request, _):
"""Stat a path."""
if not os.path.exists(request.path):
return untrusted_runner_pb2.StatResponse(result=False)
stat_result = os.stat(request.path)
return untrusted_runner_pb2.StatResponse(
result=True,
st_mode=stat_result.st_mode,
st_size=stat_result.st_size,
st_atime=stat_result.st_atime,
st_mtime=stat_result.st_mtime,
st_ctime=stat_result.st_ctime) |
Get list of fuzz targets. | def get_fuzz_targets(request, _):
"""Get list of fuzz targets."""
fuzz_target_paths = fuzzers_utils.get_fuzz_targets_local(request.path)
return untrusted_runner_pb2.GetFuzzTargetsResponse(
fuzz_target_paths=fuzz_target_paths) |
Yields chunks from handle. | def file_chunk_generator(handle):
"""Yields chunks from handle."""
data = handle.read(config.FILE_TRANSFER_CHUNK_SIZE)
while data:
yield untrusted_runner_pb2.FileChunk(data=data) # pylint: disable=no-member
data = handle.read(config.FILE_TRANSFER_CHUNK_SIZE) |
Yields chunks for data. | def data_chunk_generator(data):
"""Yields chunks for data."""
index = 0
while index < len(data):
cur_chunk = data[index:index + config.FILE_TRANSFER_CHUNK_SIZE]
yield untrusted_runner_pb2.FileChunk(data=cur_chunk) # pylint: disable=no-member
index += config.FILE_TRANSFER_CHUNK_SIZE |
Writes chunks to file. | def write_chunks(file_path, chunk_iterator):
"""Writes chunks to file."""
with open(file_path, 'wb') as f:
for chunk in chunk_iterator:
f.write(chunk.data) |
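Below is a standalone round trip through `write_chunks` using a stand-in for the `FileChunk` proto; the chunk size is an assumed 64 KiB (the real value comes from `config.FILE_TRANSFER_CHUNK_SIZE`):

```python
import tempfile
from dataclasses import dataclass

CHUNK_SIZE = 64 * 1024  # assumption; see config.FILE_TRANSFER_CHUNK_SIZE


@dataclass
class FakeChunk:
  """Stand-in for untrusted_runner_pb2.FileChunk."""
  data: bytes


def fake_chunks(data):
  """Mimics data_chunk_generator() without the proto dependency."""
  for index in range(0, len(data), CHUNK_SIZE):
    yield FakeChunk(data=data[index:index + CHUNK_SIZE])


payload = b'A' * (3 * CHUNK_SIZE + 17)
with tempfile.NamedTemporaryFile(delete=False) as tmp:
  out_path = tmp.name

write_chunks(out_path, fake_chunks(payload))
with open(out_path, 'rb') as f:
  assert f.read() == payload
```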
Check the channel's state. | def _check_channel_state(wait_time):
"""Check the channel's state."""
with _host_state.channel_condition:
if (_host_state.channel_state in (ChannelState.READY,
ChannelState.INCONSISTENT)):
# Nothing to do in these states.
return _host_state.channel_state
# The channel is not ready, so we wait for a (re)connect.
_host_state.channel_condition.wait(wait_time)
return _host_state.channel_state |
Wrapper for stub calls to add error handling and retry logic. | def _wrap_call(func, num_retries=config.RPC_RETRY_ATTEMPTS):
"""Wrapper for stub calls to add error handling and retry logic."""
def wrapped(*args, **kwargs):
"""Wrapper for adding retry logic."""
for retry_attempt in range(num_retries + 1):
# Wait for channel to (re)connect if necessary.
state = _check_channel_state(config.RECONNECT_TIMEOUT_SECONDS)
if state == ChannelState.INCONSISTENT:
# No point retrying if the worker is inconsistent.
monitoring_metrics.HOST_INCONSISTENT_COUNT.increment()
logs.log_warn('Worker got into an inconsistent state.')
host_exit_no_return(return_code=0)
if state == ChannelState.NOT_READY:
# Channel still isn't ready.
logs.log_warn(
'Channel failed to become ready within reconnect timeout.')
if retry_attempt == num_retries:
# Last attempt.
host_exit_no_return()
continue
try:
return func(*args, **kwargs)
except grpc.RpcError as e:
# For timeouts, which aren't fatal errors, resurface the right
# exception.
if 'TimeoutError' in repr(e):
raise TimeoutError(str(e))
if num_retries == 0:
# Just re-raise the original exception if this RPC is not configured
# for retries.
raise
logs.log_warn('Failed RPC: ' + repr(e))
if retry_attempt == num_retries:
# Last attempt.
host_exit_no_return()
time.sleep(RPC_FAIL_WAIT_TIME)
return None
return wrapped |
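The `UntrustedRunnerStub` wrapper referenced later in `_connect()` is not shown in this listing. A plausible sketch of how `_wrap_call` could be applied to the generated gRPC stub follows; the class body is an assumption, while the method names mirror calls made elsewhere in this listing:

```python
class UntrustedRunnerStub:
  """Hypothetical stub wrapper applying _wrap_call to each RPC."""

  def __init__(self, channel):
    raw_stub = untrusted_runner_pb2_grpc.UntrustedRunnerStub(channel)
    # A status check could reasonably be configured not to retry.
    self.GetStatus = _wrap_call(raw_stub.GetStatus, num_retries=0)
    self.CopyFileTo = _wrap_call(raw_stub.CopyFileTo)
    self.CopyFileFrom = _wrap_call(raw_stub.CopyFileFrom)
    self.RunProcess = _wrap_call(raw_stub.RunProcess)
```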
Heartbeat thread. | def _do_heartbeat():
"""Heartbeat thread."""
# grpc stubs and channels should be thread-safe.
heartbeat_stub = heartbeat_pb2_grpc.HeartbeatStub(_host_state.channel)
while True:
try:
heartbeat_stub.Beat(
heartbeat_pb2.HeartbeatRequest(), # pylint: disable=no-member
timeout=config.HEARTBEAT_TIMEOUT_SECONDS)
except grpc.RpcError as e:
logs.log_warn('worker heartbeat failed: ' + repr(e))
time.sleep(config.HEARTBEAT_INTERVAL_SECONDS) |
Get the host worker assignment for the current host. | def _get_host_worker_assignment():
"""Get the host worker assignment for the current host."""
# This only needs to be called once before the host connects to the worker.
# This is because the host->worker assignment algorithm should ensure that a
# worker is reassigned only if it is also reimaged.
#
# If a worker is reimaged, then the host's connection state will be lost and
# it will restart its run_bot.py instance to figure out which worker to
# connect to again. We should never get into a case where worker re-assignment
# happens without the worker also being reimaged.
key = ndb.Key(data_types.HostWorkerAssignment,
environment.get_value('BOT_NAME'))
return key.get() |
Get the root TLS cert for connecting to the worker. | def _get_root_cert(project_name):
"""Get the root TLS cert for connecting to the worker."""
key = ndb.Key(data_types.WorkerTlsCert, project_name)
tls_cert = key.get()
if not tls_cert:
return None
assert tls_cert.cert_contents, 'Cert contents should not be empty.'
return tls_cert.cert_contents |
Initial connect to the worker. | def _connect():
"""Initial connect to the worker."""
worker_assignment = _get_host_worker_assignment()
assert worker_assignment is not None
assert worker_assignment.worker_name is not None
assert worker_assignment.project_name is not None
root_cert = _get_root_cert(worker_assignment.project_name)
if not root_cert:
logs.log_warn('TLS certs not yet generated.')
time.sleep(WAIT_TLS_CERT_SECONDS)
sys.exit(0)
environment.set_value(
'QUEUE_OVERRIDE',
untrusted.platform_name(worker_assignment.project_name, 'linux'))
server_name = worker_assignment.worker_name
if not environment.get_value('LOCAL_DEVELOPMENT'):
server_name += untrusted.internal_network_domain()
_host_state.worker_bot_name = worker_assignment.worker_name
credentials = grpc.ssl_channel_credentials(root_cert)
_host_state.channel = grpc.secure_channel(
'%s:%d' % (server_name, config.PORT),
credentials=credentials,
options=config.GRPC_OPTIONS)
_host_state.stub = UntrustedRunnerStub(_host_state.channel)
logs.log('Connecting to worker %s...' % server_name)
_host_state.channel.subscribe(
_channel_connectivity_changed, try_to_connect=True)
channel_state = _check_channel_state(config.INITIAL_CONNECT_TIMEOUT_SECONDS)
if channel_state == ChannelState.INCONSISTENT:
logs.log_warn('Worker inconsistent on initial connect.')
monitoring_metrics.HOST_INCONSISTENT_COUNT.increment()
host_exit_no_return(return_code=0)
if channel_state != ChannelState.READY:
raise untrusted.HostError('Failed to connect to worker.')
environment.set_value('WORKER_BOT_NAME', worker_assignment.worker_name)
_host_state.heartbeat_thread = threading.Thread(target=_do_heartbeat)
_host_state.heartbeat_thread.daemon = True
_host_state.heartbeat_thread.start() |
Callback for channel connectivity changes. | def _channel_connectivity_changed(connectivity):
"""Callback for channel connectivity changes."""
try:
with _host_state.channel_condition:
if connectivity == grpc.ChannelConnectivity.READY:
if _check_state():
logs.log('Connected to worker.')
_host_state.channel_state = ChannelState.READY
else:
_host_state.channel_state = ChannelState.INCONSISTENT
_host_state.channel_condition.notify_all()
return
_host_state.channel_state = ChannelState.NOT_READY
if connectivity == grpc.ChannelConnectivity.SHUTDOWN:
if _host_state.expect_shutdown:
# We requested a shutdown to update the source.
logs.log('Worker shutting down.')
return
raise untrusted.HostError('Unrecoverable error.')
except AttributeError:
# Python sets all globals to None on shutdown. Ignore.
logs.log('Shutting down.')
return
if connectivity == grpc.ChannelConnectivity.TRANSIENT_FAILURE:
logs.log_warn('Transient failure detected on worker channel.')
if connectivity == grpc.ChannelConnectivity.CONNECTING:
logs.log('Reconnecting to worker.') |
Check that the worker's state is consistent with the host's knowledge. | def _check_state():
"""Check that the worker's state is consistent with the host's knowledge."""
try:
status = stub().GetStatus(
untrusted_runner_pb2.GetStatusRequest(), # pylint: disable=no-member
timeout=config.GET_STATUS_TIMEOUT_SECONDS)
except grpc.RpcError:
logs.log_error('GetStatus failed.')
return False
if status.revision != utils.current_source_version():
logs.log_warn('Mismatching source revision: %s (host) vs %s (worker).' %
(utils.current_source_version(), status.revision))
return False
if _host_state.worker_bot_name != status.bot_name:
logs.log_warn('Worker bot name invalid (IP changed?).')
return False
if _host_state.worker_start_time:
if _host_state.worker_start_time == status.start_time:
return True
logs.log_warn('Worker start time changed.')
return False
_host_state.worker_start_time = status.start_time
return True |
Initialize channel to untrusted instance. | def init():
"""Initialize channel to untrusted instance."""
_connect() |
Return the UntrustedRunnerStub. | def stub():
"""Return the UntrustedRunnerStub."""
return _host_state.stub |
Update untrusted worker. | def update_worker():
"""Update untrusted worker."""
_host_state.expect_shutdown = True
try:
stub().UpdateSource(
untrusted_runner_pb2.UpdateSourceRequest(), # pylint: disable=no-member
timeout=config.UPDATE_SOURCE_TIMEOUT_SECONDS)
except grpc.RpcError:
# Assume server got the shutdown request.
pass |
Called when there is a host error. | def host_exit_no_return(return_code=1):
"""Called when there is a host error."""
if return_code:
monitoring_metrics.HOST_ERROR_COUNT.increment({'return_code': return_code})
# Always try to get the worker to exit too.
update_worker()
# Prevent exceptions during shutdown.
_host_state.channel.unsubscribe(_channel_connectivity_changed)
# This should bypass most exception handlers and prevent callers from catching
# it incorrectly.
logs.log('Shutting down host.', return_code=return_code)
raise untrusted.HostError(return_code) |
Return whether or not the host is initialized. | def is_initialized():
"""Return whether or not the host is initialized."""
return _host_state.stub is not None |
Add protobuf field to dict, if the field exists. | def get_protobuf_field(result, buf, field_name):
"""Add protobuf field to dict, if the field exists."""
if buf.HasField(field_name):
result[field_name] = getattr(buf, field_name) |
Encode string as utf-8 if it's unicode. | def encode_utf8_if_unicode(data):
"""Encode string as utf-8 if it's unicode."""
if isinstance(data, str):
return data.encode('utf-8')
return data |
Convert new_process.ProcessResult to proto. | def process_result_to_proto(process_result):
"""Convert new_process.ProcessResult to proto."""
process_result_proto = untrusted_runner_pb2.ProcessResult( # pylint: disable=no-member
return_code=process_result.return_code,
output=process_result.output,
time_executed=process_result.time_executed,
timed_out=process_result.timed_out)
process_result_proto.command.extend(process_result.command) # pylint: disable=no-member
return process_result_proto |
Implementation of RunAndWait. | def run_and_wait(request, _):
"""Implementation of RunAndWait."""
process_runner = new_process.ProcessRunner(request.executable_path,
request.default_args)
args = {}
protobuf_utils.get_protobuf_field(args, request.popen_args, 'bufsize')
protobuf_utils.get_protobuf_field(args, request.popen_args, 'executable')
protobuf_utils.get_protobuf_field(args, request.popen_args, 'shell')
protobuf_utils.get_protobuf_field(args, request.popen_args, 'cwd')
if request.popen_args.env_is_set:
args['env'] = request.popen_args.env
else:
args['env'] = None
args['additional_args'] = request.additional_args
protobuf_utils.get_protobuf_field(args, request, 'timeout')
protobuf_utils.get_protobuf_field(args, request, 'terminate_before_kill')
protobuf_utils.get_protobuf_field(args, request, 'terminate_wait_time')
protobuf_utils.get_protobuf_field(args, request, 'input_data')
protobuf_utils.get_protobuf_field(args, request, 'max_stdout_len')
logs.log('Running command: %s' % process_runner.get_command())
return untrusted_runner_pb2.RunAndWaitResponse( # pylint: disable=no-member
result=process_result_to_proto(process_runner.run_and_wait(**args))) |
Implementation of RunProcess. | def run_process(request, _):
"""Implementation of RunProcess."""
args = {}
protobuf_utils.get_protobuf_field(args, request, 'cmdline')
protobuf_utils.get_protobuf_field(args, request, 'current_working_directory')
protobuf_utils.get_protobuf_field(args, request, 'timeout')
protobuf_utils.get_protobuf_field(args, request, 'need_shell')
if request.gestures:
args['gestures'] = request.gestures
if request.env_copy:
args['env_copy'] = request.env_copy
protobuf_utils.get_protobuf_field(args, request, 'testcase_run')
protobuf_utils.get_protobuf_field(args, request, 'ignore_children')
return_code, execution_time, output = process_handler.run_process(**args)
response = untrusted_runner_pb2.RunProcessResponse( # pylint: disable=no-member
return_code=return_code,
execution_time=execution_time,
output=output)
return response |
Convert ProcessResult proto to new_process.ProcessResult. | def process_result_from_proto(process_result_proto):
"""Convert ProcessResult proto to new_process.ProcessResult."""
return new_process.ProcessResult(
process_result_proto.command, process_result_proto.return_code,
process_result_proto.output, process_result_proto.time_executed,
process_result_proto.timed_out) |
Remote version of process_handler.run_process. | def run_process(cmdline,
current_working_directory=None,
timeout=process_handler.DEFAULT_TEST_TIMEOUT,
need_shell=False,
gestures=None,
env_copy=None,
testcase_run=True,
ignore_children=True):
"""Remote version of process_handler.run_process."""
request = untrusted_runner_pb2.RunProcessRequest(
cmdline=cmdline,
current_working_directory=current_working_directory,
timeout=timeout,
need_shell=need_shell,
testcase_run=testcase_run,
ignore_children=ignore_children)
if gestures:
request.gestures.extend(gestures)
env = {}
# run_process's local behaviour is to apply the passed |env_copy| on top of
# the current environment instead of replacing it completely (like with
# subprocess).
environment.set_environment_vars(env, os.environ)
environment.set_environment_vars(env, env_copy)
request.env_copy.update(env)
response = host.stub().RunProcess(request)
return response.return_code, response.execution_time, response.output |
Terminate stale application instances. | def terminate_stale_application_instances():
"""Terminate stale application instances."""
host.stub().TerminateStaleApplicationInstances(
untrusted_runner_pb2.TerminateStaleApplicationInstancesRequest()) |
Symbolize stacktrace. | def symbolize_stacktrace(request):
"""Symbolize stacktrace."""
symbolized_stacktrace = stack_symbolizer.symbolize_stacktrace(
request.unsymbolized_crash_stacktrace, request.enable_inline_frames)
return untrusted_runner_pb2.SymbolizeStacktraceResponse( # pylint: disable=no-member
symbolized_stacktrace=symbolized_stacktrace) |
Symbolize stacktrace. | def symbolize_stacktrace(unsymbolized_crash_stacktrace,
enable_inline_frames=True):
"""Symbolize stacktrace."""
request = untrusted_runner_pb2.SymbolizeStacktraceRequest( # pylint: disable=no-member
unsymbolized_crash_stacktrace=protobuf_utils.encode_utf8_if_unicode(
unsymbolized_crash_stacktrace),
enable_inline_frames=enable_inline_frames)
response = host.stub().SymbolizeStacktrace(request)
return response.symbolized_stacktrace |
Convert fuzz_target to protobuf. | def _fuzz_target_to_proto(fuzz_target):
"""Convert fuzz_target to protobuf."""
return untrusted_runner_pb2.FuzzTarget(
engine=fuzz_target.engine,
project=fuzz_target.project,
binary=fuzz_target.binary,
) |
Do corpus pruning on untrusted worker. | def do_corpus_pruning(context, revision):
"""Do corpus pruning on untrusted worker."""
cross_pollinate_fuzzers = [
untrusted_runner_pb2.CrossPollinateFuzzer(
fuzz_target=_fuzz_target_to_proto(cpf.fuzz_target),
backup_bucket_name=cpf.backup_bucket_name,
corpus_engine_name=cpf.corpus_engine_name,
) for cpf in context.cross_pollinate_fuzzers
]
request = untrusted_runner_pb2.PruneCorpusRequest(
fuzz_target=_fuzz_target_to_proto(context.fuzz_target),
cross_pollinate_fuzzers=cross_pollinate_fuzzers,
revision=revision)
response = host.stub().PruneCorpus(request)
project_qualified_name = context.fuzz_target.project_qualified_name()
today_date = datetime.datetime.utcnow().date()
coverage_info = data_types.CoverageInformation(
fuzzer=project_qualified_name, date=today_date)
# Intentionally skip edge and function coverage values as those would come
# from fuzzer coverage cron task (see src/go/server/cron/coverage.go).
coverage_info.corpus_size_units = response.coverage_info.corpus_size_units
coverage_info.corpus_size_bytes = response.coverage_info.corpus_size_bytes
coverage_info.corpus_location = response.coverage_info.corpus_location
coverage_info.corpus_backup_location = (
response.coverage_info.corpus_backup_location)
coverage_info.quarantine_size_units = (
response.coverage_info.quarantine_size_units)
coverage_info.quarantine_size_bytes = (
response.coverage_info.quarantine_size_bytes)
coverage_info.quarantine_location = response.coverage_info.quarantine_location
crashes = [
corpus_pruning_task.CorpusCrash(
crash_state=crash.crash_state,
crash_type=crash.crash_type,
crash_address=crash.crash_address,
crash_stacktrace=crash.crash_stacktrace,
unit_path=crash.unit_path,
security_flag=crash.security_flag,
) for crash in response.crashes
]
result_stats = response.cross_pollination_stats
pollination_stats = corpus_pruning_task.CrossPollinationStats(
project_qualified_name=result_stats.project_qualified_name,
sources=result_stats.sources,
initial_corpus_size=result_stats.initial_corpus_size,
corpus_size=result_stats.corpus_size,
initial_edge_coverage=result_stats.initial_edge_coverage,
edge_coverage=result_stats.edge_coverage,
initial_feature_coverage=result_stats.initial_feature_coverage,
feature_coverage=result_stats.feature_coverage)
return corpus_pruning_task.CorpusPruningResult(
coverage_info=coverage_info,
crashes=crashes,
fuzzer_binary_name=response.fuzzer_binary_name,
revision=response.revision,
cross_pollination_stats=pollination_stats) |
Process testcase on untrusted worker. | def process_testcase(engine_name, tool_name, target_name, arguments,
testcase_path, output_path, timeout):
"""Process testcase on untrusted worker."""
if tool_name == 'minimize':
operation = untrusted_runner_pb2.ProcessTestcaseRequest.MINIMIZE
else:
operation = untrusted_runner_pb2.ProcessTestcaseRequest.CLEANSE
rebased_testcase_path = file_host.rebase_to_worker_root(testcase_path)
file_host.copy_file_to_worker(testcase_path, rebased_testcase_path)
request = untrusted_runner_pb2.ProcessTestcaseRequest(
engine=engine_name,
operation=operation,
target_name=target_name,
arguments=arguments,
testcase_path=file_host.rebase_to_worker_root(testcase_path),
output_path=file_host.rebase_to_worker_root(output_path),
timeout=timeout)
response = host.stub().ProcessTestcase(request)
rebased_output_path = file_host.rebase_to_worker_root(output_path)
file_host.copy_file_from_worker(rebased_output_path, output_path)
return engine.ReproduceResult(
list(response.command), response.return_code, response.time_executed,
response.output) |
Unpack protobuf values. | def _unpack_values(values):
"""Unpack protobuf values."""
unpacked = {}
for key, packed_value in values.items():
if packed_value.Is(wrappers_pb2.DoubleValue.DESCRIPTOR):
value = wrappers_pb2.DoubleValue()
elif packed_value.Is(wrappers_pb2.Int64Value.DESCRIPTOR):
value = wrappers_pb2.Int64Value()
elif packed_value.Is(wrappers_pb2.StringValue.DESCRIPTOR):
value = wrappers_pb2.StringValue()
else:
raise ValueError('Unknown stat type for ' + key)
packed_value.Unpack(value)
unpacked[key] = value.value
return unpacked |
Run engine fuzzer on untrusted worker. | def engine_fuzz(engine_impl, target_name, sync_corpus_directory,
testcase_directory):
"""Run engine fuzzer on untrusted worker."""
request = untrusted_runner_pb2.EngineFuzzRequest(
engine=engine_impl.name,
target_name=target_name,
sync_corpus_directory=file_host.rebase_to_worker_root(
sync_corpus_directory),
testcase_directory=file_host.rebase_to_worker_root(testcase_directory))
response = host.stub().EngineFuzz(request)
crashes = [
engine.Crash(
input_path=file_host.rebase_to_host_root(crash.input_path),
stacktrace=crash.stacktrace,
reproduce_args=crash.reproduce_args,
crash_time=crash.crash_time) for crash in response.crashes
]
unpacked_stats = _unpack_values(response.stats)
unpacked_strategies = _unpack_values(response.strategies)
result = engine.FuzzResult(
logs=response.logs,
command=list(response.command),
crashes=crashes,
stats=unpacked_stats,
time_executed=response.time_executed)
file_host.pull_testcases_from_worker()
return result, dict(response.fuzzer_metadata), unpacked_strategies |
Run engine reproduce on untrusted worker. | def engine_reproduce(engine_impl, target_name, testcase_path, arguments,
timeout):
"""Run engine reproduce on untrusted worker."""
rebased_testcase_path = file_host.rebase_to_worker_root(testcase_path)
file_host.copy_file_to_worker(testcase_path, rebased_testcase_path)
request = untrusted_runner_pb2.EngineReproduceRequest(
engine=engine_impl.name,
target_name=target_name,
testcase_path=rebased_testcase_path,
arguments=arguments,
timeout=timeout)
try:
response = host.stub().EngineReproduce(request)
except grpc.RpcError as e:
if 'TargetNotFoundError' in repr(e):
# Resurface the right exception.
raise testcase_manager.TargetNotFoundError('Failed to find target ' +
target_name)
raise
return engine.ReproduceResult(
list(response.command), response.return_code, response.time_executed,
response.output) |
Convert protobuf to FuzzTarget. | def _proto_to_fuzz_target(proto):
"""Convert protobuf to FuzzTarget."""
return data_types.FuzzTarget(
engine=proto.engine, project=proto.project, binary=proto.binary) |
Convert protobuf to CrossPollinateFuzzer. | def _proto_to_cross_pollinate_fuzzer(proto):
"""Convert protobuf to CrossPollinateFuzzer."""
return corpus_pruning_task.CrossPollinateFuzzer(
fuzz_target=_proto_to_fuzz_target(proto.fuzz_target),
backup_bucket_name=proto.backup_bucket_name,
corpus_engine_name=proto.corpus_engine_name) |
Prune corpus. | def prune_corpus(request, _):
"""Prune corpus."""
context = corpus_pruning_task.Context(
None, _proto_to_fuzz_target(request.fuzz_target), [
_proto_to_cross_pollinate_fuzzer(proto)
for proto in request.cross_pollinate_fuzzers
])
result = corpus_pruning_task.do_corpus_pruning(context, request.revision)
cross_pollination_stats = None
if result.cross_pollination_stats:
cross_pollination_stats = untrusted_runner_pb2.CrossPollinationStats(
project_qualified_name=result.cross_pollination_stats.
project_qualified_name,
sources=result.cross_pollination_stats.sources,
initial_corpus_size=result.cross_pollination_stats.initial_corpus_size,
corpus_size=result.cross_pollination_stats.corpus_size,
initial_edge_coverage=result.cross_pollination_stats.
initial_edge_coverage,
edge_coverage=result.cross_pollination_stats.edge_coverage,
initial_feature_coverage=result.cross_pollination_stats.
initial_feature_coverage,
feature_coverage=result.cross_pollination_stats.feature_coverage)
# Intentionally skip edge and function coverage values as those would come
# from fuzzer coverage cron task (see src/go/server/cron/coverage.go).
coverage_info = untrusted_runner_pb2.CoverageInfo(
corpus_size_units=result.coverage_info.corpus_size_units,
corpus_size_bytes=result.coverage_info.corpus_size_bytes,
corpus_location=result.coverage_info.corpus_location,
corpus_backup_location=result.coverage_info.corpus_backup_location,
quarantine_size_units=result.coverage_info.quarantine_size_units,
quarantine_size_bytes=result.coverage_info.quarantine_size_bytes,
quarantine_location=result.coverage_info.quarantine_location)
crashes = [
untrusted_runner_pb2.CorpusCrash(
crash_state=crash.crash_state,
crash_type=crash.crash_type,
crash_address=crash.crash_address,
crash_stacktrace=crash.crash_stacktrace,
unit_path=crash.unit_path,
security_flag=crash.security_flag,
) for crash in result.crashes
]
return untrusted_runner_pb2.PruneCorpusResponse(
coverage_info=coverage_info,
crashes=crashes,
fuzzer_binary_name=result.fuzzer_binary_name,
revision=result.revision,
cross_pollination_stats=cross_pollination_stats) |
Process testcase. | def process_testcase(request, _):
"""Process testcase."""
tool_name_map = {
untrusted_runner_pb2.ProcessTestcaseRequest.MINIMIZE: 'minimize',
untrusted_runner_pb2.ProcessTestcaseRequest.CLEANSE: 'cleanse',
}
# TODO(ochang): Support other engines.
assert request.engine == 'libFuzzer'
assert request.operation in tool_name_map
result = minimize_task.run_libfuzzer_engine(
tool_name_map[request.operation], request.target_name, request.arguments,
request.testcase_path, request.output_path, request.timeout)
return untrusted_runner_pb2.EngineReproduceResult(
return_code=result.return_code,
time_executed=result.time_executed,
output=result.output) |
Pack protobuf values. | def _pack_values(values):
"""Pack protobuf values."""
packed = {}
if values is None:
return packed
for key, value in values.items():
packed_value = Any()
if isinstance(value, float):
packed_value.Pack(wrappers_pb2.DoubleValue(value=value))
elif isinstance(value, int):
packed_value.Pack(wrappers_pb2.Int64Value(value=value))
elif isinstance(value, str):
packed_value.Pack(wrappers_pb2.StringValue(value=value))
else:
raise ValueError('Unknown stat type for ' + key)
packed[key] = packed_value
return packed |
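A quick round trip through `_pack_values` (above) and `_unpack_values` (earlier in this listing), assuming the `google.protobuf` wrapper imports this module already relies on; the stat names are made up:

```python
from google.protobuf import wrappers_pb2
from google.protobuf.any_pb2 import Any

stats = {'execs_per_sec': 1234.5, 'new_units_added': 7, 'strategy': 'value_profile'}
packed = _pack_values(stats)  # dict of protobuf Any values
assert _unpack_values(packed) == stats
```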
Run engine fuzzer. | def engine_fuzz(request, _):
"""Run engine fuzzer."""
engine_impl = engine.get(request.engine)
result, fuzzer_metadata, strategies = fuzz_task.run_engine_fuzzer(
engine_impl, request.target_name, request.sync_corpus_directory,
request.testcase_directory)
crashes = [
untrusted_runner_pb2.EngineCrash(
input_path=crash.input_path,
stacktrace=crash.stacktrace,
reproduce_args=crash.reproduce_args,
crash_time=crash.crash_time) for crash in result.crashes
]
packed_stats = _pack_values(result.stats)
packed_strategies = _pack_values(strategies)
return untrusted_runner_pb2.EngineFuzzResponse(
logs=result.logs,
command=result.command,
crashes=crashes,
stats=packed_stats,
time_executed=result.time_executed,
fuzzer_metadata=fuzzer_metadata,
strategies=packed_strategies) |
Run engine reproduce. | def engine_reproduce(request, _):
"""Run engine reproduce."""
engine_impl = engine.get(request.engine)
result = testcase_manager.engine_reproduce(engine_impl, request.target_name,
request.testcase_path,
request.arguments, request.timeout)
return untrusted_runner_pb2.EngineReproduceResult(
command=result.command,
return_code=result.return_code,
time_executed=result.time_executed,
output=result.output) |
Wrap a servicer to add additional functionality. | def wrap_servicer(func):
"""Wrap a servicer to add additional functionality."""
@functools.wraps(func)
def wrapper(self, request, context): # pylint: disable=unused-argument
"""Wrapper function."""
global _rpc_count
# Check if there is an in-progress RPC.
with _rpc_count_lock:
if _rpc_count > 0:
logs.log_error('Hung RPC detected, shutting down.')
_worker_state.shutting_down.set()
return None
_rpc_count += 1
try:
result = func(self, request, context)
except Exception:
# Include full exception details.
context.set_code(grpc.StatusCode.UNKNOWN)
context.set_details(traceback.format_exc())
raise
finally:
with _rpc_count_lock:
assert _rpc_count > 0
_rpc_count -= 1
return result
return wrapper |
Get the TLS cert from instance metadata. | def _get_tls_cert_and_key():
"""Get the TLS cert from instance metadata."""
# TODO(ochang): Implement a fake metadata server for testing.
local_cert_location = environment.get_value('UNTRUSTED_TLS_CERT_FOR_TESTING')
local_key_location = environment.get_value('UNTRUSTED_TLS_KEY_FOR_TESTING')
if local_cert_location and local_key_location:
with open(local_cert_location, 'rb') as f:
cert_contents = f.read()
with open(local_key_location, 'rb') as f:
key_contents = f.read()
return cert_contents, key_contents
cert_contents = compute_metadata.get('instance/attributes/tls-cert').encode()
key_contents = compute_metadata.get('instance/attributes/tls-key').encode()
return cert_contents, key_contents |
Start the server. | def start_server():
"""Start the server."""
# Check overall free disk space. If we are running too low, clear all
# data directories like builds, fuzzers, data bundles, etc.
shell.clear_data_directories_on_low_disk_space()
cert_contents, key_contents = _get_tls_cert_and_key()
assert cert_contents and key_contents
server_credentials = grpc.ssl_server_credentials([(key_contents,
cert_contents)])
_worker_state.server = grpc.server(
futures.ThreadPoolExecutor(max_workers=config.NUM_WORKER_THREADS),
options=config.GRPC_OPTIONS)
untrusted_runner_pb2_grpc.add_UntrustedRunnerServicer_to_server(
UntrustedRunnerServicer(), _worker_state.server)
heartbeat_pb2_grpc.add_HeartbeatServicer_to_server(HeartbeatServicer(),
_worker_state.server)
_worker_state.server.add_secure_port('[::]:%d' % config.PORT,
server_credentials)
_worker_state.start_time = int(time.time())
_worker_state.server.start()
logs.log('Server started.')
# Run forever until shutdown.
_worker_state.shutting_down.wait()
logs.log('Server shutting down.')
stopped = _worker_state.server.stop(SHUTDOWN_GRACE_SECONDS)
stopped.wait()
# Prevent python GIL deadlocks on shutdown. See https://crbug.com/744680.
# pylint: disable=protected-access
os._exit(0) |
Return the grpc.Server. | def server():
"""Return the grpc.Server."""
return _worker_state.server |
Search the input directory and additional paths for the requested file. | def get_absolute_testcase_file(request_path):
"""Search the input directory and additional paths for the requested file."""
# Gather the list of search path directories.
current_working_directory = os.getcwd()
data_directory = environment.get_value('FUZZ_DATA')
input_directory = environment.get_value('INPUT_DIR')
fuzzer_directory = environment.get_value('FUZZERS_DIR')
layout_tests_directory = os.path.join(data_directory, 'LayoutTests')
layout_tests_http_tests_directory = os.path.join(layout_tests_directory,
'http', 'tests')
layout_tests_wpt_tests_directory = os.path.join(layout_tests_directory,
'external', 'wpt')
# TODO(mbarbella): Add support for aliasing and directories from
# https://cs.chromium.org/chromium/src/third_party/blink/tools/blinkpy/web_tests/servers/apache_http.py?q=apache_http.py&sq=package:chromium&dr&l=60
# Check all search paths for the requested file.
search_paths = [
current_working_directory,
fuzzer_directory,
input_directory,
layout_tests_directory,
layout_tests_http_tests_directory,
layout_tests_wpt_tests_directory,
]
for search_path in search_paths:
base_string = search_path + os.path.sep
path = request_path.lstrip('/')
if not path or path.endswith('/'):
path += 'index.html'
absolute_path = os.path.abspath(os.path.join(search_path, path))
if (absolute_path.startswith(base_string) and
os.path.exists(absolute_path) and not os.path.isdir(absolute_path)):
return absolute_path
return None |
Guess mime type based on file extension. | def guess_mime_type(filename):
"""Guess mime type based on file extension."""
if not mimetypes.inited:
mimetypes.init()
return mimetypes.guess_type(filename)[0] |
Run the HTTP server on the given port. | def run_server(host, port):
"""Run the HTTP server on the given port."""
httpd = BotHTTPServer((host, port), RequestHandler)
httpd.serve_forever() |
Initialize the HTTP server on the specified ports. | def start():
"""Initialize the HTTP server on the specified ports."""
http_host = 'localhost'
http_port_1 = environment.get_value('HTTP_PORT_1', 8000)
http_port_2 = environment.get_value('HTTP_PORT_2', 8080)
if not port_is_open(http_host, http_port_1):
start_server_thread(http_host, http_port_1)
else:
logs.log_warn(
f"HTTP_PORT_1 ({http_port_1}) already open, not starting server thread."
)
if not port_is_open(http_host, http_port_2):
start_server_thread(http_host, http_port_2)
else:
logs.log_warn(
f"HTTP_PORT_2 ({http_port_2}) already open, not starting server thread."
) |
Opens the archive and gets the appropriate build archive based on the
`archive_path`. The resulting object is usable as a normal archive reader,
but provides additional features related to build handling.
Args:
archive_path: the path to the archive.
Raises:
If the file could not be opened or if the archive type cannot be handled.
Returns:
the build archive. | def open(archive_path: str) -> BuildArchive:
"""Opens the archive and gets the appropriate build archive based on the
`archive_path`. The resulting object is usable as a normal archive reader,
but provides additional features related to build handling.
Args:
archive_path: the path to the archive.
Raises:
If the file could not be opened or if the archive type cannot be handled.
Returns:
the build archive.
"""
reader = archive.open(archive_path)
# Unfortunately, there is no good heuristic for determining which build
# archive implementation to use.
# However, we can search the archive for files whose presence gives us a hint.
# For instance, Chrome build archives embed `args.gn`. Let's use this for now.
# Being wrong is no big deal here, because BuildArchive is designed so that
# we always fall back on default behaviour.
args_gn_path = os.path.join(reader.root_dir(), 'args.gn')
if reader.file_exists(args_gn_path):
return ChromeBuildArchive(reader)
return DefaultBuildArchive(reader) |
Get the base directory for a build. | def _base_build_dir(bucket_path):
"""Get the base directory for a build."""
job_name = environment.get_value('JOB_NAME')
return _get_build_directory(bucket_path, job_name) |
Try to make the requested number of bytes available by deleting builds. | def _make_space(requested_size, current_build_dir=None):
"""Try to make the requested number of bytes available by deleting builds."""
if utils.is_chromium():
min_free_disk_space = MIN_FREE_DISK_SPACE_CHROMIUM
else:
min_free_disk_space = MIN_FREE_DISK_SPACE_DEFAULT
builds_directory = environment.get_value('BUILDS_DIR')
error_message = 'Need at least %d GB of free disk space.' % (
(min_free_disk_space + requested_size) // 1024**3)
for _ in range(MAX_EVICTED_BUILDS):
free_disk_space = shell.get_free_disk_space(builds_directory)
if free_disk_space is None:
# Can't determine free disk space, bail out.
return False
if requested_size + min_free_disk_space < free_disk_space:
return True
if not _evict_build(current_build_dir):
logs.log_error(error_message)
return False
free_disk_space = shell.get_free_disk_space(builds_directory)
result = requested_size + min_free_disk_space < free_disk_space
if not result:
logs.log_error(error_message)
return result |
Remove the least recently used build to make room. | def _evict_build(current_build_dir):
"""Remove the least recently used build to make room."""
builds_directory = environment.get_value('BUILDS_DIR')
least_recently_used = None
least_recently_used_timestamp = None
for build_directory in os.listdir(builds_directory):
absolute_build_directory = os.path.abspath(
os.path.join(builds_directory, build_directory))
if not os.path.isdir(absolute_build_directory):
continue
if os.path.commonpath(
[absolute_build_directory,
os.path.abspath(current_build_dir)]) == absolute_build_directory:
# Don't evict the build we're trying to extract. This could be a parent
# directory of where we're currently extracting to.
continue
build = BaseBuild(absolute_build_directory)
timestamp = build.last_used_time()
if (least_recently_used_timestamp is None or
timestamp < least_recently_used_timestamp):
least_recently_used_timestamp = timestamp
least_recently_used = build
if not least_recently_used:
return False
logs.log(
'Deleting build %s to save space.' % least_recently_used.base_build_dir)
least_recently_used.delete()
return True |
Handle non-recoverable error on Windows. This is usually either due to disk
corruption or processes failing to terminate using regular methods. Force a
restart for recovery. | def _handle_unrecoverable_error_on_windows():
"""Handle non-recoverable error on Windows. This is usually either due to disk
corruption or processes failing to terminate using regular methods. Force a
restart for recovery."""
if environment.platform() != 'WINDOWS':
return
logs.log_error('Unrecoverable error, restarting machine...')
time.sleep(60)
utils.restart_machine() |
Remove scheme from the bucket path. | def _remove_scheme(bucket_path):
"""Remove scheme from the bucket path."""
if '://' not in bucket_path:
raise BuildManagerError('Invalid bucket path: ' + bucket_path)
return bucket_path.split('://')[1] |
Return the build directory based on bucket path and job name. | def _get_build_directory(bucket_path, job_name):
"""Return the build directory based on bucket path and job name."""
builds_directory = environment.get_value('BUILDS_DIR')
# If we have a bucket path, jobs sharing that path should share the same
# build directory.
if bucket_path:
path = _remove_scheme(bucket_path).lstrip('/')
bucket_path, file_pattern = path.rsplit('/', 1)
bucket_path = bucket_path.replace('/', '_')
# Remove similar build types to force them in same directory.
file_pattern = utils.remove_sub_strings(file_pattern, BUILD_TYPE_SUBSTRINGS)
file_pattern_hash = utils.string_hash(file_pattern)
job_directory = f'{bucket_path}_{file_pattern_hash}'
else:
job_directory = job_name
return os.path.join(builds_directory, job_directory) |
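To make the "same directory" comment concrete, here is a self-contained sketch of the naming scheme. `utils.string_hash`, `utils.remove_sub_strings`, and `BUILD_TYPE_SUBSTRINGS` are not part of this listing, so the stand-ins below (a SHA-1 prefix and the '-release'/'-debug' substrings) are assumptions:

```python
import hashlib

BUILD_TYPE_SUBSTRINGS = ['-release', '-debug']  # assumed values


def fake_string_hash(value):
  """Stand-in for utils.string_hash."""
  return hashlib.sha1(value.encode()).hexdigest()[:10]


def job_directory_for(bucket_path):
  path = bucket_path.split('://')[1].lstrip('/')
  prefix, file_pattern = path.rsplit('/', 1)
  prefix = prefix.replace('/', '_')
  for substring in BUILD_TYPE_SUBSTRINGS:
    file_pattern = file_pattern.replace(substring, '')
  return f'{prefix}_{fake_string_hash(file_pattern)}'


# Release and debug bucket paths collapse to the same directory once the
# build type substring is stripped from the file pattern.
print(job_directory_for('gs://my-builds/linux/app-release-([0-9]+).zip'))
print(job_directory_for('gs://my-builds/linux/app-debug-([0-9]+).zip'))
```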
Sets a random fuzz target for fuzzing. | def set_random_fuzz_target_for_fuzzing_if_needed(fuzz_targets, target_weights):
"""Sets a random fuzz target for fuzzing."""
fuzz_target = environment.get_value('FUZZ_TARGET')
if fuzz_target:
logs.log('Use previously picked fuzz target %s for fuzzing.' % fuzz_target)
return fuzz_target
if not environment.is_engine_fuzzer_job():
return None
fuzz_targets = list(fuzz_targets)
if not fuzz_targets:
logs.log_error('No fuzz targets found. Unable to pick random one.')
return None
fuzz_target = fuzzer_selection.select_fuzz_target(fuzz_targets,
target_weights)
environment.set_value('FUZZ_TARGET', fuzz_target)
logs.log('Picked fuzz target %s for fuzzing.' % fuzz_target)
return fuzz_target |
Set up build directories for a job. | def _setup_build_directories(base_build_dir):
"""Set up build directories for a job."""
# Create the root build directory for this job.
shell.create_directory(base_build_dir, create_intermediates=True)
custom_binary_directory = os.path.join(base_build_dir, 'custom')
revision_build_directory = os.path.join(base_build_dir, 'revisions')
sym_build_directory = os.path.join(base_build_dir, 'symbolized')
sym_debug_build_directory = os.path.join(sym_build_directory, 'debug')
sym_release_build_directory = os.path.join(sym_build_directory, 'release')
build_directories = [
custom_binary_directory, revision_build_directory, sym_build_directory,
sym_debug_build_directory, sym_release_build_directory
]
for build_directory in build_directories:
shell.create_directory(build_directory) |
Set build-related environment variables (APP_PATH, APP_DIR etc) by walking
through the build directory. | def set_environment_vars(search_directories, app_path='APP_PATH',
env_prefix=''):
"""Set build-related environment variables (APP_PATH, APP_DIR etc) by walking
through the build directory."""
app_name = environment.get_value(env_prefix + 'APP_NAME')
llvm_symbolizer_filename = environment.get_executable_filename(
'llvm-symbolizer')
llvm_symbolizer_path = None
gn_args_filename = 'args.gn'
gn_args_path = None
platform = environment.platform()
absolute_file_path = None
app_directory = None
use_default_llvm_symbolizer = environment.get_value(
'USE_DEFAULT_LLVM_SYMBOLIZER')
# Chromium specific folder to ignore.
initialexe_folder_path = f'{os.path.sep}initialexe'
logs.log('\n'.join([
'Walking build directory to find files and set environment variables.',
f'Environment prefix: {env_prefix!r}',
f'App path environment variable name: {app_path!r}',
f'App name: {app_name!r}',
f'LLVM symbolizer file name: {llvm_symbolizer_filename!r}',
f'Use default LLVM symbolizer: {use_default_llvm_symbolizer}',
]))
def set_env_var(name, value):
full_name = env_prefix + name
logs.log(f'Setting environment variable: {full_name} = {value}')
environment.set_value(full_name, value)
for search_directory in search_directories:
logs.log(f'Searching in directory: {search_directory}')
for root, _, files in shell.walk(search_directory):
# .dSYM folders contain symbol files on Mac and should
# not be searched for the application binary.
if platform == 'MAC' and '.dSYM' in root:
continue
# Ignore some folders on Windows.
if (platform == 'WINDOWS' and (initialexe_folder_path in root)):
continue
for filename in files:
if not absolute_file_path and filename == app_name:
absolute_file_path = os.path.join(root, filename)
app_directory = os.path.dirname(absolute_file_path)
# We don't want to change the state of system binaries.
if not environment.get_value('SYSTEM_BINARY_DIR'):
os.chmod(absolute_file_path, 0o750)
set_env_var(app_path, absolute_file_path)
set_env_var('APP_DIR', app_directory)
if not gn_args_path and filename == gn_args_filename:
gn_args_path = os.path.join(root, gn_args_filename)
set_env_var('GN_ARGS_PATH', gn_args_path)
if (not llvm_symbolizer_path and
filename == llvm_symbolizer_filename and
not use_default_llvm_symbolizer):
llvm_symbolizer_path = os.path.join(root, llvm_symbolizer_filename)
set_env_var('LLVM_SYMBOLIZER_PATH', llvm_symbolizer_path)
if app_name and not absolute_file_path:
logs.log_error(f'Could not find app {app_name!r} in search directories.') |
Return a list of build urls sorted by revision. | def _sort_build_urls_by_revision(build_urls, bucket_path, reverse):
"""Return a list of build urls sorted by revision."""
base_url = os.path.dirname(bucket_path)
file_pattern = os.path.basename(bucket_path)
filename_by_revision_dict = {}
_, base_path = storage.get_bucket_name_and_path(base_url)
base_path_with_separator = base_path + '/' if base_path else ''
for build_url in build_urls:
match_pattern = f'{base_path_with_separator}({file_pattern})'
match = re.match(match_pattern, build_url)
if match:
filename = match.group(1)
revision = match.group(2)
# Ensure that there are no duplicate revisions.
if revision in filename_by_revision_dict:
job_name = environment.get_value('JOB_NAME')
raise errors.BadStateError(
'Found duplicate revision %s when processing bucket. '
'Bucket path is probably malformed for job %s.' % (revision,
job_name))
filename_by_revision_dict[revision] = filename
try:
sorted_revisions = sorted(
filename_by_revision_dict,
reverse=reverse,
key=lambda x: list(map(int, x.split('.'))))
except:
logs.log_warn(
'Revision pattern is not an integer, falling back to string sort.')
sorted_revisions = sorted(filename_by_revision_dict, reverse=reverse)
sorted_build_urls = []
for revision in sorted_revisions:
filename = filename_by_revision_dict[revision]
sorted_build_urls.append('%s/%s' % (base_url, filename))
return sorted_build_urls |
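A worked example of the numeric sort key used above; a plain string sort would order dotted revisions incorrectly:

```python
revisions = ['9.0.999.1', '10.0.100.2', '10.0.99.5']

print(sorted(revisions, key=lambda x: list(map(int, x.split('.')))))
# ['9.0.999.1', '10.0.99.5', '10.0.100.2']  (correct numeric order)

print(sorted(revisions))
# ['10.0.100.2', '10.0.99.5', '9.0.999.1']  (lexicographic, wrong)
```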
Returns a sorted list of build urls from a bucket path. | def get_build_urls_list(bucket_path, reverse=True):
"""Returns a sorted list of build urls from a bucket path."""
if not bucket_path:
return []
base_url = os.path.dirname(bucket_path)
if environment.is_running_on_app_engine():
build_urls = list(storage.list_blobs(base_url))
else:
keys_directory = environment.get_value('BUILD_URLS_DIR')
keys_filename = '%s.list' % utils.string_hash(bucket_path)
keys_file_path = os.path.join(keys_directory, keys_filename)
# Within a single task, re-use the locally cached keys file. Otherwise, we
# waste a lot of network bandwidth fetching the same set of urls (especially
# for regression and progression testing).
if not os.path.exists(keys_file_path):
# Get url list by reading the GCS bucket.
with open(keys_file_path, 'w') as f:
for path in storage.list_blobs(base_url):
f.write(path + '\n')
content = utils.read_data_from_file(
keys_file_path, eval_data=False).decode('utf-8')
if not content:
return []
build_urls = content.splitlines()
return _sort_build_urls_by_revision(build_urls, bucket_path, reverse) |
Get the main bucket path for the current job. | def get_primary_bucket_path():
"""Get the main bucket path for the current job."""
release_build_bucket_path = environment.get_value('RELEASE_BUILD_BUCKET_PATH')
if release_build_bucket_path:
return release_build_bucket_path
fuzz_target_build_bucket_path = get_bucket_path(
'FUZZ_TARGET_BUILD_BUCKET_PATH')
if fuzz_target_build_bucket_path:
fuzz_target = environment.get_value('FUZZ_TARGET')
if not fuzz_target:
raise BuildManagerError('FUZZ_TARGET is not defined.')
return _full_fuzz_target_path(fuzz_target_build_bucket_path, fuzz_target)
raise BuildManagerError(
'RELEASE_BUILD_BUCKET_PATH or FUZZ_TARGET_BUILD_BUCKET_PATH '
'needs to be defined.') |
Returns a sorted ascending list of revisions from a bucket path, excluding
bad build revisions. Testcase crash revision is not excluded from the list
even if it appears in the bad_revisions list. | def get_revisions_list(bucket_path, bad_revisions, testcase=None):
"""Returns a sorted ascending list of revisions from a bucket path, excluding
bad build revisions. Testcase crash revision is not excluded from the list
even if it appears in the bad_revisions list."""
revision_pattern = revisions.revision_pattern_from_build_bucket_path(
bucket_path)
revision_urls = get_build_urls_list(bucket_path, reverse=False)
if not revision_urls:
return None
# Parse the revisions out of the build urls.
revision_list = []
for url in revision_urls:
match = re.match(revision_pattern, url)
if match:
revision = revisions.convert_revision_to_integer(match.group(1))
revision_list.append(revision)
for bad_revision in bad_revisions:
# Don't remove the testcase revision even if it is in the bad build list. This
# usually happens when a bot erroneously marks a particular revision as bad
# due to flakiness.
if testcase and bad_revision == testcase.crash_revision:
continue
if bad_revision in revision_list:
revision_list.remove(bad_revision)
return revision_list |
Get the base fuzz target name "X" from "X@Y". | def _base_fuzz_target_name(target_name):
"""Get the base fuzz target name "X" from "X@Y"."""
return target_name.split('@')[0] |
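A tiny usage example of the split above; the suffix value is made up:

```python
assert _base_fuzz_target_name('my_fuzzer@some_suffix') == 'my_fuzzer'
assert _base_fuzz_target_name('my_fuzzer') == 'my_fuzzer'
```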