response
stringlengths 1
33.1k
| instruction
stringlengths 22
582k
|
---|---|
Return whether or not the current bot is a uworker. This is not the same as
def is_uworker():
  """Return whether or not the current bot is a uworker. This is not the same as
  OSS-Fuzz's untrusted worker."""
  uworker_flag = get_value('UWORKER')
  return uworker_flag
def is_running_on_app_engine():
  """Return True if we are running on appengine (local or production)."""
  gae_env = os.getenv('GAE_ENV')
  server_software = os.getenv('SERVER_SOFTWARE', '')
  return (gae_env or is_running_on_app_engine_development() or
          server_software.startswith('Google App Engine/'))
def is_running_on_app_engine_development():
  """Return True if running on the local development appengine server."""
  if os.getenv('GAE_ENV') == 'dev':
    return True

  server_software = os.getenv('SERVER_SOFTWARE', '')
  return server_software.startswith('Development/')
def parse_environment_definition(environment_string):
  """Parses a job's environment definition.

  Args:
    environment_string: Multi-line string of "KEY = VALUE" pairs. Blank lines
        and lines starting with '#' are ignored.

  Returns:
    A dict mapping stripped keys to stripped values.
  """
  if not environment_string:
    return {}

  values = {}
  # The previous implementation wrapped splitlines() in a one-element list and
  # iterated two levels deep for no reason; iterate the lines directly.
  for line in environment_string.splitlines():
    # Skip comments and blank lines.
    if line.startswith('#') or not line.strip():
      continue

    match = re.match('([^ =]+)[ ]*=[ ]*(.*)', line)
    if match:
      values[match.group(1).strip()] = match.group(2).strip()

  return values
def base_platform(override):
  """Return the base platform when an override is provided."""
  platform_name, _, _ = override.partition(':')
  return platform_name
def platform():
  """Return the operating system type, unless an override is provided."""
  override = get_value('OS_OVERRIDE')
  if override:
    return override.upper()

  system = sys.platform
  if system.startswith('win'):
    return 'WINDOWS'
  if system.startswith('linux'):
    return 'LINUX'
  if system == 'darwin':
    return 'MAC'

  raise ValueError('Unsupported platform "%s".' % system)
def remove_key(key_name):
  """Remove environment |key| and its associated value."""
  if key_name and key_name in os.environ:
    del os.environ[key_name]
def reset_environment():
  """Resets environment variables to their initial state. Saves the initial
  state on first call."""
  global _initial_environment
  if _initial_environment is None:
    # First call: snapshot the current environment so later calls can restore
    # it. There is nothing to reset if we are initializing for the first time.
    _initial_environment = copy()
  else:
    # Clean current environment.
    os.environ.clear()
    # Add shared variables with values from _initial_environment.
    os.environ.update(_initial_environment)

  if is_trusted_host():
    # Mirror the reset on the untrusted worker so both sides stay in sync.
    from clusterfuzz._internal.bot.untrusted_runner import \
        environment as untrusted_env
    untrusted_env.reset_environment()
def set_common_environment_variables():
  """Sets environment variables common for different memory debugging tools."""
  # G_SLICE = always-malloc: make glib use system malloc.
  # NSS_DISABLE_UNLOAD = 1: make nss skip dlclosing dynamically loaded modules,
  # which would result in "obj:*" in backtraces.
  # NSS_DISABLE_ARENA_FREE_LIST = 1: make nss use system malloc.
  common_variables = (
      ('G_SLICE', 'always-malloc'),
      ('NSS_DISABLE_UNLOAD', 1),
      ('NSS_DISABLE_ARENA_FREE_LIST', 1),
      ('NACL_DANGEROUS_SKIP_QUALIFICATION_TEST', 1),
  )
  for variable_name, variable_value in common_variables:
    set_value(variable_name, variable_value)
def set_memory_tool_options(env_var, options_dict):
  """Set current memory tool options."""
  joined_options = join_memory_tool_options(options_dict)
  set_value(env_var, joined_options)
def set_environment_parameters_from_file(file_path):
  """Set environment variables from a file."""
  if not os.path.exists(file_path):
    return

  with open(file_path) as handle:
    lines = handle.read().splitlines()

  for line in lines:
    # Skip comments and blank lines.
    if line.startswith('#') or not line.strip():
      continue

    match = re.match('([^ =]+)[ ]*=[ ]*(.*)', line)
    if match:
      set_value(match.group(1), match.group(2))
def update_symbolizer_options(tool_options, symbolize_inline_frames=False):
  """Checks and updates the necessary symbolizer options such as
  `external_symbolizer_path` and `symbolize_inline_frames`."""
  if 'external_symbolizer_path' not in tool_options:
    symbolizer_path = get_llvm_symbolizer_path()
    if symbolizer_path:
      tool_options['external_symbolizer_path'] = (
          _quote_value_if_needed(symbolizer_path))

  if 'symbolize_inline_frames' not in tool_options:
    tool_options['symbolize_inline_frames'] = str(
        symbolize_inline_frames).lower()
def reset_current_memory_tool_options(redzone_size=0,
                                      malloc_context_size=0,
                                      leaks=True,
                                      symbolize_inline_frames=False,
                                      quarantine_size_mb=None,
                                      disable_ubsan=False):
  """Resets environment variables for memory debugging tool to default
  values.

  Args:
    redzone_size: Redzone size passed to ASan option generation.
    malloc_context_size: Malloc context size passed to ASan option generation.
    leaks: Whether leak detection options are enabled (ASan/HWASAN path).
    symbolize_inline_frames: Whether inline frames should be symbolized.
    quarantine_size_mb: ASan quarantine size in MB, or None for default.
    disable_ubsan: Whether UBSan checks are disabled in the ASan options.
  """
  # FIXME: Handle these imports in a cleaner way.
  from clusterfuzz._internal.platforms import android

  # Set common environment variable useful for memory debugging tools.
  set_common_environment_variables()

  # Set memory tool name in our environment for easy access.
  job_name = get_value('JOB_NAME')
  tool_name = get_memory_tool_name(job_name)
  set_value('MEMORY_TOOL', tool_name)

  bot_platform = platform()
  tool_options = {}

  # Default options for the memory debugging tool used.
  if tool_name in ['ASAN', 'HWASAN']:
    tool_options = get_asan_options(redzone_size, malloc_context_size,
                                    quarantine_size_mb, bot_platform, leaks,
                                    disable_ubsan)
  elif tool_name == 'KASAN':
    tool_options = get_kasan_options()
  elif tool_name == 'MSAN':
    tool_options = get_msan_options()
  elif tool_name == 'TSAN':
    tool_options = get_tsan_options()
  elif tool_name in ['UBSAN', 'CFI']:
    tool_options = get_ubsan_options()

  # Additional options. These override the defaults.
  additional_tool_options = get_value('ADDITIONAL_%s_OPTIONS' % tool_name)
  if additional_tool_options:
    tool_options.update(_parse_memory_tool_options(additional_tool_options))

  # NOTE(review): this compares against int 1; if the option generators or
  # _parse_memory_tool_options store string values ('1'), this branch would
  # never fire — confirm against those helpers.
  if tool_options.get('symbolize') == 1:
    update_symbolizer_options(
        tool_options, symbolize_inline_frames=symbolize_inline_frames)

  # Join the options.
  joined_tool_options = join_memory_tool_options(tool_options)
  tool_options_variable_name = '%s_OPTIONS' % tool_name
  set_value(tool_options_variable_name, joined_tool_options)

  # CFI handles various signals through the UBSan runtime, so need to set
  # UBSAN_OPTIONS explicitly. See crbug.com/716235#c25.
  if tool_name == 'CFI':
    set_value('UBSAN_OPTIONS', joined_tool_options)

  # For Android, we need to set shell property |asan.options|.
  # For engine-based fuzzers, it is not needed as options variable is directly
  # passed to shell.
  if is_android(bot_platform) and not is_engine_fuzzer_job():
    android.sanitizer.set_options(tool_name, joined_tool_options)
def set_default_vars():
  """Set default environment vars and values."""
  env_file_path = os.path.join(get_value('ROOT_DIR'), 'bot', 'env.yaml')
  with open(env_file_path) as file_handle:
    env_vars_and_values = yaml.safe_load(file_handle.read())

  for variable, value in env_vars_and_values.items():
    # We cannot call set_value here.
    os.environ[variable] = str(value)
def set_bot_environment():
  """Set environment for the bots.

  Returns:
    True on success, False if ROOT_DIR is not set.
  """
  root_dir = get_value('ROOT_DIR')

  if not root_dir:
    # Error, bail out.
    return False

  # Reset our current working directory. Our last job might
  # have left us in a non-existent temp directory.
  # Or ROOT_DIR might be deleted and recreated.
  os.chdir(root_dir)

  # Set some default directories. These can be overridden by config files below.
  bot_dir = os.path.join(root_dir, 'bot')
  if is_trusted_host(ensure_connected=False):
    # On trusted hosts, builds live under the worker's root instead.
    worker_root_dir = os.environ['WORKER_ROOT_DIR']
    os.environ['BUILDS_DIR'] = os.path.join(worker_root_dir, 'bot', 'builds')
  else:
    os.environ['BUILDS_DIR'] = os.path.join(bot_dir, 'builds')

  os.environ['BUILD_URLS_DIR'] = os.path.join(bot_dir, 'build-urls')
  os.environ['LOG_DIR'] = os.path.join(bot_dir, 'logs')
  os.environ['CACHE_DIR'] = os.path.join(bot_dir, 'cache')

  inputs_dir = os.path.join(bot_dir, 'inputs')
  os.environ['INPUT_DIR'] = inputs_dir
  os.environ['CRASH_STACKTRACES_DIR'] = os.path.join(inputs_dir, 'crash-stacks')
  os.environ['FUZZERS_DIR'] = os.path.join(inputs_dir, 'fuzzers')
  os.environ['DATA_BUNDLES_DIR'] = os.path.join(inputs_dir, 'data-bundles')
  os.environ['FUZZ_INPUTS'] = os.path.join(inputs_dir, 'fuzzer-testcases')
  # FUZZ_INPUTS_MEMORY aliases FUZZ_INPUTS here (no separate tmpfs path).
  os.environ['FUZZ_INPUTS_MEMORY'] = os.environ['FUZZ_INPUTS']
  os.environ['FUZZ_INPUTS_DISK'] = os.path.join(inputs_dir,
                                                'fuzzer-testcases-disk')
  os.environ['FUZZ_DATA'] = os.path.join(inputs_dir,
                                         'fuzzer-common-data-bundles')
  os.environ['IMAGES_DIR'] = os.path.join(inputs_dir, 'images')
  os.environ['SYMBOLS_DIR'] = os.path.join(inputs_dir, 'symbols')
  os.environ['USER_PROFILE_ROOT_DIR'] = os.path.join(inputs_dir,
                                                     'user-profile-dirs')

  # Set bot name.
  if not get_value('BOT_NAME'):
    # If not defined, default to host name.
    os.environ['BOT_NAME'] = socket.gethostname().lower()

  # Local temp directory (non-tmpfs).
  local_tmp_dir = os.path.join(bot_dir, 'tmp')

  # Set BOT_TMPDIR if not already set.
  if not get_value('BOT_TMPDIR'):
    os.environ['BOT_TMPDIR'] = local_tmp_dir

  # Add common environment variables needed by Bazel test runner.
  # See https://docs.bazel.build/versions/master/test-encyclopedia.html.
  # NOTE: Do not use a tmpfs folder as some fuzz targets don't work.
  os.environ['TEST_TMPDIR'] = local_tmp_dir
  os.environ['TZ'] = 'UTC'

  # Sets the default configuration. Can be overridden by job environment.
  set_default_vars()

  # Set environment variable from local project configuration.
  from clusterfuzz._internal.config import local_config
  local_config.ProjectConfig().set_environment()

  # Success.
  return True
def set_tsan_max_history_size():
  """Sets maximum history size for TSAN tool."""
  tsan_options = get_value('TSAN_OPTIONS')
  if not tsan_options:
    return

  # Bump any history_size value below the maximum up to the maximum.
  max_history_size = 7
  for size in range(max_history_size):
    tsan_options = tsan_options.replace('history_size=%d' % size,
                                        'history_size=%d' % max_history_size)

  set_value('TSAN_OPTIONS', tsan_options)
def set_value(environment_variable, value, env=None):
  """Set an environment variable."""
  if env is None:
    env = os.environ

  variable_name = str(environment_variable)
  variable_value = str(value).replace('%ROOT_DIR%', os.getenv('ROOT_DIR', ''))
  env[variable_name] = variable_value

  if is_trusted_host():
    from clusterfuzz._internal.bot.untrusted_runner import \
        environment as untrusted_env
    untrusted_env.forward_environment_variable(variable_name, variable_value)
def get_initial_task_name():
  """Returns the name of the task that this task (postprocess or utask_main) is
  part of."""
  payload = get_value('INITIAL_TASK_PAYLOAD')
  if payload is None:
    return None

  # The task name is the first space-separated token of the payload.
  task_name, _, _ = payload.partition(' ')
  return task_name
def tool_matches(tool_name, job_name):
  """Return if the memory debugging tool is used in this job."""
  # The tool name must appear at the start of the job name or right after a
  # non-alphabetic separator.
  pattern = '(.*[^a-zA-Z]|^)%s' % tool_name.lower()
  return re.match(pattern, job_name.lower()) is not None
def appengine_noop(func):
  """Wrap a function into no-op and return None if running on App Engine."""

  @functools.wraps(func)
  def wrapper(*args, **kwargs):
    if not is_running_on_app_engine():
      return func(*args, **kwargs)
    return None

  return wrapper
def bot_noop(func):
  """Wrap a function into no-op and return None if running on bot."""

  @functools.wraps(func)
  def wrapper(*args, **kwargs):
    # Only execute when we are on App Engine (i.e. not a bot).
    if is_running_on_app_engine():
      return func(*args, **kwargs)
    return None

  return wrapper
def is_local_development():
  """Return True if running in local development environment (e.g. running
  a bot locally, excludes tests)."""
  if not get_value('LOCAL_DEVELOPMENT'):
    return False
  return not get_value('PY_UNITTESTS')
def is_production():
  """Returns True if there are no environmental indicators
  of local development occurring."""
  if is_local_development():
    return False
  if get_value('UNTRUSTED_RUNNER_TESTS'):
    return False
  if get_value('LOCAL_DEVELOPMENT'):
    return False
  return not get_value('UTASK_TESTS')
def local_noop(func):
  """Wrap a function into no-op and return None if running in local
  development environment."""

  @functools.wraps(func)
  def wrapper(*args, **kwargs):
    running_locally = (is_local_development() or
                       is_running_on_app_engine_development())
    if running_locally:
      return None
    return func(*args, **kwargs)

  return wrapper
def is_ephemeral():
  """Return whether or not we are an ephemeral bot."""
  ephemeral_flag = get_value('EPHEMERAL')
  return ephemeral_flag
def is_android(plt=None):
  """Return True if we are on android platform."""
  current_platform = plt if plt else platform()
  return 'ANDROID' in current_platform
def is_android_cuttlefish(plt=None):
  """Return True if we are on android cuttlefish platform."""
  current_platform = plt if plt else platform()
  return 'ANDROID_X86' in current_platform
def is_android_emulator():
  """Return True if we are on android emulator platform."""
  platform_group = get_platform_group()
  return 'ANDROID_EMULATOR' in platform_group
def is_android_kernel(plt=None):
  """Return True if we are on android kernel platform groups."""
  group = plt if plt else get_platform_group()
  return 'ANDROID_KERNEL' in group
def is_android_real_device():
  """Return True if we are on a real android device."""
  current_base = base_platform(platform())
  return current_base == 'ANDROID'
def is_lib():
  """Whether or not we're in libClusterFuzz."""
  lib_flag = get_value('LIB_CF')
  return lib_flag
def if_redis_available(func):
  """Wrap a function if redis is available and return None if not."""

  @functools.wraps(func)
  def wrapper(*args, **kwargs):
    if not get_value('REDIS_HOST'):
      return None
    return func(*args, **kwargs)

  return wrapper
def is_testcase_deprecated(platform_id=None):
  """Whether or not the device or branch is deprecated.

  Args:
    platform_id: Platform identifier string, or None/empty if unknown.

  Returns:
    True if the Android device/branch for |platform_id| is deprecated,
    False otherwise (including when no platform_id is given).
  """
  if not platform_id:
    # The previous implementation called platform_id.upper() on the default
    # None argument and raised AttributeError. Treat missing ids as not
    # deprecated.
    return False

  platform_id_upper = platform_id.upper()
  if is_android(platform_id_upper) and not is_android_cuttlefish(
      platform_id_upper):
    # FIXME: Handle these imports in a cleaner way.
    from clusterfuzz._internal.platforms import android
    return android.util.is_testcase_deprecated(platform_id)

  return False
def can_testcase_run_on_platform(testcase_platform_id, current_platform_id):
  """Whether or not the testcase can run on the current platform."""
  testcase_platform = testcase_platform_id.upper()
  current_platform = current_platform_id.upper()

  # Only Android-to-Android compatibility is supported here.
  if not is_android(testcase_platform) or not is_android(current_platform):
    return False

  if (is_android_cuttlefish(testcase_platform) and
      is_android_cuttlefish(current_platform)):
    return True

  if is_android(testcase_platform):
    # FIXME: Handle these imports in a cleaner way.
    from clusterfuzz._internal.platforms import android
    return android.util.can_testcase_run_on_platform(testcase_platform_id,
                                                     current_platform_id)

  return False
def _get_minijail_path():
  """Get the minijail path.

  Returns:
    The path to the minijail binary.
  """
  resources_directory = environment.get_platform_resources_directory()
  return os.path.join(resources_directory, 'minijail0')
def _get_minijail_user_namespace_args():
  """Get user namespace arguments for minijail.

  Returns:
    A list representing arguments to minijail.
  """
  # root (uid 0 in namespace) -> USER.
  # The reason for this is that minijail does setresuid(0, 0, 0) before doing a
  # chroot, which means uid 0 needs access to the chroot dir (owned by USER).
  #
  # Note that we also run fuzzers as uid 0 (but with no capabilities in
  # permitted/effective/inherited sets which *should* mean there's nothing
  # special about it). This is because the uid running the fuzzer also needs
  # access to things owned by USER (fuzzer binaries, supporting files), and USER
  # can only be mapped once.
  uid_mapping = '0 %d 1' % os.getuid()
  return ['-U', '-m', uid_mapping]  # -U enables the user namespace.
def _create_chroot_dir(base_dir):
  """Create dir for chroot."""
  chroot_directory = tempfile.mkdtemp(dir=base_dir)
  return chroot_directory
def _create_tmp_mount(base_dir):
  """Create a tmp mount in base_dir."""
  mount_directory = tempfile.mkdtemp(dir=base_dir)
  return mount_directory
def _end_process(terminate_function, process_result):
  """Ends a running process, ignoring exceptions.

  Args:
    terminate_function: The function to terminate the process (e.g. a bound
        Popen.terminate or Popen.kill).
    process_result: A ProcessResult object where timeout information will be
        written to.
  """
  try:
    terminate_function()
  except OSError:
    # The process exited before we could signal it.
    logs.log('Process already killed.')

  process_result.timed_out = True
def wait_process(process,
                 timeout,
                 input_data=None,
                 terminate_before_kill=False,
                 terminate_wait_time=None):
  """Waits until either the process exits or times out.

  Args:
    process: A subprocess.Popen object.
    timeout: Maximum number of seconds to wait for before sending a signal.
    input_data: Input to be sent to the process.
    terminate_before_kill: A bool indicating that SIGTERM should be sent to
        the process first before SIGKILL (to let the SIGTERM handler run).
    terminate_wait_time: Maximum number of seconds to wait for the SIGTERM
        handler.

  Returns:
    A ProcessResult.
  """
  result = ProcessResult()
  is_windows = environment.platform() == 'WINDOWS'

  # On Windows, terminate() just calls Win32 API function TerminateProcess()
  # which is equivalent to process kill. So, skip terminate_before_kill.
  if terminate_before_kill and not is_windows:
    first_timeout_function = process.terminate

    # Use a second timer to send the process kill.
    second_timer = threading.Timer(timeout + terminate_wait_time, _end_process,
                                   [process.kill, result])
  else:
    first_timeout_function = process.kill
    second_timer = None

  # When a timer fires, _end_process signals the process and marks
  # result.timed_out.
  first_timer = threading.Timer(timeout, _end_process,
                                [first_timeout_function, result])

  output = None
  start_time = time.time()

  try:
    first_timer.start()
    if second_timer:
      second_timer.start()

    output = process.communicate(input_data)[0]
  finally:
    # Cancel the timers so they don't fire after a normal exit.
    first_timer.cancel()

    if second_timer:
      second_timer.cancel()

  result.return_code = process.poll()
  result.output = output
  result.time_executed = time.time() - start_time
  return result
def kill_process_tree(root_pid):
  """Kill process tree."""
  try:
    root = psutil.Process(root_pid)
    descendants = root.children(recursive=True)
  except (psutil.AccessDenied, psutil.NoSuchProcess, OSError):
    logs.log_warn('Failed to find or access process.')
    return

  # Kill the descendants first, then the root itself.
  for descendant in descendants:
    try:
      descendant.kill()
    except (psutil.AccessDenied, psutil.NoSuchProcess, OSError):
      logs.log_warn('Failed to kill process child.')

  try:
    root.kill()
  except (psutil.AccessDenied, psutil.NoSuchProcess, OSError):
    logs.log_warn('Failed to kill process.')
def start_process(process_handle):
  """Start the process using process handle and override list2cmdline for
  Windows."""
  on_windows = environment.platform() == 'WINDOWS'
  if on_windows:
    # Override list2cmdline on Windows to return first index of list as string.
    # This is to workaround a mozprocess bug since it passes command as list
    # and not as string.
    subprocess.list2cmdline_orig = subprocess.list2cmdline
    subprocess.list2cmdline = lambda s: s[0]

  try:
    process_handle.run()
  finally:
    # Always restore the original implementation.
    if on_windows:
      subprocess.list2cmdline = subprocess.list2cmdline_orig
def cleanup_defunct_processes():
  """Cleans up defunct processes.

  Reaps zombie children via non-blocking waitpid until none remain.
  """
  # Defunct processes happen only on unix platforms.
  if environment.platform() == 'WINDOWS':
    return

  while True:
    try:
      # Matches any defunct child process.
      pid, _ = os.waitpid(-1, os.WNOHANG)
    except OSError:
      # Narrowed from a bare except: waitpid raises ChildProcessError (an
      # OSError) when there are no children left. A bare except also swallowed
      # KeyboardInterrupt/SystemExit, which should propagate.
      break

    if not pid:
      break

    logs.log('Clearing defunct process %s.' % str(pid))
def run_process(cmdline,
                current_working_directory=None,
                timeout=DEFAULT_TEST_TIMEOUT,
                need_shell=False,
                gestures=None,
                env_copy=None,
                testcase_run=True,
                ignore_children=True):
  """Executes a process with a given command line and other parameters.

  Args:
    cmdline: Command line string to execute.
    current_working_directory: Working directory for the process, or None.
    timeout: Maximum runtime in seconds (reduced internally to leave time for
        crash analysis).
    need_shell: Whether to run the command through a shell.
    gestures: Optional list of gesture strings to run during execution.
    env_copy: Optional dict of environment variables applied before running.
    testcase_run: True when running an actual testcase (enables remote
        execution on trusted hosts and crash checks on the output).
    ignore_children: Whether child processes are excluded from tracking.

  Returns:
    A (return_code, execution_time_seconds, output) tuple; on failure to
    launch the process, (None, None, '').
  """
  if environment.is_trusted_host() and testcase_run:
    from clusterfuzz._internal.bot.untrusted_runner import remote_process_host
    return remote_process_host.run_process(
        cmdline, current_working_directory, timeout, need_shell, gestures,
        env_copy, testcase_run, ignore_children)

  if gestures is None:
    gestures = []

  if env_copy:
    os.environ.update(env_copy)

  # FIXME(mbarbella): Using LAUNCHER_PATH here is error prone. It forces us to
  # do certain operations before fuzzer setup (e.g. bad build check).
  launcher = environment.get_value('LAUNCHER_PATH')
  # This is used when running scripts on native linux OS and not on the device.
  # E.g. running a fuzzer to generate testcases or launcher script.
  plt = environment.platform()
  runs_on_device = environment.is_android(plt) or plt == 'FUCHSIA'
  if runs_on_device and (not testcase_run or launcher):
    plt = 'LINUX'

  is_android = environment.is_android(plt)

  # Lower down testcase timeout slightly to account for time for crash
  # analysis.
  timeout -= CRASH_ANALYSIS_TIME

  # LeakSanitizer hack - give time for stdout/stderr processing.
  lsan = environment.get_value('LSAN', False)
  if lsan:
    timeout -= LSAN_ANALYSIS_TIME

  # Initialize variables.
  adb_output = None
  process_output = ''
  process_status = None
  return_code = 0
  process_poll_interval = environment.get_value('PROCESS_POLL_INTERVAL', 0.5)
  start_time = time.time()
  watch_for_process_exit = (
      environment.get_value('WATCH_FOR_PROCESS_EXIT') if is_android else True)
  window_list = []

  # Get gesture start time from last element in gesture list.
  gestures = copy.deepcopy(gestures)
  if gestures and gestures[-1].startswith('Trigger'):
    gesture_start_time = int(gestures[-1].split(':')[1])
    gestures.pop()
  else:
    gesture_start_time = timeout // 2

  if is_android:
    # Clear the log upfront.
    android.logger.clear_log()

    # Run the app.
    adb_output = android.adb.run_command(cmdline, timeout=timeout)
  else:
    cmd = shell.get_command(cmdline)

    process_output = mozprocess.processhandler.StoreOutput()
    process_status = ProcessStatus()
    try:
      process_handle = mozprocess.ProcessHandlerMixin(
          cmd,
          args=None,
          cwd=current_working_directory,
          shell=need_shell,
          processOutputLine=[process_output],
          onFinish=[process_status],
          ignore_children=ignore_children)
      start_process(process_handle)
    except:
      logs.log_error('Exception occurred when running command: %s.' % cmdline)
      return None, None, ''

  # Poll the process until timeout, crash, or normal exit.
  while True:
    time.sleep(process_poll_interval)

    # Run the gestures at gesture_start_time or in case we didn't find windows
    # in the last try.
    if (gestures and time.time() - start_time >= gesture_start_time and
        not window_list):
      # In case, we don't find any windows, we increment the gesture start time
      # so that the next check is after 1 second.
      gesture_start_time += 1

      if plt == 'LINUX':
        linux.gestures.run_gestures(gestures, process_handle.pid,
                                    process_status, start_time, timeout,
                                    window_list)
      elif plt == 'WINDOWS':
        windows.gestures.run_gestures(gestures, process_handle.pid,
                                      process_status, start_time, timeout,
                                      window_list)
      elif is_android:
        android.gestures.run_gestures(gestures, start_time, timeout)

        # TODO(mbarbella): We add a fake window here to prevent gestures on
        # Android from getting executed more than once.
        window_list = ['FAKE']

    if time.time() - start_time >= timeout:
      break

    # Collect the process output.
    output = (
        android.logger.log_output()
        if is_android else b'\n'.join(process_output.output))
    output = utils.decode_to_unicode(output)
    if crash_analyzer.is_memory_tool_crash(output):
      break

    # Check if we need to bail out on process exit.
    if watch_for_process_exit:
      # If |watch_for_process_exit| is set, then we already completed running
      # our app launch command. So, we can bail out.
      if is_android:
        break

      # On desktop, we bail out as soon as the process finishes.
      if process_status and process_status.finished:
        # Wait for process shutdown and set return code.
        process_handle.wait(timeout=PROCESS_CLEANUP_WAIT_TIME)
        break

  # Process output based on platform.
  if is_android:
    # Get current log output. If device is in reboot mode, logcat automatically
    # waits for device to be online.
    time.sleep(ANDROID_CRASH_LOGCAT_WAIT_TIME)
    output = android.logger.log_output()

    if android.constants.LOW_MEMORY_REGEX.search(output):
      # If the device is low on memory, we should force reboot and bail out to
      # prevent device from getting in a frozen state.
      logs.log('Device is low on memory, rebooting.', output=output)
      android.adb.hard_reset()
      android.adb.wait_for_device()

    elif android.adb.time_since_last_reboot() < time.time() - start_time:
      # Check if a reboot has happened, if yes, append log output before reboot
      # and kernel logs content to output.
      log_before_last_reboot = android.logger.log_output_before_last_reboot()
      kernel_log = android.adb.get_kernel_log_content()
      output = '%s%s%s%s%s' % (
          log_before_last_reboot, utils.get_line_seperator('Device rebooted'),
          output, utils.get_line_seperator('Kernel Log'), kernel_log)
      # Make sure to reset SE Linux Permissive Mode. This can be done cheaply
      # in ~0.15 sec and is needed especially between runs for kernel crashes.
      android.adb.run_as_root()
      android.settings.change_se_linux_to_permissive_mode()
      return_code = 1

    # Add output from adb to the front.
    if adb_output:
      output = '%s\n\n%s' % (adb_output, output)

    # Kill the application if it is still running. We do this at the end to
    # prevent this from adding noise to the logcat output.
    task_name = environment.get_value('TASK_NAME')
    child_process_termination_pattern = environment.get_value(
        'CHILD_PROCESS_TERMINATION_PATTERN')
    if task_name == 'fuzz' and child_process_termination_pattern:
      # In some cases, we do not want to terminate the application after each
      # run to avoid long startup times (e.g. for chrome). Terminate processes
      # matching a particular pattern for light cleanup in this case.
      android.adb.kill_processes_and_children_matching_name(
          child_process_termination_pattern)
    else:
      # There is no special termination behavior. Simply stop the application.
      android.app.stop()
  else:
    # Get the return code in case the process has finished already.
    # If the process hasn't finished, return_code will be None which is what
    # callers expect unless the output indicates a crash.
    return_code = process_handle.poll()

    # If the process is still running, then terminate it.
    if not process_status.finished:
      launcher_with_interpreter = shell.get_execute_command(
          launcher) if launcher else None
      if (launcher_with_interpreter and
          cmdline.startswith(launcher_with_interpreter)):
        # If this was a launcher script, we KILL all child processes created
        # except for APP_NAME.
        # It is expected that, if the launcher script terminated normally, it
        # cleans up all the child processes it created itself.
        terminate_root_and_child_processes(process_handle.pid)
      else:
        try:
          # kill() here actually sends SIGTERM on posix.
          process_handle.kill()
        except:
          pass

    if lsan:
      time.sleep(LSAN_ANALYSIS_TIME)

    output = b'\n'.join(process_output.output)
    output = utils.decode_to_unicode(output)

  # X Server hack when max client reached.
  if ('Maximum number of clients reached' in output or
      'Unable to get connection to X server' in output):
    logs.log_error('Unable to connect to X server, exiting.')
    os.system('sudo killall -9 Xvfb blackbox >/dev/null 2>&1')
    sys.exit(0)

  if testcase_run and (crash_analyzer.is_memory_tool_crash(output) or
                       crash_analyzer.is_check_failure_crash(output)):
    return_code = 1

  # If a crash is found, then we add the memory state as well.
  if return_code and is_android:
    ps_output = android.adb.get_ps_output()
    if ps_output:
      output += utils.get_line_seperator('Memory Statistics')
      output += ps_output

  if return_code:
    logs.log_warn(
        'Process (%s) ended with exit code (%s).' % (repr(cmdline),
                                                     str(return_code)),
        output=output)

  return return_code, round(time.time() - start_time, 1), output
def cleanup_stale_processes():
  """Kill stale processes left behind by a job."""
  # Stop multiprocessing children and any stale application instances first,
  # then reap the defunct (zombie) children left by those terminations.
  terminate_multiprocessing_children()
  terminate_stale_application_instances()
  cleanup_defunct_processes()
def close_queue(queue_to_close):
  """Close the queue.

  Args:
    queue_to_close: A queue object providing a close() method.
  """
  if environment.is_trusted_host():
    # We don't use multiprocessing.Queue on trusted hosts.
    return

  try:
    queue_to_close.close()
  except Exception:
    # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate.
    logs.log_error('Unable to close queue.')
def get_process():
  """Return a multiprocessing process object (with bug fixes)."""
  if environment.is_trusted_host():
    # forking/multiprocessing is unsupported because of the RPC connection.
    # Fall back to threads, which share the host connection.
    return threading.Thread

  # FIXME(unassigned): Remove this hack after real bug is fixed.
  # NOTE(review): resetting the private _identity presumably works around a
  # multiprocessing child-naming/bookkeeping bug — confirm before removing.
  # pylint: disable=protected-access
  multiprocessing.current_process()._identity = ()
  return multiprocessing.Process
def get_runtime_snapshot():
  """Return a list of current processes and their command lines as string."""
  descriptions = []
  for proc in psutil.process_iter():
    try:
      info = proc.as_dict(attrs=['name', 'cmdline', 'pid', 'ppid'])
      description = '{name} ({pid}, {ppid})'.format(
          name=info['name'], pid=info['pid'], ppid=info['ppid'])

      cmdline = info['cmdline']
      if cmdline:
        description += ': ' + ' '.join(cmdline)

      descriptions.append(description)
    except (psutil.AccessDenied, psutil.NoSuchProcess, OSError):
      # Ignore the error, use whatever info is available for access.
      pass

  return '\n'.join(sorted(descriptions))
def get_queue():
  """Return a multiprocessing queue object, or None if one cannot be created.

  On trusted hosts a plain queue.Queue is returned instead, since threads are
  used there rather than processes.
  """
  if environment.is_trusted_host():
    # We don't use multiprocessing.Process on trusted hosts. No need to use
    # multiprocessing.Queue.
    return queue.Queue()

  try:
    result_queue = multiprocessing.Queue()
  except Exception:
    # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate.
    # FIXME: Invalid cross-device link error. Happens sometimes with
    # chroot jobs even though /dev/shm and /run/shm are mounted.
    logs.log_error('Unable to get multiprocessing queue.')
    return None

  return result_queue
def terminate_hung_threads(threads):
  """Terminate hung threads.

  Waits up to THREAD_FINISH_WAIT_TIME for |threads| to finish on their own,
  then force-terminates any that are still alive.

  Args:
    threads: List of thread-like objects with is_alive() and terminate().
  """
  start_time = time.time()
  while time.time() - start_time < THREAD_FINISH_WAIT_TIME:
    # Use a generator with any() instead of materializing a list.
    if not any(thread.is_alive() for thread in threads):
      # No threads are alive, so we're done.
      return

    time.sleep(0.1)

  logs.log_warn('Hang detected.', snapshot=get_runtime_snapshot())

  if environment.is_trusted_host():
    from clusterfuzz._internal.bot.untrusted_runner import host

    # Bail out on trusted hosts since we're using threads and can't clean up.
    host.host_exit_no_return()

  # Terminate all threads that are still alive. Previously a single bare
  # except wrapped a side-effecting list comprehension, so the first failing
  # terminate() aborted the remaining ones; terminate each independently.
  for thread in threads:
    if not thread.is_alive():
      continue
    try:
      thread.terminate()
    except Exception:
      pass
def terminate_root_and_child_processes(root_pid):
  """Terminate the root process along with any children it spawned.

  The root APP_NAME process is sent SIGTERM (not SIGKILL) so coverage data can
  be dumped; everything else, including the root itself at the end, is killed.
  """
  app_name = environment.get_value('APP_NAME')
  direct_children = utils.get_process_ids(root_pid, recursive=False)

  for child_pid in direct_children:
    # utils.get_process_ids also returns the parent pid.
    if child_pid == root_pid:
      continue

    try:
      child = psutil.Process(child_pid)
    except Exception:
      # Process doesn't exist anymore.
      continue

    if child.name() == app_name:
      # Send SIGTERM to the root APP_NAME process only, and none of its children
      # so that coverage data will be dumped properly (e.g. the browser process
      # of chrome).
      # TODO(ochang): Figure out how windows coverage is dumped since there is
      # no equivalent of SIGTERM.
      terminate_process(child_pid, kill=False)
      continue

    # Non-app children are hard-killed together with their whole subtree.
    child_and_grand_children_pids = utils.get_process_ids(
        child_pid, recursive=True)
    for pid in child_and_grand_children_pids:
      terminate_process(pid, kill=True)

  # Finally, hard-kill the root process itself.
  terminate_process(root_pid, kill=True)
def terminate_multiprocessing_children():
  """Terminate all children created with the multiprocessing module.

  Failures are ignored: a child may already have exited by the time we try.
  """
  for child in multiprocessing.active_children():
    try:
      child.terminate()
    except Exception:
      # Narrowed from a bare except; unable to terminate multiprocessing
      # child or termination was not needed.
      pass
def terminate_stale_application_instances():
  """Kill stale instances of the application running for this command.

  Delegates to the remote worker on trusted hosts; otherwise builds a
  platform-specific kill list (app, helper tools, symbolizer) and terminates
  matching processes by name and by build-directory command line.
  """
  if environment.is_trusted_host():
    from clusterfuzz._internal.bot.untrusted_runner import remote_process_host
    remote_process_host.terminate_stale_application_instances()
    return

  # Stale instance cleanup is sometimes disabled for local testing.
  if not environment.get_value('KILL_STALE_INSTANCES', True):
    return

  additional_process_to_kill = environment.get_value(
      'ADDITIONAL_PROCESSES_TO_KILL')
  builds_directory = environment.get_value('BUILDS_DIR')
  llvm_symbolizer_filename = environment.get_executable_filename(
      'llvm-symbolizer')
  platform = environment.platform()
  start_time = time.time()

  processes_to_kill = []
  app_name = environment.get_value('APP_NAME')

  processes_to_kill += [app_name]

  if additional_process_to_kill:
    processes_to_kill += additional_process_to_kill.split(' ')
  # Drop empty entries (e.g. unset APP_NAME or stray spaces in the list).
  processes_to_kill = [x for x in processes_to_kill if x]

  if environment.is_android(platform):
    # Cleanup any stale adb connections.
    device_serial = environment.get_value('ANDROID_SERIAL')
    adb_search_string = 'adb -s %s' % device_serial

    # Terminate llvm symbolizer processes matching exact path. This is important
    # for Android where multiple device instances run on same host.
    llvm_symbolizer_path = environment.get_llvm_symbolizer_path()

    terminate_processes_matching_cmd_line(
        [adb_search_string, llvm_symbolizer_path], kill=True)

    # Make sure device is online and rooted.
    android.adb.run_as_root()

    # Make sure to reset SE Linux Permissive Mode (might be lost in reboot).
    android.settings.change_se_linux_to_permissive_mode()

    # Make sure that device forwarder is running (might be lost in reboot or
    # process crash).
    android.device.setup_host_and_device_forwarder_if_needed()

    # Make sure that package optimization is complete (might be triggered due to
    # unexpected circumstances).
    android.app.wait_until_optimization_complete()

    # Reset application state, which kills its pending instances and re-grants
    # the storage permissions.
    android.app.reset()

  elif platform == 'WINDOWS':
    processes_to_kill += [
        'cdb.exe',
        'handle.exe',
        'msdt.exe',
        'openwith.exe',
        'WerFault.exe',
        llvm_symbolizer_filename,
    ]
    terminate_processes_matching_names(processes_to_kill, kill=True)
    terminate_processes_matching_cmd_line(builds_directory, kill=True)

    # Artificial sleep to let the processes get terminated.
    time.sleep(1)

  elif platform == 'FUCHSIA':
    processes_to_kill += [
        'undercoat',
        llvm_symbolizer_filename,
    ]
    terminate_processes_matching_names(processes_to_kill, kill=True)

  else:
    # Handle Linux and Mac platforms.
    processes_to_kill += [
        'addr2line',
        'atos',
        'chrome-devel-sandbox',
        'gdb',
        'nacl_helper',
        'xdotool',
        llvm_symbolizer_filename,
    ]
    terminate_processes_matching_names(processes_to_kill, kill=True)
    terminate_processes_matching_cmd_line(builds_directory, kill=True)

  # Surface unusually slow cleanups in the logs.
  duration = int(time.time() - start_time)
  if duration >= 5:
    logs.log('Process kill took longer than usual - %s.' % str(
        datetime.timedelta(seconds=duration)))
def terminate_process(process_id, kill=False):
  """Terminate (or hard-kill when |kill| is set) the process with this pid."""
  try:
    target = psutil.Process(process_id)
    action = target.kill if kill else target.terminate
    action()
  except (psutil.AccessDenied, psutil.NoSuchProcess, OSError):
    logs.log_warn('Failed to terminate process.')
def terminate_processes_matching_names(match_strings, kill=False):
  """Terminates processes whose name exactly equals one of |match_strings|
  (case sensitive)."""
  if isinstance(match_strings, str):
    match_strings = [match_strings]

  for proc in psutil.process_iter():
    try:
      info = proc.as_dict(attrs=['name', 'pid'])
    except (psutil.AccessDenied, psutil.NoSuchProcess, OSError):
      continue

    if info['name'] in match_strings:
      terminate_process(info['pid'], kill)
def terminate_processes_matching_cmd_line(match_strings,
                                          kill=False,
                                          exclude_strings=None):
  """Terminates processes whose command line contains any of |match_strings|
  (case sensitive), unless it also contains one of |exclude_strings|."""
  exclude_strings = exclude_strings or []
  if isinstance(match_strings, str):
    match_strings = [match_strings]

  for proc in psutil.process_iter():
    try:
      info = proc.as_dict(attrs=['cmdline', 'pid'])
      cmdline = info['cmdline']
      if not cmdline:
        continue
      joined_cmd_line = ' '.join(cmdline)
    except (psutil.AccessDenied, psutil.NoSuchProcess, OSError):
      continue

    matched = any(m in joined_cmd_line for m in match_strings)
    excluded = any(e in joined_cmd_line for e in exclude_strings)
    if matched and not excluded:
      terminate_process(info['pid'], kill)
def scripts_are_running(expected_scripts):
  """Check if all target scripts are running as expected.

  Args:
    expected_scripts: Collection of script basenames to look for.

  Returns:
    True iff every script in |expected_scripts| matches the basename of a
    command-line argument of some live process.
  """
  # Work on a set copy: the original mutated |scripts_left| while iterating
  # over it, which can skip entries.
  scripts_left = set(expected_scripts)
  if not scripts_left:
    return True

  for process in psutil.process_iter():
    try:
      cmdline = process.cmdline()
    except (psutil.AccessDenied, psutil.NoSuchProcess, OSError):
      # Process died or is inaccessible; it cannot satisfy any script.
      continue

    scripts_left -= {os.path.basename(arg) for arg in cmdline}
    if not scripts_left:
      return True

  return False
def _low_disk_space_threshold():
  """Return the minimum free disk space (bytes) considered healthy."""
  # Trusted hosts can run with less free space as they do not store builds or
  # corpora.
  if environment.is_trusted_host(ensure_connected=False):
    return _TRUSTED_HOST_LOW_DISK_SPACE_THRESHOLD
  return _DEFAULT_LOW_DISK_SPACE_THRESHOLD
def copy_file(source_file_path, destination_file_path):
  """Faster version of shutil.copy that uses FILE_COPY_BUFFER_SIZE.

  Returns:
    True if the copy succeeded and the destination exists, False otherwise.
  """
  if not os.path.exists(source_file_path):
    logs.log_error('Source file %s for copy not found.' % source_file_path)
    return False

  try:
    with open(source_file_path, 'rb') as source_file_handle:
      with open(destination_file_path, 'wb') as destination_file_handle:
        shutil.copyfileobj(source_file_handle, destination_file_handle,
                           FILE_COPY_BUFFER_SIZE)
    error_occurred = False
  except OSError:
    # Narrowed from a bare except; file copies fail with OS-level errors.
    error_occurred = True

  # Make sure that the destination file actually exists.
  error_occurred |= not os.path.exists(destination_file_path)

  if error_occurred:
    logs.log_warn('Failed to copy source file %s to destination file %s.' %
                  (source_file_path, destination_file_path))
    return False

  return True
def clear_build_directory():
  """Wipe and recreate the builds directory."""
  builds_dir = environment.get_value('BUILDS_DIR')
  remove_directory(builds_dir, recreate=True)
def clear_build_urls_directory():
  """Wipe and recreate the build-URLs directory, locally and on the worker."""
  build_urls_dir = environment.get_value('BUILD_URLS_DIR')
  remove_directory(build_urls_dir, recreate=True)

  if environment.is_trusted_host():
    from clusterfuzz._internal.bot.untrusted_runner import file_host
    file_host.clear_build_urls_directory()
def clear_crash_stacktraces_directory():
  """Wipe and recreate the crash stacktraces directory."""
  stacktraces_dir = environment.get_value('CRASH_STACKTRACES_DIR')
  remove_directory(stacktraces_dir, recreate=True)
def clear_common_data_bundles_directory():
  """Wipe and recreate the common data bundle directory."""
  fuzz_data_dir = environment.get_value('FUZZ_DATA')
  remove_directory(fuzz_data_dir, recreate=True)
def clear_data_bundles_directory():
  """Wipe and recreate the data bundles directory."""
  data_bundles_dir = environment.get_value('DATA_BUNDLES_DIR')
  remove_directory(data_bundles_dir, recreate=True)
def clear_data_directories():
  """Clear all data directories and the persistent cache."""
  # Order mirrors the original call sequence.
  for clear_func in (clear_build_directory, clear_build_urls_directory,
                     clear_crash_stacktraces_directory,
                     clear_common_data_bundles_directory,
                     clear_data_bundles_directory, clear_fuzzers_directories,
                     clear_temp_directory, clear_testcase_directories):
    clear_func()

  persistent_cache.clear_values(clear_all=True)
def clear_data_directories_on_low_disk_space():
  """Clear all data directories when free disk space drops below threshold.

  This should ideally never happen, but when it does, clearing keeps the bot
  working in a sane state.
  """
  free_disk_space = get_free_disk_space()
  if free_disk_space is None or free_disk_space >= _low_disk_space_threshold():
    # Either free space cannot be determined or there is enough of it.
    return

  logs.log_warn(
      'Low disk space detected, clearing all data directories to free up space.'
  )
  clear_data_directories()
def clear_device_temp_directories():
  """Clear temp directories that live on an attached Android device."""
  if not environment.is_android():
    return

  from clusterfuzz._internal.platforms import android
  android.device.clear_temp_directories()
def clear_fuzzers_directories():
  """Wipe and recreate the fuzzers directory."""
  fuzzers_dir = environment.get_value('FUZZERS_DIR')
  remove_directory(fuzzers_dir, recreate=True)
def clear_temp_directory(clear_user_profile_directories=True):
  """Recreate the bot/test temp dirs and optionally user profile dirs."""
  bot_tmpdir = environment.get_value('BOT_TMPDIR')
  remove_directory(bot_tmpdir, recreate=True)

  test_tmpdir = environment.get_value('TEST_TMPDIR')
  if test_tmpdir != bot_tmpdir:
    remove_directory(test_tmpdir, recreate=True)

  if environment.is_trusted_host():
    from clusterfuzz._internal.bot.untrusted_runner import file_host
    file_host.clear_temp_directory()

  if clear_user_profile_directories:
    user_profile_root = environment.get_value('USER_PROFILE_ROOT_DIR')
    if user_profile_root:
      remove_directory(user_profile_root, recreate=True)
def clear_system_temp_directory():
  """Clear system specific temp directory.

  Deletes the contents of tempfile.gettempdir() bottom-up, leaving the temp
  directory itself in place (its permissions/symlinks must not change).
  """

  def _delete_object(path, delete_func):
    """Delete a object with its delete function, ignoring any error."""
    try:
      delete_func(path)
    except:
      pass

  if environment.get_value('SKIP_SYSTEM_TEMP_CLEANUP'):
    # This provides a way to avoid clearing system temporary directory when it
    # can interfere with other processes on the system.
    return

  # Cache system temp directory to avoid iterating through the system dir list
  # on every gettempdir call. Also, it helps to avoid a case where temp dir
  # fills up the disk and gets ignored by gettempdir.
  global _system_temp_dir
  if not _system_temp_dir:
    _system_temp_dir = tempfile.gettempdir()

  # Use a custom cleanup rather than using |remove_directory| since it
  # recreates the directory and can mess up permissions and symlinks.
  # topdown=False walks bottom-up so files are removed before their parents.
  for root, dirs, files in walk(_system_temp_dir, topdown=False):
    for name in files:
      _delete_object(os.path.join(root, name), os.remove)

    for name in dirs:
      # os.rmdir only succeeds on now-empty directories; errors are ignored.
      _delete_object(os.path.join(root, name), os.rmdir)

  logs.log('Cleared system temp directory: %s' % _system_temp_dir)
def clear_testcase_directories():
  """Recreate testcase directories locally, on device, and on the worker."""
  for env_key in ('FUZZ_INPUTS', 'FUZZ_INPUTS_DISK'):
    remove_directory(environment.get_value(env_key), recreate=True)

  if environment.is_android():
    from clusterfuzz._internal.platforms import android
    android.device.clear_testcase_directory()

  if environment.is_trusted_host():
    from clusterfuzz._internal.bot.untrusted_runner import file_host
    file_host.clear_testcase_directories()
def close_open_file_handles_if_needed(path):
  """Try to close all open file handle for a specific path.

  Uses Sysinternals handle.exe to enumerate and close handles; no-op on
  non-Windows platforms.
  """
  if environment.platform() != 'WINDOWS':
    # Handle closing is only applicable on Windows platform.
    return

  resources_directory = environment.get_platform_resources_directory()
  handle_executable_path = os.path.join(resources_directory, 'handle.exe')
  handle_output = execute_command(
      '%s -accepteula "%s"' % (handle_executable_path, path))
  for line in handle_output.splitlines():
    match = HANDLE_OUTPUT_FILE_TYPE_REGEX.match(line)
    if not match:
      continue

    # execute_command returns bytes, so regex groups are bytes and must be
    # decoded before being interpolated into the follow-up command.
    process_id = match.group(1).decode('utf-8')
    file_handle_id = match.group(2).decode('utf-8')
    file_path = match.group(3).decode('utf-8')

    logs.log(
        'Closing file handle id %s for path %s.' % (file_handle_id, file_path))
    execute_command('%s -accepteula -c %s -p %s -y' %
                    (handle_executable_path, file_handle_id, process_id))
def create_directory(directory, create_intermediates=False, recreate=False):
  """Creates |directory|.

  Args:
    directory: Path to create.
    create_intermediates: Also create missing intermediate directories.
    recreate: If the directory already exists, remove and recreate it;
      otherwise an existing directory is left untouched.

  Returns:
    True if the directory exists on return, False on failure.
  """
  if os.path.exists(directory):
    if recreate:
      remove_directory(directory)
    else:
      return True

  try:
    if create_intermediates:
      os.makedirs(directory)
    else:
      os.mkdir(directory)
  except OSError:
    # Narrowed from a bare except; directory creation fails with OSError.
    logs.log_error('Unable to create directory %s.' % directory)
    return False

  return True
def execute_command(shell_command):
  """Run |shell_command| through the shell and return its output.

  Returns:
    The command's stdout as bytes, or '' (str) if the command could not be
    launched at all. Note the historical bytes/str asymmetry callers rely on.
  """
  try:
    process_handle = subprocess.Popen(
        shell_command,
        shell=True,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    output, _ = process_handle.communicate()
  except Exception:
    # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate.
    logs.log_error('Error while executing command %s.' % shell_command)
    return ''

  return output
def get_command(command_line):
  """Get the command to pass to subprocess."""
  # Windows takes the raw string; POSIX platforms need shell-style splitting.
  if environment.platform() == 'WINDOWS':
    return command_line

  return shlex.split(command_line, posix=True)
def get_command_line_from_argument_list(argument_list):
  """Join |argument_list| into a single quoted command-line string."""
  return subprocess.list2cmdline(argument_list)
def get_directory_file_count(directory_path):
  """Returns number of files within a directory (recursively)."""
  return sum(
      1 for root, _, files in walk(directory_path)
      for filename in files
      if os.path.isfile(os.path.join(root, filename)))
def get_directory_size(directory_path):
  """Returns size of a directory (in bytes)."""
  total_bytes = 0
  for root, _, files in walk(directory_path):
    total_bytes += sum(
        os.path.getsize(os.path.join(root, filename)) for filename in files)
  return total_bytes
def get_files_list(directory_path):
  """Returns a list of files in a directory (recursively)."""
  return [
      os.path.join(root, filename)
      for root, _, files in walk(directory_path)
      for filename in files
      if os.path.isfile(os.path.join(root, filename))
  ]
def get_free_disk_space(path='/'):
  """Return free disk space in bytes for |path|, or None if unavailable."""
  if not os.path.exists(path):
    return None

  if psutil is None:
    # psutil can be missing in stripped-down environments.
    logs.log_error(
        'Attempted to get free disk space, but \'psutil\' was not found.')
    return None

  return psutil.disk_usage(path).free
def get_interpreter(file_to_execute):
  """Gives the interpreter needed to execute |file_to_execute|, or None."""
  interpreter_by_extension = {
      '.bash': 'bash',
      '.class': 'java',
      '.js': 'node',
      '.pl': 'perl',
      '.py': sys.executable,
      '.pyc': sys.executable,
      '.sh': 'sh',
  }
  extension = os.path.splitext(file_to_execute)[1]
  # dict.get replaces the try/except KeyError pattern.
  return interpreter_by_extension.get(extension)
def get_execute_command(file_to_execute):
  """Return command to execute |file_to_execute|."""
  interpreter = get_interpreter(file_to_execute)

  # Hack for Java scripts: the java launcher takes the class name without
  # its .class extension.
  target = file_to_execute.replace('.class', '')

  if not interpreter:
    # Executables that don't need an interpreter run directly.
    return target
  return '%s %s' % (interpreter, target)
def move(src, dst):
  """Wrapper around shutil.move(src, dst) that logs shutil.Error failures
  and returns False instead of raising."""
  try:
    shutil.move(src, dst)
  except shutil.Error:
    logs.log_error('Failed to move %s to %s' % (src, dst))
    return False
  return True
def remove_empty_files(root_path):
  """Removes empty files in a path recursively"""
  for directory, _, filenames in walk(root_path):
    for filename in filenames:
      full_path = os.path.join(directory, filename)
      if os.path.getsize(full_path):
        continue

      try:
        os.remove(full_path)
      except:
        logs.log_error('Unable to remove the empty file: %s (%s).' %
                       (full_path, sys.exc_info()[0]))
def remove_empty_directories(path):
  """Recursively removes empty directories under (and including) |path|.

  Sub-directories are processed first so a parent that becomes empty after
  its children are removed is deleted too.
  """
  if not os.path.isdir(path):
    return

  # Remove empty sub-folders first.
  for entry in os.listdir(path):
    absolute_path = os.path.join(path, entry)
    if os.path.isdir(absolute_path):
      remove_empty_directories(absolute_path)

  # If the folder is empty now, delete it.
  if not os.listdir(path):
    try:
      os.rmdir(path)
    except OSError:
      # Narrowed from a bare except; rmdir failures surface as OSError.
      logs.log_error('Unable to remove empty folder %s.' % path)
def remove_file(file_path):
  """Best-effort removal of |file_path|; all errors are swallowed."""
  if not os.path.exists(file_path):
    return
  try:
    os.remove(file_path)
  except:
    pass
def get_tempfile(prefix='', suffix=''):
  """Yield a path to a randomly-named temporary file, removing it afterwards.

  Generator intended for use as a context manager (e.g. wrapped with
  contextlib.contextmanager — decorator presumed at the definition site,
  TODO confirm). The try/finally guarantees cleanup even when the consumer's
  block raises; the original skipped removal in that case.
  """
  tempdir = environment.get_value('BOT_TMPDIR', '/tmp')
  os.makedirs(tempdir, exist_ok=True)
  basename = _get_random_filename()
  filepath = os.path.join(tempdir, f'{prefix}{basename}{suffix}')
  try:
    yield filepath
  finally:
    if os.path.exists(filepath):
      os.remove(filepath)
def remove_directory(directory, recreate=False, ignore_errors=False):
  """Removes a directory tree.

  Args:
    directory: Path of the directory to remove.
    recreate: If True, re-create an empty directory at the same path.
    ignore_errors: If True, failures are logged as warnings, not errors.

  Returns:
    True on success (including an emptied mount point), False otherwise.
  """
  # Log errors as warnings if |ignore_errors| is set.
  log_error_func = logs.log_warn if ignore_errors else logs.log_error

  def clear_read_only(func, path, _):
    """Clear the read-only bit and reattempt the removal again.
    This is needed on Windows."""
    try:
      os.chmod(path, 0o750)
    except:
      # If this is tmpfs, we will probably fail.
      pass

    try:
      func(path)
    except:
      # Log errors for all cases except device or resource busy errors, as such
      # errors are expected in cases when mounts are used.
      error_message = str(sys.exc_info()[1])
      if 'Device or resource busy' not in error_message:
        logs.log_warn(
            'Failed to remove directory %s failed because %s with %s failed. %s'
            % (directory, func, path, error_message))

  # Try the os-specific deletion commands first. This helps to overcome issues
  # with unicode filename handling.
  if os.path.exists(directory):
    if environment.platform() == 'WINDOWS':
      os.system('rd /s /q "%s" > nul 2>&1' % directory)
    else:
      os.system('rm -rf "%s" > /dev/null 2>&1' % directory)

  if os.path.exists(directory):
    # If the directory still exists after using native OS delete commands, then
    # try closing open file handles and then try removing it with read only
    # bit removed (Windows only).
    close_open_file_handles_if_needed(directory)
    shutil.rmtree(directory, onerror=clear_read_only)

  if os.path.exists(directory):
    # 1. If directory is a mount point, then directory itself won't be
    # removed. So, check the list of files inside it.
    # 2. If directory is a regular directory, then it should have not
    # existed.
    if not os.path.ismount(directory) or os.listdir(directory):
      # Directory could not be cleared. Bail out.
      log_error_func('Failed to clear directory %s.' % directory)
      return False

    return True

  if not recreate:
    return True

  try:
    os.makedirs(directory)
  except:
    log_error_func('Unable to re-create directory %s.' % directory)
    return False

  return True
def walk(directory, **kwargs):
  """Wrapper around os.walk to resolve compatibility issues."""
  yield from os.walk(directory, **kwargs)
def _read_data_file(data_file):
  """Helper function to read the contents of a data file."""
  # Explicit encoding for platform-independent reads; matches the sibling
  # helper that already passes encoding='utf-8'.
  with open(
      os.path.join(DATA_DIRECTORY, data_file), encoding='utf-8') as handle:
    return handle.read()
def mock_resource(spec):
  """Mock resource.

  Returns a mock.Mock(spec=spec) whose create/get/exists/delete side effects
  emulate a bot_manager resource using simple in-memory created/body state.
  """
  resource = mock.Mock(spec=spec)
  resource.created = False
  resource.body = None

  def create(*args, **kwargs):  # pylint: disable=unused-argument
    # Mirrors the real API: creating an existing resource raises.
    if resource.created:
      raise bot_manager.AlreadyExistsError
    resource.created = True

  def get():
    if resource.created:
      return resource.body
    raise bot_manager.NotFoundError

  def exists():
    return resource.created

  def delete():
    # Deleting a non-existent resource raises, like the real API.
    if not resource.created:
      raise bot_manager.NotFoundError
    resource.created = False

  resource.create.side_effect = create
  resource.get.side_effect = get
  resource.exists.side_effect = exists
  resource.delete.side_effect = delete
  return resource
def expected_instance_template(gce_project_name,
                               name,
                               project_name,
                               disk_size_gb=None,
                               service_account=None,
                               tls_cert=False):
  """Get the expected instance template for a project.

  Args:
    gce_project_name: Name of the GCE project config to load.
    name: Name of the instance template within that config.
    project_name: Fuzzing project name, stored in 'task-tag' metadata.
    disk_size_gb: Optional override for the first disk's size.
    service_account: Optional override for the first service account email.
    tls_cert: If True, add 'tls-cert'/'tls-key' metadata derived from
      project_name.

  Returns:
    A deep copy of the instance template dict with the overrides applied.
  """
  gce_project = compute_engine_projects.load_project(gce_project_name)
  expected = copy.deepcopy(gce_project.get_instance_template(name))
  expected['properties']['metadata']['items'].append({
      'key': 'task-tag',
      'value': project_name,
  })

  if disk_size_gb:
    disk = expected['properties']['disks'][0]
    disk['initializeParams']['diskSizeGb'] = disk_size_gb

  if service_account:
    expected['properties']['serviceAccounts'][0]['email'] = service_account

  if tls_cert:
    expected['properties']['metadata']['items'].extend([{
        'key': 'tls-cert',
        'value': project_name + '_cert',
    }, {
        'key': 'tls-key',
        'value': project_name + '_key',
    }])
  return expected
def expected_host_instance_template(gce_project_name, name):
  """Get the expected (unmodified) instance template for a host project."""
  gce_project = compute_engine_projects.load_project(gce_project_name)
  template = gce_project.get_instance_template(name)
  return copy.deepcopy(template)
def get_original_issue(self, issue_id):
  """Return a canned monorail issue for |issue_id| with fixture-specific
  ccs/labels for a few well-known ids."""
  issue_id = int(issue_id)

  issue = Issue()
  issue.open = True
  issue.itm = self._itm  # pylint: disable=protected-access
  issue.id = issue_id

  if issue_id == 1337:
    issue.add_cc('[email protected]')
    issue.add_label('Restrict-View-Commit')
  elif issue_id == 1338:
    for cc in ('[email protected]', '[email protected]'):
      issue.add_cc(cc)
  elif issue_id == 1340:
    issue.add_label('reported-2015-01-01')

  return monorail.Issue(issue)
def _read_data_file(data_file):
  """Helper function to read the contents of a data file."""
  data_path = os.path.join(DATA_DIRECTORY, data_file)
  with open(data_path, encoding='utf-8') as handle:
    return handle.read()
def mock_bucket_get(bucket=None):
  """Mock buckets().get()."""
  if bucket not in EXISTING_BUCKETS:
    # Unknown buckets behave like a failed request.
    return MockRequest(True)
  return MockRequest(False, {'name': 'bucket'})
def mock_get_iam_policy(bucket=None):
  """Mock buckets().getIamPolicy()."""
  bindings = []
  if bucket in ('lib1-logs.clusterfuzz-external.appspot.com',
                'lib3-logs.clusterfuzz-external.appspot.com'):
    bindings.append({
        'role': 'roles/storage.objectViewer',
        'members': ['user:[email protected]',]
    })

  response = {
      'kind': 'storage#policy',
      'resourceId': 'fake',
      'bindings': bindings,
      'etag': 'fake'
  }
  return MockRequest(return_value=response)
def mock_set_iam_policy(bucket=None, body=None):  # pylint: disable=unused-argument
  """Mock buckets().setIamPolicy()."""
  bindings = body['bindings']
  denied = bool(bindings) and 'user:[email protected]' in bindings[0]['members']
  if denied:
    return MockRequest(raise_exception=True)
  return MockRequest(return_value=copy.deepcopy(body))
def mock_get_url(url):
  """Mock get_url(): return the canned result for |url|, or None if unknown."""
  # dict.get replaces the membership-test-then-index pattern (single lookup).
  return URL_RESULTS.get(url)
def _mock_read_data(path):
  """Mock read_data.

  Returns a JSON projects config whose contents depend on |path|: a debug
  config when the path contains 'dbg', Android device configs when it
  contains 'android', and a default three-project config otherwise.
  """
  if 'dbg' in path:
    return json.dumps({
        'projects': [{
            'build_path': 'gs://bucket-dbg/a-b/%ENGINE%/%SANITIZER%/'
                          '%TARGET%/([0-9]+).zip',
            'name': '//a/b',
            'fuzzing_engines': ['libfuzzer', 'honggfuzz'],
            'sanitizers': ['address']
        }]
    })

  if 'android' in path:
    return json.dumps({
        'projects': [{
            'build_path': 'gs://bucket-android/%ENGINE%/%SANITIZER%/'
                          '%TARGET%/([0-9]+).zip',
            'name': 'android_pixel7',
            'fuzzing_engines': ['libfuzzer'],
            'architectures': ['arm'],
            'sanitizers': ['hardware'],
            'platform': 'ANDROID',
            'queue_id': 'pixel7'
        }, {
            'build_path':
                'gs://bucket-android/a-b-android/%ENGINE%/%SANITIZER%/'
                '%TARGET%/([0-9]+).zip',
            'name': 'android_pixel8',
            'fuzzing_engines': ['libfuzzer', 'afl'],
            'architectures': ['x86_64'],
            'sanitizers': ['address'],
            'platform': 'ANDROID_X86',
            'queue_id': 'pixel8'
        }, {
            'build_path':
                'gs://bucket-android/a-b-android/%ENGINE%/%SANITIZER%/'
                '%TARGET%/([0-9]+).zip',
            'name': 'android_mte',
            'fuzzing_engines': ['libfuzzer'],
            'architectures': ['arm'],
            'sanitizers': ['none'],
            'platform': 'ANDROID_MTE',
            'queue_id': 'pixel8'
        }]
    })

  # Default: three non-Android projects.
  return json.dumps({
      'projects': [
          {
              'build_path':
                  'gs://bucket/a-b/%ENGINE%/%SANITIZER%/%TARGET%/([0-9]+).zip',
              'name':
                  '//a/b',
              'fuzzing_engines': ['libfuzzer', 'honggfuzz'],
              'sanitizers': ['address', 'memory']
          },
          {
              'build_path':
                  'gs://bucket/c-d/%ENGINE%/%SANITIZER%/%TARGET%/([0-9]+).zip',
              'name':
                  '//c/d',
              'fuzzing_engines': ['libfuzzer', 'googlefuzztest'],
              'sanitizers': ['address']
          },
          {
              'build_path':
                  'gs://bucket/e-f/%ENGINE%/%SANITIZER%/%TARGET%/([0-9]+).zip',
              'name':
                  '//e/f',
              'fuzzing_engines': ['libfuzzer'],
              'sanitizers': ['none']
          },
      ]
  })
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.