response | instruction |
---|---|
Get the issue tracker for the project name. | def get_issue_tracker(project_name, config): # pylint: disable=unused-argument
"""Get the issue tracker for the project name."""
# TODO(ochang): Make this lazy.
itm = _get_issue_tracker_manager_for_project(project_name)
if itm is None:
return None
return IssueTracker(itm) |
Get remainder. This should be tested together with
convert_index_to_hour. | def get_remainder_for_index(true_end, time_span):
"""Get remainder. This should be tested together with
convert_index_to_hour."""
# The remainder needs +1 because the cut-off is at the end of true_end.
# For example, if the true_end is 49, the 1st day is 2 to 25 and the
# 2nd day is 26, to 49.
return (true_end % time_span) + 1 |
Convert index to hour. | def convert_index_to_hour(index, time_span, remainder):
"""Convert index to hour."""
# This needs -1 because the end hour is inclusive. For example, if the period
# represents [2, 26), the end hour is 25.
#
# Index is added 1 because, in our SQL, we subtract the remainder, divide,
# and floor. So, in order to get the original hour, we need to add 1 to the
# index.
return ((index + 1) * time_span) + remainder - 1 |
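As a quick worked check of the two helpers above (a standalone sketch using the values from the comments, with time_span=24 for a one-day block):

# Worked example of the index <-> hour round trip described in the comments.
time_span = 24
true_end = 49
remainder = (true_end % time_span) + 1                    # == 2
first_day_end = ((0 + 1) * time_span) + remainder - 1     # == 25, i.e. hours 2..25
second_day_end = ((1 + 1) * time_span) + remainder - 1    # == 49, i.e. hours 26..49
assert (remainder, first_day_end, second_day_end) == (2, 25, 49)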
Query from BigQuery given the params. | def get(end, days, block, group_by, where_clause, group_having_clause, sort_by,
offset, limit):
"""Query from BigQuery given the params."""
if where_clause:
where_clause = '(%s) AND ' % where_clause
start = end - (days * 24) + 1
where_clause += '(hour BETWEEN %d AND %d) AND ' % (start, end)
where_clause += ('(_PARTITIONTIME BETWEEN TIMESTAMP_TRUNC("%s", DAY) '
'AND TIMESTAMP_TRUNC("%s", DAY))' %
(get_datetime(start).strftime('%Y-%m-%d'),
get_datetime(end).strftime('%Y-%m-%d')))
time_span = 1 if block == 'hour' else 24
remainder = get_remainder_for_index(end, time_span)
if group_having_clause:
group_having_clause = 'HAVING ' + group_having_clause
if (not big_query.VALID_FIELD_NAME_REGEX.match(group_by) or
not big_query.VALID_FIELD_NAME_REGEX.match(sort_by)):
raise ValueError('Invalid group_by or sort_by')
sql = SQL.format(
time_span=time_span,
remainder=remainder,
group_by=group_by,
where_clause=where_clause,
group_having_clause=group_having_clause,
sort_by=sort_by)
client = big_query.Client()
result = client.query(query=sql, offset=offset, limit=limit)
items = []
for row in result.rows:
avg_crash_time_in_ms = row['sum_crash_time_in_ms'] // row['total_count']
for group in row['groups']:
for index in group['indices']:
index['hour'] = convert_index_to_hour(index['index'], time_span,
remainder)
items.append({
'projectName': row['project'],
'crashType': row['crash_type'],
'crashState': row['crash_state'],
'isSecurity': row['security_flag'],
'isReproducible': row['is_reproducible'],
'isNew': row['is_new'],
'totalCount': row['total_count'],
'crashTime': {
'min':
row['min_crash_time_in_ms'],
'max':
row['max_crash_time_in_ms'],
'avg':
avg_crash_time_in_ms,
'std':
math.sqrt(
(row['sum_square_crash_time_in_ms'] // row['total_count']) -
(avg_crash_time_in_ms * avg_crash_time_in_ms))
},
'groups': row['groups'],
'days': days,
'block': block,
'end': end + 1 # Convert to UI's end.
})
return result.total_count, items |
Get datetime obj from hours from epoch. | def get_datetime(hours):
"""Get datetime obj from hours from epoch."""
return datetime.datetime.utcfromtimestamp(hours * 60 * 60) |
Get the first or last successful hour, depending on |is_last|. | def _get_first_or_last_successful_hour(is_last):
"""Get the first or last successful hour, depending on |is_last|."""
order = data_types.BuildCrashStatsJobHistory.end_time_in_hours
if is_last:
order = -order
item = data_types.BuildCrashStatsJobHistory.query().order(order).get()
if not item:
return None
return item.end_time_in_hours |
Get the last hour that ran successfully. We want to run the next hour. | def get_last_successful_hour():
"""Get the last hour that ran successfully. We want to run the next hour."""
return _get_first_or_last_successful_hour(is_last=True) |
Get the first hour that ran successfully (for the date-time picker). | def get_min_hour():
"""Get the first hour that ran successfully (for the date-time picker)."""
hour = _get_first_or_last_successful_hour(is_last=False)
# `hour` is None when we haven't run build_crash_stats at all.
# Therefore, there's no crash stats data.
#
# On the UI, the date-time picker chooses a point in time. Therefore,
# if we choose, say, 3pm, this means we want the crash stats until 2:59pm.
# Therefore, we need to increment by 1.
return (hour or 0) + 1 |
Get the last hour that can be selected by the date-time picker. | def get_max_hour():
"""Get the last hour that can be selected by the date-time picker."""
hour = get_last_successful_hour()
# `hour` is None when we haven't run build_crash_stats at all.
# Therefore, there's no crash stats data.
#
# On the UI, the date-time picker chooses a point in time. Therefore,
# if we choose, say, 3pm, this means we want the crash stats until 2:59pm.
# Therefore, we need to increment by 1.
return (hour or 0) + 1 |
Return timestamp for last crash with same crash params as testcase. | def get_last_crash_time(testcase):
"""Return timestamp for last crash with same crash params as testcase."""
client = big_query.Client()
where_clause = ('crash_type = {crash_type} AND '
'crash_state = {crash_state} AND '
'security_flag = {security_flag} AND '
'project = {project}').format(
crash_type=json.dumps(testcase.crash_type),
crash_state=json.dumps(testcase.crash_state),
security_flag=json.dumps(testcase.security_flag),
project=json.dumps(testcase.project_name),
)
sql = """
SELECT hour
FROM main.crash_stats
WHERE {where_clause}
ORDER by hour DESC
LIMIT 1
""".format(where_clause=where_clause)
result = client.query(query=sql)
if result and result.rows:
return get_datetime(result.rows[0]['hour'])
return None |
Return path to fuzzer logs bucket. | def get_bucket():
"""Return path to fuzzer logs bucket."""
return local_config.ProjectConfig().get('logs.fuzzer.bucket') |
Generate a relative path for a log using the given time.
Args:
log_time: A datetime object.
file_extension: A string appended to the end of the log file name.
LOG_EXTENSION is used if None.
Returns:
A string containing the name of the log file. | def get_log_relative_path(log_time, file_extension=None):
"""Generate a relative path for a log using the given time.
Args:
log_time: A datetime object.
file_extension: A string appended to the end of the log file name.
LOG_EXTENSION is used if None.
Returns:
A string containing the name of the log file.
"""
if file_extension is None:
file_extension = LOG_EXTENSION
return log_time.strftime(LOG_PATH_FORMAT) + file_extension |
Get directory path of logs for a given fuzzer/job.
Args:
bucket_name: Bucket logs are stored in.
fuzzer_name: Name of the fuzzer.
job_type: Job name.
logs_date: Optional datetime.date for the logs.
Returns:
A cloud storage path to the directory containing the desired logs. Format
returned is /{bucket name}/{path}. | def get_logs_directory(bucket_name, fuzzer_name, job_type=None, logs_date=None):
"""Get directory path of logs for a given fuzzer/job.
Args:
bucket_name: Bucket logs are stored in.
fuzzer_name: Name of the fuzzer.
job_type: Job name.
logs_date: Optional datetime.date for the logs.
Returns:
A cloud storage path to the directory containing the desired logs. Format
returned is /{bucket name}/{path}.
"""
path = '/%s/%s' % (bucket_name, fuzzer_name)
if job_type:
path += '/%s' % job_type
if logs_date is not None:
assert job_type is not None
path += '/%s' % logs_date
return path |
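A usage sketch of get_logs_directory() above; the bucket, fuzzer and job names are invented for illustration:

import datetime

# Hypothetical names; only the path shape matters here.
path = get_logs_directory('my-logs-bucket', 'libFuzzer', 'libfuzzer_asan_foo',
                          logs_date=datetime.date(2024, 1, 31))
assert path == '/my-logs-bucket/libFuzzer/libfuzzer_asan_foo/2024-01-31'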
Determines the GCS path to upload a log file to.
Args:
bucket_name: Bucket logs are stored in.
time: A datetime object used to generate filename for the log.
fuzzer_name: Name of the fuzzer. If None, gets this from the environment.
job_type: Job name. If None, gets this from the environment.
file_extension: A string appended to the end of the log filename. A default
value is used if None.
Returns:
The GCS path for the log file. | def get_logs_gcs_path(bucket_name=None,
time=None,
fuzzer_name=None,
job_type=None,
file_extension=None):
"""Determines the GCS path to upload a log file to.
Args:
bucket_name: Bucket logs are stored in.
time: A datetime object used to generate filename for the log.
fuzzer_name: Name of the fuzzer. If None, gets this from the environment.
job_type: Job name. If None, gets this from the environment.
file_extension: A string appended to the end of the log filename. A default
value is used if None.
Returns:
The GCS path for the log file.
"""
if bucket_name is None:
bucket_name = get_bucket()
if not fuzzer_name:
fuzzer_name = environment.get_value('FUZZER_NAME')
if not job_type:
job_type = environment.get_value('JOB_NAME')
log_directory = get_logs_directory(bucket_name, fuzzer_name, job_type)
if not time:
time = datetime.datetime.utcnow()
log_path = 'gs:/' + log_directory + '/' + get_log_relative_path(
time, file_extension)
return log_path |
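Note that get_logs_directory() already returns a path with a leading '/', which is why only 'gs:/' is prepended above. A small sketch with invented names (the relative log file name is assumed, not the project's actual LOG_PATH_FORMAT):

# Hypothetical values, shown only to illustrate the concatenation.
log_directory = '/my-logs-bucket/libFuzzer/libfuzzer_asan_foo'  # get_logs_directory(...)
relative_path = '2024-01-31-12:00:00.log'                       # get_log_relative_path(...)
log_path = 'gs:/' + log_directory + '/' + relative_path
assert log_path == ('gs://my-logs-bucket/libFuzzer/libfuzzer_asan_foo/'
                    '2024-01-31-12:00:00.log')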
Uploads file contents to log directory in GCS bucket.
Args:
bucket_name: Bucket logs are stored in.
contents: String containing log to be uploaded.
time: A datetime object used to generate filename for the log.
fuzzer_name: Name of the fuzzer. If None, gets this from the environment.
job_type: Job name. If None, gets this from the environment.
file_extension: A string appended to the end of the log filename. A default
value is used if None.
signed_upload_url: A signed url to upload the logs to.
Returns:
The path of the uploaded file and whether the upload succeeded. | def upload_to_logs(bucket_name,
contents,
time=None,
fuzzer_name=None,
job_type=None,
file_extension=None,
signed_upload_url=None):
"""Uploads file contents to log directory in GCS bucket.
Args:
bucket_name: Bucket logs are stored in.
contents: String containing log to be uploaded.
time: A datetime object used to generate filename for the log.
fuzzer_name: Name of the fuzzer. If None, gets this from the environment.
job_type: Job name. If None, gets this from the environment.
file_extension: A string appended to the end of the log filename. A default
value is used if None.
signed_upload_url: A signed url to upload the logs to.
Returns:
The path of the uploaded file and whether the upload succeeded.
"""
if signed_upload_url:
if storage.upload_signed_url(contents, signed_upload_url):
logs.log(
'Uploaded file to logs bucket.', signed_upload_url=signed_upload_url)
else:
logs.log_error(
'Failed to write file to logs bucket.',
signed_upload_url=signed_upload_url)
return
log_path = get_logs_gcs_path(
bucket_name,
time=time,
fuzzer_name=fuzzer_name,
job_type=job_type,
file_extension=file_extension)
if storage.write_data(contents, log_path):
logs.log('Uploaded file to logs bucket.', log_path=log_path)
else:
logs.log_error('Failed to write file to logs bucket.', log_path=log_path) |
Uploads logs to script logs GCS bucket.
Args:
log_contents: String containing log to be uploaded.
fuzzer_name: Name of the fuzzer. If None, gets this from the environment.
job_type: Job name. If None, gets this from the environment.
signed_upload_url: A signed URL to upload this to. | def upload_script_log(log_contents,
fuzzer_name=None,
job_type=None,
signed_upload_url=None):
"""Uploads logs to script logs GCS bucket.
Args:
log_contents: String containing log to be uploaded.
fuzzer_name: Name of the fuzzer. If None, gets this from the environment.
job_type: Job name. If None, gets this from the environment.
signed_upload_url: A signed URL to upload this to.
"""
logs_bucket = get_bucket()
if logs_bucket and log_contents and log_contents.strip():
upload_to_logs(
logs_bucket,
log_contents,
fuzzer_name=fuzzer_name,
job_type=job_type,
signed_upload_url=signed_upload_url) |
Convert QueryGroupBy value to its corresponding field name. | def group_by_to_field_name(group_by):
"""Convert QueryGroupBy value to its corresponding field name."""
if group_by == QueryGroupBy.GROUP_BY_REVISION:
return 'build_revision'
if group_by == QueryGroupBy.GROUP_BY_DAY:
return 'date'
if group_by == QueryGroupBy.GROUP_BY_TIME:
return 'time'
if group_by == QueryGroupBy.GROUP_BY_JOB:
return 'job'
if group_by == QueryGroupBy.GROUP_BY_FUZZER:
return 'fuzzer'
return None |
Returns a CoverageInformation entity for a given fuzzer and date. If date
is not specified, returns the latest entity available. | def get_coverage_info(fuzzer, date=None):
"""Returns a CoverageInformation entity for a given fuzzer and date. If date
is not specified, returns the latest entity available."""
query = data_types.CoverageInformation.query(
data_types.CoverageInformation.fuzzer == fuzzer)
if date:
# Return info for specific date.
query = query.filter(data_types.CoverageInformation.date == date)
else:
# Return latest.
query = query.order(-data_types.CoverageInformation.date)
return query.get() |
Return the GCS path in the format "/bucket/path/to/containing_dir/" for the
given kind, fuzzer, and timestamp. | def get_gcs_stats_path(kind, fuzzer, timestamp):
"""Return the GCS path in the format "/bucket/path/to/containing_dir/" for the
given kind, fuzzer, and timestamp."""
bucket_name = big_query.get_bucket()
if not bucket_name:
return None
datetime_value = datetime.datetime.utcfromtimestamp(timestamp)
dir_name = data_types.coverage_information_date_to_string(datetime_value)
path = '/%s/%s/%s/date/%s/' % (bucket_name, fuzzer, kind, dir_name)
return path |
Upload the fuzzer run to the bigquery bucket. Assumes that all the stats
given are for the same fuzzer/job run. | def upload_stats(stats_list, filename=None):
"""Upload the fuzzer run to the bigquery bucket. Assumes that all the stats
given are for the same fuzzer/job run."""
if not stats_list:
logs.log_error('Failed to upload fuzzer stats: empty stats.')
return
assert isinstance(stats_list, list)
bucket_name = big_query.get_bucket()
if not bucket_name:
logs.log_error('Failed to upload fuzzer stats: missing bucket name.')
return
kind = stats_list[0].kind
fuzzer = stats_list[0].fuzzer
# Group all stats for fuzz targets.
fuzzer_or_engine_name = get_fuzzer_or_engine_name(fuzzer)
if not filename:
# Generate a random filename.
filename = '%016x' % random.randint(0, (1 << 64) - 1) + '.json'
# Handle runs that bleed into the next day.
def timestamp_start_of_day(s):
return utils.utc_date_to_timestamp(
datetime.datetime.utcfromtimestamp(s.timestamp).date())
stats_list.sort(key=lambda s: s.timestamp)
for timestamp, stats in itertools.groupby(stats_list, timestamp_start_of_day):
upload_data = '\n'.join(stat.to_json() for stat in stats)
day_path = 'gs:/' + get_gcs_stats_path(
kind, fuzzer_or_engine_name, timestamp=timestamp) + filename
if storage.write_data(upload_data.encode('utf-8'), day_path):
logs.log(f'Uploaded {kind} stats for {fuzzer} to {day_path}.')
else:
logs.log_error(
f'Failed to upload {kind} stats for {fuzzer} to {day_path}.') |
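The itertools.groupby call above only merges adjacent items, which is why stats_list is sorted by timestamp first. A standalone sketch of the same per-day bucketing, with a stand-in for utils.utc_date_to_timestamp and made-up unix timestamps:

import calendar
import datetime
import itertools

def timestamp_start_of_day(ts):
  # Stand-in for utils.utc_date_to_timestamp(...): midnight UTC of ts's day.
  day = datetime.datetime.utcfromtimestamp(ts).date()
  return calendar.timegm(day.timetuple())

# 2024-01-01 10:00, 2024-01-01 12:00 and 2024-01-02 10:00 UTC.
timestamps = sorted([1704103200, 1704110400, 1704189600])
for day_start, group in itertools.groupby(timestamps, timestamp_start_of_day):
  print(day_start, list(group))
# 1704067200 [1704103200, 1704110400]
# 1704153600 [1704189600]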
Parse the stats column fields. | def parse_stats_column_fields(column_fields):
"""Parse the stats column fields."""
# e.g. 'sum(t.field_name) as display_name'.
aggregate_regex = re.compile(r'^(\w+)\(([a-z])\.([^\)]+)\)(\s*as\s*(\w+))?$')
# e.g. '_EDGE_COV as blah'.
builtin_regex = re.compile(r'^(_\w+)(\s*as\s*(\w+))?$')
fields = []
parts = [field.strip() for field in column_fields.split(',')]
for part in parts:
match = aggregate_regex.match(part)
if match:
table_alias = match.group(2)
field_name = match.group(3)
aggregate_function = match.group(1)
select_alias = match.group(5)
if select_alias:
select_alias = select_alias.strip('"')
fields.append(
QueryField(table_alias, field_name, aggregate_function, select_alias))
continue
match = builtin_regex.match(part)
if match:
name = match.group(1)
alias = match.group(3)
if alias:
alias = alias.strip('"')
fields.append(BuiltinFieldSpecifier(name, alias))
continue
return fields |
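A quick demonstration of what the two regexes above capture (the column expressions are illustrative):

import re

aggregate_regex = re.compile(r'^(\w+)\(([a-z])\.([^\)]+)\)(\s*as\s*(\w+))?$')
builtin_regex = re.compile(r'^(_\w+)(\s*as\s*(\w+))?$')

m = aggregate_regex.match('sum(t.new_crash_count) as new_crashes')
assert m.group(1) == 'sum'                # aggregate function
assert m.group(2) == 't'                  # table alias
assert m.group(3) == 'new_crash_count'    # field name
assert m.group(5) == 'new_crashes'        # select alias

m = builtin_regex.match('_EDGE_COV as edge_coverage')
assert m.group(1) == '_EDGE_COV'          # builtin field name
assert m.group(3) == 'edge_coverage'      # alias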
Return fuzzing engine name if it exists, or |fuzzer_name|. | def get_fuzzer_or_engine_name(fuzzer_name):
"""Return fuzzing engine name if it exists, or |fuzzer_name|."""
fuzz_target = data_handler.get_fuzz_target(fuzzer_name)
if fuzz_target:
return fuzz_target.engine
return fuzzer_name |
Get the stats dataset name for the given |fuzzer_name|. | def dataset_name(fuzzer_name):
"""Get the stats dataset name for the given |fuzzer_name|."""
return fuzzer_name.replace('-', '_') + '_stats' |
Get the schema for an engine name. | def get(engine_name):
"""Get the schema for an engine name."""
schema = _SCHEMA.get(engine_name)
if not schema:
return None
return {'fields': schema} |
Increment the error count metric. | def _increment_error_count():
"""Increment the error count metric."""
if _is_running_on_k8s():
task_name = 'k8s'
elif _is_running_on_app_engine():
task_name = 'appengine'
else:
task_name = os.getenv('TASK_NAME', 'unknown')
from clusterfuzz._internal.metrics import monitoring_metrics
monitoring_metrics.LOG_ERROR_COUNT.increment({'task_name': task_name}) |
Return whether or not in a local development environment. | def _is_local():
"""Return whether or not in a local development environment."""
return (bool(os.getenv('LOCAL_DEVELOPMENT')) or
os.getenv('SERVER_SOFTWARE', '').startswith('Development/')) |
Return whether or not we're running on App Engine (production or
development). | def _is_running_on_app_engine():
"""Return whether or not we're running on App Engine (production or
development)."""
return os.getenv('GAE_ENV') or (
os.getenv('SERVER_SOFTWARE') and
(os.getenv('SERVER_SOFTWARE').startswith('Development/') or
os.getenv('SERVER_SOFTWARE').startswith('Google App Engine/'))) |
Returns whether or not we're running on K8s. | def _is_running_on_k8s():
"""Returns whether or not we're running on K8s."""
return os.getenv('IS_K8S_ENV') == 'true' |
Return whether console logging is enabled (usually used for tests). | def _console_logging_enabled():
"""Return whether console logging is enabled (usually used for tests)."""
return bool(os.getenv('LOG_TO_CONSOLE')) |
Return bool True when logging to files (bot/logs/*.log) is enabled.
This is enabled by default.
This is disabled if we are running in app engine or kubernetes as these have
their dedicated loggers, see configure_appengine() and configure_k8s(). | def _file_logging_enabled():
"""Return bool True when logging to files (bot/logs/*.log) is enabled.
This is enabled by default.
This is disabled if we are running in app engine or kubernetes as these have
their dedicated loggers, see configure_appengine() and configure_k8s().
"""
return bool(os.getenv(
'LOG_TO_FILE',
'True')) and not _is_running_on_app_engine() and not _is_running_on_k8s() |
Return True when fluentd logging is enabled.
This is enabled by default.
This is disabled for local development and if we are running in app engine or
kubernetes as these have their dedicated loggers, see configure_appengine()
and configure_k8s(). | def _fluentd_logging_enabled():
"""Return True when fluentd logging is enabled.
This is enabled by default.
This is disabled for local development and if we are running in app engine or
kubernetes as these have their dedicated loggers, see configure_appengine()
and configure_k8s()."""
return bool(os.getenv('LOG_TO_FLUENTD', 'True')) and not _is_local(
) and not _is_running_on_app_engine() and not _is_running_on_k8s() |
Return True when Google Cloud Logging is enabled.
This is disabled for local development and if we are running on App Engine
or kubernetes as these have their dedicated loggers, see
configure_appengine() and configure_k8s(). | def _cloud_logging_enabled():
"""Return True when Google Cloud Logging is enabled.
This is disabled for local development and if we are running on App Engine
or kubernetes as these have their dedicated loggers, see
configure_appengine() and configure_k8s()."""
return bool(os.getenv('LOG_TO_GCP')) and not _is_local(
) and not _is_running_on_app_engine() and not _is_running_on_k8s() |
Suppress unwanted warnings. | def suppress_unwanted_warnings():
"""Suppress unwanted warnings."""
# See https://github.com/googleapis/google-api-python-client/issues/299
logging.getLogger('googleapiclient.discovery_cache').setLevel(logging.ERROR) |
Set the logger. | def set_logger(logger):
"""Set the logger."""
global _logger
_logger = logger |
Get handler config. | def get_handler_config(filename, backup_count):
"""Get handler config."""
root_directory = os.getenv('ROOT_DIR')
file_path = os.path.join(root_directory, filename)
max_bytes = 0 if _is_local() else LOCAL_LOG_LIMIT
return {
'class': 'logging.handlers.RotatingFileHandler',
'level': logging.INFO,
'formatter': 'simple',
'filename': file_path,
'maxBytes': max_bytes,
'backupCount': backup_count,
'encoding': 'utf8',
} |
Get config dict for the logger `name`. | def get_logging_config_dict(name):
"""Get config dict for the logger `name`."""
logging_handler = {
'run_bot':
get_handler_config('bot/logs/bot.log', 3),
'run':
get_handler_config('bot/logs/run.log', 1),
'run_heartbeat':
get_handler_config('bot/logs/run_heartbeat.log', 1),
'heartbeat':
get_handler_config('bot/logs/heartbeat.log', 1),
'run_fuzzer':
get_handler_config('bot/logs/run_fuzzer.log', 1),
'run_testcase':
get_handler_config('bot/logs/run_testcase.log', 1),
'android_heartbeat':
get_handler_config('bot/logs/android_heartbeat.log', 1),
'run_cron':
get_handler_config('bot/logs/run_cron.log', 1),
}
return {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': ('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
}
},
'handlers': {
'handler': logging_handler[name],
},
'loggers': {
name: {
'handlers': ['handler']
}
},
} |
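For context, a minimal standalone sketch of feeding a dict with this shape to logging.config.dictConfig (the file name, size limit and logger name here are illustrative, not the project's values):

import logging
import logging.config

logging.config.dictConfig({
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'simple': {
            'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        }
    },
    'handlers': {
        'handler': {
            'class': 'logging.handlers.RotatingFileHandler',
            'level': logging.INFO,
            'formatter': 'simple',
            'filename': '/tmp/run_bot.log',
            'maxBytes': 1 << 20,
            'backupCount': 3,
            'encoding': 'utf8',
        },
    },
    'loggers': {
        'run_bot': {'handlers': ['handler'], 'level': logging.INFO},
    },
})
logging.getLogger('run_bot').info('hello')  # written to /tmp/run_bot.log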
We need to truncate the message in the middle if it gets too long. | def truncate(msg, limit):
"""We need to truncate the message in the middle if it gets too long."""
if len(msg) <= limit:
return msg
half = limit // 2
return '\n'.join([
msg[:half],
'...%d characters truncated...' % (len(msg) - limit), msg[-half:]
]) |
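A usage sketch of the helper above:

msg = 'a' * 10 + 'b' * 100 + 'c' * 10           # 120 characters
out = truncate(msg, 20)
assert out.startswith('a' * 10)                 # keeps the first limit // 2 chars
assert out.endswith('c' * 10)                   # keeps the last limit // 2 chars
assert '...100 characters truncated...' in out  # 120 - 20 = 100 characters dropped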
Format LogEntry into JSON string. | def format_record(record: logging.LogRecord) -> str:
"""Format LogEntry into JSON string."""
entry = {
'message':
truncate(record.getMessage(), STACKDRIVER_LOG_MESSAGE_LIMIT),
'created': (
datetime.datetime.utcfromtimestamp(record.created).isoformat() + 'Z'),
'severity':
record.levelname,
'bot_name':
os.getenv('BOT_NAME'),
'task_payload':
os.getenv('TASK_PAYLOAD'),
'name':
record.name,
}
initial_payload = os.getenv('INITIAL_TASK_PAYLOAD')
if initial_payload:
entry['actual_task_payload'] = entry['task_payload']
entry['task_payload'] = initial_payload
entry['location'] = getattr(record, 'location', {'error': True})
entry['extras'] = getattr(record, 'extras', {})
update_entry_with_exc(entry, record.exc_info)
if not entry['extras']:
del entry['extras']
worker_bot_name = os.environ.get('WORKER_BOT_NAME')
if worker_bot_name:
entry['worker_bot_name'] = worker_bot_name
fuzz_target = os.getenv('FUZZ_TARGET')
if fuzz_target:
entry['fuzz_target'] = fuzz_target
# Log bot shutdown cases as WARNINGs since this is expected for preemptibles.
if (entry['severity'] in ['ERROR', 'CRITICAL'] and
'IOError: [Errno 4] Interrupted function call' in entry['message']):
entry['severity'] = 'WARNING'
return json.dumps(entry, default=_handle_unserializable) |
Update the dict `entry` with exc_info. | def update_entry_with_exc(entry, exc_info):
"""Update the dict `entry` with exc_info."""
if not exc_info:
return
error = exc_info[1]
error_extras = getattr(error, 'extras', {})
entry['task_payload'] = (
entry.get('task_payload') or error_extras.pop('task_payload', None))
entry['extras'].update(error_extras)
entry['serviceContext'] = {'service': 'bots'}
# Reference:
# https://cloud.google.com/error-reporting/docs/formatting-error-messages,
if exc_info[0]:
# We need to append the formatted traceback to the `message` field so that
# the stack trace is picked up by Error Reporting.
entry['message'] += '\n' + ''.join(
traceback.format_exception(exc_info[0], exc_info[1], exc_info[2]))
else:
# If we log error without exception, we need to set
# `context.reportLocation`.
location = entry.get('location', {})
entry['context'] = {
'reportLocation': {
'filePath': location.get('path', ''),
'lineNumber': location.get('line', 0),
'functionName': location.get('method', '')
}
} |
Handles any exception that are uncaught by logging an error and calling
the sys.__excepthook__. | def uncaught_exception_handler(exception_type, exception_value,
exception_traceback):
"""Handles any exception that are uncaught by logging an error and calling
the sys.__excepthook__."""
# Ensure that we are not calling ourself. This shouldn't be needed since we
# are using sys.__excepthook__. Do this check anyway since if we are somehow
# calling ourself we might infinitely send errors to the logs, which would be
# quite bad.
global _is_already_handling_uncaught
if _is_already_handling_uncaught:
raise RuntimeError('Loop in uncaught_exception_handler')
_is_already_handling_uncaught = True
# Use emit since log_error needs sys.exc_info() to return this function's
# arguments to call init properly.
# Don't worry about emit() throwing an Exception, python will let us know
# about that exception as well as the original one.
emit(
logging.ERROR,
'Uncaught exception',
exc_info=(exception_type, exception_value, exception_traceback))
sys.__excepthook__(exception_type, exception_value, exception_traceback) |
Configure logging for App Engine. | def configure_appengine():
"""Configure logging for App Engine."""
logging.getLogger().setLevel(logging.INFO)
if os.getenv('LOCAL_DEVELOPMENT') or os.getenv('PY_UNITTESTS'):
return
import google.cloud.logging
client = google.cloud.logging.Client()
handler = client.get_default_handler()
logging.getLogger().addHandler(handler) |
Configure logging for K8S and reporting errors. | def configure_k8s():
"""Configure logging for K8S and reporting errors."""
import google.cloud.logging
client = google.cloud.logging.Client()
client.setup_logging()
old_factory = logging.getLogRecordFactory()
def record_factory(*args, **kwargs):
"""Insert jsonPayload fields to all logs."""
record = old_factory(*args, **kwargs)
if not hasattr(record, 'json_fields'):
record.json_fields = {}
# Add jsonPayload fields to logs that don't contain stack traces to enable
# capturing and grouping by error reporting.
# https://cloud.google.com/error-reporting/docs/formatting-error-messages#log-text
if record.levelno >= logging.ERROR and not record.exc_info:
record.json_fields.update({
'@type':
'type.googleapis.com/google.devtools.clouderrorreporting.v1beta1.ReportedErrorEvent', # pylint: disable=line-too-long
'serviceContext': {
'service': 'k8s',
},
'context': {
'reportLocation': {
'filePath': record.pathname,
'lineNumber': record.lineno,
'functionName': record.funcName,
}
},
})
return record
logging.setLogRecordFactory(record_factory)
logging.getLogger().setLevel(logging.INFO) |
Configure Google Cloud Logging, for bots not running on App Engine nor K8s.
| def configure_cloud_logging():
"""Configure Google Cloud Logging, for bots not running on App Engine nor K8s."""
import google.cloud.logging
# project will default to the service account's project (likely from
# GOOGLE_APPLICATION_CREDENTIALS).
# Some clients might need to override this to log in a specific project using
# LOGGING_CLOUD_PROJECT_ID.
# Note that CLOUD_PROJECT_ID is not used here, as it might differ from both
# the service account's project and the logging project.
client = google.cloud.logging.Client(
project=os.getenv("LOGGING_CLOUD_PROJECT_ID"))
labels = {
'compute.googleapis.com/resource_name': socket.getfqdn().lower(),
'bot_name': os.getenv('BOT_NAME'),
}
handler = client.get_default_handler(labels=labels)
def cloud_label_filter(record):
# Update the labels with additional information.
# Ideally we would use json_fields as done in configure_k8s(), but since
# src/Pipfile forces google-cloud-logging = "==1.15.0", we have fairly
# limited options to format the output, see:
# https://github.com/googleapis/python-logging/blob/6236537b197422d3dcfff38fe7729dee7f361ca9/google/cloud/logging/handlers/handlers.py#L98 # pylint: disable=line-too-long
# https://github.com/googleapis/python-logging/blob/6236537b197422d3dcfff38fe7729dee7f361ca9/google/cloud/logging/handlers/transports/background_thread.py#L233 # pylint: disable=line-too-long
handler.labels.update({
'task_payload':
os.getenv('TASK_PAYLOAD', 'null'),
'fuzz_target':
os.getenv('FUZZ_TARGET', 'null'),
'worker_bot_name':
os.getenv('WORKER_BOT_NAME', 'null'),
'extra':
json.dumps(
getattr(record, 'extras', {}), default=_handle_unserializable),
'location':
json.dumps(
getattr(record, 'location', {'Error': True}),
default=_handle_unserializable)
})
return True
handler.addFilter(cloud_label_filter)
handler.setLevel(logging.INFO)
logging.getLogger().addHandler(handler) |
Set logger. See the list of loggers in bot/config/logging.yaml.
Also configures the process to log any uncaught exceptions as an error.
|extras| will be included by emit() in log messages. | def configure(name, extras=None):
"""Set logger. See the list of loggers in bot/config/logging.yaml.
Also configures the process to log any uncaught exceptions as an error.
|extras| will be included by emit() in log messages."""
suppress_unwanted_warnings()
if _is_running_on_k8s():
configure_k8s()
return
if _is_running_on_app_engine():
configure_appengine()
return
if _console_logging_enabled():
logging.basicConfig(level=logging.INFO)
if _file_logging_enabled():
config.dictConfig(get_logging_config_dict(name))
if _fluentd_logging_enabled():
configure_fluentd_logging()
if _cloud_logging_enabled():
configure_cloud_logging()
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
set_logger(logger)
# Set _default_extras so they can be used later.
if extras is None:
extras = {}
global _default_extras
_default_extras = extras
# Install an exception handler that will log an error when there is an
# uncaught exception.
sys.excepthook = uncaught_exception_handler |
Return logger. We need this method because we need to mock logger. | def get_logger():
"""Return logger. We need this method because we need to mock logger."""
if _logger:
return _logger
if _is_running_on_app_engine() or _is_running_on_k8s():
# Running on App Engine.
set_logger(logging.getLogger())
elif _console_logging_enabled():
# Force a logger when console logging is enabled.
configure('root')
return _logger |
Return the caller file, lineno, and funcName. | def get_source_location():
"""Return the caller file, lineno, and funcName."""
try:
raise RuntimeError()
except:
# f_back is called twice. Once to leave get_source_location(..) and another
# to leave emit(..).
# The code is adapted from:
# https://github.com/python/cpython/blob/2.7/Lib/logging/__init__.py#L1244
frame = sys.exc_info()[2].tb_frame.f_back
while frame and hasattr(frame, 'f_code'):
if not frame.f_code.co_filename.endswith('logs.py'):
return frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name
frame = frame.f_back
return 'Unknown', '-1', 'Unknown' |
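A standalone sketch of the same frame-walking idea using sys._getframe instead of the raise/except trick (the file-name suffix to skip is illustrative):

import sys

def caller_location(skip_filename_suffix='logs.py'):
  """Return (path, line, function) of the first caller outside the log module."""
  frame = sys._getframe(1)  # start at our immediate caller
  while frame is not None:
    code = frame.f_code
    if not code.co_filename.endswith(skip_filename_suffix):
      return code.co_filename, frame.f_lineno, code.co_name
    frame = frame.f_back
  return 'Unknown', -1, 'Unknown'

print(caller_location())  # e.g. ('/path/to/script.py', 14, '<module>')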
Add App Engine tracing information. | def _add_appengine_trace(extras):
"""Add App Engine tracing information."""
if not _is_running_on_app_engine():
return
from libs import auth
try:
request = auth.get_current_request()
if not request:
return
except Exception:
# FIXME: Find a way to add traces in threads. Skip adding for now, as
# otherwise, we hit an exception "Request global variable is not set".
return
trace_header = request.headers.get('X-Cloud-Trace-Context')
if not trace_header:
return
project_id = os.getenv('APPLICATION_ID')
trace_id = trace_header.split('/')[0]
extras['logging.googleapis.com/trace'] = (
'projects/{project_id}/traces/{trace_id}').format(
project_id=project_id, trace_id=trace_id) |
Log in JSON. | def emit(level, message, exc_info=None, **extras):
"""Log in JSON."""
logger = get_logger()
if not logger:
return
# Include extras passed as an argument and default extras.
all_extras = _default_extras.copy()
all_extras.update(extras)
path_name, line_number, method_name = get_source_location()
if _is_running_on_app_engine():
if exc_info == (None, None, None):
# Don't pass exc_info at all, as otherwise cloud logging will append
# "NoneType: None" to the message.
exc_info = None
if level >= logging.ERROR:
# App Engine only reports errors if there is an exception stacktrace, so
# we generate one. We don't create an exception here and then format it,
# as that will not include frames below this emit() call. We do [:-2] on
# the stacktrace to exclude emit() and the logging function below it (e.g.
# log_error).
message = (
message + '\n' + 'Traceback (most recent call last):\n' + ''.join(
traceback.format_stack()[:-2]) + 'LogError: ' + message)
_add_appengine_trace(all_extras)
# We need to make a dict out of it because member of the dict becomes the
# first class attributes of LogEntry. It is very tricky to identify the extra
# attributes. Therefore, we wrap extra fields under the attribute 'extras'.
logger.log(
level,
truncate(message, LOCAL_LOG_MESSAGE_LIMIT),
exc_info=exc_info,
extra={
'extras': all_extras,
'location': {
'path': path_name,
'line': line_number,
'method': method_name
}
}) |
Logs the message to a given log file. | def log(message, level=logging.INFO, **extras):
"""Logs the message to a given log file."""
emit(level, message, **extras) |
Logs the warning message. | def log_warn(message, **extras):
"""Logs the warning message."""
emit(logging.WARN, message, exc_info=sys.exc_info(), **extras) |
Logs the error in the error log file. | def log_error(message, **extras):
"""Logs the error in the error log file."""
exception = extras.pop('exception', None)
if exception:
try:
raise exception
except:
emit(logging.ERROR, message, exc_info=sys.exc_info(), **extras)
else:
emit(logging.ERROR, message, exc_info=sys.exc_info(), **extras)
_increment_error_count() |
Logs a fatal error and exits. | def log_fatal_and_exit(message, **extras):
"""Logs a fatal error and exits."""
wait_before_exit = extras.pop('wait_before_exit', None)
emit(logging.CRITICAL, message, exc_info=sys.exc_info(), **extras)
_increment_error_count()
if wait_before_exit:
log('Waiting for %d seconds before exit.' % wait_before_exit)
time.sleep(wait_before_exit)
sys.exit(-1) |
Used for mocking. | def check_module_loaded(module):
"""Used for mocking."""
return module is not None |
Decorator to stub out functions on failed imports. | def stub_unavailable(module):
"""Decorator to stub out functions on failed imports."""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if check_module_loaded(module):
return func(*args, **kwargs)
return _MockMetric()
return wrapper
return decorator |
Monitored resources. | def _initialize_monitored_resource():
"""Monitored resources."""
global _monitored_resource
_monitored_resource = monitoring_v3.types.MonitoredResource() # pylint: disable=no-member
# TODO(ochang): Use generic_node when that is available.
_monitored_resource.type = 'gce_instance'
# The project ID must be the same as the one we write metrics to, not the ID
# where the instance lives.
_monitored_resource.labels['project_id'] = utils.get_application_id()
# Use bot name here instead as that's more useful to us.
_monitored_resource.labels['instance_id'] = environment.get_value('BOT_NAME')
if compute_metadata.is_gce():
# Returned in the form projects/{id}/zones/{zone}
zone = compute_metadata.get('instance/zone').split('/')[-1]
_monitored_resource.labels['zone'] = zone
else:
# Default zone for instances not on GCE.
_monitored_resource.labels['zone'] = 'us-central1-f' |
Convert result of time.time() to Timestamp. | def _time_to_timestamp(timestamp, time_seconds):
"""Convert result of time.time() to Timestamp."""
timestamp.seconds = int(time_seconds)
timestamp.nanos = int((time_seconds - timestamp.seconds) * 10**9) |
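A usage sketch of the helper above, with a SimpleNamespace standing in for the protobuf Timestamp message (an assumption made only for illustration):

import time
import types

timestamp = types.SimpleNamespace(seconds=0, nanos=0)
now = time.time()
_time_to_timestamp(timestamp, now)
assert timestamp.seconds == int(now)
assert 0 <= timestamp.nanos < 10**9  # fractional second expressed in nanoseconds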
Initialize if monitoring is enabled for this bot. | def initialize():
"""Initialize if monitoring is enabled for this bot."""
global _monitoring_v3_client
global _flusher_thread
if environment.get_value('LOCAL_DEVELOPMENT'):
return
if not local_config.ProjectConfig().get('monitoring.enabled'):
return
if check_module_loaded(monitoring_v3):
_initialize_monitored_resource()
_monitoring_v3_client = monitoring_v3.MetricServiceClient(
credentials=credentials.get_default()[0])
_flusher_thread = _FlusherThread()
_flusher_thread.start() |
Stops monitoring and cleans up (only if monitoring is enabled). | def stop():
"""Stops monitoring and cleans up (only if monitoring is enabled)."""
if _flusher_thread:
_flusher_thread.stop() |
Get the per-process metrics store. | def metrics_store():
"""Get the per-process metrics store."""
return _metrics_store |
Get bot region. | def _get_region(bot_name):
"""Get bot region."""
try:
regions = local_config.MonitoringRegionsConfig()
except errors.BadConfigError:
return 'unknown'
for pattern in regions.get('patterns'):
if re.match(pattern['pattern'], bot_name):
return pattern['name']
return 'unknown' |
Build _CounterMetric. | def CounterMetric(name, description, field_spec):
"""Build _CounterMetric."""
return _CounterMetric(name, field_spec=field_spec, description=description) |
Build _GaugeMetric. | def GaugeMetric(name, description, field_spec):
"""Build _GaugeMetric."""
return _GaugeMetric(name, field_spec=field_spec, description=description) |
Build _CumulativeDistributionMetric. | def CumulativeDistributionMetric(name, description, bucketer, field_spec):
"""Build _CumulativeDistributionMetric."""
return _CumulativeDistributionMetric(
name, description=description, bucketer=bucketer, field_spec=field_spec) |
Start Google Cloud Profiler if |USE_PYTHON_PROFILER| environment variable
is set. | def start_if_needed(service):
"""Start Google Cloud Profiler if |USE_PYTHON_PROFILER| environment variable
is set."""
if not environment.get_value('USE_PYTHON_PROFILER'):
return True
project_id = utils.get_application_id()
service_with_platform = '{service}_{platform}'.format(
service=service, platform=environment.platform().lower())
try:
# Import the package here since it is only needed when profiler is enabled.
# Also, this is supported on Linux only.
import googlecloudprofiler
googlecloudprofiler.start(
project_id=project_id, service=service_with_platform)
except Exception:
logs.log_error(
'Failed to start the profiler for service %s.' % service_with_platform)
return False
return True |
Wait when device is in a bad state and exit. | def bad_state_reached():
"""Wait when device is in a bad state and exit."""
persistent_cache.clear_values()
logs.log_fatal_and_exit(
'Device in bad state.', wait_before_exit=BAD_STATE_WAIT) |
Connect to Cuttlefish cvd. | def connect_to_cuttlefish_device():
"""Connect to Cuttlefish cvd."""
logs.log('Connect to cuttlefish device.')
device_serial = environment.get_value('ANDROID_SERIAL')
connect_cmd = f'{get_adb_path()} connect {device_serial}'
return execute_command(connect_cmd, timeout=RECOVERY_CMD_TIMEOUT) |
Copies local directory contents to a device directory. | def copy_local_directory_to_remote(local_directory, remote_directory):
"""Copies local directory contents to a device directory."""
create_directory_if_needed(remote_directory)
if os.listdir(local_directory):
run_command(['push', '%s/.' % local_directory, remote_directory], True,
True) |
Copies local file to a device file. | def copy_local_file_to_remote(local_file_path, remote_file_path):
"""Copies local file to a device file."""
create_directory_if_needed(os.path.dirname(remote_file_path))
run_command(['push', local_file_path, remote_file_path], True, True) |
Copies device directory contents to a local directory. | def copy_remote_directory_to_local(remote_directory, local_directory):
"""Copies device directory contents to a local directory."""
run_command(['pull', '%s/.' % remote_directory, local_directory]) |
Copies device file to a local file. | def copy_remote_file_to_local(remote_file_path, local_file_path):
"""Copies device file to a local file."""
shell.create_directory(
os.path.dirname(local_file_path), create_intermediates=True)
run_command(['pull', remote_file_path, local_file_path]) |
Creates a directory on the device if it doesn't already exist. | def create_directory_if_needed(device_directory):
"""Creates a directory on the device if it doesn't already exist."""
run_shell_command(['mkdir', '-p', device_directory]) |
Return whether a directory exists or not. | def directory_exists(directory_path):
"""Return whether a directory exists or not."""
expected = '0'
result = run_shell_command(
'\'test -d "%s"; echo $?\'' % directory_path, log_error=False)
return result == expected |
Spawns a subprocess to run the given shell command. | def execute_command(cmd, timeout=None, log_error=True,
on_cuttlefish_host=False):
"""Spawns a subprocess to run the given shell command."""
if on_cuttlefish_host and environment.is_android_cuttlefish():
# Auto accept key fingerprint for ssh command.
cmd = ('ssh -o StrictHostKeyChecking=no '
f'{get_cuttlefish_ssh_target()} "{cmd}"')
so = []
# pylint: disable=consider-using-with
output_dest = tempfile.TemporaryFile()
# pylint: disable=subprocess-popen-preexec-fn,consider-using-with
pipe = subprocess.Popen(
cmd,
executable='/bin/bash',
stdout=output_dest,
stderr=subprocess.STDOUT,
shell=True,
preexec_fn=lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL),
bufsize=0)
def run():
"""Thread target function that waits for subprocess to complete."""
try:
pipe.communicate()
output_dest.seek(0)
output = output_dest.read()
output_dest.close()
if output:
so.append(output)
except OSError as _:
logs.log_warn('Failed to retrieve stdout from: %s' % cmd)
if pipe.returncode:
if log_error:
logs.log_warn(
'%s returned %d error code.' % (cmd, pipe.returncode),
output=output)
thread = threading.Thread(target=run)
thread.start()
thread.join(timeout)
if thread.is_alive():
logs.log_warn('Command %s timed out. Killing process.' % cmd)
try:
pipe.kill()
except OSError:
# Can't kill a dead process.
pass
return None
bytes_output = b''.join(so)
return bytes_output.strip().decode('utf-8', errors='ignore') |
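The thread-plus-join(timeout) pattern above is what gives execute_command() its timeout. A minimal standalone sketch of the same idea (command strings and timeouts are illustrative):

import subprocess
import threading

def run_with_timeout(cmd, timeout):
  """Run a shell command, returning its output or None if it times out."""
  proc = subprocess.Popen(
      cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
  output = []

  def waiter():
    out, _ = proc.communicate()
    output.append(out)

  thread = threading.Thread(target=waiter)
  thread.start()
  thread.join(timeout)
  if thread.is_alive():
    try:
      proc.kill()  # command exceeded the timeout
    except OSError:
      pass         # process already exited
    return None
  return output[0].decode('utf-8', errors='ignore').strip()

print(run_with_timeout('echo hello', timeout=5))  # 'hello'
print(run_with_timeout('sleep 10', timeout=1))    # None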
Copy file to cuttlefish device. | def copy_to_cuttlefish(src_path, dest_path, timeout=None):
"""Copy file to cuttlefish device."""
cvd_address = get_cuttlefish_ssh_target()
return execute_command(
'scp -o StrictHostKeyChecking=no '
f'-r {src_path} {cvd_address}:{dest_path}',
timeout=timeout) |
Reset device to factory state. | def factory_reset():
"""Reset device to factory state."""
logs.log('reached factory_reset')
if environment.is_android_cuttlefish():
# We cannot recover from this since there can be cases like userdata image
# corruption in /data/data. Till the bug is fixed, we just need to wait
# for reimage in next iteration.
bad_state_reached()
# A device can be stuck in a boot loop due to a bad clang library update.
# Reverting that can bring a device back to good state.
revert_asan_device_setup_if_needed()
run_as_root()
run_shell_command([
'am', 'broadcast', '-a', 'android.intent.action.MASTER_CLEAR', '-n',
'android/com.android.server.MasterClearReceiver'
])
# Wait until the reset is complete.
time.sleep(FACTORY_RESET_WAIT) |
Return whether a file exists or not. | def file_exists(file_path):
"""Return whether a file exists or not."""
expected = '0'
result = run_shell_command(
'\'test -f "%s"; echo $?\'' % file_path, log_error=False)
return result == expected |
Return adb command line for running an adb command. | def get_adb_command_line(adb_cmd):
"""Return adb command line for running an adb command."""
device_serial = environment.get_value('ANDROID_SERIAL')
adb_cmd_line = '%s -s %s %s' % (get_adb_path(), device_serial, adb_cmd)
return adb_cmd_line |
Return path to ADB binary. | def get_adb_path():
"""Return path to ADB binary."""
adb_path = environment.get_value('ADB')
if adb_path:
return adb_path
return os.path.join(environment.get_platform_resources_directory(), 'adb') |
Return the device status. | def get_device_state():
"""Return the device status."""
if environment.is_android_emulator():
fastboot_state = run_fastboot_command(
['getvar', 'is-ramdump-mode'], timeout=GET_DEVICE_STATE_TIMEOUT)
if fastboot_state and 'is-ramdump-mode: yes' in fastboot_state:
return 'is-ramdump-mode:yes'
state_cmd = get_adb_command_line('get-state')
return execute_command(state_cmd, timeout=RECOVERY_CMD_TIMEOUT) |
Return fastboot command line for running a fastboot command. | def get_fastboot_command_line(fastboot_cmd):
"""Return fastboot command line for running a fastboot command."""
fastboot_cmd_line = '%s %s' % (get_fastboot_path(), fastboot_cmd)
return fastboot_cmd_line |
Return path to fastboot binary. | def get_fastboot_path():
"""Return path to fastboot binary."""
return os.path.join(environment.get_platform_resources_directory(),
'fastboot') |
Return file's md5 checksum. | def get_file_checksum(file_path):
"""Return file's md5 checksum."""
if not file_exists(file_path):
return None
return run_shell_command(['md5sum', '-b', file_path]) |
Return file's size. | def get_file_size(file_path):
"""Return file's size."""
if not file_exists(file_path):
return None
return int(run_shell_command(['stat', '-c%s', file_path])) |
Return content of kernel logs. | def get_kernel_log_content():
"""Return content of kernel logs."""
kernel_log_content = ''
for kernel_log_file in KERNEL_LOG_FILES:
kernel_log_content += read_data_from_file(kernel_log_file) or ''
return kernel_log_content |
Extracts logcat from ramdump kernel log and reboots. | def extract_logcat_from_ramdump_and_reboot():
"""Extracts logcat from ramdump kernel log and reboots."""
run_fastboot_command(
['oem', 'ramdump', 'stage_file', 'kernel.log'],
timeout=RECOVERY_CMD_TIMEOUT)
run_fastboot_command(
['get_staged', 'kernel.log'], timeout=WAIT_FOR_DEVICE_TIMEOUT)
storage.copy_file_from(RAMOOPS_READER_GCS_PATH, 'ramoops_reader.py')
subprocess.run(
'python ramoops_reader.py kernel.log > logcat.log',
shell=True,
check=False)
with open('logcat.log') as file:
logcat = file.read()
files_to_delete = ['ramoops_reader.py', 'kernel.log', 'logcat.log']
for filepath in files_to_delete:
os.remove(filepath)
run_fastboot_command('reboot')
wait_until_fully_booted()
return logcat |
Return ps output for all processes. | def get_ps_output():
"""Return ps output for all processes."""
return run_shell_command(['ps', '-A']) |
Return process and child pids matching a process name. | def get_process_and_child_pids(process_name):
"""Return process and child pids matching a process name."""
pids = []
ps_output_lines = get_ps_output().splitlines()
while True:
old_pids_length = len(pids)
for line in ps_output_lines:
data = line.split()
# Make sure we have a valid pid and parent pid.
try:
# PID is in the second column.
line_process_pid = int(data[1])
# Parent PID is in the third column.
line_parent_pid = int(data[2])
except:
continue
# If we have already processed this pid, no more work to do.
if line_process_pid in pids:
continue
# Process name is in the last column.
# Monkey framework instances (if any) are children of our process launch,
# so include these in pid list.
line_process_name = data[-1]
if (process_name in line_process_name or
MONKEY_PROCESS_NAME in line_process_name):
if process_name == line_process_name:
pids.insert(0, line_process_pid)
else:
pids.append(line_process_pid)
continue
# Add child pids to end.
if line_parent_pid in pids:
pids.append(line_process_pid)
new_pids_length = len(pids)
if old_pids_length == new_pids_length:
break
return pids |
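A simplified, standalone walk-through of the fixed-point loop above, using a fabricated `ps -A` listing (pids and names are made up; the MONKEY_PROCESS_NAME handling and exact-match ordering are omitted):

# Fabricated `ps -A` output: columns are USER PID PPID ... NAME.
fake_ps_lines = [
    'USER     PID   PPID  VSZ    RSS   WCHAN  ADDR  S NAME',
    'shell    102   101   6000   900   0      0     S logcat',
    'shell    100   1     10000  2000  0      0     S com.example.app',
    'shell    101   100   8000   1500  0      0     S com.example.app:worker',
]

process_name = 'com.example.app'
pids = []
while True:
  before = len(pids)
  for line in fake_ps_lines:
    data = line.split()
    try:
      line_pid, line_ppid = int(data[1]), int(data[2])
    except ValueError:
      continue  # header line
    if line_pid in pids:
      continue
    if process_name in data[-1]:
      pids.append(line_pid)   # direct name match
    elif line_ppid in pids:
      pids.append(line_pid)   # child of an already-collected pid
  if len(pids) == before:
    break  # fixed point: no new pids discovered this pass

print(pids)  # [100, 101, 102]; 102 is only picked up on the second pass
             # because its parent (101) appears later in the listing.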
Return property's value. | def get_property(property_name):
"""Return property's value."""
return run_shell_command(['getprop', property_name]) |
Perform a hard reset of the device. | def hard_reset():
"""Perform a hard reset of the device."""
if environment.is_android_cuttlefish():
# There is no recovery step at this point for a cuttlefish bot, so just exit
# and wait for reimage on next iteration.
bad_state_reached()
# For physical device.
# Try hard-reset via sysrq-trigger (requires root).
hard_reset_sysrq_cmd = get_adb_command_line(
'shell echo b \\> /proc/sysrq-trigger')
execute_command(hard_reset_sysrq_cmd, timeout=RECOVERY_CMD_TIMEOUT)
# Try soft-reset now (does not require root).
soft_reset_cmd = get_adb_command_line('reboot')
execute_command(soft_reset_cmd, timeout=RECOVERY_CMD_TIMEOUT)
if environment.is_android_emulator():
logs.log('Platform ANDROID_EMULATOR detected.')
restart_adb()
state = get_device_state()
logs.log('Device state is: %s' % state)
if state == 'recovery':
logs.log('Rebooting recovery state device with --wipe_data.')
run_command('root')
run_shell_command('recovery --wipe_data') |
Kills process along with children matching names. | def kill_processes_and_children_matching_name(process_name):
"""Kills process along with children matching names."""
process_and_child_pids = get_process_and_child_pids(process_name)
if not process_and_child_pids:
return
kill_command = ['kill', '-9'] + process_and_child_pids
run_shell_command(kill_command) |
Return device's file content. | def read_data_from_file(file_path):
"""Return device's file content."""
if not file_exists(file_path):
return None
return run_shell_command(['cat', '"%s"' % file_path]) |
Reboots device. | def reboot():
"""Reboots device."""
run_command('reboot') |
Start the cuttlefish device. | def start_cuttlefish_device(use_kernel=False):
"""Start the cuttlefish device."""
cvd_dir = environment.get_value('CVD_DIR')
cvd_bin_dir = os.path.join(cvd_dir, 'bin')
launch_cvd_path = os.path.join(cvd_bin_dir, 'launch_cvd')
device_memory_mb = environment.get_value('DEVICE_MEMORY_MB',
DEFAULT_DEVICE_MEMORY_MB)
# @TODO(https://github.com/google/clusterfuzz/issues/3777): Enable sandboxing
launch_cvd_command_line = (
f'{launch_cvd_path} --daemon --memory_mb={device_memory_mb} '
'--report_anonymous_usage_stats=Y --enable_sandbox=false --resume=false')
if use_kernel:
kernel_path = os.path.join(cvd_dir, 'bzImage')
initramfs_path = os.path.join(cvd_dir, 'initramfs.img')
launch_cvd_command_line += (
f' --kernel_path={kernel_path} --initramfs_path={initramfs_path}')
execute_command(
launch_cvd_command_line,
timeout=LAUNCH_CVD_TIMEOUT,
on_cuttlefish_host=True) |
Copy and combine cvd host package and OTA images. | def copy_images_to_cuttlefish():
"""Copy and combine cvd host package and OTA images."""
image_directory = environment.get_value('IMAGES_DIR')
cvd_dir = environment.get_value('CVD_DIR')
for image_filename in os.listdir(image_directory):
if image_filename.endswith('.zip') or image_filename.endswith('.tar.gz'):
continue
image_src = os.path.join(image_directory, image_filename)
image_dest = os.path.join(cvd_dir, image_filename)
copy_to_cuttlefish(image_src, image_dest) |
Stops the cuttlefish device. | def stop_cuttlefish_device():
"""Stops the cuttlefish device."""
cvd_dir = environment.get_value('CVD_DIR')
cvd_bin_dir = os.path.join(cvd_dir, 'bin')
stop_cvd_cmd = os.path.join(cvd_bin_dir, 'stop_cvd')
logs.log('stop_cvd_cmd: %s' % str(stop_cvd_cmd))
if get_device_state() == 'device':
execute_command(
stop_cvd_cmd, timeout=RECOVERY_CMD_TIMEOUT, on_cuttlefish_host=True)
time.sleep(STOP_CVD_WAIT) |
Restarts the cuttlefish device. | def restart_cuttlefish_device():
"""Restarts the cuttlefish device."""
cvd_dir = environment.get_value('CVD_DIR')
cvd_bin_dir = os.path.join(cvd_dir, 'bin')
restart_cvd_cmd = os.path.join(cvd_bin_dir, 'restart_cvd')
execute_command(restart_cvd_cmd, on_cuttlefish_host=True) |
Recreate cuttlefish device, restoring from backup images. | def recreate_cuttlefish_device():
"""Recreate cuttlefish device, restoring from backup images."""
logs.log('Reimaging cuttlefish device.')
cvd_dir = environment.get_value('CVD_DIR')
logs.log('cvd_dir: %s' % str(cvd_dir))
copy_images_to_cuttlefish()
stop_cuttlefish_device()
# Delete all existing images.
rm_cmd = f'rm -rf {cvd_dir}/*'
execute_command(rm_cmd, timeout=RECOVERY_CMD_TIMEOUT, on_cuttlefish_host=True)
copy_images_to_cuttlefish()
start_cuttlefish_device() |
Remount /system as read/write. | def remount():
"""Remount /system as read/write."""
run_as_root()
run_command('remount')
wait_for_device()
run_as_root() |
Delete everything inside of a device directory and recreate if needed. | def remove_directory(device_directory, recreate=False):
"""Delete everything inside of a device directory and recreate if needed."""
run_shell_command('rm -rf %s' % device_directory, root=True)
if recreate:
create_directory_if_needed(device_directory) |
Remove file. | def remove_file(file_path):
"""Remove file."""
run_shell_command('rm -f %s' % file_path, root=True) |