Return gcs path to directory containing crash regressions.
def _get_regressions_corpus_gcs_url(bucket_name, bucket_path): """Return gcs path to directory containing crash regressions.""" return _get_gcs_url( bucket_name, bucket_path, suffix=REGRESSIONS_GCS_PATH_SUFFIX)
Build corpus GCS URL for gsutil. Returns: A string giving the GCS URL.
def _get_gcs_url(bucket_name, bucket_path, suffix=''): """Build corpus GCS URL for gsutil. Returns: A string giving the GCS URL. """ # TODO(metzman): Delete this after we are done migrating to the zipcorpus # format. url = f'gs://{bucket_name}{bucket_path}{suffix}' if not url.endswith('/'): # Ensure that the bucket path is '/' terminated. Without this, when a # single file is being uploaded, it is renamed to the trailing non-/ # terminated directory name instead. url += '/' return url
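As a quick illustration of the trailing-slash rule above, here is a minimal sketch with invented bucket and path names; only the string handling mirrors _get_gcs_url, nothing is uploaded.

bucket_name = 'example-corpus-bucket'  # hypothetical bucket name
bucket_path = '/libFuzzer/example_target'  # hypothetical bucket path
url = f'gs://{bucket_name}{bucket_path}'
if not url.endswith('/'):
  # Same guard as _get_gcs_url: without the trailing '/', gsutil renames a
  # single uploaded file to the directory name.
  url += '/'
assert url == 'gs://example-corpus-bucket/libFuzzer/example_target/'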
Returns a data bundle corpus that can be used by uworkers or trusted workers to download the data bundle files using the fastest means available to them.
def get_proto_data_bundle_corpus( data_bundle) -> uworker_msg_pb2.DataBundleCorpus: """Returns a data bundle corpus that can be used by uworkers or trusted workers to download the data bundle files using the fastest means available to them.""" data_bundle_corpus = uworker_msg_pb2.DataBundleCorpus() data_bundle_corpus.gcs_url = data_handler.get_data_bundle_bucket_url( data_bundle.name) data_bundle_corpus.data_bundle.CopyFrom( uworker_io.entity_to_protobuf(data_bundle)) if task_types.task_main_runs_on_uworker(): # Slow path for when we need an untrusted worker to run a task. # Note that the security of the system does not depend on this path being # taken (only the correctness does). If it is not taken when we need it, # utask_main will simply fail as it tries to do a privileged operation it # does not have permissions for. urls = (f'{data_bundle_corpus.gcs_url}/{url}' for url in storage.list_blobs(data_bundle_corpus.gcs_url)) data_bundle_corpus.corpus_urls.extend([ url_pair[0] for url_pair in storage.sign_urls_for_existing_files( urls, include_delete_urls=False) ]) return data_bundle_corpus
Returns a proto representation of a corpus.
def get_proto_corpus(bucket_name, bucket_path, max_upload_urls, include_delete_urls=False): """Returns a proto representation of a corpus.""" gcs_url = _get_gcs_url(bucket_name, bucket_path) # TODO(metzman): Allow this step to be skipped by trusted fuzzers. urls = (f'{storage.GS_PREFIX}/{bucket_name}/{url}' for url in storage.list_blobs(gcs_url)) corpus_urls = dict( storage.sign_urls_for_existing_files(urls, include_delete_urls)) upload_urls = storage.get_arbitrary_signed_upload_urls( gcs_url, num_uploads=max_upload_urls) corpus = uworker_msg_pb2.Corpus( corpus_urls=corpus_urls, upload_urls=upload_urls, gcs_url=gcs_url, ) last_updated = storage.last_updated(_get_gcs_url(bucket_name, bucket_path)) if last_updated: timestamp = timestamp_pb2.Timestamp() # pylint: disable=no-member timestamp.FromDatetime(last_updated) corpus.last_updated_time.CopyFrom(timestamp) return corpus
Gets target and bucket path for the corpus.
def get_target_bucket_and_path(engine, project_qualified_target_name, quarantine=False): """Gets target and bucket path for the corpus.""" engine = os.getenv('CORPUS_FUZZER_NAME_OVERRIDE', engine) if quarantine: sync_corpus_bucket_name = environment.get_value('QUARANTINE_BUCKET') else: sync_corpus_bucket_name = environment.get_value('CORPUS_BUCKET') if not sync_corpus_bucket_name: raise RuntimeError('No corpus bucket specified.') return sync_corpus_bucket_name, f'/{engine}/{project_qualified_target_name}'
Builds a proto representation of the fuzz target's corpus in GCS. Can run on uworker.
def get_fuzz_target_corpus(engine, project_qualified_target_name, quarantine=False, include_regressions=False, include_delete_urls=False, max_upload_urls=10000): """Builds a proto representation of the fuzz target's corpus in GCS. Can run on uworker.""" fuzz_target_corpus = uworker_msg_pb2.FuzzTargetCorpus() bucket_name, bucket_path = get_target_bucket_and_path( engine, project_qualified_target_name, quarantine) corpus = get_proto_corpus( bucket_name, bucket_path, include_delete_urls=include_delete_urls, max_upload_urls=max_upload_urls) print('bucket_name', bucket_name, 'bucket_path', bucket_path, corpus.gcs_url) fuzz_target_corpus.corpus.CopyFrom(corpus) assert not (include_regressions and quarantine) if include_regressions: regressions_bucket_path = f'{bucket_path}{REGRESSIONS_GCS_PATH_SUFFIX}' regressions_corpus = get_proto_corpus( bucket_name, regressions_bucket_path, max_upload_urls=0, # This is never uploaded to using this mechanism. include_delete_urls=False) # This is never deleted from. fuzz_target_corpus.regressions_corpus.CopyFrom(regressions_corpus) return ProtoFuzzTargetCorpus(engine, project_qualified_target_name, fuzz_target_corpus)
Returns a fuzz target corpus and quarantine corpus for pruning.
def get_corpuses_for_pruning(engine, project_qualified_name): """Returns a fuzz target corpus and quarantine corpus for pruning.""" # We need to include upload URLs because of corpus pollination. This is # unfortunate as it is probably rarely used. corpus = get_fuzz_target_corpus( engine, project_qualified_name, include_regressions=True, include_delete_urls=True) max_upload_urls = len(corpus.proto_corpus.corpus.corpus_urls) # We will never need to upload more than the number of testcases in the # corpus to the quarantine. quarantine_corpus = get_fuzz_target_corpus( engine, project_qualified_name, quarantine=True, max_upload_urls=max_upload_urls) return corpus, quarantine_corpus
Clear existing mappings for a fuzzer, and replace them.
def update_mappings_for_fuzzer(fuzzer, mappings=None): """Clear existing mappings for a fuzzer, and replace them.""" if mappings is None: mappings = fuzzer.jobs query = data_types.FuzzerJob.query() query = query.filter(data_types.FuzzerJob.fuzzer == fuzzer.name) entities = ndb_utils.get_all_from_query(query) old_mappings = {} for entity in entities: old_mappings[entity.job] = entity new_mappings = [] for job_name in mappings: mapping = old_mappings.pop(job_name, None) if mapping: continue job = data_types.Job.query(data_types.Job.name == job_name).get() if not job: logs.log_error('An unknown job %s was selected for fuzzer %s.' % (job_name, fuzzer.name)) continue mapping = data_types.FuzzerJob() mapping.fuzzer = fuzzer.name mapping.job = job_name mapping.platform = job.platform new_mappings.append(mapping) ndb_utils.put_multi(new_mappings) ndb_utils.delete_multi([m.key for m in list(old_mappings.values())])
Clear existing mappings for a job, and replace them.
def update_mappings_for_job(job, mappings): """Clear existing mappings for a job, and replace them.""" existing_fuzzers = { fuzzer.name: fuzzer for fuzzer in data_types.Fuzzer.query() if job.name in fuzzer.jobs } modified_fuzzers = [] for fuzzer_name in mappings: fuzzer = existing_fuzzers.pop(fuzzer_name, None) if fuzzer: continue fuzzer = data_types.Fuzzer.query( data_types.Fuzzer.name == fuzzer_name).get() if not fuzzer: logs.log_error('An unknown fuzzer %s was selected for job %s.' % (fuzzer_name, job.name)) continue fuzzer.jobs.append(job.name) modified_fuzzers.append(fuzzer) update_mappings_for_fuzzer(fuzzer) # Remove the remaining values in existing_fuzzers as # they are no longer mapped. for fuzzer in existing_fuzzers.values(): fuzzer.jobs.remove(job.name) modified_fuzzers.append(fuzzer) update_mappings_for_fuzzer(fuzzer) ndb.put_multi(modified_fuzzers)
Update platform for all mappings for a particular job.
def update_platform_for_job(job_name, new_platform): """Update platform for all mappings for a particular job.""" query = data_types.FuzzerJob.query() query = query.filter(data_types.FuzzerJob.job == job_name) mappings = ndb_utils.get_all_from_query(query) new_mappings = [] for mapping in mappings: mapping.platform = new_platform new_mappings.append(mapping) ndb_utils.put_multi(new_mappings)
Select a fuzzer that can run on this platform.
def get_fuzz_task_payload(platform=None): """Select a fuzzer that can run on this platform.""" if not platform: queue_override = environment.get_value('QUEUE_OVERRIDE') platform = queue_override if queue_override else environment.platform() platforms = [platform] base_platform = platform.split(':')[0] # Generalized queue for platforms with a base platform (e.g. ANDROID) if base_platform != platform: platforms.append(base_platform) if environment.is_production(): query = data_types.FuzzerJobs.query() query = query.filter(data_types.FuzzerJobs.platform.IN(platforms)) mappings = [] for entity in query: mappings.extend(entity.fuzzer_jobs) else: # 'FuzzerJobs' may not exist locally because they are created by # the 'batch_fuzzer_jobs' cron job query = data_types.FuzzerJob.query() query = query.filter(data_types.FuzzerJob.platform.IN(platforms)) mappings = list(ndb_utils.get_all_from_query(query))[:1] if not mappings: return None, None selection = utils.random_weighted_choice( mappings, weight_attribute='actual_weight') return selection.fuzzer, selection.job
Select a fuzz target from a list of potential targets.
def select_fuzz_target(targets, target_weights): """Select a fuzz target from a list of potential targets.""" assert targets weighted_targets = [] for target in targets: weight = target_weights.get(target, 1.0) weighted_targets.append(WeightedTarget(target, weight)) return utils.random_weighted_choice(weighted_targets).target
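The weighting behaviour can be sketched with the standard library alone; random.choices stands in here for utils.random_weighted_choice, and the target names and weights are invented.

import random

targets = ['parser_fuzzer', 'decoder_fuzzer', 'net_fuzzer']  # hypothetical targets
target_weights = {'decoder_fuzzer': 3.0}  # unlisted targets default to a weight of 1.0
weights = [target_weights.get(target, 1.0) for target in targets]
# decoder_fuzzer is roughly three times as likely to be picked as either other target.
print(random.choices(targets, weights=weights, k=1)[0])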
Get a dict of fuzz target weights based on the current fuzzer.
def get_fuzz_target_weights(): """Get a dict of fuzz target weights based on the current fuzzer.""" job_type = environment.get_value('JOB_NAME') target_jobs = list(fuzz_target_utils.get_fuzz_target_jobs(job=job_type)) fuzz_targets = fuzz_target_utils.get_fuzz_targets_for_target_jobs(target_jobs) weights = {} for fuzz_target, target_job in zip(fuzz_targets, target_jobs): if not fuzz_target: logs.log_error('Skipping weight assignment for fuzz target ' f'{target_job.fuzz_target_name}.') continue weights[fuzz_target.binary] = target_job.weight return weights
Return a list of random gestures.
def get_gestures(gesture_count): """Return a list of random gestures.""" plt = environment.platform() if environment.is_android(plt): return android.gestures.get_random_gestures(gesture_count) if plt == 'LINUX': return linux.gestures.get_random_gestures(gesture_count) if plt == 'WINDOWS': return windows.gestures.get_random_gestures(gesture_count) return []
Creates an empty local blacklist.
def create_empty_local_blacklist(): """Creates an empty local blacklist.""" lsan_suppressions_path = get_local_blacklist_file_path() with open(lsan_suppressions_path, 'w') as local_blacklist: # Insert comment on top to avoid parsing errors on empty file. local_blacklist.write(LSAN_HEADER_COMMENT)
Cleans out closed and deleted testcases from the global blacklist.
def cleanup_global_blacklist(): """Cleans out closed and deleted testcases from the global blacklist.""" blacklists_to_delete = [] global_blacklists = data_types.Blacklist.query( data_types.Blacklist.tool_name == LSAN_TOOL_NAME) for blacklist in global_blacklists: testcase_id = blacklist.testcase_id try: testcase = data_handler.get_testcase_by_id(testcase_id) except errors.InvalidTestcaseError: testcase = None # Delete entry if testcase is closed, deleted, or unreproducible. if not testcase or not testcase.open or testcase.one_time_crasher_flag: blacklists_to_delete.append(blacklist.key) ndb_utils.delete_multi(blacklists_to_delete)
Copies contents of global blacklist into local blacklist file, excluding a particular testcase (if any).
def copy_global_to_local_blacklist(blacklisted_functions, excluded_testcase=None): """Copies contents of global blacklist into local blacklist file, excluding a particular testcase (if any).""" lsan_suppressions_path = get_local_blacklist_file_path() excluded_function_name = ( get_leak_function_for_blacklist(excluded_testcase) if excluded_testcase else None) with open(lsan_suppressions_path, 'w') as local_blacklist: # Insert comment on top to avoid parsing errors on empty file. local_blacklist.write(LSAN_HEADER_COMMENT) for function_name in blacklisted_functions: if function_name == excluded_function_name: continue local_blacklist.write( LSAN_SUPPRESSION_LINE.format(function=function_name))
Return leak function to be used for blacklisting.
def get_leak_function_for_blacklist(testcase): """Return leak function to be used for blacklisting.""" crash_functions = testcase.crash_state.splitlines() if not crash_functions: return None return crash_functions[0]
Return the file path to the local blacklist text file.
def get_local_blacklist_file_path(): """Return the file path to the local blacklist text file.""" local_blacklist_path = os.path.join(environment.get_suppressions_directory(), 'lsan_suppressions.txt') # Create the directory if it does not exists, since we need to write to it. blacklist_directory = os.path.dirname(local_blacklist_path) if not os.path.exists(blacklist_directory): os.makedirs(blacklist_directory) return local_blacklist_path
Returns True if testcase is reproducible and not deleted.
def should_be_blacklisted(testcase): """Returns True if testcase is reproducible and not deleted.""" return (testcase.open and testcase.crash_type == DIRECT_LEAK_LABEL and not testcase.one_time_crasher_flag)
Adds relevant function from testcase crash state to global blacklist.
def add_crash_to_global_blacklist_if_needed(testcase): """Adds relevant function from testcase crash state to global blacklist.""" testcase_id = testcase.key.id() if not should_be_blacklisted(testcase): logs.log('Testcase %s is not a reproducible leak, skipping leak blacklist.' % testcase_id) return False function_name = get_leak_function_for_blacklist(testcase) if not function_name: logs.log_error( 'Testcase %s has invalid crash state, skipping leak blacklist.' % testcase_id) return False existing_query = data_types.Blacklist.query( data_types.Blacklist.function_name == function_name) existing_query = existing_query.filter( data_types.Blacklist.testcase_id == testcase_id) existing_query = existing_query.filter( data_types.Blacklist.tool_name == LSAN_TOOL_NAME) if existing_query.get(): logs.log_error('Item already in leak blacklist.') return False blacklist_item = data_types.Blacklist( function_name=function_name, testcase_id=testcase_id, tool_name=LSAN_TOOL_NAME) blacklist_item.put() logs.log('Added %s to leak blacklist.' % function_name) return blacklist_item
Highlights the first direct leak in a report. Args: crash_stacktrace: The crash report. Returns: new_report: Updated crash report with first direct leak highlighted.
def highlight_first_direct_leak(crash_stacktrace): """Highlights the first direct leak in a report. Args: crash_stacktrace: The crash report. Returns: new_report: Updated crash report with first direct leak highlighted. """ new_report = [] processed_first_leak = False num_stacks = 0 highlighted_stack_index = 0 divider_index = 0 # Used to prevent highlighting on first indirect leak. direct_leak = False currently_highlighting = False for line in crash_stacktrace.splitlines(): if DIRECT_LEAK_REGEX.match(line): direct_leak = True # Marking the end of the highlighted stack with a divider. if BLANK_LINE_REGEX.match(line) and currently_highlighting: currently_highlighting = False processed_first_leak = True if STACK_REGEX.match(line): if STACK_START_REGEX.match(line): num_stacks += 1 if direct_leak and not processed_first_leak: highlighted_stack_index = num_stacks currently_highlighting = True # If the line is in the first stack, highlight. if currently_highlighting: line = '<b>%s</b>' % line if not processed_first_leak: divider_index += 1 new_report.append(line) # If there's only one stack, return original report. if num_stacks == 1: return crash_stacktrace # If there are leaks after the highlighted leak, insert a divider. if highlighted_stack_index != num_stacks: new_report.insert(divider_index + 1, FIRST_LEAK_DIVIDER) return '\n'.join(new_report)
Creates a batch client.
def _create_batch_client_new(): """Creates a batch client.""" creds, project = credentials.get_default() if not project: project = utils.get_application_id() return batch.BatchServiceClient(credentials=creds)
Gets the batch client, creating it if it does not exist.
def _batch_client(): """Gets the batch client, creating it if it does not exist.""" if hasattr(_local, 'client'): return _local.client _local.client = _create_batch_client_new() return _local.client
Pure-Python implementation of itertools.batched, which was only added after Python 3.7.
def _bunched(iterator, bunch_size): """Pure-Python implementation of itertools.batched, which was only added after Python 3.7.""" # TODO(metzman): Replace this with itertools.batched. assert bunch_size > -1 idx = 0 bunch = [] for item in iterator: idx += 1 bunch.append(item) if idx == bunch_size: idx = 0 yield bunch bunch = [] if bunch: yield bunch
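A small usage sketch, assuming _bunched above is in scope: it yields fixed-size lists and a final shorter list for any remainder.

# Expected output: [[0, 1, 2], [3, 4, 5], [6]]
print(list(_bunched(range(7), 3)))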
Creates batch jobs.
def create_uworker_main_batch_jobs(batch_tasks): """Creates batch jobs.""" job_specs = collections.defaultdict(list) for batch_task in batch_tasks: logs.log(f'Scheduling {batch_task.command}, {batch_task.job_type}.') spec = _get_spec_from_config(batch_task.command, batch_task.job_type) job_specs[spec].append(batch_task.input_download_url) logs.log('Creating batch jobs.') jobs = [] logs.log('Batching utask_mains.') for spec, input_urls in job_specs.items(): for input_urls_portion in _bunched(input_urls, MAX_CONCURRENT_VMS_PER_JOB): jobs.append(_create_job(spec, input_urls_portion)) return jobs
Creates batch jobs 20 tasks at a time, lazily. This is helpful to use when batch_tasks takes a very long time to create.
def create_uworker_main_batch_jobs_bunched(batch_tasks): """Creates batch jobs 20 tasks at a time, lazily. This is helpful to use when batch_tasks takes a very long time to create.""" # Use the term "bunch" instead of "batch" since "batch" has nothing to do with # the cloud service and is thus very confusing in this context. jobs = [ create_uworker_main_batch_jobs(bunch) for bunch in _bunched(batch_tasks, TASK_BUNCH_SIZE) ] return list(itertools.chain(*jobs))
Gets the task spec based on the batch workload spec.
def _get_task_spec(batch_workload_spec): """Gets the task spec based on the batch workload spec.""" runnable = batch.Runnable() runnable.container = batch.Runnable.Container() runnable.container.image_uri = batch_workload_spec.docker_image runnable.container.options = ( '--memory-swappiness=40 --shm-size=1.9g --rm --net=host ' '-e HOST_UID=1337 -P --privileged --cap-add=all ' '--name=clusterfuzz -e UNTRUSTED_WORKER=False -e UWORKER=True ' '-e UWORKER_INPUT_DOWNLOAD_URL') runnable.container.volumes = ['/var/scratch0:/mnt/scratch0'] task_spec = batch.TaskSpec() task_spec.runnables = [runnable] task_spec.max_retry_count = RETRY_COUNT # TODO(metzman): Change this for production. task_spec.max_run_duration = MAX_DURATION return task_spec
Returns the allocation policy for a BatchWorkloadSpec.
def _get_allocation_policy(spec): """Returns the allocation policy for a BatchWorkloadSpec.""" disk = batch.AllocationPolicy.Disk() disk.image = 'batch-cos' disk.size_gb = spec.disk_size_gb disk.type = spec.disk_type instance_policy = batch.AllocationPolicy.InstancePolicy() instance_policy.boot_disk = disk instance_policy.machine_type = spec.machine_type instances = batch.AllocationPolicy.InstancePolicyOrTemplate() instances.policy = instance_policy # Don't use external ip addresses which use quota, cost money, and are # unnecessary. network_interface = batch.AllocationPolicy.NetworkInterface() network_interface.no_external_ip_address = True # TODO(metzman): Make configurable. network_interface.network = ( 'projects/google.com:clusterfuzz/global/networks/batch') network_interface.subnetwork = ( 'projects/google.com:clusterfuzz/regions/us-west1/subnetworks/us-west1a') network_interfaces = [network_interface] network_policy = batch.AllocationPolicy.NetworkPolicy() network_policy.network_interfaces = network_interfaces allocation_policy = batch.AllocationPolicy() allocation_policy.instances = [instances] allocation_policy.network = network_policy service_account = batch.ServiceAccount(email=spec.service_account_email) # pylint: disable=no-member allocation_policy.service_account = service_account return allocation_policy
Creates and starts a batch job from |spec| that executes all tasks.
def _create_job(spec, input_urls): """Creates and starts a batch job from |spec| that executes all tasks.""" task_group = batch.TaskGroup() task_group.task_count = len(input_urls) assert task_group.task_count < MAX_CONCURRENT_VMS_PER_JOB task_environments = [ batch.Environment(variables={'UWORKER_INPUT_DOWNLOAD_URL': input_url}) for input_url in input_urls ] task_group.task_environments = task_environments task_group.task_spec = _get_task_spec(spec) task_group.task_count_per_node = TASK_COUNT_PER_NODE assert task_group.task_count_per_node == 1, 'This is a security issue' job = batch.Job() job.task_groups = [task_group] job.allocation_policy = _get_allocation_policy(spec) job.labels = {'env': 'testing', 'type': 'container'} job.logs_policy = batch.LogsPolicy() job.logs_policy.destination = batch.LogsPolicy.Destination.CLOUD_LOGGING create_request = batch.CreateJobRequest() create_request.job = job job_name = get_job_name() create_request.job_id = job_name # The job's parent is the region in which the job will run project_id = 'google.com:clusterfuzz' create_request.parent = f'projects/{project_id}/locations/us-west1' job_result = _send_create_job_request(create_request) logs.log(f'Created batch job id={job_name}.', spec=spec) return job_result
Returns the batch config. This function was made to make mocking easier.
def _get_batch_config(): """Returns the batch config. This function was made to make mocking easier.""" return local_config.BatchConfig()
Returns the Job entity named by |job_name|. This function was made to make mocking easier.
def _get_job(job_name): """Returns the Job entity named by |job_name|. This function was made to make mocking easier.""" return data_types.Job.query(data_types.Job.name == job_name).get()
Gets the configured specifications for a batch workload.
def _get_spec_from_config(command, job_name): """Gets the configured specifications for a batch workload.""" job = _get_job(job_name) config_name = job.platform if command == 'fuzz': config_name += '-PREEMPTIBLE' else: config_name += '-NONPREEMPTIBLE' # TODO(metzman): Get rid of this when we stop doing privileged operations in # utasks. if command in _UNPRIVILEGED_TASKS: config_name += '-UNPRIVILEGED' batch_config = _get_batch_config() instance_spec = batch_config.get('mapping').get(config_name, None) if instance_spec is None: raise ValueError(f'No mapping for {config_name}') project_name = batch_config.get('project') docker_image = instance_spec['docker_image'] user_data = instance_spec['user_data'] # TODO(https://github.com/google/clusterfuzz/issues/3008): Make this use a # low-privilege account. spec = BatchWorkloadSpec( docker_image=docker_image, user_data=user_data, disk_size_gb=instance_spec['disk_size_gb'], disk_type=instance_spec['disk_type'], service_account_email=instance_spec['service_account_email'], subnetwork=instance_spec['subnetwork'], gce_zone=instance_spec['gce_zone'], project=project_name, preemptible=instance_spec['preemptible'], machine_type=instance_spec['machine_type']) return spec
Return an api client for bigquery.
def get_api_client(): """Return an api client for bigquery.""" return discovery.build( 'bigquery', 'v2', cache_discovery=False, credentials=credentials.get_default()[0])
Return bucket for bigquery stats.
def get_bucket(): """Return bucket for bigquery stats.""" return local_config.ProjectConfig().get('bigquery.bucket')
Cast value to appropriate type.
def cast(value, field): """Cast value to appropriate type.""" if value is None: return None if field['type'] in {'INTEGER', 'INT64'}: return int(value) if field['type'] in {'FLOAT', 'FLOAT64'}: return float(value) if field['type'] in {'BOOLEAN', 'BOOL'}: return value == 'true' if field['type'] in {'STRING'}: return value if field['type'] in {'TIMESTAMP'}: return datetime.datetime.utcfromtimestamp(float(value)) if field['type'] in {'RECORD'}: return convert_row(value, field['fields']) raise RuntimeError(f'The type {field["type"]} is unsupported.')
Convert a single raw row (from BigQuery) to a dict.
def convert_row(raw_row, fields): """Convert a single raw row (from BigQuery) to a dict.""" row = {} for index, raw_value in enumerate(raw_row['f']): field = fields[index] if field['mode'] == 'REPEATED': row[field['name']] = [] for item in raw_value['v']: row[field['name']].append(cast(item['v'], field)) else: row[field['name']] = cast(raw_value['v'], field) return row
Convert a query result into an array of dicts, each of which represents a row.
def convert(result): """Convert a query result into an array of dicts, each of which represents a row.""" fields = result['schema']['fields'] rows = [] for raw_row in result.get('rows', []): rows.append(convert_row(raw_row, fields)) return rows
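To make the expected BigQuery response shape concrete, here is a hypothetical single-row result fed through convert() above (which in turn uses convert_row and cast); the field names and values are made up.

result = {
    'schema': {
        'fields': [
            {'name': 'testcase_id', 'type': 'INTEGER', 'mode': 'NULLABLE'},
            {'name': 'security_flag', 'type': 'BOOLEAN', 'mode': 'NULLABLE'},
        ]
    },
    'rows': [{'f': [{'v': '42'}, {'v': 'true'}]}],
}
# Expected output: [{'testcase_id': 42, 'security_flag': True}]
print(convert(result))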
Write a range to BigQuery. This is applicable for regression and fixed ranges.
def write_range(table_id, testcase, range_name, start, end): """Write a range to BigQuery. This is applicable for regression and fixed ranges.""" client = Client(dataset_id='main', table_id=table_id) result = client.insert([ Insert( row={ 'testcase_id': str(testcase.key.id()), 'crash_type': testcase.crash_type, 'crash_state': testcase.crash_state, 'security_flag': testcase.security_flag, 'parent_fuzzer_name': testcase.fuzzer_name, 'fuzzer_name': testcase.overridden_fuzzer_name, 'job_type': testcase.job_type, 'created_at': int(time.time()), ('%s_range_start' % range_name): int(start), ('%s_range_end' % range_name): int(end), }, insert_id='%s:%s:%s' % (testcase.key.id(), start, end)) ]) for error in result.get('insertErrors', []): logs.log_error( ("Ignoring error writing the testcase's %s range (%s) to " 'BigQuery.' % (range_name, testcase.key.id())), exception=ValueError(error))
Get an appropriate max_results.
def _get_max_results(max_results, limit, count_so_far): """Get an appropriate max_results.""" # limit is None means we get every record (no limit). if limit is None: return max_results return min(max_results, limit - count_so_far)
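Two hypothetical calls show the paging arithmetic, assuming _get_max_results above is in scope: with a limit, the page size shrinks to whatever is still allowed; without one, the full page size is used.

print(_get_max_results(10000, limit=250, count_so_far=200))  # 50
print(_get_max_results(10000, limit=None, count_so_far=200))  # 10000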
Return whether the key is a GCS key.
def _is_gcs_key(blob_key): """Return whether the key is a GCS key.""" gcs_key_pattern = re.compile( r'^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$') return bool(gcs_key_pattern.match(blob_key))
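A quick sketch of the key-format check, assuming _is_gcs_key above is in scope; the legacy key below is invented purely for contrast.

import uuid

print(_is_gcs_key(str(uuid.uuid4()).lower()))  # True: new-style keys are lowercase UUIDs
print(_is_gcs_key('AMIfv95ExampleLegacyKey'))  # False: hypothetical legacy blobstore key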
Return the full path to the blob on GCS.
def _get_gcs_blob_path(blob_key): """Return the full path to the blob on GCS.""" return '/%s/%s' % (storage.blobs_bucket(), blob_key)
Return GCS path for the blob.
def get_gcs_path(blob_key): """Return GCS path for the blob.""" if _is_gcs_key(blob_key): return _get_gcs_blob_path(blob_key) # Legacy blobstore key. blob_info = get_legacy_blob_info(blob_key) if not blob_info: return None return blob_info.gs_object_name
Returns blob size for a given blob key.
def get_blob_size(blob_key): """Returns blob size for a given blob key.""" if not blob_key or blob_key == 'NA': return None blob_info = get_blob_info(blob_key) if not blob_info: return None return blob_info.size
Get the GcsBlobInfo for the given key. Returns a storage.GcsBlobInfo even for legacy blobs, or None if no blob info can be found.
def get_blob_info(blob_key): """Get the GcsBlobInfo for the given key. Returns a storage.GcsBlobInfo even for legacy blobs, or None if no blob info can be found.""" if _is_gcs_key(blob_key): return storage.GcsBlobInfo.from_key(blob_key) legacy_blob_info = get_legacy_blob_info(blob_key) if not legacy_blob_info: return None return storage.GcsBlobInfo.from_legacy_blob_info(legacy_blob_info)
Delete a blob key.
def delete_blob(blob_key): """Delete a blob key.""" blob_info = get_blob_info(blob_key) if not blob_info: return False return storage.delete(blob_info.gcs_path)
Write a single file testcase to GCS.
def write_blob(file_handle_or_path): """Write a single file testcase to GCS.""" blobs_bucket = storage.blobs_bucket() blob_name = generate_new_blob_name() if storage.get(storage.get_cloud_storage_file_path(blobs_bucket, blob_name)): raise BlobsError('UUID collision found: %s' % blob_name) if isinstance(file_handle_or_path, str): filename = os.path.basename(file_handle_or_path) else: filename = file_handle_or_path.name metadata = { storage.BLOB_FILENAME_METADATA_KEY: filename, } gcs_path = '/%s/%s' % (blobs_bucket, blob_name) if storage.copy_file_to(file_handle_or_path, gcs_path, metadata=metadata): return blob_name raise BlobsError('Failed to write blob %s.' % blob_name)
Copy data stored in the blobstore to a local file.
def read_blob_to_disk(blob_key, local_file): """Copy data stored in the blobstore to a local file.""" assert not environment.is_running_on_app_engine() directory = os.path.dirname(local_file) if not os.path.exists(directory): os.makedirs(directory) gcs_path = get_gcs_path(blob_key) return storage.copy_file_from(gcs_path, local_file)
Returns data associated with a blobstore key.
def read_key(blob_key): """Returns data associated with a blobstore key.""" gcs_path = get_gcs_path(blob_key) return storage.read_data(gcs_path)
Return legacy blob info information.
def get_legacy_blob_info(blob_key): """Return legacy blob info information.""" legacy_blob_info = ndb.Key(BlobInfo, blob_key).get() if not legacy_blob_info: return None if legacy_blob_info.gs_object_name: return legacy_blob_info # Blobs which were stored before the move to GCS have an additional mapping # entry created by our migration jobs. blob_mapping = get_blob_mapping(blob_key) if not blob_mapping: raise BlobsError('Blob mapping not found.') legacy_blob_info.gs_object_name = blob_mapping.gcs_filename return legacy_blob_info
Return blob mapping information.
def get_blob_mapping(blob_key): """Return blob mapping information.""" return ndb.Key(_blobmigrator_BlobKeyMapping, blob_key).get()
Generate a new blob name.
def generate_new_blob_name(): """Generate a new blob name.""" return str(uuid.uuid4()).lower()
Returns a signed download URL that can be used to download the blob pointed to by |blob_key|.
def get_signed_download_url(blob_key): """Returns a signed download URL that can be used to download the blob pointed to by |blob_key|.""" gcs_path = get_gcs_path(blob_key) return storage.get_signed_download_url(gcs_path)
Returns a signed upload URL that can be used to upload the blob pointed to by |blob_key|.
def get_signed_upload_url(blob_key): """Returns a signed upload URL that can be used to upload the blob pointed to by |blob_key|.""" gcs_path = get_gcs_path(blob_key) return storage.get_signed_upload_url(gcs_path)
Returns a pair of (blob_name,signed_upload_url) to be used from utask_main to upload blobs.
def get_blob_signed_upload_url(): """Returns a pair of (blob_name,signed_upload_url) to be used from utask_main to upload blobs.""" bucket = storage.blobs_bucket() blob_name = generate_new_blob_name() gcs_path = storage.get_cloud_storage_file_path(bucket, blob_name) # Keep generating paths until no collision is encountered. while storage.get(gcs_path): blob_name = generate_new_blob_name() gcs_path = storage.get_cloud_storage_file_path(bucket, blob_name) # Write something to the file to avoid collision between calls. storage.write_data('', gcs_path) if not storage.get(gcs_path): raise BlobsError(f'Failed to create blob under: {gcs_path}') logs.log(f'created blob with gcs_path: {gcs_path}') signed_upload_url = storage.get_signed_upload_url(gcs_path) return blob_name, signed_upload_url
Adds a metadata key/value to the "items" list from an instance's metadata.
def _add_metadata_key_value(items, key, value): """Adds a metadata key/value to the "items" list from an instance's metadata.""" replaced_existing = False for item in items: if item['key'] == key: replaced_existing = True item['value'] = value break if not replaced_existing: items.append({'key': key, 'value': value})
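A minimal sketch of the replace-or-append behaviour, assuming _add_metadata_key_value above is in scope; the metadata keys and values are made up.

items = [{'key': 'startup-script', 'value': 'echo old'}]  # hypothetical instance metadata
_add_metadata_key_value(items, 'startup-script', 'echo new')  # replaces the existing entry
_add_metadata_key_value(items, 'task-payload', 'fuzz example_job')  # appends a new entry
print(items)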
Execute an operation, retrying on any exceptions.
def _do_operation_with_retries(operation, project, zone, wait_for_completion): """Execute an operation, retrying on any exceptions.""" response = _execute_api_call_with_retries(operation) if response is None: return False if not wait_for_completion: # No need to wait, so we are done. return True for _ in range(NUM_RETRIES + 1): try: # This could cause exceptions when the response is not ready. _wait_for_operation(response, project, zone) return True except Exception: logs.log_error('Failed to wait for Compute Engine operation. ' 'Original response is %s.' % str(response)) time.sleep(SLEEP_TIME) continue return False
Do a start/reset/stop on the compute engine instance in the given project and zone.
def _do_instance_operation(operation, instance_name, project, zone, wait_for_completion): """Do a start/reset/stop on the compute engine instance in the given project and zone.""" api = _get_api() operation_func = getattr(api.instances(), operation) operation = operation_func(instance=instance_name, project=project, zone=zone) return _do_operation_with_retries( operation, project, zone, wait_for_completion=wait_for_completion)
Execute the given API call, retrying if necessary. Returns the response if successful, or None.
def _execute_api_call_with_retries(api_func): """Execute the given API call, retrying if necessary. Returns the response if successful, or None.""" last_exception = None for i in range(NUM_RETRIES + 1): try: # Try to execute the operation. response = api_func.execute() last_exception = None break except Exception as e: # Exponential backoff. last_exception = str(e) sleep_time = random.uniform(1, SLEEP_TIME * (1 << i)) time.sleep(sleep_time) continue if last_exception is not None: # Failed, log exception with as much information as we can. if hasattr(api_func, 'uri'): uri = api_func.uri else: uri = 'unknown' logs.log_error('Compute engine API call "%s" failed with exception:\n%s' % (uri, last_exception)) return None return response
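The backoff in _execute_api_call_with_retries is bounded by SLEEP_TIME * 2**attempt. The sketch below just prints those bounds; NUM_RETRIES and SLEEP_TIME are stand-in values here, the real constants are defined elsewhere in this module.

NUM_RETRIES = 4  # stand-in value
SLEEP_TIME = 10  # stand-in value, in seconds

for attempt in range(NUM_RETRIES + 1):
  # The actual sleep is random.uniform(1, upper_bound).
  upper_bound = SLEEP_TIME * (1 << attempt)
  print(f'attempt {attempt}: sleep in [1, {upper_bound}] seconds')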
Return the compute engine api object.
def _get_api(): """Return the compute engine api object.""" return build('compute', 'v1', cache_discovery=False)
Return the instance information for a given instance.
def _get_instance_info(instance_name, project, zone): """Return the instance information for a given instance.""" api = _get_api() instance_info_func = api.instances().get( instance=instance_name, project=project, zone=zone) return _execute_api_call_with_retries(instance_info_func)
Return the metadata values and fingerprint for the given instance.
def _get_metadata_and_fingerprint(instance_name, project, zone): """Return the metadata values and fingerprint for the given instance.""" instance_info = _get_instance_info(instance_name, project, zone) if not instance_info: logs.log_error('Failed to fetch instance metadata') return None, None fingerprint = instance_info['metadata']['fingerprint'] metadata_items = instance_info['metadata']['items'] return metadata_items, fingerprint
Wait for the given operation to complete.
def _wait_for_operation(response, project, zone): """Wait for the given operation to complete.""" if 'status' in response and response['status'] == 'DONE': return if 'kind' not in response or response['kind'] != 'compute#operation': logs.log_error('Compute api response not an operation.') return api = _get_api() operation = response['name'] start_time = datetime.datetime.utcnow() while not dates.time_has_expired(start_time, seconds=OPERATION_TIMEOUT): operation_func = api.zoneOperations().get( operation=operation, project=project, zone=zone) response = _execute_api_call_with_retries(operation_func) if 'status' not in response: logs.log_error('Invalid compute engine operation %s.' % str(operation)) return if response['status'] == 'DONE': return time.sleep(POLL_INTERVAL) logs.log_error('Compute engine operation %s timed out.' % str(operation))
Add metadata to an existing instance. Replaces existing metadata values with the same key.
def add_metadata(instance_name, project, zone, key, value, wait_for_completion): """Add metadata to an existing instance. Replaces existing metadata values with the same key.""" existing_metadata, fingerprint = _get_metadata_and_fingerprint( instance_name, project, zone) if not existing_metadata: return False _add_metadata_key_value(existing_metadata, key, value) api = _get_api() operation = api.instances().setMetadata( body={ 'fingerprint': fingerprint, 'items': existing_metadata, }, instance=instance_name, project=project, zone=zone) return _do_operation_with_retries(operation, project, zone, wait_for_completion)
Create a disk.
def create_disk(disk_name, source_image, size_gb, project, zone, wait_for_completion=False): """Create a disk.""" api = _get_api() operation = api.disks().insert( body={ 'name': disk_name, 'sizeGb': size_gb, }, sourceImage=source_image, project=project, zone=zone) return _do_operation_with_retries( operation, project, zone, wait_for_completion=wait_for_completion)
Delete a disk.
def delete_disk(disk_name, project, zone, wait_for_completion=False): """Delete a disk.""" api = _get_api() operation = api.disks().delete(disk=disk_name, project=project, zone=zone) return _do_operation_with_retries( operation, project, zone, wait_for_completion=wait_for_completion)
Recreate an instance and its disk.
def recreate_instance_with_disks(instance_name, project, zone, additional_metadata=None, wait_for_completion=False): """Recreate an instance and its disk.""" # Get existing instance information. # First, try to get instance info from cache. # TODO(ochang): Make this more general in case anything else needs to use # this method (e.g. appengine). instance_info = persistent_cache.get_value(GCE_INSTANCE_INFO_KEY) if instance_info is None: instance_info = _get_instance_info(instance_name, project, zone) # Bail out if we don't have a valid instance information. if (not instance_info or 'disks' not in instance_info or not instance_info['disks']): logs.log_error( 'Failed to get disk info from existing instance, bailing on instance ' 'recreation.') return False # Add any additional metadata required for instance booting. if additional_metadata: for key, value in additional_metadata.items(): items = instance_info.setdefault('metadata', {}).setdefault('items', []) _add_metadata_key_value(items, key, value) # Cache the latest instance information. persistent_cache.set_value( GCE_INSTANCE_INFO_KEY, instance_info, persist_across_reboots=True) # Delete the instance. if not _do_instance_operation( 'delete', instance_name, project, zone, wait_for_completion=True): logs.log_error('Failed to delete instance.') return False # Get existing disks information, and recreate. api = _get_api() disks = instance_info['disks'] for disk in disks: disk_source = disk['source'] disk_name = disk_source.split('/')[-1] disk_info_func = api.disks().get(disk=disk_name, project=project, zone=zone) disk_info = _execute_api_call_with_retries(disk_info_func) if 'sourceImage' not in disk_info or 'sizeGb' not in disk_info: logs.log_error( 'Failed to get source image and size from existing disk, bailing on ' 'instance recreation.') return False size_gb = disk_info['sizeGb'] source_image = disk_info['sourceImage'] # Recreate the disk. if not delete_disk(disk_name, project, zone, wait_for_completion=True): logs.log_error('Failed to delete disk.') return False if not create_disk( disk_name, source_image, size_gb, project, zone, wait_for_completion=True): logs.log_error('Failed to recreate disk.') return False # Recreate the instance with the exact same configurations, but not # necessarily the same IPs. try: del instance_info['networkInterfaces'][0]['accessConfigs'][0]['natIP'] except: # This is not a failure. When a bot is stopped, it has no ip/interface. pass try: del instance_info['networkInterfaces'][0]['networkIP'] except: # This is not a failure. When a bot is stopped, it has no ip/interface. pass operation = api.instances().insert( body=instance_info, project=project, zone=zone) return _do_operation_with_retries( operation, project, zone, wait_for_completion=wait_for_completion)
Remove a metadata key/value from an existing instance.
def remove_metadata(instance_name, project, zone, key, wait_for_completion): """Remove a metadata key/value from an existing instance.""" existing_metadata, fingerprint = _get_metadata_and_fingerprint( instance_name, project, zone) if not existing_metadata: return False filtered_metadata = [] for item in existing_metadata: if item['key'] != key: filtered_metadata.append(item) if len(filtered_metadata) == len(existing_metadata): # Nothing to do. return True api = _get_api() operation = api.instances().setMetadata( body={ 'fingerprint': fingerprint, 'items': filtered_metadata, }, instance=instance_name, project=project, zone=zone) return _do_operation_with_retries(operation, project, zone, wait_for_completion)
Reset an instance.
def reset_instance(instance_name, project, zone, wait_for_completion=False): """Reset an instance.""" return _do_instance_operation('reset', instance_name, project, zone, wait_for_completion)
Process an instance template, normalizing some of its metadata key values.
def _process_instance_template(instance_template): """Process an instance template, normalizing some of its metadata key values.""" # Load metadata items for a particular instance template. items = instance_template['properties']['metadata']['items'] for item in items: # If the item value is a relative file path specified using the file:// # scheme, then substitute it with the actual file content. This is needed # since the Compute Engine instance manager cannot read files from our repo. if (isinstance(item['value'], str) and item['value'].startswith(FILE_SCHEME)): file_path = item['value'][len(FILE_SCHEME):] with open( os.path.join(environment.get_gce_config_directory(), file_path), encoding='utf-8') as f: item['value'] = f.read()
Read a project config.
def _config_to_project(name, config): """Read a project config.""" clusters = [] for cluster_name, zone in config['clusters'].items(): clusters.append( Cluster( name=cluster_name, gce_zone=zone['gce_zone'], instance_count=zone['instance_count'], instance_template=zone['instance_template'], distribute=zone.get('distribute', False), auto_healing_policy=zone.get('auto_healing_policy', {}), worker=zone.get('worker', False), high_end=zone.get('high_end', False))) for instance_template in config['instance_templates']: _process_instance_template(instance_template) host_worker_assignments = [] for assignment in config.get('host_worker_assignments', []): host_worker_assignments.append( HostWorkerAssignment( host=assignment['host'], worker=assignment['worker'], workers_per_host=assignment['workers_per_host'])) return Project(name, clusters, config['instance_templates'], host_worker_assignments)
Get GCE metadata value.
def get(path): """Get GCE metadata value.""" attribute_url = ( 'http://{}/computeMetadata/v1/'.format(_METADATA_SERVER) + path) headers = {'Metadata-Flavor': 'Google'} operations_timeout = environment.get_value('URL_BLOCKING_OPERATIONS_TIMEOUT') response = requests.get( attribute_url, headers=headers, timeout=operations_timeout) response.raise_for_status() return response.text
Return whether or not we're on GCE.
def is_gce(): """Return whether or not we're on GCE.""" try: sock = socket.create_connection((_METADATA_SERVER, 80)) sock.close() except Exception: return False return True
Returns whether or not to use anonymous credentials.
def _use_anonymous_credentials(): """Returns whether or not to use anonymous credentials.""" if (environment.get_value('INTEGRATION') or environment.get_value('UNTRUSTED_RUNNER_TESTS') or environment.get_value('UTASK_TESTS')): # Integration tests need real credentials. return False return (environment.get_value('LOCAL_DEVELOPMENT') or environment.get_value('PY_UNITTESTS'))
Get default Google Cloud credentials.
def get_default(scopes=None): """Get default Google Cloud credentials.""" if _use_anonymous_credentials(): return credentials.AnonymousCredentials(), '' return google.auth.default(scopes=scopes)
Gets a dedicated signing account for signing storage objects.
def get_storage_signing_service_account(): """Gets a dedicated signing account for signing storage objects.""" if _use_anonymous_credentials(): return None project_id = utils.get_application_id() return json.loads(secret_manager.get(_SIGNING_KEY_SECRET_ID, project_id))
Returns signing credentials for signing URLs.
def get_signing_credentials(service_account_info): """Returns signing credentials for signing URLs.""" if _use_anonymous_credentials(): return None if service_account_info is not None: signing_creds = service_account.Credentials.from_service_account_info( service_account_info, scopes=_SCOPES) request = requests.Request() signing_creds.refresh(request) token = None else: # Fallback for when a dedicated signing account is not configured. logs.log_error('Please configure dedicated signing credentials.') creds, _ = get_default() request = requests.Request() creds.refresh(request) signing_creds = compute_engine.IDTokenCredentials( request, '', service_account_email=creds.service_account_email) token = creds.token return signing_creds, token
Get path to gsutil executable. Returns: Path to gsutil executable on the system.
def _get_gsutil_path(): """Get path to gsutil executable. Returns: Path to gsutil executable on the system. """ gsutil_executable = 'gsutil' if environment.platform() == 'WINDOWS': gsutil_executable += '.cmd' gsutil_directory = environment.get_value('GSUTIL_PATH') if not gsutil_directory: # Try searching the binary in path. gsutil_absolute_path = shutil.which(gsutil_executable) if gsutil_absolute_path: return gsutil_absolute_path logs.log_error('Cannot locate gsutil in PATH, set GSUTIL_PATH to directory ' 'containing gsutil binary.') return None gsutil_absolute_path = os.path.join(gsutil_directory, gsutil_executable) return gsutil_absolute_path
Get multiprocessing args for gsutil.
def _multiprocessing_args(): """Get multiprocessing args for gsutil.""" if utils.cpu_count() == 1: # GSUtil's default thread count is 5 as it assumes the common configuration # is many CPUs (GSUtil uses num_cpu processes). return ['-o', 'GSUtil:parallel_thread_count=16'] return []
Filters path if needed. In a local development environment, this uses local paths from an emulated GCS instead of real GCS. `write` indicates whether `path` is a GCS write destination, in which case intermediate paths are created automatically.
def _filter_path(path, write=False): """Filters path if needed. In a local development environment, this uses local paths from an emulated GCS instead of real GCS. `write` indicates whether `path` is a GCS write destination, in which case intermediate paths are created automatically.""" if not path.startswith(storage.GS_PREFIX): # Only applicable to GCS paths. return path local_buckets_path = environment.get_value('LOCAL_GCS_BUCKETS_PATH') if not local_buckets_path: return path if write: local_path = storage.FileSystemProvider( local_buckets_path).convert_path_for_write(path) else: local_path = storage.FileSystemProvider(local_buckets_path).convert_path( path) return local_path
Get subscription name.
def subscription_name(project, name): """Get subscription name.""" return 'projects/{project}/subscriptions/{name}'.format( project=project, name=name)
Get topic name.
def topic_name(project, name): """Get topic name.""" return 'projects/{project}/topics/{name}'.format(project=project, name=name)
Parse the topic or subscription name.
def parse_name(name): """Parse the topic or subscription name.""" components = name.split('/') if len(components) != 4: raise ValueError('Invalid pubsub name.') project = components[1] name = components[3] return project, name
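Assuming the three name helpers above are in scope, a round trip with a made-up project, topic, and subscription looks like this.

name = topic_name('example-project', 'example-topic')
assert name == 'projects/example-project/topics/example-topic'
assert parse_name(name) == ('example-project', 'example-topic')
sub = subscription_name('example-project', 'example-sub')
assert parse_name(sub) == ('example-project', 'example-sub')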
Decode Pub/Sub data.
def _decode_data(raw_message): """Decode Pub/Sub data.""" return (base64.b64decode(raw_message['data']) if 'data' in raw_message else None)
Convert a raw message response to a Message.
def raw_message_to_message(raw_message_response): """Convert a raw message response to a Message.""" raw_message = raw_message_response['message'] return Message(_decode_data(raw_message), raw_message.get('attributes'))
Convert a raw message response to a Message.
def _raw_message_to_received_message(client, subscription, raw_message_response): """Convert a raw message response to a Message.""" raw_message = raw_message_response['message'] return ReceivedMessage(client, subscription, _decode_data(raw_message), raw_message.get('attributes'), raw_message['messageId'], raw_message['publishTime'], raw_message_response['ackId'])
Convert the message to a dict.
def _message_to_dict(message): """Convert the message to a dict.""" result = {} if message.data: result['data'] = base64.b64encode(message.data).decode('utf-8') if message.attributes: result['attributes'] = message.attributes return result
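To show the wire shape without this module's Message class, the sketch below uses a hypothetical namedtuple stand-in that has the same data and attributes fields; it assumes _message_to_dict above is in scope.

import collections

FakeMessage = collections.namedtuple('FakeMessage', ['data', 'attributes'])  # stand-in only

message = FakeMessage(data=b'hello', attributes={'eventType': 'OBJECT_FINALIZE'})
# Expected output: {'data': 'aGVsbG8=', 'attributes': {'eventType': 'OBJECT_FINALIZE'}}
print(_message_to_dict(message))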
Returns the secretmanager client.
def get_secret_manager_client(): """Returns the secretmanager client.""" return secretmanager.SecretManagerServiceClient()
Returns the value of the secret identified by |secret_id| in |project|.
def get(secret_id, project): """Returns the value of the secret identified by |secret_id| in |project|.""" client = get_secret_manager_client() name = f'projects/{project}/secrets/{secret_id}/versions/1' response = client.access_secret_version(request={'name': name}) return response.payload.data
Returns a signed URL for |remote_path| with |method|.
def _sign_url(remote_path, minutes=SIGNED_URL_EXPIRATION_MINUTES, method='GET'): """Returns a signed URL for |remote_path| with |method|.""" if _integration_test_env_doesnt_support_signed_urls(): return remote_path minutes = datetime.timedelta(minutes=minutes) bucket_name, object_path = get_bucket_name_and_path(remote_path) signing_creds, access_token = _signing_creds() client = _storage_client() bucket = client.bucket(bucket_name) blob = bucket.blob(object_path) return blob.generate_signed_url( version='v4', expiration=minutes, method=method, credentials=signing_creds, access_token=access_token, service_account_email=signing_creds.service_account_email)
Get the current storage provider.
def _provider(): """Get the current storage provider.""" local_buckets_path = environment.get_value('LOCAL_GCS_BUCKETS_PATH') if local_buckets_path: return FileSystemProvider(local_buckets_path) return GcsProvider()
Create a storage client.
def _create_storage_client_new(): """Create a storage client.""" creds, project = credentials.get_default() if not project: project = utils.get_application_id() return gcs.Client(project=project, credentials=creds)
Get the storage client, creating it if it does not exist.
def _storage_client(): """Get the storage client, creating it if it does not exist.""" if not hasattr(_local, 'client'): _local.client = _create_storage_client_new() return _local.client
Return bucket name and path given a full cloud storage path.
def get_bucket_name_and_path(cloud_storage_file_path): """Return bucket name and path given a full cloud storage path.""" filtered_path = utils.strip_from_left(cloud_storage_file_path, GS_PREFIX) _, bucket_name_and_path = filtered_path.split('/', 1) if '/' in bucket_name_and_path: bucket_name, path = bucket_name_and_path.split('/', 1) else: bucket_name = bucket_name_and_path path = '' return bucket_name, path
Get the full GCS file path.
def get_cloud_storage_file_path(bucket, path): """Get the full GCS file path.""" return GS_PREFIX + '/' + bucket + '/' + path
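Assuming the two path helpers above are in scope and GS_PREFIX carries the usual 'gs:/' prefix, construction and splitting round-trip; the bucket and object names below are invented.

path = get_cloud_storage_file_path('example-bucket', 'corpus/example_target/unit_1')
assert path == 'gs://example-bucket/corpus/example_target/unit_1'
assert get_bucket_name_and_path(path) == ('example-bucket', 'corpus/example_target/unit_1')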
Get error reason from googleapiclient.errors.HttpError.
def _get_error_reason(http_error): """Get error reason from googleapiclient.errors.HttpError.""" try: data = json.loads(http_error.content.decode('utf-8')) return data['error']['message'] except (ValueError, KeyError): logs.log_error('Failed to decode error content: %s' % http_error.content) return None
Attempt to add a single bucket IAM. Returns the modified iam policy, or None on failure.
def add_single_bucket_iam(storage, iam_policy, role, bucket_name, member): """Attempt to add a single bucket IAM. Returns the modified iam policy, or None on failure.""" binding = get_bucket_iam_binding(iam_policy, role) binding['members'].append(member) result = set_bucket_iam_policy(storage, bucket_name, iam_policy) binding['members'].pop() return result
Get the binding matching a role, or None.
def get_bucket_iam_binding(iam_policy, role): """Get the binding matching a role, or None.""" return next(( binding for binding in iam_policy['bindings'] if binding['role'] == role), None)
Get or create the binding matching a role.
def get_or_create_bucket_iam_binding(iam_policy, role): """Get or create the binding matching a role.""" binding = get_bucket_iam_binding(iam_policy, role) if not binding: binding = {'role': role, 'members': []} iam_policy['bindings'].append(binding) return binding
Remove existing binding matching the role.
def remove_bucket_iam_binding(iam_policy, role): """Remove existing binding matching the role.""" iam_policy['bindings'] = [ binding for binding in iam_policy['bindings'] if binding['role'] != role ]
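Assuming the binding helpers above are in scope, here is a sketch against a hypothetical in-memory policy dict; no API call is made, only the local policy structure is edited, and the roles and members are made up.

iam_policy = {
    'bindings': [{
        'role': 'roles/storage.objectViewer',
        'members': ['user:someone@example.com'],
    }]
}  # hypothetical policy, in the shape these helpers expect

binding = get_or_create_bucket_iam_binding(iam_policy, 'roles/storage.objectAdmin')
binding['members'].append('serviceAccount:bot@example.iam.gserviceaccount.com')
remove_bucket_iam_binding(iam_policy, 'roles/storage.objectViewer')
print(iam_policy)  # only the objectAdmin binding remains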