text_prompt: string (lengths 100 – 17.7k)
code_prompt: string (lengths 7 – 9.86k)
<SYSTEM_TASK:> Plot a histogram of the data. <END_TASK> <USER_TASK:> Description: def histogram(series, **kwargs): """Plot a histogram of the data. Parameters ---------- series: Series The data to plot. Returns ------- str The resulting image encoded as a string. """
imgdata = BytesIO()
plot = _plot_histogram(series, **kwargs)
plot.figure.subplots_adjust(left=0.15, right=0.95, top=0.9, bottom=0.1,
                            wspace=0, hspace=0)
plot.figure.savefig(imgdata)
imgdata.seek(0)
result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue()))
# TODO Think about writing this to disk instead of caching them in strings
plt.close(plot.figure)
return result_string
<SYSTEM_TASK:> Factory func for filters. <END_TASK> <USER_TASK:> Description: def factory(self, data, manager=None): """Factory func for filters. data - policy config for filters manager - resource type manager (ec2, s3, etc) """
# Make the syntax a little nicer for common cases.
if isinstance(data, dict) and len(data) == 1 and 'type' not in data:
    op = list(data.keys())[0]
    if op == 'or':
        return Or(data, self, manager)
    elif op == 'and':
        return And(data, self, manager)
    elif op == 'not':
        return Not(data, self, manager)
    return ValueFilter(data, manager)
if isinstance(data, six.string_types):
    filter_type = data
    data = {'type': data}
else:
    filter_type = data.get('type')
if not filter_type:
    raise PolicyValidationError(
        "%s Invalid Filter %s" % (
            self.plugin_type, data))
filter_class = self.get(filter_type)
if filter_class is not None:
    return filter_class(data, manager)
else:
    raise PolicyValidationError(
        "%s Invalid filter type %s" % (
            self.plugin_type, data))
<SYSTEM_TASK:> Determine the immediate parent boolean operator for a filter <END_TASK> <USER_TASK:> Description: def get_block_operator(self): """Determine the immediate parent boolean operator for a filter"""
# Top level operator is `and`
block_stack = ['and']
for f in self.manager.iter_filters(block_end=True):
    if f is None:
        block_stack.pop()
        continue
    if f.type in ('and', 'or', 'not'):
        block_stack.append(f.type)
    if f == self:
        break
return block_stack[-1]
<SYSTEM_TASK:> Specific validation for `resource_count` type <END_TASK> <USER_TASK:> Description: def _validate_resource_count(self): """ Specific validation for `resource_count` type The `resource_count` type works a little differently because it operates on the entire set of resources. It: - does not require `key` - `value` must be a number - supports a subset of the OPERATORS list """
for field in ('op', 'value'):
    if field not in self.data:
        raise PolicyValidationError(
            "Missing '%s' in value filter %s" % (field, self.data))

if not (isinstance(self.data['value'], int) or
        isinstance(self.data['value'], list)):
    raise PolicyValidationError(
        "`value` must be an integer in resource_count filter %s" % self.data)

# I don't see how to support regex for this?
if (self.data['op'] not in OPERATORS or
        self.data['op'] in {'regex', 'regex-case'}):
    raise PolicyValidationError(
        "Invalid operator in value filter %s" % self.data)

return self
<SYSTEM_TASK:> Given an inventory csv file, return an iterator over keys <END_TASK> <USER_TASK:> Description: def load_manifest_file(client, bucket, schema, versioned, ifilters, key_info): """Given an inventory csv file, return an iterator over keys """
# To avoid thundering herd downloads, we do an immediate yield for
# interspersed i/o
yield None

# Inline these values to avoid the local var lookup, they are constants
# rKey = schema['Key']  # 1
# rIsLatest = schema['IsLatest']  # 3
# rVersionId = schema['VersionId']  # 2

with tempfile.NamedTemporaryFile() as fh:
    client.download_fileobj(Bucket=bucket, Key=key_info['key'], Fileobj=fh)
    fh.seek(0)
    reader = csv.reader(gzip.GzipFile(fileobj=fh, mode='r'))
    for key_set in chunks(reader, 1000):
        keys = []
        for kr in key_set:
            k = kr[1]
            if inventory_filter(ifilters, schema, kr):
                continue
            k = unquote_plus(k)
            if versioned:
                if kr[3] == 'true':
                    keys.append((k, kr[2], True))
                else:
                    keys.append((k, kr[2]))
            else:
                keys.append(k)
        yield keys
<SYSTEM_TASK:> Given an inventory location for a bucket, return an iterator over keys <END_TASK> <USER_TASK:> Description: def load_bucket_inventory( client, inventory_bucket, inventory_prefix, versioned, ifilters): """Given an inventory location for a bucket, return an iterator over keys on the most recent delivered manifest. """
now = datetime.datetime.now()
key_prefix = "%s/%s" % (inventory_prefix, now.strftime('%Y-%m-'))
keys = client.list_objects(
    Bucket=inventory_bucket, Prefix=key_prefix).get('Contents', [])
keys = [k['Key'] for k in keys if k['Key'].endswith('.json')]
keys.sort()
if not keys:
    # no manifest delivery
    return None
latest_manifest = keys[-1]
manifest = client.get_object(Bucket=inventory_bucket, Key=latest_manifest)
manifest_data = json.load(manifest['Body'])

# schema as column name to column index mapping
schema = dict([(k, i) for i, k in enumerate(
    [n.strip() for n in manifest_data['fileSchema'].split(',')])])

processor = functools.partial(
    load_manifest_file, client, inventory_bucket,
    schema, versioned, ifilters)
# materialize the map so random_chain can random.choice()/remove() under Python 3
generators = list(map(processor, manifest_data.get('files', ())))
return random_chain(generators)
<SYSTEM_TASK:> Generator to generate a set of keys from <END_TASK> <USER_TASK:> Description: def random_chain(generators): """Generate a set of keys from a set of generators; each generator is selected at random and consumed to exhaustion. """
while generators:
    g = random.choice(generators)
    try:
        # next(g) rather than g.next() for Python 3 compatibility
        v = next(g)
        if v is None:
            continue
        yield v
    except StopIteration:
        generators.remove(g)
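A minimal usage sketch, assuming the `random_chain` above (with the Python 3 `next(g)` fix) is available in the current scope; it interleaves two hypothetical generators in random order and drains both.

import random  # random_chain relies on random.choice internally

def numbers():
    for n in (1, 2, 3):
        yield n

def letters():
    for c in ('a', 'b'):
        yield c

# pass a mutable list so exhausted generators can be removed
mixed = list(random_chain([numbers(), letters()]))
assert sorted(map(str, mixed)) == ['1', '2', '3', 'a', 'b']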
<SYSTEM_TASK:> Check a bucket for a named inventory, and return the destination. <END_TASK> <USER_TASK:> Description: def get_bucket_inventory(client, bucket, inventory_id): """Check a bucket for a named inventory, and return the destination."""
inventories = client.list_bucket_inventory_configurations(
    Bucket=bucket).get('InventoryConfigurationList', [])
inventories = {i['Id']: i for i in inventories}
found = fnmatch.filter(inventories, inventory_id)
if not found:
    return None

i = inventories[found.pop()]
s3_info = i['Destination']['S3BucketDestination']
return {'bucket': s3_info['Bucket'].rsplit(':')[-1],
        'prefix': "%s/%s/%s" % (s3_info['Prefix'], bucket, i['Id'])}
<SYSTEM_TASK:> You can customize the automated documentation by altering <END_TASK> <USER_TASK:> Description: def create_html_file(config): """ You can customize the automated documentation by altering the code directly in this script or the associated jinja2 template """
logging.debug("Starting create_html_file") logging.debug( "\tjinja2_template_file = {}" .format(config['jinja2_template_filename'])) logging.debug( "\ttrendered_filename = {}" .format(config['rendered_filename'])) ts = time.time() timestamp = datetime.datetime.utcfromtimestamp(ts).strftime( '%Y-%m-%d %H:%M:%S') script_path = os.path.dirname(os.path.abspath(__file__)) rendered_file_path = os.path.join( script_path, config['rendered_filename']) environment = jinja2.Environment( loader=jinja2.FileSystemLoader(script_path)) environment_column = True if config['environment_tags'] else False render_vars = { "timestamp": timestamp, "c7n_data": c7n_data, "environment_column": environment_column, "environment_tags": config['environment_tags'] } with open(rendered_file_path, "w") as result_file: result_file.write( environment.get_template(config['jinja2_template_filename']) .render(render_vars)) logging.debug("File created: %s", rendered_file_path) return rendered_file_path
<SYSTEM_TASK:> Update this function to help build the link to your file <END_TASK> <USER_TASK:> Description: def get_file_url(path, config): """ Update this function to help build the link to your file """
file_url_regex = re.compile(config['file_url_regex'])
new_path = re.sub(file_url_regex, config['file_url_base'], path)
return new_path
<SYSTEM_TASK:> Gather policy information from files <END_TASK> <USER_TASK:> Description: def gather_file_data(config): """ Gather policy information from files """
file_regex = re.compile(config['file_regex'])
category_regex = re.compile(config['category_regex'])
policies = {}

for root, dirs, files in os.walk(config['c7n_policy_directory']):
    for file in files:
        if file_regex.match(file):
            file_path = root + '/' + file
            logging.debug('Processing file %s', file_path)
            with open(file_path, 'r') as stream:
                try:
                    if category_regex.search(file_path):
                        category = 'Security & Governance'
                    else:
                        category = 'Cost Controls'
                    policies = yaml.load(stream)
                    for policy in policies['policies']:
                        logging.debug(
                            'Processing policy %s', policy['name'])
                        policy['file_url'] = get_file_url(
                            file_path, config)
                        resource_type = policy['resource']
                        if category not in c7n_data:
                            c7n_data[category] = {}
                        if resource_type not in c7n_data[category]:
                            c7n_data[category][resource_type] = []
                        c7n_data[category][resource_type].append(policy)
                except yaml.YAMLError as exc:
                    logging.error(exc)
<SYSTEM_TASK:> Return all github repositories in an organization. <END_TASK> <USER_TASK:> Description: def github_repos(organization, github_url, github_token): """Return all github repositories in an organization."""
# Get github repos
headers = {"Authorization": "token {}".format(github_token)}
next_cursor = None
while next_cursor is not False:
    params = {'query': query, 'variables': {
        'organization': organization, 'cursor': next_cursor}}
    response = requests.post(github_url, headers=headers, json=params)
    result = response.json()
    if response.status_code != 200 or 'errors' in result:
        raise ValueError("Github api error %s" % (
            response.content.decode('utf8'),))

    repos = jmespath.search(
        'data.organization.repositories.edges[].node', result)
    for r in repos:
        yield r

    page_info = jmespath.search(
        'data.organization.repositories.pageInfo', result)
    if page_info:
        next_cursor = (page_info['hasNextPage'] and
                       page_info['endCursor'] or False)
    else:
        next_cursor = False
<SYSTEM_TASK:> Stream changes for repos in a GitHub organization. <END_TASK> <USER_TASK:> Description: def org_stream(ctx, organization, github_url, github_token, clone_dir, verbose, filter, exclude, stream_uri, assume): """Stream changes for repos in a GitHub organization. """
logging.basicConfig(
    format="%(asctime)s: %(name)s:%(levelname)s %(message)s",
    level=(verbose and logging.DEBUG or logging.INFO))

log.info("Checkout/Update org repos")
repos = ctx.invoke(
    org_checkout,
    organization=organization,
    github_url=github_url,
    github_token=github_token,
    clone_dir=clone_dir,
    verbose=verbose,
    filter=filter,
    exclude=exclude)

log.info('Streaming org changes')
change_count = 0
for r in repos:
    change_count += ctx.invoke(
        stream,
        repo_uri=r,
        stream_uri=stream_uri,
        verbose=verbose,
        assume=assume)
log.info("Streamed %d org changes", change_count)
<SYSTEM_TASK:> Checkout repositories from a GitHub organization. <END_TASK> <USER_TASK:> Description: def org_checkout(organization, github_url, github_token, clone_dir, verbose, filter, exclude): """Checkout repositories from a GitHub organization."""
logging.basicConfig(
    format="%(asctime)s: %(name)s:%(levelname)s %(message)s",
    level=(verbose and logging.DEBUG or logging.INFO))

callbacks = pygit2.RemoteCallbacks(
    pygit2.UserPass(github_token, 'x-oauth-basic'))

repos = []
for r in github_repos(organization, github_url, github_token):
    if filter:
        found = False
        for f in filter:
            if fnmatch(r['name'], f):
                found = True
                break
        if not found:
            continue
    if exclude:
        found = False
        for e in exclude:
            if fnmatch(r['name'], e):
                found = True
                break
        if found:
            continue

    repo_path = os.path.join(clone_dir, r['name'])
    repos.append(repo_path)
    if not os.path.exists(repo_path):
        log.debug("Cloning repo: %s/%s" % (organization, r['name']))
        repo = pygit2.clone_repository(
            r['url'], repo_path, callbacks=callbacks)
    else:
        repo = pygit2.Repository(repo_path)
        if repo.status():
            log.warning('repo %s not clean skipping update', r['name'])
            continue
        log.debug("Syncing repo: %s/%s" % (organization, r['name']))
        pull(repo, callbacks)
return repos
<SYSTEM_TASK:> Policy diff between two arbitrary revisions. <END_TASK> <USER_TASK:> Description: def diff(repo_uri, source, target, output, verbose): """Policy diff between two arbitrary revisions. Revision specifiers for source and target can use fancy git refspec syntax for symbolics, dates, etc. See: https://git-scm.com/book/en/v2/Git-Tools-Revision-Selection Default revision selection is dependent on the current working tree branch. The intent is to cover two use cases: if on a non-master branch, show the diff to master; if on master, show the diff to the previous commit on master. For repositories not using the `master` convention, please specify explicit source and target. """
logging.basicConfig(
    format="%(asctime)s: %(name)s:%(levelname)s %(message)s",
    level=(verbose and logging.DEBUG or logging.INFO))
logging.getLogger('botocore').setLevel(logging.WARNING)

if repo_uri is None:
    repo_uri = pygit2.discover_repository(os.getcwd())

repo = pygit2.Repository(repo_uri)
load_resources()

# If on master show diff between last commit to current head
if repo.head.shorthand == 'master':
    if source is None:
        source = 'master@{1}'
    if target is None:
        target = 'master'
# Else show difference between master and current head
elif target is None:
    target = repo.head.shorthand
if source is None:
    source = 'master'

policy_repo = PolicyRepo(repo_uri, repo)
changes = list(policy_repo.delta_commits(
    repo.revparse_single(source), repo.revparse_single(target)))
output.write(
    yaml.safe_dump({
        'policies': [c.policy.data for c in changes
                     if c.kind != ChangeType.REMOVE]}).encode('utf8'))
<SYSTEM_TASK:> Stream git history policy changes to destination. <END_TASK> <USER_TASK:> Description: def stream(repo_uri, stream_uri, verbose, assume, sort, before=None, after=None): """Stream git history policy changes to destination. Default stream destination is a summary of the policy changes to stdout, one per line. Also supported for stdout streaming is `jsonline`. AWS Kinesis and SQS destinations are specified by providing the ARN. Database destinations are supported by providing a sqlalchemy DSN. Note SQLAlchemy and db drivers must be installed separately as they are an optional dependency. When using database destinations, streaming defaults to incremental. """
logging.basicConfig(
    format="%(asctime)s: %(name)s:%(levelname)s %(message)s",
    level=(verbose and logging.DEBUG or logging.INFO))
logging.getLogger('botocore').setLevel(logging.WARNING)

if before:
    before = parse(before)
if after:
    after = parse(after)
if sort:
    sort = six.moves.reduce(operator.or_, [SORT_TYPE[s] for s in sort])

with contextlib.closing(TempDir().open()) as temp_dir:
    if repo_uri is None:
        repo_uri = pygit2.discover_repository(os.getcwd())
        log.debug("Using repository %s", repo_uri)
    if repo_uri.startswith('http') or repo_uri.startswith('git@'):
        log.info("Cloning repository: %s", repo_uri)
        repo = pygit2.clone_repository(repo_uri, temp_dir.path)
    else:
        repo = pygit2.Repository(repo_uri)

    load_resources()
    policy_repo = PolicyRepo(repo_uri, repo)
    change_count = 0

    with contextlib.closing(transport(stream_uri, assume)) as t:
        if after is None and isinstance(t, IndexedTransport):
            after = t.last()
        for change in policy_repo.delta_stream(after=after, before=before):
            change_count += 1
            t.send(change)

    log.info("Streamed %d policy repo changes", change_count)
return change_count
<SYSTEM_TASK:> return the named subset of policies <END_TASK> <USER_TASK:> Description: def select(self, names): """return the named subset of policies"""
return PolicyCollection(
    [p for p in self.policies if p.name in names],
    self.options)
<SYSTEM_TASK:> Show policies changes between arbitrary commits. <END_TASK> <USER_TASK:> Description: def delta_commits(self, baseline, target): """Show policies changes between arbitrary commits. The common use form is comparing the heads of two branches. """
baseline_files = self._get_policy_fents(baseline.tree)
target_files = self._get_policy_fents(target.tree)

baseline_policies = PolicyCollection()
target_policies = PolicyCollection()

# Added
for f in set(target_files) - set(baseline_files):
    target_policies += self._policy_file_rev(f, target)

# Removed
for f in set(baseline_files) - set(target_files):
    baseline_policies += self._policy_file_rev(f, baseline)

# Modified
for f in set(baseline_files).intersection(target_files):
    if baseline_files[f].hex == target_files[f].hex:
        continue
    target_policies += self._policy_file_rev(f, target)
    baseline_policies += self._policy_file_rev(f, baseline)

return CollectionDelta(
    baseline_policies, target_policies, target, self.repo_uri).delta()
<SYSTEM_TASK:> Return an iterator of policy changes along a commit lineage in a repo. <END_TASK> <USER_TASK:> Description: def delta_stream(self, target='HEAD', limit=None, sort=pygit2.GIT_SORT_TIME | pygit2.GIT_SORT_REVERSE, after=None, before=None): """Return an iterator of policy changes along a commit lineage in a repo. """
if target == 'HEAD':
    target = self.repo.head.target

commits = []
for commit in self.repo.walk(target, sort):
    cdate = commit_date(commit)
    log.debug(
        "processing commit id:%s date:%s parents:%d msg:%s",
        str(commit.id)[:6], cdate.isoformat(),
        len(commit.parents), commit.message)
    if after and cdate > after:
        continue
    if before and cdate < before:
        continue
    commits.append(commit)
    if limit and len(commits) > limit:
        break

if limit:
    self.initialize_tree(commits[limit].tree)
    commits.pop(-1)

for commit in commits:
    for policy_change in self._process_stream_commit(commit):
        yield policy_change
<SYSTEM_TASK:> Bookkeeping on internal data structures while iterating a stream. <END_TASK> <USER_TASK:> Description: def _process_stream_delta(self, delta_stream): """Bookkeeping on internal data structures while iterating a stream."""
for pchange in delta_stream:
    if pchange.kind == ChangeType.ADD:
        self.policy_files.setdefault(
            pchange.file_path, PolicyCollection()).add(pchange.policy)
    elif pchange.kind == ChangeType.REMOVE:
        self.policy_files[pchange.file_path].remove(pchange.policy)
    elif pchange.kind in (ChangeType.MOVED, ChangeType.MODIFIED):
        if pchange.policy.file_path != pchange.previous.file_path:
            self.policy_files[pchange.previous.file_path].remove(
                pchange.previous)
            if (pchange.policy.file_path in self.policy_files and
                    pchange.policy.name in self.policy_files[pchange.file_path]):
                self.policy_files[pchange.file_path][
                    pchange.policy.name] = pchange.policy
            else:
                self.policy_files.setdefault(
                    pchange.file_path, PolicyCollection()).add(pchange.policy)
        else:
            self.policy_files[pchange.file_path][
                pchange.policy.name] = pchange.policy
    yield pchange
<SYSTEM_TASK:> send the given policy change <END_TASK> <USER_TASK:> Description: def send(self, change): """send the given policy change"""
self.buf.append(change)
if len(self.buf) % self.BUF_SIZE == 0:
    self.flush()
<SYSTEM_TASK:> flush any buffered messages <END_TASK> <USER_TASK:> Description: def flush(self): """flush any buffered messages"""
buf = self.buf
self.buf = []
if buf:
    self._flush(buf)
<SYSTEM_TASK:> Download firehose archive, aggregate records in memory and write back. <END_TASK> <USER_TASK:> Description: def process_firehose_archive(bucket, key): """Download firehose archive, aggregate records in memory and write back."""
data = {}
with tempfile.NamedTemporaryFile(mode='w+b') as fh:
    s3.download_file(bucket, key, fh.name)
    log.warning("Downloaded Key Size:%s Key:%s",
                sizeof_fmt(os.path.getsize(fh.name)), key)
    fh.seek(0, 0)

    record_count = 0
    iteration_count = 0
    for r in records_iter(gzip.GzipFile(fh.name, mode='r')):
        record_count += len(r['logEvents'])
        iteration_count += 1
        key = '%s/%s/%s' % (r['owner'], r['logGroup'], r['logStream'])
        data.setdefault(key, []).extend(r['logEvents'])

        if record_count > EVENTS_SIZE_BUFFER:
            log.warning(
                "Incremental Data Load records:%d enis:%d",
                record_count, len(data))
            for k in data:
                process_record_set(k, data[k])
            data.clear()
            gc.collect()
            record_count = 0

    for k in data:
        process_record_set(k, data[k])
    data.clear()
    gc.collect()
<SYSTEM_TASK:> Split up a firehose s3 object into records <END_TASK> <USER_TASK:> Description: def records_iter(fh, buffer_size=1024 * 1024 * 16): """Split up a firehose s3 object into records Firehose cloudwatch log delivery of flow logs does not delimit record boundaries. We have to use knowledge of content to split the records on boundaries. In the context of flow logs we're dealing with delimited records. """
buf = None
while True:
    chunk = fh.read(buffer_size)
    if not chunk:
        if buf:
            yield json.loads(buf)
        return
    if buf:
        chunk = b"%s%s" % (buf, chunk)
        buf = None
    while chunk:
        idx = chunk.find(b'}{')
        if idx == -1:
            buf = chunk
            chunk = None
            continue
        record = chunk[:idx + 1]
        yield json.loads(record)
        chunk = chunk[idx + 1:]
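A small illustration of the boundary-splitting logic, a sketch assuming the `records_iter` above is in scope; the payload below is hypothetical, but mimics the undelimited back-to-back JSON objects firehose delivers.

import io

# two JSON records back to back with no delimiter between them
payload = io.BytesIO(
    b'{"logGroup": "g1", "logEvents": []}{"logGroup": "g2", "logEvents": []}')
# a tiny buffer_size forces the carry-over (buf) path to be exercised
records = list(records_iter(payload, buffer_size=16))
assert [r['logGroup'] for r in records] == ['g1', 'g2']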
<SYSTEM_TASK:> Get an active session in the target account. <END_TASK> <USER_TASK:> Description: def get_session(self, account_id): """Get an active session in the target account."""
if account_id not in self.account_sessions:
    if account_id not in self.config['accounts']:
        raise AccountNotFound("account:%s is unknown" % account_id)
    self.account_sessions[account_id] = s = assumed_session(
        self.config['accounts'][account_id]['role'], "Sphere11")
    s._session.user_agent_name = "Sphere11"
    s._session.user_agent_version = "0.07"
return self.account_sessions[account_id]
<SYSTEM_TASK:> Scope a schema error to its policy name and resource. <END_TASK> <USER_TASK:> Description: def policy_error_scope(error, data): """Scope a schema error to its policy name and resource."""
err_path = list(error.absolute_path)
if err_path[0] != 'policies':
    return error
pdata = data['policies'][err_path[1]]
error.message = "Error on policy:{} resource:{}\n".format(
    pdata.get('name', 'unknown'), pdata.get('resource', 'unknown')) + error.message
return error
<SYSTEM_TASK:> Try to find the best error for humans to resolve <END_TASK> <USER_TASK:> Description: def specific_error(error): """Try to find the best error for humans to resolve The jsonschema.exceptions.best_match error is based purely on a mix of a strong match (ie. not anyOf, oneOf) and schema depth; this often yields odd results that are semantically confusing. Instead we can use a bit of structural knowledge of the schema to provide better results. """
if error.validator not in ('anyOf', 'oneOf'):
    return error

r = t = None
if isinstance(error.instance, dict):
    t = error.instance.get('type')
    r = error.instance.get('resource')

if r is not None:
    found = None
    for idx, v in enumerate(error.validator_value):
        if v['$ref'].rsplit('/', 2)[1].endswith(r):
            found = idx
            break
    if found is not None:
        # error context is a flat list of all validation
        # failures, we have to index back to the policy
        # of interest.
        for e in error.context:
            # resource policies have a fixed path from
            # the top of the schema
            if e.absolute_schema_path[4] == found:
                return specific_error(e)
        return specific_error(error.context[idx])

if t is not None:
    found = None
    for idx, v in enumerate(error.validator_value):
        if '$ref' in v and v['$ref'].rsplit('/', 2)[-1] == t:
            found = idx
            break
        elif 'type' in v and t in v['properties']['type']['enum']:
            found = idx
            break

    if found is not None:
        for e in error.context:
            for el in reversed(e.absolute_schema_path):
                if isinstance(el, int):
                    if el == found:
                        return e
                    break
return error
<SYSTEM_TASK:> get a resource manager for a given resource type. <END_TASK> <USER_TASK:> Description: def get_resource_manager(self, resource_type, data=None): """get a resource manager for a given resource type. assumes the query is for the same underlying cloud provider. """
if '.' in resource_type:
    provider_name, resource_type = resource_type.split('.', 1)
else:
    provider_name = self.ctx.policy.provider_name

provider_resources = clouds[provider_name].resources
klass = provider_resources.get(resource_type)
if klass is None:
    raise ValueError(resource_type)

# if we're already querying via config carry it forward
if not data and self.source_type == 'config' and getattr(
        klass.get_model(), 'config_type', None):
    return klass(self.ctx, {'source': self.source_type})
return klass(self.ctx, data or {})
<SYSTEM_TASK:> Augment ElasticBeanstalk Environments with their tags. <END_TASK> <USER_TASK:> Description: def _eb_env_tags(envs, session_factory, retry): """Augment ElasticBeanstalk Environments with their tags."""
client = local_session(session_factory).client('elasticbeanstalk')

def process_tags(eb_env):
    try:
        eb_env['Tags'] = retry(
            client.list_tags_for_resource,
            ResourceArn=eb_env['EnvironmentArn'])['ResourceTags']
    except client.exceptions.ResourceNotFoundException:
        return
    return eb_env

# Handle API rate-limiting, which is a problem for accounts with many
# EB Environments
return list(map(process_tags, envs))
<SYSTEM_TASK:> Assemble a document representing all the config state around a bucket. <END_TASK> <USER_TASK:> Description: def assemble_bucket(item): """Assemble a document representing all the config state around a bucket. TODO: Refactor this, the logic here feels quite muddled. """
factory, b = item

s = factory()
c = s.client('s3')

# Bucket Location, Current Client Location, Default Location
b_location = c_location = location = "us-east-1"
methods = list(S3_AUGMENT_TABLE)
for m, k, default, select in methods:
    try:
        method = getattr(c, m)
        v = method(Bucket=b['Name'])
        v.pop('ResponseMetadata')
        if select is not None and select in v:
            v = v[select]
    except (ssl.SSLError, SSLError) as e:
        # Proxy issues? i assume
        log.warning("Bucket ssl error %s: %s %s",
                    b['Name'], b.get('Location', 'unknown'), e)
        continue
    except ClientError as e:
        code = e.response['Error']['Code']
        if code.startswith("NoSuch") or "NotFound" in code:
            v = default
        elif code == 'PermanentRedirect':
            s = factory()
            c = bucket_client(s, b)
            # Requeue with the correct region given location constraint
            methods.append((m, k, default, select))
            continue
        else:
            log.warning(
                "Bucket:%s unable to invoke method:%s error:%s ",
                b['Name'], m, e.response['Error']['Message'])
            # For auth failures, we don't bail out, continue processing if we can.
            # Note this can lead to missing data, but in general is cleaner than
            # failing hard, due to the common use of locked down s3 bucket policies
            # that may cause issues fetching information across a fleet of buckets.

            # This does mean s3 policies depending on augments should check denied
            # methods annotation, generally though lacking get access to an augment
            # means they won't have write access either.

            # For other error types we raise and bail policy execution.
            if e.response['Error']['Code'] == 'AccessDenied':
                b.setdefault('c7n:DeniedMethods', []).append(m)
                continue
            raise
    # As soon as we learn location (which generally works)
    if k == 'Location' and v is not None:
        b_location = v.get('LocationConstraint')
        # Location == region for all cases but EU
        # https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
        if b_location is None:
            b_location = "us-east-1"
        elif b_location == 'EU':
            b_location = "eu-west-1"
            v['LocationConstraint'] = 'eu-west-1'
        if v and v != c_location:
            c = s.client('s3', region_name=b_location)
        elif c_location != location:
            c = s.client('s3', region_name=location)
    b[k] = v
return b
<SYSTEM_TASK:> Tries to get the bucket region from Location.LocationConstraint <END_TASK> <USER_TASK:> Description: def get_region(b): """Tries to get the bucket region from Location.LocationConstraint Special cases: LocationConstraint EU defaults to eu-west-1 LocationConstraint null defaults to us-east-1 Args: b (object): A bucket object Returns: string: an aws region string """
remap = {None: 'us-east-1', 'EU': 'eu-west-1'}
region = b.get('Location', {}).get('LocationConstraint')
return remap.get(region, region)
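A usage sketch with hypothetical bucket documents, assuming `get_region` above is in scope; it shows the two special-cased LocationConstraint values being remapped.

eu_bucket = {'Name': 'legacy', 'Location': {'LocationConstraint': 'EU'}}
classic_bucket = {'Name': 'old', 'Location': {'LocationConstraint': None}}
regional_bucket = {'Name': 'new', 'Location': {'LocationConstraint': 'ap-southeast-2'}}

assert get_region(eu_bucket) == 'eu-west-1'       # EU alias -> eu-west-1
assert get_region(classic_bucket) == 'us-east-1'  # null constraint -> us-east-1
assert get_region(regional_bucket) == 'ap-southeast-2'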
<SYSTEM_TASK:> Format a policy's extant records into a report. <END_TASK> <USER_TASK:> Description: def report(policies, start_date, options, output_fh, raw_output_fh=None): """Format a policy's extant records into a report."""
regions = set([p.options.region for p in policies])
policy_names = set([p.name for p in policies])
formatter = Formatter(
    policies[0].resource_manager.resource_type,
    extra_fields=options.field,
    include_default_fields=not options.no_default_fields,
    include_region=len(regions) > 1,
    include_policy=len(policy_names) > 1
)

records = []
for policy in policies:
    # initialize policy execution context for output access
    policy.ctx.initialize()
    if policy.ctx.output.type == 's3':
        policy_records = record_set(
            policy.session_factory,
            policy.ctx.output.config['netloc'],
            policy.ctx.output.config['path'].strip('/'),
            start_date)
    else:
        policy_records = fs_record_set(policy.ctx.log_dir, policy.name)

    log.debug("Found %d records for region %s",
              len(policy_records), policy.options.region)

    for record in policy_records:
        record['policy'] = policy.name
        record['region'] = policy.options.region

    records += policy_records

rows = formatter.to_csv(records)
if options.format == 'csv':
    writer = UnicodeWriter(output_fh, formatter.headers())
    writer.writerow(formatter.headers())
    writer.writerows(rows)
elif options.format == 'json':
    print(dumps(records, indent=2))
else:
    # We special case CSV, and for other formats we pass to tabulate
    print(tabulate(rows, formatter.headers(), tablefmt=options.format))

if raw_output_fh is not None:
    dumps(records, raw_output_fh, indent=2)
<SYSTEM_TASK:> Retrieve all s3 records for the given policy output url <END_TASK> <USER_TASK:> Description: def record_set(session_factory, bucket, key_prefix, start_date, specify_hour=False): """Retrieve all s3 records for the given policy output url From the given start date. """
s3 = local_session(session_factory).client('s3')

records = []
key_count = 0

date = start_date.strftime('%Y/%m/%d')
if specify_hour:
    date += "/{}".format(start_date.hour)
else:
    date += "/00"

marker = "{}/{}/resources.json.gz".format(key_prefix.strip("/"), date)

p = s3.get_paginator('list_objects_v2').paginate(
    Bucket=bucket,
    Prefix=key_prefix.strip('/') + '/',
    StartAfter=marker,
)

with ThreadPoolExecutor(max_workers=20) as w:
    for key_set in p:
        if 'Contents' not in key_set:
            continue
        keys = [k for k in key_set['Contents']
                if k['Key'].endswith('resources.json.gz')]
        key_count += len(keys)
        futures = map(lambda k: w.submit(
            get_records, bucket, k, session_factory), keys)

        for f in as_completed(futures):
            records.extend(f.result())

    log.info("Fetched %d records across %d files" % (
        len(records), key_count))

return records
<SYSTEM_TASK:> Only the first record for each id <END_TASK> <USER_TASK:> Description: def uniq_by_id(self, records): """Only the first record for each id"""
uniq = []
keys = set()
for rec in records:
    rec_id = rec[self._id_field]
    if rec_id not in keys:
        uniq.append(rec)
        keys.add(rec_id)
return uniq
<SYSTEM_TASK:> Resources preparation for transport. <END_TASK> <USER_TASK:> Description: def prepare_resources(self, resources): """Resources preparation for transport. If we have sensitive or overly large resource metadata we want to remove, or additional serialization we need to perform, this provides a mechanism. TODO: consider alternative implementations, at min look at adding provider as additional discriminator to resource type. One alternative would be dynamically adjusting buffer size based on underlying transport. """
handler = getattr(self, "prepare_%s" % (
    self.manager.type.replace('-', '_')), None)
if handler is None:
    return resources
return handler(resources)
<SYSTEM_TASK:> run export across accounts and log groups specified in config. <END_TASK> <USER_TASK:> Description: def run(config, start, end, accounts, region, debug): """run export across accounts and log groups specified in config."""
config = validate.callback(config)
destination = config.get('destination')
start = start and parse(start) or start
end = end and parse(end) or datetime.now()
executor = debug and MainThreadExecutor or ThreadPoolExecutor

with executor(max_workers=32) as w:
    futures = {}
    for account in config.get('accounts', ()):
        if accounts and account['name'] not in accounts:
            continue
        futures[
            w.submit(process_account, account, start,
                     end, destination, region)] = account
    for f in as_completed(futures):
        account = futures[f]
        if f.exception():
            log.error("Error on account %s err: %s",
                      account['name'], f.exception())
        log.info("Completed %s", account['name'])
<SYSTEM_TASK:> simple decorator that will auto fan out async style in lambda. <END_TASK> <USER_TASK:> Description: def lambdafan(func): """simple decorator that will auto fan out async style in lambda. outside of lambda, this will invoke synchronously. """
if 'AWS_LAMBDA_FUNCTION_NAME' not in os.environ:
    return func

@functools.wraps(func)
def scaleout(*args, **kw):
    client = boto3.client('lambda')
    client.invoke(
        FunctionName=os.environ['AWS_LAMBDA_FUNCTION_NAME'],
        InvocationType='Event',
        Payload=dumps({
            'event': 'fanout',
            'function': func.__name__,
            'args': args,
            'kwargs': kw}),
        Qualifier=os.environ['AWS_LAMBDA_FUNCTION_VERSION'])
return scaleout
<SYSTEM_TASK:> Filter log groups by shell patterns. <END_TASK> <USER_TASK:> Description: def filter_group_names(groups, patterns): """Filter log groups by shell patterns. """
group_names = [g['logGroupName'] for g in groups]
matched = set()
for p in patterns:
    matched.update(fnmatch.filter(group_names, p))
return [g for g in groups if g['logGroupName'] in matched]
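A quick usage sketch with hypothetical log-group records, assuming `filter_group_names` above is importable; the patterns are ordinary shell globs as accepted by fnmatch.

groups = [
    {'logGroupName': '/aws/lambda/custodian-tag-compliance'},
    {'logGroupName': '/aws/lambda/custodian-s3-encrypt'},
    {'logGroupName': '/ecs/web-app'},
]
kept = filter_group_names(groups, ['/aws/lambda/custodian-*'])
assert [g['logGroupName'] for g in kept] == [
    '/aws/lambda/custodian-tag-compliance',
    '/aws/lambda/custodian-s3-encrypt']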
<SYSTEM_TASK:> Filter log groups by their creation date. <END_TASK> <USER_TASK:> Description: def filter_creation_date(groups, start, end): """Filter log groups by their creation date. Also sets group specific value for start to the minimum of creation date or start. """
results = []
for g in groups:
    created = datetime.fromtimestamp(g['creationTime'] / 1000.0)
    if created > end:
        continue
    if created > start:
        g['exportStart'] = created
    else:
        g['exportStart'] = start
    results.append(g)
return results
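A sketch of the exportStart annotation with hypothetical groups, assuming `filter_creation_date` above is in scope; creationTime is epoch milliseconds, as describe_log_groups returns it.

from datetime import datetime, timedelta

start = datetime.now() - timedelta(days=7)
end = datetime.now()
groups = [
    {'logGroupName': 'recent',
     'creationTime': (datetime.now() - timedelta(days=2)).timestamp() * 1000},
    {'logGroupName': 'older',
     'creationTime': (datetime.now() - timedelta(days=30)).timestamp() * 1000},
]
kept = filter_creation_date(groups, start, end)
# recently created group exports from its creation date, the older one from `start`
assert kept[0]['exportStart'] > start and kept[1]['exportStart'] == start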
<SYSTEM_TASK:> Filter log groups where the last write was before the start date. <END_TASK> <USER_TASK:> Description: def filter_last_write(client, groups, start): """Filter log groups where the last write was before the start date. """
retry = get_retry(('ThrottlingException',))

def process_group(group_set):
    matched = []
    for g in group_set:
        streams = retry(
            client.describe_log_streams,
            logGroupName=g['logGroupName'],
            orderBy='LastEventTime',
            limit=1, descending=True)
        if not streams.get('logStreams'):
            continue
        stream = streams['logStreams'][0]
        if stream['storedBytes'] == 0 and datetime.fromtimestamp(
                stream['creationTime'] / 1000) > start:
            matched.append(g)
        elif 'lastIngestionTime' in stream and datetime.fromtimestamp(
                stream['lastIngestionTime'] / 1000) > start:
            matched.append(g)
    return matched

results = []

with ThreadPoolExecutor(max_workers=3) as w:
    futures = {}
    for group_set in chunks(groups, 10):
        futures[w.submit(process_group, group_set)] = group_set
    for f in as_completed(futures):
        if f.exception():
            log.error(
                "Error processing groupset:%s error:%s",
                group_set, f.exception())
        results.extend(f.result())

return results
<SYSTEM_TASK:> Filter days where the bucket already has extant export keys. <END_TASK> <USER_TASK:> Description: def filter_extant_exports(client, bucket, prefix, days, start, end=None): """Filter days where the bucket already has extant export keys. """
end = end or datetime.now()
# days = [start + timedelta(i) for i in range((end-start).days)]
try:
    tag_set = client.get_object_tagging(
        Bucket=bucket, Key=prefix).get('TagSet', [])
except ClientError as e:
    if e.response['Error']['Code'] != 'NoSuchKey':
        raise
    tag_set = []

tags = {t['Key']: t['Value'] for t in tag_set}

if 'LastExport' not in tags:
    return sorted(days)

last_export = parse(tags['LastExport'])
if last_export.tzinfo is None:
    last_export = last_export.replace(tzinfo=tzutc())

return [d for d in sorted(days) if d > last_export]
<SYSTEM_TASK:> size of exported records for a given day. <END_TASK> <USER_TASK:> Description: def size(config, accounts=(), day=None, group=None, human=True, region=None): """size of exported records for a given day."""
config = validate.callback(config)
destination = config.get('destination')
client = boto3.Session().client('s3')
day = parse(day)

def export_size(client, account):
    paginator = client.get_paginator('list_objects_v2')
    count = 0
    size = 0
    session = get_session(account['role'], region)
    account_id = session.client('sts').get_caller_identity()['Account']
    prefix = destination.get('prefix', '').rstrip('/') + '/%s' % account_id
    prefix = "%s/%s/%s" % (prefix, group, day.strftime("%Y/%m/%d"))
    account['account_id'] = account_id
    for page in paginator.paginate(
            Bucket=destination['bucket'],
            Prefix=prefix):
        for k in page.get('Contents', ()):
            size += k['Size']
            count += 1
    return (count, size)

total_size = 0
accounts_report = []
logging.getLogger('botocore').setLevel(logging.ERROR)

with ThreadPoolExecutor(max_workers=16) as w:
    futures = {}
    for account in config.get('accounts'):
        if accounts and account['name'] not in accounts:
            continue
        futures[w.submit(export_size, client, account)] = account

    for f in as_completed(futures):
        account = futures[f]
        count, size = f.result()
        account.pop('role')
        account.pop('groups')
        total_size += size
        if human:
            account['size'] = GetHumanSize(size)
        else:
            account['size'] = size
        account['count'] = count
        accounts_report.append(account)

accounts_report.sort(key=operator.itemgetter('count'), reverse=True)
print(tabulate(accounts_report, headers='keys'))
log.info("total size:%s", GetHumanSize(total_size))
<SYSTEM_TASK:> report current export state status <END_TASK> <USER_TASK:> Description: def status(config, group, accounts=(), region=None): """report current export state status"""
config = validate.callback(config)
destination = config.get('destination')
client = boto3.Session().client('s3')

for account in config.get('accounts', ()):
    if accounts and account['name'] not in accounts:
        continue
    session = get_session(account['role'], region)
    account_id = session.client('sts').get_caller_identity()['Account']
    prefix = destination.get('prefix', '').rstrip('/') + '/%s' % account_id
    prefix = "%s/flow-log" % prefix

    role = account.pop('role')
    if isinstance(role, six.string_types):
        account['account_id'] = role.split(':')[4]
    else:
        account['account_id'] = role[-1].split(':')[4]
    account.pop('groups')

    try:
        tag_set = client.get_object_tagging(
            Bucket=destination['bucket'], Key=prefix).get('TagSet', [])
    except ClientError:
        account['export'] = 'missing'
        continue

    tags = {t['Key']: t['Value'] for t in tag_set}

    if 'LastExport' not in tags:
        account['export'] = 'empty'
    else:
        last_export = parse(tags['LastExport'])
        account['export'] = last_export.strftime('%Y/%m/%d')

accounts = [a for a in config.get('accounts') if a in accounts or not accounts]
accounts.sort(key=operator.itemgetter('export'), reverse=True)
print(tabulate(accounts, headers='keys'))
<SYSTEM_TASK:> Find exports for a given account <END_TASK> <USER_TASK:> Description: def get_exports(client, bucket, prefix, latest=True): """Find exports for a given account """
keys = client.list_objects_v2(
    Bucket=bucket, Prefix=prefix, Delimiter='/').get('CommonPrefixes', [])
found = []
years = []
for y in keys:
    part = y['Prefix'].rsplit('/', 2)[-2]
    if not part.isdigit():
        continue
    year = int(part)
    years.append(year)

if not years:
    return []

years.sort(reverse=True)
if latest:
    years = [years[0]]

for y in years:
    keys = client.list_objects_v2(
        Bucket=bucket, Prefix="%s/%d/" % (prefix.strip('/'), y),
        Delimiter='/').get('CommonPrefixes', [])
    months = []
    for m in keys:
        part = m['Prefix'].rsplit('/', 2)[-2]
        if not part.isdigit():
            continue
        month = int(part)
        date_key = (y, month)
        months.append(month)
    months.sort(reverse=True)
    if not months:
        continue
    if latest:
        months = [months[0]]
    for m in months:
        keys = client.list_objects_v2(
            Bucket=bucket, Prefix="%s/%d/%s/" % (
                prefix.strip('/'), y, ('%d' % m).rjust(2, '0')),
            Delimiter='/').get('CommonPrefixes', [])
        for d in keys:
            part = d['Prefix'].rsplit('/', 2)[-2]
            if not part.isdigit():
                continue
            day = int(part)
            date_key = (y, m, day)
            found.append(date_key)

found.sort(reverse=True)
if latest:
    found = [found[0]]
return found
<SYSTEM_TASK:> Lambda Entrypoint - Log Subscriber <END_TASK> <USER_TASK:> Description: def process_log_event(event, context): """Lambda Entrypoint - Log Subscriber Format log events and relay to sentry (direct or sqs) """
init()

# Grab the actual error log payload
serialized = event['awslogs'].pop('data')
data = json.loads(zlib.decompress(
    base64.b64decode(serialized), 16 + zlib.MAX_WBITS))
msg = get_sentry_message(config, data)
if msg is None:
    return
if config['sentry_dsn']:
    # Deliver directly to sentry
    send_sentry_message(config['sentry_dsn'], msg)
elif config['sentry_sqs']:
    # Delivery indirectly via sqs
    sqs.send_message(
        QueueUrl=config['sentry_sqs'])
<SYSTEM_TASK:> Break an iterable into lists of size <END_TASK> <USER_TASK:> Description: def chunks(iterable, size=50): """Break an iterable into lists of size"""
batch = []
for n in iterable:
    batch.append(n)
    if len(batch) % size == 0:
        yield batch
        batch = []
if batch:
    yield batch
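A minimal usage sketch, assuming the `chunks` helper above is in scope; it consumes an iterable in fixed-size batches, with the final partial batch yielded last.

# consume an iterable in fixed-size batches
batches = list(chunks(range(7), size=3))
assert batches == [[0, 1, 2], [3, 4, 5], [6]]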
<SYSTEM_TASK:> Load external plugins. <END_TASK> <USER_TASK:> Description: def load_plugins(self): """ Load external plugins. Custodian is intended to interact with internal and external systems that are not suitable for embedding into the custodian code base. """
try:
    from pkg_resources import iter_entry_points
except ImportError:
    return
for ep in iter_entry_points(group="custodian.%s" % self.plugin_type):
    f = ep.load()
    f()
<SYSTEM_TASK:> Submit a function for serialized execution on sqs <END_TASK> <USER_TASK:> Description: def submit(self, func, *args, **kwargs): """Submit a function for serialized execution on sqs """
self.op_sequence += 1
self.sqs.send_message(
    QueueUrl=self.map_queue,
    MessageBody=utils.dumps({'args': args, 'kwargs': kwargs}),
    MessageAttributes={
        'sequence_id': {
            'StringValue': str(self.op_sequence),
            'DataType': 'Number'},
        'op': {
            'StringValue': named(func),
            'DataType': 'String',
        },
        'ser': {
            'StringValue': 'json',
            'DataType': 'String'}}
)

self.futures[self.op_sequence] = f = SQSFuture(
    self.op_sequence)
return f
<SYSTEM_TASK:> Fetch results from separate queue <END_TASK> <USER_TASK:> Description: def gather(self): """Fetch results from separate queue """
limit = self.op_sequence - self.op_sequence_start
results = MessageIterator(self.sqs, self.reduce_queue, limit)
for m in results:
    # sequence_id from above
    msg_id = int(m['MessageAttributes']['sequence_id']['StringValue'])
    if (not msg_id > self.op_sequence_start or
            not msg_id <= self.op_sequence or
            msg_id not in self.futures):
        raise RuntimeError(
            "Concurrent queue user from different "
            "process or previous results")
    f = self.futures[msg_id]
    f.set_result(m)
    results.ack(m)
<SYSTEM_TASK:> normalize tag format on ecs resources to match common aws format. <END_TASK> <USER_TASK:> Description: def ecs_tag_normalize(resources): """normalize tag format on ecs resources to match common aws format."""
for r in resources:
    if 'tags' in r:
        r['Tags'] = [{'Key': t['key'], 'Value': t['value']}
                     for t in r['tags']]
        r.pop('tags')
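A small sketch with a hypothetical ECS service record, assuming `ecs_tag_normalize` above is in scope; it shows the lowercase ECS tag shape being rewritten into the common Key/Value form in place.

services = [{'serviceName': 'web',
             'tags': [{'key': 'env', 'value': 'prod'}]}]
ecs_tag_normalize(services)
assert services[0]['Tags'] == [{'Key': 'env', 'Value': 'prod'}]
assert 'tags' not in services[0]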
<SYSTEM_TASK:> Retrieve any associated metrics for the policy. <END_TASK> <USER_TASK:> Description: def get_metrics(self, start, end, period): """Retrieve any associated metrics for the policy."""
values = {}
default_dimensions = {
    'Policy': self.policy.name,
    'ResType': self.policy.resource_type,
    'Scope': 'Policy'}

metrics = list(self.POLICY_METRICS)

# Support action, and filter custom metrics
for el in itertools.chain(
        self.policy.resource_manager.actions,
        self.policy.resource_manager.filters):
    if el.metrics:
        metrics.extend(el.metrics)

session = utils.local_session(self.policy.session_factory)
client = session.client('cloudwatch')

for m in metrics:
    if isinstance(m, six.string_types):
        dimensions = default_dimensions
    else:
        m, m_dimensions = m
        dimensions = dict(default_dimensions)
        dimensions.update(m_dimensions)
    results = client.get_metric_statistics(
        Namespace=DEFAULT_NAMESPACE,
        Dimensions=[
            {'Name': k, 'Value': v} for k, v in dimensions.items()],
        Statistics=['Sum', 'Average'],
        StartTime=start,
        EndTime=end,
        Period=period,
        MetricName=m)
    values[m] = results['Datapoints']
return values
<SYSTEM_TASK:> Run policy in push mode against given event. <END_TASK> <USER_TASK:> Description: def run(self, event, lambda_context): """Run policy in push mode against given event. Lambda automatically generates cloud watch logs, and metrics for us, albeit with some deficiencies: metrics no longer count against valid resource matches, but against execution. If the metrics execution option is enabled, custodian will generate metrics per normal. """
from c7n.actions import EventAction

mode = self.policy.data.get('mode', {})
if not bool(mode.get("log", True)):
    root = logging.getLogger()
    map(root.removeHandler, root.handlers[:])
    root.handlers = [logging.NullHandler()]

resources = self.resolve_resources(event)
if not resources:
    return resources
resources = self.policy.resource_manager.filter_resources(
    resources, event)

if 'debug' in event:
    self.policy.log.info("Filtered resources %d" % len(resources))

if not resources:
    self.policy.log.info(
        "policy: %s resources: %s no resources matched" % (
            self.policy.name, self.policy.resource_type))
    return

with self.policy.ctx:
    self.policy.ctx.metrics.put_metric(
        'ResourceCount', len(resources), 'Count', Scope="Policy",
        buffer=False)

    if 'debug' in event:
        self.policy.log.info(
            "Invoking actions %s", self.policy.resource_manager.actions)

    self.policy._write_file(
        'resources.json', utils.dumps(resources, indent=2))

    for action in self.policy.resource_manager.actions:
        self.policy.log.info(
            "policy: %s invoking action: %s resources: %d",
            self.policy.name, action.name, len(resources))
        if isinstance(action, EventAction):
            results = action.process(resources, event)
        else:
            results = action.process(resources)
        self.policy._write_file(
            "action-%s" % action.name, utils.dumps(results))
return resources
<SYSTEM_TASK:> Get runtime variables for policy interpolation. <END_TASK> <USER_TASK:> Description: def get_variables(self, variables=None): """Get runtime variables for policy interpolation. Runtime variables are merged with the passed in variables if any. """
# Global policy variable expansion, we have to carry forward on
# various filter/action local vocabularies. Where possible defer
# by using a format string.
#
# See https://github.com/capitalone/cloud-custodian/issues/2330
if not variables:
    variables = {}

if 'mode' in self.data:
    if 'role' in self.data['mode'] and not self.data['mode']['role'].startswith("arn:aws"):
        self.data['mode']['role'] = "arn:aws:iam::%s:role/%s" % \
            (self.options.account_id, self.data['mode']['role'])

variables.update({
    # standard runtime variables for interpolation
    'account': '{account}',
    'account_id': self.options.account_id,
    'region': self.options.region,
    # non-standard runtime variables from local filter/action vocabularies
    #
    # notify action
    'policy': self.data,
    'event': '{event}',
    # mark for op action
    'op': '{op}',
    'action_date': '{action_date}',
    # tag action pyformat-date handling
    'now': utils.FormatDate(datetime.utcnow()),
    # account increase limit action
    'service': '{service}',
    # s3 set logging action :-( see if we can revisit this one.
    'bucket_region': '{bucket_region}',
    'bucket_name': '{bucket_name}',
    'source_bucket_name': '{source_bucket_name}',
    'target_bucket_name': '{target_bucket_name}',
    'target_prefix': '{target_prefix}',
    'LoadBalancerName': '{LoadBalancerName}'
})
return variables
<SYSTEM_TASK:> Expand variables in policy data. <END_TASK> <USER_TASK:> Description: def expand_variables(self, variables): """Expand variables in policy data. Updates the policy data in-place. """
# format string values returns a copy
updated = utils.format_string_values(self.data, **variables)

# Several keys should only be expanded at runtime, preserve them.
if 'member-role' in updated.get('mode', {}):
    updated['mode']['member-role'] = self.data['mode']['member-role']

# Update ourselves in place
self.data = updated

# Reload filters/actions using updated data, we keep a reference
# for some compatibility preservation work.
m = self.resource_manager
self.resource_manager = self.load_resource_manager()

# XXX: Compatibility hack
# Preserve notify action subject lines which support
# embedded jinja2 as a passthrough to the mailer.
for old_a, new_a in zip(m.actions, self.resource_manager.actions):
    if old_a.type == 'notify' and 'subject' in old_a.data:
        new_a.data['subject'] = old_a.data['subject']
<SYSTEM_TASK:> get permissions needed by this policy <END_TASK> <USER_TASK:> Description: def get_permissions(self): """get permissions needed by this policy"""
permissions = set()
permissions.update(self.resource_manager.get_permissions())
for f in self.resource_manager.filters:
    permissions.update(f.get_permissions())
for a in self.resource_manager.actions:
    permissions.update(a.get_permissions())
return permissions
<SYSTEM_TASK:> Handle various client side errors when describing snapshots <END_TASK> <USER_TASK:> Description: def extract_bad_snapshot(e): """Handle various client side errors when describing snapshots"""
msg = e.response['Error']['Message']
error = e.response['Error']['Code']
e_snap_id = None
if error == 'InvalidSnapshot.NotFound':
    e_snap_id = msg[msg.find("'") + 1:msg.rfind("'")]
    log.warning("Snapshot not found %s" % e_snap_id)
elif error == 'InvalidSnapshotID.Malformed':
    e_snap_id = msg[msg.find('"') + 1:msg.rfind('"')]
    log.warning("Snapshot id malformed %s" % e_snap_id)
return e_snap_id
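A sketch of the quote-extraction behavior using a hypothetical stand-in exception (FakeClientError is not a real botocore class, and the message text is illustrative); it assumes `extract_bad_snapshot` above and its module-level `log` are available.

class FakeClientError(Exception):
    # hypothetical stand-in for botocore ClientError, carrying only the
    # fields the helper reads
    def __init__(self, code, message):
        self.response = {'Error': {'Code': code, 'Message': message}}

err = FakeClientError(
    'InvalidSnapshot.NotFound',
    "The snapshot 'snap-0123456789abcdef0' does not exist.")
# the snapshot id is pulled out from between the quotes in the message
assert extract_bad_snapshot(err) == 'snap-0123456789abcdef0'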
<SYSTEM_TASK:> STS Role assume a boto3.Session <END_TASK> <USER_TASK:> Description: def assumed_session(role_arn, session_name, session=None, region=None, external_id=None): """STS Role assume a boto3.Session With automatic credential renewal. Args: role_arn: iam role arn to assume session_name: client session identifier session: an optional extant session, note session is captured in a function closure for renewing the sts assumed role. :return: a boto3 session using the sts assumed role credentials Notes: We have to poke at botocore internals a few times """
if session is None:
    session = Session()

retry = get_retry(('Throttling',))

def refresh():
    parameters = {"RoleArn": role_arn,
                  "RoleSessionName": session_name}
    if external_id is not None:
        parameters['ExternalId'] = external_id

    credentials = retry(
        session.client('sts').assume_role, **parameters)['Credentials']
    return dict(
        access_key=credentials['AccessKeyId'],
        secret_key=credentials['SecretAccessKey'],
        token=credentials['SessionToken'],
        # Silly that we basically stringify so it can be parsed again
        expiry_time=credentials['Expiration'].isoformat())

session_credentials = RefreshableCredentials.create_from_metadata(
    metadata=refresh(),
    refresh_using=refresh,
    method='sts-assume-role')

# so dirty.. it hurts, no clean way to set this outside of the
# internals poke. There's some work upstream on making this nicer
# but its pretty baroque as well with upstream support.
# https://github.com/boto/boto3/issues/443
# https://github.com/boto/botocore/issues/761
s = get_session()
s._credentials = session_credentials
if region is None:
    region = s.get_config_variable('region') or 'us-east-1'
s.set_config_variable('region', region)
return Session(botocore_session=s)
<SYSTEM_TASK:> Does the resource tag schedule and policy match the current time. <END_TASK> <USER_TASK:> Description: def process_resource_schedule(self, i, value, time_type): """Does the resource tag schedule and policy match the current time."""
rid = i[self.id_key]
# this is to normalize trailing semicolons which when done allows
# dateutil.parser.parse to process: value='off=(m-f,1);' properly.
# before this normalization, some cases would silently fail.
value = ';'.join(filter(None, value.split(';')))
if self.parser.has_resource_schedule(value, time_type):
    schedule = self.parser.parse(value)
elif self.parser.keys_are_valid(value):
    # respect timezone from tag
    raw_data = self.parser.raw_data(value)
    if 'tz' in raw_data:
        schedule = dict(self.default_schedule)
        schedule['tz'] = raw_data['tz']
    else:
        schedule = self.default_schedule
else:
    schedule = None

if schedule is None:
    log.warning(
        "Invalid schedule on resource:%s value:%s", rid, value)
    self.parse_errors.append((rid, value))
    return False

tz = self.get_tz(schedule['tz'])
if not tz:
    log.warning(
        "Could not resolve tz on resource:%s value:%s", rid, value)
    self.parse_errors.append((rid, value))
    return False

now = datetime.datetime.now(tz).replace(
    minute=0, second=0, microsecond=0)
now_str = now.strftime("%Y-%m-%d")
if 'skip-days-from' in self.data:
    values = ValuesFrom(self.data['skip-days-from'], self.manager)
    self.skip_days = values.get_values()
else:
    self.skip_days = self.data.get('skip-days', [])

if now_str in self.skip_days:
    return False

return self.match(now, schedule)
<SYSTEM_TASK:> Get the resource's tag value specifying its schedule. <END_TASK> <USER_TASK:> Description: def get_tag_value(self, i): """Get the resource's tag value specifying its schedule."""
# Look for the tag, Normalize tag key and tag value
found = False
for t in i.get('Tags', ()):
    if t['Key'].lower() == self.tag_key:
        found = t['Value']
        break
if found is False:
    return False

# enforce utf8, or do translate tables via unicode ord mapping
value = found.lower().encode('utf8').decode('utf8')
# Some folks seem to be interpreting the docs quote marks as
# literal for values.
value = value.strip("'").strip('"')
return value
<SYSTEM_TASK:> convert the tag to a dictionary, taking values as is <END_TASK> <USER_TASK:> Description: def raw_data(tag_value): """convert the tag to a dictionary, taking values as is This method name and purpose are opaque... and not true. """
data = {}

pieces = []
for p in tag_value.split(' '):
    pieces.extend(p.split(';'))

# parse components
for piece in pieces:
    kv = piece.split('=')
    # components must be key=value
    if not len(kv) == 2:
        continue
    key, value = kv
    data[key] = value

return data
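A usage sketch with a hypothetical schedule tag value, assuming the parser above is reachable as ScheduleParser.raw_data; it splits the tag on spaces and semicolons and keeps only key=value components.

tag = 'off=(m-f,19);on=(m-f,7);tz=et'
assert ScheduleParser.raw_data(tag) == {
    'off': '(m-f,19)', 'on': '(m-f,7)', 'tz': 'et'}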
<SYSTEM_TASK:> test that provided tag keys are valid <END_TASK> <USER_TASK:> Description: def keys_are_valid(self, tag_value): """test that provided tag keys are valid"""
for key in ScheduleParser.raw_data(tag_value):
    if key not in ('on', 'off', 'tz'):
        return False
return True
<SYSTEM_TASK:> Garbage collect old custodian policies based on prefix. <END_TASK> <USER_TASK:> Description: def resources_gc_prefix(options, policy_config, policy_collection): """Garbage collect old custodian policies based on prefix. We attempt to introspect to find the event sources for a policy but without the old configuration this is implicit. """
# Classify policies by region
policy_regions = {}
for p in policy_collection:
    if p.execution_mode == 'poll':
        continue
    policy_regions.setdefault(p.options.region, []).append(p)

regions = get_gc_regions(options.regions)
for r in regions:
    region_gc(options, r, policy_config, policy_regions.get(r, []))
<SYSTEM_TASK:> Get a boto3 session potentially cross account sts assumed <END_TASK> <USER_TASK:> Description: def get_session(account_info): """Get a boto3 session, potentially cross-account STS assumed; assumed sessions are automatically refreshed. """
s = getattr(CONN_CACHE, '%s-session' % account_info['name'], None)
if s is not None:
    return s
if account_info.get('role'):
    s = assumed_session(account_info['role'], SESSION_NAME)
else:
    s = boto3.Session()
setattr(CONN_CACHE, '%s-session' % account_info['name'], s)
return s
<SYSTEM_TASK:> Context manager for dealing with s3 errors in one place <END_TASK> <USER_TASK:> Description: def bucket_ops(bid, api=""): """Context manager for dealing with s3 errors in one place bid: bucket_id in form of account_name:bucket_name """
try:
    yield 42
except ClientError as e:
    code = e.response['Error']['Code']
    log.info(
        "bucket error bucket:%s error:%s",
        bid, e.response['Error']['Code'])
    if code == "NoSuchBucket":
        pass
    elif code == 'AccessDenied':
        connection.sadd('buckets-denied', bid)
    else:
        connection.hset(
            'buckets-unknown-errors',
            bid, "%s:%s" % (api, e.response['Error']['Code']))
except Exception as e:
    connection.hset(
        'buckets-unknown-errors',
        bid, "%s:%s" % (api, str(e)))
    # Let the error queue catch it
    raise
<SYSTEM_TASK:> Remove bits in content results to minimize memory utilization. <END_TASK> <USER_TASK:> Description: def page_strip(page, versioned): """Remove bits in content results to minimize memory utilization. TODO: evolve this to a key filter on metadata, like date """
# page strip filtering should be conditional
page.pop('ResponseMetadata', None)
contents_key = versioned and 'Versions' or 'Contents'
contents = page.get(contents_key, ())

# aggressive size
if versioned:
    keys = []
    for k in contents:
        if k['IsLatest']:
            keys.append((k['Key'], k['VersionId'], True))
        else:
            keys.append((k['Key'], k['VersionId']))
    return keys
else:
    return [k['Key'] for k in contents]

if not contents:
    return page

# Depending on use case we may want these
for k in contents:
    k.pop('Owner', None)
    k.pop('LastModified', None)
    k.pop('ETag', None)
    k.pop('StorageClass', None)
    k.pop('Size', None)

return page
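A usage sketch with hypothetical list_objects pages, assuming `page_strip` above is in scope; the versioned page collapses to (key, version, latest) tuples, the unversioned page to bare key names.

versioned_page = {
    'ResponseMetadata': {},
    'Versions': [
        {'Key': 'a.txt', 'VersionId': 'v2', 'IsLatest': True},
        {'Key': 'a.txt', 'VersionId': 'v1', 'IsLatest': False},
    ]}
plain_page = {'Contents': [{'Key': 'a.txt'}, {'Key': 'b.txt'}]}

assert page_strip(versioned_page, versioned=True) == [
    ('a.txt', 'v2', True), ('a.txt', 'v1')]
assert page_strip(plain_page, versioned=False) == ['a.txt', 'b.txt']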
<SYSTEM_TASK:> Scan all buckets in an account and schedule processing <END_TASK> <USER_TASK:> Description: def process_account(account_info): """Scan all buckets in an account and schedule processing"""
log = logging.getLogger('salactus.bucket-iterator') log.info("processing account %s", account_info) session = get_session(account_info) client = session.client('s3', config=s3config) buckets = client.list_buckets()['Buckets'] connection.hset( 'bucket-accounts', account_info['name'], json.dumps(account_info)) for b in buckets: connection.hset( 'bucket-ages', bucket_id(account_info, b['Name']), b['CreationDate'].isoformat()) account_buckets = account_info.pop('buckets', None) buckets = [n['Name'] for n in buckets if not account_buckets or n['Name'] in account_buckets] account_not_buckets = account_info.pop('not-buckets', None) buckets = [n for n in buckets if not account_not_buckets or n not in account_not_buckets] log.info("processing %d buckets in account %s", len(buckets), account_info['name']) for bucket_set in chunks(buckets, 50): invoke(process_bucket_set, account_info, bucket_set)
<SYSTEM_TASK:> Process a collection of buckets. <END_TASK> <USER_TASK:> Description: def process_bucket_set(account_info, buckets): """Process a collection of buckets. For each bucket fetch location, versioning and size and then kickoff processing strategy based on size. """
region_clients = {} log = logging.getLogger('salactus.bucket-set') log.info("processing account %s", account_info) session = get_session(account_info) client = session.client('s3', config=s3config) for b in buckets: bid = bucket_id(account_info, b) with bucket_ops(bid): info = {'name': b} error = None try: location = client.get_bucket_location( Bucket=b).get('LocationConstraint') except Exception as e: error = e location = None if location is None: region = "us-east-1" elif location == 'EU': region = "eu-west-1" else: region = location if (account_info.get('regions', ()) and region not in account_info.get('regions', ())): continue info['region'] = region if region not in region_clients: region_clients.setdefault(region, {}) region_clients[region]['s3'] = s3 = session.client( 's3', region_name=region, config=s3config) region_clients[region]['cloudwatch'] = cw = session.client( 'cloudwatch', region_name=region, config=s3config) else: s3 = region_clients[region]['s3'] cw = region_clients[region]['cloudwatch'] try: info['keycount'] = bucket_key_count(cw, info) except Exception: raise else: connection.hset('bucket-sizes', bid, info['keycount']) if error: raise error connection.hset('bucket-regions', bid, region) versioning = s3.get_bucket_versioning(Bucket=b) info['versioned'] = ( versioning and versioning.get('Status', '') in ('Enabled', 'Suspended') or False) connection.hset('bucket-versions', bid, int(info['versioned'])) log.info("processing bucket %s", info) connection.hset('bucket-starts', bid, time.time()) dispatch_object_source(s3, account_info, bid, info)
<SYSTEM_TASK:> Select and dispatch an object source for a bucket. <END_TASK> <USER_TASK:> Description: def dispatch_object_source(client, account_info, bid, bucket_info): """Select and dispatch an object source for a bucket. Choices are bucket partition, inventory, or direct pagination. """
if (account_info.get('inventory') and bucket_info['keycount'] > account_info['inventory'].get('bucket-size-threshold', DEFAULT_INVENTORY_BUCKET_SIZE_THRESHOLD)): inventory_info = get_bucket_inventory( client, bucket_info['name'], account_info['inventory'].get('id-selector', '*')) if inventory_info is not None: return invoke( process_bucket_inventory, bid, inventory_info['bucket'], inventory_info['prefix']) if bucket_info['keycount'] > PARTITION_BUCKET_SIZE_THRESHOLD: invoke(process_bucket_partitions, bid) else: invoke(process_bucket_iterator, bid)
<SYSTEM_TASK:> Use a set of keys as a selector for the character superset <END_TASK> <USER_TASK:> Description: def get_keys_charset(keys, bid): """ Use a set of keys as a selector for the character superset Note this isn't optimal; it's probabilistic on the keyset's character population. """
    # use the keys found to sample possible chars
    chars = set()
    for k in keys:
        chars.update(k[:4])
    remainder = chars

    # Normalize charsets for matching
    normalized = {}
    for n, sset in [
            ("p", set(string.punctuation)),
            ("w", set(string.whitespace))]:
        m = chars.intersection(sset)
        if m:
            normalized[n] = m
            remainder = remainder.difference(sset)

    # Detect character sets
    charset = None
    for candidate in CharSet.charsets():
        if remainder.issubset(candidate):
            charset = candidate
            break

    if charset is None:
        raise ValueError(
            "Bucket: %s Failed charset ngram detection %r\n%s" % (
                bid, "".join(chars), "\n".join(sorted(keys))))

    for n, sset in normalized.items():
        charset = charset.symmetric_difference(sset)

    return charset
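A usage sketch; the key sample is hypothetical and the detected alphabet depends on the CharSet.charsets() candidates defined elsewhere in the module, so no exact value is shown.

# Only the first four characters of each key are sampled.
keys = ['logs/2019/01/a.gz', 'logs/2019/02/b.gz', 'data/c.csv']
charset = get_keys_charset(keys, 'dev:example-bucket')
# detect_partition_strategy below feeds this charset to NGramPartition
# to pre-generate key prefixes for parallel scanning.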
<SYSTEM_TASK:> Try to detect the best partitioning strategy for a large bucket <END_TASK> <USER_TASK:> Description: def detect_partition_strategy(bid, delimiters=('/', '-'), prefix=''): """Try to detect the best partitioning strategy for a large bucket Consider nested buckets with common prefixes, and flat buckets. """
account, bucket = bid.split(":", 1) region = connection.hget('bucket-regions', bid) versioned = bool(int(connection.hget('bucket-versions', bid))) size = int(float(connection.hget('bucket-sizes', bid))) session = get_session( json.loads(connection.hget('bucket-accounts', account))) s3 = session.client('s3', region_name=region, config=s3config) (contents_key, contents_method, continue_tokens) = BUCKET_OBJ_DESC[versioned] with bucket_ops(bid, 'detect'): keys = set() for delimiter in delimiters: method = getattr(s3, contents_method, None) results = method( Bucket=bucket, Prefix=prefix, Delimiter=delimiter) prefixes = [p['Prefix'] for p in results.get('CommonPrefixes', [])] contents = results.get(contents_key, []) keys.update([k['Key'] for k in contents]) # If we have common prefixes within limit thresholds go wide if (len(prefixes) > 0 and len(prefixes) < 1000 and len(contents) < 1000): log.info( "%s detected prefix delimiter:%s contents:%d prefixes:%d", bid, delimiter, len(contents), len(prefixes)) limit = prefix and 2 or 4 return process_bucket_partitions( bid, partition=delimiter, strategy='p', prefix_set=prefixes, limit=limit) # Detect character sets charset = get_keys_charset(keys, bid) log.info("Detected charset %s for %s", charset, bid) # Determine the depth we need to keep total api calls below threshold scan_count = size / 1000.0 for limit in range(1, 4): if math.pow(len(charset), limit) * 1000 > scan_count: break # Dispatch prefixes = ('',) prefixes = NGramPartition( charset, limit=limit).initialize_prefixes(prefixes) # random.shuffle(prefixes) # Pregen on ngram means we have many potentially useless prefixes # todo carry charset forward as param, and go incremental on prefix # ngram expansion connection.hincrby('bucket-partition', bid, len(prefixes)) return bulk_invoke( process_bucket_iterator, [bid], prefixes)
<SYSTEM_TASK:> Load last inventory dump and feed as key source. <END_TASK> <USER_TASK:> Description: def process_bucket_inventory(bid, inventory_bucket, inventory_prefix): """Load last inventory dump and feed as key source. """
log.info("Loading bucket %s keys from inventory s3://%s/%s", bid, inventory_bucket, inventory_prefix) account, bucket = bid.split(':', 1) region = connection.hget('bucket-regions', bid) versioned = bool(int(connection.hget('bucket-versions', bid))) session = boto3.Session() s3 = session.client('s3', region_name=region, config=s3config) # find any key visitors with inventory filtering account_info = json.loads(connection.hget('bucket-accounts', account)) ifilters = [v.inventory_filter for v in get_key_visitors(account_info) if v.inventory_filter] with bucket_ops(bid, 'inventory'): page_iterator = load_bucket_inventory( s3, inventory_bucket, inventory_prefix, versioned, ifilters) if page_iterator is None: log.info("bucket:%s could not find inventory" % bid) # case: inventory configured but not delivered yet # action: dispatch to bucket partition (assumes 100k+ for inventory) # - todo consider max inventory age/staleness for usage return invoke(process_bucket_partitions, bid) connection.hset('buckets-inventory', bid, 1) for page in page_iterator: invoke(process_keyset, bid, page)
<SYSTEM_TASK:> Retry support for resourcegroup tagging apis. <END_TASK> <USER_TASK:> Description: def universal_retry(method, ResourceARNList, **kw): """Retry support for resourcegroup tagging apis. The resource group tagging api typically returns a 200 status code with embedded resource specific errors. To enable resource specific retry on throttles, we extract those, perform backoff w/ jitter and continue. Other errors are immediately raised. We do not aggregate unified resource responses across retries, only the last successful response is returned for a subset of the resources if a retry is performed. """
max_attempts = 6 for idx, delay in enumerate( utils.backoff_delays(1.5, 2 ** 8, jitter=True)): response = method(ResourceARNList=ResourceARNList, **kw) failures = response.get('FailedResourcesMap', {}) if not failures: return response errors = {} throttles = set() for f_arn in failures: error_code = failures[f_arn]['ErrorCode'] if error_code == 'ThrottlingException': throttles.add(f_arn) elif error_code == 'ResourceNotFoundException': continue else: errors[f_arn] = error_code if errors: raise Exception("Resource Tag Errors %s" % (errors)) if idx == max_attempts - 1: raise Exception("Resource Tag Throttled %s" % (", ".join(throttles))) time.sleep(delay) ResourceARNList = list(throttles)
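A minimal call sketch, assuming a resourcegroupstaggingapi client; the ARN and tags are illustrative.

import boto3

client = boto3.client('resourcegroupstaggingapi')
arns = ['arn:aws:ec2:us-east-1:123456789012:instance/i-0123456789abcdef0']
# Throttled ARNs are retried with backoff and jitter; other per-resource
# errors (except ResourceNotFoundException) raise immediately.
response = universal_retry(
    client.tag_resources, ResourceARNList=arns,
    Tags={'Environment': 'dev'})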
<SYSTEM_TASK:> Return a mapping of launch configs for the given set of asgs <END_TASK> <USER_TASK:> Description: def get_launch_configs(self, asgs): """Return a mapping of launch configs for the given set of asgs"""
config_names = set() for a in asgs: if 'LaunchConfigurationName' not in a: continue config_names.add(a['LaunchConfigurationName']) if not config_names: return {} lc_resources = self.manager.get_resource_manager('launch-config') if len(config_names) < 5: configs = lc_resources.get_resources(list(config_names)) else: configs = lc_resources.resources() return { cfg['LaunchConfigurationName']: cfg for cfg in configs if cfg['LaunchConfigurationName'] in config_names}
<SYSTEM_TASK:> Support server side filtering on arns or names <END_TASK> <USER_TASK:> Description: def get_resources(self, ids, cache=True): """Support server side filtering on arns or names """
if ids[0].startswith('arn:'): params = {'LoadBalancerArns': ids} else: params = {'Names': ids} return self.query.filter(self.manager, **params)
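A sketch of the two call shapes, assuming manager stands for the resource manager instance this method is bound to; the ARN and name are illustrative.

# First element decides the server-side filter: ARNs...
manager.get_resources(
    ['arn:aws:elasticloadbalancing:us-east-1:123456789012:'
     'loadbalancer/app/my-alb/0123456789abcdef'])
# ...or names.
manager.get_resources(['my-alb'])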
<SYSTEM_TASK:> Run across a set of accounts and buckets. <END_TASK> <USER_TASK:> Description: def run(config, tag, bucket, account, not_bucket, not_account, debug, region): """Run across a set of accounts and buckets."""
logging.basicConfig( level=logging.INFO, format="%(asctime)s: %(name)s:%(levelname)s %(message)s") logging.getLogger('botocore').setLevel(level=logging.WARNING) if debug: def invoke(f, *args, **kw): # if f.func_name == 'process_keyset': # key_count = len(args[-1]) # print("debug skip keyset %d" % key_count) # return return f(*args, **kw) worker.invoke = invoke with open(config) as fh: data = utils.yaml_load(fh.read()) for account_info in data.get('accounts', ()): if tag and tag not in account_info.get('tags', ()): continue if account and account_info['name'] not in account: continue if not_account and account_info['name'] in not_account: continue if 'inventory' in data and 'inventory' not in account_info: account_info['inventory'] = data['inventory'] if 'visitors' in data and 'visitors' not in account_info: account_info['visitors'] = data['visitors'] if 'object-reporting' in data and 'object-reporting' not in account_info: account_info['object-reporting'] = data['object-reporting'] account_info['object-reporting'][ 'record-prefix'] = datetime.utcnow().strftime('%Y/%m/%d') if bucket: account_info['buckets'] = bucket if not_bucket: account_info['not-buckets'] = not_bucket if region: account_info['regions'] = region try: worker.invoke(worker.process_account, account_info) except Exception: if not debug: raise import pdb, traceback, sys traceback.print_exc() pdb.post_mortem(sys.exc_info()[-1]) raise
<SYSTEM_TASK:> Delete all persistent cluster state. <END_TASK> <USER_TASK:> Description: def reset(c7n_async=None): """Delete all persistent cluster state. """
click.echo('Delete db? Are you Sure? [yn] ', nl=False) c = click.getchar() click.echo() if c == 'y': click.echo('Wiping database') worker.connection.flushdb() elif c == 'n': click.echo('Abort!') else: click.echo('Invalid input :(')
<SYSTEM_TASK:> Report on stats by account <END_TASK> <USER_TASK:> Description: def accounts(dbpath, output, format, account, config=None, tag=None, tagprefix=None, region=(), not_region=(), not_bucket=None): """Report on stats by account"""
d = db.db(dbpath) accounts = d.accounts() formatter = ( format == 'csv' and format_accounts_csv or format_accounts_plain) if region: for a in accounts: a.buckets = [b for b in a.buckets if b.region in region] accounts = [a for a in accounts if a.bucket_count] if not_region: for a in accounts: a.buckets = [b for b in a.buckets if b.region not in not_region] accounts = [a for a in accounts if a.bucket_count] if not_bucket: for a in accounts: a.buckets = [b for b in a.buckets if b.name not in not_bucket] if config and tagprefix: account_map = {account.name: account for account in accounts} with open(config) as fh: account_data = json.load(fh).get('accounts') tag_groups = {} for a in account_data: if tag is not None and tag not in a['tags']: continue for t in a['tags']: if t.startswith(tagprefix): tvalue = t[len(tagprefix):] if not tvalue: continue if tvalue not in tag_groups: tag_groups[tvalue] = db.Account(tvalue, []) account_results = account_map.get(a['name']) if not account_results: print("missing %s" % a['name']) continue tag_groups[tvalue].buckets.extend( account_map[a['name']].buckets) accounts = tag_groups.values() formatter(accounts, output)
<SYSTEM_TASK:> watch scan rates across the cluster <END_TASK> <USER_TASK:> Description: def watch(limit): """watch scan rates across the cluster"""
period = 5.0 prev = db.db() prev_totals = None while True: click.clear() time.sleep(period) cur = db.db() cur.data['gkrate'] = {} progress = [] prev_buckets = {b.bucket_id: b for b in prev.buckets()} totals = {'scanned': 0, 'krate': 0, 'lrate': 0, 'bucket_id': 'totals'} for b in cur.buckets(): if not b.scanned: continue totals['scanned'] += b.scanned totals['krate'] += b.krate totals['lrate'] += b.lrate if b.bucket_id not in prev_buckets: b.data['gkrate'][b.bucket_id] = b.scanned / period elif b.scanned == prev_buckets[b.bucket_id].scanned: continue else: b.data['gkrate'][b.bucket_id] = ( b.scanned - prev_buckets[b.bucket_id].scanned) / period progress.append(b) if prev_totals is None: totals['gkrate'] = '...' else: totals['gkrate'] = (totals['scanned'] - prev_totals['scanned']) / period prev = cur prev_totals = totals progress = sorted(progress, key=lambda x: x.gkrate, reverse=True) if limit: progress = progress[:limit] progress.insert(0, Bag(totals)) format_plain( progress, None, explicit_only=True, keys=['bucket_id', 'scanned', 'gkrate', 'lrate', 'krate'])
<SYSTEM_TASK:> Discover the partitions on a bucket via introspection. <END_TASK> <USER_TASK:> Description: def inspect_partitions(bucket): """Discover the partitions on a bucket via introspection. For large buckets which lack s3 inventories, salactus will attempt to process objects in parallel by breaking the bucket into separate keyspace partitions. It does this with a heuristic that samples the keyspace and determines appropriate sub-partitions. This command provides additional visibility into the partitioning of a bucket by showing how salactus would partition a given bucket. """
logging.basicConfig( level=logging.INFO, format="%(asctime)s: %(name)s:%(levelname)s %(message)s") logging.getLogger('botocore').setLevel(level=logging.WARNING) state = db.db() # add db.bucket accessor found = None for b in state.buckets(): if b.name == bucket: found = b break if not found: click.echo("no bucket named: %s" % bucket) return keyset = [] partitions = [] def process_keyset(bid, page): keyset.append(len(page)) def process_bucket_iterator(bid, prefix, delimiter="", **continuation): partitions.append(prefix) # synchronous execution def invoke(f, *args, **kw): return f(*args, **kw) # unleash the monkies ;-) worker.connection.hincrby = lambda x, y, z: True worker.invoke = invoke worker.process_keyset = process_keyset worker.process_bucket_iterator = process_bucket_iterator # kick it off worker.process_bucket_partitions(b.bucket_id) keys_scanned = sum(keyset) click.echo( "Found %d partitions %s keys scanned during partitioning" % ( len(partitions), keys_scanned)) click.echo("\n".join(partitions))
<SYSTEM_TASK:> Show all information known on a bucket. <END_TASK> <USER_TASK:> Description: def inspect_bucket(bucket): """Show all information known on a bucket."""
state = db.db() found = None for b in state.buckets(): if b.name == bucket: found = b if not found: click.echo("no bucket named: %s" % bucket) return click.echo("Bucket: %s" % found.name) click.echo("Account: %s" % found.account) click.echo("Region: %s" % found.region) click.echo("Created: %s" % found.created) click.echo("Size: %s" % found.size) click.echo("Inventory: %s" % found.inventory) click.echo("Partitions: %s" % found.partitions) click.echo("Scanned: %0.2f%%" % found.percent_scanned) click.echo("") click.echo("Errors") click.echo("Denied: %s" % found.keys_denied) click.echo("BErrors: %s" % found.error_count) click.echo("KErrors: %s" % found.data['keys-error'].get(found.bucket_id, 0)) click.echo("Throttle: %s" % found.data['keys-throttled'].get(found.bucket_id, 0)) click.echo("Missing: %s" % found.data['keys-missing'].get(found.bucket_id, 0)) click.echo("Session: %s" % found.data['keys-sesserr'].get(found.bucket_id, 0)) click.echo("Connection: %s" % found.data['keys-connerr'].get(found.bucket_id, 0)) click.echo("Endpoint: %s" % found.data['keys-enderr'].get(found.bucket_id, 0))
<SYSTEM_TASK:> Extracts port ranges from the NSG rule object <END_TASK> <USER_TASK:> Description: def _get_rule_port_ranges(rule): """ Extracts port ranges from the NSG rule object Returns an array of PortsRange tuples """
properties = rule['properties'] if 'destinationPortRange' in properties: return [PortsRangeHelper._get_port_range(properties['destinationPortRange'])] else: return [PortsRangeHelper._get_port_range(r) for r in properties['destinationPortRanges']]
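Illustrative NSG rule fragments showing the two accepted shapes; this assumes the helper is the PortsRangeHelper staticmethod it appears to be, and the expected results in the comments are indicative only.

single = {'properties': {'destinationPortRange': '80'}}
multi = {'properties': {'destinationPortRanges': ['22', '8000-8080']}}

# e.g. [PortsRange(start=80, end=80)]
PortsRangeHelper._get_rule_port_ranges(single)
# e.g. [PortsRange(start=22, end=22), PortsRange(start=8000, end=8080)]
PortsRangeHelper._get_rule_port_ranges(multi)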
<SYSTEM_TASK:> Given an arbitrary resource attempt to resolve back to a qualified name. <END_TASK> <USER_TASK:> Description: def get_name(self, r): """Given an arbitrary resource attempt to resolve back to a qualified name."""
namer = ResourceNameAdapters[self.manager.resource_type.service] return namer(r)
<SYSTEM_TASK:> Get extant locks for the given account. <END_TASK> <USER_TASK:> Description: def list_locks(self, account_id=None): """Get extant locks for the given account. """
account_id = self.get_account_id(account_id) return self.http.get( "%s/%s/locks" % (self.endpoint, account_id), auth=self.get_api_auth())
<SYSTEM_TASK:> Get the lock status for a given resource. <END_TASK> <USER_TASK:> Description: def lock_status(self, resource_id, parent_id=None, account_id=None): """Get the lock status for a given resource. for security groups, parent id is their vpc. """
account_id = self.get_account_id(account_id) params = parent_id and {'parent_id': parent_id} or None return self.http.get( "%s/%s/locks/%s" % (self.endpoint, account_id, resource_id), params=params, auth=self.get_api_auth())
<SYSTEM_TASK:> report on a cross account policy execution. <END_TASK> <USER_TASK:> Description: def report(config, output, use, output_dir, accounts, field, no_default_fields, tags, region, debug, verbose, policy, policy_tags, format, resource, cache_path): """report on a cross account policy execution."""
accounts_config, custodian_config, executor = init( config, use, debug, verbose, accounts, tags, policy, resource=resource, policy_tags=policy_tags) resource_types = set() for p in custodian_config.get('policies'): resource_types.add(p['resource']) if len(resource_types) > 1: raise ValueError("can only report on one resource type at a time") elif not len(custodian_config['policies']) > 0: raise ValueError("no matching policies found") records = [] with executor(max_workers=WORKER_COUNT) as w: futures = {} for a in accounts_config.get('accounts', ()): for r in resolve_regions(region or a.get('regions', ())): futures[w.submit( report_account, a, r, custodian_config, output_dir, cache_path, debug)] = (a, r) for f in as_completed(futures): a, r = futures[f] if f.exception(): if debug: raise log.warning( "Error running policy in %s @ %s exception: %s", a['name'], r, f.exception()) records.extend(f.result()) log.debug( "Found %d records across %d accounts and %d policies", len(records), len(accounts_config['accounts']), len(custodian_config['policies'])) if format == 'json': dumps(records, output, indent=2) return prefix_fields = OrderedDict( (('Account', 'account'), ('Region', 'region'), ('Policy', 'policy'))) config = Config.empty() factory = resource_registry.get(list(resource_types)[0]) formatter = Formatter( factory.resource_type, extra_fields=field, include_default_fields=not(no_default_fields), include_region=False, include_policy=False, fields=prefix_fields) rows = formatter.to_csv(records, unique=False) writer = UnicodeWriter(output, formatter.headers()) writer.writerow(formatter.headers()) writer.writerows(rows)
<SYSTEM_TASK:> run an aws script across accounts <END_TASK> <USER_TASK:> Description: def run_script(config, output_dir, accounts, tags, region, echo, serial, script_args): """run an aws script across accounts"""
# TODO count up on success / error / error list by account accounts_config, custodian_config, executor = init( config, None, serial, True, accounts, tags, (), ()) if echo: print("command to run: `%s`" % (" ".join(script_args))) return # Support fully quoted scripts, which are common to avoid parameter # overlap with c7n-org run-script. if len(script_args) == 1 and " " in script_args[0]: script_args = script_args[0].split() with executor(max_workers=WORKER_COUNT) as w: futures = {} for a in accounts_config.get('accounts', ()): for r in resolve_regions(region or a.get('regions', ())): futures[ w.submit(run_account_script, a, r, output_dir, serial, script_args)] = (a, r) for f in as_completed(futures): a, r = futures[f] if f.exception(): if serial: raise log.warning( "Error running script in %s @ %s exception: %s", a['name'], r, f.exception()) exit_code = f.result() if exit_code == 0: log.info( "ran script on account:%s region:%s script: `%s`", a['name'], r, " ".join(script_args)) else: log.info( "error running script on account:%s region:%s script: `%s`", a['name'], r, " ".join(script_args))
<SYSTEM_TASK:> Execute a set of policies on an account. <END_TASK> <USER_TASK:> Description: def run_account(account, region, policies_config, output_path, cache_period, cache_path, metrics, dryrun, debug): """Execute a set of policies on an account. """
logging.getLogger('custodian.output').setLevel(logging.ERROR + 1) CONN_CACHE.session = None CONN_CACHE.time = None # allow users to specify interpolated output paths if '{' not in output_path: output_path = os.path.join(output_path, account['name'], region) cache_path = os.path.join(cache_path, "%s-%s.cache" % (account['account_id'], region)) config = Config.empty( region=region, cache=cache_path, cache_period=cache_period, dryrun=dryrun, output_dir=output_path, account_id=account['account_id'], metrics_enabled=metrics, log_group=None, profile=None, external_id=None) env_vars = account_tags(account) if account.get('role'): if isinstance(account['role'], six.string_types): config['assume_role'] = account['role'] config['external_id'] = account.get('external_id') else: env_vars.update( _get_env_creds(get_session(account, 'custodian', region), region)) elif account.get('profile'): config['profile'] = account['profile'] policies = PolicyCollection.from_data(policies_config, config) policy_counts = {} success = True st = time.time() with environ(**env_vars): for p in policies: # Variable expansion and non schema validation (not optional) p.expand_variables(p.get_variables(account.get('vars', {}))) p.validate() log.debug( "Running policy:%s account:%s region:%s", p.name, account['name'], region) try: resources = p.run() policy_counts[p.name] = resources and len(resources) or 0 if not resources: continue log.info( "Ran account:%s region:%s policy:%s matched:%d time:%0.2f", account['name'], region, p.name, len(resources), time.time() - st) except ClientError as e: success = False if e.response['Error']['Code'] == 'AccessDenied': log.warning('Access denied account:%s region:%s', account['name'], region) return policy_counts, success log.error( "Exception running policy:%s account:%s region:%s error:%s", p.name, account['name'], region, e) continue except Exception as e: success = False log.error( "Exception running policy:%s account:%s region:%s error:%s", p.name, account['name'], region, e) if not debug: continue import traceback, pdb, sys traceback.print_exc() pdb.post_mortem(sys.exc_info()[-1]) raise return policy_counts, success
<SYSTEM_TASK:> run a custodian policy across accounts <END_TASK> <USER_TASK:> Description: def run(config, use, output_dir, accounts, tags, region, policy, policy_tags, cache_period, cache_path, metrics, dryrun, debug, verbose, metrics_uri): """run a custodian policy across accounts"""
accounts_config, custodian_config, executor = init( config, use, debug, verbose, accounts, tags, policy, policy_tags=policy_tags) policy_counts = Counter() success = True if metrics_uri: metrics = metrics_uri if not cache_path: cache_path = os.path.expanduser("~/.cache/c7n-org") if not os.path.exists(cache_path): os.makedirs(cache_path) with executor(max_workers=WORKER_COUNT) as w: futures = {} for a in accounts_config['accounts']: for r in resolve_regions(region or a.get('regions', ())): futures[w.submit( run_account, a, r, custodian_config, output_dir, cache_period, cache_path, metrics, dryrun, debug)] = (a, r) for f in as_completed(futures): a, r = futures[f] if f.exception(): if debug: raise log.warning( "Error running policy in %s @ %s exception: %s", a['name'], r, f.exception()) account_region_pcounts, account_region_success = f.result() for p in account_region_pcounts: policy_counts[p] += account_region_pcounts[p] if not account_region_success: success = False log.info("Policy resource counts %s" % policy_counts) if not success: sys.exit(1)
<SYSTEM_TASK:> Ensure all logging output has been flushed. <END_TASK> <USER_TASK:> Description: def flush(self): """Ensure all logging output has been flushed."""
if self.shutdown: return self.flush_buffers(force=True) self.queue.put(FLUSH_MARKER) self.queue.join()
<SYSTEM_TASK:> start thread transports. <END_TASK> <USER_TASK:> Description: def start_transports(self): """start thread transports."""
self.transport = Transport( self.queue, self.batch_size, self.batch_interval, self.session_factory) thread = threading.Thread(target=self.transport.loop) self.threads.append(thread) thread.daemon = True thread.start()
<SYSTEM_TASK:> Handle various client side errors when describing images <END_TASK> <USER_TASK:> Description: def extract_bad_ami(e): """Handle various client side errors when describing images"""
msg = e.response['Error']['Message'] error = e.response['Error']['Code'] e_ami_ids = None if error == 'InvalidAMIID.NotFound': e_ami_ids = [ e_ami_id.strip() for e_ami_id in msg[msg.find("'[") + 2:msg.rfind("]'")].split(',')] log.warning("Image not found %s" % e_ami_ids) elif error == 'InvalidAMIID.Malformed': e_ami_ids = [msg[msg.find('"') + 1:msg.rfind('"')]] log.warning("Image id malformed %s" % e_ami_ids) return e_ami_ids
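A self-contained sketch of the NotFound parse; the error message text mimics EC2's format, and the logger stands in for the module-level log the function uses.

import logging

from botocore.exceptions import ClientError

log = logging.getLogger('custodian.ami')  # module-level logger assumed above

err = ClientError(
    {'Error': {
        'Code': 'InvalidAMIID.NotFound',
        'Message': "The image id '[ami-11112222, ami-33334444]' does not exist"}},
    'DescribeImages')
assert extract_bad_ami(err) == ['ami-11112222', 'ami-33334444']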
<SYSTEM_TASK:> format config for lambda exec <END_TASK> <USER_TASK:> Description: def format_json(config): """format config for lambda exec """
with open(config) as fh: print(json.dumps(yaml.safe_load(fh.read()), indent=2))
<SYSTEM_TASK:> Attempt to acquire any pending locks. <END_TASK> <USER_TASK:> Description: def flush_pending(function): """Attempt to acquire any pending locks. """
s = boto3.Session() client = s.client('lambda') results = client.invoke( FunctionName=function, Payload=json.dumps({'detail-type': 'Scheduled Event'}) ) content = results.pop('Payload').read() pprint.pprint(results) pprint.pprint(json.loads(content))
<SYSTEM_TASK:> Check config status in an account. <END_TASK> <USER_TASK:> Description: def config_status(): """ Check config status in an account. """
s = boto3.Session() client = s.client('config') channels = client.describe_delivery_channel_status()[ 'DeliveryChannelsStatus'] for c in channels: print(yaml.safe_dump({ c['name']: dict( snapshot=str( c['configSnapshotDeliveryInfo'].get('lastSuccessfulTime')), history=str( c['configHistoryDeliveryInfo'].get('lastSuccessfulTime')), stream=str( c['configStreamDeliveryInfo'].get('lastStatusChangeTime')) ), }, default_flow_style=False))
<SYSTEM_TASK:> run the local app server, assuming a role into the account <END_TASK> <USER_TASK:> Description: def local(reload, port): """run the local app server, assuming a role into the account """
import logging from bottle import run from app import controller, app from c7n.resources import load_resources load_resources() print("Loaded resources definitions") logging.basicConfig(level=logging.DEBUG) logging.getLogger('botocore').setLevel(logging.WARNING) if controller.db.provision(): print("Table Created") run(app, reloader=reload, port=port)
<SYSTEM_TASK:> Given a class, return its docstring. <END_TASK> <USER_TASK:> Description: def _schema_get_docstring(starting_class): """ Given a class, return its docstring. If no docstring is present for the class, search base classes in MRO for a docstring. """
for cls in inspect.getmro(starting_class): if inspect.getdoc(cls): return inspect.getdoc(cls)
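A small worked example of the MRO fallback; the class names are hypothetical.

import inspect  # the helper above relies on the inspect module

class BaseAction(object):
    """Base action docstring."""

class TagAction(BaseAction):
    pass  # no docstring of its own

assert _schema_get_docstring(TagAction) == "Base action docstring."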
<SYSTEM_TASK:> For tab-completion via argcomplete, return completion options. <END_TASK> <USER_TASK:> Description: def schema_completer(prefix): """ For tab-completion via argcomplete, return completion options. For the given prefix so far, return the possible options. Note that filtering via startswith happens after this list is returned. """
from c7n import schema load_resources() components = prefix.split('.') if components[0] in provider.clouds.keys(): cloud_provider = components.pop(0) provider_resources = provider.resources(cloud_provider) else: cloud_provider = 'aws' provider_resources = provider.resources('aws') components[0] = "aws.%s" % components[0] # Completions for resource if len(components) == 1: choices = [r for r in provider.resources().keys() if r.startswith(components[0])] if len(choices) == 1: choices += ['{}{}'.format(choices[0], '.')] return choices if components[0] not in provider_resources.keys(): return [] # Completions for category if len(components) == 2: choices = ['{}.{}'.format(components[0], x) for x in ('actions', 'filters') if x.startswith(components[1])] if len(choices) == 1: choices += ['{}{}'.format(choices[0], '.')] return choices # Completions for item elif len(components) == 3: resource_mapping = schema.resource_vocabulary(cloud_provider) return ['{}.{}.{}'.format(components[0], components[1], x) for x in resource_mapping[components[0]][components[1]]] return []
<SYSTEM_TASK:> Determine the start and end dates based on user-supplied options. <END_TASK> <USER_TASK:> Description: def _metrics_get_endpoints(options): """ Determine the start and end dates based on user-supplied options. """
if bool(options.start) ^ bool(options.end): log.error('--start and --end must be specified together') sys.exit(1) if options.start and options.end: start = options.start end = options.end else: end = datetime.utcnow() start = end - timedelta(options.days) return start, end
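A usage sketch; the options object only needs start, end and days attributes, so argparse.Namespace works as a stand-in with illustrative values.

from argparse import Namespace
from datetime import datetime, timedelta

# Explicit window: both --start and --end were supplied.
opts = Namespace(start=datetime(2019, 1, 1), end=datetime(2019, 1, 8), days=None)
start, end = _metrics_get_endpoints(opts)
assert (start, end) == (opts.start, opts.end)

# Relative window: neither supplied, fall back to the trailing N days.
opts = Namespace(start=None, end=None, days=14)
start, end = _metrics_get_endpoints(opts)
assert end - start == timedelta(days=14)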
<SYSTEM_TASK:> EC2 API and AWOL Tags <END_TASK> <USER_TASK:> Description: def augment(self, resources): """EC2 API and AWOL Tags While the ec2 api generally returns tags when doing describe_x on various resources, it may also silently fail to do so unless a tag is used as a filter. See the footnote in the official documentation: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#Using_Tags_CLI A priori we may be using custodian to ensure tags (including name), so there isn't a good default to ensure that we will always get tags from describe_x calls. """
# First if we're in event based lambda go ahead and skip this, # tags can't be trusted in ec2 instances immediately post creation. if not resources or self.manager.data.get( 'mode', {}).get('type', '') in ( 'cloudtrail', 'ec2-instance-state'): return resources # AWOL detector, so we don't make extraneous api calls. resource_count = len(resources) search_count = min(int(resource_count % 0.05) + 1, 5) if search_count > resource_count: search_count = resource_count found = False for r in random.sample(resources, search_count): if 'Tags' in r: found = True break if found: return resources # Okay go and do the tag lookup client = utils.local_session(self.manager.session_factory).client('ec2') tag_set = self.manager.retry( client.describe_tags, Filters=[{'Name': 'resource-type', 'Values': ['instance']}])['Tags'] resource_tags = {} for t in tag_set: t.pop('ResourceType') rid = t.pop('ResourceId') resource_tags.setdefault(rid, []).append(t) m = self.manager.get_model() for r in resources: r['Tags'] = resource_tags.get(r[m.id], ()) return resources
<SYSTEM_TASK:> Create a lambda code archive for running custodian. <END_TASK> <USER_TASK:> Description: def custodian_archive(packages=None): """Create a lambda code archive for running custodian. Lambda archive currently always includes `c7n` and `pkg_resources`. Add additional packages in the mode block. Example policy that includes additional packages .. code-block:: yaml policy: name: lambda-archive-example resource: s3 mode: packages: - botocore packages: List of additional packages to include in the lambda archive. """
modules = {'c7n', 'pkg_resources'} if packages: modules = filter(None, modules.union(packages)) return PythonPackageArchive(*sorted(modules))