<SYSTEM_TASK:>
Add the file at ``src`` to the archive.
<END_TASK>
<USER_TASK:>
Description:
def add_file(self, src, dest=None):
"""Add the file at ``src`` to the archive.
If ``dest`` is ``None`` then it is added under just the original
filename. So ``add_file('foo/bar.txt')`` ends up at ``bar.txt`` in the
archive, while ``add_file('bar.txt', 'foo/bar.txt')`` ends up at
``foo/bar.txt``.
""" |
dest = dest or os.path.basename(src)
with open(src, 'rb') as fp:
contents = fp.read()
self.add_contents(dest, contents) |
<SYSTEM_TASK:>
Add file contents to the archive under ``dest``.
<END_TASK>
<USER_TASK:>
Description:
def add_contents(self, dest, contents):
"""Add file contents to the archive under ``dest``.
If ``dest`` is a path, it will be added compressed and world-readable
(user-writeable). You may also pass a :py:class:`~zipfile.ZipInfo` for
custom behavior.
""" |
assert not self._closed, "Archive closed"
if not isinstance(dest, zipfile.ZipInfo):
dest = zinfo(dest) # see for some caveats
# Ensure we apply the compression
dest.compress_type = self.zip_compression
# Mark host OS as Linux for all archives
dest.create_system = 3
self._zip_file.writestr(dest, contents) |
<SYSTEM_TASK:>
Close the zip file.
<END_TASK>
<USER_TASK:>
Description:
def close(self):
"""Close the zip file.
Note underlying tempfile is removed when archive is garbage collected.
""" |
self._closed = True
self._zip_file.close()
log.debug(
"Created custodian serverless archive size: %0.2fmb",
(os.path.getsize(self._temp_archive_file.name) / (
1024.0 * 1024.0)))
return self |
<SYSTEM_TASK:>
Return the b64 encoded sha256 checksum of the archive.
<END_TASK>
<USER_TASK:>
Description:
def get_checksum(self, encoder=base64.b64encode, hasher=hashlib.sha256):
"""Return the b64 encoded sha256 checksum of the archive.""" |
assert self._closed, "Archive not closed"
with open(self._temp_archive_file.name, 'rb') as fh:
return encoder(checksum(fh, hasher())).decode('ascii') |
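A minimal usage sketch of the archive methods above, assuming `archive` is an instance of the archive class they belong to and that the source file exists locally:
archive.add_file('src/handler.py')              # stored as 'handler.py'
archive.add_contents('config.json', '{"mode": "pull"}')
archive.close()                                 # finalize before checksumming
digest = archive.get_checksum()                 # b64-encoded sha256 of the zip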
<SYSTEM_TASK:>
Create or update an alias for the given function.
<END_TASK>
<USER_TASK:>
Description:
def publish_alias(self, func_data, alias):
"""Create or update an alias for the given function.
""" |
if not alias:
return func_data['FunctionArn']
func_name = func_data['FunctionName']
func_version = func_data['Version']
exists = resource_exists(
self.client.get_alias, FunctionName=func_name, Name=alias)
if not exists:
log.debug("Publishing custodian lambda alias %s", alias)
alias_result = self.client.create_alias(
FunctionName=func_name,
Name=alias,
FunctionVersion=func_version)
else:
if (exists['FunctionVersion'] == func_version and
exists['Name'] == alias):
return exists['AliasArn']
log.debug('Updating custodian lambda alias %s', alias)
alias_result = self.client.update_alias(
FunctionName=func_name,
Name=alias,
FunctionVersion=func_version)
return alias_result['AliasArn'] |
<SYSTEM_TASK:>
report on guard duty enablement by account
<END_TASK>
<USER_TASK:>
Description:
def report(config, tags, accounts, master, debug, region):
"""report on guard duty enablement by account""" |
accounts_config, master_info, executor = guardian_init(
config, debug, master, accounts, tags)
session = get_session(
master_info.get('role'), 'c7n-guardian',
master_info.get('profile'),
region)
client = session.client('guardduty')
detector_id = get_or_create_detector_id(client)
members = {m['AccountId']: m for m in
client.list_members(DetectorId=detector_id).get('Members')}
accounts_report = []
for a in accounts_config['accounts']:
ar = dict(a)
accounts_report.append(ar)
ar.pop('tags', None)
ar.pop('role')
ar.pop('regions', None)
if a['account_id'] not in members:
ar['member'] = False
ar['status'] = None
ar['invited'] = None
ar['updated'] = datetime.datetime.now().isoformat()
continue
m = members[a['account_id']]
ar['status'] = m['RelationshipStatus']
ar['member'] = True
ar['joined'] = m['InvitedAt']
ar['updated'] = m['UpdatedAt']
accounts_report.sort(key=operator.itemgetter('updated'), reverse=True)
print(tabulate(accounts_report, headers=('keys'))) |
<SYSTEM_TASK:>
suspend guard duty in the given accounts.
<END_TASK>
<USER_TASK:>
Description:
def disable(config, tags, accounts, master, debug,
suspend, disable_detector, delete_detector, dissociate, region):
"""suspend guard duty in the given accounts.""" |
accounts_config, master_info, executor = guardian_init(
config, debug, master, accounts, tags)
if sum(map(int, (suspend, disable_detector, dissociate))) != 1:
raise ValueError((
"One and only of suspend, disable-detector, dissociate"
"can be specified."))
master_session = get_session(
master_info['role'], 'c7n-guardian',
master_info.get('profile'), region)
master_client = master_session.client('guardduty')
detector_id = get_or_create_detector_id(master_client)
if suspend:
unprocessed = master_client.stop_monitoring_members(
DetectorId=detector_id,
AccountIds=[a['account_id'] for a in accounts_config['accounts']]
).get('UnprocessedAccounts', ())
if unprocessed:
log.warning(
"Following accounts where unprocessed\n %s",
format_event(unprocessed))
log.info("Stopped monitoring %d accounts in master",
len(accounts_config['accounts']))
return
if dissociate:
master_client.disassociate_members(
DetectorId=detector_id,
AccountIds=[a['account_id'] for a in accounts_config['accounts']])
# Seems like there's a couple of ways to disable an account
# delete the detector (member), disable the detector (master or member),
# or disassociate members, or from member disassociate from master.
for a in accounts_config['accounts']:
member_session = get_session(
a['role'], 'c7n-guardian',
a.get('profile'), region)
member_client = member_session.client('guardduty')
m_detector_id = get_or_create_detector_id(member_client)
if disable_detector:
member_client.update_detector(
DetectorId=m_detector_id, Enable=False)
log.info("Disabled detector in account:%s", a['name'])
if dissociate:
try:
log.info("Disassociated member account:%s", a['name'])
result = member_client.disassociate_from_master_account(
DetectorId=m_detector_id)
log.info("Result %s", format_event(result))
except ClientError as e:
if e.response['Error']['Code'] == 'InvalidInputException':
continue
if delete_detector:
member_client.delete_detector(DetectorId=m_detector_id)
log.info("Deleted detector in account:%s", a['name']) |
<SYSTEM_TASK:>
enable guard duty on a set of accounts
<END_TASK>
<USER_TASK:>
Description:
def enable(config, master, tags, accounts, debug, message, region):
"""enable guard duty on a set of accounts""" |
accounts_config, master_info, executor = guardian_init(
config, debug, master, accounts, tags)
regions = expand_regions(region)
for r in regions:
log.info("Processing Region:%s", r)
enable_region(master_info, accounts_config, executor, message, r) |
<SYSTEM_TASK:>
Return all the security group names configured in this action.
<END_TASK>
<USER_TASK:>
Description:
def get_action_group_names(self):
"""Return all the security group names configured in this action.""" |
return self.get_group_names(
list(itertools.chain(
*[self._get_array('add'),
self._get_array('remove'),
self._get_array('isolation-group')]))) |
<SYSTEM_TASK:>
Resolve security group names to security group resources.
<END_TASK>
<USER_TASK:>
Description:
def get_groups_by_names(self, names):
"""Resolve security names to security groups resources.""" |
if not names:
return []
client = utils.local_session(
self.manager.session_factory).client('ec2')
sgs = self.manager.retry(
client.describe_security_groups,
Filters=[{
'Name': 'group-name', 'Values': names}]).get(
'SecurityGroups', [])
unresolved = set(names)
for s in sgs:
if s['GroupName'] in unresolved:
unresolved.remove(s['GroupName'])
if unresolved:
raise PolicyExecutionError(self._format_error(
"policy:{policy} security groups not found "
"requested: {names}, found: {groups}",
names=list(unresolved), groups=[g['GroupId'] for g in sgs]))
return sgs |
<SYSTEM_TASK:>
Resolve any security group names to the corresponding group ids
<END_TASK>
<USER_TASK:>
Description:
def resolve_group_names(self, r, target_group_ids, groups):
"""Resolve any security group names to the corresponding group ids
With the context of a given network attached resource.
""" |
names = self.get_group_names(target_group_ids)
if not names:
return target_group_ids
target_group_ids = list(target_group_ids)
vpc_id = self.vpc_expr.search(r)
if not vpc_id:
raise PolicyExecutionError(self._format_error(
"policy:{policy} non vpc attached resource used "
"with modify-security-group: {resource_id}",
resource_id=r[self.manager.resource_type.id]))
found = False
for n in names:
for g in groups:
if g['GroupName'] == n and g['VpcId'] == vpc_id:
found = g['GroupId']
if not found:
raise PolicyExecutionError(self._format_error((
"policy:{policy} could not resolve sg:{name} for "
"resource:{resource_id} in vpc:{vpc}"),
name=n,
resource_id=r[self.manager.resource_type.id], vpc=vpc_id))
target_group_ids.remove(n)
target_group_ids.append(found)
return target_group_ids |
<SYSTEM_TASK:>
Resolve the resources security groups that need be modified.
<END_TASK>
<USER_TASK:>
Description:
def resolve_remove_symbols(self, r, target_group_ids, rgroups):
"""Resolve the resources security groups that need be modified.
Specifically handles symbolic names that match annotations from policy filters
for groups being removed.
""" |
if 'matched' in target_group_ids:
return r.get('c7n:matched-security-groups', ())
elif 'network-location' in target_group_ids:
for reason in r.get('c7n:NetworkLocation', ()):
if reason['reason'] == 'SecurityGroupMismatch':
return list(reason['security-groups'])
elif 'all' in target_group_ids:
return rgroups
return target_group_ids |
<SYSTEM_TASK:>
Return lists of security groups to set on each resource
<END_TASK>
<USER_TASK:>
Description:
def get_groups(self, resources):
"""Return lists of security groups to set on each resource
For each input resource, parse the various add/remove/isolation-
group policies for 'modify-security-groups' to find the resulting
set of VPC security groups to attach to that resource.
Returns a list of lists containing the resulting VPC security groups
that should end up on each resource passed in.
:param resources: List of resources containing VPC Security Groups
:return: List of lists of security groups per resource
""" |
resolved_groups = self.get_groups_by_names(self.get_action_group_names())
return_groups = []
for idx, r in enumerate(resources):
rgroups = self.sg_expr.search(r) or []
add_groups = self.resolve_group_names(
r, self._get_array('add'), resolved_groups)
remove_groups = self.resolve_remove_symbols(
r,
self.resolve_group_names(
r, self._get_array('remove'), resolved_groups),
rgroups)
isolation_groups = self.resolve_group_names(
r, self._get_array('isolation-group'), resolved_groups)
for g in remove_groups:
if g in rgroups:
rgroups.remove(g)
for g in add_groups:
if g not in rgroups:
rgroups.append(g)
if not rgroups:
rgroups = list(isolation_groups)
return_groups.append(rgroups)
return return_groups |
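A standalone sketch of the merge described in get_groups, using plain lists of hypothetical group ids in place of the action's resolution helpers; it shows the isolation-group fallback when every current group is removed:
current = ['sg-aaa', 'sg-bbb']        # groups currently attached to the resource
add = []                              # resolved 'add' groups
remove = ['sg-aaa', 'sg-bbb']         # resolved 'remove' groups (e.g. 'matched')
isolation = ['sg-quarantine']         # resolved 'isolation-group' groups
for g in remove:
    if g in current:
        current.remove(g)
for g in add:
    if g not in current:
        current.append(g)
if not current:
    current = list(isolation)
print(current)  # ['sg-quarantine']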
<SYSTEM_TASK:>
jsonschema generation helper
<END_TASK>
<USER_TASK:>
Description:
def type_schema(
type_name, inherits=None, rinherit=None,
aliases=None, required=None, **props):
"""jsonschema generation helper
params:
- type_name: name of the type
- inherits: list of document fragments that are required via anyOf[$ref]
- rinherit: use another schema as a base for this, basically work around
inherits issues with additionalProperties and type enums.
- aliases: additional names this type may be called
- required: list of required properties, by default 'type' is required
- props: additional key value properties
""" |
if aliases:
type_names = [type_name]
type_names.extend(aliases)
else:
type_names = [type_name]
if rinherit:
s = copy.deepcopy(rinherit)
s['properties']['type'] = {'enum': type_names}
else:
s = {
'type': 'object',
'properties': {
'type': {'enum': type_names}}}
# Ref based inheritance and additional properties don't mix well.
# https://stackoverflow.com/questions/22689900/json-schema-allof-with-additionalproperties
if not inherits:
s['additionalProperties'] = False
s['properties'].update(props)
if not required:
required = []
if isinstance(required, list):
required.append('type')
s['required'] = required
if inherits:
extended = s
s = {'allOf': [{'$ref': i} for i in inherits]}
s['allOf'].append(extended)
return s |
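A usage sketch, assuming the type_schema helper above is in scope; the property names here are illustrative:
schema = type_schema(
    'mark-for-op',
    op={'type': 'string'},
    days={'type': 'number', 'minimum': 0},
    required=['op'])
# schema is an object schema whose 'type' property is constrained to the
# enum ['mark-for-op'], with 'op' and 'type' required and
# additionalProperties disabled (no inherits were given).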
<SYSTEM_TASK:>
Return a mapping of key value to resources with the corresponding value.
<END_TASK>
<USER_TASK:>
Description:
def group_by(resources, key):
"""Return a mapping of key value to resources with the corresponding value.
Key may be specified as dotted form for nested dictionary lookup
""" |
resource_map = {}
parts = key.split('.')
for r in resources:
v = r
for k in parts:
v = v.get(k)
if not isinstance(v, dict):
break
resource_map.setdefault(v, []).append(r)
return resource_map |
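A usage sketch with the dotted-key form, assuming group_by above is in scope:
resources = [
    {'InstanceId': 'i-1', 'Placement': {'AvailabilityZone': 'us-east-1a'}},
    {'InstanceId': 'i-2', 'Placement': {'AvailabilityZone': 'us-east-1b'}},
    {'InstanceId': 'i-3', 'Placement': {'AvailabilityZone': 'us-east-1a'}}]
by_az = group_by(resources, 'Placement.AvailabilityZone')
# {'us-east-1a': [resources i-1 and i-3], 'us-east-1b': [resource i-2]}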
<SYSTEM_TASK:>
Some API sources return lowerCased keys, whereas describe calls
<END_TASK>
<USER_TASK:>
Description:
def camelResource(obj):
"""Some sources from apis return lowerCased where as describe calls
always return TitleCase, this function turns the former to the later
""" |
if not isinstance(obj, dict):
return obj
for k in list(obj.keys()):
v = obj.pop(k)
obj["%s%s" % (k[0].upper(), k[1:])] = v
if isinstance(v, dict):
camelResource(v)
elif isinstance(v, list):
list(map(camelResource, v))
return obj |
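An illustration of the in-place conversion, assuming camelResource above is in scope:
obj = {'instanceId': 'i-1234',
       'state': {'name': 'running'},
       'tags': [{'key': 'App', 'value': 'web'}]}
camelResource(obj)
# obj is now:
# {'InstanceId': 'i-1234', 'State': {'Name': 'running'},
#  'Tags': [{'Key': 'App', 'Value': 'web'}]}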
<SYSTEM_TASK:>
Return a list of ec2 instances for the query.
<END_TASK>
<USER_TASK:>
Description:
def query_instances(session, client=None, **query):
"""Return a list of ec2 instances for the query.
""" |
if client is None:
client = session.client('ec2')
p = client.get_paginator('describe_instances')
results = p.paginate(**query)
return list(itertools.chain(
*[r["Instances"] for r in itertools.chain(
*[pp['Reservations'] for pp in results])])) |
<SYSTEM_TASK:>
Cache a session thread local for up to 45m
<END_TASK>
<USER_TASK:>
Description:
def local_session(factory):
"""Cache a session thread local for up to 45m""" |
factory_region = getattr(factory, 'region', 'global')
s = getattr(CONN_CACHE, factory_region, {}).get('session')
t = getattr(CONN_CACHE, factory_region, {}).get('time')
n = time.time()
if s is not None and t + (60 * 45) > n:
return s
s = factory()
setattr(CONN_CACHE, factory_region, {'session': s, 'time': n})
return s |
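A minimal sketch of the caching behavior, assuming local_session and its module-level CONN_CACHE thread-local are in scope; the factory below is a stand-in for a real boto3 session factory:
class FakeFactory(object):
    # hypothetical region attribute; local_session uses it as the cache key
    region = 'us-east-1'
    def __call__(self):
        return object()  # stand-in for a boto3 Session

factory = FakeFactory()
s1 = local_session(factory)
s2 = local_session(factory)
assert s1 is s2  # a second call within 45 minutes reuses the cached session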
<SYSTEM_TASK:>
Return an identifier for a snapshot of a database or cluster.
<END_TASK>
<USER_TASK:>
Description:
def snapshot_identifier(prefix, db_identifier):
"""Return an identifier for a snapshot of a database or cluster.
""" |
now = datetime.now()
return '%s-%s-%s' % (prefix, db_identifier, now.strftime('%Y-%m-%d-%H-%M')) |
<SYSTEM_TASK:>
Decorator for retry boto3 api call on transient errors.
<END_TASK>
<USER_TASK:>
Description:
def get_retry(codes=(), max_attempts=8, min_delay=1, log_retries=False):
"""Decorator for retry boto3 api call on transient errors.
https://www.awsarchitectureblog.com/2015/03/backoff.html
https://en.wikipedia.org/wiki/Exponential_backoff
:param codes: A sequence of retryable error codes.
:param max_attempts: The max number of retries, by default the delay
time is proportional to the max number of attempts.
:param log_retries: Whether we should log retries; if given, it
specifies the logging level at which retries are logged.
:param _max_delay: The maximum delay for any retry interval. *note*
this parameter is only exposed for unit testing, as it's
derived from the number of attempts.
Returns a function for invoking aws client calls that
retries on retryable error codes.
""" |
max_delay = max(min_delay, 2) ** max_attempts
def _retry(func, *args, **kw):
for idx, delay in enumerate(
backoff_delays(min_delay, max_delay, jitter=True)):
try:
return func(*args, **kw)
except ClientError as e:
if e.response['Error']['Code'] not in codes:
raise
elif idx == max_attempts - 1:
raise
if log_retries:
worker_log.log(
log_retries,
"retrying %s on error:%s attempt:%d last delay:%0.2f",
func, e.response['Error']['Code'], idx, delay)
time.sleep(delay)
return _retry |
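A usage sketch, assuming get_retry and the backoff_delays helper it relies on are in scope; the flaky function simulates a throttled API via botocore's ClientError:
from botocore.exceptions import ClientError

calls = {'n': 0}

def flaky():
    calls['n'] += 1
    if calls['n'] < 3:
        raise ClientError(
            {'Error': {'Code': 'Throttling', 'Message': 'Rate exceeded'}},
            'DescribeInstances')
    return 'ok'

retry = get_retry(('Throttling',))
print(retry(flaky))  # sleeps through two throttles, then prints 'ok'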
<SYSTEM_TASK:>
Generic wrapper to log uncaught exceptions in a function.
<END_TASK>
<USER_TASK:>
Description:
def worker(f):
"""Generic wrapper to log uncaught exceptions in a function.
When we cross concurrent.futures executor boundaries we lose our
traceback information, and when doing bulk operations we may tolerate
transient failures on a partial subset. However we still want to have
full accounting of the error in the logs, in a format that our error
collection (cwl subscription) can still pick up.
""" |
def _f(*args, **kw):
try:
return f(*args, **kw)
except Exception:
worker_log.exception(
'Error invoking %s',
"%s.%s" % (f.__module__, f.__name__))
raise
functools.update_wrapper(_f, f)
return _f |
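A usage sketch of the decorator, assuming worker and its worker_log logger are in scope:
from concurrent.futures import ThreadPoolExecutor

@worker
def risky(n):
    if n == 0:
        raise ValueError("boom")
    return 10 / n

with ThreadPoolExecutor(max_workers=2) as pool:
    results = list(pool.map(risky, [1, 2]))
# If risky(0) were submitted, the full traceback would be logged by
# worker_log before the exception propagates out of the future.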
<SYSTEM_TASK:>
Reformat schema to be in a more displayable format.
<END_TASK>
<USER_TASK:>
Description:
def reformat_schema(model):
""" Reformat schema to be in a more displayable format. """ |
if not hasattr(model, 'schema'):
return "Model '{}' does not have a schema".format(model)
if 'properties' not in model.schema:
return "Schema in unexpected format."
ret = copy.deepcopy(model.schema['properties'])
if 'type' in ret:
del ret['type']
for key in model.schema.get('required', []):
if key in ret:
ret[key]['required'] = True
return ret |
<SYSTEM_TASK:>
Returns all extant rds engine upgrades.
<END_TASK>
<USER_TASK:>
Description:
def _get_available_engine_upgrades(client, major=False):
"""Returns all extant rds engine upgrades.
As a nested mapping of engine type to known versions
and their upgrades.
Defaults to minor upgrades, but configurable to major.
Example::
>>> _get_available_engine_upgrades(client)
{
'oracle-se2': {'12.1.0.2.v2': '12.1.0.2.v5',
'12.1.0.2.v3': '12.1.0.2.v5'},
'postgres': {'9.3.1': '9.3.14',
'9.3.10': '9.3.14',
'9.3.12': '9.3.14',
'9.3.2': '9.3.14'}
}
""" |
results = {}
engine_versions = client.describe_db_engine_versions()['DBEngineVersions']
for v in engine_versions:
if not v['Engine'] in results:
results[v['Engine']] = {}
if 'ValidUpgradeTarget' not in v or len(v['ValidUpgradeTarget']) == 0:
continue
for t in v['ValidUpgradeTarget']:
if not major and t['IsMajorVersionUpgrade']:
continue
if LooseVersion(t['EngineVersion']) > LooseVersion(
results[v['Engine']].get(v['EngineVersion'], '0.0.0')):
results[v['Engine']][v['EngineVersion']] = t['EngineVersion']
return results |
<SYSTEM_TASK:>
Create a local output directory per execution.
<END_TASK>
<USER_TASK:>
Description:
def get_local_output_dir():
"""Create a local output directory per execution.
We've seen occasional (1/100000) perm issues with lambda on temp
directory and changing unix execution users (2015-2018), so use a
per execution temp space. With firecracker lambdas this may be outdated.
""" |
output_dir = os.environ.get('C7N_OUTPUT_DIR', '/tmp/' + str(uuid.uuid4()))
if not os.path.exists(output_dir):
try:
os.mkdir(output_dir)
except OSError as error:
log.warning("Unable to make output directory: {}".format(error))
return output_dir |
<SYSTEM_TASK:>
Get policy lambda execution configuration.
<END_TASK>
<USER_TASK:>
Description:
def init_config(policy_config):
"""Get policy lambda execution configuration.
cli parameters are serialized into the policy lambda config,
we merge those with any policy specific execution options.
--assume (role) and -s (output directory) get special handling to
disambiguate any cli context.
The account id is sourced from the config options or from an api call
and cached as a global.
""" |
global account_id
exec_options = policy_config.get('execution-options', {})
# Remove some configuration options that don't make sense to translate from
# cli to lambda automatically.
# - assume role on cli doesn't translate, it is the default lambda role and
# used to provision the lambda.
# - profile doesn't translate to lambda; it's `home` dir setup dependent
# - dryrun doesn't translate (and shouldn't be present)
# - region doesn't translate from cli (the lambda is bound to a region), and
# on the cli represents the region the lambda is provisioned in.
for k in ('assume_role', 'profile', 'region', 'dryrun', 'cache'):
exec_options.pop(k, None)
# a cli local directory doesn't translate to lambda
if not exec_options.get('output_dir', '').startswith('s3'):
exec_options['output_dir'] = get_local_output_dir()
# we can source account id from the cli parameters to avoid the sts call
if exec_options.get('account_id'):
account_id = exec_options['account_id']
# merge with policy specific configuration
exec_options.update(
policy_config['policies'][0].get('mode', {}).get('execution-options', {}))
# if using assume role in lambda ensure that the correct
# execution account is captured in options.
if 'assume_role' in exec_options:
account_id = exec_options['assume_role'].split(':')[4]
elif account_id is None:
session = boto3.Session()
account_id = get_account_id_from_sts(session)
exec_options['account_id'] = account_id
# Historical compatibility with manually set execution options
# previously this was a boolean, it's now a string value with the
# boolean flag triggering a string value of 'aws'
if 'metrics_enabled' in exec_options \
and isinstance(exec_options['metrics_enabled'], bool) \
and exec_options['metrics_enabled']:
exec_options['metrics_enabled'] = 'aws'
return Config.empty(**exec_options) |
<SYSTEM_TASK:>
index policy metrics
<END_TASK>
<USER_TASK:>
Description:
def index_metrics(
config, start, end, incremental=False, concurrency=5, accounts=None,
period=3600, tag=None, index='policy-metrics', verbose=False):
"""index policy metrics""" |
logging.basicConfig(level=(verbose and logging.DEBUG or logging.INFO))
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('elasticsearch').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
logging.getLogger('requests').setLevel(logging.WARNING)
logging.getLogger('c7n.worker').setLevel(logging.INFO)
with open(config) as fh:
config = yaml.safe_load(fh.read())
jsonschema.validate(config, CONFIG_SCHEMA)
start, end = get_date_range(start, end)
p_accounts = set()
p_account_stats = {}
i_time = i_points = 0
t = time.time()
with ProcessPoolExecutor(max_workers=concurrency) as w:
futures = {}
jobs = []
# Filter
for account in config.get('accounts'):
if accounts and account['name'] not in accounts:
continue
if tag:
found = False
for t in account['tags'].values():
if tag == t:
found = True
break
if not found:
continue
p_accounts.add((account['name']))
for region in account.get('regions'):
for (p_start, p_end) in get_periods(start, end, period):
p = (config, index, region, account, p_start, p_end, period)
jobs.append(p)
# by default we'll be effectively processing in order, but that bumps
# our concurrency into rate limits on metrics retrieval in a given account
# region, so go ahead and shuffle; at least with lucene, the lack of ordering
# should have minimal impact on query perf (inverted index).
random.shuffle(jobs)
for j in jobs:
log.debug("submit account:%s region:%s start:%s end:%s" % (
j[3]['name'], j[2], j[4], j[5]))
futures[w.submit(index_account_metrics, *j)] = j
# Process completed
for f in as_completed(futures):
config, index, region, account, p_start, p_end, period = futures[f]
if f.exception():
log.warning("error account:%s region:%s error:%s",
account['name'], region, f.exception())
continue
rtime, rpoints = f.result()
rstat = p_account_stats.setdefault(
account['name'], {}).setdefault(region, {'points': 0})
rstat['points'] += rpoints
# log.info("complete account:%s, region:%s points:%s time:%0.2f",
# account['name'], region, rpoints, rtime)
i_time += rtime
i_points += rpoints
log.info("complete accounts:%d points:%d itime:%0.2f time:%0.2f",
len(p_accounts), i_points, i_time, time.time() - t) |
<SYSTEM_TASK:>
make config revision look like describe output.
<END_TASK>
<USER_TASK:>
Description:
def transform_revision(self, revision):
"""make config revision look like describe output.""" |
config = self.manager.get_source('config')
return config.load_resource(revision) |
<SYSTEM_TASK:>
Add basic options to the subparser.
<END_TASK>
<USER_TASK:>
Description:
def _default_options(p, blacklist=""):
""" Add basic options ot the subparser.
`blacklist` is a list of options to exclude from the default set.
e.g.: ['region', 'log-group']
""" |
provider = p.add_argument_group(
"provider", "AWS account information, defaults per the aws cli")
if 'region' not in blacklist:
provider.add_argument(
"-r", "--region", action='append', default=[],
dest='regions', metavar='REGION',
help="AWS Region to target. Can be used multiple times")
provider.add_argument(
"--profile",
help="AWS Account Config File Profile to utilize")
provider.add_argument("--assume", default=None, dest="assume_role",
help="Role to assume")
provider.add_argument("--external-id", default=None, dest="external_id",
help="External Id to provide when assuming a role")
config = p.add_argument_group(
"config", "Policy config file(s) and policy selectors")
# -c is deprecated. Supported for legacy reasons
config.add_argument("-c", "--config", help=argparse.SUPPRESS)
config.add_argument("configs", nargs='*',
help="Policy configuration file(s)")
config.add_argument("-p", "--policies", default=None, dest='policy_filter',
help="Only use named/matched policies")
config.add_argument("-t", "--resource", default=None, dest='resource_type',
help="Only use policies with the given resource type")
output = p.add_argument_group("output", "Output control")
output.add_argument("-v", "--verbose", action="count", help="Verbose logging")
if 'quiet' not in blacklist:
output.add_argument("-q", "--quiet", action="count",
help="Less logging (repeatable, -qqq for no output)")
else:
output.add_argument("-q", "--quiet", action="count", help=argparse.SUPPRESS)
output.add_argument("--debug", default=False, help=argparse.SUPPRESS,
action="store_true")
if 'vars' not in blacklist:
# p.add_argument('--vars', default=None,
# help='Vars file to substitute into policy')
p.set_defaults(vars=None)
if 'log-group' not in blacklist:
p.add_argument(
"-l", "--log-group", default=None,
help="Cloudwatch Log Group to send policy logs")
else:
p.add_argument("--log-group", default=None, help=argparse.SUPPRESS)
if 'output-dir' not in blacklist:
p.add_argument("-s", "--output-dir", required=True,
help="[REQUIRED] Directory or S3 URL For policy output")
if 'cache' not in blacklist:
p.add_argument(
"-f", "--cache", default="~/.cache/cloud-custodian.cache",
help="Cache file (default %(default)s)")
p.add_argument(
"--cache-period", default=15, type=int,
help="Cache validity in minutes (default %(default)i)")
else:
p.add_argument("--cache", default=None, help=argparse.SUPPRESS) |
<SYSTEM_TASK:>
Add options specific to the report subcommand.
<END_TASK>
<USER_TASK:>
Description:
def _report_options(p):
""" Add options specific to the report subcommand. """ |
_default_options(p, blacklist=['cache', 'log-group', 'quiet'])
p.add_argument(
'--days', type=float, default=1,
help="Number of days of history to consider")
p.add_argument(
'--raw', type=argparse.FileType('wb'),
help="Store raw json of collected records to given file path")
p.add_argument(
'--field', action='append', default=[], type=_key_val_pair,
metavar='HEADER=FIELD',
help='Repeatable. JMESPath of field to include in the output OR '
'for a tag use prefix `tag:`. Special case fields `region` and '
'`policy` are available')
p.add_argument(
'--no-default-fields', action="store_true",
help='Exclude default fields for report.')
p.add_argument(
'--format', default='csv', choices=['csv', 'grid', 'simple', 'json'],
help="Format to output data in (default: %(default)s). "
"Options include simple, grid, csv, json") |
<SYSTEM_TASK:>
Add options specific to metrics subcommand.
<END_TASK>
<USER_TASK:>
Description:
def _metrics_options(p):
""" Add options specific to metrics subcommand. """ |
_default_options(p, blacklist=['log-group', 'output-dir', 'cache', 'quiet'])
p.add_argument(
'--start', type=date_parse,
help='Start date (requires --end, overrides --days)')
p.add_argument(
'--end', type=date_parse, help='End date')
p.add_argument(
'--days', type=int, default=14,
help='Number of days of history to consider (default: %(default)i)')
p.add_argument('--period', type=int, default=60 * 24 * 24) |
<SYSTEM_TASK:>
Add options specific to logs subcommand.
<END_TASK>
<USER_TASK:>
Description:
def _logs_options(p):
""" Add options specific to logs subcommand. """ |
_default_options(p, blacklist=['cache', 'quiet'])
# default time range is 0 to "now" (to include all log entries)
p.add_argument(
'--start',
default='the beginning', # invalid, will result in 0
help='Start date and/or time',
)
p.add_argument(
'--end',
default=datetime.now().strftime('%c'),
help='End date and/or time',
) |
<SYSTEM_TASK:>
Add options specific to schema subcommand.
<END_TASK>
<USER_TASK:>
Description:
def _schema_options(p):
""" Add options specific to schema subcommand. """ |
p.add_argument(
'resource', metavar='selector', nargs='?',
default=None).completer = _schema_tab_completer
p.add_argument(
'--summary', action="store_true",
help="Summarize counts of available resources, actions and filters")
p.add_argument('--json', action="store_true", help=argparse.SUPPRESS)
p.add_argument("-v", "--verbose", action="count", help="Verbose logging")
p.add_argument("-q", "--quiet", action="count", help=argparse.SUPPRESS)
p.add_argument("--debug", default=False, help=argparse.SUPPRESS) |
<SYSTEM_TASK:>
Type convert the csv record, modifies in place.
<END_TASK>
<USER_TASK:>
Description:
def process_user_record(cls, info):
"""Type convert the csv record, modifies in place.""" |
keys = list(info.keys())
# Value conversion
for k in keys:
v = info[k]
if v in ('N/A', 'no_information'):
info[k] = None
elif v == 'false':
info[k] = False
elif v == 'true':
info[k] = True
# Object conversion
for p, t in cls.list_sub_objects:
obj = dict([(k[len(p):], info.pop(k))
for k in keys if k.startswith(p)])
if obj.get('active', False):
info.setdefault(t, []).append(obj)
return info |
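A sketch of the conversion, assuming the method above is bound as a classmethod on a class whose list_sub_objects maps hypothetical column prefixes to list names:
class ReportSketch(object):
    # hypothetical prefix -> target list mapping; the real class defines its own
    list_sub_objects = (('access_key_1_', 'access_keys'),)
    process_user_record = classmethod(process_user_record)

row = {'mfa_active': 'true', 'password_enabled': 'false',
       'access_key_1_active': 'true', 'access_key_1_last_rotated': 'N/A'}
print(ReportSketch.process_user_record(row))
# {'mfa_active': True, 'password_enabled': False,
#  'access_keys': [{'active': True, 'last_rotated': None}]}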
<SYSTEM_TASK:>
Builds and returns a cloud API service object.
<END_TASK>
<USER_TASK:>
Description:
def _create_service_api(credentials, service_name, version, developer_key=None,
cache_discovery=False, http=None):
"""Builds and returns a cloud API service object.
Args:
credentials (OAuth2Credentials): Credentials that will be used to
authenticate the API calls.
service_name (str): The name of the API.
version (str): The version of the API to use.
developer_key (str): The api key to use to determine the project
associated with the API call, most API services do not require
this to be set.
cache_discovery (bool): Whether or not to cache the discovery doc.
Returns:
object: A Resource object with methods for interacting with the service.
""" |
# The default logging of the discovery obj is very noisy in recent versions.
# Lower the default logging level of just this module to WARNING unless
# debug is enabled.
if log.getEffectiveLevel() > logging.DEBUG:
logging.getLogger(discovery.__name__).setLevel(logging.WARNING)
discovery_kwargs = {
'serviceName': service_name,
'version': version,
'developerKey': developer_key,
'cache_discovery': cache_discovery,
}
if http:
discovery_kwargs['http'] = http
else:
discovery_kwargs['credentials'] = credentials
return discovery.build(**discovery_kwargs) |
<SYSTEM_TASK:>
Safely initialize a repository class to a property.
<END_TASK>
<USER_TASK:>
Description:
def client(self, service_name, version, component, **kw):
"""Safely initialize a repository class to a property.
Args:
service_name (str): The name of the gcp api service.
version (str): The version of the api to use.
component (str): The api component to return a client for.
Returns:
object: A ServiceClient instance bound to the given component.
""" |
service = _create_service_api(
self._credentials,
service_name,
version,
kw.get('developer_key'),
kw.get('cache_discovery', False),
self._http or _build_http())
return ServiceClient(
gcp_service=service,
component=component,
credentials=self._credentials,
rate_limiter=self._rate_limiter,
use_cached_http=self._use_cached_http,
http=self._http) |
<SYSTEM_TASK:>
Builds pagination-aware request object.
<END_TASK>
<USER_TASK:>
Description:
def _build_next_request(self, verb, prior_request, prior_response):
"""Builds pagination-aware request object.
More details:
https://developers.google.com/api-client-library/python/guide/pagination
Args:
verb (str): Request verb (ex. insert, update, delete).
prior_request (httplib2.HttpRequest): Request that may trigger
paging.
prior_response (dict): Potentially partial response.
Returns:
httplib2.HttpRequest: HttpRequest or None. None is returned when
there is nothing more to fetch - request completed.
""" |
method = getattr(self._component, verb + '_next')
return method(prior_request, prior_response) |
<SYSTEM_TASK:>
Run execute with retries and rate limiting.
<END_TASK>
<USER_TASK:>
Description:
def _execute(self, request):
"""Run execute with retries and rate limiting.
Args:
request (object): The HttpRequest object to execute.
Returns:
dict: The response from the API.
""" |
if self._rate_limiter:
# Since the ratelimiter library only exposes a context manager
# interface the code has to be duplicated to handle the case where
# no rate limiter is defined.
with self._rate_limiter:
return request.execute(http=self.http,
num_retries=self._num_retries)
return request.execute(http=self.http,
num_retries=self._num_retries) |
<SYSTEM_TASK:>
Check if a resource is locked.
<END_TASK>
<USER_TASK:>
Description:
def info(self, account_id, resource_id, parent_id):
"""Check if a resource is locked.
If a resource has an explicit status we use that, else
we defer to the parent resource lock status.
""" |
resource = self.record(account_id, resource_id)
if resource is None and not parent_id:
return {'ResourceId': resource_id,
'LockStatus': self.STATE_UNLOCKED}
elif resource is None:
parent = self.record(account_id, parent_id)
if parent is None:
return {'ResourceId': resource_id,
'ParentId': parent_id,
'LockStatus': self.STATE_UNLOCKED}
parent['ResourceId'] = resource_id
parent['ParentId'] = parent_id
parent['LockType'] = 'parent'
return parent
if resource['ResourceId'].startswith('vpc-'):
return resource
if resource['ResourceId'].startswith('sg-'):
return resource |
<SYSTEM_TASK:>
ENI flow stream processor that rolls up, enhances,
<END_TASK>
<USER_TASK:>
Description:
def process_eni_metrics(
stream_eni, myips, stream,
start, end, period, sample_size,
resolver, sink_uri):
"""ENI flow stream processor that rollups, enhances,
and indexes the stream by time period.""" |
stats = Counter()
period_counters = flow_stream_stats(myips, stream, period)
client = InfluxDBClient.from_dsn(sink_uri)
resource = resolver.resolve_resource(stream_eni)
points = []
for period in sorted(period_counters):
pc = period_counters[period]
pd = datetime.fromtimestamp(period)
for t in ('inbytes', 'outbytes'):
tpc = pc[t]
ips = [ip for ip, _ in tpc.most_common(sample_size)]
resolved = resolver.resolve(ips, pd - timedelta(900), pd + timedelta(900))
logical_counter = rollup_logical(tpc, resolved, ('app', 'env'))
for (app, env), v in logical_counter.items():
p = {}
# rinfo = resolved.get(ip, {})
p['fields'] = {'Bytes': v}
p['measurement'] = 'traffic_%s' % t
p['time'] = datetime.fromtimestamp(period)
p['tags'] = {
'Kind': resource['type'],
'AccountId': resource['account_id'],
'App': resource['app'],
'Env': resource['env'],
'ForeignApp': app,
'ForeignEnv': env}
points.append(p)
if len(points) > 2000:
client.write_points(points)
stats['Points'] += len(points)
points = []
client.write_points(points)
stats['Points'] += len(points)
log.info('periods:%d resource:%s points:%d',
len(period_counters), resource, stats['Points'])
return stats |
<SYSTEM_TASK:>
publish the given function.
<END_TASK>
<USER_TASK:>
Description:
def publish(self, func):
"""publish the given function.""" |
project = self.session.get_default_project()
func_name = "projects/{}/locations/{}/functions/{}".format(
project, self.region, func.name)
func_info = self.get(func.name)
source_url = None
archive = func.get_archive()
if not func_info or self._delta_source(archive, func_name):
source_url = self._upload(archive, self.region)
config = func.get_config()
config['name'] = func_name
if source_url:
config['sourceUploadUrl'] = source_url
# todo - we'll really need before() and after() for pre-provisioning of
# resources (ie topic for function stream on create) and post provisioning (schedule
# invocation of extant function).
#
# convergent event source creation
for e in func.events:
e.add(func)
if func_info is None:
log.info("creating function")
response = self.client.execute_command(
'create', {
'location': "projects/{}/locations/{}".format(
project, self.region),
'body': config})
else:
delta = delta_resource(func_info, config, ('httpsTrigger',))
if not delta:
response = None
else:
update_mask = ','.join(delta)
log.info("updating function config %s", update_mask)
response = self.client.execute_command(
'patch', {
'name': func_name,
'body': config,
'updateMask': update_mask})
return response |
<SYSTEM_TASK:>
Ensure the given identities are in the iam role bindings for the topic.
<END_TASK>
<USER_TASK:>
Description:
def ensure_iam(self, publisher=None):
"""Ensure the given identities are in the iam role bindings for the topic.
""" |
topic = self.get_topic_param()
client = self.session.client('pubsub', 'v1', 'projects.topics')
policy = client.execute_command('getIamPolicy', {'resource': topic})
policy.pop('etag')
found = False
for binding in policy.get('bindings', {}):
if binding['role'] != 'roles/pubsub.publisher':
continue
if publisher in binding['members']:
return
found = binding
if not found:
policy.setdefault('bindings', []).append(
{'members': [publisher], 'role': 'roles/pubsub.publisher'})
else:
found['members'].append(publisher)
client.execute_command('setIamPolicy', {'resource': topic, 'body': {'policy': policy}}) |
<SYSTEM_TASK:>
Get the parent container for the log sink
<END_TASK>
<USER_TASK:>
Description:
def get_parent(self, log_info):
"""Get the parent container for the log sink""" |
if self.data.get('scope', 'log') == 'log':
if log_info.scope_type != 'projects':
raise ValueError("Invalid log subscriber scope")
parent = "%s/%s" % (log_info.scope_type, log_info.scope_id)
elif self.data['scope'] == 'project':
parent = 'projects/{}'.format(
self.data.get('scope_id', self.session.get_default_project()))
elif self.data['scope'] == 'organization':
parent = 'organizations/{}'.format(self.data['scope_id'])
elif self.data['scope'] == 'folder':
parent = 'folders/{}'.format(self.data['scope_id'])
elif self.data['scope'] == 'billing':
parent = 'billingAccounts/{}'.format(self.data['scope_id'])
else:
raise ValueError(
'invalid log subscriber scope %s' % (self.data))
return parent |
<SYSTEM_TASK:>
Ensure the log sink and its pub sub topic exist.
<END_TASK>
<USER_TASK:>
Description:
def ensure_sink(self):
"""Ensure the log sink and its pub sub topic exist.""" |
topic_info = self.pubsub.ensure_topic()
scope, sink_path, sink_info = self.get_sink(topic_info)
client = self.session.client('logging', 'v2', '%s.sinks' % scope)
try:
sink = client.execute_command('get', {'sinkName': sink_path})
except HttpError as e:
if e.resp.status != 404:
raise
sink = client.execute_command('create', sink_info)
else:
delta = delta_resource(sink, sink_info['body'])
if delta:
sink_info['updateMask'] = ','.join(delta)
sink_info['sinkName'] = sink_path
sink_info.pop('parent')
sink = client.execute_command('update', sink_info)
else:
return sink_path
self.pubsub.ensure_iam(publisher=sink['writerIdentity'])
return sink_path |
<SYSTEM_TASK:>
Remove any provisioned log sink if auto created
<END_TASK>
<USER_TASK:>
Description:
def remove(self, func):
"""Remove any provisioned log sink if auto created""" |
if not self.data['name'].startswith(self.prefix):
return
parent = self.get_parent(self.get_log())
_, sink_path, _ = self.get_sink()
client = self.session.client(
'logging', 'v2', '%s.sinks' % (parent.split('/', 1)[0]))
try:
client.execute_command(
'delete', {'sinkName': sink_path})
except HttpError as e:
if e.resp.status != 404:
raise |
<SYSTEM_TASK:>
Match a given cwe event as a cloudtrail event with an api call
<END_TASK>
<USER_TASK:>
Description:
def match(cls, event):
"""Match a given cwe event as cloudtrail with an api call
That has its information filled out.
""" |
if 'detail' not in event:
return False
if 'eventName' not in event['detail']:
return False
k = event['detail']['eventName']
# We want callers to use a compiled expression, but want to avoid
# initialization cost of doing it without cause. Not thread safe,
# but usage context is lambda entry.
if k in cls.trail_events:
v = dict(cls.trail_events[k])
if isinstance(v['ids'], six.string_types):
v['ids'] = e = jmespath.compile('detail.%s' % v['ids'])
cls.trail_events[k]['ids'] = e
return v
return False |
<SYSTEM_TASK:>
extract resources ids from a cloud trail event.
<END_TASK>
<USER_TASK:>
Description:
def get_trail_ids(cls, event, mode):
"""extract resources ids from a cloud trail event.""" |
resource_ids = ()
event_name = event['detail']['eventName']
event_source = event['detail']['eventSource']
for e in mode.get('events', []):
if not isinstance(e, dict):
# Check if we have a short cut / alias
info = CloudWatchEvents.match(event)
if info:
return info['ids'].search(event)
continue
if event_name != e.get('event'):
continue
if event_source != e.get('source'):
continue
id_query = e.get('ids')
if not id_query:
raise ValueError("No id query configured")
evt = event
# be forgiving for users specifying with details or without
if not id_query.startswith('detail.'):
evt = event.get('detail', {})
resource_ids = jmespath.search(id_query, evt)
if resource_ids:
break
return resource_ids |
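An illustrative call, assuming the CloudWatchEvents class above is in scope; the event and mode shapes below are trimmed to just the fields the method inspects:
event = {'detail': {
    'eventName': 'CreateBucket',
    'eventSource': 's3.amazonaws.com',
    'requestParameters': {'bucketName': 'my-bucket'}}}
mode = {'events': [{
    'event': 'CreateBucket',
    'source': 's3.amazonaws.com',
    'ids': 'requestParameters.bucketName'}]}
print(CloudWatchEvents.get_trail_ids(event, mode))  # 'my-bucket'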
<SYSTEM_TASK:>
Generate a c7n-org accounts config file using AWS Organizations
<END_TASK>
<USER_TASK:>
Description:
def main(role, ou, assume, profile, output, regions, active):
"""Generate a c7n-org accounts config file using AWS Organizations
With c7n-org you can then run policies or arbitrary scripts across
accounts.
""" |
session = get_session(assume, 'c7n-org', profile)
client = session.client('organizations')
accounts = []
for path in ou:
ou = get_ou_from_path(client, path)
accounts.extend(get_accounts_for_ou(client, ou, active))
results = []
for a in accounts:
tags = []
path_parts = a['Path'].strip('/').split('/')
for idx, _ in enumerate(path_parts):
tags.append("path:/%s" % "/".join(path_parts[:idx + 1]))
ainfo = {
'account_id': a['Id'],
'email': a['Email'],
'name': a['Name'],
'tags': tags,
'role': role.format(**a)}
if regions:
ainfo['regions'] = regions
results.append(ainfo)
print(
yaml.safe_dump(
{'accounts': results},
default_flow_style=False),
file=output) |
<SYSTEM_TASK:>
time series latest record time by account.
<END_TASK>
<USER_TASK:>
Description:
def status(config):
"""time series lastest record time by account.""" |
with open(config) as fh:
config = yaml.safe_load(fh.read())
jsonschema.validate(config, CONFIG_SCHEMA)
last_index = get_incremental_starts(config, None)
accounts = {}
for (a, region), last in last_index.items():
accounts.setdefault(a, {})[region] = last
print(yaml.safe_dump(accounts, default_flow_style=False)) |
<SYSTEM_TASK:>
Generator that returns the events
<END_TASK>
<USER_TASK:>
Description:
def fetch_events(cursor, config, account_name):
"""Generator that returns the events""" |
query = config['indexer'].get('query',
'select * from events where user_agent glob \'*CloudCustodian*\'')
for event in cursor.execute(query):
event['account'] = account_name
event['_index'] = config['indexer']['idx_name']
event['_type'] = config['indexer'].get('idx_type', 'traildb')
yield event |
<SYSTEM_TASK:>
Creates a session using available authentication type.
<END_TASK>
<USER_TASK:>
Description:
def _initialize_session(self):
"""
Creates a session using available authentication type.
Auth priority:
1. Token Auth
2. Tenant Auth
3. Azure CLI Auth
""" |
# Only run once
if self.credentials is not None:
return
tenant_auth_variables = [
constants.ENV_TENANT_ID, constants.ENV_SUB_ID,
constants.ENV_CLIENT_ID, constants.ENV_CLIENT_SECRET
]
token_auth_variables = [
constants.ENV_ACCESS_TOKEN, constants.ENV_SUB_ID
]
msi_auth_variables = [
constants.ENV_USE_MSI, constants.ENV_SUB_ID
]
if self.authorization_file:
self.credentials, self.subscription_id = self.load_auth_file(self.authorization_file)
self.log.info("Creating session with authorization file")
elif all(k in os.environ for k in token_auth_variables):
# Token authentication
self.credentials = BasicTokenAuthentication(
token={
'access_token': os.environ[constants.ENV_ACCESS_TOKEN]
})
self.subscription_id = os.environ[constants.ENV_SUB_ID]
self.log.info("Creating session with Token Authentication")
self._is_token_auth = True
elif all(k in os.environ for k in tenant_auth_variables):
# Tenant (service principal) authentication
self.credentials = ServicePrincipalCredentials(
client_id=os.environ[constants.ENV_CLIENT_ID],
secret=os.environ[constants.ENV_CLIENT_SECRET],
tenant=os.environ[constants.ENV_TENANT_ID],
resource=self.resource_namespace)
self.subscription_id = os.environ[constants.ENV_SUB_ID]
self.tenant_id = os.environ[constants.ENV_TENANT_ID]
self.log.info("Creating session with Service Principal Authentication")
elif all(k in os.environ for k in msi_auth_variables):
# MSI authentication
if constants.ENV_CLIENT_ID in os.environ:
self.credentials = MSIAuthentication(
client_id=os.environ[constants.ENV_CLIENT_ID],
resource=self.resource_namespace)
else:
self.credentials = MSIAuthentication(
resource=self.resource_namespace)
self.subscription_id = os.environ[constants.ENV_SUB_ID]
self.log.info("Creating session with MSI Authentication")
else:
# Azure CLI authentication
self._is_cli_auth = True
(self.credentials,
self.subscription_id,
self.tenant_id) = Profile().get_login_credentials(
resource=self.resource_namespace)
self.log.info("Creating session with Azure CLI Authentication")
# Let provided id parameter override everything else
if self.subscription_id_override is not None:
self.subscription_id = self.subscription_id_override
self.log.info("Session using Subscription ID: %s" % self.subscription_id)
if self.credentials is None:
self.log.error('Unable to locate credentials for Azure session.') |
<SYSTEM_TASK:>
Build auth json string for deploying
<END_TASK>
<USER_TASK:>
Description:
def get_functions_auth_string(self, target_subscription_id):
"""
Build auth json string for deploying
Azure Functions. Look for dedicated
Functions environment variables or
fall back to normal Service Principal
variables.
""" |
self._initialize_session()
function_auth_variables = [
constants.ENV_FUNCTION_TENANT_ID,
constants.ENV_FUNCTION_CLIENT_ID,
constants.ENV_FUNCTION_CLIENT_SECRET
]
# Use dedicated function env vars if available
if all(k in os.environ for k in function_auth_variables):
auth = {
'credentials':
{
'client_id': os.environ[constants.ENV_FUNCTION_CLIENT_ID],
'secret': os.environ[constants.ENV_FUNCTION_CLIENT_SECRET],
'tenant': os.environ[constants.ENV_FUNCTION_TENANT_ID]
},
'subscription': target_subscription_id
}
elif type(self.credentials) is ServicePrincipalCredentials:
auth = {
'credentials':
{
'client_id': os.environ[constants.ENV_CLIENT_ID],
'secret': os.environ[constants.ENV_CLIENT_SECRET],
'tenant': os.environ[constants.ENV_TENANT_ID]
},
'subscription': target_subscription_id
}
else:
raise NotImplementedError(
"Service Principal credentials are the only "
"supported auth mechanism for deploying functions.")
return json.dumps(auth, indent=2) |
<SYSTEM_TASK:>
Create an api gw response from a wsgi app and environ.
<END_TASK>
<USER_TASK:>
Description:
def create_gw_response(app, wsgi_env):
"""Create an api gw response from a wsgi app and environ.
""" |
response = {}
buf = []
result = []
def start_response(status, headers, exc_info=None):
result[:] = [status, headers]
return buf.append
appr = app(wsgi_env, start_response)
close_func = getattr(appr, 'close', None)
try:
buf.extend(list(appr))
finally:
close_func and close_func()
response['body'] = ''.join(buf)
response['statusCode'] = result[0].split(' ', 1)[0]
response['headers'] = {}
for k, v in result[1]:
response['headers'][k] = v
if 'Content-Length' not in response['headers']:
response['headers']['Content-Length'] = str(len(response['body']))
if 'Content-Type' not in response['headers']:
response['headers']['Content-Type'] = 'text/plain'
return response |
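A minimal illustration with a trivial WSGI app, assuming create_gw_response above is in scope:
def hello_app(environ, start_response):
    # minimal WSGI application used only for illustration
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return ['hello from wsgi']

resp = create_gw_response(hello_app, {})
# resp -> {'body': 'hello from wsgi', 'statusCode': '200',
#          'headers': {'Content-Type': 'text/plain',
#                      'Content-Length': '15'}}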
<SYSTEM_TASK:>
Create a wsgi environment from an apigw request.
<END_TASK>
<USER_TASK:>
Description:
def create_wsgi_request(event, server_name='apigw'):
"""Create a wsgi environment from an apigw request.
""" |
path = urllib.url2pathname(event['path'])
script_name = (
event['headers']['Host'].endswith('.amazonaws.com') and
event['requestContext']['stage'] or '').encode('utf8')
query = event['queryStringParameters']
query_string = query and urllib.urlencode(query) or ""
body = event['body'] and event['body'].encode('utf8') or ''
environ = {
'HTTPS': 'on',
'PATH_INFO': path.encode('utf8'),
'QUERY_STRING': query_string.encode('utf8'),
'REMOTE_ADDR': event[
'requestContext']['identity']['sourceIp'].encode('utf8'),
'REQUEST_METHOD': event['httpMethod'].encode('utf8'),
'SCRIPT_NAME': script_name,
'SERVER_NAME': server_name.encode('utf8'),
'SERVER_PORT': '80'.encode('utf8'),
'SERVER_PROTOCOL': u'HTTP/1.1'.encode('utf8'),
'wsgi.errors': sys.stderr,
'wsgi.input': StringIO(body),
'wsgi.multiprocess': False,
'wsgi.multithread': False,
'wsgi.run_once': False,
'wsgi.url_scheme': u'https'.encode('utf8'),
'wsgi.version': (1, 0),
}
headers = event['headers']
# Input processing
if event['httpMethod'] in ("POST", "PUT", "PATCH"):
if 'Content-Type' in headers:
environ['CONTENT_TYPE'] = headers['Content-Type']
environ['CONTENT_LENGTH'] = str(len(body))
for header in list(event['headers'].keys()):
wsgi_name = "HTTP_" + header.upper().replace('-', '_')
environ[wsgi_name] = headers[header].encode('utf8')
if script_name:
path_info = environ['PATH_INFO']
if script_name in path_info:
environ['PATH_INFO'] = path_info.replace(script_name, '')
# Extract remote user from event
remote_user = None
if event['requestContext'].get('authorizer'):
remote_user = event[
'requestContext']['authorizer'].get('principalId')
elif event['requestContext'].get('identity'):
remote_user = event['requestContext']['identity'].get('userArn')
if remote_user:
environ['REMOTE_USER'] = remote_user
# apigw aware integrations
environ['apigw.request'] = event['requestContext']
environ['apigw.stagevars'] = event['stageVariables']
return environ |
<SYSTEM_TASK:>
Retrieve logs from a log group.
<END_TASK>
<USER_TASK:>
Description:
def retrieve_logs(self, include_lambda_messages=True, max_entries=None):
# type: (bool, Optional[int]) -> Iterator[Dict[str, Any]]
"""Retrieve logs from a log group.
:type include_lambda_messages: boolean
:param include_lambda_messages: Include logs generated by the AWS
Lambda service. If this value is False, only chalice logs will be
included.
:type max_entries: int
:param max_entries: Maximum number of log messages to include.
:rtype: iterator
:return: An iterator that yields event dicts. Each event
dict has these keys:
* logStreamName -> (string) The name of the log stream.
* timestamp -> (datetime.datetime) - The timestamp for the msg.
* message -> (string) The data contained in the log event.
* ingestionTime -> (datetime.datetime) Ingestion time of event.
* eventId -> (string) A unique identifier for this event.
* logShortId -> (string) Short identifier for logStreamName.
""" |
# TODO: Add support for startTime/endTime.
shown = 0
for event in self._client.iter_log_events(self._log_group_name,
interleaved=True):
if not include_lambda_messages and \
self._is_lambda_message(event):
continue
# logStreamName is: '2016/07/05/[id]hash'
# We want to extract the hash portion and
# provide a short identifier.
identifier = event['logStreamName']
if ']' in identifier:
index = identifier.find(']')
identifier = identifier[index + 1:index + 7]
event['logShortId'] = identifier
yield event
shown += 1
if max_entries is not None and shown >= max_entries:
return |
<SYSTEM_TASK:>
Validate app configuration.
<END_TASK>
<USER_TASK:>
Description:
def validate_configuration(config):
# type: (Config) -> None
"""Validate app configuration.
The purpose of this method is to provide a fail fast mechanism
for anything we know is going to fail deployment.
We can detect common error cases and provide the user with helpful
error messages.
""" |
routes = config.chalice_app.routes
validate_routes(routes)
validate_route_content_types(routes, config.chalice_app.api.binary_types)
_validate_manage_iam_role(config)
validate_python_version(config)
validate_unique_function_names(config)
validate_feature_flags(config.chalice_app) |
<SYSTEM_TASK:>
Validate configuration matches a specific python version.
<END_TASK>
<USER_TASK:>
Description:
def validate_python_version(config, actual_py_version=None):
# type: (Config, Optional[str]) -> None
"""Validate configuration matches a specific python version.
If the ``actual_py_version`` is not provided, it will default
to the major/minor version of the currently running python
interpreter.
:param actual_py_version: The major/minor python version in
the form "pythonX.Y", e.g "python2.7", "python3.6".
""" |
lambda_version = config.lambda_python_version
if actual_py_version is None:
actual_py_version = 'python%s.%s' % sys.version_info[:2]
if actual_py_version != lambda_version:
# We're not making this a hard error for now, but we may
# turn this into a hard fail.
warnings.warn("You are currently running %s, but the closest "
"supported version on AWS Lambda is %s\n"
"Please use %s, otherwise you may run into "
"deployment issues. " %
(actual_py_version, lambda_version, lambda_version),
stacklevel=2) |
<SYSTEM_TASK:>
Execute a pip command with the given arguments.
<END_TASK>
<USER_TASK:>
Description:
def _execute(self,
command, # type: str
args, # type: List[str]
env_vars=None, # type: EnvVars
shim=None # type: OptStr
):
# type: (...) -> Tuple[int, bytes, bytes]
"""Execute a pip command with the given arguments.""" |
main_args = [command] + args
logger.debug("calling pip %s", ' '.join(main_args))
rc, out, err = self._wrapped_pip.main(main_args, env_vars=env_vars,
shim=shim)
return rc, out, err |
<SYSTEM_TASK:>
Build an sdist into a wheel file.
<END_TASK>
<USER_TASK:>
Description:
def build_wheel(self, wheel, directory, compile_c=True):
# type: (str, str, bool) -> None
"""Build an sdist into a wheel file.""" |
arguments = ['--no-deps', '--wheel-dir', directory, wheel]
env_vars = self._osutils.environ()
shim = ''
if not compile_c:
env_vars.update(pip_no_compile_c_env_vars)
shim = pip_no_compile_c_shim
# Ignore rc and stderr from this command since building the wheels
# may fail and we will find out when we categorize the files that were
# generated.
self._execute('wheel', arguments,
env_vars=env_vars, shim=shim) |
<SYSTEM_TASK:>
Download all dependencies as sdist or wheel.
<END_TASK>
<USER_TASK:>
Description:
def download_all_dependencies(self, requirements_filename, directory):
# type: (str, str) -> None
"""Download all dependencies as sdist or wheel.""" |
arguments = ['-r', requirements_filename, '--dest', directory]
rc, out, err = self._execute('download', arguments)
# When downloading all dependencies we expect to get an rc of 0 back
# since we are casting a wide net here letting pip have options about
# what to download. If a package is not found it is likely because it
does not exist and was misspelled. In this case we raise an error with
# the package name. Otherwise a nonzero rc results in a generic
# download error where we pass along the stderr.
if rc != 0:
if err is None:
err = b'Unknown error'
error = err.decode()
match = re.search(("Could not find a version that satisfies the "
"requirement (.+?) "), error)
if match:
package_name = match.group(1)
raise NoSuchPackageError(str(package_name))
raise PackageDownloadError(error)
stdout = out.decode()
matches = re.finditer(self._LINK_IS_DIR_PATTERN, stdout)
for match in matches:
wheel_package_path = str(match.group(1))
# Looks odd we do not check on the error status of building the
# wheel here. We can assume this is a valid package path since
# we already passed the pip download stage. This stage would have
# thrown a PackageDownloadError if any of the listed packages were
# not valid.
# If it fails the actual build step, it will have the same behavior
# as any other package we fail to build a valid wheel for, and
# complain at deployment time.
self.build_wheel(wheel_package_path, directory) |
<SYSTEM_TASK:>
Download wheel files for manylinux for all the given packages.
<END_TASK>
<USER_TASK:>
Description:
def download_manylinux_wheels(self, abi, packages, directory):
# type: (str, List[str], str) -> None
"""Download wheel files for manylinux for all the given packages.""" |
# If any one of these dependencies fails pip will bail out. Since we
# are only interested in all the ones we can download, we need to feed
# each package to pip individually. The return code of pip doesn't
# matter here since we will inspect the working directory to see which
# wheels were downloaded. We are only interested in wheel files
# compatible with lambda, which means manylinux1_x86_64 platform and
# cpython implementation. The compatible abi depends on the python
# version and is checked later.
for package in packages:
arguments = ['--only-binary=:all:', '--no-deps', '--platform',
'manylinux1_x86_64', '--implementation', 'cp',
'--abi', abi, '--dest', directory, package]
self._execute('download', arguments) |
<SYSTEM_TASK:>
Transform a name to a valid cfn name.
<END_TASK>
<USER_TASK:>
Description:
def to_cfn_resource_name(name):
# type: (str) -> str
"""Transform a name to a valid cfn name.
This will convert the provided name to a CamelCase name.
It's possible that the conversion to a CFN resource name
can result in name collisions. It's up to the caller
to handle name collisions appropriately.
""" |
if not name:
raise ValueError("Invalid name: %r" % name)
word_separators = ['-', '_']
for word_separator in word_separators:
word_parts = [p for p in name.split(word_separator) if p]
name = ''.join([w[0].upper() + w[1:] for w in word_parts])
return re.sub(r'[^A-Za-z0-9]+', '', name) |
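A quick usage sketch of the conversion above; the sample names are made up and assume the function and the re import are in scope:

# Hyphens/underscores become word boundaries, remaining non-alphanumerics are stripped.
print(to_cfn_resource_name('my-app-function'))  # MyAppFunction
print(to_cfn_resource_name('handle_event'))     # HandleEvent
print(to_cfn_resource_name('foo.bar'))          # Foobar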
<SYSTEM_TASK:>
Delete a top level key from the deployed JSON file.
<END_TASK>
<USER_TASK:>
Description:
def remove_stage_from_deployed_values(key, filename):
# type: (str, str) -> None
"""Delete a top level key from the deployed JSON file.""" |
final_values = {} # type: Dict[str, Any]
try:
with open(filename, 'r') as f:
final_values = json.load(f)
except IOError:
# If there is no file to delete from, then this function is a noop.
return
try:
del final_values[key]
with open(filename, 'wb') as f:
data = serialize_to_json(final_values)
f.write(data.encode('utf-8'))
except KeyError:
# If the key didn't exist, then there is nothing to remove.
pass |
<SYSTEM_TASK:>
Record deployed values to a JSON file.
<END_TASK>
<USER_TASK:>
Description:
def record_deployed_values(deployed_values, filename):
# type: (Dict[str, Any], str) -> None
"""Record deployed values to a JSON file.
This allows subsequent deploys to lookup previously deployed values.
""" |
final_values = {} # type: Dict[str, Any]
if os.path.isfile(filename):
with open(filename, 'r') as f:
final_values = json.load(f)
final_values.update(deployed_values)
with open(filename, 'wb') as f:
data = serialize_to_json(final_values)
f.write(data.encode('utf-8')) |
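Illustrative usage; the stage name, values and path below are hypothetical, and serialize_to_json is assumed to be a thin wrapper around json.dumps:

# Subsequent deploys can read these values back with json.load on the same file.
record_deployed_values(
    {'dev': {'rest_api_id': 'abc123', 'region': 'us-east-1'}},
    '.chalice/deployed/dev.json')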
<SYSTEM_TASK:>
Create a zip file from a source input directory.
<END_TASK>
<USER_TASK:>
Description:
def create_zip_file(source_dir, outfile):
# type: (str, str) -> None
"""Create a zip file from a source input directory.
This function is intended to be an equivalent to
`zip -r`. You give it a source directory, `source_dir`,
and it will recursively zip up the files into a zipfile
specified by the `outfile` argument.
""" |
with zipfile.ZipFile(outfile, 'w',
compression=zipfile.ZIP_DEFLATED) as z:
for root, _, filenames in os.walk(source_dir):
for filename in filenames:
full_name = os.path.join(root, filename)
archive_name = os.path.relpath(full_name, source_dir)
z.write(full_name, archive_name) |
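Usage sketch (paths are illustrative); archive names end up relative to source_dir, so the result mirrors `zip -r`:

import zipfile

create_zip_file('vendor/mylib', 'mylib.zip')
with zipfile.ZipFile('mylib.zip') as z:
    print(z.namelist())  # e.g. ['__init__.py', 'core.py'] -- paths relative to source_dir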
<SYSTEM_TASK:>
Update a Lambda function's code and configuration.
<END_TASK>
<USER_TASK:>
Description:
def update_function(self,
function_name, # type: str
zip_contents, # type: str
environment_variables=None, # type: StrMap
runtime=None, # type: OptStr
tags=None, # type: StrMap
timeout=None, # type: OptInt
memory_size=None, # type: OptInt
role_arn=None, # type: OptStr
subnet_ids=None, # type: OptStrList
security_group_ids=None, # type: OptStrList
layers=None, # type: OptStrList
):
# type: (...) -> Dict[str, Any]
"""Update a Lambda function's code and configuration.
This method only updates the values provided to it. If a parameter
is not provided, no changes will be made for that parameter on
the targeted lambda function.
""" |
return_value = self._update_function_code(function_name=function_name,
zip_contents=zip_contents)
self._update_function_config(
environment_variables=environment_variables,
runtime=runtime,
timeout=timeout,
memory_size=memory_size,
role_arn=role_arn,
subnet_ids=subnet_ids,
security_group_ids=security_group_ids,
function_name=function_name,
layers=layers
)
if tags is not None:
self._update_function_tags(return_value['FunctionArn'], tags)
return return_value |
<SYSTEM_TASK:>
Delete a role by first deleting all inline policies.
<END_TASK>
<USER_TASK:>
Description:
def delete_role(self, name):
# type: (str) -> None
"""Delete a role by first deleting all inline policies.""" |
client = self._client('iam')
inline_policies = client.list_role_policies(
RoleName=name
)['PolicyNames']
for policy_name in inline_policies:
self.delete_role_policy(name, policy_name)
client.delete_role(RoleName=name) |
<SYSTEM_TASK:>
Get rest api id associated with an API name.
<END_TASK>
<USER_TASK:>
Description:
def get_rest_api_id(self, name):
# type: (str) -> Optional[str]
"""Get rest api id associated with an API name.
:type name: str
:param name: The name of the rest api.
:rtype: str
:return: If the rest api exists, then the restApiId
is returned, otherwise None.
""" |
rest_apis = self._client('apigateway').get_rest_apis()['items']
for api in rest_apis:
if api['name'] == name:
return api['id']
return None |
<SYSTEM_TASK:>
Authorize API gateway to invoke a lambda function if needed.
<END_TASK>
<USER_TASK:>
Description:
def add_permission_for_apigateway(self, function_name,
region_name, account_id,
rest_api_id, random_id=None):
# type: (str, str, str, str, Optional[str]) -> None
"""Authorize API gateway to invoke a lambda function is needed.
This method will first check if API gateway has permission to call
the lambda function, and only if necessary will it invoke
``self.add_permission_for_apigateway(...)``.
""" |
source_arn = self._build_source_arn_str(region_name, account_id,
rest_api_id)
self._add_lambda_permission_if_needed(
source_arn=source_arn,
function_arn=function_name,
service_name='apigateway',
) |
<SYSTEM_TASK:>
Return the function policy for a lambda function.
<END_TASK>
<USER_TASK:>
Description:
def get_function_policy(self, function_name):
# type: (str) -> Dict[str, Any]
"""Return the function policy for a lambda function.
This function will extract the policy string as a json document
and return the json.loads(...) version of the policy.
""" |
client = self._client('lambda')
try:
policy = client.get_policy(FunctionName=function_name)
return json.loads(policy['Policy'])
except client.exceptions.ResourceNotFoundException:
return {'Statement': []} |
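The same lookup expressed with plain boto3 (the function name is illustrative; get_policy and its ResourceNotFoundException are standard Lambda client calls):

import json
import boto3

client = boto3.client('lambda')
try:
    policy = json.loads(client.get_policy(FunctionName='my-func')['Policy'])
except client.exceptions.ResourceNotFoundException:
    policy = {'Statement': []}
print([stmt.get('Sid') for stmt in policy['Statement']])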
<SYSTEM_TASK:>
Download an SDK to a directory.
<END_TASK>
<USER_TASK:>
Description:
def download_sdk(self, rest_api_id, output_dir,
api_gateway_stage=DEFAULT_STAGE_NAME,
sdk_type='javascript'):
# type: (str, str, str, str) -> None
"""Download an SDK to a directory.
This will generate an SDK and download it to the provided
``output_dir``. If you're using ``get_sdk_download_stream()``,
you have to handle downloading the stream and unzipping the
contents yourself. This method handles that for you.
""" |
zip_stream = self.get_sdk_download_stream(
rest_api_id, api_gateway_stage=api_gateway_stage,
sdk_type=sdk_type)
tmpdir = tempfile.mkdtemp()
with open(os.path.join(tmpdir, 'sdk.zip'), 'wb') as f:
f.write(zip_stream.read())
tmp_extract = os.path.join(tmpdir, 'extracted')
with zipfile.ZipFile(os.path.join(tmpdir, 'sdk.zip')) as z:
z.extractall(tmp_extract)
# The extract zip dir will have a single directory:
# ['apiGateway-js-sdk']
dirnames = os.listdir(tmp_extract)
if len(dirnames) == 1:
full_dirname = os.path.join(tmp_extract, dirnames[0])
if os.path.isdir(full_dirname):
final_dirname = 'chalice-%s-sdk' % sdk_type
full_renamed_name = os.path.join(tmp_extract, final_dirname)
os.rename(full_dirname, full_renamed_name)
shutil.move(full_renamed_name, output_dir)
return
raise RuntimeError(
"The downloaded SDK had an unexpected directory structure: %s" %
(', '.join(dirnames))) |
<SYSTEM_TASK:>
Generate an SDK for a given rest API.
<END_TASK>
<USER_TASK:>
Description:
def get_sdk_download_stream(self, rest_api_id,
api_gateway_stage=DEFAULT_STAGE_NAME,
sdk_type='javascript'):
# type: (str, str, str) -> file
"""Generate an SDK for a given SDK.
Returns a file like object that streams a zip contents for the
generated SDK.
""" |
response = self._client('apigateway').get_sdk(
restApiId=rest_api_id, stageName=api_gateway_stage,
sdkType=sdk_type)
return response['body'] |
<SYSTEM_TASK:>
Verify a subscription arn matches the topic and function name.
<END_TASK>
<USER_TASK:>
Description:
def verify_sns_subscription_current(self, subscription_arn, topic_name,
function_arn):
# type: (str, str, str) -> bool
"""Verify a subscription arn matches the topic and function name.
Given a subscription arn, verify that the associated topic name
and function arn match up to the parameters passed in.
""" |
sns_client = self._client('sns')
try:
attributes = sns_client.get_subscription_attributes(
SubscriptionArn=subscription_arn)['Attributes']
return (
# Splitting on ':' is safe because topic names can't have
# a ':' char.
attributes['TopicArn'].rsplit(':', 1)[1] == topic_name and
attributes['Endpoint'] == function_arn
)
except sns_client.exceptions.NotFoundException:
return False |
<SYSTEM_TASK:>
Configure S3 bucket to invoke a lambda function.
<END_TASK>
<USER_TASK:>
Description:
def connect_s3_bucket_to_lambda(self, bucket, function_arn, events,
prefix=None, suffix=None):
# type: (str, str, List[str], OptStr, OptStr) -> None
"""Configure S3 bucket to invoke a lambda function.
The S3 bucket must already have permission to invoke the
lambda function before you call this function, otherwise
the service will return an error. You can add permissions
by using the ``add_permission_for_s3_event`` below. The
``events`` param matches the event strings supported by the
service.
This method also only supports a single prefix/suffix for now,
which is what's offered in the Lambda console.
""" |
s3 = self._client('s3')
existing_config = s3.get_bucket_notification_configuration(
Bucket=bucket)
# Because we're going to PUT this config back to S3, we need
# to remove `ResponseMetadata` because that's added in botocore
# and isn't a param of the put_bucket_notification_configuration.
existing_config.pop('ResponseMetadata', None)
existing_lambda_config = existing_config.get(
'LambdaFunctionConfigurations', [])
single_config = {
'LambdaFunctionArn': function_arn, 'Events': events
} # type: Dict[str, Any]
filter_rules = []
if prefix is not None:
filter_rules.append({'Name': 'Prefix', 'Value': prefix})
if suffix is not None:
filter_rules.append({'Name': 'Suffix', 'Value': suffix})
if filter_rules:
single_config['Filter'] = {'Key': {'FilterRules': filter_rules}}
new_config = self._merge_s3_notification_config(existing_lambda_config,
single_config)
existing_config['LambdaFunctionConfigurations'] = new_config
s3.put_bucket_notification_configuration(
Bucket=bucket,
NotificationConfiguration=existing_config,
) |
<SYSTEM_TASK:>
Check if the uuid matches the resource and function arn provided.
<END_TASK>
<USER_TASK:>
Description:
def verify_event_source_current(self, event_uuid, resource_name,
service_name, function_arn):
# type: (str, str, str, str) -> bool
"""Check if the uuid matches the resource and function arn provided.
Given a uuid representing an event source mapping for a lambda
function, verify that the associated source arn
and function arn match up to the parameters passed in.
Instead of providing the event source arn, the resource name
is provided along with the service name. For example, if we're
checking an SQS queue event source, the resource name would be
the queue name (e.g. ``myqueue``) and the service would be ``sqs``.
""" |
client = self._client('lambda')
try:
attributes = client.get_event_source_mapping(UUID=event_uuid)
actual_arn = attributes['EventSourceArn']
arn_start, actual_name = actual_arn.rsplit(':', 1)
return (
actual_name == resource_name and
arn_start.startswith('arn:aws:%s' % service_name) and
attributes['FunctionArn'] == function_arn
)
except client.exceptions.ResourceNotFoundException:
return False |
<SYSTEM_TASK:>
Load the chalice config file from the project directory.
<END_TASK>
<USER_TASK:>
Description:
def load_project_config(self):
# type: () -> Dict[str, Any]
"""Load the chalice config file from the project directory.
:raise: OSError/IOError if unable to load the config file.
""" |
config_file = os.path.join(self.project_dir, '.chalice', 'config.json')
with open(config_file) as f:
return json.loads(f.read()) |
<SYSTEM_TASK:>
Generate a cloudformation template for a starter CD pipeline.
<END_TASK>
<USER_TASK:>
Description:
def generate_pipeline(ctx, codebuild_image, source, buildspec_file, filename):
# type: (click.Context, str, str, str, str) -> None
"""Generate a cloudformation template for a starter CD pipeline.
This command will write a starter cloudformation template to
the filename you provide. It contains a CodeCommit repo,
a CodeBuild stage for packaging your chalice app, and a
CodePipeline stage to deploy your application using cloudformation.
You can use any AWS SDK or the AWS CLI to deploy this stack.
Here's an example using the AWS CLI:
\b
$ chalice generate-pipeline pipeline.json
$ aws cloudformation deploy --stack-name mystack \
--template-file pipeline.json --capabilities CAPABILITY_IAM
""" |
from chalice import pipeline
factory = ctx.obj['factory'] # type: CLIFactory
config = factory.create_config_obj()
p = pipeline.CreatePipelineTemplate()
params = pipeline.PipelineParameters(
app_name=config.app_name,
lambda_python_version=config.lambda_python_version,
codebuild_image=codebuild_image,
code_source=source,
)
output = p.create_template(params)
if buildspec_file:
extractor = pipeline.BuildSpecExtractor()
buildspec_contents = extractor.extract_buildspec(output)
with open(buildspec_file, 'w') as f:
f.write(buildspec_contents)
with open(filename, 'w') as f:
f.write(serialize_to_json(output)) |
<SYSTEM_TASK:>
Return resources associated with a given stage.
<END_TASK>
<USER_TASK:>
Description:
def deployed_resources(self, chalice_stage_name):
# type: (str) -> DeployedResources
"""Return resources associated with a given stage.
If a deployment to a given stage has never happened,
this method will return a value of None.
""" |
# This is arguably the wrong level of abstraction.
# We might be able to move this elsewhere.
deployed_file = os.path.join(
self.project_dir, '.chalice', 'deployed',
'%s.json' % chalice_stage_name)
data = self._load_json_file(deployed_file)
if data is not None:
schema_version = data.get('schema_version', '1.0')
if schema_version != '2.0':
raise ValueError("Unsupported schema version (%s) in file: %s"
% (schema_version, deployed_file))
return DeployedResources(data)
return self._try_old_deployer_values(chalice_stage_name) |
<SYSTEM_TASK:>
Auto generate policy for an application.
<END_TASK>
<USER_TASK:>
Description:
def generate_policy(self, config):
# type: (Config) -> Dict[str, Any]
"""Auto generate policy for an application.""" |
# Admittedly, this is pretty bare bones logic for the time
# being. All it really does is work out, given a Config instance,
# which files need to analyzed and then delegates to the
# appropriately analyzer functions to do the real work.
# This may change in the future.
app_py = os.path.join(config.project_dir, 'app.py')
assert self._osutils.file_exists(app_py)
app_source = self._osutils.get_file_contents(app_py, binary=False)
app_policy = policy_from_source_code(app_source)
app_policy['Statement'].append(CLOUDWATCH_LOGS)
if config.subnet_ids and config.security_group_ids:
app_policy['Statement'].append(VPC_ATTACH_POLICY)
return app_policy |
<SYSTEM_TASK:>
Return all clients calls made in provided source code.
<END_TASK>
<USER_TASK:>
Description:
def get_client_calls(source_code):
# type: (str) -> APICallT
"""Return all clients calls made in provided source code.
:returns: A dict of service_name -> set([client calls]).
Example: {"s3": set(["list_objects", "create_bucket"]),
"dynamodb": set(["describe_table"])}
""" |
parsed = parse_code(source_code)
t = SymbolTableTypeInfer(parsed)
binder = t.bind_types()
collector = APICallCollector(binder)
api_calls = collector.collect_api_calls(parsed.parsed_ast)
return api_calls |
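Hypothetical usage, assuming the analyzer helpers above (parse_code, SymbolTableTypeInfer, APICallCollector) are in scope; the source string is made up:

source = (
    "import boto3\n"
    "s3 = boto3.client('s3')\n"
    "s3.list_objects(Bucket='mybucket')\n"
)
print(get_client_calls(source))  # expected to resolve to {'s3': {'list_objects'}}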
<SYSTEM_TASK:>
Return client calls for a chalice app.
<END_TASK>
<USER_TASK:>
Description:
def get_client_calls_for_app(source_code):
# type: (str) -> APICallT
"""Return client calls for a chalice app.
This is similar to ``get_client_calls`` except it will
automatically traverse into chalice views with the assumption
that they will be called.
""" |
parsed = parse_code(source_code)
parsed.parsed_ast = AppViewTransformer().visit(parsed.parsed_ast)
ast.fix_missing_locations(parsed.parsed_ast)
t = SymbolTableTypeInfer(parsed)
binder = t.bind_types()
collector = APICallCollector(binder)
api_calls = collector.collect_api_calls(parsed.parsed_ast)
return api_calls |
<SYSTEM_TASK:>
Match the url against known routes.
<END_TASK>
<USER_TASK:>
Description:
def match_route(self, url):
# type: (str) -> MatchResult
"""Match the url against known routes.
This method takes a concrete route "/foo/bar", and
matches it against a set of routes. These routes can
use param substitution corresponding to API gateway patterns.
For example::
match_route('/foo/bar') -> '/foo/{name}'
""" |
# Otherwise we need to check for param substitution
parsed_url = urlparse(url)
parsed_qs = parse_qs(parsed_url.query, keep_blank_values=True)
query_params = {k: v[-1] for k, v in parsed_qs.items()}
path = parsed_url.path
# API Gateway removes the trailing slash if the route is not the root
# path. We do the same here so our route matching works the same way.
if path != '/' and path.endswith('/'):
path = path[:-1]
parts = path.split('/')
captured = {}
for route_url in self.route_urls:
url_parts = route_url.split('/')
if len(parts) == len(url_parts):
for i, j in zip(parts, url_parts):
if j.startswith('{') and j.endswith('}'):
captured[j[1:-1]] = i
continue
if i != j:
break
else:
return MatchResult(route_url, captured, query_params)
raise ValueError("No matching route found for: %s" % url) |
<SYSTEM_TASK:>
Translate event for an authorizer input.
<END_TASK>
<USER_TASK:>
Description:
def _prepare_authorizer_event(self, arn, lambda_event, lambda_context):
# type: (str, EventType, LambdaContext) -> EventType
"""Translate event for an authorizer input.""" |
authorizer_event = lambda_event.copy()
authorizer_event['type'] = 'TOKEN'
try:
authorizer_event['authorizationToken'] = authorizer_event.get(
'headers', {})['authorization']
except KeyError:
raise NotAuthorizedError(
{'x-amzn-RequestId': lambda_context.aws_request_id,
'x-amzn-ErrorType': 'UnauthorizedException'},
b'{"message":"Unauthorized"}')
authorizer_event['methodArn'] = arn
return authorizer_event |
<SYSTEM_TASK:>
Estimate the frequency of the baseband signal using FFT
<END_TASK>
<USER_TASK:>
Description:
def estimate_frequency(self, start: int, end: int, sample_rate: float):
"""
Estimate the frequency of the baseband signal using FFT
:param start: Start of the area that shall be investigated
:param end: End of the area that shall be investigated
:param sample_rate: Sample rate of the signal
:return:
""" |
# ensure power of 2 for faster fft
length = 2 ** int(math.log2(end - start))
data = self.data[start:start + length]
try:
w = np.fft.fft(data)
frequencies = np.fft.fftfreq(len(w))
idx = np.argmax(np.abs(w))
freq = frequencies[idx]
freq_in_hertz = abs(freq * sample_rate)
except ValueError:
# No samples in window e.g. start == end, use a fallback
freq_in_hertz = 100e3
return freq_in_hertz |
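The same FFT peak-picking idea as a standalone sketch on a synthetic tone (the sample rate and tone frequency are illustrative):

import numpy as np

def estimate_frequency_sketch(data, sample_rate):
    w = np.fft.fft(data)
    frequencies = np.fft.fftfreq(len(w))
    idx = np.argmax(np.abs(w))
    return abs(frequencies[idx] * sample_rate)

sample_rate = 1e6
t = np.arange(4096) / sample_rate
tone = np.exp(2j * np.pi * 100e3 * t)                 # 100 kHz complex tone
print(estimate_frequency_sketch(tone, sample_rate))   # ~100 kHz, quantized to one FFT bin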
<SYSTEM_TASK:>
Build the order of components based on their priority and predecessors
<END_TASK>
<USER_TASK:>
Description:
def build_component_order(self):
"""
Build the order of components based on their priority and predecessors
:rtype: list of Component
""" |
present_components = [item for item in self.__dict__.values() if isinstance(item, Component) and item.enabled]
result = [None] * len(present_components)
used_prios = set()
for component in present_components:
index = component.priority % len(present_components)
if index in used_prios:
raise ValueError("Duplicate priority: {}".format(component.priority))
used_prios.add(index)
result[index] = component
# Check if predecessors are valid
for i, component in enumerate(result):
if any(i < result.index(pre) for pre in component.predecessors):
raise ValueError("Component {} comes before at least one of its predecessors".format(component))
return result |
<SYSTEM_TASK:>
This method clusters some bitvectors based on their length. An example output is
<END_TASK>
<USER_TASK:>
Description:
def cluster_lengths(self):
"""
This method clusters some bitvectors based on their length. An example output is
2: [0.5, 1]
4: [1, 0.75, 1, 1]
Meaning there were two message lengths: 2 and 4 bits.
(0.5, 1) means the first bit was equal in 50% of cases (i.e. maximum difference) and bit 2 was equal in all messages.
A simple XOR would not work as it would be error prone.
:rtype: dict[int, tuple[np.ndarray, int]]
""" |
number_ones = dict() # dict of tuple. 0 = number ones vector, 1 = number of blocks for this vector
for vector in self.bitvectors:
vec_len = 4 * (len(vector) // 4)
if vec_len == 0:
continue
if vec_len not in number_ones:
number_ones[vec_len] = [np.zeros(vec_len, dtype=int), 0]
number_ones[vec_len][0] += vector[0:vec_len]
number_ones[vec_len][1] += 1
# Calculate the relative numbers and normalize the equalness so e.g. 0.3 becomes 0.7
return {vl: (np.vectorize(lambda x: x if x >= 0.5 else 1 - x)(number_ones[vl][0] / number_ones[vl][1]))
for vl in number_ones if number_ones[vl][1] >= self.MIN_MESSAGES_PER_CLUSTER} |
<SYSTEM_TASK:>
Find candidate addresses using the LCS algorithm
<END_TASK>
<USER_TASK:>
Description:
def find_candidates(candidates):
"""
Find candidate addresses using the LCS algorithm and
perform a scoring based on how often a candidate appears in a longer candidate.
Input is something like
------------------------
['1b6033', '1b6033fd57', '701b603378e289', '20701b603378e289000c62',
'1b603300', '78e289757e', '7078e2891b6033000000', '207078e2891b6033000000']
Output like
-----------
{'1b6033': 18, '1b6033fd57': 1, '701b603378e289': 2, '207078e2891b6033000000': 1,
'57': 1, '7078e2891b6033000000': 2, '78e289757e': 1, '20701b603378e289000c62': 1,
'78e289': 4, '1b603300': 3}
:type candidates: list of CommonRange
:return:
""" |
result = defaultdict(int)
for i, c_i in enumerate(candidates):
for j in range(i, len(candidates)):
lcs = util.longest_common_substring(c_i.hex_value, candidates[j].hex_value)
if lcs:
result[lcs] += 1
return result |
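A self-contained sketch of the scoring over plain hex strings; the real code works on CommonRange objects and urh's util.longest_common_substring, so the naive LCS helper below is only an assumption for illustration:

from collections import defaultdict

def longest_common_substring(s1, s2):
    # Naive O(n*m) dynamic-programming variant; fine for short hex strings.
    best, best_end = 0, 0
    lengths = [[0] * (len(s2) + 1) for _ in range(len(s1) + 1)]
    for i in range(1, len(s1) + 1):
        for j in range(1, len(s2) + 1):
            if s1[i - 1] == s2[j - 1]:
                lengths[i][j] = lengths[i - 1][j - 1] + 1
                if lengths[i][j] > best:
                    best, best_end = lengths[i][j], i
    return s1[best_end - best:best_end]

def find_candidates_sketch(hex_values):
    result = defaultdict(int)
    for i, c_i in enumerate(hex_values):
        for j in range(i, len(hex_values)):
            lcs = longest_common_substring(c_i, hex_values[j])
            if lcs:
                result[lcs] += 1
    return dict(result)

print(find_candidates_sketch(['1b6033', '1b6033fd57', '701b603378e289']))
# {'1b6033': 4, '1b6033fd57': 1, '701b603378e289': 1}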
<SYSTEM_TASK:>
Choose a pair of address candidates ensuring they have the same length and starting with the highest scored ones
<END_TASK>
<USER_TASK:>
Description:
def choose_candidate_pair(candidates):
"""
Choose a pair of address candidates ensuring they have the same length and starting with the highest scored ones
:type candidates: dict[str, int]
:param candidates: Count how often the longest common substrings appeared in the messages
:return:
""" |
highscored = sorted(candidates, key=candidates.get, reverse=True)
for i, h_i in enumerate(highscored):
for h_j in highscored[i+1:]:
if len(h_i) == len(h_j):
yield (h_i, h_j) |
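Illustrative usage with made-up scores (loosely following the find_candidates example above):

scores = {'1b6033': 18, '78e289': 4, '1b603300': 3, '701b6033': 2}
for pair in choose_candidate_pair(scores):
    print(pair)
# ('1b6033', '78e289') is yielded first: both are 6 hex chars with the highest scores,
# ('1b603300', '701b6033') follows as the next equal-length pair.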
<SYSTEM_TASK:>
Continuous Haar wavelet transform based on the paper
<END_TASK>
<USER_TASK:>
Description:
def cwt_haar(x: np.ndarray, scale=10):
"""
Continuous Haar wavelet transform based on the paper
"A Practical Guide to Wavelet Analysis" by Christopher Torrence and Gilbert P. Compo.
""" |
next_power_two = 2 ** int(np.log2(len(x)))
x = x[0:next_power_two]
num_data = len(x)
# get FFT of x (eq. (3) in paper)
x_hat = np.fft.fft(x)
# Get omega (eq. (5) in paper)
f = (2.0 * np.pi / num_data)
omega = f * np.concatenate((np.arange(0, num_data // 2), np.arange(num_data // 2, num_data) * -1))
# get psi hat (eq. (6) in paper)
psi_hat = np.sqrt(2.0 * np.pi * scale) * normalized_haar_wavelet(scale * omega, scale)
# get W (eq. (4) in paper)
W = np.fft.ifft(x_hat * psi_hat)
return W[2 * scale:-2 * scale] |
<SYSTEM_TASK:>
Finding the synchronization works by finding the first difference between two messages.
<END_TASK>
<USER_TASK:>
Description:
def __find_sync_range(self, messages, preamble_end: int, search_end: int):
"""
Finding the synchronization works by finding the first difference between two messages.
This is performed for all messages and the most frequent first difference is chosen
:type messages: list of Message
:param preamble_end: End of preamble = start of search
:param search_end: End of search = start of first other label
""" |
possible_sync_pos = defaultdict(int)
for i, msg in enumerate(messages):
bits_i = msg.decoded_bits[preamble_end:search_end]
for j in range(i, len(messages)):
bits_j = messages[j].decoded_bits[preamble_end:search_end]
first_diff = next((k for k, (bit_i, bit_j) in enumerate(zip(bits_i, bits_j)) if bit_i != bit_j), None)
if first_diff is not None:
first_diff = preamble_end + 4 * (first_diff // 4)
if (first_diff - preamble_end) >= 4:
possible_sync_pos[(preamble_end, first_diff)] += 1
try:
sync_interval = max(possible_sync_pos, key=possible_sync_pos.__getitem__)
return sync_interval
except ValueError:
return None |
<SYSTEM_TASK:>
Search all differences between protocol messages regarding a reference message
<END_TASK>
<USER_TASK:>
Description:
def find_differences(self, refindex: int):
"""
Search all differences between protocol messages regarding a reference message
:param refindex: index of reference message
:rtype: dict[int, set[int]]
""" |
differences = defaultdict(set)
if refindex >= len(self.protocol.messages):
return differences
if self.proto_view == 0:
proto = self.protocol.decoded_proto_bits_str
elif self.proto_view == 1:
proto = self.protocol.decoded_hex_str
elif self.proto_view == 2:
proto = self.protocol.decoded_ascii_str
else:
return differences
ref_message = proto[refindex]
ref_offset = self.get_alignment_offset_at(refindex)
for i, message in enumerate(proto):
if i == refindex:
continue
msg_offset = self.get_alignment_offset_at(i)
short, long = sorted([len(ref_message) + ref_offset, len(message) + msg_offset])
differences[i] = {
j for j in range(max(msg_offset, ref_offset), long)
if j >= short or message[j - msg_offset] != ref_message[j - ref_offset]
}
return differences |
<SYSTEM_TASK:>
Return true if redraw is needed
<END_TASK>
<USER_TASK:>
Description:
def set_parameters(self, samples: np.ndarray, window_size, data_min, data_max) -> bool:
"""
Return true if redraw is needed
""" |
redraw_needed = False
if self.samples_need_update:
self.spectrogram.samples = samples
redraw_needed = True
self.samples_need_update = False
if window_size != self.spectrogram.window_size:
self.spectrogram.window_size = window_size
redraw_needed = True
if data_min != self.spectrogram.data_min:
self.spectrogram.data_min = data_min
redraw_needed = True
if data_max != self.spectrogram.data_max:
self.spectrogram.data_max = data_max
redraw_needed = True
return redraw_needed |
<SYSTEM_TASK:>
Return the length of this message in bytes.
<END_TASK>
<USER_TASK:>
Description:
def get_byte_length(self, decoded=True) -> int:
"""
Return the length of this message in bytes.
""" |
end = len(self.decoded_bits) if decoded else len(self.__plain_bits)
end = self.convert_index(end, 0, 2, decoded=decoded)[0]
return int(end) |
<SYSTEM_TASK:>
Return the SRC address of a message if SRC_ADDRESS label is present in message type of the message
<END_TASK>
<USER_TASK:>
Description:
def get_src_address_from_data(self, decoded=True):
"""
Return the SRC address of a message if SRC_ADDRESS label is present in message type of the message
Return None otherwise
:param decoded:
:return:
""" |
src_address_label = next((lbl for lbl in self.message_type if lbl.field_type
and lbl.field_type.function == FieldType.Function.SRC_ADDRESS), None)
if src_address_label:
start, end = self.get_label_range(src_address_label, view=1, decode=decoded)
if decoded:
src_address = self.decoded_hex_str[start:end]
else:
src_address = self.plain_hex_str[start:end]
else:
src_address = None
return src_address |
<SYSTEM_TASK:>
Set all protocols in copy mode. They will return a copy of their protocol.
<END_TASK>
<USER_TASK:>
Description:
def set_copy_mode(self, use_copy: bool):
"""
Set all protocols in copy mode. They will return a copy of their protocol.
This is used for writable mode in CFC.
:param use_copy:
:return:
""" |
for group in self.rootItem.children:
for proto in group.children:
proto.copy_data = use_copy |
<SYSTEM_TASK:>
Push values to buffer. If buffer can't store all values a ValueError is raised
<END_TASK>
<USER_TASK:>
Description:
def push(self, values: np.ndarray):
"""
Push values to buffer. If buffer can't store all values a ValueError is raised
""" |
n = len(values)
if len(self) + n > self.size:
raise ValueError("Too much data to push to RingBuffer")
slide_1 = np.s_[self.right_index:min(self.right_index + n, self.size)]
slide_2 = np.s_[:max(self.right_index + n - self.size, 0)]
with self.__data.get_lock():
data = np.frombuffer(self.__data.get_obj(), dtype=np.complex64)
data[slide_1] = values[:slide_1.stop - slide_1.start]
data[slide_2] = values[slide_1.stop - slide_1.start:]
self.right_index += n
self.__length.value += n |
<SYSTEM_TASK:>
Pop number of elements. If there are not enough elements, all remaining elements are returned and the
<END_TASK>
<USER_TASK:>
Description:
def pop(self, number: int, ensure_even_length=False):
"""
Pop number of elements. If there are not enough elements, all remaining elements are returned and the
buffer is cleared afterwards. If buffer is empty, an empty numpy array is returned.
If number is -1 (or any other value below zero) then the complete buffer is returned
""" |
if ensure_even_length:
number -= number % 2
if len(self) == 0 or number == 0:
return np.array([], dtype=np.complex64)
if number < 0:
# take everything
number = len(self)
else:
number = min(number, len(self))
with self.__data.get_lock():
data = np.frombuffer(self.__data.get_obj(), dtype=np.complex64)
result = np.empty(number, dtype=np.complex64)
if self.left_index + number > len(data):
end = len(data) - self.left_index
else:
end = number
result[:end] = data[self.left_index:self.left_index + end]
if end < number:
result[end:] = data[:number-end]
self.left_index += number
self.__length.value -= number
return result |
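A simplified single-process sketch of the push/pop mechanics above, without the shared-memory array and lock (indices are plain ints and wrapped explicitly):

import numpy as np

class SimpleRingBuffer:
    def __init__(self, size):
        self.size = size
        self.data = np.zeros(size, dtype=np.complex64)
        self.left_index = 0
        self.right_index = 0
        self.length = 0

    def push(self, values):
        n = len(values)
        if self.length + n > self.size:
            raise ValueError("Too much data to push to RingBuffer")
        right = self.right_index % self.size
        first = min(n, self.size - right)          # part before wrap-around
        self.data[right:right + first] = values[:first]
        self.data[:n - first] = values[first:]     # wrapped part (may be empty)
        self.right_index += n
        self.length += n

    def pop(self, number):
        number = self.length if number < 0 else min(number, self.length)
        result = np.empty(number, dtype=np.complex64)
        left = self.left_index % self.size
        first = min(number, self.size - left)
        result[:first] = self.data[left:left + first]
        result[first:] = self.data[:number - first]
        self.left_index += number
        self.length -= number
        return result

buf = SimpleRingBuffer(8)
buf.push(np.arange(5, dtype=np.complex64))
print(buf.pop(3))   # [0.+0.j 1.+0.j 2.+0.j]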
<SYSTEM_TASK:>
Scrolls the mouse if ROI Selection reaches corner of view
<END_TASK>
<USER_TASK:>
Description:
def scroll_mouse(self, mouse_x: int):
"""
Scrolls the mouse if ROI Selection reaches corner of view
:param mouse_x:
:return:
""" |
scrollbar = self.horizontalScrollBar()
if mouse_x - self.view_rect().x() > self.view_rect().width():
scrollbar.setValue(scrollbar.value() + 5)
elif mouse_x < self.view_rect().x():
scrollbar.setValue(scrollbar.value() - 5) |
<SYSTEM_TASK:>
Return the boundaries of the view in scene coordinates
<END_TASK>
<USER_TASK:>
Description:
def view_rect(self) -> QRectF:
"""
Return the boundaries of the view in scene coordinates
""" |
top_left = self.mapToScene(0, 0)
bottom_right = self.mapToScene(self.viewport().width() - 1, self.viewport().height() - 1)
return QRectF(top_left, bottom_right) |
<SYSTEM_TASK:>
get start and end index of bit sequence from selected samples
<END_TASK>
<USER_TASK:>
Description:
def get_bitseq_from_selection(self, selection_start: int, selection_width: int):
"""
get start and end index of bit sequence from selected samples
:rtype: tuple[int,int,int,int]
:return: start_message index, start index, end message index, end index
""" |
start_message, start_index, end_message, end_index = -1, -1, -1, -1
if not self.messages or not self.messages[0].bit_sample_pos:
return start_message, start_index, end_message, end_index
if selection_start + selection_width < self.messages[0].bit_sample_pos[0]:
return start_message, start_index, end_message, end_index
for i, msg in enumerate(self.messages):
msg_sample_pos = msg.bit_sample_pos
if msg_sample_pos[-2] < selection_start:
continue
elif start_message == -1:
start_message = i
for j, sample_pos in enumerate(msg_sample_pos):
if sample_pos < selection_start:
continue
elif start_index == -1:
start_index = j
if msg_sample_pos[-1] - selection_start < selection_width:
break
elif sample_pos - selection_start > selection_width:
return start_message, start_index, i, j
elif msg_sample_pos[-1] - selection_start < selection_width:
continue
else:
for j, sample_pos in enumerate(msg_sample_pos):
if sample_pos - selection_start > selection_width:
return start_message, start_index, i, j
last_message = len(self.messages) - 1
last_index = len(self.messages[-1].plain_bits) + 1
return start_message, start_index, last_message, last_index |
<SYSTEM_TASK:>
Calculates the frequency of at most nbits logical ones and returns the mean of these frequencies
<END_TASK>
<USER_TASK:>
Description:
def estimate_frequency_for_one(self, sample_rate: float, nbits=42) -> float:
"""
Calculates the frequency of at most nbits logical ones and returns the mean of these frequencies
:param nbits:
:return:
""" |
return self.__estimate_frequency_for_bit(True, sample_rate, nbits) |