text_prompt
stringlengths 100
17.7k
⌀ | code_prompt
stringlengths 7
9.86k
⌀ |
---|---|
<SYSTEM_TASK:>
Helper to handle the conversion of method_settings to operations
<END_TASK>
<USER_TASK:>
Description:
def _convert_method_settings_into_operations(method_settings=None):
    """Helper to handle the conversion of method_settings to operations
    :param method_settings: mapping of method path -> {setting: value}
    :return: list of operations
    """
    # operations docs here: https://tools.ietf.org/html/rfc6902#section-4
    if not method_settings:
        return []
    ops = []
    for method, settings in method_settings.items():
        for setting_key, setting_value in settings.items():
            # Booleans are serialized as the strings 'true'/'false'.
            if isinstance(setting_value, bool):
                setting_value = 'true' if setting_value else 'false'
            ops.append({
                'op': 'replace',
                'path': method + _resolve_key(setting_key),
                'value': setting_value,
            })
    return ops
<SYSTEM_TASK:>
This command is run when ``default_path`` doesn't exist, or ``init`` is
<END_TASK>
<USER_TASK:>
Description:
def generate_settings():
    """
    This command is run when ``default_path`` doesn't exist, or ``init`` is
    run and returns a string representing the default data to put into their
    settings file.
    """
    # Read the packaged example configuration to use as a template.
    conf_file = os.path.join(os.path.dirname(base_settings.__file__),
                             'example', 'conf.py')
    # BUG FIX: the file handle was previously opened without ever being
    # closed; use a context manager so it is released deterministically.
    with open(conf_file) as handle:
        conf_template = handle.read()
    default_url = 'http://salmon.example.com'
    # Prompt for the site URL; an empty answer keeps the default.
    site_url = raw_input("What will be the URL for Salmon? [{0}]".format(
        default_url))
    site_url = site_url or default_url
    # Fresh random keys for this installation.
    secret_key = base64.b64encode(os.urandom(KEY_LENGTH))
    api_key = base64.b64encode(os.urandom(KEY_LENGTH))
    output = conf_template.format(api_key=api_key, secret_key=secret_key,
                                  site_url=site_url)
    return output
<SYSTEM_TASK:>
Builds up the settings using the same method as logan
<END_TASK>
<USER_TASK:>
Description:
def configure_app(**kwargs):
    """Builds up the settings using the same method as logan"""
    # Split the raw argv into logan-style (args, command, command_args).
    cli_args, command, command_args = parse_args(sys.argv[1:])
    opt_parser = OptionParser()
    opt_parser.add_option('--config', metavar='CONFIG')
    options, _remaining = opt_parser.parse_args(cli_args)
    # Hand the optional --config path straight to logan.
    logan_configure(config_path=options.config, **kwargs)
<SYSTEM_TASK:>
Stores current values for comparison later
<END_TASK>
<USER_TASK:>
Description:
def _reset_changes(self):
"""Stores current values for comparison later""" |
self._original = {}
if self.last_updated is not None:
self._original['last_updated'] = self.last_updated |
<SYSTEM_TASK:>
Build a file path to the Whisper database
<END_TASK>
<USER_TASK:>
Description:
def whisper_filename(self):
    """Build a file path to the Whisper database"""
    # Metrics without a source use an empty prefix before the separator.
    prefix = self.source.name if self.source_id else ''
    return get_valid_filename("{0}__{1}.wsp".format(prefix, self.name))
<SYSTEM_TASK:>
Time between current `last_updated` and previous `last_updated`
<END_TASK>
<USER_TASK:>
Description:
def time_between_updates(self):
    """Time between current `last_updated` and previous `last_updated`"""
    try:
        previous = self._original['last_updated']
    except KeyError:
        # No prior snapshot recorded yet, so there is no delta to report.
        return 0
    return self.last_updated - previous
<SYSTEM_TASK:>
Update latest value to the diff between it and the previous value
<END_TASK>
<USER_TASK:>
Description:
def do_counter_conversion(self):
    """Update latest value to the diff between it and the previous value"""
    if not self.is_counter:
        return
    # On the very first reading we diff against the reading itself, so the
    # stored delta is zero rather than the raw counter value.
    previous = (self.latest_value
                if self._previous_counter_value is None
                else self._previous_counter_value)
    self._previous_counter_value = self.latest_value
    self.latest_value = self.latest_value - previous
<SYSTEM_TASK:>
Evaluate expression and return result
<END_TASK>
<USER_TASK:>
Description:
def result(self):
    """Evaluate expression and return result"""
    # ast.parse gives Module(body=[Expr(value=...)]); hand the inner
    # value node to the evaluator.
    tree = ast.parse(self.expr)
    return self.eval_(tree.body[0].value)
<SYSTEM_TASK:>
Given a request, an email and optionally some additional data, ensure that
<END_TASK>
<USER_TASK:>
Description:
def email_login(request, *, email, **kwargs):
    """
    Given a request, an email and optionally some additional data, ensure that
    a user with the email address exists, and authenticate & login them right
    away if the user is active.
    Returns a tuple consisting of ``(user, created)`` upon success or ``(None,
    None)`` when authentication fails.
    """
    manager = auth.get_user_model()._default_manager
    _unused, created = manager.get_or_create(email=email)
    authenticated = auth.authenticate(request, email=email)
    # The is_active check is possibly redundant.
    if not (authenticated and authenticated.is_active):
        return None, None
    auth.login(request, authenticated)
    return authenticated, created
<SYSTEM_TASK:>
Shows the latest results for each source
<END_TASK>
<USER_TASK:>
Description:
def dashboard(request):
    """Shows the latest results for each source"""
    all_sources = (models.Source.objects.all()
                   .prefetch_related('metric_set').order_by('name'))
    metrics = SortedDict([(src, src.metric_set.all()) for src in all_sources])
    # Metrics without a source are grouped under an empty heading.
    orphan_metrics = models.Metric.objects.filter(source__isnull=True)
    if orphan_metrics:
        metrics[''] = orphan_metrics
    # PJAX requests render without the full page chrome.
    parent_template = ('pjax.html'
                       if request.META.get('HTTP_X_PJAX', False)
                       else 'base.html')
    return render(request, 'metrics/dashboard.html', {
        'source_metrics': metrics,
        'parent_template': parent_template
    })
<SYSTEM_TASK:>
This method store in the datapoints in the current database.
<END_TASK>
<USER_TASK:>
Description:
def _update(self, datapoints):
    """
    This method store in the datapoints in the current database.
    :datapoints: is a list of tupple with the epoch timestamp and value
    [(1368977629,10)]
    """
    # A single datapoint can use the cheaper single-write API.
    if len(datapoints) != 1:
        whisper.update_many(self.path, datapoints)
    else:
        ts, val = datapoints[0]
        whisper.update(self.path, val, ts)
<SYSTEM_TASK:>
This method fetch data from the database according to the period
<END_TASK>
<USER_TASK:>
Description:
def fetch(self, from_time, until_time=None):
    """
    This method fetch data from the database according to the period
    given
    fetch(path, fromTime, untilTime=None)
    fromTime is an datetime
    untilTime is also an datetime, but defaults to now.
    Returns a tuple of (timeInfo, valueList)
    where timeInfo is itself a tuple of (fromTime, untilTime, step)
    Returns None if no data can be returned
    """
    until_time = until_time or datetime.now()
    # NOTE(review): strftime('%s') is a platform-specific (glibc) extension
    # that renders the epoch seconds in local time; it is not part of the
    # C89 format set Python guarantees -- confirm portability requirements.
    time_info, values = whisper.fetch(self.path,
                                      from_time.strftime('%s'),
                                      until_time.strftime('%s'))
    # build up a list of (timestamp, value)
    start_time, end_time, step = time_info
    current = start_time
    times = []
    # Reconstruct the timestamp axis from the (start, end, step) tuple
    # whisper returned, one tick per stored value.
    while current <= end_time:
        times.append(current)
        current += step
    return zip(times, values)
<SYSTEM_TASK:>
CMN Ra, Rb
<END_TASK>
<USER_TASK:>
Description:
def CMN(self, params):
    """
    CMN Ra, Rb
    Add the two registers and set the NZCV flags
    The result is discarded
    Ra and Rb must be low registers
    """
    Ra, Rb = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
    self.check_arguments(low_registers=(Ra, Rb))

    def CMN_func():
        # The sum exists only to update the flags; no register is written.
        lhs = self.register[Ra]
        rhs = self.register[Rb]
        self.set_NZCV_flags(lhs, rhs, lhs + rhs, 'add')
    return CMN_func
<SYSTEM_TASK:>
MULS Ra, Rb, Ra
<END_TASK>
<USER_TASK:>
Description:
def MULS(self, params):
    """
    MULS Ra, Rb, Ra
    Multiply Rb and Ra together and store the result in Ra.
    Set the NZ flags.
    Ra and Rb must be low registers
    The first and last operand must be the same register
    """
    Ra, Rb, Rc = self.get_three_parameters(
        self.THREE_PARAMETER_COMMA_SEPARATED, params)
    self.check_arguments(low_registers=(Ra, Rb, Rc))
    # The destination must be repeated as the final operand.
    if Ra != Rc:
        raise iarm.exceptions.RuleError(
            "Third parameter {} is not the same as the first parameter {}".format(Rc, Ra))

    def MULS_func():
        self.register[Ra] = self.register[Rb] * self.register[Rc]
        self.set_NZ_flags(self.register[Ra])
    return MULS_func
<SYSTEM_TASK:>
Get dist for installed version of dist_name avoiding pkg_resources cache
<END_TASK>
<USER_TASK:>
Description:
def get_dist(dist_name, lookup_dirs=None):
    """Get dist for installed version of dist_name avoiding pkg_resources cache
    """
    # note: based on pip/utils/__init__.py, get_installed_version(...)
    requirement = pkg_resources.Requirement.parse(dist_name)
    # Construct a throwaway WorkingSet on every call so nothing is cached.
    working_set = (pkg_resources.WorkingSet()
                   if lookup_dirs is None
                   else pkg_resources.WorkingSet(lookup_dirs))
    # Resolve the requirement against the fresh working set.
    return working_set.find(requirement)
<SYSTEM_TASK:>
Load hook module and register signals.
<END_TASK>
<USER_TASK:>
Description:
def _load_hooks(path):
    """Load hook module and register signals.
    :param path: Absolute or relative path to module.
    :return: module
    """
    module_name = os.path.splitext(os.path.basename(path))[0]
    module = imp.load_source(module_name, path)
    if not check_hook_mechanism_is_intact(module):
        # no hooks - do nothing
        log.debug('No valid hook configuration: \'%s\'. Not using hooks!', path)
    elif check_register_present(module):
        # register the template hooks so they listen to gcdt_signals
        module.register()
    return module
<SYSTEM_TASK:>
gcdt tools parametrized main function to initiate gcdt lifecycle.
<END_TASK>
<USER_TASK:>
Description:
def main(doc, tool, dispatch_only=None):
    """gcdt tools parametrized main function to initiate gcdt lifecycle.
    :param doc: docopt string
    :param tool: gcdt tool (gcdt, kumo, tenkai, ramuda, yugen)
    :param dispatch_only: list of commands which do not use gcdt lifecycle
    :return: exit_code
    """
    # Use signal handler to throw exception which can be caught to allow
    # graceful exit.
    # here: https://stackoverflow.com/questions/26414704/how-does-a-python-process-exit-gracefully-after-receiving-sigterm-while-waiting
    signal.signal(signal.SIGTERM, signal_handler)  # Jenkins
    signal.signal(signal.SIGINT, signal_handler)  # Ctrl-C
    try:
        arguments = docopt(doc, sys.argv[1:])
        command = get_command(arguments)
        # DEBUG mode (if requested)
        verbose = arguments.pop('--verbose', False)
        if verbose:
            logging_config['loggers']['gcdt']['level'] = 'DEBUG'
        dictConfig(logging_config)
        if dispatch_only is None:
            dispatch_only = ['version']
        assert tool in ['gcdt', 'kumo', 'tenkai', 'ramuda', 'yugen']
        if command in dispatch_only:
            # handle commands that do not need a lifecycle
            # Note: `dispatch_only` commands do not have a check for ENV variable!
            check_gcdt_update()
            return cmd.dispatch(arguments)
        else:
            env = get_env()
            if not env:
                log.error('\'ENV\' environment variable not set!')
                return 1
            awsclient = AWSClient(botocore.session.get_session())
            return lifecycle(awsclient, env, tool, command, arguments)
    except GracefulExit as e:
        # The signal handlers above raise GracefulExit; report which
        # command was interrupted and exit non-zero.
        log.info('Received %s signal - exiting command \'%s %s\'',
                 str(e), tool, command)
        return 1
<SYSTEM_TASK:>
MVNS Ra, Rb
<END_TASK>
<USER_TASK:>
Description:
def MVNS(self, params):
    """
    MVNS Ra, Rb
    Negate the value in Rb and store it in Ra
    Ra and Rb must be a low register
    """
    Ra, Rb = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
    self.check_arguments(low_registers=(Ra, Rb))

    def MVNS_func():
        # Bitwise NOT of Rb into Ra, then refresh the N and Z flags from
        # the value actually stored in Ra.
        self.register[Ra] = ~self.register[Rb]
        self.set_NZ_flags(self.register[Ra])
    return MVNS_func
<SYSTEM_TASK:>
REV Ra, Rb
<END_TASK>
<USER_TASK:>
Description:
def REV(self, params):
    """
    REV Ra, Rb
    Reverse the byte order in register Rb and store the result in Ra
    """
    Ra, Rb = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
    self.check_arguments(low_registers=(Ra, Rb))

    def REV_func():
        word = self.register[Rb]
        # Swap all four bytes: 0xAABBCCDD -> 0xDDCCBBAA.
        self.register[Ra] = (((word & 0xFF000000) >> 24) |
                             ((word & 0x00FF0000) >> 8) |
                             ((word & 0x0000FF00) << 8) |
                             ((word & 0x000000FF) << 24))
    return REV_func
<SYSTEM_TASK:>
REV16 Ra, Rb
<END_TASK>
<USER_TASK:>
Description:
def REV16(self, params):
    """
    REV16 Ra, Rb
    Reverse the byte order of the half words in register Rb and store the result in Ra
    """
    Ra, Rb = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
    self.check_arguments(low_registers=(Ra, Rb))

    def REV16_func():
        word = self.register[Rb]
        # Swap the two bytes inside each 16-bit half word independently.
        self.register[Ra] = (((word & 0xFF00FF00) >> 8) |
                             ((word & 0x00FF00FF) << 8))
    return REV16_func
<SYSTEM_TASK:>
STXB Ra, Rb
<END_TASK>
<USER_TASK:>
Description:
def SXTB(self, params):
    """
    SXTB Ra, Rb
    Sign extend the byte in Rb and store the result in Ra
    """
    Ra, Rb = self.get_two_parameters(r'\s*([^\s,]*),\s*([^\s,]*)(,\s*[^\s,]*)*\s*', params)
    self.check_arguments(low_registers=(Ra, Rb))

    def SXTB_func():
        # Bit 7 is the sign bit of the low byte; when set, fill the upper
        # 24 bits with ones, otherwise keep only the low byte.
        if self.register[Rb] & (1 << 7):
            self.register[Ra] = 0xFFFFFF00 + (self.register[Rb] & 0xFF)
        else:
            self.register[Ra] = (self.register[Rb] & 0xFF)
    return SXTB_func
<SYSTEM_TASK:>
STXH Ra, Rb
<END_TASK>
<USER_TASK:>
Description:
def SXTH(self, params):
    """
    SXTH Ra, Rb
    Sign extend the half word in Rb and store the result in Ra
    """
    Ra, Rb = self.get_two_parameters(r'\s*([^\s,]*),\s*([^\s,]*)(,\s*[^\s,]*)*\s*', params)
    self.check_arguments(low_registers=(Ra, Rb))

    def SXTH_func():
        # Bit 15 is the sign bit of the low half word; when set, fill the
        # upper 16 bits with ones, otherwise keep only the low half word.
        if self.register[Rb] & (1 << 15):
            self.register[Ra] = 0xFFFF0000 + (self.register[Rb] & 0xFFFF)
        else:
            self.register[Ra] = (self.register[Rb] & 0xFFFF)
    return SXTH_func
<SYSTEM_TASK:>
UTXB Ra, Rb
<END_TASK>
<USER_TASK:>
Description:
def UXTB(self, params):
    """
    UXTB Ra, Rb
    Zero extend the byte in Rb and store the result in Ra
    """
    Ra, Rb = self.get_two_parameters(r'\s*([^\s,]*),\s*([^\s,]*)(,\s*[^\s,]*)*\s*', params)
    self.check_arguments(low_registers=(Ra, Rb))

    def UXTB_func():
        # Keep only the low byte; the upper 24 bits become zero.
        self.register[Ra] = (self.register[Rb] & 0xFF)
    return UXTB_func
<SYSTEM_TASK:>
UTXH Ra, Rb
<END_TASK>
<USER_TASK:>
Description:
def UXTH(self, params):
    """
    UXTH Ra, Rb
    Zero extend the half word in Rb and store the result in Ra
    """
    Ra, Rb = self.get_two_parameters(r'\s*([^\s,]*),\s*([^\s,]*)(,\s*[^\s,]*)*\s*', params)
    self.check_arguments(low_registers=(Ra, Rb))

    def UXTH_func():
        # Keep only the low half word; the upper 16 bits become zero.
        self.register[Ra] = (self.register[Rb] & 0xFFFF)
    return UXTH_func
<SYSTEM_TASK:>
Given awsclient, event_source dictionary item
<END_TASK>
<USER_TASK:>
Description:
def _get_event_source_obj(awsclient, evt_source):
    """
    Given awsclient, event_source dictionary item
    create an event_source object of the appropriate event type
    to schedule this event, and return the object.
    :param awsclient: client wrapper passed to the event source constructor
    :param evt_source: event source configuration dict (must contain 'arn')
    :return: instantiated event source object
    :raises ValueError: when the event type cannot be resolved
    """
    event_source_map = {
        'dynamodb': event_source.dynamodb_stream.DynamoDBStreamEventSource,
        'kinesis': event_source.kinesis.KinesisEventSource,
        's3': event_source.s3.S3EventSource,
        'sns': event_source.sns.SNSEventSource,
        'events': event_source.cloudwatch.CloudWatchEventSource,
        'cloudfront': event_source.cloudfront.CloudFrontEventSource,
        'cloudwatch_logs': event_source.cloudwatch_logs.CloudWatchLogsEventSource,
    }
    evt_type = _get_event_type(evt_source)
    event_source_func = event_source_map.get(evt_type, None)
    # BUG FIX: this previously tested `event_source` (the always-truthy
    # module) instead of the lookup result, so an unknown event type fell
    # through and crashed with `None(...)` on the return line below.
    if not event_source_func:
        raise ValueError('Unknown event source: {0}'.format(
            evt_source['arn']))
    return event_source_func(awsclient, evt_source)
<SYSTEM_TASK:>
Unwire a list of event from an AWS Lambda function.
<END_TASK>
<USER_TASK:>
Description:
def unwire(awsclient, events, lambda_name, alias_name=ALIAS_NAME):
    """Unwire a list of event from an AWS Lambda function.
    'events' is a list of dictionaries, where the dict must contains the
    'schedule' of the event as string, and an optional 'name' and 'description'.
    :param awsclient:
    :param events: list of events
    :param lambda_name:
    :param alias_name:
    :return: exit_code
    """
    if not lambda_exists(awsclient, lambda_name):
        log.error(colored.red('The function you try to wire up doesn\'t ' +
                              'exist... Bailing out...'))
        return 1
    client_lambda = awsclient.get_client('lambda')
    lambda_function = client_lambda.get_function(FunctionName=lambda_name)
    # Events are detached from the alias ARN, not the bare function ARN.
    lambda_arn = client_lambda.get_alias(FunctionName=lambda_name,
                                         Name=alias_name)['AliasArn']
    log.info('UN-wiring lambda_arn %s ' % lambda_arn)
    # TODO why load the policies here? (a dead get_policy/ClientError snippet
    # lived here previously; see VCS history if it needs to be restored)
    if lambda_function is not None:
        # _unschedule_events(awsclient, events, lambda_arn) was replaced by
        # per-event source removal below.
        for event in events:
            _remove_event_source(awsclient, event['event_source'], lambda_arn)
    return 0
<SYSTEM_TASK:>
Use only prefix OR suffix
<END_TASK>
<USER_TASK:>
Description:
def _lambda_add_s3_event_source(awsclient, arn, event, bucket, prefix,
                                suffix):
    """Use only prefix OR suffix
    :param arn:
    :param event:
    :param bucket:
    :param prefix:
    :param suffix:
    :return:
    """
    # http://docs.aws.amazon.com/cli/latest/reference/s3api/put-bucket-notification-configuration.html
    # http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
    new_configuration = {
        'LambdaFunctionArn': arn,
        'Id': str(uuid.uuid1()),
        'Events': [event],
        'Filter': {
            'Key': {
                'FilterRules': build_filter_rules(prefix, suffix)
            }
        }
    }
    client_s3 = awsclient.get_client('s3')
    bucket_configurations = client_s3.get_bucket_notification_configuration(
        Bucket=bucket)
    # The raw response metadata must not be echoed back to put_*.
    bucket_configurations.pop('ResponseMetadata')
    existing = bucket_configurations.get('LambdaFunctionConfigurations')
    if existing is None:
        bucket_configurations['LambdaFunctionConfigurations'] = [new_configuration]
    else:
        existing.append(new_configuration)
    response = client_s3.put_bucket_notification_configuration(
        Bucket=bucket,
        NotificationConfiguration=bucket_configurations
    )
    # TODO don't return a table, but success state
    return json2table(response)
<SYSTEM_TASK:>
Helper to read the params for the logs command
<END_TASK>
<USER_TASK:>
Description:
def check_and_format_logs_params(start, end, tail):
    """Helper to read the params for the logs command"""
    durations = {'m': 'minutes', 'h': 'hours', 'd': 'days', 'w': 'weeks'}

    def _relative(spec):
        # '5m' / '2h' / '1d' / '3w' -> naive datetime that far in the past
        return maya.now().subtract(
            **{durations[spec[-1]]: int(spec[:-1])}).datetime(naive=True)

    if not start:
        # Default window: 5 minutes when tailing, otherwise one day.
        if tail:
            start_dt = maya.now().subtract(seconds=300).datetime(naive=True)
        else:
            start_dt = maya.now().subtract(days=1).datetime(naive=True)
    elif start[-1] in durations:
        start_dt = _relative(start)
    else:
        start_dt = maya.parse(start).datetime(naive=True)
    if end and end[-1] in durations:
        end_dt = _relative(end)
    elif end:
        end_dt = maya.parse(end).datetime(naive=True)
    else:
        end_dt = None
    return start_dt, end_dt
<SYSTEM_TASK:>
Upload a file to AWS S3 bucket.
<END_TASK>
<USER_TASK:>
Description:
def upload_file_to_s3(awsclient, bucket, key, filename):
    """Upload a file to AWS S3 bucket.
    :param awsclient:
    :param bucket:
    :param key:
    :param filename:
    :return: (etag, version_id)
    """
    client_s3 = awsclient.get_client('s3')
    # Upload /tmp/myfile to s3://bucket/key and print upload progress.
    S3Transfer(client_s3).upload_file(filename, bucket, key)
    # Read back the object metadata of the freshly uploaded file.
    head = client_s3.head_object(Bucket=bucket, Key=key)
    return head.get('ETag'), head.get('VersionId', None)
<SYSTEM_TASK:>
Remove a file from an AWS S3 bucket.
<END_TASK>
<USER_TASK:>
Description:
def remove_file_from_s3(awsclient, bucket, key):
    """Remove a file from an AWS S3 bucket.
    :param awsclient:
    :param bucket:
    :param key:
    :return:
    """
    # Fire-and-forget delete; the API response is not inspected.
    awsclient.get_client('s3').delete_object(Bucket=bucket, Key=key)
<SYSTEM_TASK:>
TST Ra, Rb
<END_TASK>
<USER_TASK:>
Description:
def TST(self, params):
    """
    TST Ra, Rb
    AND Ra and Rb together and update the NZ flag. The result is not set
    The equivalent of `Ra & Rc`
    Ra and Rb must be low registers
    """
    Ra, Rb = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
    self.check_arguments(low_registers=(Ra, Rb))

    def TST_func():
        # Flags reflect Ra & Rb; the registers themselves stay unchanged.
        self.set_NZ_flags(self.register[Ra] & self.register[Rb])
    return TST_func
<SYSTEM_TASK:>
Renders a mail and returns the resulting ``EmailMultiAlternatives``
<END_TASK>
<USER_TASK:>
Description:
def render_to_mail(template, context, **kwargs):
    """
    Renders a mail and returns the resulting ``EmailMultiAlternatives``
    instance
    * ``template``: The base name of the text and HTML (optional) version of
    the mail.
    * ``context``: The context used to render the mail. This context instance
    should contain everything required.
    * Additional keyword arguments are passed to the ``EmailMultiAlternatives``
    instantiation. Use those to specify the ``to``, ``headers`` etc.
    arguments.
    Usage example::
    # Render the template myproject/hello_mail.txt (first non-empty line
    # contains the subject, third to last the body) and optionally the
    # template myproject/hello_mail.html containing the alternative HTML
    # representation.
    message = render_to_mail('myproject/hello_mail', {}, to=[email])
    message.send()
    """
    # NOTE: ``lines`` is deliberately an *iterator*: the loop below consumes
    # lines up to and including the subject, so the later ``"\n".join(lines)``
    # only sees the remaining (body) lines.
    lines = iter(
        line.rstrip()
        for line in render_to_string("%s.txt" % template, context).splitlines()
    )
    subject = ""
    try:
        while True:
            line = next(lines)
            # The first non-empty line is the subject.
            if line:
                subject = line
                break
    except StopIteration:  # if lines is empty
        pass
    body = "\n".join(lines).strip("\n")
    message = EmailMultiAlternatives(subject=subject, body=body, **kwargs)
    try:
        # Attach the HTML alternative only when that template exists.
        message.attach_alternative(
            render_to_string("%s.html" % template, context), "text/html"
        )
    except TemplateDoesNotExist:
        pass
    return message
<SYSTEM_TASK:>
Use service discovery to get the host zone name from the default stack
<END_TASK>
<USER_TASK:>
Description:
def _retrieve_stack_host_zone_name(awsclient, default_stack_name=None):
    """
    Use service discovery to get the host zone name from the default stack
    :return: Host zone name as string
    """
    global _host_zone_name
    # Result is cached module-wide after the first successful lookup.
    if _host_zone_name is not None:
        return _host_zone_name
    env = get_env()
    if env is None:
        print("Please set environment...")
        # TODO: why is there a sys.exit in library code used by cloudformation!!!
        sys.exit()
    if default_stack_name is None:
        # TODO why 'dp-<env>'? - this should not be hardcoded!
        default_stack_name = 'dp-%s' % env
    outputs = get_outputs_for_stack(awsclient, default_stack_name)
    if HOST_ZONE_NAME__STACK_OUTPUT_NAME not in outputs:
        print("Please debug why default stack '{}' does not contain '{}'...".format(
            default_stack_name,
            HOST_ZONE_NAME__STACK_OUTPUT_NAME,
        ))
        # TODO: why is there a sys.exit in library code used by cloudformation!!!
        sys.exit()
    # Zone names are stored fully qualified, hence the trailing dot.
    _host_zone_name = outputs[HOST_ZONE_NAME__STACK_OUTPUT_NAME] + "."
    return _host_zone_name
<SYSTEM_TASK:>
Delete the specified log group
<END_TASK>
<USER_TASK:>
Description:
def delete_log_group(awsclient, log_group_name):
    """Delete the specified log group
    :param log_group_name: log group name
    :return:
    """
    # The API response is not inspected; failures surface as exceptions.
    awsclient.get_client('logs').delete_log_group(
        logGroupName=log_group_name
    )
<SYSTEM_TASK:>
Sets the retention of the specified log group
<END_TASK>
<USER_TASK:>
Description:
def put_retention_policy(awsclient, log_group_name, retention_in_days):
    """Sets the retention of the specified log group
    if the log group does not yet exist than it will be created first.
    :param log_group_name: log group name
    :param retention_in_days: number of days to keep the log events
    :return:
    """
    # Note: for AWS Lambda the log_group is created once the first
    # log event occurs. So if the log_group does not exist we create it
    try:
        create_log_group(awsclient, log_group_name)
    except GracefulExit:
        raise
    except Exception:
        # TODO check that it is really a ResourceAlreadyExistsException
        pass
    awsclient.get_client('logs').put_retention_policy(
        logGroupName=log_group_name,
        retentionInDays=retention_in_days
    )
<SYSTEM_TASK:>
Get info on the specified log group
<END_TASK>
<USER_TASK:>
Description:
def describe_log_group(awsclient, log_group_name):
    """Get info on the specified log group
    :param log_group_name: log group name
    :return: log group details dict, or None when not found
    """
    response = awsclient.get_client('logs').describe_log_groups(
        logGroupNamePrefix=log_group_name,
        limit=1
    )
    groups = response['logGroups']
    # Prefix query with limit=1: first match or nothing.
    return groups[0] if groups else None
<SYSTEM_TASK:>
Get info on the specified log stream
<END_TASK>
<USER_TASK:>
Description:
def describe_log_stream(awsclient, log_group_name, log_stream_name):
    """Get info on the specified log stream
    :param log_group_name: log group name
    :param log_stream_name: log stream
    :return: log stream details dict, or None when not found
    """
    streams = awsclient.get_client('logs').describe_log_streams(
        logGroupName=log_group_name,
        logStreamNamePrefix=log_stream_name,
        limit=1
    )['logStreams']
    # Prefix query with limit=1: first match or nothing.
    return streams[0] if streams else None
<SYSTEM_TASK:>
Creates a log group with the specified name.
<END_TASK>
<USER_TASK:>
Description:
def create_log_group(awsclient, log_group_name):
    """Creates a log group with the specified name.
    :param log_group_name: log group name
    :return:
    """
    # The API response is not inspected; failures surface as exceptions.
    awsclient.get_client('logs').create_log_group(
        logGroupName=log_group_name,
    )
<SYSTEM_TASK:>
Creates a log stream for the specified log group.
<END_TASK>
<USER_TASK:>
Description:
def create_log_stream(awsclient, log_group_name, log_stream_name):
    """Creates a log stream for the specified log group.
    :param log_group_name: log group name
    :param log_stream_name: log stream name
    :return:
    """
    # The API response is not inspected; failures surface as exceptions.
    awsclient.get_client('logs').create_log_stream(
        logGroupName=log_group_name,
        logStreamName=log_stream_name
    )
<SYSTEM_TASK:>
Put log events for the specified log group and stream.
<END_TASK>
<USER_TASK:>
Description:
def put_log_events(awsclient, log_group_name, log_stream_name, log_events,
                   sequence_token=None):
    """Put log events for the specified log group and stream.
    :param log_group_name: log group name
    :param log_stream_name: log stream name
    :param log_events: [{'timestamp': 123, 'message': 'string'}, ...]
    :param sequence_token: the sequence token
    :return: next_token
    """
    request = {
        'logGroupName': log_group_name,
        'logStreamName': log_stream_name,
        'logEvents': log_events
    }
    # The first put on a fresh stream has no sequence token.
    if sequence_token:
        request['sequenceToken'] = sequence_token
    response = awsclient.get_client('logs').put_log_events(**request)
    # Surface any events CloudWatch rejected.
    if 'rejectedLogEventsInfo' in response:
        log.warn(response['rejectedLogEventsInfo'])
    # Callers thread this token into their next put_log_events call.
    if 'nextSequenceToken' in response:
        return response['nextSequenceToken']
<SYSTEM_TASK:>
Get log events for the specified log group and stream.
<END_TASK>
<USER_TASK:>
Description:
def get_log_events(awsclient, log_group_name, log_stream_name, start_ts=None):
    """Get log events for the specified log group and stream.
    this is used in tenkai output instance diagnostics
    :param log_group_name: log group name
    :param log_stream_name: log stream name
    :param start_ts: timestamp
    :return: list of {'timestamp': ..., 'message': ...} dicts, or None
    """
    request = {
        'logGroupName': log_group_name,
        'logStreamName': log_stream_name
    }
    if start_ts:
        request['startTime'] = start_ts
    # TODO exhaust the events!
    # TODO use all_pages !
    response = awsclient.get_client('logs').get_log_events(**request)
    events = response.get('events')
    if events:
        return [{'timestamp': e['timestamp'], 'message': e['message']}
                for e in events]
<SYSTEM_TASK:>
Reload the configuration from disk returning True if the
<END_TASK>
<USER_TASK:>
Description:
def reload(self):
    """Reload the configuration from disk returning True if the
    configuration has changed from the previous values.
    """
    fresh = self._default_configuration()
    # File-backed values overlay the defaults when a path is configured.
    if self._file_path:
        fresh.update(self._load_config_file())
    # Only swap in (and report) when something actually changed.
    if fresh == self._values:
        return False
    self._values = fresh
    return True
<SYSTEM_TASK:>
Load the configuration file into memory, returning the content.
<END_TASK>
<USER_TASK:>
Description:
def _load_config_file(self):
    """Load the configuration file into memory, returning the content.
    """
    LOGGER.info('Loading configuration from %s', self._file_path)
    # Pick the parser by file extension; anything not JSON is YAML.
    loader = (self._load_json_config
              if self._file_path.endswith('json')
              else self._load_yaml_config)
    config = loader()
    # Title-case the top-level keys in place (snapshot first, because
    # the loop mutates the dict it iterates).
    for key, value in list(config.items()):
        if key.title() != key:
            config[key.title()] = value
            del config[key]
    return flatdict.FlatDict(config)
<SYSTEM_TASK:>
Load the configuration file in JSON format
<END_TASK>
<USER_TASK:>
Description:
def _load_json_config(self):
"""Load the configuration file in JSON format
:rtype: dict
""" |
try:
return json.loads(self._read_config())
except ValueError as error:
raise ValueError(
'Could not read configuration file: {}'.format(error)) |
<SYSTEM_TASK:>
Loads the configuration file from a .yaml or .yml file
<END_TASK>
<USER_TASK:>
Description:
def _load_yaml_config(self):
    """Loads the configuration file from a .yaml or .yml file
    :type: dict
    """
    try:
        raw = self._read_config()
    except OSError as error:
        raise ValueError('Could not read configuration file: %s' % error)
    try:
        return yaml.safe_load(raw)
    except yaml.YAMLError as error:
        # Quote the parser error line-by-line so it stands out on stderr.
        message = '\n'.join([' > %s' % line
                             for line in str(error).split('\n')])
        sys.stderr.write('\n\n Error in the configuration file:\n\n'
                         '{}\n\n'.format(message))
        sys.stderr.write(' Configuration should be a valid YAML file.\n')
        sys.stderr.write(' YAML format validation available at '
                         'http://yamllint.com\n')
        raise ValueError(error)
<SYSTEM_TASK:>
Normalize the file path value.
<END_TASK>
<USER_TASK:>
Description:
def _normalize_file_path(file_path):
"""Normalize the file path value.
:param str file_path: The file path as passed in
:rtype: str
""" |
if not file_path:
return None
elif file_path.startswith('s3://') or \
file_path.startswith('http://') or \
file_path.startswith('https://'):
return file_path
return path.abspath(file_path) |
<SYSTEM_TASK:>
Read the configuration from the various places it may be read from.
<END_TASK>
<USER_TASK:>
Description:
def _read_config(self):
"""Read the configuration from the various places it may be read from.
:rtype: str
:raises: ValueError
""" |
if not self._file_path:
return None
elif self._file_path.startswith('s3://'):
return self._read_s3_config()
elif self._file_path.startswith('http://') or \
self._file_path.startswith('https://'):
return self._read_remote_config()
elif not path.exists(self._file_path):
raise ValueError(
'Configuration file not found: {}'.format(self._file_path))
with open(self._file_path, 'r') as handle:
return handle.read() |
<SYSTEM_TASK:>
Read a remote config via URL.
<END_TASK>
<USER_TASK:>
Description:
def _read_remote_config(self):
    """Read a remote config via URL.
    :rtype: str
    :raises: ValueError
    """
    # `requests` is an optional dependency; import lazily and fail loudly.
    try:
        import requests
    except ImportError:
        requests = None
    if not requests:
        raise ValueError(
            'Remote config URL specified but requests not installed')
    response = requests.get(self._file_path)
    if not response.ok:
        raise ValueError(
            'Failed to retrieve remote config: {}'.format(
                response.status_code))
    return response.text
<SYSTEM_TASK:>
Read in the value of the configuration file in Amazon S3.
<END_TASK>
<USER_TASK:>
Description:
def _read_s3_config(self):
    """Read in the value of the configuration file in Amazon S3.
    :rtype: str
    :raises: ValueError
    """
    # boto3/botocore are optional dependencies; import lazily and fail loudly.
    try:
        import boto3
        import botocore.exceptions
    except ImportError:
        boto3, botocore = None, None
    if not boto3:
        raise ValueError(
            's3 URL specified for configuration but boto3 not installed')
    parsed = parse.urlparse(self._file_path)
    try:
        # S3_ENDPOINT allows pointing at S3-compatible services.
        response = boto3.client(
            's3', endpoint_url=os.environ.get('S3_ENDPOINT')).get_object(
            Bucket=parsed.netloc, Key=parsed.path.lstrip('/'))
    except botocore.exceptions.ClientError as e:
        raise ValueError(
            'Failed to download configuration from S3: {}'.format(e))
    return response['Body'].read().decode('utf-8')
<SYSTEM_TASK:>
Update the internal configuration values, removing debug_only
<END_TASK>
<USER_TASK:>
Description:
def update(self, configuration, debug=None):
"""Update the internal configuration values, removing debug_only
handlers if debug is False. Returns True if the configuration has
changed from previous configuration values.
:param dict configuration: The logging configuration
:param bool debug: Toggles use of debug_only loggers
:rtype: bool
""" |
if self.config != dict(configuration) and debug != self.debug:
self.config = dict(configuration)
self.debug = debug
self.configure()
return True
return False |
<SYSTEM_TASK:>
Remove any handlers with an attribute of debug_only that is True and
<END_TASK>
<USER_TASK:>
Description:
def _remove_debug_handlers(self):
    """Remove any handlers with an attribute of debug_only that is True and
    remove the references to said handlers from any loggers that are
    referencing them.
    """ |
    # Collect first, then delete: mutating self.config[self.HANDLERS] while
    # iterating it would raise.
    remove = list()
    for handler in self.config[self.HANDLERS]:
        if self.config[self.HANDLERS][handler].get('debug_only'):
            remove.append(handler)
    for handler in remove:
        del self.config[self.HANDLERS][handler]
        # Drop dangling references from every logger that used this handler.
        for logger in self.config[self.LOGGERS].keys():
            logger = self.config[self.LOGGERS][logger]
            if handler in logger[self.HANDLERS]:
                logger[self.HANDLERS].remove(handler)
    # Strip the (non-dictConfig) debug_only key from surviving handlers.
    self._remove_debug_only()
<SYSTEM_TASK:>
Iterate through each handler removing the invalid dictConfig key of
<END_TASK>
<USER_TASK:>
Description:
def _remove_debug_only(self):
    """Iterate through each handler removing the invalid dictConfig key of
    debug_only.
    """ |
    # dictConfig rejects unknown keys, so the custom debug_only marker must
    # be stripped before the config is applied.
    LOGGER.debug('Removing debug only from handlers')
    for handler in self.config[self.HANDLERS]:
        if self.DEBUG_ONLY in self.config[self.HANDLERS][handler]:
            del self.config[self.HANDLERS][handler][self.DEBUG_ONLY]
<SYSTEM_TASK:>
Convert this object to a dictionary with formatting appropriate for a PIF.
<END_TASK>
<USER_TASK:>
Description:
def as_dictionary(self):
    """
    Convert this object to a dictionary with formatting appropriate for a PIF.
    :returns: Dictionary with the content of this object formatted for a PIF.
    """ |
    # PIF keys are camelCase; None-valued attributes are omitted entirely.
    return {to_camel_case(i): Serializable._convert_to_dictionary(self.__dict__[i])
            for i in self.__dict__ if self.__dict__[i] is not None}
<SYSTEM_TASK:>
Convert obj to a dictionary with formatting appropriate for a PIF. This function attempts to treat obj as
<END_TASK>
<USER_TASK:>
Description:
def _convert_to_dictionary(obj):
    """
    Convert obj to a dictionary with formatting appropriate for a PIF. This function attempts to treat obj as
    a Pio object and otherwise returns obj.
    :param obj: Object to convert to a dictionary.
    :returns: Input object as a dictionary or the original object.
    """ |
    # Recurse into lists; duck-type on as_dictionary() for Pio objects;
    # everything else passes through unchanged.
    if isinstance(obj, list):
        return [Serializable._convert_to_dictionary(i) for i in obj]
    elif hasattr(obj, 'as_dictionary'):
        return obj.as_dictionary()
    else:
        return obj
<SYSTEM_TASK:>
Helper function that returns an object, or if it is a dictionary, initializes it from class_.
<END_TASK>
<USER_TASK:>
Description:
def _get_object(class_, obj):
    """
    Helper function that returns an object, or if it is a dictionary, initializes it from class_.
    :param class_: Class to use to instantiate object.
    :param obj: Object to process.
    :return: One or more objects.
    """ |
    # Lists are converted element-wise; dicts become class_ instances with
    # their keys converted to snake_case keyword arguments.
    if isinstance(obj, list):
        return [Serializable._get_object(class_, i) for i in obj]
    elif isinstance(obj, dict):
        return class_(**keys_to_snake_case(obj))
    else:
        return obj
<SYSTEM_TASK:>
Calculates the total sound pressure level based on multiple source levels
<END_TASK>
<USER_TASK:>
Description:
def total_level(source_levels):
    """
    Calculates the total sound pressure level based on multiple source levels
    """ |
    # Energetic sum of decibel levels: L = 10*log10(sum(10^(Li/10))).
    # None and 0 entries are skipped.
    # NOTE(review): if source_levels is empty or all entries are skipped,
    # math.log10(0) raises ValueError — confirm callers guarantee input.
    sums = 0.0
    for l in source_levels:
        if l is None:
            continue
        if l == 0:
            continue
        sums += pow(10.0, float(l) / 10.0)
    level = 10.0 * math.log10(sums)
    return level
<SYSTEM_TASK:>
Calculates the A-rated total sound pressure level
<END_TASK>
<USER_TASK:>
Description:
def total_rated_level(octave_frequencies):
    """
    Calculates the A-rated total sound pressure level
    based on octave band frequencies
    """ |
    # Energetic sum over the known octave bands; OCTAVE_BANDS[band][1] holds
    # the A-weighting correction in dB for that band.
    sums = 0.0
    for band in OCTAVE_BANDS.keys():
        if band not in octave_frequencies:
            continue
        if octave_frequencies[band] is None:
            continue
        if octave_frequencies[band] == 0:
            continue
        sums += pow(10.0, ((float(octave_frequencies[band]) + OCTAVE_BANDS[band][1]) / 10.0))
    level = 10.0 * math.log10(sums)
    return level
<SYSTEM_TASK:>
Calculates the sound pressure level
<END_TASK>
<USER_TASK:>
Description:
def distant_level(reference_level, distance, reference_distance=1.0):
    """
    Calculates the sound pressure level
    in dependence of a distance
    where a perfect ball-shaped source and spread is assumed.
    reference_level: Sound pressure level in reference distance in dB
    distance: Distance to calculate sound pressure level for, in meters
    reference_distance: reference distance in meters (defaults to 1)
    """ |
    # Spherical spreading: L2 = L1 + 20*log10(r1/r2).
    # log(x)/log(10) is log10(x).
    rel_dist = float(reference_distance) / float(distance)
    level = float(reference_level) + 20.0 * (math.log(rel_dist) / math.log(10))
    return level
<SYSTEM_TASK:>
Calculates the damped, A-rated total sound pressure level
<END_TASK>
<USER_TASK:>
Description:
def distant_total_damped_rated_level(
        octave_frequencies,
        distance,
        temp,
        relhum,
        reference_distance=1.0):
    """
    Calculates the damped, A-rated total sound pressure level
    in a given distance, temperature and relative humidity
    from octave frequency sound pressure levels in a reference distance
    """ |
    # Atmospheric damping applies only beyond the reference distance.
    damping_distance = distance - reference_distance
    sums = 0.0
    for band in OCTAVE_BANDS.keys():
        if band not in octave_frequencies:
            continue
        if octave_frequencies[band] is None:
            continue
        # distance-adjusted level per band
        distant_val = distant_level(
            reference_level=float(octave_frequencies[band]),
            distance=distance,
            reference_distance=reference_distance
        )
        # damping (dB per meter at the band's center frequency)
        damp_per_meter = damping(
            temp=temp,
            relhum=relhum,
            freq=OCTAVE_BANDS[band][0])
        distant_val = distant_val - (damping_distance * damp_per_meter)
        # applying A-rating correction for this band
        distant_val += OCTAVE_BANDS[band][1]
        sums += pow(10.0, (distant_val / 10.0))
    level = 10.0 * math.log10(sums)
    return level
<SYSTEM_TASK:>
This is used by gcdt plugins to get a logger with the right level.
<END_TASK>
<USER_TASK:>
Description:
def getLogger(name):
    """This is used by gcdt plugins to get a logger with the right level.""" |
    logger = logging.getLogger(name)
    # note: the level might be adjusted via '-v' option
    logger.setLevel(logging_config['loggers']['gcdt']['level'])
    return logger
<SYSTEM_TASK:>
If the session expired, logs back in.
<END_TASK>
<USER_TASK:>
Description:
def keep_session_alive(self):
    """If the session expired, logs back in.""" |
    # Probe with a cheap API call; fault code 5 signals an expired session
    # (per the Magento XML-RPC API) — anything else is a real error.
    try:
        self.resources()
    except xmlrpclib.Fault as fault:
        if fault.faultCode == 5:
            self.login()
        else:
            raise
<SYSTEM_TASK:>
Prints discovered resources and their associated methods. Nice when
<END_TASK>
<USER_TASK:>
Description:
def help(self):
    """Prints discovered resources and their associated methods. Nice when
    noodling in the terminal to wrap your head around Magento's insanity.
    """ |
    # Output is sorted for stable, scannable listing; resource names are
    # rendered bold for readability.
    print('Resources:')
    print('')
    for name in sorted(self._resources.keys()):
        methods = sorted(self._resources[name]._methods.keys())
        print('{}: {}'.format(bold(name), ', '.join(methods)))
<SYSTEM_TASK:>
Import the controller and run it.
<END_TASK>
<USER_TASK:>
Description:
def run(self):
    """Import the controller and run it.
    This mimics the processing done by :func:`helper.start`
    when a controller is run in the foreground. A new instance
    of ``self.controller`` is created and run until a keyboard
    interrupt occurs or the controller stops on its own accord.
    """ |
    # 'pkg.mod.Class' -> import 'pkg.mod', then walk attributes to Class.
    segments = self.controller.split('.')
    controller_class = reduce(getattr, segments[1:],
                              __import__('.'.join(segments[:-1])))
    # '-f' forces foreground mode; optional '-c <path>' passes the config.
    cmd_line = ['-f']
    if self.configuration is not None:
        cmd_line.extend(['-c', self.configuration])
    args = parser.get().parse_args(cmd_line)
    controller_instance = controller_class(args, platform)
    try:
        controller_instance.start()
    except KeyboardInterrupt:
        controller_instance.stop()
<SYSTEM_TASK:>
Returns the base32 info hash of the torrent. Useful for generating
<END_TASK>
<USER_TASK:>
Description:
def info_hash_base32(self):
    """
    Returns the base32 info hash of the torrent. Useful for generating
    magnet links.
    .. note:: ``generate()`` must be called first.
    """ |
    # The info hash is SHA-1 over the bencoded 'info' dict; _data is only
    # populated after generate() has run.
    if getattr(self, '_data', None):
        return b32encode(sha1(bencode(self._data['info'])).digest())
    else:
        raise exceptions.TorrentNotGeneratedException
<SYSTEM_TASK:>
Upload bundle and deploy to deployment group.
<END_TASK>
<USER_TASK:>
Description:
def deploy(awsclient, applicationName, deploymentGroupName,
           deploymentConfigName, bucket, bundlefile):
    """Upload bundle and deploy to deployment group.
    This includes the bundle-action.
    :param applicationName:
    :param deploymentGroupName:
    :param deploymentConfigName:
    :param bucket:
    :param bundlefile:
    :return: deploymentId from create_deployment
    """ |
    # Upload the tgz bundle to S3 first; etag/version pin the exact revision
    # that CodeDeploy will fetch.
    etag, version = upload_file_to_s3(awsclient, bucket,
                                      _build_bundle_key(applicationName),
                                      bundlefile)
    client_codedeploy = awsclient.get_client('codedeploy')
    response = client_codedeploy.create_deployment(
        applicationName=applicationName,
        deploymentGroupName=deploymentGroupName,
        revision={
            'revisionType': 'S3',
            's3Location': {
                'bucket': bucket,
                'key': _build_bundle_key(applicationName),
                'bundleType': 'tgz',
                'eTag': etag,
                'version': version,
            },
        },
        deploymentConfigName=deploymentConfigName,
        description='deploy with tenkai',
        ignoreApplicationStopFailures=True
    )
    # Log a clickable console URL for the new deployment.
    log.info(
        "Deployment: {} -> URL: https://{}.console.aws.amazon.com/codedeploy/home?region={}#/deployments/{}".format(
            Fore.MAGENTA + response['deploymentId'] + Fore.RESET,
            client_codedeploy.meta.region_name,
            client_codedeploy.meta.region_name,
            response['deploymentId'],
        ))
    return response['deploymentId']
<SYSTEM_TASK:>
Wait until an deployment is in an steady state and output information.
<END_TASK>
<USER_TASK:>
Description:
def output_deployment_status(awsclient, deployment_id, iterations=100):
"""Wait until an deployment is in an steady state and output information.
:param deployment_id:
:param iterations:
:return: exit_code
""" |
counter = 0
steady_states = ['Succeeded', 'Failed', 'Stopped']
client_codedeploy = awsclient.get_client('codedeploy')
while counter <= iterations:
response = client_codedeploy.get_deployment(deploymentId=deployment_id)
status = response['deploymentInfo']['status']
if status not in steady_states:
log.info('Deployment: %s - State: %s' % (deployment_id, status))
time.sleep(10)
elif status == 'Failed':
log.info(
colored.red('Deployment: {} failed: {}'.format(
deployment_id,
json.dumps(response['deploymentInfo']['errorInformation'],
indent=2)
))
)
return 1
else:
log.info('Deployment: %s - State: %s' % (deployment_id, status))
break
return 0 |
<SYSTEM_TASK:>
stop tenkai deployment.
<END_TASK>
<USER_TASK:>
Description:
def stop_deployment(awsclient, deployment_id):
    """stop tenkai deployment.
    :param awsclient:
    :param deployment_id:
    """ |
    log.info('Deployment: %s - stopping active deployment.', deployment_id)
    client_codedeploy = awsclient.get_client('codedeploy')
    # autoRollbackEnabled reverts to the last known-good revision on stop.
    # The response is intentionally unused; stop is fire-and-forget here.
    response = client_codedeploy.stop_deployment(
        deploymentId=deployment_id,
        autoRollbackEnabled=True
    )
<SYSTEM_TASK:>
list deployment instances.
<END_TASK>
<USER_TASK:>
Description:
def _list_deployment_instances(awsclient, deployment_id):
    """list deployment instances.
    :param awsclient:
    :param deployment_id:
    """ |
    client_codedeploy = awsclient.get_client('codedeploy')
    instances = []
    next_token = None
    # TODO refactor generic exhaust_function from this
    # Standard AWS pagination: keep requesting with nextToken until the
    # response no longer carries one.
    while True:
        request = {
            'deploymentId': deployment_id
        }
        if next_token:
            request['nextToken'] = next_token
        response = client_codedeploy.list_deployment_instances(**request)
        instances.extend(response['instancesList'])
        if 'nextToken' not in response:
            break
        next_token = response['nextToken']
    return instances
<SYSTEM_TASK:>
instance summary.
<END_TASK>
<USER_TASK:>
Description:
def _get_deployment_instance_summary(awsclient, deployment_id, instance_id):
    """instance summary.
    :param awsclient:
    :param deployment_id:
    :param instance_id:
    return: status, last_event
    """ |
    client_codedeploy = awsclient.get_client('codedeploy')
    request = {
        'deploymentId': deployment_id,
        'instanceId': instance_id
    }
    response = client_codedeploy.get_deployment_instance(**request)
    # Return overall instance status plus the name of the most recent
    # lifecycle event ([-1] = last in the ordered event list).
    return response['instanceSummary']['status'], \
        response['instanceSummary']['lifecycleEvents'][-1]['lifecycleEventName']
<SYSTEM_TASK:>
Gets you the diagnostics details for the first 'Failed' event.
<END_TASK>
<USER_TASK:>
Description:
def _get_deployment_instance_diagnostics(awsclient, deployment_id, instance_id):
"""Gets you the diagnostics details for the first 'Failed' event.
:param awsclient:
:param deployment_id:
:param instance_id:
return: None or (error_code, script_name, message, log_tail)
""" |
client_codedeploy = awsclient.get_client('codedeploy')
request = {
'deploymentId': deployment_id,
'instanceId': instance_id
}
response = client_codedeploy.get_deployment_instance(**request)
# find first 'Failed' event
for i, event in enumerate(response['instanceSummary']['lifecycleEvents']):
if event['status'] == 'Failed':
return event['diagnostics']['errorCode'], \
event['diagnostics']['scriptName'], \
event['diagnostics']['message'], \
event['diagnostics']['logTail']
return None |
<SYSTEM_TASK:>
label SPACE num
<END_TASK>
<USER_TASK:>
Description:
def directive_SPACE(self, label, params):
"""
label SPACE num
Allocate space on the stack. `num` is the number of bytes to allocate
""" |
# TODO allow equations
params = params.strip()
try:
self.convert_to_integer(params)
except ValueError:
warnings.warn("Unknown parameters; {}".format(params))
return
self.labels[label] = self.space_pointer
if params in self.equates:
params = self.equates[params]
self.space_pointer += self.convert_to_integer(params) |
<SYSTEM_TASK:>
A method decorator that filters out sign_original_specals coming from models that don't
<END_TASK>
<USER_TASK:>
Description:
def instance_ik_model_receiver(fn):
    """
    A method decorator that filters out signals coming from models that don't
    have fields that function as ImageFieldSourceGroup sources.
    """ |
    @wraps(fn)
    def receiver(self, sender, **kwargs):
        # print 'inspect.isclass(sender? %s'%(inspect.isclass(sender))
        # Only model classes are of interest; ignore instance-level senders.
        if not inspect.isclass(sender):
            return
        for src in self._source_groups:
            if issubclass(sender, src.model_class):
                fn(self, sender=sender, **kwargs)
                # If we find a match, return. We don't want to handle the signal
                # more than once.
                return
    return receiver
<SYSTEM_TASK:>
Returns a list of the source fields for the given instance.
<END_TASK>
<USER_TASK:>
Description:
def get_source_fields(self, instance):
    """
    Returns a list of the source fields for the given instance.
    """ |
    # NOTE: actually returns a set (deduplicated), not a list.
    return set(src.image_field
               for src in self._source_groups
               if isinstance(instance, src.model_class))
<SYSTEM_TASK:>
Check if the hook configuration is absent or has both register AND deregister.
<END_TASK>
<USER_TASK:>
Description:
def check_hook_mechanism_is_intact(module):
"""Check if the hook configuration is absent or has both register AND deregister.
:param module:
:return: True if valid plugin / module.
""" |
result = True
if check_register_present(module):
result = not result
if check_deregister_present(module):
result = not result
return result |
<SYSTEM_TASK:>
Start the Helper controller either in the foreground or as a daemon
<END_TASK>
<USER_TASK:>
Description:
def start(controller_class):
    """Start the Helper controller either in the foreground or as a daemon
    process.
    :param controller_class: The controller class handle to create and run
    :type controller_class: callable
    """ |
    args = parser.parse()
    obj = controller_class(args, platform.operating_system())
    if args.foreground:
        # Foreground: run until Ctrl-C, then shut the controller down cleanly.
        try:
            obj.start()
        except KeyboardInterrupt:
            obj.stop()
    else:
        # Daemon: context manager handles pidfile/detach; startup errors are
        # reported on stderr and the process exits non-zero.
        try:
            with platform.Daemon(obj) as daemon:
                daemon.start()
        except (OSError, ValueError) as error:
            sys.stderr.write('\nError starting %s: %s\n\n' %
                             (sys.argv[0], error))
            sys.exit(1)
<SYSTEM_TASK:>
Helper function that checks the input object type against each in a list of classes. This function
<END_TASK>
<USER_TASK:>
Description:
def _validate_type(self, name, obj, *args):
    """
    Helper function that checks the input object type against each in a list of classes. This function
    also allows the input value to be equal to None.
    :param name: Name of the object.
    :param obj: Object to check the type of.
    :param args: List of classes.
    :raises TypeError: if the input object is not of any of the allowed types.
    """ |
    # None is always accepted; otherwise the first matching class wins.
    if obj is None:
        return
    for arg in args:
        if isinstance(obj, arg):
            return
    raise TypeError(self.__class__.__name__ + '.' + name + ' is of type ' + type(obj).__name__ +
                    '. Must be equal to None or one of the following types: ' + str(args))
<SYSTEM_TASK:>
Helper function that checks the input object type against each in a list of classes, or if the input object
<END_TASK>
<USER_TASK:>
Description:
def _validate_list_type(self, name, obj, *args):
    """
    Helper function that checks the input object type against each in a list of classes, or if the input object
    is a list, each value that it contains against that list.
    :param name: Name of the object.
    :param obj: Object to check the type of.
    :param args: List of classes.
    :raises TypeError: if the input object is not of any of the allowed types.
    """ |
    if obj is None:
        return
    if isinstance(obj, list):
        # Elements inside a list may NOT be None (stricter check).
        for i in obj:
            self._validate_type_not_null(name, i, *args)
    else:
        self._validate_type(name, obj, *args)
<SYSTEM_TASK:>
Helper function that checks the input object as a list then recursively until nested_level is 1.
<END_TASK>
<USER_TASK:>
Description:
def _validate_nested_list_type(self, name, obj, nested_level, *args):
    """
    Helper function that checks the input object as a list then recursively until nested_level is 1.
    :param name: Name of the object.
    :param obj: Object to check the type of.
    :param nested_level: Integer with the current nested level.
    :param args: List of classes.
    :raises TypeError: if the input object is not of any of the allowed types.
    """ |
    # Base case: at depth 1 delegate to the flat list check.
    if nested_level <= 1:
        self._validate_list_type(name, obj, *args)
    else:
        if obj is None:
            return
        if not isinstance(obj, list):
            raise TypeError(self.__class__.__name__ + '.' + name + ' contains value of type ' +
                            type(obj).__name__ + ' where a list is expected')
        # Recurse one level deeper for every sub-list.
        for sub_obj in obj:
            self._validate_nested_list_type(name, sub_obj, nested_level - 1, *args)
<SYSTEM_TASK:>
Returns a PEP 440-compliant version number from VERSION.
<END_TASK>
<USER_TASK:>
Description:
def get_version(version):
    """
    Returns a PEP 440-compliant version number from VERSION.
    Created by modifying django.utils.version.get_version
    """ |
    # Now build the two parts of the version number:
    # major = X.Y[.Z]
    # sub = .devN - for development releases
    #     | {a|b|rc}N - for alpha, beta and rc releases
    #     | .postN - for post-release releases
    # version is a 5-tuple: (X, Y, Z, stage, N) with stage in
    # {'final', 'dev', 'post', 'a', 'b', 'rc'}.
    assert len(version) == 5
    # Drop a trailing .0 micro component (X.Y instead of X.Y.0).
    version_parts = version[:2] if version[2] == 0 else version[:3]
    # Build the first part of the version
    major = '.'.join(str(x) for x in version_parts)
    # Just return it if this is a final release version
    if version[3] == 'final':
        return major
    # Add the rest
    sub = ''.join(str(x) for x in version[3:5])
    if version[3] == 'dev':
        # Override the sub part. Add in a timestamp
        timestamp = get_git_changeset()
        sub = 'dev%s' % (timestamp if timestamp else version[4])
        return '%s.%s' % (major, sub)
    if version[3] == 'post':
        # We need a dot for post
        return '%s.%s' % (major, sub)
    elif version[3] in ('a', 'b', 'rc'):
        # No dot for these
        return '%s%s' % (major, sub)
    else:
        raise ValueError('Invalid version: %s' % str(version))
<SYSTEM_TASK:>
Returns a numeric identifier of the latest git changeset.
<END_TASK>
<USER_TASK:>
Description:
def get_git_changeset():
    """Returns a numeric identifier of the latest git changeset.
    The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
    This value isn't guaranteed to be unique, but collisions are very unlikely,
    so it's sufficient for generating the development version numbers.
    """ |
    repo_dir = os.path.dirname(os.path.abspath(__file__))
    # %ct = committer date as unix timestamp of HEAD.
    git_log = subprocess.Popen('git log --pretty=format:%ct --quiet -1 HEAD',
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                               shell=True, cwd=repo_dir, universal_newlines=True)
    timestamp = git_log.communicate()[0]
    try:
        timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
        return timestamp.strftime('%Y%m%d%H%M%S')
    except ValueError:
        # Not a git checkout (or git missing): int() on empty output fails.
        return None
<SYSTEM_TASK:>
Load cloudformation template from path.
<END_TASK>
<USER_TASK:>
Description:
def load_cloudformation_template(path=None):
    """Load cloudformation template from path.
    :param path: Absolute or relative path of cloudformation template. Defaults to cwd.
    :return: module, success
    """ |
    if not path:
        path = os.path.abspath('cloudformation.py')
    else:
        path = os.path.abspath(path)
    if isinstance(path, six.string_types):
        try:
            sp = sys.path
            # temporarily add folder to allow relative path
            sys.path.append(os.path.abspath(os.path.dirname(path)))
            cloudformation = imp.load_source('cloudformation', path)
            sys.path = sp  # restore
            # use cfn template hooks
            if not check_hook_mechanism_is_intact(cloudformation):
                # no hooks - do nothing
                log.debug(
                    'No valid hook configuration: \'%s\'. Not using hooks!',
                    path)
            else:
                if check_register_present(cloudformation):
                    # register the template hooks so they listen to gcdt_signals
                    cloudformation.register()
            return cloudformation, True
        except GracefulExit:
            raise
        except ImportError as e:
            print('could not find package for import: %s' % e)
        except Exception as e:
            print('could not import cloudformation.py, maybe something wrong ',
                  'with your code?')
            print(e)
    # Fall through: import failed (or path was not a string).
    return None, False
<SYSTEM_TASK:>
get differences between local config and currently active config
<END_TASK>
<USER_TASK:>
Description:
def get_parameter_diff(awsclient, config):
    """get differences between local config and currently active config
    :param awsclient: client factory providing get_client('cloudformation')
    :param config: local gcdt config with 'stack' and 'parameters' sections
    :return: True if parameters changed, False if unchanged, None if the
             stack does not exist / is not configured
    """ |
    client_cf = awsclient.get_client('cloudformation')
    try:
        stack_name = config['stack']['StackName']
        if stack_name:
            response = client_cf.describe_stacks(StackName=stack_name)
            if response['Stacks']:
                stack_id = response['Stacks'][0]['StackId']
                stack = response['Stacks'][0]
            else:
                return None
        else:
            print(
                'StackName is not configured, could not create parameter diff')
            return None
    except GracefulExit:
        raise
    except Exception:
        # probably the stack is not existent
        return None
    changed = 0
    table = []
    table.append(['Parameter', 'Current Value', 'New Value'])
    # Check if there are parameters for the stack
    if 'Parameters' in stack:
        for param in stack['Parameters']:
            try:
                old = str(param['ParameterValue'])
                # can not compare list with str!!
                # if ',' in old:
                #    old = old.split(',')
                new = config['parameters'][param['ParameterKey']]
                if old != new:
                    if old.startswith('***'):
                        # parameter is configured with `NoEcho=True`
                        # this means we can not really say if the value changed!!
                        # for security reasons we block viewing the new value
                        new = old
                    table.append([param['ParameterKey'], old, new])
                    changed += 1
            except GracefulExit:
                raise
            except Exception:
                print('Did not find %s in local config file' % param[
                    'ParameterKey'])
    if changed > 0:
        print(tabulate(table, tablefmt='fancy_grid'))
    return changed > 0
<SYSTEM_TASK:>
Invoke the pre_hook BEFORE the config is read.
<END_TASK>
<USER_TASK:>
Description:
def call_pre_hook(awsclient, cloudformation):
"""Invoke the pre_hook BEFORE the config is read.
:param awsclient:
:param cloudformation:
""" |
# TODO: this is deprecated!! move this to glomex_config_reader
# no config available
if not hasattr(cloudformation, 'pre_hook'):
# hook is not present
return
hook_func = getattr(cloudformation, 'pre_hook')
if not hook_func.func_code.co_argcount:
hook_func() # for compatibility with existing templates
else:
log.error('pre_hock can not have any arguments. The pre_hook it is ' +
'executed BEFORE config is read') |
<SYSTEM_TASK:>
Deploy the stack to AWS cloud. Does either create or update the stack.
<END_TASK>
<USER_TASK:>
Description:
def deploy_stack(awsclient, context, conf, cloudformation, override_stack_policy=False):
    """Deploy the stack to AWS cloud. Does either create or update the stack.
    :param conf:
    :param override_stack_policy:
    :return: exit_code
    """ |
    stack_name = _get_stack_name(conf)
    parameters = _generate_parameters(conf)
    # Create vs. update is decided by whether the stack already exists.
    if stack_exists(awsclient, stack_name):
        exit_code = _update_stack(awsclient, context, conf, cloudformation,
                                  parameters, override_stack_policy)
    else:
        exit_code = _create_stack(awsclient, context, conf, cloudformation,
                                  parameters)
    # add 'stack_output' to the context so it becomes available
    # in 'command_finalized' hook
    context['stack_output'] = _get_stack_outputs(
        awsclient.get_client('cloudformation'), stack_name)
    _call_hook(awsclient, conf, stack_name, parameters, cloudformation,
               hook='post_hook',
               message='CloudFormation is done, now executing post hook...')
    return exit_code
<SYSTEM_TASK:>
Delete the stack from AWS cloud.
<END_TASK>
<USER_TASK:>
Description:
def delete_stack(awsclient, conf, feedback=True):
    """Delete the stack from AWS cloud.
    :param awsclient:
    :param conf:
    :param feedback: print out stack events (defaults to True)
    """ |
    client_cf = awsclient.get_client('cloudformation')
    stack_name = _get_stack_name(conf)
    # Remember the last event timestamp BEFORE deleting so polling only
    # reports events caused by this delete.
    last_event = _get_stack_events_last_timestamp(awsclient, stack_name)
    # Only StackName and (optional) RoleARN are forwarded to delete_stack.
    request = {}
    dict_selective_merge(request, conf['stack'], ['StackName', 'RoleARN'])
    response = client_cf.delete_stack(**request)
    if feedback:
        return _poll_stack_events(awsclient, stack_name, last_event)
<SYSTEM_TASK:>
Print out the list of stacks deployed at AWS cloud.
<END_TASK>
<USER_TASK:>
Description:
def list_stacks(awsclient):
    """Print out the list of stacks deployed at AWS cloud.
    :param awsclient:
    :return:
    """ |
    client_cf = awsclient.get_client('cloudformation')
    # Filter excludes DELETE_COMPLETE so only live/failed stacks are listed.
    response = client_cf.list_stacks(
        StackStatusFilter=[
            'CREATE_IN_PROGRESS', 'CREATE_COMPLETE', 'ROLLBACK_IN_PROGRESS',
            'ROLLBACK_COMPLETE', 'DELETE_IN_PROGRESS', 'DELETE_FAILED',
            'UPDATE_IN_PROGRESS', 'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS',
            'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_IN_PROGRESS',
            'UPDATE_ROLLBACK_FAILED',
            'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS',
            'UPDATE_ROLLBACK_COMPLETE',
        ]
    )
    # `result` is re-populated and printed per stack summary.
    result = {}
    stack_sum = 0
    for summary in response['StackSummaries']:
        result['StackName'] = summary["StackName"]
        result['CreationTime'] = summary['CreationTime']
        result['StackStatus'] = summary['StackStatus']
        print(json2table(result))
        stack_sum += 1
    print('listed %s stacks' % str(stack_sum))
<SYSTEM_TASK:>
Print out the change_set to console.
<END_TASK>
<USER_TASK:>
Description:
def describe_change_set(awsclient, change_set_name, stack_name):
    """Print out the change_set to console.
    This needs to run create_change_set first.
    :param awsclient:
    :param change_set_name:
    :param stack_name:
    """ |
    client = awsclient.get_client('cloudformation')
    status = None
    # Poll until CloudFormation finished computing the change set.
    # NOTE(review): this is a tight busy-wait without sleep between
    # describe calls — confirm whether throttling is a concern here.
    while status not in ['CREATE_COMPLETE', 'FAILED']:
        response = client.describe_change_set(
            ChangeSetName=change_set_name,
            StackName=stack_name)
        status = response['Status']
        # print('##### %s' % status)
        if status == 'FAILED':
            print(response['StatusReason'])
        elif status == 'CREATE_COMPLETE':
            for change in response['Changes']:
                print(json2table(change['ResourceChange']))
<SYSTEM_TASK:>
Delete specified change set. Currently we only use this during
<END_TASK>
<USER_TASK:>
Description:
def delete_change_set(awsclient, change_set_name, stack_name):
    """Delete specified change set. Currently we only use this during
    automated regression testing. But we have plans so lets locate this
    functionality here
    :param awsclient:
    :param change_set_name:
    :param stack_name:
    """ |
    client = awsclient.get_client('cloudformation')
    # Response intentionally unused; deletion is fire-and-forget.
    response = client.delete_change_set(
        ChangeSetName=change_set_name,
        StackName=stack_name)
<SYSTEM_TASK:>
collect info and output to console
<END_TASK>
<USER_TASK:>
Description:
def info(awsclient, config, format=None):
    """
    collect info and output to console
    :param awsclient:
    :param config:
    :param format: 'json' or 'tabular' (defaults to 'tabular')
    :return:
    """ |
    if format is None:
        format = 'tabular'
    stack_name = _get_stack_name(config)
    client_cfn = awsclient.get_client('cloudformation')
    # Exhaust all pages of list_stack_resources into (type, id, status) rows.
    resources = all_pages(
        client_cfn.list_stack_resources,
        {'StackName': stack_name},
        lambda x: [(r['ResourceType'], r['LogicalResourceId'], r['ResourceStatus'])
                   for r in x['StackResourceSummaries']]
    )
    infos = {
        'stack_output': _get_stack_outputs(client_cfn, stack_name),
        'stack_state': _get_stack_state(client_cfn, stack_name),
        'resources': resources
    }
    if format == 'json':
        print(json.dumps(infos))
    elif format == 'tabular':
        print('stack output:')
        print(tabulate(infos['stack_output'], tablefmt='fancy_grid'))
        print('\nstack_state: %s' % infos['stack_state'])
        print('\nresources:')
        print(tabulate(infos['resources'], tablefmt='fancy_grid'))
<SYSTEM_TASK:>
Shows one entry per distinct metric name
<END_TASK>
<USER_TASK:>
Description:
def get_queryset(self, request):
    """Shows one entry per distinct metric name""" |
    queryset = super(MetricGroupAdmin, self).get_queryset(request)
    # poor-man's DISTINCT ON for Sqlite3
    qs_values = queryset.values('id', 'name')
    # 2.7+ only :(
    # = {metric['name']: metric['id'] for metric in qs_values}
    # Later rows overwrite earlier ones, so one arbitrary id per name wins.
    distinct_names = {}
    for metric in qs_values:
        distinct_names[metric['name']] = metric['id']
    queryset = self.model.objects.filter(id__in=distinct_names.values())
    return queryset
<SYSTEM_TASK:>
Updates all metrics with the same name
<END_TASK>
<USER_TASK:>
Description:
def save_model(self, request, obj, form, change):
    """Updates all metrics with the same name""" |
    # Bulk-apply the changed form fields to every metric sharing this name,
    # not only the edited row.
    like_metrics = self.model.objects.filter(name=obj.name)
    # 2.7+ only :(
    # = {key: form.cleaned_data[key] for key in form.changed_data}
    updates = {}
    for key in form.changed_data:
        updates[key] = form.cleaned_data[key]
    like_metrics.update(**updates)
<SYSTEM_TASK:>
Return a list of the parsed code
<END_TASK>
<USER_TASK:>
Description:
def parse_lines(self, code):
    """
    Return a list of the parsed code
    For each line, return a three-tuple containing:
    1. The label
    2. The instruction
    3. Any arguments or parameters
    An element in the tuple may be None or '' if it did not find anything
    :param code: The code to parse
    :return: A list of tuples in the form of (label, instruction, parameters)
    """ |
    # Strip everything from ';' or '@' to end of line (assembly comments).
    remove_comments = re.compile(r'^([^;@\n]*);?.*$', re.MULTILINE)
    code = '\n'.join(remove_comments.findall(code))  # TODO can probably do this better
    # TODO labels with spaces between pipes is allowed `|label with space| INST OPER`
    # Split each line into: leading token (label), next token (instruction),
    # and the remainder (parameters).
    parser = re.compile(r'^(\S*)?[\s]*(\S*)([^\n]*)$', re.MULTILINE)
    res = parser.findall(code)
    # Make all parsing of labels and instructions adhere to all uppercase
    res = [(label.upper(), instruction.upper(), parameters.strip()) for (label, instruction, parameters) in res]
    return res
<SYSTEM_TASK:>
Low registers are R0 - R7
<END_TASK>
<USER_TASK:>
Description:
def rule_low_registers(self, arg):
    """Low registers are R0 - R7""" |
    # check_register validates the operand and returns its register number.
    r_num = self.check_register(arg)
    if r_num > 7:
        raise iarm.exceptions.RuleError(
            "Register {} is not a low register".format(arg))
<SYSTEM_TASK:>
Get two parameters from a given regex expression
<END_TASK>
<USER_TASK:>
Description:
def get_two_parameters(self, regex_exp, parameters):
    """
    Get two parameters from a given regex expression
    Raise an exception if more than two were found
    :param regex_exp:
    :param parameters:
    :return: tuple of the two parameters, uppercased
    :raises iarm.exceptions.ParsingError: on extra or missing arguments
    """ |
    Rx, Ry, other = self.get_parameters(regex_exp, parameters)
    # Any non-whitespace text after the second parameter is an error.
    if other is not None and other.strip():
        raise iarm.exceptions.ParsingError("Extra arguments found: {}".format(other))
    if Rx and Ry:
        return Rx.upper(), Ry.upper()
    elif not Rx:
        raise iarm.exceptions.ParsingError("Missing first positional argument")
    else:
        raise iarm.exceptions.ParsingError("Missing second positional argument")
<SYSTEM_TASK:>
Raises an exception if the register is not a special register
<END_TASK>
<USER_TASK:>
Description:
def rule_special_registers(self, arg):
    """Raises an exception if the register is not a special register""" |
    # TODO is PSR supposed to be here?
    special_registers = "PSR APSR IPSR EPSR PRIMASK FAULTMASK BASEPRI CONTROL"
    if arg not in special_registers.split():
        raise iarm.exceptions.RuleError("{} is not a special register; Must be [{}]".format(arg, special_registers))
<SYSTEM_TASK:>
Function decorator implementing retrying logic.
<END_TASK>
<USER_TASK:>
Description:
def retries(max_tries, delay=1, backoff=2, exceptions=(Exception,), hook=None):
    """Function decorator implementing retrying logic.

    The decorator calls the wrapped function up to ``max_tries`` times if it
    raises one of the given exceptions, sleeping between attempts.

    :param max_tries: total number of attempts (initial call plus retries)
    :param delay: sleep this many seconds after the first failure; the wait
        is multiplied by ``backoff`` after each subsequent failure
    :param backoff: multiply the delay by this factor after each failure
    :param exceptions: tuple of exception classes that trigger a retry;
        default ``(Exception,)``, which recovers from all but the most
        fatal errors
    :param hook: optional callable with signature
        ``(tries_remaining, exception, delay)`` invoked before each retry,
        primarily intended for logging the failure; it is NOT called after
        a failure when no retries remain

    Example hook::

        def example_hook(tries_remaining, exception, delay):
            print("Caught %r, %d tries remaining, sleeping for %s seconds"
                  % (exception, tries_remaining, delay), file=sys.stderr)

    ``GracefulExit`` is always re-raised immediately so a shutdown request
    is never swallowed by the retry loop.
    """
    from functools import wraps

    def dec(func):
        @wraps(func)  # preserve the wrapped function's name/docstring
        def f2(*args, **kwargs):
            mydelay = delay
            # Count down so the hook receives the number of tries remaining.
            for tries_remaining in range(max_tries - 1, -1, -1):
                try:
                    return func(*args, **kwargs)
                except GracefulExit:
                    # Never retry a shutdown request.
                    raise
                except exceptions as e:
                    if tries_remaining > 0:
                        if hook is not None:
                            hook(tries_remaining, e, mydelay)
                        sleep(mydelay)
                        mydelay *= backoff
                    else:
                        raise
        return f2
    return dec
<SYSTEM_TASK:>
This assembles the tool context. Private members are preceded by a '_'.
<END_TASK>
<USER_TASK:>
Description:
def get_context(awsclient, env, tool, command, arguments=None):
    """Assemble the gcdt tool context. Private members are preceded by a '_'.

    :param awsclient: aws client wrapper (stored under a private key)
    :param env: environment name
    :param tool: tool name
    :param command: command being executed
    :param arguments: parsed command arguments (default: empty dict)
    :return: dictionary containing the gcdt tool context
    """
    # TODO: elapsed, artifact(stack, depl-grp, lambda, api)
    if arguments is None:
        arguments = {}
    context = {'_awsclient': awsclient}
    context['env'] = env
    context['tool'] = tool
    context['command'] = command
    context['_arguments'] = arguments  # TODO clean up arguments -> args
    context['version'] = __version__
    context['user'] = _get_user()
    context['plugins'] = get_plugin_versions().keys()
    return context
<SYSTEM_TASK:>
Extract the first argument from arguments parsed by docopt.
<END_TASK>
<USER_TASK:>
Description:
def get_command(arguments):
    """Extract the first argument from arguments parsed by docopt.

    The command is the first key that docopt set to True and that is not
    an option flag (i.e. does not start with '-').

    :param arguments: dict of arguments parsed by docopt
    :return: command
    """
    commands = [name for name, enabled in arguments.items()
                if enabled is True and not name.startswith('-')]
    return commands[0]
<SYSTEM_TASK:>
Check whether a newer gcdt is available and output a warning.
<END_TASK>
<USER_TASK:>
Description:
def check_gcdt_update():
    """Check whether a newer gcdt is available and output a warning.

    Best-effort: any failure (e.g. PyPi unreachable) is reported as a
    warning rather than raised, except GracefulExit which always
    propagates.
    """
    try:
        installed, latest = get_package_versions('gcdt')
        if installed < latest:
            log.warn('Please consider an update to gcdt version: %s' %
                     latest)
    except GracefulExit:
        raise
    except Exception:
        log.warn('PyPi appears to be down - we currently can\'t check for newer gcdt versions')
<SYSTEM_TASK:>
Conditionally merges b into a if b's keys are contained in selection
<END_TASK>
<USER_TASK:>
Description:
def dict_selective_merge(a, b, selection, path=None):
    """Conditionally merge b into a if b's keys are contained in selection.

    Nested dicts are merged recursively; scalar values from `b` overwrite
    those in `a`. The merge happens in place.

    :param a: destination dict (mutated)
    :param b: source dict
    :param selection: limit merge to these top-level keys
    :param path: key path accumulated during recursion (currently unused)
    :return: the mutated dict `a`
    """
    if path is None:
        path = []
    for key in b:
        if key not in selection:
            continue
        if key not in a:
            # brand-new key: take b's value as-is
            a[key] = b[key]
        elif isinstance(a[key], dict) and isinstance(b[key], dict):
            # both sides are dicts: recurse, allowing all of b's sub-keys
            dict_selective_merge(a[key], b[key], b[key].keys(),
                                 path + [str(key)])
        elif a[key] != b[key]:
            # differing scalar (or mixed-type) value: b wins
            a[key] = b[key]
    return a
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.