text_prompt
stringlengths 100
17.7k
⌀ | code_prompt
stringlengths 7
9.86k
⌀ |
---|---|
<SYSTEM_TASK:>
Launch data fetching then load data received.
<END_TASK>
<USER_TASK:>
Description:
def load_from_db(cls, callback_etat=print, out=None):
    """Fetch remote data, then build (or refresh) a base object from it.

    The hook ``_load_remote_db`` is expected to be overridden by subclasses.

    :param callback_etat: progress callback taking (str, int, int)
    :param out: optional existing base object; when given it is re-initialized
        in place instead of a new instance being returned
    """
    datas = cls._load_remote_db(callback_etat)
    callback_etat("Chargement...", 2, 3)
    if out is not None:
        # Refresh the provided object in place.
        cls.__init__(out, datas=datas)
        return None
    return cls(datas)
<SYSTEM_TASK:>
Save the current in-memory base to a local file.
<END_TASK>
<USER_TASK:>
Description:
def save_to_local(self, callback_etat=print):
    """Save the current in-memory base to the local backup file.

    This is a backup mechanism, not a regular way to update the data.

    :param callback_etat: state callback, taking str, int, int as args
    :raises StructureError: when the save path does not exist
    """
    callback_etat("Aquisition...", 0, 3)
    dumped = self.dumps()
    serialized = json.dumps(dumped, indent=4, cls=formats.JsonEncoder)
    callback_etat("Chiffrement...", 1, 3)
    encrypted = security.protege_data(serialized, True)
    callback_etat("Enregistrement...", 2, 3)
    try:
        with open(self.LOCAL_DB_PATH, 'wb') as fichier:
            fichier.write(encrypted)
    except FileNotFoundError:
        logging.exception(self.__class__.__name__)
        raise StructureError("Chemin de sauvegarde introuvable !")
<SYSTEM_TASK:>
reads the cell at position x and y; puts the default styles in xlwt
<END_TASK>
<USER_TASK:>
Description:
def read_cell(self, x, y):
    """Read the cell at row x, column y; put the default styles in xlwt.

    Returns a one-entry dict mapping the column header to either the raw
    value, or a ``(value, style)`` tuple when ``self.style`` is set.

    Side effects: remaps the source workbook's "automatic" background
    colour (index 64) on the shared xf record, and caches one xlwt style
    per background colour in ``self.colors``.
    """
    cell = self._sheet.row(x)[y]
    # 64 is xlrd's "automatic/none" background; normalize it to 9 so a
    # concrete colour index is always available below.  NOTE: this mutates
    # the workbook's shared xf record.
    if self._file.xf_list[
            cell.xf_index].background.pattern_colour_index == 64:
        self._file.xf_list[
            cell.xf_index].background.pattern_colour_index = 9
    if self._file.xf_list[
            cell.xf_index].background.pattern_colour_index in self.colors.keys():
        # Reuse the style already built for this background colour.
        style = self.colors[self._file.xf_list[
            cell.xf_index].background.pattern_colour_index]
    else:
        # Build a solid-pattern, thin-bordered style and cache it by colour.
        style = self.xlwt.easyxf(
            'pattern: pattern solid; border: top thin, right thin, bottom thin, left thin;')
        style.pattern.pattern_fore_colour = self._file.xf_list[
            cell.xf_index].background.pattern_colour_index
        self.colors[self._file.xf_list[
            cell.xf_index].background.pattern_colour_index] = style
    # Copy font name/bold from the source workbook onto the (shared,
    # cached) style object.
    style.font.name = self._file.font_list[
        self._file.xf_list[cell.xf_index].font_index].name
    style.font.bold = self._file.font_list[
        self._file.xf_list[cell.xf_index].font_index].bold
    # Headers may be (name, ...) tuples; use the first element as the key.
    if isinstance(self.header[y], tuple):
        header = self.header[y][0]
    else:
        header = self.header[y]
    if self.strip:
        if is_str_or_unicode(cell.value):
            cell.value = cell.value.strip()
    if self.style:
        return {header: (cell.value, style)}
    else:
        return {header: cell.value}
<SYSTEM_TASK:>
writing style and value in the cell of x and y position
<END_TASK>
<USER_TASK:>
Description:
def write_cell(self, x, y, value, style=None):
    """Write *value* into the cell at row x, column y, optionally styled.

    *style* may be an xlwt style object or an ``easyxf`` style string.
    """
    if isinstance(style, str):
        # Compile a style string into an xlwt style object.
        style = self.xlwt.easyxf(style)
    kwargs = {"label": value}
    if style:
        kwargs["style"] = style
    self._sheet.write(x, y, **kwargs)
<SYSTEM_TASK:>
This function checks if a path was given as string, and tries to read the
<END_TASK>
<USER_TASK:>
Description:
def get_string(string):
    """Return *string*, or the contents of the file it points to.

    If *string* looks like a path (contains ``/``) and is an existing file,
    the file is read and its stripped lines are joined with spaces.
    Empty or whitespace-only results are normalized to ``None``.

    :param string: a literal value, a file path, or None
    :returns: the resolved string, or None
    """
    truestring = string
    if string is not None:
        if '/' in string and os.path.isfile(string):
            # Best effort: fall back to the raw value on any read problem.
            # (Was a bare `except:` which also swallowed SystemExit and
            # KeyboardInterrupt.)
            try:
                with open_(string, 'r') as f:
                    truestring = ' '.join(line.strip() for line in f)
            except (OSError, UnicodeDecodeError):
                pass
        if truestring.strip() == '':
            truestring = None
    return truestring
<SYSTEM_TASK:>
This function handles and validates the wrapper arguments.
<END_TASK>
<USER_TASK:>
Description:
def get_arguments(options):
    """Handle and validate the wrapper arguments.

    ``options`` is either a raw newline-separated string (columns 1 and 2
    of each line are used as dest/flag) or an iterable of
    ``(flag, dest, default, help)`` tuples.

    :returns: the parsed argparse namespace
    """
    # These the next couple of lines defines the header of the Help output
    parser = ArgumentParser(
        formatter_class=RawDescriptionHelpFormatter,
        usage=("""%(prog)s
--------------------------------------------------------------------------------
"""),
        description=("""
Service Wrapper
===============
This is the service wrapper script, which is a part of the CGE services.
Read the online manual for help.
A list of all published services can be found at:
cge.cbs.dtu.dk/services
"""), epilog=("""
--------------------------------------------------------------------------------
"""))
    #ADDING ARGUMENTS
    setarg = parser.add_argument
    #SERVICE SPECIFIC ARGUMENTS
    if isinstance(options, str):
        # Extract columns 1 and 2 (dest, flag) from each non-empty line.
        options = [[x for i,x in enumerate(line.split()) if i in [1,2]] for line in options.split('\n') if len(line)>0]
        for o in options:
            try:
                setarg(o[1], type=str, dest=o[0], default=None, help=SUPPRESS)
            except:
                # NOTE(review): deliberately best-effort -- malformed lines
                # are skipped silently.
                None
    else:
        for o in options:
            if o[2] is True:
                # Handle negative flags
                setarg(o[0], action="store_false", dest=o[1], default=o[2],
                       help=o[3])
            elif o[2] is False:
                # Handle positive flags
                setarg(o[0], action="store_true", dest=o[1], default=o[2],
                       help=o[3])
            else:
                # Plain string option; append the default to the help text.
                help_ = o[3] if o[2] is None else "%s [%s]"%(o[3], '%(default)s')
                setarg(o[0], type=str, dest=o[1], default=o[2],
                       help=help_)
    # VALIDATION OF ARGUMENTS
    args = parser.parse_args()
    debug.log("ARGS: %s"%args)
    return args
<SYSTEM_TASK:>
This function returns list of files in the given dir
<END_TASK>
<USER_TASK:>
Description:
def make_file_list(upload_path):
    """Return the sorted, normalized paths of the files in *upload_path*.

    :raises Exception: if any entry name contains a space
    """
    newlist = []
    for name in sorted(os.listdir(upload_path)):
        if ' ' in name:
            raise Exception('Error: Spaces are not allowed in file names!\n')
        newlist.append(os.path.normpath(upload_path+'/'+name))
    debug.log('InputFiles: %s\n'%newlist)
    return newlist
<SYSTEM_TASK:>
Shortcut for string like fields
<END_TASK>
<USER_TASK:>
Description:
def _type_string(label, case=None):
    """Shortcut building the field spec for string-like fields."""
    def render(s):
        return abstractRender.default(s, case=case)
    return (label, abstractSearch.in_string, render, "")
<SYSTEM_TASK:>
Shortcut for boolean-like fields
<END_TASK>
<USER_TASK:>
Description:
def _type_bool(label, default=False):
    """Shortcut for boolean like fields"""
    # NOTE(review): `boolen` looks like a typo for `boolean`, but it must
    # match the attribute actually defined on abstractRender -- confirm there.
    return label, abstractSearch.nothing, abstractRender.boolen, default
<SYSTEM_TASK:>
Lists the available languages for the given translation domain.
<END_TASK>
<USER_TASK:>
Description:
def get_available_languages(domain):
    """Lists the available languages for the given translation domain.

    :param domain: the domain to get languages for
    :returns: list of locale identifiers, always starting with 'en_US';
        the result is memoized per domain in _AVAILABLE_LANGUAGES.
    """
    # Serve a (copied) memoized result if we already scanned this domain.
    if domain in _AVAILABLE_LANGUAGES:
        return copy.copy(_AVAILABLE_LANGUAGES[domain])

    localedir = '%s_LOCALEDIR' % domain.upper()
    find = lambda x: gettext.find(domain,
                                  localedir=os.environ.get(localedir),
                                  languages=[x])

    # NOTE(mrodden): en_US should always be available (and first in case
    # order matters) since our in-line message strings are en_US
    language_list = ['en_US']
    # NOTE(luisg): Babel <1.0 used a function called list(), which was
    # renamed to locale_identifiers() in >=1.0, the requirements master list
    # requires >=0.9.6, uncapped, so defensively work with both. We can remove
    # this check when the master list updates to >=1.0, and update all projects
    list_identifiers = (getattr(localedata, 'list', None) or
                        getattr(localedata, 'locale_identifiers'))
    locale_identifiers = list_identifiers()

    # Keep only the locales that actually have a catalog for this domain.
    for i in locale_identifiers:
        if find(i) is not None:
            language_list.append(i)

    # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
    # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
    # are perfectly legitimate locales:
    # https://github.com/mitsuhiko/babel/issues/37
    # In Babel 1.3 they fixed the bug and they support these locales, but
    # they are still not explicitly "listed" by locale_identifiers().
    # That is why we add the locales here explicitly if necessary so that
    # they are listed as supported.
    aliases = {'zh': 'zh_CN',
               'zh_Hant_HK': 'zh_HK',
               'zh_Hant': 'zh_TW',
               'fil': 'tl_PH'}
    for (locale_, alias) in six.iteritems(aliases):
        if locale_ in language_list and alias not in language_list:
            language_list.append(alias)

    _AVAILABLE_LANGUAGES[domain] = language_list
    return copy.copy(language_list)
<SYSTEM_TASK:>
Gets the translated unicode representation of the given object.
<END_TASK>
<USER_TASK:>
Description:
def translate(obj, desired_locale=None):
    """Get the translated unicode representation of the given object.

    If the object is not translatable it is returned as-is.
    If the locale is None the object is translated to the system locale.

    :param obj: the object to translate
    :param desired_locale: the locale to translate the message to, if None
        the default system locale will be used
    :returns: the translated object in unicode, or the original object if
        it could not be translated
    """
    # Messages translate themselves; anything else is first turned into
    # its unicode representation (which may itself be a Message subclass).
    candidate = obj if isinstance(obj, Message) else six.text_type(obj)
    if isinstance(candidate, Message):
        return candidate.translate(desired_locale)
    return obj
<SYSTEM_TASK:>
Translates all the translatable elements of the given arguments object.
<END_TASK>
<USER_TASK:>
Description:
def _translate_args(args, desired_locale=None):
    """Translate all the translatable elements of the given arguments.

    Tuples and dictionaries are translated value by value; any other
    object is translated directly (if translatable).  If the locale is
    None the system locale is used.

    :param args: the args to translate
    :param desired_locale: target locale, or None for the system default
    :returns: a new args object with the translated contents of the original
    """
    if isinstance(args, tuple):
        return tuple(translate(item, desired_locale) for item in args)
    if isinstance(args, dict):
        return dict((key, translate(val, desired_locale))
                    for (key, val) in six.iteritems(args))
    return translate(args, desired_locale)
<SYSTEM_TASK:>
Translate this message to the desired locale.
<END_TASK>
<USER_TASK:>
Description:
def translate(self, desired_locale=None):
    """Translate this message to the desired locale.

    :param desired_locale: The desired locale to translate the message to,
                           if no locale is provided the message will be
                           translated to the system's default locale.

    :returns: the translated message in unicode
    """
    translated_message = Message._translate_msgid(self.msgid,
                                                  self.domain,
                                                  desired_locale)
    if self.params is None:
        # No need for more translation
        return translated_message

    # This Message object may have been formatted with one or more
    # Message objects as substitution arguments, given either as a single
    # argument, part of a tuple, or as one or more values in a dictionary.
    # When translating this Message we need to translate those Messages too
    translated_params = _translate_args(self.params, desired_locale)

    # Substitute only after the params themselves are translated.
    translated_message = translated_message % translated_params

    return translated_message
<SYSTEM_TASK:>
Sanitize the object being modded with this Message.
<END_TASK>
<USER_TASK:>
Description:
def _sanitize_mod_params(self, other):
    """Sanitize the object being modded with this Message.

    - modding ``None`` is supported (it is wrapped in a 1-tuple)
    - dict parameters are trimmed down to only the keys the msgid uses
    - other values are snapshotted, so that if the message is translated
      later, the params are used as they were when the Message was created
    """
    if other is None:
        return (other,)
    if isinstance(other, dict):
        return self._trim_dictionary_parameters(other)
    return self._copy_param(other)
<SYSTEM_TASK:>
Return a dict that only has matching entries in the msgid.
<END_TASK>
<USER_TASK:>
Description:
def _trim_dictionary_parameters(self, dict_param):
"""Return a dict that only has matching entries in the msgid.""" |
# NOTE(luisg): Here we trim down the dictionary passed as parameters
# to avoid carrying a lot of unnecessary weight around in the message
# object, for example if someone passes in Message() % locals() but
# only some params are used, and additionally we prevent errors for
# non-deepcopyable objects by unicoding() them.
# Look for %(param) keys in msgid;
# Skip %% and deal with the case where % is first character on the line
keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid)
# If we don't find any %(param) keys but have a %s
if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid):
# Apparently the full dictionary is the parameter
params = self._copy_param(dict_param)
else:
params = {}
# Save our existing parameters as defaults to protect
# ourselves from losing values if we are called through an
# (erroneous) chain that builds a valid Message with
# arguments, and then does something like "msg % kwds"
# where kwds is an empty dictionary.
src = {}
if isinstance(self.params, dict):
src.update(self.params)
src.update(dict_param)
for key in keys:
params[key] = self._copy_param(src[key])
return params |
<SYSTEM_TASK:>
Returns a tuple of lookups to order by for the given column
<END_TASK>
<USER_TASK:>
Description:
def get_ordering_for_column(self, column, direction):
    """Return the lookups to order by for the given column and direction.

    Direction is an integer: -1 (descending), 0 (no ordering) or 1
    (ascending).
    """
    if direction == 0:
        return ()
    if column in self.orderings:
        ordering = self.orderings[column]
    else:
        # Fall back to the model field named like the column, if any.
        field = self.get_field(column)
        if field is None:
            return ()
        ordering = column
    if not isinstance(ordering, (tuple, list)):
        ordering = [ordering]
    if direction == 1:
        return ordering
    # Descending: flip each lookup by stripping or prepending '-'.
    return ['-' + lookup if lookup[0] != '-' else lookup[1:]
            for lookup in ordering]
<SYSTEM_TASK:>
Take a model instance and return it as a json struct
<END_TASK>
<USER_TASK:>
Description:
def model_to_json(self, object, cleanup=True):
    """Take a model instance and return it as a json struct.

    :param cleanup: run ``cleanup_model`` on the marshalled result
    :raises ValidationError: if the swagger spec has no definition for the
        instance's model
    """
    model_name = type(object).__name__
    definitions = self.swagger_dict['definitions']
    if model_name not in definitions:
        raise ValidationError("Swagger spec has no definition for model %s" % model_name)
    log.debug("Marshalling %s into json" % model_name)
    marshalled = marshal_model(self.spec, definitions[model_name], object)
    if cleanup:
        self.cleanup_model(marshalled)
    return marshalled
<SYSTEM_TASK:>
Validate an object against its swagger model
<END_TASK>
<USER_TASK:>
Description:
def validate(self, model_name, object):
    """Validate an object against its swagger model.

    :raises ValidationError: if the spec has no definition for *model_name*
    """
    definitions = self.swagger_dict['definitions']
    if model_name not in definitions:
        raise ValidationError("Swagger spec has no definition for model %s" % model_name)
    log.debug("Validating %s" % model_name)
    return validate_schema_object(self.spec, definitions[model_name], object)
<SYSTEM_TASK:>
Find all server endpoints defined in the swagger spec and calls 'callback' for each,
<END_TASK>
<USER_TASK:>
Description:
def call_on_each_endpoint(self, callback):
    """Find all server endpoints defined in the swagger spec and calls 'callback' for each,
    with an instance of EndpointData as argument.

    :param callback: callable taking one EndpointData argument
    :raises Exception: on specs missing x-bind-server or a valid
        'produces' section, or mixing body and query parameters
    """
    if 'paths' not in self.swagger_dict:
        return

    for path, d in list(self.swagger_dict['paths'].items()):
        for method, op_spec in list(d.items()):
            data = EndpointData(path, method)

            # Which server method handles this endpoint?
            if 'x-bind-server' not in op_spec:
                if 'x-no-bind-server' in op_spec:
                    # That route should not be auto-generated
                    log.info("Skipping generation of %s %s" % (method, path))
                    continue
                else:
                    raise Exception("Swagger api defines no x-bind-server for %s %s" % (method, path))
            data.handler_server = op_spec['x-bind-server']

            # Make sure that endpoint only produces 'application/json'
            if 'produces' not in op_spec:
                raise Exception("Swagger api has no 'produces' section for %s %s" % (method, path))
            if len(op_spec['produces']) != 1:
                raise Exception("Expecting only one type under 'produces' for %s %s" % (method, path))
            if op_spec['produces'][0] == 'application/json':
                data.produces_json = True
            elif op_spec['produces'][0] == 'text/html':
                data.produces_html = True
            else:
                raise Exception("Only 'application/json' or 'text/html' are supported. See %s %s" % (method, path))

            # Which client method handles this endpoint?
            if 'x-bind-client' in op_spec:
                data.handler_client = op_spec['x-bind-client']

            # Should we decorate the server handler?
            if 'x-decorate-server' in op_spec:
                data.decorate_server = op_spec['x-decorate-server']

            # Should we manipulate the requests parameters?
            if 'x-decorate-request' in op_spec:
                data.decorate_request = op_spec['x-decorate-request']

            # Generate a bravado-core operation object
            data.operation = Operation.from_spec(self.spec, path, method, op_spec)

            # Figure out how parameters are passed: one json in body? one or
            # more values in query?
            if 'parameters' in op_spec:
                params = op_spec['parameters']
                for p in params:
                    if p['in'] == 'body':
                        data.param_in_body = True
                    if p['in'] == 'query':
                        data.param_in_query = True
                    if p['in'] == 'path':
                        data.param_in_path = True

                if data.param_in_path:
                    # Substitute {...} with <...> in path, to make a Flask friendly path
                    data.path = data.path.replace('{', '<').replace('}', '>')

                if data.param_in_body and data.param_in_query:
                    raise Exception("Cannot support params in both body and param (%s %s)" % (method, path))
            else:
                data.no_params = True

            callback(data)
<SYSTEM_TASK:>
Buffer stdin and flush, and avoid incomplete files.
<END_TASK>
<USER_TASK:>
Description:
def main(args=None):
    """Buffer stdin and flush, and avoid incomplete files."""
    parser = argparse.ArgumentParser(description=main.__doc__)
    parser.add_argument(
        '--binary',
        dest='mode',
        action='store_const',
        const="wb",
        default="w",
        help='write in binary mode')
    # NOTE(review): `unicode` exists only on Python 2 -- this line raises
    # NameError on Python 3; confirm the supported interpreter version.
    parser.add_argument(
        'output', metavar='FILE', type=unicode, help='Output file')
    logging.basicConfig(
        level=logging.DEBUG,
        stream=sys.stderr,
        format='[%(levelname)s elapsed=%(relativeCreated)dms] %(message)s')
    args = parser.parse_args(args or sys.argv[1:])
    # Open the output only after stdin is fully consumed by the loop below;
    # the context manager guarantees a final flush/close in one step.
    with open(args.output, args.mode) as fd:
        for line in sys.stdin:
            fd.write(line)
<SYSTEM_TASK:>
Deletes a Constant Contact email marketing campaign.
<END_TASK>
<USER_TASK:>
Description:
def delete_email_marketing_campaign(self, email_marketing_campaign):
    """Delete a Constant Contact email marketing campaign.

    :returns: the HTTP response of the DELETE call
    """
    endpoint = '/'.join([
        self.EMAIL_MARKETING_CAMPAIGN_URL,
        str(email_marketing_campaign.constant_contact_id)])
    response = self.api.join(endpoint).delete()
    self.handle_response_status(response)
    return response
<SYSTEM_TASK:>
Pull constant_contact_id out of data.
<END_TASK>
<USER_TASK:>
Description:
def pre_save(cls, sender, instance, *args, **kwargs):
    """Mirror ``data['id']`` into ``instance.constant_contact_id``."""
    instance.constant_contact_id = str(instance.data['id'])
<SYSTEM_TASK:>
Deletes the CC email marketing campaign associated with me.
<END_TASK>
<USER_TASK:>
Description:
def pre_delete(cls, sender, instance, *args, **kwargs):
    """Delete the CC email marketing campaign associated with *instance*."""
    response = ConstantContact().delete_email_marketing_campaign(instance)
    response.raise_for_status()
<SYSTEM_TASK:>
Will send a multi-format email to recipients. Email may be queued through celery
<END_TASK>
<USER_TASK:>
Description:
def send_email(recipients, subject, text_content=None, html_content=None, from_email=None, use_base_template=True, category=None, fail_silently=False, language=None, cc=None, bcc=None, attachments=None, headers=None, bypass_queue=False, bypass_hijacking=False, attach_files=None):
    """
    Will send a multi-format email to recipients. Email may be queued through celery.

    :param recipients: one address or a list of addresses
    :param subject: mail subject (required)
    :param text_content: plain-text body; at least one of text/html is required
    :param html_content: HTML body
    :param from_email: sender, falls back to settings.DEFAULT_FROM_EMAIL
    :param use_base_template: wrap bodies in the mailing/base.txt|html templates
    :param category: tag forwarded via SendGrid/Mailgun headers when enabled
    :param language: translation language activated while rendering templates
    :param bypass_queue: send synchronously even when MAILING_USE_CELERY is set
    :param bypass_hijacking: ignore the MAILING_MAILTO_HIJACK redirection
    :param attach_files: tuples of (filepath, mimetype, filename)
    :raises MailerMissingSubjectError: when subject is falsy
    :raises MailerInvalidBodyError: when neither body is supplied
    """
    from django.conf import settings
    if not bypass_queue and hasattr(settings, 'MAILING_USE_CELERY') and settings.MAILING_USE_CELERY:
        # Defer the whole call (same arguments) to the celery task.
        from celery.execute import send_task
        return send_task('mailing.queue_send_email',[recipients, subject, text_content, html_content, from_email, use_base_template, category, fail_silently, language if language else translation.get_language(), cc, bcc, attachments, headers, bypass_hijacking, attach_files])
    else:
        header_category_value = '%s%s' % (settings.MAILING_HEADER_CATEGORY_PREFIX if hasattr(settings, 'MAILING_HEADER_CATEGORY_PREFIX') else '', category)

        # Check for sendgrid support and add category header
        # --------------------------------
        if hasattr(settings, 'MAILING_USE_SENDGRID'):
            send_grid_support = settings.MAILING_USE_SENDGRID
        else:
            send_grid_support = False
        if not headers:
            headers = dict()
        if send_grid_support and category:
            headers['X-SMTPAPI'] = '{"category": "%s"}' % header_category_value

        # Check for Mailgun support and add label header
        # --------------------------------
        if hasattr(settings, 'MAILING_USE_MAILGUN'):
            mailgun_support = settings.MAILING_USE_MAILGUN
        else:
            mailgun_support = False
        if not headers:
            headers = dict()
        if mailgun_support and category:
            headers['X-Mailgun-Tag'] = header_category_value

        # Ensure recipients are in a list
        # --------------------------------
        # NOTE(review): `basestring` is Python 2 only -- confirm interpreter.
        if isinstance(recipients, basestring):
            recipients_list = [recipients]
        else:
            recipients_list = recipients

        # Check if we need to hijack the email
        # --------------------------------
        if hasattr(settings, 'MAILING_MAILTO_HIJACK') and not bypass_hijacking:
            headers['X-MAILER-ORIGINAL-MAILTO'] = ','.join(recipients_list)
            recipients_list = [settings.MAILING_MAILTO_HIJACK]

        if not subject:
            raise MailerMissingSubjectError('Subject not supplied')

        # Send ascii, html or multi-part email
        # --------------------------------
        if text_content or html_content:
            if use_base_template:
                # Render inside the base templates in the requested language,
                # then restore the previously active language.
                prev_language = translation.get_language()
                language and translation.activate(language)
                text_content = render_to_string('mailing/base.txt', {'mailing_text_body': text_content, 'mailing_subject': subject, 'settings': settings}) if text_content else None
                html_content = render_to_string('mailing/base.html', {'mailing_html_body': html_content, 'mailing_subject': subject, 'settings': settings}) if html_content else None
                translation.activate(prev_language)
            msg = EmailMultiAlternatives(subject, text_content if text_content else html_content, from_email if from_email else settings.DEFAULT_FROM_EMAIL, recipients_list, cc=cc, bcc=bcc, attachments=attachments, headers = headers)
            if html_content and text_content:
                msg.attach_alternative(html_content, "text/html")
            elif html_content: # Only HTML
                msg.content_subtype = "html"

            # Attach files through attach_files helper
            # --------------------------------
            if attach_files:
                for att in attach_files: # attachments are tuples of (filepath, mimetype, filename)
                    with open(att[0], 'rb') as f:
                        content = f.read()
                    msg.attach(att[2], content, att[1])

            # Send email
            # --------------------------------
            msg.send(fail_silently=fail_silently)
        else:
            raise MailerInvalidBodyError('No text or html body supplied.')
<SYSTEM_TASK:>
Initialize a database connection by each connection string
<END_TASK>
<USER_TASK:>
Description:
def initialize_connections(self, scopefunc=None):
    """Create one engine/session pair per connection string defined in the
    ``FLASK_PHILO_SQLALCHEMY`` configuration and register it in
    ``self.connections``.
    """
    config = self.app.config['FLASK_PHILO_SQLALCHEMY']
    for name, conn_string in config.items():
        engine = create_engine(conn_string)
        session = scoped_session(sessionmaker(), scopefunc=scopefunc)
        session.configure(bind=engine)
        self.connections[name] = Connection(engine, session)
<SYSTEM_TASK:>
Return the row of the given Id if it exists, otherwise None. Only works with pseudo-acces
<END_TASK>
<USER_TASK:>
Description:
def index_from_id(self, Id):
    """Return the row of the given Id if it exists, otherwise None.

    Only works with pseudo-acces objects (items exposing an ``Id``
    attribute).
    """
    try:
        return [a.Id for a in self].index(Id)
    except ValueError:
        # list.index raises ValueError (not IndexError) when the Id is
        # missing -- the previous handler never caught it.
        return None
<SYSTEM_TASK:>
Append acces to list. Quite slow since it checks uniqueness.
<END_TASK>
<USER_TASK:>
Description:
def append(self, acces, **kwargs):
    """Append acces to the list, enforcing Id uniqueness (hence quite slow).

    Keyword arguments, if any, are stored as the `info` dict of this acces.
    """
    known_ids = {item.Id for item in self}
    if acces.Id in known_ids:
        raise ValueError("Acces id already in list !")
    list.append(self, acces)
    if kwargs:
        self.infos[acces.Id] = kwargs
<SYSTEM_TASK:>
Returns information associated with Id or list index
<END_TASK>
<USER_TASK:>
Description:
def get_info(self, key=None, Id=None) -> dict:
    """Return the info dict associated with a list index (*key*) or an *Id*.

    An explicit *key* takes precedence over *Id*; unknown ids yield ``{}``.
    """
    target_id = self[key].Id if key is not None else Id
    return self.infos.get(target_id, {})
<SYSTEM_TASK:>
Merges collections. Ensure uniqueness of ids
<END_TASK>
<USER_TASK:>
Description:
def extend(self, collection):
    """Merge *collection* into self, keeping ids unique and carrying infos."""
    existing_ids = {item.Id for item in self}
    for acces in collection:
        if acces.Id in existing_ids:
            continue
        list.append(self, acces)
        # Carry over any info attached to this acces in the source.
        info = collection.get_info(Id=acces.Id)
        if info:
            self.infos[acces.Id] = info
<SYSTEM_TASK:>
Return True if before is older than seconds.
<END_TASK>
<USER_TASK:>
Description:
def is_older_than(before, seconds):
    """Return True if *before* is more than *seconds* in the past."""
    if isinstance(before, six.string_types):
        before = parse_strtime(before)
    # Compare naive datetimes only.
    before = before.replace(tzinfo=None)
    return utcnow() - before > datetime.timedelta(seconds=seconds)
<SYSTEM_TASK:>
Return True if after is newer than seconds.
<END_TASK>
<USER_TASK:>
Description:
def is_newer_than(after, seconds):
    """Return True if *after* is more than *seconds* in the future."""
    if isinstance(after, six.string_types):
        after = parse_strtime(after)
    # Compare naive datetimes only.
    after = after.replace(tzinfo=None)
    return after - utcnow() > datetime.timedelta(seconds=seconds)
<SYSTEM_TASK:>
Advance overridden time using a datetime.timedelta.
<END_TASK>
<USER_TASK:>
Description:
def advance_time_delta(timedelta):
    """Advance overridden time using a datetime.timedelta.

    ``utcnow.override_time`` may be a single datetime or a list of them.
    """
    assert(utcnow.override_time is not None)
    try:
        # datetimes are immutable: rebuild the list so the advance actually
        # sticks.  The previous `for dt in ...: dt += timedelta` only
        # rebound the loop variable and left the list unchanged.
        utcnow.override_time = [dt + timedelta
                                for dt in utcnow.override_time]
    except TypeError:
        # Not iterable: a single overridden datetime.
        utcnow.override_time += timedelta
<SYSTEM_TASK:>
Make an rpc-safe datetime with microseconds.
<END_TASK>
<USER_TASK:>
Description:
def marshall_now(now=None):
    """Make an rpc-safe datetime dict with microseconds.

    Note: tzinfo is stripped, but not required for relative times.
    """
    if not now:
        now = utcnow()
    field_names = ('day', 'month', 'year', 'hour', 'minute', 'second',
                   'microsecond')
    return {name: getattr(now, name) for name in field_names}
<SYSTEM_TASK:>
Return the total seconds of datetime.timedelta object.
<END_TASK>
<USER_TASK:>
Description:
def total_seconds(delta):
    """Return the total seconds of a datetime.timedelta object.

    datetime.timedelta has no ``total_seconds`` on Python 2.6; fall back
    to computing it manually there.
    """
    try:
        return delta.total_seconds()
    except AttributeError:
        day_seconds = delta.days * 24 * 3600
        return (day_seconds + delta.seconds +
                float(delta.microseconds) / (10 ** 6))
<SYSTEM_TASK:>
Determines if time is going to happen in the next window seconds.
<END_TASK>
<USER_TASK:>
Description:
def is_soon(dt, window):
    """Determine if the time is going to happen in the next window seconds.

    :params dt: the time
    :params window: minimum seconds to remain to consider the time not soon
    :return: True if expiration is within the given duration
    """
    deadline = utcnow() + datetime.timedelta(seconds=window)
    return normalize_time(dt) <= deadline
<SYSTEM_TASK:>
Temporarily change or set the environment variable during the execution of a function.
<END_TASK>
<USER_TASK:>
Description:
def write(name, value):
    """Temporarily change or set the environment variable during the execution of a function.

    Args:
        name: The name of the environment variable
        value: A value to set for the environment variable

    Returns:
        The function return value.
    """
    def wrapped(func):
        @functools.wraps(func)
        def _decorator(*args, **kwargs):
            existing_env = core.read(name, allow_none=True)
            core.write(name, value)
            try:
                return func(*args, **kwargs)
            finally:
                # Restore the previous value even when func raises --
                # the original leaked the temporary value on exceptions.
                core.write(name, existing_env)
        return _decorator
    return wrapped
<SYSTEM_TASK:>
Only execute the function if the variable is set.
<END_TASK>
<USER_TASK:>
Description:
def isset(name):
    """Only execute the function if the variable is set.

    Args:
        name: The name of the environment variable

    Returns:
        The function return value or `None` if the function was skipped.
    """
    def wrapped(func):
        @functools.wraps(func)
        def _decorator(*args, **kwargs):
            if not core.isset(name):
                return None
            return func(*args, **kwargs)
        return _decorator
    return wrapped
<SYSTEM_TASK:>
Only execute the function if the boolean variable is set.
<END_TASK>
<USER_TASK:>
Description:
def bool(name, execute_bool=True, default=None):
    """Only execute the function if the boolean variable is set.

    NOTE(review): shadows the builtin `bool` inside this module --
    presumably deliberate for a decorator-namespace API; confirm.

    Args:
        name: The name of the environment variable
        execute_bool: The boolean value to execute the function on
        default: The default value if the environment variable is not set (respects `execute_bool`)

    Returns:
        The function return value or `None` if the function was skipped.
    """
    def wrapped(func):
        @functools.wraps(func)
        def _decorator(*args, **kwargs):
            # A set variable wins; otherwise fall back to the given default.
            if core.isset(name) and core.bool(name) == execute_bool:
                return func(*args, **kwargs)
            elif default is not None and default == execute_bool:
                return func(*args, **kwargs)
        return _decorator
    return wrapped
<SYSTEM_TASK:>
All fields are selectable
<END_TASK>
<USER_TASK:>
Description:
def flags(self, index: QModelIndex):
    """All fields are selectable; fields listed in EDITABLE_FIELDS become
    editable as well when IS_EDITABLE is set on the model."""
    if self.IS_EDITABLE and self.header[index.column()] in self.EDITABLE_FIELDS:
        return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsEditable
    else:
        return super().flags(index) | Qt.ItemIsSelectable
<SYSTEM_TASK:>
Order is defined by the current state of sorting
<END_TASK>
<USER_TASK:>
Description:
def sort(self, section: int, order=None):
    """Sort on *section*; the direction toggles when the same column is
    sorted twice in a row.  Qt's *order* argument is ignored -- direction
    is defined by the current `sort_state`.
    """
    attr = self.header[section]
    prev_section, prev_order = self.sort_state
    # Toggle when re-sorting the same column, otherwise start ascending.
    new_order = (not prev_order) if section == prev_section else True
    self.beginResetModel()
    self.collection.sort(attr, new_order)
    self.sort_state = (section, new_order)
    self.endResetModel()
<SYSTEM_TASK:>
Base implementation just pops the item from collection.
<END_TASK>
<USER_TASK:>
Description:
def remove_line(self, section):
    """Base implementation just pops the item at *section* from the
    collection, wrapped in a model reset.

    Re-implement to add global behaviour.
    """
    self.beginResetModel()
    self.collection.pop(section)
    self.endResetModel()
<SYSTEM_TASK:>
Changes item at index in collection. Emit dataChanged signal.
<END_TASK>
<USER_TASK:>
Description:
def set_item(self, index, new_item):
    """Change the item at *index* in the collection and emit dataChanged.

    :param index: row number, or a cell index exposing ``row()``
    :param new_item: dict-like replacement object
    """
    row = index.row() if hasattr(index, "row") else index
    self.collection[row] = new_item
    # Notify the whole row, first to last column.
    first = self.index(row, 0)
    last = self.index(row, self.rowCount() - 1)
    self.dataChanged.emit(first, last)
<SYSTEM_TASK:>
Return a model with a collection from a list of entry
<END_TASK>
<USER_TASK:>
Description:
def model_from_list(l, header):
    """Return a MultiSelectModel whose collection wraps the entries of *l*."""
    collection = groups.sortableListe(
        PseudoAccesCategorie(entry) for entry in l)
    return MultiSelectModel(collection, header)
<SYSTEM_TASK:>
Return error string code if the response is an error, otherwise ``"OK"``
<END_TASK>
<USER_TASK:>
Description:
def _parse_status_code(response):
    """Return the error string code if the response is an error, otherwise
    ``"OK"``.
    """
    # A bare string: the response itself is the status code.
    if isinstance(response, string_types):
        return response
    # A one-element list of strings: unwrap it.
    if (isinstance(response, list) and len(response) == 1
            and isinstance(response[0], string_types)):
        return response[0]
    # Any struct-like payload means success.
    return "OK"
<SYSTEM_TASK:>
Remove the zone record with the given ID that belongs to the given
<END_TASK>
<USER_TASK:>
Description:
def remove_zone_record(self, id, domain, subdomain=None):
    """Remove the zone record with the given ID under *domain*/*subdomain*.

    If no sub domain is given the wildcard sub-domain ("@") is assumed.
    """
    target_subdomain = "@" if subdomain is None else subdomain
    _validate_int("id", id)
    self._call("removeZoneRecord", domain, target_subdomain, id)
<SYSTEM_TASK:>
Parse the module and class name part of the fully qualified class name.
<END_TASK>
<USER_TASK:>
Description:
def parse_module_class(self):
    """Split ``self.class_name`` into its module path and class name.

    :return: tuple of regex groups ``(module_name, class_name)``
    :raises ValueError: if the name does not look fully qualified
    """
    fq_name = self.class_name
    parsed = re.match(self.CLASS_REGEX, fq_name)
    if parsed is None:
        raise ValueError(f'not a fully qualified class name: {fq_name}')
    return parsed.groups()
<SYSTEM_TASK:>
Return the module and class as a tuple of the given class in the
<END_TASK>
<USER_TASK:>
Description:
def get_module_class(self):
    """Resolve and return ``(module, class)`` for the configured class name.

    When ``self.reload`` is set the module is re-imported before the class
    attribute is looked up.

    :return: tuple of the resolved module object and the class object
    """
    pkg, cname = self.parse_module_class()
    logger.debug(f'pkg: {pkg}, class: {cname}')
    parts = pkg.split('.')
    # walk from the root package down to the target module
    mod = __import__(parts[0])
    for part in parts[1:]:
        mod = getattr(mod, part)
    logger.debug(f'mod: {mod}')
    if self.reload:
        importlib.reload(mod)
    cls = getattr(mod, cname)
    logger.debug(f'class: {cls}')
    return mod, cls
<SYSTEM_TASK:>
Create an instance of the specified class in the initializer.
<END_TASK>
<USER_TASK:>
Description:
def instance(self, *args, **kwargs):
    """Instantiate the configured class and return the new object.

    :param args: positional arguments forwarded to the class initializer
    :param kwargs: keyword arguments forwarded to the class initializer
    """
    _, cls = self.get_module_class()
    created = cls(*args, **kwargs)
    logger.debug(f'inst: {created}')
    return created
<SYSTEM_TASK:>
Convenience method to set the log level of the module given in the
<END_TASK>
<USER_TASK:>
Description:
def set_log_level(self, level=logging.INFO):
    """Convenience method to set the log level of the configured module.

    :param level: an instance of ``logging.<level>`` (default ``INFO``)
    """
    module_name = self.parse_module_class()[0]
    logging.getLogger(module_name).setLevel(level)
<SYSTEM_TASK:>
Register a class with the factory.
<END_TASK>
<USER_TASK:>
Description:
def register(cls, instance_class, name=None):
    """Add *instance_class* to the factory's lookup table.

    :param instance_class: the class object to register (not a string)
    :param name: lookup key; falls back to ``instance_class.__name__``
    """
    key = instance_class.__name__ if name is None else name
    cls.INSTANCE_CLASSES[key] = instance_class
<SYSTEM_TASK:>
Return the instance.
<END_TASK>
<USER_TASK:>
Description:
def _instance(self, cls, *args, **kwargs):
    """Instantiate *cls*, forwarding all arguments to ``__init__``.

    :param cls: class object to instantiate
    :param args: positional arguments for ``__init__``
    :param kwargs: keyword arguments for ``__init__``
    """
    logger.debug(f'args: {args}, kwargs: {kwargs}')
    created = cls(*args, **kwargs)
    return created
<SYSTEM_TASK:>
Create a new instance using key ``name``.
<END_TASK>
<USER_TASK:>
Description:
def instance(self, name=None, *args, **kwargs):
    """Create a new instance using key ``name``.

    :param name: the name of the class (by default) or the key name of the
                 class used to find the class; falls back to
                 ``self.default_name``
    :param args: given to the ``__init__`` method
    :param kwargs: given to the ``__init__`` method
    """
    logger.info(f'new instance of {name}')
    t0 = time()
    name = self.default_name if name is None else name
    logger.debug(f'creating instance of {name}')
    class_name, params = self._class_name_params(name)
    cls = self._find_class(class_name)
    params.update(kwargs)
    # optionally inject the factory's config/name when __init__ accepts them
    if self._has_init_config(cls):
        logger.debug(f'found config parameter')
        params['config'] = self.config
    if self._has_init_name(cls):
        logger.debug(f'found name parameter')
        params['name'] = name
    # Bug fix: the original tested ``logger.level >= logging.DEBUG``, which
    # is true for INFO/WARNING/... as well (numeric levels grow with
    # severity), so the parameter dump ran even when debug output was
    # disabled.  isEnabledFor() is the correct gate and also honours the
    # effective (inherited) level.
    if logger.isEnabledFor(logging.DEBUG):
        for k, v in params.items():
            logger.debug(f'populating {k} -> {v} ({type(v)})')
    inst = self._instance(cls, *args, **params)
    logger.info(f'created {name} instance of {cls.__name__} ' +
                f'in {(time() - t0):.2f}s')
    return inst
<SYSTEM_TASK:>
Clear the data, and thus, force it to be created on the next fetch. This is
<END_TASK>
<USER_TASK:>
Description:
def clear(self):
    """Force the data to be recreated on the next fetch.

    Removes the cached file from disk, deletes the attribute from the
    owner instance (if any), and clears the global cache entry.
    """
    attr_name = self.varname
    cache_file = self.path
    if cache_file.exists():
        logger.debug('deleting cached work: {}'.format(cache_file))
        cache_file.unlink()
    owner = self.owner
    if owner is not None and hasattr(owner, attr_name):
        logger.debug('removing instance var: {}'.format(attr_name))
        delattr(owner, attr_name)
    self.clear_global()
<SYSTEM_TASK:>
Invoke the file system operations to get the data, or create work.
<END_TASK>
<USER_TASK:>
Description:
def _load_or_create(self, *argv, **kwargs):
    """Load the cached object from ``self.path``, or create and cache it.

    If the pickle file exists it is loaded and returned; otherwise the work
    callback is invoked and its result is pickled to ``self.path``.

    Bug fix: the file is now opened for writing only *after* the work
    succeeds.  Previously the 'wb' handle was opened first, so a failure
    inside ``_do_work`` left behind an empty file that the next call would
    try (and fail) to unpickle.
    """
    if self.path.exists():
        self._info('loading work from {}'.format(self.path))
        with open(self.path, 'rb') as f:
            obj = pickle.load(f)
    else:
        obj = self._do_work(*argv, **kwargs)
        self._info('saving work to {}'.format(self.path))
        with open(self.path, 'wb') as f:
            pickle.dump(obj, f)
    return obj
<SYSTEM_TASK:>
Return whether or not the stash has any data available or not.
<END_TASK>
<USER_TASK:>
Description:
def has_data(self):
    """Return whether the delegate stash contains at least one key.

    The answer is computed once and memoized on ``self._has_data``.
    """
    if not hasattr(self, '_has_data'):
        try:
            next(iter(self.delegate.keys()))
        except StopIteration:
            self._has_data = False
        else:
            self._has_data = True
    return self._has_data
<SYSTEM_TASK:>
Return an opened shelve object.
<END_TASK>
<USER_TASK:>
Description:
def shelve(self):
    """Open the backing shelve file and return the handle.

    Side effect: marks the stash as open via ``self.is_open``.
    """
    logger.info('creating shelve data')
    db_path = str(self.create_path.absolute())
    handle = sh.open(db_path, writeback=self.writeback)
    self.is_open = True
    return handle
<SYSTEM_TASK:>
Load all instances with multiple threads.
<END_TASK>
<USER_TASK:>
Description:
def load_all(self, workers=None, limit=None, n_expected=None):
    """Load all instances with multiple threads.

    :param workers: number of workers to use to load instances, which
                    defaults to what was given in the class initializer
    :param limit: return a maximum, which defaults to no limit
    :param n_expected: rerun the iteration on the data if we didn't find
                       enough data, or more specifically, number of found
                       data points is less than ``n_expected``; defaults to
                       all
    """
    if not self.has_data:
        self._preempt(True)
        # we did the best we could (avoid repeat later in this method)
        n_expected = 0
    keys = tuple(self.delegate.keys())
    if n_expected is not None and len(keys) < n_expected:
        self._preempt(True)
        keys = self.delegate.keys()
    # Bug fix: itertools.islice takes (iterable, stop); the arguments were
    # reversed (``islice(limit, keys)``), which raised TypeError whenever a
    # limit was supplied.
    keys = it.islice(keys, limit) if limit is not None else keys
    pool = self._create_thread_pool(workers)
    logger.debug(f'workers={workers}, keys: {keys}')
    try:
        return iter(pool.map(self.delegate.load, keys))
    finally:
        pool.close()
<SYSTEM_TASK:>
Auto-generate server endpoints implementing the API into this Flask app
<END_TASK>
<USER_TASK:>
Description:
def spawn_api(self, app, decorator=None):
    """Auto-generate server endpoints implementing the API into this Flask app.

    :param app: the Flask application to attach the generated routes to
    :param decorator: optional callable applied to every generated endpoint
    """
    if decorator:
        # Bug fix: the original asserted type(decorator).__name__ ==
        # 'function', which rejected perfectly valid callables such as
        # classes, bound methods and functools.partial objects.
        assert callable(decorator), 'decorator must be callable'
    self.is_server = True
    self.app = app
    if self.local:
        # Re-generate client callers, this time as local and passing them the app
        self._generate_client_callers(app)
    return spawn_server_api(self.name, app, self.api_spec, self.error_callback, decorator)
<SYSTEM_TASK:>
Check the format of a osmnet_config object.
<END_TASK>
<USER_TASK:>
Description:
def format_check(settings):
    """
    Validate the structure of an osmnet_config dictionary.

    Parameters
    ----------
    settings : dict
        osmnet_config as a dictionary

    Returns
    -------
    Nothing

    Raises
    ------
    AssertionError
        If an unknown key is present or a value has the wrong type.
    """
    valid_keys = ['logs_folder', 'log_file', 'log_console', 'log_name',
                  'log_filename', 'keep_osm_tags']
    for key in list(settings.keys()):
        assert key in valid_keys, \
            ('{} not found in list of valid configuration keys').format(key)
        assert isinstance(key, str), ('{} must be a string').format(key)
        if key == 'keep_osm_tags':
            assert isinstance(settings[key], list), \
                ('{} must be a list').format(key)
            # Bug fix: the original looped over each tag and then iterated
            # *inside* the tag string, so it checked the characters of each
            # tag (always str, so always passing) and crashed with TypeError
            # on non-iterable values.  Check the list elements directly.
            assert all(isinstance(element, str)
                       for element in settings[key]), \
                'all elements must be a string'
        if key == 'log_file' or key == 'log_console':
            assert isinstance(settings[key], bool), \
                ('{} must be boolean').format(key)
<SYSTEM_TASK:>
Return a dict representation of an osmnet osmnet_config instance.
<END_TASK>
<USER_TASK:>
Description:
def to_dict(self):
    """Return this osmnet_config instance as a plain dictionary."""
    attrs = ('logs_folder', 'log_file', 'log_console', 'log_name',
             'log_filename', 'keep_osm_tags')
    return {attr: getattr(self, attr) for attr in attrs}
<SYSTEM_TASK:>
Create a filter to query Overpass API for the specified OSM network type.
<END_TASK>
<USER_TASK:>
Description:
def osm_filter(network_type):
    """
    Build the Overpass API way filter for the requested OSM network type.

    Parameters
    ----------
    network_type : string, {'walk', 'drive'} denoting the type of street
        network to extract

    Returns
    -------
    osm_filter : string

    Raises
    ------
    ValueError
        If *network_type* is not a known network type.
    """
    # drive: only roads drivable by a normal 2WD passenger vehicle, both
    # private and public; excludes un-drivable ways and service roads
    # tagged as parking, driveway, or emergency-access.
    drive_filter = ('["highway"!~"cycleway|footway|path|pedestrian|steps'
                    '|track|proposed|construction|bridleway|abandoned'
                    '|platform|raceway|service"]'
                    '["motor_vehicle"!~"no"]["motorcar"!~"no"]'
                    '["service"!~"parking|parking_aisle|driveway'
                    '|emergency_access"]')
    # walk: roads and pathways that allow pedestrian access, both private
    # and public; excludes limited-access roadways but allows service roads.
    walk_filter = ('["highway"!~"motor|proposed|construction|abandoned'
                   '|platform|raceway"]["foot"!~"no"]'
                   '["pedestrians"!~"no"]')
    filters = {'drive': drive_filter, 'walk': walk_filter}
    if network_type not in filters:
        raise ValueError('unknown network_type "{}"'.format(network_type))
    return filters[network_type]
<SYSTEM_TASK:>
Download OSM ways and nodes within a bounding box from the Overpass API.
<END_TASK>
<USER_TASK:>
Description:
def osm_net_download(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                     network_type='walk', timeout=180, memory=None,
                     max_query_area_size=50*1000*50*1000,
                     custom_osm_filter=None):
    """
    Download OSM ways and nodes within a bounding box from the Overpass API.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : string
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian
        pathways and 'drive' includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'

    Returns
    -------
    response_json : dict
    """
    # create a filter to exclude certain kinds of ways based on the requested
    # network_type
    if custom_osm_filter is None:
        request_filter = osm_filter(network_type)
    else:
        request_filter = custom_osm_filter
    response_jsons_list = []
    response_jsons = []
    # Bug fix: initialized up-front so the "no data" error below cannot hit
    # a NameError when the subdivided geometry turns out to be empty.
    query_str = None
    # server memory allocation in bytes formatted for Overpass API query
    if memory is None:
        maxsize = ''
    else:
        maxsize = '[maxsize:{}]'.format(memory)
    # define the Overpass API query
    # way["highway"] denotes ways with highway keys and {filters} returns
    # ways with the requested key/value. the '>' makes it recurse so we get
    # ways and way nodes. maxsize is in bytes.
    # turn bbox into a polygon and project to local UTM
    polygon = Polygon([(lng_max, lat_min), (lng_min, lat_min),
                       (lng_min, lat_max), (lng_max, lat_max)])
    geometry_proj, crs_proj = project_geometry(polygon,
                                               crs={'init': 'epsg:4326'})
    # subdivide the bbox area poly if it exceeds the max area size
    # (in meters), then project back to WGS84
    geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry(
        geometry_proj, max_query_area_size=max_query_area_size)
    geometry, crs = project_geometry(geometry_proj_consolidated_subdivided,
                                     crs=crs_proj, to_latlong=True)
    log('Requesting network data within bounding box from Overpass API '
        'in {:,} request(s)'.format(len(geometry)))
    start_time = time.time()
    # loop through each polygon in the geometry
    for poly in geometry:
        # represent bbox as lng_max, lat_min, lng_min, lat_max and round
        # lat-longs to 8 decimal places to create
        # consistent URL strings
        lng_max, lat_min, lng_min, lat_max = poly.bounds
        query_template = '[out:json][timeout:{timeout}]{maxsize};' \
                         '(way["highway"]' \
                         '{filters}({lat_min:.8f},{lng_max:.8f},' \
                         '{lat_max:.8f},{lng_min:.8f});>;);out;'
        query_str = query_template.format(lat_max=lat_max, lat_min=lat_min,
                                          lng_min=lng_min, lng_max=lng_max,
                                          filters=request_filter,
                                          timeout=timeout, maxsize=maxsize)
        response_json = overpass_request(data={'data': query_str},
                                         timeout=timeout)
        response_jsons_list.append(response_json)
    log('Downloaded OSM network data within bounding box from Overpass '
        'API in {:,} request(s) and'
        ' {:,.2f} seconds'.format(len(geometry), time.time()-start_time))
    # stitch together individual json results
    # (loop variable renamed from ``json``, which shadowed the common
    # stdlib module name)
    for response_chunk in response_jsons_list:
        try:
            response_jsons.extend(response_chunk['elements'])
        except KeyError:
            pass
    # remove duplicate records resulting from the json stitching
    start_time = time.time()
    record_count = len(response_jsons)
    if record_count == 0:
        raise Exception('Query resulted in no data. Check your query '
                        'parameters: {}'.format(query_str))
    else:
        response_jsons_df = pd.DataFrame.from_records(response_jsons,
                                                      index='id')
        nodes = response_jsons_df[response_jsons_df['type'] == 'node']
        nodes = nodes[~nodes.index.duplicated(keep='first')]
        ways = response_jsons_df[response_jsons_df['type'] == 'way']
        ways = ways[~ways.index.duplicated(keep='first')]
        response_jsons_df = pd.concat([nodes, ways], axis=0)
        response_jsons_df.reset_index(inplace=True)
        response_jsons = response_jsons_df.to_dict(orient='records')
    if record_count - len(response_jsons) > 0:
        log('{:,} duplicate records removed. Took {:,.2f} seconds'.format(
            record_count - len(response_jsons), time.time() - start_time))
    return {'elements': response_jsons}
<SYSTEM_TASK:>
Send a request to the Overpass API via HTTP POST and return the
<END_TASK>
<USER_TASK:>
Description:
def overpass_request(data, pause_duration=None, timeout=180,
                     error_pause_duration=None):
    """
    Send a request to the Overpass API via HTTP POST and return the
    JSON response.

    Parameters
    ----------
    data : dict or OrderedDict
        key-value pairs of parameters to post to Overpass API
    pause_duration : int
        how long to pause in seconds before requests, if None, will query
        Overpass API status endpoint
        to find when next slot is available
    timeout : int
        the timeout interval for the requests library
    error_pause_duration : int
        how long to pause in seconds before re-trying requests if error

    Returns
    -------
    response_json : dict
    """
    # define the Overpass API URL, then construct a GET-style URL
    url = 'http://www.overpass-api.de/api/interpreter'
    start_time = time.time()
    log('Posting to {} with timeout={}, "{}"'.format(url, timeout, data))
    response = requests.post(url, data=data, timeout=timeout)
    # get the response size and the domain, log result
    size_kb = len(response.content) / 1000.
    # Bug fix: the inline (?s) flag must appear at the start of the pattern;
    # a mid-pattern position has been a hard error since Python 3.11.
    domain = re.findall(r'(?s)//(.*?)/', url)[0]
    log('Downloaded {:,.1f}KB from {} in {:,.2f} seconds'
        .format(size_kb, domain, time.time()-start_time))
    try:
        response_json = response.json()
        if 'remark' in response_json:
            # Bug fix: level=lg.WARNING was previously passed (and silently
            # ignored) inside str.format() instead of being given to log(),
            # so the server remark was never logged at WARNING level.
            log('Server remark: "{}"'.format(response_json['remark']),
                level=lg.WARNING)
    except Exception:
        # 429 = 'too many requests' and 504 = 'gateway timeout' from server
        # overload. handle these errors by recursively
        # calling overpass_request until a valid response is achieved
        if response.status_code in [429, 504]:
            # pause for error_pause_duration seconds before re-trying request
            if error_pause_duration is None:
                error_pause_duration = get_pause_duration()
            log('Server at {} returned status code {} and no JSON data. '
                'Re-trying request in {:.2f} seconds.'
                .format(domain, response.status_code, error_pause_duration),
                level=lg.WARNING)
            time.sleep(error_pause_duration)
            response_json = overpass_request(data=data,
                                             pause_duration=pause_duration,
                                             timeout=timeout)
        # else, this was an unhandled status_code, throw an exception
        else:
            log('Server at {} returned status code {} and no JSON data'
                .format(domain, response.status_code), level=lg.ERROR)
            raise Exception('Server returned no JSON data.\n{} {}\n{}'
                            .format(response, response.reason, response.text))
    return response_json
<SYSTEM_TASK:>
Check the Overpass API status endpoint to determine how long to wait until
<END_TASK>
<USER_TASK:>
Description:
def get_pause_duration(recursive_delay=5, default_duration=10):
    """
    Check the Overpass API status endpoint to determine how long to wait
    until the next slot is available.

    Parameters
    ----------
    recursive_delay : int
        how long to wait between recursive calls if server is currently
        running a query
    default_duration : int
        if fatal error, function falls back on returning this value

    Returns
    -------
    pause_duration : int
    """
    try:
        response = requests.get('http://overpass-api.de/api/status')
        status = response.text.split('\n')[3]
        status_first_token = status.split(' ')[0]
    except Exception:
        # deliberately broad: any network failure or unexpected status page
        # layout falls back to the default pause duration
        log('Unable to query http://overpass-api.de/api/status',
            level=lg.ERROR)
        return default_duration
    try:
        # if first token is numeric, it indicates the number of slots
        # available - no wait required
        available_slots = int(status_first_token)
        pause_duration = 0
    # Bug fix: narrowed from ``except Exception`` -- only a failed int()
    # conversion should fall through to the token inspection below; any
    # other error (e.g. a typo in this block) was previously swallowed too.
    except ValueError:
        # if first token is 'Slot', it tells you when your slot will be free
        if status_first_token == 'Slot':
            utc_time_str = status.split(' ')[3]
            utc_time = date_parser.parse(utc_time_str).replace(tzinfo=None)
            pause_duration = math.ceil(
                (utc_time - dt.datetime.utcnow()).total_seconds())
            pause_duration = max(pause_duration, 1)
        # if first token is 'Currently', it is currently running a query so
        # check back in recursive_delay seconds
        elif status_first_token == 'Currently':
            time.sleep(recursive_delay)
            pause_duration = get_pause_duration()
        else:
            # any other status is unrecognized - log an error and return
            # default duration
            log('Unrecognized server status: "{}"'.format(status),
                level=lg.ERROR)
            return default_duration
    return pause_duration
<SYSTEM_TASK:>
Project a shapely Polygon or MultiPolygon from WGS84 to UTM, or vice-versa
<END_TASK>
<USER_TASK:>
Description:
def project_geometry(geometry, crs, to_latlong=False):
    """
    Project a shapely Polygon or MultiPolygon from WGS84 to UTM, or
    vice-versa.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to project
    crs : int
        the starting coordinate reference system of the passed-in geometry
    to_latlong : bool
        if True, project from crs to WGS84, if False, project
        from crs to local UTM zone

    Returns
    -------
    geometry_proj, crs : tuple (projected shapely geometry, crs of the
        projected geometry)
    """
    # wrap the single geometry in a one-row GeoDataFrame so project_gdf
    # can do the actual reprojection work
    container = gpd.GeoDataFrame()
    container.crs = crs
    container.name = 'geometry to project'
    container['geometry'] = None
    container.loc[0, 'geometry'] = geometry
    projected = project_gdf(container, to_latlong=to_latlong)
    return projected['geometry'].iloc[0], projected.crs
<SYSTEM_TASK:>
Process a node element entry into a dict suitable for going into a
<END_TASK>
<USER_TASK:>
Description:
def process_node(e):
    """
    Convert a raw OSM node element into a flat dict suitable for a Pandas
    DataFrame row.

    Parameters
    ----------
    e : dict
        individual node element in downloaded OSM json

    Returns
    -------
    node : dict
    """
    node = {'id': e['id'], 'lat': e['lat'], 'lon': e['lon']}
    # keep only the configured OSM tags; the np.nan sentinel also covers
    # the "no tags key at all" case
    tags = e.get('tags', np.nan)
    if tags is not np.nan:
        for tag_key, tag_value in list(tags.items()):
            if tag_key in config.settings.keep_osm_tags:
                node[tag_key] = tag_value
    return node
<SYSTEM_TASK:>
Process a way element entry into a list of dicts suitable for going into
<END_TASK>
<USER_TASK:>
Description:
def process_way(e):
    """
    Convert a raw OSM way element into a metadata dict plus the list of
    dicts linking the way to its member nodes.

    Parameters
    ----------
    e : dict
        individual way element in downloaded OSM json

    Returns
    -------
    way : dict
    waynodes : list of dict
    """
    way = {'id': e['id']}
    # keep only the configured OSM tags; the np.nan sentinel also covers
    # the "no tags key at all" case
    tags = e.get('tags', np.nan)
    if tags is not np.nan:
        for tag_key, tag_value in list(tags.items()):
            if tag_key in config.settings.keep_osm_tags:
                way[tag_key] = tag_value
    # one link row per node that makes up the way, preserving order
    waynodes = [{'way_id': e['id'], 'node_id': node_id}
                for node_id in e['nodes']]
    return way, waynodes
<SYSTEM_TASK:>
Get DataFrames of OSM data in a bounding box.
<END_TASK>
<USER_TASK:>
Description:
def ways_in_bbox(lat_min, lng_min, lat_max, lng_max, network_type,
                 timeout=180, memory=None,
                 max_query_area_size=50*1000*50*1000,
                 custom_osm_filter=None):
    """
    Get DataFrames of OSM data in a bounding box.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : {'walk', 'drive'}, optional
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian pathways and 'drive'
        includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'

    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame
    """
    # download the raw JSON, then parse it into node/way/waynode frames
    raw_response = osm_net_download(
        lat_max=lat_max, lat_min=lat_min, lng_min=lng_min, lng_max=lng_max,
        network_type=network_type, timeout=timeout, memory=memory,
        max_query_area_size=max_query_area_size,
        custom_osm_filter=custom_osm_filter)
    return parse_network_osm_query(raw_response)
<SYSTEM_TASK:>
Returns a set of all the nodes that appear in 2 or more ways.
<END_TASK>
<USER_TASK:>
Description:
def intersection_nodes(waynodes):
    """
    Return the set of node IDs shared by two or more ways.

    Parameters
    ----------
    waynodes : pandas.DataFrame
        Mapping of way IDs to node IDs as returned by `ways_in_bbox`.

    Returns
    -------
    intersections : set
        Node IDs that appear in 2 or more ways.
    """
    occurrences = waynodes.node_id.value_counts()
    repeated = occurrences[occurrences > 1]
    return set(repeated.index.values)
<SYSTEM_TASK:>
Create a table of node pairs with the distances between them.
<END_TASK>
<USER_TASK:>
Description:
def node_pairs(nodes, ways, waynodes, two_way=True):
    """
    Create a table of node pairs with the distances between them.

    Parameters
    ----------
    nodes : pandas.DataFrame
        Must have 'lat' and 'lon' columns.
    ways : pandas.DataFrame
        Table of way metadata.
    waynodes : pandas.DataFrame
        Table linking way IDs to node IDs. Way IDs should be in the index,
        with a column called 'node_ids'.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once. Default is True.

    Returns
    -------
    pairs : pandas.DataFrame
        Will have columns of 'from_id', 'to_id', and 'distance'.
        The index will be a MultiIndex of (from id, to id).
        The distance metric is in meters.
    """
    start_time = time.time()
    # sliding window of consecutive (node, next-node) pairs along a way
    def pairwise(l):
        return zip(islice(l, 0, len(l)), islice(l, 1, None))
    # nodes appearing in 2+ ways become the graph's intersection vertices
    intersections = intersection_nodes(waynodes)
    waymap = waynodes.groupby(level=0, sort=False)
    pairs = []
    for id, row in ways.iterrows():
        # restrict the way to its intersection nodes, preserving order
        nodes_in_way = waymap.get_group(id).node_id.values
        nodes_in_way = [x for x in nodes_in_way if x in intersections]
        if len(nodes_in_way) < 2:
            # no nodes to connect in this way
            continue
        for from_node, to_node in pairwise(nodes_in_way):
            if from_node != to_node:
                fn = nodes.loc[from_node]
                tn = nodes.loc[to_node]
                # gcd here is presumably this module's great-circle distance
                # helper returning meters (not math.gcd) -- TODO confirm
                distance = round(gcd(fn.lat, fn.lon, tn.lat, tn.lon), 6)
                col_dict = {'from_id': from_node,
                            'to_id': to_node,
                            'distance': distance}
                # carry the configured OSM tags over onto the edge record
                for tag in config.settings.keep_osm_tags:
                    try:
                        col_dict.update({tag: row[tag]})
                    except KeyError:
                        pass
                pairs.append(col_dict)
                # NOTE(review): when two_way is False the reverse edge is
                # also emitted -- confirm the flag's intent matches the
                # docstring ("pairs occur once" when True)
                if not two_way:
                    col_dict = {'from_id': to_node,
                                'to_id': from_node,
                                'distance': distance}
                    for tag in config.settings.keep_osm_tags:
                        try:
                            col_dict.update({tag: row[tag]})
                        except KeyError:
                            pass
                    pairs.append(col_dict)
    pairs = pd.DataFrame.from_records(pairs)
    if pairs.empty:
        raise Exception('Query resulted in no connected node pairs. Check '
                        'your query parameters or bounding box')
    else:
        # MultiIndex keyed on (from_id, to_id) for fast edge lookups
        pairs.index = pd.MultiIndex.from_arrays([pairs['from_id'].values,
                                                 pairs['to_id'].values])
    log('Edge node pairs completed. Took {:,.2f} seconds'
        .format(time.time()-start_time))
    return pairs
<SYSTEM_TASK:>
Returns a list of lines from a input markdown file.
<END_TASK>
<USER_TASK:>
Description:
def read_lines(in_file):
    """Return the contents of the Markdown file *in_file* as a list of lines."""
    with open(in_file, 'r') as handle:
        return handle.read().split('\n')
<SYSTEM_TASK:>
Gets headlines from the markdown document and creates anchor tags.
<END_TASK>
<USER_TASK:>
Description:
def tag_and_collect(lines, id_tag=True, back_links=False, exclude_h=None, remove_dashes=False):
    """
    Gets headlines from the markdown document and creates anchor tags.

    Keyword arguments:
        lines: a list of sublists where every sublist
            represents a line from a Markdown document.
        id_tag: if true, inserts the <a id> tags (not req. by GitHub)
        back_links: if true, adds "back to top" links below each headline
        exclude_h: header levels to exclude. E.g., [2, 3]
            excludes level 2 and 3 headings.
        remove_dashes: if true, removes dashes from headline slugs

    Returns a tuple of 2 lists:
        1st list:
            A modified version of the input list where
            <a id="some-header"></a> anchor tags where inserted
            above the header lines (if github is False).
        2nd list:
            A list of 3-value sublists, where the first value
            represents the heading, the second value the string
            that was inserted assigned to the IDs in the anchor tags,
            and the third value is an integer that represents the headline
            level. E.g.,
            [['some header lvl3', 'some-header-lvl3', 3], ...]
    """
    out_contents = []
    headlines = []
    for l in lines:
        saw_headline = False
        orig_len = len(l)
        l = l.lstrip()
        if l.startswith(('# ', '## ', '### ', '#### ', '##### ', '###### ')):
            # comply with new markdown standards
            # not a headline if '#' not followed by whitespace '##no-header':
            if not l.lstrip('#').startswith(' '):
                continue
            # not a headline if more than 6 '#':
            if len(l) - len(l.lstrip('#')) > 6:
                continue
            # headers can be indented by at most 3 spaces:
            if orig_len - len(l) > 3:
                continue
            # ignore empty headers
            if not set(l) - {'#', ' '}:
                continue
            saw_headline = True
            slugified = slugify_headline(l, remove_dashes)
            if not exclude_h or not slugified[-1] in exclude_h:
                if id_tag:
                    # Bug fix: the anchor string was previously assigned back
                    # to the *id_tag* parameter itself, clobbering the boolean
                    # flag (it only kept working because a non-empty string
                    # is truthy).  Use a dedicated local instead.
                    anchor = '<a class="mk-toclify" id="%s"></a>'\
                             % (slugified[1])
                    out_contents.append(anchor)
                headlines.append(slugified)
        out_contents.append(l)
        if back_links and saw_headline:
            out_contents.append('[[back to top](#table-of-contents)]')
    return out_contents, headlines
<SYSTEM_TASK:>
Creates the table of contents from the headline list
<END_TASK>
<USER_TASK:>
Description:
def create_toc(headlines, hyperlink=True, top_link=False, no_toc_header=False):
    """
    Build the table-of-contents lines from the headline list produced by
    the tag_and_collect function.

    Keyword Arguments:
        headlines: list of lists
            e.g., ['Some header lvl3', 'some-header-lvl3', 3]
        hyperlink: Creates hyperlinks in Markdown format if True,
            e.g., '- [Some header lvl1](#some-header-lvl1)'
        top_link: if True, add an id tag for linking the table
            of contents itself (for the back-to-top-links)
        no_toc_header: suppresses TOC header if True.

    Returns a list of table-of-contents lines in Markdown format,
        e.g., ['    - [Some header lvl3](#some-header-lvl3)', ...]
    """
    toc_lines = []
    if not no_toc_header:
        if top_link:
            toc_lines.append('<a class="mk-toclify" id="table-of-contents"></a>\n')
        toc_lines.append('# Table of Contents')
    for entry in headlines:
        # nesting depth translates into four spaces of indent per level
        indent = (entry[2] - 1) * '    '
        if hyperlink:
            toc_lines.append('%s- [%s](#%s)' % (indent, entry[0], entry[1]))
        else:
            toc_lines.append('%s- %s' % (indent, entry[0]))
    toc_lines.append('\n')
    return toc_lines
<SYSTEM_TASK:>
Returns a string with the Markdown output contents incl.
<END_TASK>
<USER_TASK:>
Description:
def build_markdown(toc_headlines, body, spacer=0, placeholder=None):
    """
    Assemble and return the final Markdown document including the TOC.

    Keyword arguments:
        toc_headlines: lines for the table of contents
            as created by the create_toc function.
        body: contents of the Markdown file including
            ID-anchor tags as returned by the
            tag_and_collect function.
        spacer: Adds vertical space after the table
            of contents. Height in pixels.
        placeholder: If a placeholder string is provided, the placeholder
            will be replaced by the TOC instead of inserting the TOC at
            the top of the document
    """
    toc_lines = list(toc_headlines)
    if spacer:
        # fixed-height div pushes the body content down below the TOC
        toc_lines.append('\n<div style="height:%spx;"></div>\n' % (spacer))
    toc_markdown = "\n".join(toc_lines)
    body_markdown = "\n".join(body).strip()
    if placeholder:
        return body_markdown.replace(placeholder, toc_markdown)
    return toc_markdown + body_markdown
<SYSTEM_TASK:>
Writes to an output file if `outfile` is a valid path.
<END_TASK>
<USER_TASK:>
Description:
def output_markdown(markdown_cont, output_file):
    """Write *markdown_cont* to *output_file* when a path is given.

    A falsy *output_file* (e.g. None or '') disables writing entirely.
    """
    if not output_file:
        return
    with open(output_file, 'w') as out:
        out.write(markdown_cont)
<SYSTEM_TASK:>
Function to add table of contents to markdown files.
<END_TASK>
<USER_TASK:>
Description:
def markdown_toclify(input_file, output_file=None, github=False,
                     back_to_top=False, nolink=False,
                     no_toc_header=False, spacer=0, placeholder=None,
                     exclude_h=None, remove_dashes=False):
    """Add a table of contents to a Markdown file.

    Parameters
    -----------
    input_file: str
        Path to the markdown input file.
    output_file: str (default: None)
        Path to the markdown output file.
    github: bool (default: False)
        Uses GitHub TOC syntax if True.
    back_to_top: bool (default: False)
        Inserts back-to-top links below headings if True.
    nolink: bool (default: False)
        Creates the table of contents without internal links if True.
    no_toc_header: bool (default: False)
        Suppresses the Table of Contents header if True.
    spacer: int (default: 0)
        Inserts horizontal space (in pixels) after the table of contents.
    placeholder: str (default: None)
        Inserts the TOC at the placeholder string instead
        of inserting the TOC at the top of the document.
    exclude_h: list (default: None)
        Excludes header levels, e.g., if [2, 3], ignores header
        levels 2 and 3 in the TOC.
    remove_dashes: bool (default: False)
        Removes dashes from headline slugs.

    Returns
    -----------
    cont: str
        Markdown contents including the TOC.
    """
    # strip artifacts left behind by a previous run before re-tagging
    cleaned = remove_lines(read_lines(input_file),
                           remove=('[[back to top]', '<a class="mk-toclify"'))
    body_with_anchors, raw_headlines = tag_and_collect(
        cleaned,
        id_tag=not github,
        back_links=back_to_top,
        exclude_h=exclude_h,
        remove_dashes=remove_dashes
    )
    toc_lines = create_toc(positioning_headlines(raw_headlines),
                           hyperlink=not nolink,
                           top_link=not nolink and not github,
                           no_toc_header=no_toc_header)
    if nolink:
        # without links the inserted anchor tags are useless; keep the
        # cleaned body instead
        body_with_anchors = cleaned
    cont = build_markdown(toc_headlines=toc_lines,
                          body=body_with_anchors,
                          spacer=spacer,
                          placeholder=placeholder)
    if output_file:
        output_markdown(cont, output_file)
    return cont
<SYSTEM_TASK:>
parse urls with different prefixes
<END_TASK>
<USER_TASK:>
Description:
def url_parse(name):
    """Normalise a GitHub URL with a known prefix to 'owner/repo' form.

    Accepts bare 'owner/repo' strings (returned as-is, minus a trailing
    slash) and URLs prefixed with 'github.com', 'www.github.com',
    'http://github.com' or 'https://github.com'.  Any other text before
    'github.com' aborts the program.
    """
    position = name.find("github.com")
    if position >= 0:
        if position != 0:
            # 'github.com' appears mid-string: only the well-known
            # prefixes may precede it (find() == 0 for a valid one).
            prefix_positions = (
                name.find("www.github.com"),
                name.find("http://github.com"),
                name.find("https://github.com"),
            )
            if 0 not in prefix_positions:
                exception()
                sys.exit(0)
        # Skip past 'github.com/' (10 chars + separator).
        name = name[position + 11:]
    return name[:-1] if name.endswith('/') else name
<SYSTEM_TASK:>
Helper method that parses special fields to Python objects
<END_TASK>
<USER_TASK:>
Description:
def _parse_special_fields(self, data):
    """
    Parse special response fields into Python objects.

    :param data: response from Monzo API request
    :type data: dict
    """
    self.created = parse_date(data.pop('created'))

    # 'settled' is not present on every transaction.
    if data.get('settled'):
        self.settled = parse_date(data.pop('settled'))

    # 'merchant' may hold either a plain ID string or a whole object.
    merchant = data.get('merchant')
    if merchant and not isinstance(merchant, six.text_type):
        self.merchant = MonzoMerchant(data=data.pop('merchant'))
<SYSTEM_TASK:>
Helper method to handle HTTP requests and catch API errors
<END_TASK>
<USER_TASK:>
Description:
def _get_response(self, method, endpoint, params=None):
    """
    Helper method to handle HTTP requests and catch API errors

    :param method: valid HTTP method
    :type method: str
    :param endpoint: API endpoint
    :type endpoint: str
    :param params: extra parameters passed with the request
    :type params: dict
    :returns: API response
    :rtype: Response
    :raises MonzoAPIError: when the API responds with a non-200 status
    """
    url = urljoin(self.api_url, endpoint)

    try:
        response = getattr(self._session, method)(url, params=params)

        # Check if Monzo API returned HTTP 401, which could mean that the
        # token is expired
        if response.status_code == 401:
            raise TokenExpiredError
    except TokenExpiredError:
        # For some reason 'requests-oauthlib' automatic token refreshing
        # doesn't work so we do it here semi-manually
        # NOTE: this handler also catches a TokenExpiredError raised by the
        # OAuth2 session itself, not only the manual re-raise above.
        self._refresh_oath_token()

        self._session = OAuth2Session(
            client_id=self._client_id,
            token=self._token,
        )

        # Retry the request once with the freshly refreshed token.
        response = getattr(self._session, method)(url, params=params)

    if response.status_code != requests.codes.ok:
        raise MonzoAPIError(
            "Something went wrong: {}".format(response.json())
        )

    return response
<SYSTEM_TASK:>
Returns a list of accounts owned by the currently authorised user.
<END_TASK>
<USER_TASK:>
Description:
def accounts(self, refresh=False):
    """
    Return a list of accounts owned by the currently authorised user.

    Callers often use this to decide whether to require an explicit
    account ID or use the only available one, so the response is cached
    by default.

    Official docs:
        https://monzo.com/docs/#list-accounts

    :param refresh: decides if the accounts information should be refreshed
    :type refresh: bool
    :returns: list of Monzo accounts
    :rtype: list of MonzoAccount
    """
    if self._cached_accounts and not refresh:
        return self._cached_accounts

    response = self._get_response(method='get', endpoint='/accounts')

    self._cached_accounts = [
        MonzoAccount(data=account)
        for account in response.json()['accounts']
    ]
    return self._cached_accounts
<SYSTEM_TASK:>
Returns balance information for a specific account.
<END_TASK>
<USER_TASK:>
Description:
def balance(self, account_id=None):
    """
    Return balance information for a specific account.

    Official docs:
        https://monzo.com/docs/#read-balance

    :param account_id: Monzo account ID; may be omitted when the user
        owns exactly one account
    :type account_id: str
    :raises: ValueError
    :returns: Monzo balance instance
    :rtype: MonzoBalance
    """
    if not account_id:
        accounts = self.accounts()
        if len(accounts) != 1:
            raise ValueError("You need to pass account ID")
        account_id = accounts[0].id

    response = self._get_response(
        method='get',
        endpoint='/balance',
        params={'account_id': account_id},
    )
    return MonzoBalance(data=response.json())
<SYSTEM_TASK:>
Returns a list of pots owned by the currently authorised user.
<END_TASK>
<USER_TASK:>
Description:
def pots(self, refresh=False):
    """
    Return a list of pots owned by the currently authorised user.

    The response is cached; pass ``refresh=True`` to refetch.

    Official docs:
        https://monzo.com/docs/#pots

    :param refresh: decides if the pots information should be refreshed
    :type refresh: bool
    :returns: list of Monzo pots
    :rtype: list of MonzoPot
    """
    if self._cached_pots and not refresh:
        return self._cached_pots

    response = self._get_response(method='get', endpoint='/pots/listV1')

    self._cached_pots = [
        MonzoPot(data=pot) for pot in response.json()['pots']
    ]
    return self._cached_pots
<SYSTEM_TASK:>
Returns a list of transactions on the user's account.
<END_TASK>
<USER_TASK:>
Description:
def transactions(self, account_id=None, reverse=True, limit=None):
    """
    Return a list of transactions on the user's account.

    Official docs:
        https://monzo.com/docs/#list-transactions

    :param account_id: Monzo account ID; may be omitted when the user
        owns exactly one account
    :type account_id: str
    :param reverse: whether transactions should be in descending order
    :type reverse: bool
    :param limit: how many transactions should be returned; None for all
    :type limit: int
    :returns: list of Monzo transactions
    :rtype: list of MonzoTransaction
    """
    if not account_id:
        accounts = self.accounts()
        if len(accounts) != 1:
            raise ValueError("You need to pass account ID")
        account_id = accounts[0].id

    response = self._get_response(
        method='get',
        endpoint='/transactions',
        params={'account_id': account_id},
    )

    # The API can neither reverse nor limit the list server-side, so even
    # the basic 'get the latest transaction' query has to fetch everything
    # and do the ordering and slicing here in Python.
    transactions = response.json()['transactions']
    if reverse:
        transactions = transactions[::-1]
    if limit:
        transactions = transactions[:limit]

    return [MonzoTransaction(data=t) for t in transactions]
<SYSTEM_TASK:>
Returns an individual transaction, fetched by its id.
<END_TASK>
<USER_TASK:>
Description:
def transaction(self, transaction_id, expand_merchant=False):
    """
    Return an individual transaction, fetched by its ID.

    Official docs:
        https://monzo.com/docs/#retrieve-transaction

    :param transaction_id: Monzo transaction ID
    :type transaction_id: str
    :param expand_merchant: whether merchant data should be included
    :type expand_merchant: bool
    :returns: Monzo transaction details
    :rtype: MonzoTransaction
    """
    params = {'expand[]': 'merchant'} if expand_merchant else {}

    response = self._get_response(
        method='get',
        endpoint='/transactions/{}'.format(transaction_id),
        params=params,
    )
    return MonzoTransaction(data=response.json()['transaction'])
<SYSTEM_TASK:>
Create auth string from credentials.
<END_TASK>
<USER_TASK:>
Description:
def get_auth_string(self):
    """Return the base64-encoded 'username:access_key' credential string."""
    credentials = ':'.join([self.sauce_username, self.sauce_access_key])
    return base64.b64encode(credentials.encode('utf-8')).decode('utf-8')
<SYSTEM_TASK:>
Add authorization header.
<END_TASK>
<USER_TASK:>
Description:
def make_auth_headers(self, content_type):
    """Return request headers with HTTP Basic authorization added."""
    auth = 'Basic {}'.format(self.get_auth_string())
    headers = self.make_headers(content_type)
    headers['Authorization'] = auth
    return headers
<SYSTEM_TASK:>
Create a sub account.
<END_TASK>
<USER_TASK:>
Description:
def create_user(self, username, password, name, email):
    """Create a sub account under the authenticated parent account."""
    payload = json.dumps({
        'username': username,
        'password': password,
        'name': name,
        'email': email,
    })
    endpoint = '/rest/v1/users/{}'.format(self.client.sauce_username)
    return self.client.request('POST', endpoint, payload)
<SYSTEM_TASK:>
Get a list of sub accounts associated with a parent account.
<END_TASK>
<USER_TASK:>
Description:
def get_subaccounts(self):
    """Get a list of sub accounts associated with a parent account."""
    endpoint = '/rest/v1/users/{}/list-subaccounts'.format(
        self.client.sauce_username)
    return self.client.request('GET', endpoint)
<SYSTEM_TASK:>
Get a list of sibling accounts associated with provided account.
<END_TASK>
<USER_TASK:>
Description:
def get_siblings(self):
    """Get a list of sibling accounts associated with provided account."""
    endpoint = '/rest/v1.1/users/{}/siblings'.format(
        self.client.sauce_username)
    return self.client.request('GET', endpoint)
<SYSTEM_TASK:>
Get information about a sub account.
<END_TASK>
<USER_TASK:>
Description:
def get_subaccount_info(self):
    """Get information about a sub account."""
    endpoint = '/rest/v1/users/{}/subaccounts'.format(
        self.client.sauce_username)
    return self.client.request('GET', endpoint)
<SYSTEM_TASK:>
Change access key of your account.
<END_TASK>
<USER_TASK:>
Description:
def change_access_key(self):
    """Change the access key of the authenticated account."""
    endpoint = '/rest/v1/users/{}/accesskey/change'.format(
        self.client.sauce_username)
    return self.client.request('POST', endpoint)
<SYSTEM_TASK:>
Get a list of objects describing all the OS and browser platforms
<END_TASK>
<USER_TASK:>
Description:
def get_platforms(self, automation_api='all'):
    """Get a list of objects describing all the OS and browser platforms
    currently supported on Sauce Labs."""
    endpoint = '/rest/v1/info/platforms/{}'.format(automation_api)
    return self.client.request('GET', endpoint)
<SYSTEM_TASK:>
Get details about the static assets collected for a specific job.
<END_TASK>
<USER_TASK:>
Description:
def get_job_asset_url(self, job_id, filename):
    """Return the URL of a static asset collected for a specific job."""
    template = 'https://saucelabs.com/rest/v1/{}/jobs/{}/assets/{}'
    return template.format(self.client.sauce_username, job_id, filename)
<SYSTEM_TASK:>
Uploads a file to the temporary sauce storage.
<END_TASK>
<USER_TASK:>
Description:
def upload_file(self, filepath, overwrite=True):
    """Upload a file to the temporary Sauce storage.

    :param filepath: path of the local file to upload
    :param overwrite: replace an already-stored file of the same name
    """
    filename = os.path.basename(filepath)
    endpoint = '/rest/v1/storage/{}/{}?overwrite={}'.format(
        self.client.sauce_username, filename,
        'true' if overwrite else 'false')
    with open(filepath, 'rb') as filehandle:
        body = filehandle.read()
    return self.client.request('POST', endpoint, body,
                               content_type='application/octet-stream')
<SYSTEM_TASK:>
Check which files are in your temporary storage.
<END_TASK>
<USER_TASK:>
Description:
def get_stored_files(self):
    """Check which files are in your temporary storage."""
    endpoint = '/rest/v1/storage/{}'.format(self.client.sauce_username)
    return self.client.request('GET', endpoint)
<SYSTEM_TASK:>
Get information for a tunnel given its ID.
<END_TASK>
<USER_TASK:>
Description:
def get_tunnel(self, tunnel_id):
    """Get information for a tunnel given its ID."""
    endpoint = '/rest/v1/{}/tunnels/{}'.format(
        self.client.sauce_username, tunnel_id)
    return self.client.request('GET', endpoint)
<SYSTEM_TASK:>
Apply a patch.
<END_TASK>
<USER_TASK:>
Description:
def apply(patch):
    """Apply a patch.

    The patch's :attr:`~Patch.obj` attribute is injected into the patch's
    :attr:`~Patch.destination` under the patch's :attr:`~Patch.name`.

    This is a wrapper around calling
    ``setattr(patch.destination, patch.name, patch.obj)``.

    Parameters
    ----------
    patch : gorilla.Patch
        Patch.

    Raises
    ------
    RuntimeError
        Overwriting an existing attribute is not allowed when the setting
        :attr:`Settings.allow_hit` is set to ``True``.

    Note
    ----
    If both the attributes :attr:`Settings.allow_hit` and
    :attr:`Settings.store_hit` are ``True`` but that the target attribute seems
    to have already been stored, then it won't be stored again to avoid losing
    the original attribute that was stored the first time around.
    """
    settings = patch.settings if patch.settings is not None else Settings()

    # A 'hit' occurs when an attribute with the patch's name already exists
    # at the destination; that existing attribute is the 'target'.
    try:
        target = get_attribute(patch.destination, patch.name)
    except AttributeError:
        hit = False
    else:
        hit = True

    if hit:
        if not settings.allow_hit:
            raise RuntimeError(
                "An attribute named '%s' already exists at the destination "
                "'%s'. Set a different name through the patch object to avoid "
                "a name clash or set the setting 'allow_hit' to True to "
                "overwrite the attribute. In the latter case, it is "
                "recommended to also set the 'store_hit' setting to True in "
                "order to store the original attribute under a different "
                "name so it can still be accessed."
                % (patch.name, patch.destination.__name__))

        if settings.store_hit:
            original_name = _ORIGINAL_NAME % (patch.name,)
            # Never store twice, or the first-stored original would be lost.
            if not hasattr(patch.destination, original_name):
                setattr(patch.destination, original_name, target)

    setattr(patch.destination, patch.name, patch.obj)
<SYSTEM_TASK:>
Decorator to create a patch.
<END_TASK>
<USER_TASK:>
Description:
def patch(destination, name=None, settings=None):
    """Decorator to create a patch.

    The object being decorated becomes the :attr:`~Patch.obj` attribute of the
    patch.

    Parameters
    ----------
    destination : object
        Patch destination.
    name : str
        Name of the attribute at the destination.
    settings : gorilla.Settings
        Settings.

    Returns
    -------
    object
        The decorated object.

    See Also
    --------
    :class:`Patch`.
    """
    def decorator(wrapped):
        base = _get_base(wrapped)
        attribute_name = base.__name__ if name is None else name
        # Deep-copy the settings so later mutations don't leak into the patch.
        new_patch = Patch(destination, attribute_name, wrapped,
                          settings=copy.deepcopy(settings))
        get_decorator_data(base, set_default=True).patches.append(new_patch)
        return wrapped

    return decorator
<SYSTEM_TASK:>
Decorator to create a patch for each member of a module or a class.
<END_TASK>
<USER_TASK:>
Description:
def patches(destination, settings=None, traverse_bases=True,
            filter=default_filter, recursive=True, use_decorators=True):
    """Decorator to create a patch for each member of a module or a class.

    Parameters
    ----------
    destination : object
        Patch destination.
    settings : gorilla.Settings
        Settings.
    traverse_bases : bool
        If the object is a class, the base classes are also traversed.
    filter : function
        Attributes for which the function returns ``False`` are skipped. The
        function needs to define two parameters: ``name``, the attribute name,
        and ``obj``, the attribute value. If ``None``, no attribute is skipped.
    recursive : bool
        If ``True``, and a hit occurs due to an attribute at the destination
        already existing with the given name, and both the member and the
        target attributes are classes, then instead of creating a patch
        directly with the member attribute value as is, a patch for each of its
        own members is created with the target as new destination.
    use_decorators : bool
        Allows to take any modifier decorator into consideration to allow for
        more granular customizations.

    Returns
    -------
    object
        The decorated object.

    Note
    ----
    A 'target' differs from a 'destination' in that a target represents an
    existing attribute at the destination about to be hit by a patch.

    See Also
    --------
    :class:`Patch`, :func:`create_patches`.
    """
    def decorator(wrapped):
        # Deep-copy the settings so later mutations don't leak into the
        # created patches.
        created = create_patches(
            destination, wrapped, settings=copy.deepcopy(settings),
            traverse_bases=traverse_bases, filter=filter,
            recursive=recursive, use_decorators=use_decorators)
        data = get_decorator_data(_get_base(wrapped), set_default=True)
        data.patches.extend(created)
        return wrapped

    return decorator
<SYSTEM_TASK:>
Modifier decorator to update a patch's destination.
<END_TASK>
<USER_TASK:>
Description:
def destination(value):
    """Modifier decorator to update a patch's destination.

    This only modifies the behaviour of the :func:`create_patches` function
    and the :func:`patches` decorator, given that their parameter
    ``use_decorators`` is set to ``True``.

    Parameters
    ----------
    value : object
        Patch destination.

    Returns
    -------
    object
        The decorated object.
    """
    def decorator(wrapped):
        base = _get_base(wrapped)
        get_decorator_data(base, set_default=True).override['destination'] = value
        return wrapped

    return decorator
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.