text_prompt
stringlengths
100
17.7k
code_prompt
stringlengths
7
9.86k
def set_terminal_converted(self, attr, repr_value):
    """Convert *repr_value* and store the result under the attribute's
    representation name.

    :param attr: attribute descriptor providing ``value_type`` and
        ``repr_name``.
    :param str repr_value: string representation of the value to set.
    """
    self.data[attr.repr_name] = \
        self.converter_registry.convert_from_representation(
            repr_value, attr.value_type)
def load(self, filename, offset):
    """Record the volume offset; full Apple_Boot parsing is not yet
    implemented."""
    try:
        self.offset = offset
        # TODO: actually open and parse the volume once implemented:
        # self.fd = open(filename, 'rb')
        # self.fd.close()
    except IOError:
        self.logger.error('Unable to load EfiSystem volume')
def send(self, filenames=None):
    """Sends the file to the remote host and archives
    the sent file locally.

    For each name in ``filenames``: copy it over SFTP, archive the local
    copy, and (when ``self.update_history_model`` is truthy) record it in
    the history model. Both client errors are re-raised as
    :class:`TransactionFileSenderError` with the original as cause.

    :param filenames: iterable of file names to send.
        NOTE(review): despite the ``None`` default, passing ``None``
        would raise ``TypeError`` in the ``for`` loop — confirm callers
        always pass an iterable.
    :returns: the ``filenames`` argument, unchanged.
    :raises TransactionFileSenderError: on any SSH or SFTP failure.
    """
    try:
        # Both connections are context managers, so they are closed even
        # when a copy/archive step fails mid-loop.
        with self.ssh_client.connect() as ssh_conn:
            with self.sftp_client.connect(ssh_conn) as sftp_conn:
                for filename in filenames:
                    sftp_conn.copy(filename=filename)
                    self.archive(filename=filename)
                    if self.update_history_model:
                        self.update_history(filename=filename)
    except SSHClientError as e:
        raise TransactionFileSenderError(e) from e
    except SFTPClientError as e:
        raise TransactionFileSenderError(e) from e
    return filenames
def compile(self, name, folder=None, data=None):
    """
    renders template_name + self.extension file with data using jinja

    :param name: template name; any path separators are stripped out.
    :param folder: optional folder prefix (leading/trailing separators
        are stripped before joining).
    :param data: optional dict of template variables.
    """
    # Strip path separators from the name to keep lookups inside the
    # template folder.
    template_name = name.replace(os.sep, "")
    if folder is None:
        folder = ""
    full_name = os.path.join(folder.strip(os.sep), template_name)
    if data is None:
        data = {}
    try:
        # Rendered output is cached on self.templates keyed by the bare
        # template name (without the folder prefix).
        self.templates[template_name] = \
            self.jinja.get_template(full_name).render(data)
    except TemplateNotFound as template_error:
        # Missing templates are silently ignored in production and only
        # re-raised when Flask DEBUG is on.
        if current_app.config['DEBUG']:
            raise template_error
def verify_token(self, token) -> bool:
    """Verify an encrypted JWT.

    Decrypts *token*, decodes it as a JWT signed with ``self.app_secret``
    and stores the decoded payload on ``self.data``.

    :param token: encrypted JWT string.
    :returns: ``True`` on success; ``False`` otherwise, with the error
        appended to ``self.errors``.
    """
    try:
        self.data = jwt.decode(Security.decrypt(token), self.app_secret)
        return True
    except Exception as error:
        # FIX: removed an unreachable trailing `return False` and narrowed
        # the over-broad `(Exception, BaseException)` catch, which would
        # also have swallowed KeyboardInterrupt/SystemExit.
        self.errors.append(error)
        return False
def verify_http_auth_token(self) -> bool:
    """Validate the JWT carried by the current HTTP request.

    On success, replaces ``self.data`` with its ``'data'`` payload.

    :returns: ``True`` when a token is present, verifies, and carries a
        payload; ``False`` otherwise.
    """
    token = self.get_http_token()
    if token is None or not self.verify_token(token):
        return False
    if self.data is None:
        return False
    self.data = self.data['data']
    return True
def create_token_with_refresh_token(self, data, token_valid_for=180,
                                    refresh_token_valid_for=86400):
    """Create an encrypted JWT that embeds a refresh token.

    :param data: payload stored under the ``'data'`` claim.
    :param token_valid_for: main token lifetime in seconds (default 180).
    :param refresh_token_valid_for: refresh token lifetime in seconds
        (default 86400, i.e. one day).
    :returns: the encrypted JWT produced by ``Security.encrypt``.
    """
    # FIX: removed the dead `refresh_token = None` assignment that was
    # immediately overwritten.
    refresh_token = jwt.encode(
        {'exp': datetime.utcnow() + timedelta(
            seconds=refresh_token_valid_for)},
        self.app_secret).decode("utf-8")
    jwt_token = jwt.encode(
        {'data': data,
         'refresh_token': refresh_token,
         'exp': datetime.utcnow() + timedelta(seconds=token_valid_for)},
        self.app_secret)
    return Security.encrypt(jwt_token)
def verify_refresh_token(self, expired_token) -> bool:
    """Validate the refresh token embedded in an (possibly expired) JWT.

    Decodes *expired_token* with expiry checking disabled, then verifies
    the embedded ``refresh_token`` claim. On success the full decoded
    token is stored on ``self.data``.

    :returns: ``True`` when the refresh token verifies, else ``False``
        (errors are collected on ``self.errors``).
    """
    try:
        decoded_token = jwt.decode(
            Security.decrypt(expired_token), self.app_secret,
            options={'verify_exp': False})
    except (Exception, BaseException) as error:
        self.errors.append(error)
        return False
    if decoded_token.get('refresh_token') is None:
        return False
    try:
        jwt.decode(decoded_token['refresh_token'], self.app_secret)
    except (Exception, BaseException) as error:
        self.errors.append(error)
        return False
    self.data = decoded_token
    return True
def verify_http_auth_refresh_token(self) -> bool:
    """Validate the refresh token carried by the current HTTP request.

    On success, replaces ``self.data`` with its ``'data'`` payload.

    :returns: ``True`` when a token is present, its refresh token
        verifies, and a payload exists; ``False`` otherwise.
    """
    token = self.get_http_token()
    if token is None or not self.verify_refresh_token(token):
        return False
    if self.data is None:
        return False
    self.data = self.data['data']
    return True
def quick_response(self, status_code):
    """Populate the response from a bare HTTP status code.

    Sets the status and a translated message for the supported codes
    (200, 400, 401, 404); any other code leaves the response untouched.
    """
    translator = Translator(environ=self.environ)
    message_keys = {
        404: 'http_messages.404',
        401: 'http_messages.401',
        400: 'http_messages.400',
        200: 'http_messages.200',
    }
    if status_code in message_keys:
        self.status(status_code)
        self.message(translator.trans(message_keys[status_code]))
def getinputencoding(stream=None):
    """Return preferred encoding for reading from ``stream``.

    ``stream`` defaults to sys.stdin; when the stream reports no
    encoding, the locale-preferred encoding is returned instead.
    """
    source = sys.stdin if stream is None else stream
    return source.encoding or getpreferredencoding()
def getoutputencoding(stream=None):
    """Return preferred encoding for writing to ``stream``.

    ``stream`` defaults to sys.stdout; when the stream reports no
    encoding, the locale-preferred encoding is returned instead.
    """
    target = sys.stdout if stream is None else stream
    return target.encoding or getpreferredencoding()
def decode(string, encoding=None, errors=None):
    """Decode *string* from the specified encoding.

    ``encoding`` defaults to the preferred encoding and ``errors`` to
    the preferred error handler.
    """
    enc = getpreferredencoding() if encoding is None else encoding
    err = getpreferrederrors() if errors is None else errors
    return string.decode(enc, err)
def encode(string, encoding=None, errors=None):
    """Encode *string* to the specified encoding.

    ``encoding`` defaults to the preferred encoding and ``errors`` to
    the preferred error handler.
    """
    enc = getpreferredencoding() if encoding is None else encoding
    err = getpreferrederrors() if errors is None else errors
    return string.encode(enc, err)
def _get_response_mime_type(self):
    """
    Returns the response MIME type for this view.

    Resolution order: a non-empty view name wins; otherwise the request's
    Accept header is scanned for the first registered type ('*/*' selects
    the default); if nothing matches and the client did send Accept
    values, a 406 is raised listing the supported types.

    :raises: :class:`pyramid.httpexceptions.HTTPNotAcceptable` if the
        MIME content type(s) the client specified can not be handled by
        the view.
    """
    view_name = self.request.view_name
    if view_name != '':
        mime_type = get_registered_mime_type_for_name(view_name)
    else:
        mime_type = None
        # `acc` doubles as a "did the client send any Accept value" flag:
        # it stays None only when the accept list is empty.
        acc = None
        for acc in self.request.accept:
            if acc == '*/*':
                # The client does not care; use the default.
                mime_type = self.__get_default_response_mime_type()
                break
            try:
                mime_type = \
                    get_registered_mime_type_for_string(acc.lower())
            except KeyError:
                # Unregistered type: keep scanning further Accept values.
                pass
            else:
                break
        if mime_type is None:
            if not acc is None:
                # The client specified a MIME type we can not handle; this
                # is a 406 exception. We supply allowed MIME content
                # types in the body of the response.
                headers = \
                    [('Location', self.request.path_url),
                     ('Content-Type', TextPlainMime.mime_type_string),
                     ]
                mime_strings = get_registered_mime_strings()
                exc = HTTPNotAcceptable('Requested MIME content type(s) '
                                        'not acceptable.',
                                        body=','.join(mime_strings),
                                        headers=headers)
                raise exc
            mime_type = self.__get_default_response_mime_type()
    return mime_type
<SYSTEM_TASK:> Converts the given resource to a result to be returned from the view. <END_TASK> <USER_TASK:> Description: def _get_result(self, resource): """ Converts the given resource to a result to be returned from the view. Unless a custom renderer is employed, this will involve creating a representer and using it to convert the resource to a string. :param resource: Resource to convert. :type resource: Object implementing :class:`evererst.interfaces.IResource`. :returns: :class:`pyramid.reposnse.Response` object or a dictionary with a single key "context" mapped to the given resource (to be passed on to a custom renderer). """
if self._convert_response: self._update_response_body(resource) result = self.request.response else: result = dict(context=resource) return result
<SYSTEM_TASK:> Creates a representer and updates the response body with the byte <END_TASK> <USER_TASK:> Description: def _update_response_body(self, resource): """ Creates a representer and updates the response body with the byte representation created for the given resource. """
rpr = self._get_response_representer(resource) # Set content type and body of the response. self.request.response.content_type = \ rpr.content_type.mime_type_string rpr_body = rpr.to_bytes(resource) self.request.response.body = rpr_body
def _update_response_location_header(self, resource):
    """
    Adds a new or replaces an existing Location header to the response
    headers pointing to the URL of the given resource.

    The search for an existing header is case-insensitive, per RFC 7230
    header-name semantics.
    """
    location = resource_to_url(resource, request=self.request)
    loc_hdr = ('Location', location)
    # Upper-case all header names so the index lookup is case-insensitive.
    hdr_names = [hdr[0].upper()
                 for hdr in self.request.response.headerlist]
    try:
        idx = hdr_names.index('LOCATION')
    except ValueError:
        # No Location header yet: append one.
        self.request.response.headerlist.append(loc_hdr)
    else:
        # Replace existing location header.
        # FIXME: It is not clear under which conditions this happens, so
        # we do not have a test for it yet.
        self.request.response.headerlist[idx] = loc_hdr
def _get_request_representer(self):
    """Return a representer matching the request's content type.

    :raises HTTPUnsupportedMediaType: if the request content type is not
        registered (HTTP 415).
    """
    content_type = self.request.content_type
    try:
        mime_type = get_registered_mime_type_for_string(content_type)
    except KeyError:
        # The client sent a content type we do not support (415).
        raise HTTPUnsupportedMediaType()
    return as_representer(self.context, mime_type)
<SYSTEM_TASK:> Extracts the data from the representation submitted in the request <END_TASK> <USER_TASK:> Description: def _extract_request_data(self): """ Extracts the data from the representation submitted in the request body and returns it. This default implementation uses a representer for the content type specified by the request to perform the extraction and returns an object implementing the :class:`everest.representers.interfaces.IResourceDataElement` interface. :raises HTTPError: To indicate problems with the request data extraction in terms of HTTP codes. """
rpr = self._get_request_representer() return rpr.data_from_bytes(self.request.body)
def _handle_conflict(self, name):
    """Build a 409 "Conflict" response for the member called *name*."""
    exc = HTTPConflict('Member "%s" already exists!' % name).exception
    return self.request.get_response(exc)
def check(self):
    """User-message check for views.

    Returns ``True`` when the request carries an ``ignore-message`` GUID
    that resolves to a stored message with the same text as
    ``self.message``, allowing processing to continue.
    """
    request = get_current_request()
    ignore_guid = request.params.get('ignore-message')
    coll = request.root['_messages']
    if not ignore_guid:
        return False
    ignore_mb = coll.get(ignore_guid)
    if ignore_mb is not None and ignore_mb.text == self.message.text:
        return True
    return False
def create_307_response(self):
    """
    Creates a 307 "Temporary Redirect" response including a HTTP Warning
    header with code 299 that contains the user message received during
    processing the request.

    The message is stored in the ``_messages`` collection so that the
    redirect target can reference it via the query string.
    """
    request = get_current_request()
    msg_mb = UserMessageMember(self.message)
    coll = request.root['_messages']
    coll.add(msg_mb)
    # Figure out the new location URL.
    qs = self.__get_new_query_string(request.query_string,
                                     self.message.slug)
    resubmit_url = "%s?%s" % (request.path_url, qs)
    # RFC 7234 code 299 = "miscellaneous persistent warning".
    headers = [('Warning', '299 %s' % self.message.text),
               # ('Content-Type', cnt_type),
               ]
    http_exc = HttpWarningResubmit(location=resubmit_url,
                                   detail=self.message.text,
                                   headers=headers)
    return request.get_response(http_exc)
def mkdir(*args):
    """Create a directory specified by a sequence of subdirectories

    >>> mkdir("/tmp", "foo", "bar", "baz")
    '/tmp/foo/bar/baz'
    >>> os.path.isdir('/tmp/foo/bar/baz')
    True
    """
    if not args:
        # Preserve historical behavior: no components -> empty path.
        return ''
    path = os.path.join(*args)
    # FIX: os.makedirs(..., exist_ok=True) creates all missing intermediate
    # directories in one call and is race-free, unlike the previous
    # per-component isdir()-then-mkdir() loop (TOCTOU).
    os.makedirs(path, exist_ok=True)
    return path
def shell(cmd, *args, **kwargs):
    # type: (str, *str, **Any) -> Tuple[int, str]
    """ Execute shell command and return output

    Args:
        cmd (str): the command itself, i.e. part until the first space
        *args: positional arguments, i.e. other space-separated parts
        rel_path (bool): execute relative to the path (default: `False`)
        raise_on_status (bool): raise exception if command exited with
            non-zero status (default: `True`)
        stderr (file-like): file-like object to collect stderr output,
            None by default

    Returns:
        Tuple[int, str]: status, shell output
    """
    if kwargs.get('rel_path') and not cmd.startswith("/"):
        cmd = os.path.join(kwargs['rel_path'], cmd)
    status = 0
    try:
        output = subprocess.check_output(
            (cmd,) + args, stderr=kwargs.get('stderr'))
    except subprocess.CalledProcessError as e:
        if kwargs.get('raise_on_status', True):
            raise
        output = e.output
        status = e.returncode
    except OSError as e:  # command not found / not executable
        if kwargs.get('raise_on_status', True):
            raise
        if 'stderr' in kwargs:
            # FIX: OSError has no `.message` attribute on Python 3 — the
            # old code crashed with AttributeError here. Use str(e).
            kwargs['stderr'].write(str(e))
        return -1, ""
    if isinstance(output, bytes):
        # check_output returns bytes on Python 3 (and on Python 2, where
        # str is bytes, this decode yields unicode as well).
        output = output.decode('utf8')
    return status, output
def listen_for_events():
    """Pubsub event listener

    Listen for events in the pubsub bus and calls the process function
    when somebody comes to play.

    Blocks forever on the "eventlib" redis channel; each JSON payload
    carrying a 'name' key is dispatched to ``process_external``.
    """
    import_event_modules()
    conn = redis_connection.get_connection()
    pubsub = conn.pubsub()
    pubsub.subscribe("eventlib")
    for message in pubsub.listen():
        # Skip subscribe/unsubscribe acknowledgements; only real
        # published messages carry event payloads.
        if message['type'] != 'message':
            continue
        data = loads(message["data"])
        if 'name' in data:
            # The event name is popped so only the remaining payload is
            # forwarded to the handler.
            event_name = data.pop('name')
            process_external(event_name, data)
def commit(self, unit_of_work):
    """
    Dump all resources that were modified by the given session back into
    the repository.

    Delegates the in-memory commit to the base class first, then — only
    when the repository has been initialized — persists every entity
    class touched by the unit of work.
    """
    MemoryRepository.commit(self, unit_of_work)
    if self.is_initialized:
        # Collect each distinct entity class once, so every class is
        # dumped exactly one time regardless of how many instances
        # changed.
        entity_classes_to_dump = set()
        for state in unit_of_work.iterator():
            entity_classes_to_dump.add(type(state.entity))
        for entity_cls in entity_classes_to_dump:
            self.__dump_entities(entity_cls)
<SYSTEM_TASK:> Validate that the pdf_path configuration is set and the referenced <END_TASK> <USER_TASK:> Description: def _validate_pdf_file(self): """Validate that the pdf_path configuration is set and the referenced file exists. Exits the program with status 1 if validation fails. """
if self['pdf_path'] is None: self._logger.error('--pdf argument must be set') sys.exit(1) if not os.path.exists(self['pdf_path']): self._logger.error('Cannot find PDF ' + self['pdf_path']) sys.exit(1)
def _get_docushare_url(handle, validate=True):
    """Get a docushare URL given document's handle.

    Parameters
    ----------
    handle : `str`
        Handle name, such as ``'LDM-151'``.
    validate : `bool`, optional
        Set to `True` to request that the link resolves by performing
        a HEAD request over the network. `False` disables this testing.
        Default is `True`.

    Returns
    -------
    docushare_url : `str`
        Shortened DocuShare URL for the document corresponding to the
        handle.

    Raises
    ------
    lander.exceptions.DocuShareError
        Raised for any error related to validating the DocuShare URL.
    """
    logger = structlog.get_logger(__name__)
    logger.debug('Using Configuration._get_docushare_url')
    # Make a short link to the DocuShare version page since
    # a) It doesn't immediately trigger a PDF download,
    # b) It gives the user extra information about the document before
    #    downloading it.
    url = 'https://ls.st/{handle}*'.format(handle=handle.lower())
    if validate:
        # Test that the short link successfully resolves to DocuShare
        logger.debug('Validating {0}'.format(url))
        try:
            # allow_redirects so we can inspect the final destination
            # below; a 30s timeout keeps a dead service from hanging us.
            response = requests.head(url, allow_redirects=True, timeout=30)
        except requests.exceptions.RequestException as e:
            raise DocuShareError(str(e))
        error_message = 'URL {0} does not resolve to DocuShare'.format(url)
        if response.status_code != 200:
            logger.warning('HEAD {0} status: {1:d}'.format(
                url, response.status_code))
            raise DocuShareError(error_message)
        # The short link must land on the official DocuShare host;
        # anything else means the handle is unknown or hijacked.
        redirect_url_parts = urllib.parse.urlsplit(response.url)
        if redirect_url_parts.netloc != 'docushare.lsst.org':
            logger.warning('{0} resolved to {1}'.format(url, response.url))
            raise DocuShareError(error_message)
    return url
def remove_historical_group_permissions(group=None, allowed_permissions=None):
    """Removes group permissions for historical models
    except those whose prefix is in `allowed_permissions`.

    Default removes all except `view`.

    :param group: group whose historical-model permissions are pruned.
    :param allowed_permissions: iterable of permission prefixes to keep
        (e.g. ``["view", "add"]``). Defaults to ``["view"]``.
    """
    allowed_permissions = allowed_permissions or ["view"]
    # FIX: the old code ran one remove pass per allowed action, removing
    # every historical permission that did not start with *that* action.
    # With two or more allowed prefixes, the passes deleted each other's
    # survivors, so ALL historical permissions were removed. A permission
    # must be kept when it matches ANY allowed prefix.
    for permission in group.permissions.filter(
        codename__contains="historical"
    ):
        if not any(
            permission.codename.startswith(action)
            for action in allowed_permissions
        ):
            group.permissions.remove(permission)
def delete_database(mongo_uri, database_name):
    """Drop an entire MongoDB database. Mongo daemon assumed to be
    running.

    Inputs: - mongo_uri: A MongoDB URI.
            - database_name: The mongo database name as a python string.
    """
    pymongo.MongoClient(mongo_uri).drop_database(database_name)
def delete_collection(mongo_uri, database_name, collection_name):
    """Drop a single MongoDB collection. Mongo daemon assumed to be
    running.

    Inputs: - mongo_uri: A MongoDB URI.
            - database_name: The mongo database name as a python string.
            - collection_name: The mongo collection as a python string.
    """
    client = pymongo.MongoClient(mongo_uri)
    client[database_name].drop_collection(collection_name)
def create_staging_collection(resource):
    """Helper creating a staging collection for a registered resource.

    :param resource: registered resource (class implementing, instance
        providing, or subclass of a registered resource interface).
    :returns: collection built from a fresh staging aggregate.
    """
    aggregate = StagingAggregate(get_entity_class(resource))
    return get_collection_class(resource).create_from_aggregate(aggregate)
def parse(self):
    """Run the parser over the entire source string and return the
    results; on a parse error, set ``self.error`` and print the
    pretty-printed error instead."""
    try:
        return self.parse_top_level()
    except PartpyError as err:
        self.error = True
        print(err.pretty_print())
def parse_top_level(self):
    """Parse every contact expression in the input.

    Repeatedly parses one contact, then skips the whitespace separating
    it from the next, until the end of the input is reached or no more
    contacts can be parsed. Returns a dict mapping names to emails.
    """
    output = {}
    while not self.eos:
        contact = self.parse_contact()  # match a contact expression.
        if not contact:
            # No further contact: end of file.
            break
        name, email = contact
        output[name] = email
        # Skip whitespace up to the next interesting character.
        self.parse_whitespace()
    return output
def parse_contact(self):
    """Parse a top level contact expression, these consist of a name
    expression a special char and an email expression. The characters
    found in a name and email expression are returned.

    :returns: ``(name, email)`` tuple of the matched strings.
    :raises PartpyError: when the name, the ':'/'-' delimiter, or the
        email is missing.
    """
    self.parse_whitespace()
    name = self.parse_name()  # parse a name expression and get the string.
    if not name:  # No name was found so shout it out.
        raise PartpyError(self, 'Expecting a name')
    self.parse_whitespace()
    # allow name and email to be delimited by either a ':' or '-'
    if not self.match_any_char(':-'):
        raise PartpyError(self, 'Expecting : or -')
    self.eat_length(1)
    self.parse_whitespace()
    email = self.parse_email()  # parse an email and store its string.
    if not email:
        raise PartpyError(self, 'Expecting an email address')
    return (name, email)
def parse_name(self):
    """This function uses string patterns to match a title cased name.
    This is done in a loop until there are no more names to match so as
    to be able to include surnames etc. in the output.

    :returns: all matched name parts joined by single spaces.
    :raises PartpyError: when no title-cased name part is found at all.
    """
    name = []
    while True:
        # Match the current char until it doesnt match the given pattern:
        # first char must be an uppercase alpha and the rest must be lower
        # cased alphas.
        part = self.match_string_pattern(spat.alphau, spat.alphal)
        if part == '':
            break  # There is no more matchable strings.
        self.eat_string(part)  # Eat the found string
        name.append(part)  # Store this name part
        if self.get_char() == ' ':  # if the current char is a single space
            # eat it. This allows one space between parts
            self.eat_length(1)
    if not len(name):  # if no name parts where detected raise an expection.
        raise PartpyError(self, 'Expecting a title cased name')
    return ' '.join(name)
def get_development_container_name(self):
    """Return the development container name.

    Format: ``repository:[prefix-]branch-dev``.
    """
    if self.__prefix:
        tag = "{0}-{1}-dev".format(self.__prefix, self.__branch)
    else:
        tag = "{0}-dev".format(self.__branch)
    return "{0}:{1}".format(self.__repository, tag)
def get_build_container_tag(self):
    """Return the build container tag.

    Format: ``[prefix-]branch-version``.
    """
    parts = [self.__prefix] if self.__prefix else []
    parts.extend([self.__branch, self.__version])
    return "-".join(parts)
def get_branch_container_tag(self):
    """Return the branch container tag.

    Format: ``[prefix-]branch``.
    """
    if self.__prefix:
        return "-".join((self.__prefix, self.__branch))
    return "{0}".format(self.__branch)
def custom_server_error(request, template_name='500.html',
                        admin_template_name='500A.html'):
    """
    500 error handler. Displays a full traceback for superusers and the
    first line of the traceback for staff members.

    Templates: `500.html` or `500A.html` (admin)
    Context:
        trace
            Holds the traceback information for debugging.
    """
    trace = None
    if request.user.is_authenticated() and (request.user.is_staff or
                                            request.user.is_superuser):
        try:
            import traceback
            import sys
            trace = traceback.format_exception(*(sys.exc_info()))
            if not request.user.is_superuser and trace:
                # Staff members only get the last line of the traceback.
                trace = trace[-1:]
            trace = '\n'.join(trace)
        except Exception:
            # FIX: was a bare `except: pass`, which also swallowed
            # SystemExit/KeyboardInterrupt. Formatting the trace is
            # best-effort, so failures still leave `trace` usable.
            pass
    # if url is part of the admin site, use the 500A.html template
    if request.path.startswith('/%s' % admin.site.name):
        template_name = admin_template_name
    t = loader.get_template(template_name)
    # You need to create a 500.html and 500A.html template.
    return http.HttpResponseServerError(t.render(Context({'trace': trace})))
def parse_n_jobs(s):
    """
    This function parses a "math"-like string as a function of CPU count.
    It is useful for specifying the number of jobs.

    For example, on an 8-core machine::

        assert parse_n_jobs('0.5 * n') == 4
        assert parse_n_jobs('2n') == 16
        assert parse_n_jobs('n') == 8
        assert parse_n_jobs('4') == 4

    :param str s: string to parse for number of CPUs
    """
    total_cpus = cpu_count()
    if isinstance(s, int):
        jobs = s
    elif isinstance(s, float):
        jobs = int(s)
    elif isinstance(s, str):
        m = re.match(r'(\d*(?:\.\d*)?)?(\s*\*?\s*n)?$', s.strip())
        if m is None:
            raise ValueError('Unable to parse n_jobs="{}"'.format(s))
        k = float(m.group(1)) if m.group(1) else 1
        # A trailing 'n' — or a bare fraction below 1 — scales the CPU
        # count; otherwise the number is taken literally.
        if m.group(2) or k < 1:
            jobs = k * total_cpus
        else:
            jobs = int(k)
    else:
        raise TypeError('n_jobs argument must be of type str, int, or float.')
    jobs = int(jobs)
    if jobs <= 0:
        warnings.warn('n_jobs={} is invalid. Setting n_jobs=1.'.format(jobs))
        jobs = 1
    return int(jobs)
def _load_settings_from_source(self, source):
    """
    Loads the relevant settings from the specified ``source``.

    This is a generator yielding ``(name, settings_dict)`` pairs — one
    pair per settings payload discovered. Supported sources: the
    sentinels ``'env_settings_uri'`` and ``'env'``, a parsed URI, a
    module name or file path string, a file-like object, a dict-like
    object, and (fallback) any object's ``__dict__``.

    :returns: a standard :func:`dict` containing the settings from the
        source
    :rtype: dict
    """
    if not source:
        # Falsy source: nothing to yield.
        pass
    elif source == 'env_settings_uri':
        # Indirect: each environment key may name a URI to load from.
        for env_settings_uri_key in self.env_settings_uri_keys:
            env_settings_uri = self._search_environ(env_settings_uri_key)
            if env_settings_uri:
                logger.debug('Found {} in the environment.'.format(env_settings_uri_key))
                yield env_settings_uri, self._load_settings_from_uri(env_settings_uri)
            #end if
        #end for
    elif source == 'env':
        # The whole process environment as a settings dict.
        logger.debug('Loaded {} settings from the environment.'.format(len(os.environ)))
        yield source, dict(os.environ.items())
    elif isinstance(source, ParseResult):
        settings = self._load_settings_from_uri(source)
        yield source, settings
    elif isinstance(source, str):
        # Try the string as an importable module first; fall back to
        # treating it as a file path / URI.
        try:
            spec = importlib.util.find_spec(source)
        except (AttributeError, ImportError):
            spec = None
        settings = self._load_settings_from_spec(spec, name=source)
        if settings is None:
            _, ext = os.path.splitext(source)
            with uri_open(source, 'rb') as f:
                yield source, self._load_settings_from_file(f, ext=ext)
        else:
            yield source, settings
        #end if
    elif hasattr(source, 'read'):
        # File-like object.
        yield source.name, self._load_settings_from_file(source)
    elif hasattr(source, 'items'):
        # Dict-like object; nested URI keys are followed recursively
        # before the dict itself is yielded.
        source_type = type(source).__name__
        for dict_settings_uri_key in self.dict_settings_uri_keys:
            if dict_settings_uri_key and dict_settings_uri_key in source and source[dict_settings_uri_key]:
                logger.debug('Found {} in the dict-like object <{}>.'.format(dict_settings_uri_key, source_type))
                yield from self._load_settings_from_source(source[dict_settings_uri_key])
            #end if
        #end for
        logger.debug('Loaded {} settings from dict-like object <{}>.'.format(len(source), source_type))
        yield self._get_unique_name(source_type), source
    else:
        # Arbitrary object: follow URI-bearing attributes recursively,
        # then yield its non-dunder instance attributes.
        source_type = type(source).__name__
        for object_settings_uri_key in self.object_settings_uri_keys:
            if object_settings_uri_key and hasattr(source, object_settings_uri_key) and getattr(source, object_settings_uri_key):
                logger.debug('Found {} in the object <{}>.'.format(object_settings_uri_key, source_type))
                yield from self._load_settings_from_source(getattr(source, object_settings_uri_key))
            #end if
        #end for
        settings = dict((k, v) for k, v in source.__dict__.items() if not k.startswith('__'))
        logger.debug('Loaded {} settings from object <{}>.'.format(len(settings), source_type))
        yield self._get_unique_name(source_type), settings
def get(self, key, *, default=None, cast_func=None, case_sensitive=None, raise_exception=None, warn_missing=None, use_cache=True, additional_sources=[]):
    """
    Gets the setting specified by ``key``. For efficiency, we cache the
    retrieval of settings to avoid multiple searches through the sources
    list.

    :param str key: settings key to retrieve
    :param str default: use this as default value when the setting key is not found
    :param func cast_func: cast the value of the settings using this function
    :param bool case_sensitive: whether to make case sensitive comparisons for settings key
    :param bool raise_exception: whether to raise a :exc:`MissingSettingException` exception when the setting is not found
    :param bool warn_missing: whether to display a warning when the setting is not found
    :param bool use_cache: whether to consult/populate the internal cache
    :param list additional_sources: additional sources to search for the key; note that the values obtained here could be cached in a future call
    :returns: the setting value
    :rtype: str
    """
    # Per-call overrides fall back to the instance-level defaults.
    case_sensitive = self.case_sensitive if case_sensitive is None else case_sensitive
    raise_exception = self.raise_exception if raise_exception is None else raise_exception
    warn_missing = self.warn_missing if warn_missing is None else warn_missing
    if not case_sensitive:
        key = key.lower()
    # Cached values are stored un-cast; cast_func is applied on the way out.
    if use_cache and key in self._cache:
        return cast_func(self._cache[key]) if cast_func else self._cache[key]
    found, value = False, None
    # NOTE(review): `map(self._load_settings_from_source, ...)` yields
    # generator objects, yet the loop unpacks each item as a
    # `(source, settings)` pair — this looks like it should be
    # `chain.from_iterable(map(...))`; confirm against callers/tests.
    for source, settings in chain(self._settings.items(), map(self._load_settings_from_source, additional_sources)):
        if case_sensitive:
            if key in settings:
                found = True
                value = settings[key]
            else:
                continue
        else:
            # Case-insensitive match: any key that lowercases to `key`.
            possible_keys = [k for k in settings.keys() if k.lower() == key]
            if not possible_keys:
                continue
            else:
                if len(possible_keys) > 1:
                    warnings.warn('There are more than one possible value for "{}" in <{}> settings due to case insensitivity.'.format(key, source))
                found = True
                value = settings[possible_keys[0]]
            #end if
        #end if
        if found:
            break
    #end for
    if not found:
        if raise_exception:
            raise MissingSettingException('The "{}" setting is missing.'.format(key))
        if warn_missing:
            warnings.warn('The "{}" setting is missing.'.format(key))
        return default
    #end if
    if use_cache:
        self._cache[key] = value
    if cast_func:
        value = cast_func(value)
    return value
<SYSTEM_TASK:> Check that an item as contained in a list. <END_TASK> <USER_TASK:> Description: def _in_list(self, original_list, item): """ Check that an item as contained in a list. :param original_list: The list. :type original_list: list(object) :param item: The item. :type item: hatemile.util.html.htmldomelement.HTMLDOMElement :return: True if the item contained in the list or False if not. :rtype: bool """
# pylint: disable=no-self-use for item_list in original_list: if item is item_list: return True return False
def _sort_results(self, results):
    """
    Order the results.

    Groups the results by their parent node (identity comparison), then
    sorts each group by the element's position among its parent's
    children, concatenating the groups in first-seen parent order.

    :param results: The disordened results.
    :type results: array.bs4.element.Tag
    :return: The ordened results.
    :rtype: array.bs4.element.Tag
    """
    parents = []
    groups = []
    for result in results:
        # NOTE(review): membership is tested by identity (_in_list) but
        # the index below uses equality (`list.index`); if two distinct
        # parents compare equal this could group under the wrong parent
        # — confirm bs4 Tag equality semantics.
        if not self._in_list(parents, result.parent):
            parents.append(result.parent)
            groups.append([])
            groups[len(groups) - 1].append(result)
        else:
            groups[parents.index(result.parent)].append(result)
    array = []
    for group in groups:
        # Order siblings by their position in the parent's child list.
        array += sorted(
            group,
            key=lambda element: element.parent.contents.index(element)
        )
    return array
def _fix_data_select(self):
    """
    Replace the hyphen of ``data-`` attribute prefixes with 'aaaaa'
    (``data-foo`` -> ``dataaaaaafoo``), to avoid error in search.
    """
    elements = self.document.select('*')
    for element in elements:
        data_attributes = []
        # Iterate over a snapshot of the keys: attributes are mutated
        # below via the auxiliary element.
        for attribute in list(element.attrs.keys()):
            if re.match('^data-', attribute):
                data_attributes.append({
                    'original': attribute,
                    # FIX: anchor the substitution so only the leading
                    # 'data-' is rewritten; the old unanchored
                    # re.sub('data-', ...) would also rewrite any later
                    # 'data-' occurrence inside the attribute name.
                    'modified': re.sub('^data-', 'dataaaaaa', attribute),
                    'value': element[attribute]
                })
        if data_attributes:
            auxiliar_element = BeautifulSoupHTMLDOMElement(element)
            for data_attribute in data_attributes:
                auxiliar_element.remove_attribute(
                    data_attribute['original']
                )
                auxiliar_element.set_attribute(
                    data_attribute['modified'],
                    data_attribute['value']
                )
def render_activity(activity, grouped_activity=None, *args, **kwargs):
    """
    Given an activity, will attempt to render the matching template
    snippet for that activity's content object or will return a simple
    representation of the activity.

    Also takes an optional 'grouped_activity' argument that would match
    up with what is produced by utils.group_activity.

    Returns ``None`` when no per-model template exists.
    """
    # Template path is derived from the content type, e.g.
    # activity_monitor/includes/models/<app>_<model>.html
    template_name = 'activity_monitor/includes/models/{0.app_label}_{0.model}.html'.format(activity.content_type)
    try:
        tmpl = loader.get_template(template_name)
    except template.TemplateDoesNotExist:
        # No snippet registered for this model: let the caller fall back.
        return None
    # we know we have a template, so render it
    content_object = activity.content_object
    return tmpl.render(Context({
        'activity': activity,
        'obj': content_object,
        'grouped_activity': grouped_activity
    }))
def show_activity_count(date=None):
    """
    Simple filter to get activity count for a given day.
    Defaults to today (interpreted as the last 24 hours).
    """
    if date:
        return Activity.objects.filter(timestamp__gte=date).count()
    cutoff = datetime.datetime.now() - datetime.timedelta(hours=24)
    return Activity.objects.filter(timestamp__gte=cutoff).count()
def write_metadata(self, output_path):
    """Build a JSON-LD dataset for LSST Projectmeta.

    Parameters
    ----------
    output_path : `str`
        File path where the ``metadata.jsonld`` should be written for the
        build.
    """
    # Without LaTeX source there is nothing to describe; log and bail out.
    if self._config.lsstdoc is None:
        self._logger.info('No known LSST LaTeX source (--tex argument). '
                          'Not writing a metadata.jsonld file.')
        return

    # Build a JSON-LD dataset for the report+source repository.
    product_data = ltdclient.get_product(self._config)
    metadata = self._config.lsstdoc.build_jsonld(
        url=product_data['published_url'],
        code_url=product_data['doc_repo'],
        ci_url='https://travis-ci.org/' + self._config['github_slug'],
        readme_url=None,
        license_id=None)

    json_text = encode_jsonld(
        metadata,
        separators=(',', ':'),  # compact
        ensure_ascii=False)  # unicode output
    with open(output_path, 'w') as f:
        f.write(json_text)
def libraries():
    """return installed library names."""
    # libraries_dir().dirs() yields path objects; expose sorted names only.
    return sorted(str(entry.name) for entry in libraries_dir().dirs())
def safe_eval(source, *args, **kwargs):
    """eval without import.

    SECURITY NOTE(review): stripping the substring 'import' does NOT make
    eval safe against untrusted input (e.g. attribute access on builtins is
    still possible) — do not pass untrusted strings here.
    """
    sanitized = source.replace('import', '')  # import is not allowed
    return eval(sanitized, *args, **kwargs)
def intersect(self, **kwargs):
    """
    Intersect a Line or Point Collection and the Shoreline.

    Returns the point of intersection along the coastline.

    Should also return a linestring buffer around the intersection point
    so we can calculate the direction to bounce a particle.
    """
    ls = None
    if "linestring" in kwargs:
        ls = kwargs.pop('linestring')
        spoint = Point(ls.coords[0])
        epoint = Point(ls.coords[-1])
    # BUG FIX: original condition was `"start_point" and "end_point" in
    # kwargs`, which only tested for "end_point" ("start_point" is a truthy
    # literal). Both keys must be present.
    elif "start_point" in kwargs and "end_point" in kwargs:
        spoint = kwargs.get('start_point')
        epoint = kwargs.get('end_point')
        ls = LineString(list(spoint.coords) + list(epoint.coords))
    elif "single_point" in kwargs:
        spoint = kwargs.get('single_point')
        epoint = None
        ls = LineString(list(spoint.coords) + list(spoint.coords))
    else:
        raise TypeError(
            "must provide a LineString geometry object, (2) Point geometry objects, or (1) Point geometry object"
        )

    inter = False

    # If the current point lies outside of our current shapefile index,
    # re-query the shapefile in a buffer around this point
    if self._spatial_query_object is None or (self._spatial_query_object and not ls.within(self._spatial_query_object)):
        self.index(point=spoint)

    for element in self._geoms:
        prepped_element = prep(element)

        # Test if starting on land
        if prepped_element.contains(spoint):
            if epoint is None:
                # If we only passed in one point, return the intersection is true.
                return {'point': spoint, 'feature': None}
            else:
                # If we are testing a linestring, raise an exception that we started on land.
                raise Exception('Starting point on land: %s %s %s' % (spoint.envelope, epoint.envelope, element.envelope))
        else:
            # If we are just checking a single point, continue looping.
            if epoint is None:
                continue

        inter = ls.intersection(element)
        if inter:
            # Return the first point in the linestring, and the linestring that it hit
            if isinstance(inter, MultiLineString):
                inter = inter.geoms[0]

            inter = Point(inter.coords[0])
            smaller_int = inter.buffer(self._spatialbuffer)
            shorelines = element.exterior.intersection(smaller_int)
            if isinstance(shorelines, LineString):
                shorelines = [shorelines]
            else:
                shorelines = list(shorelines)

            # ROBUSTNESS FIX: initialize so the return below cannot hit an
            # unbound name when `shorelines` is empty.
            shore_segment = None
            for shore_segment in shorelines:
                # Once we find the linestring in the Polygon that was
                # intersected, break out and return
                if ls.touches(shore_segment):
                    break

            return {'point': Point(inter.x, inter.y, 0), 'feature': shore_segment or None}
    return None
def __bounce(self, **kwargs):
    """
    Bounce off of the shoreline.

    NOTE: This does not work, but left here for future implementation.

    feature = Linestring of two points, being the line segment the particle hit.
    angle = decimal degrees from 0 (x-axis), counter-clockwise (math style)
    """
    start_point = kwargs.pop('start_point')
    hit_point = kwargs.pop('hit_point')
    end_point = kwargs.pop('end_point')
    feature = kwargs.pop('feature')
    distance = kwargs.pop('distance')
    angle = kwargs.pop('angle')

    # Figure out the angle of the shoreline here (beta)
    points_in_shore = map(lambda x: Point(x), list(feature.coords))
    points_in_shore = sorted(points_in_shore, key=lambda x: x.x)

    # The point on the left (least longitude is always the first Point)
    first_shore = points_in_shore[0]
    last_shore = points_in_shore[-1]

    shoreline_x = abs(abs(first_shore.x) - abs(last_shore.x))
    shoreline_y = abs(abs(first_shore.y) - abs(last_shore.y))
    beta = math.degrees(math.atan(shoreline_x / shoreline_y))

    theta = 90 - angle - beta
    bounce_azimuth = AsaMath.math_angle_to_azimuth(angle=2 * theta + angle)

    # FIX: Python 2 `print` statements are a syntax error under Python 3;
    # use the module logger (consistent with __reverse) with lazy %-args.
    logger.debug("Beta: %s", beta)
    logger.debug("Incoming Angle: %s", angle)
    logger.debug("ShorelineAngle: %s", theta + angle)
    logger.debug("Bounce Azimuth: %s", bounce_azimuth)
    logger.debug("Bounce Angle: %s",
                 AsaMath.azimuth_to_math_angle(azimuth=bounce_azimuth))

    after_distance = distance - AsaGreatCircle.great_distance(start_point=start_point, end_point=hit_point)['distance']

    new_point = AsaGreatCircle.great_circle(distance=after_distance, azimuth=bounce_azimuth, start_point=hit_point)
    return Location4D(latitude=new_point['latitude'], longitude=new_point['longitude'], depth=start_point.depth)
def __reverse(self, **kwargs):
    """
    Reverse particle just off of the shore in the direction that it came in.
    Adds a slight random factor to the distance and angle it is reversed in.
    """
    start_point = kwargs.pop('start_point')
    hit_point = kwargs.pop('hit_point')
    distance = kwargs.pop('distance')
    # NOTE(review): `azimuth` is popped but never used below — presumably
    # kept to consume the kwarg; confirm against callers.
    azimuth = kwargs.pop('azimuth')
    reverse_azimuth = kwargs.pop('reverse_azimuth')
    reverse_distance = kwargs.get('reverse_distance', None)
    if reverse_distance is None:
        reverse_distance = 100

    # Randomize the reverse angle slightly (+/- 5 degrees)
    random_azimuth = reverse_azimuth + AsaRandom.random() * 5
    count = 0
    nudge_distance = 0.01
    nudge_point = AsaGreatCircle.great_circle(distance=nudge_distance, azimuth=reverse_azimuth, start_point=hit_point)
    nudge_loc = Location4D(latitude=nudge_point['latitude'], longitude=nudge_point['longitude'], depth=start_point.depth)

    # Find point just offshore to do testing with.  Try 16 times, doubling
    # the nudge distance each attempt. This makes sure the start_point is
    # in the water for the next call to intersect (next while loop).
    while self.intersect(single_point=nudge_loc.point) and count < 16:
        nudge_distance *= 2
        nudge_point = AsaGreatCircle.great_circle(distance=nudge_distance, azimuth=reverse_azimuth, start_point=hit_point)
        nudge_loc = Location4D(latitude=nudge_point['latitude'], longitude=nudge_point['longitude'], depth=start_point.depth)
        count += 1

    # We tried 16 times and couldn't find a point. This should totally never happen.
    if count == 16:
        logger.debug("WOW. Could not find location in water to do shoreline calculation with. Assuming particle did not move from original location")
        return start_point

    # Keep trying to throw particle back, halving the distance each time
    # until it is in water. Only halve it 12 times before giving up and
    # returning the point which the particle came from.
    count = 0
    # Distance amount to half each iteration
    changing_distance = reverse_distance
    new_point = AsaGreatCircle.great_circle(distance=reverse_distance, azimuth=random_azimuth, start_point=hit_point)
    new_loc = Location4D(latitude=new_point['latitude'], longitude=new_point['longitude'], depth=start_point.depth)

    while self.intersect(start_point=nudge_loc.point, end_point=new_loc.point) and count < 12:
        changing_distance /= 2
        new_point = AsaGreatCircle.great_circle(distance=changing_distance, azimuth=random_azimuth, start_point=hit_point)
        new_loc = Location4D(latitude=new_point['latitude'], longitude=new_point['longitude'], depth=start_point.depth)
        count += 1

    # We tried 12 times and the particle was still on shore; return the
    # point the particle started from.  No randomization.
    if count == 12:
        logger.debug("Could not react particle with shoreline. Assuming particle did not move from original location")
        return start_point

    return new_loc
def get_feature_type_info(self):
    """
    Gets FeatureType as a python dict.

    Transforms feature_name info into python dict.
    """
    caps = self.get_capabilities()
    if caps is None:
        return None

    el = caps.find('{http://www.opengis.net/wfs}FeatureTypeList')
    for e in el.findall('{http://www.opengis.net/wfs}FeatureType'):
        if e.find('{http://www.opengis.net/wfs}Name').text == self._feature_name:
            # transform into python dict
            # <Name>sample</Name>
            # <Abstract/>
            # <LatLongBoundingBox maxx="1" maxy="5" ... />
            #
            # becomes:
            #
            # {'Name': 'sample',
            #  'Abstract': None,
            #  'LatLongBoundingBox': {'maxx': 1, 'maxy': 5 ...}}
            #
            # FIX: `e.getchildren()` is deprecated/removed in modern
            # ElementTree — iterate the element directly.
            d = {sube.tag[28:]: sube.text or sube.attrib or None
                 for sube in list(e)}
            # transform LatLongBoundingBox into a Shapely box
            # FIX: `.iteritems()` is Python-2-only; `.items()` works on
            # both Python 2 and 3.
            llbb = {k: round(float(v), 4)
                    for k, v in d['LatLongBoundingBox'].items()}
            d['LatLongBoundingBox'] = box(
                llbb['minx'], llbb['miny'], llbb['maxx'], llbb['maxy'])
            return d
    return None
def extract_edges_from_callable(fn):
    """
    This takes args and kwargs provided, and returns the names of the strings
    assigned. If a string is not provided for a value, an exception is raised.
    This is how we extract the edges provided in the brap call lambdas.
    """
    def extractor(*args, **kwargs):
        """
        Because I don't think this technique is common in python...
        Service constructors were defined as: lambda c: c('a')

        Calling fn(extractor) makes the lambda invoke `extractor` instead of
        a real container, so the arguments it would have passed are captured
        and returned as data.
        """
        return list(args) + list(kwargs.values())

    edges = fn(extractor)
    for edge in edges:
        if not isinstance(edge, str):
            raise ValueError('Provided edge "{}" is not a string'.format(edge))
    return list(edges)
def set_list_attributes(element1, element2, attributes):
    """
    Copy a list of attributes of a element for other element.

    :param element1: The element that have attributes copied.
    :type element1: hatemile.util.html.htmldomelement.HTMLDOMElement
    :param element2: The element that copy the attributes.
    :type element2: hatemile.util.html.htmldomelement.HTMLDOMElement
    :param attributes: The list of attributes that will be copied.
    :type attributes: list(str)
    """
    # Only attributes actually present on the source are copied.
    for name in attributes:
        if element1.has_attribute(name):
            element2.set_attribute(name, element1.get_attribute(name))
def increase_in_list(list_to_increase, string_to_increase):
    """
    Increase a item in a HTML list.

    :param list_to_increase: The list.
    :type list_to_increase: str
    :param string_to_increase: The value of item.
    :type string_to_increase: str
    :return: The HTML list with the item added, if the item not was
             contained in list.
    :rtype: str
    """
    has_list = bool(list_to_increase)
    has_item = bool(string_to_increase)
    if has_list and has_item:
        # Do not duplicate an item already present in the list.
        if CommonFunctions.in_list(list_to_increase, string_to_increase):
            return list_to_increase
        return list_to_increase + ' ' + string_to_increase
    if has_list:
        return list_to_increase
    return string_to_increase
def in_list(list_to_search, string_to_search):
    """
    Verify if the list contains the item.

    :param list_to_search: The list.
    :type list_to_search: str
    :param string_to_search: The value of item.
    :type string_to_search: str
    :return: True if the list contains the item or False if not contains.
    :rtype: bool
    """
    # FIX: the original fell through and implicitly returned None when
    # either argument was empty, despite documenting a bool return.
    if not (list_to_search and string_to_search):
        return False
    # Items are whitespace-separated tokens.
    return string_to_search in re.split('[ \n\t\r]+', list_to_search)
def is_valid_element(element):
    """
    Check that the element can be manipulated by HaTeMiLe.

    :param element: The element
    :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
    :return: True if element can be manipulated or False if element cannot
             be manipulated.
    :rtype: bool
    """
    # Explicitly ignored elements are never manipulated.
    if element.has_attribute(CommonFunctions.DATA_IGNORE):
        return False
    parent_element = element.get_parent_element()
    if parent_element is None:
        return True
    # Recursion stops at the document root (BODY/HTML).
    if parent_element.get_tag_name() in ('BODY', 'HTML'):
        return True
    return CommonFunctions.is_valid_element(parent_element)
def burnin(self, n):
    """Remove the earliest n ensemble members from the MCMC output"""
    # sediment_rate is 2-D (parameters x ensemble members): drop columns.
    self.sediment_rate = self.sediment_rate[:, n:]
    # The remaining outputs are 1-D per-member sequences: drop the first n.
    self.headage = self.headage[n:]
    self.sediment_memory = self.sediment_memory[n:]
    self.objective = self.objective[n:]
def update_current_time(loop):
    """Cache the current time, since it is needed at the end of every
    keep-alive request to update the request timeout time

    :param loop:
    :return:
    """
    global current_time
    current_time = time()
    # Re-schedule itself so the cached time refreshes once per second.
    loop.call_later(1, partial(update_current_time, loop))
def options(self, parser, env=None):
    """
    Adds command-line options for this plugin.

    :param parser: optparse-style parser to register the option on.
    :param env: environment mapping; defaults to ``os.environ``.
    """
    if env is None:
        env = os.environ
    # The env var (NOSE_<DEST>) supplies the default when the flag is absent.
    env_opt_name = 'NOSE_%s' % self.__dest_opt_name.upper()
    parser.add_option("--%s" % self.__opt_name,
                      dest=self.__dest_opt_name,
                      type="string",
                      default=env.get(env_opt_name),
                      help=".ini file providing the environment for the "
                           "test web application.")
def create(self, ami, count, config=None):
    """Create an instance using the launcher.

    :param ami: AMI image id to launch.
    :param count: number of instances to launch.
    :param config: optional dict of launcher configuration.
    :return: instance info from the launch call.
    """
    return self.Launcher(config=config).launch(ami, count)
def Launcher(self, config=None):
    """Provides a configurable launcher for EC2 instances."""
    class _launcher(EC2ApiClient):
        """Configurable launcher for EC2 instances. Create the Launcher
        (passing an optional dict of its attributes), set its attributes
        (as described in the RunInstances API docs), then launch().
        """
        def __init__(self, aws, config):
            super(_launcher, self).__init__(aws)
            self.config = config
            # Snapshot the attribute names that exist now, so launch() can
            # tell apart attributes the caller set afterwards.
            self._attr = list(self.__dict__.keys()) + ['_attr']

        def launch(self, ami, min_count, max_count=0):
            """Use given AMI to launch min_count instances with the
            current configuration. Returns instance info list.
            """
            # Base params from config, overridden by any attributes the
            # caller assigned on the launcher after construction.
            params = config.copy()
            params.update(dict([i for i in self.__dict__.items()
                                if i[0] not in self._attr]))
            return self.call("RunInstances",
                             ImageId=ami, MinCount=min_count,
                             MaxCount=max_count or min_count,
                             response_data_key="Instances",
                             **params)

    if not config:
        config = {}
    return _launcher(self._aws, config)
def events(self, all_instances=None, instance_ids=None, filters=None):
    """a list of tuples containing instance Id's and event information"""
    params = {}
    if filters:
        params["filters"] = make_filters(filters)
    if instance_ids:
        params['InstanceIds'] = instance_ids

    event_list = []
    for status in self.status(all_instances, **params):
        # Statuses without events contribute nothing.
        for event in status.get("Events") or []:
            event[u"InstanceId"] = status.get('InstanceId')
            event_list.append(event)
    return event_list
def attach(self, volume_id, instance_id, device_path):
    """Attach a volume to an instance, exposing it with a device name.

    :param volume_id: EBS volume id to attach.
    :param instance_id: target EC2 instance id.
    :param device_path: device name exposed to the instance (e.g. /dev/sdf).
    :return: the AttachVolume API response.
    """
    return self.call("AttachVolume",
                     VolumeId=volume_id,
                     InstanceId=instance_id,
                     Device=device_path)
def save(self, *args, **kwargs):
    """
    Overrides the save method to populate the slug before persisting.
    """
    # Regenerate the slug on every save so it stays unique.
    self.slug = self.create_slug()
    super(Slugable, self).save(*args, **kwargs)
def create_slug(self):
    """
    Creates slug, checks if slug is unique, and loop if not
    """
    base = self.slug_source
    attempt = 0
    # Keep appending an increasing counter until no other row uses the slug.
    while True:
        if attempt == 0:
            candidate = slugify(base)
        else:
            candidate = slugify('{0} {1}'.format(base, str(attempt)))
        try:
            # does the slug already exist, excluding the current object
            self.__class__.objects.exclude(pk=self.pk).get(slug=candidate)
        except ObjectDoesNotExist:
            # the slug does not exist — candidate is unique
            return candidate
        # slug exists: bump the counter and try again
        attempt += 1
def get_html(url,
             headers=None,
             timeout=None,
             errors="strict",
             wait_time=None,
             driver=None,
             zillow_only=False,
             cache_only=False,
             zillow_first=False,
             cache_first=False,
             random=False,
             **kwargs):
    """
    Use Google Cached Url.

    :param cache_only: if True, then real zillow site will never be used.
    :param driver: selenium browser driver.
    """
    if wait_time is None:
        wait_time = Config.Crawler.wait_time

    # prepare url
    cache_url1 = prefix + url + "/"
    cache_url2 = prefix + url
    zillow_url = url

    only_flags = [zillow_only, cache_only]
    if sum(only_flags) == 0:
        first_flags = [zillow_first, cache_first]
        if sum(first_flags) == 0:
            if random:
                if randint(0, 1):
                    all_url = [zillow_url, cache_url1, cache_url2]
                else:
                    all_url = [cache_url1, cache_url2, zillow_url]
            else:
                all_url = [zillow_url, cache_url1, cache_url2]
        elif sum(first_flags) == 1:
            if zillow_first:
                all_url = [zillow_url, cache_url1, cache_url2]
            elif cache_first:
                all_url = [cache_url1, cache_url2, zillow_url]
        else:
            raise ValueError(
                "Only zero or one `xxx_first` argument could be `True`!")
    elif sum(only_flags) == 1:
        if zillow_only:
            all_url = [zillow_url, ]
        elif cache_only:
            all_url = [cache_url1, cache_url2]
    else:
        raise ValueError(
            "Only zero or one `xxx_only` argument could be `True`!")

    # FIX: the original did `except Exception as e: pass` inside the loop
    # and then `raise e` after it. Python 3 deletes the `as e` binding when
    # the except clause exits, so that `raise e` crashed with NameError.
    # Capture the last error explicitly instead.
    last_exc = None
    for candidate_url in all_url:
        try:
            return _get_html(candidate_url, headers, timeout, errors,
                             wait_time, driver, **kwargs)
        except Exception as e:
            last_exc = e
    raise last_exc
def call(func, args):
    """Call the function with args normalized and cast to the correct types.

    Args:
        func: The function to call.
        args: The arguments parsed by docopt.

    Returns:
        The return value of func.
    """
    assert hasattr(func, '__call__'), 'Cannot call func: {}'.format(
        func.__name__)
    # For callable objects, inspect __call__ rather than the instance.
    raw_func = (
        func if isinstance(func, FunctionType) else func.__class__.__call__)
    # Unannotated parameters default to Any.
    hints = collections.defaultdict(lambda: Any, get_type_hints(raw_func))
    argspec = _getargspec(raw_func)
    named_args = {}
    varargs = ()
    for k, nk, v in _normalize(args):
        if nk == argspec.varargs:
            # *args is typed as a homogeneous tuple of the declared hint.
            hints[nk] = Tuple[hints[nk], ...]
        elif nk not in argspec.args and argspec.varkw in hints:
            # Unknown keyword falls under the **kwargs hint, if any.
            hints[nk] = hints[argspec.varkw]
        try:
            value = cast(hints[nk], v)
        except TypeError as e:
            _LOGGER.exception(e)
            six.raise_from(exc.InvalidCliValueError(k, v), e)
        if nk == argspec.varargs:
            varargs = value
        elif (nk in argspec.args or argspec.varkw) and (
                nk not in named_args or named_args[nk] is None):
            # First non-None value wins for each named argument.
            named_args[nk] = value
    return func(*varargs, **named_args)
def get_callable(subcommand):
    # type: (config.RcliEntryPoint) -> Union[FunctionType, MethodType]
    """Return a callable object from the subcommand.

    Args:
        subcommand: A object loaded from an entry point. May be a module,
            class, or function.

    Returns:
        The callable entry point for the subcommand. If the subcommand is a
        function, it will be returned unchanged. If the subcommand is a
        module or a class, an instance of the command class will be returned.

    Raises:
        AssertionError: Raised when a module entry point does not have a
            callable class named Command.
    """
    _LOGGER.debug(
        'Creating callable from subcommand "%s".', subcommand.__name__)
    if not isinstance(subcommand, ModuleType):
        target = subcommand
    else:
        _LOGGER.debug('Subcommand is a module.')
        assert hasattr(subcommand, 'Command'), (
            'Module subcommand must have callable "Command" class definition.')
        target = subcommand.Command  # type: ignore
    # Classes are instantiated; plain functions pass through untouched.
    for class_type in six.class_types:
        if isinstance(target, class_type):
            return target()
    return target
def _getargspec(func):
    """Return a Python 3-like argspec object.

    Note:
        args contains varargs and varkw if they exist. This behavior differs
        from getargspec and getfullargspec.

    Args:
        func: The function to inspect.

    Returns:
        A named tuple with three parameters:
            args: All named arguments, including varargs and varkw if they
                are not None.
            varargs: The name of the *args variable. May be None.
            varkw: The name of the **kwargs variable. May be None.
    """
    argspec = _getspec(func)
    # Index 2 is "keywords" in PY2 and "varkw" in PY3.
    varkw = argspec[2]
    args = list(argspec.args)
    if argspec.varargs:
        args.append(argspec.varargs)
    if varkw:
        args.append(varkw)
    return _ArgSpec(args, argspec.varargs, varkw)
def _normalize(args):
    # type: (Dict[str, Any]) -> Generator[Tuple[str, str, Any], None, None]
    """Yield a 3-tuple containing the key, a normalized key, and the value.

    Args:
        args: The arguments parsed by docopt.

    Yields:
        A 3-tuple that contains the docopt parameter name, the parameter name
        normalized to be a valid python identifier, and the value assigned to
        the parameter.
    """
    # Names that must not be shadowed (builtins); stable for the duration
    # of the loop, so compute once.
    do_not_shadow = dir(six.moves.builtins)  # type: ignore
    for key, value in six.iteritems(args):
        # Replace non-identifier characters (and a leading digit) with '_'.
        normalized = re.sub(r'\W|^(?=\d)', '_', key).strip('_').lower()
        if keyword.iskeyword(normalized) or normalized in do_not_shadow:
            normalized += '_'
        _LOGGER.debug('Normalized "%s" to "%s".', key, normalized)
        yield key, normalized, value
def copy(self, filename=None):
    """Puts on destination as a temp file, renames on
    the destination.
    """
    source = os.path.join(self.src_path, filename)
    final_dst = os.path.join(self.dst_path, filename)
    temp_dst = os.path.join(self.dst_tmp, filename)
    # Upload to a temp path first so the final name only ever appears
    # complete, then atomically rename into place.
    self.put(src=source, dst=temp_dst,
             callback=self.update_progress, confirm=True)
    self.rename(src=temp_dst, dst=final_dst)
async def async_get_camera_image(self, image_name, username=None,
                                 password=None):
    """
    Grab a single image from the Xeoma web server

    Arguments:
        image_name: the name of the image to fetch (i.e. image01)
        username: the username to directly access this image
        password: the password to directly access this image
    """
    try:
        data = await self.async_fetch_image_data(
            image_name, username, password)
        # A None payload means the server refused the request (bad auth).
        if data is None:
            raise XeomaError('Unable to authenticate with Xeoma web '
                             'server')
        return data
    except asyncio.TimeoutError:
        raise XeomaError('Connection timeout while fetching camera image.')
    except aiohttp.ClientError as e:
        raise XeomaError('Unable to fetch image: {}'.format(e))
async def async_fetch_image_data(self, image_name, username, password):
    """
    Fetch image data from the Xeoma web server

    Arguments:
        image_name: the name of the image to fetch (i.e. image01)
        username: the username to directly access this image
        password: the password to directly access this image
    """
    cookies = self.get_session_cookie()
    # Per-image credentials are passed in the 'user' query parameter;
    # an empty value falls back to the session cookie.
    if username is not None and password is not None:
        params = {'user': self.encode_user(username, password)}
    else:
        params = {'user': ''}
    async with aiohttp.ClientSession(cookies=cookies) as session:
        resp = await session.get(
            '{}/{}.jpg'.format(self._base_url, image_name),
            params=params
        )
        if resp.headers['Content-Type'] != 'image/jpeg':
            return None
        return await resp.read()
async def async_get_image_names(self):
    """
    Parse web server camera view for camera image names
    """
    cookies = self.get_session_cookie()
    try:
        async with aiohttp.ClientSession(cookies=cookies) as session:
            resp = await session.get(
                self._base_url
            )
            t = await resp.text()
            # FIX: use raw strings — '\w' / '\.' in a plain string literal
            # is an invalid escape sequence (DeprecationWarning, and a
            # future SyntaxError).
            match = re.findall(r'(?:\w|\d|")/(.*?).(?:mjpg|jpg)', t)
            if len(match) == 0:
                raise XeomaError('Unable to find any camera image names')
            image_names = set(match)
            results = []
            for image_name in image_names:
                # FIX: escape the image name — it is interpolated into a
                # regex pattern and could contain metacharacters.
                match = re.search(
                    re.escape(image_name) + r'\.(?:mjpg|jpg).*?user=(.*?)&', t
                )
                if match and len(match.group(1)) > 0:
                    d = base64.b64decode(unquote(match.group(1))) \
                        .decode('ASCII')
                    creds = d.split(':')
                    if len(creds) < 2:
                        raise XeomaError('Error parsing image credentials')
                    results.append((image_name, creds[0], creds[1]))
                else:
                    results.append((image_name, None, None))
            return results
    except asyncio.TimeoutError as e:
        raise XeomaError("Unable to connect to Xeoma web server")
def _get_sha256_digest(self, content):
    """Return the sha256 digest of the content in the
    header format the Merchant API expects.
    """
    # FIX: base64.b64encode returns bytes on Python 3; decode before
    # concatenating with the 'SHA256=' str prefix (on Python 2 this yields
    # an equivalent text value).
    content_sha256 = base64.b64encode(
        SHA256.new(content).digest()).decode('ascii')
    return 'SHA256=' + content_sha256
def _sha256_sign(self, method, url, headers, body):
    """Sign the request with SHA256.
    """
    d = ''
    sign_headers = method.upper() + '|' + url + '|'
    # Only X-Mcash-* headers participate, in sorted order, joined by '&'.
    for key, value in sorted(headers.items()):
        if key.startswith('X-Mcash-'):
            sign_headers += d + key.upper() + '=' + value
            d = '&'
    # FIX: hash input must be bytes on Python 3, and b64encode returns
    # bytes — encode before hashing and decode before str concatenation.
    rsa_signature = base64.b64encode(
        self.signer.sign(SHA256.new(sign_headers.encode('utf-8')))
    ).decode('ascii')
    return 'RSA-SHA256 ' + rsa_signature
def create_toolbox(self, filename):
    """
    Creates a new Python toolbox where each task name is a GPTool in the
    toolbox.

    :param filename: the filename of the generated toolbox
    """
    filename = os.path.splitext(filename)[0]
    label = os.path.basename(filename)

    # Get task information first so we can build the tool list
    tool_list = [task.name for task in self.tasks]

    # O_EXCL: refuse to overwrite an existing toolbox file.
    file_descriptor = os.open(filename + '.pyt',
                              os.O_WRONLY | os.O_CREAT | os.O_EXCL)
    with os.fdopen(file_descriptor, 'w') as self.toolbox_file:
        self.toolbox_file.write(self._imports_template.substitute({}))
        toolbox_class = self._toolbox_class_template.substitute(
            {'label': label,
             'alias': self.alias,
             'toolList': param_builder.convert_list(tool_list)
             }
        )
        self.toolbox_file.write(toolbox_class)
        for task in self.tasks:
            # One GPTool per task, plus its side-car help XML.
            self.toolbox_file.write(self.create_tool(task))
            toolbox_help_filename = '.'.join(
                (filename, task.name, 'pyt', 'xml'))
            help_builder.create(toolbox_help_filename, task, self.alias)
    return filename
def create_tool(self, task):
    """
    Creates a new GPTool for the toolbox.
    """
    gp_tool = {
        'taskName': task.name,
        'taskDisplayName': task.display_name,
        'taskDescription': task.description,
        'canRunInBackground': True,
        'taskUri': task.uri,
    }
    # The execute template only consumes the basic task fields above;
    # the remaining sections are generated from the task's parameters.
    gp_tool['execute'] = self._execute_template.substitute(gp_tool)
    gp_tool['parameterInfo'] = param_builder.create_param_info(
        task.parameters, self.parameter_map)
    gp_tool['updateParameter'] = param_builder.create_update_parameter(
        task.parameters, self.parameter_map)
    gp_tool['preExecute'] = param_builder.create_pre_execute(
        task.parameters, self.parameter_map)
    gp_tool['postExecute'] = param_builder.create_post_execute(
        task.parameters, self.parameter_map)
    return self._tool_template.substitute(gp_tool)
def import_script(self, script_name):
    """Finds the script file and copies it into the toolbox

    :param script_name: path (relative or absolute) of the script to inline.
    """
    filename = os.path.abspath(script_name)
    # Inline the whole script body into the toolbox file being generated.
    with open(filename, 'r') as script_file:
        self.toolbox_file.write(script_file.read())
def make_relationship(self, relator,
                      direction=RELATIONSHIP_DIRECTIONS.BIDIRECTIONAL):
    """
    Create a relationship object for this attribute from the given
    relator and relationship direction.
    """
    # Dispatch on the interface the relator provides.
    if IEntity.providedBy(relator):  # pylint:disable=E1101
        return DomainRelationship(relator, self, direction=direction)
    if IResource.providedBy(relator):  # pylint:disable=E1101
        return ResourceRelationship(relator, self, direction=direction)
    raise ValueError('Invalid relator argument "%s" for '
                     'relationship; must provide IEntity or '
                     'IResource.' % relator)
def register(self, app, options):
    """Register the blueprint to the mach9 app.

    :param app: the application to attach routes/middleware/listeners to.
    :param options: registration options; 'url_prefix' overrides the
        blueprint's own prefix.
    """
    url_prefix = options.get('url_prefix', self.url_prefix)

    # Routes
    for future in self.routes:
        # attach the blueprint name to the handler so that it can be
        # prefixed properly in the router
        future.handler.__blueprintname__ = self.name
        # Prepend the blueprint URI prefix if available
        uri = url_prefix + future.uri if url_prefix else future.uri
        app.route(
            # collapse an accidental '//' produced by prefix joining
            uri=uri[1:] if uri.startswith('//') else uri,
            methods=future.methods,
            host=future.host or self.host,
            strict_slashes=future.strict_slashes,
            stream=future.stream
        )(future.handler)

    # Middleware
    for future in self.middlewares:
        if future.args or future.kwargs:
            app.middleware(*future.args,
                           **future.kwargs)(future.middleware)
        else:
            app.middleware(future.middleware)

    # Exceptions
    for future in self.exceptions:
        app.exception(*future.args, **future.kwargs)(future.handler)

    # Static Files
    for future in self.statics:
        # Prepend the blueprint URI prefix if available
        uri = url_prefix + future.uri if url_prefix else future.uri
        app.static(uri, future.file_or_directory,
                   *future.args, **future.kwargs)

    # Event listeners
    for event, listeners in self.listeners.items():
        for listener in listeners:
            app.listener(event)(listener)
def add_route(self, handler, uri, methods=frozenset({'GET'}), host=None,
              strict_slashes=False):
    """Create a blueprint route from a function.

    :param handler: function for handling uri requests. Accepts function,
                    or class instance with a view_class method.
    :param uri: endpoint at which the route will be accessible.
    :param methods: list of acceptable HTTP methods.
    :return: function or class instance
    """
    # Handle HTTPMethodView differently: derive methods from the view's
    # lowercase handler attributes.
    if hasattr(handler, 'view_class'):
        http_methods = (
            'GET', 'POST', 'PUT', 'HEAD', 'OPTIONS', 'PATCH', 'DELETE')
        methods = {method for method in http_methods
                   if getattr(handler.view_class, method.lower(), None)}

    # handle composition view differently
    if isinstance(handler, self._composition_view_class):
        methods = handler.handlers.keys()

    self.route(uri=uri, methods=methods, host=host,
               strict_slashes=strict_slashes)(handler)
    return handler
<SYSTEM_TASK:> remove boards by GUI. <END_TASK> <USER_TASK:> Description: def remove_boards_gui(hwpack=''): """remove boards by GUI."""
if not hwpack:
    # With several installed hardware packages ask the user which one
    # to use; otherwise fall back to the only one available.
    if len(hwpack_names()) > 1:
        hwpack = psidialogs.choice(
            hwpack_names(),
            'select hardware package to select board from!',
            title='select')
    else:
        hwpack = hwpack_names()[0]
    print('%s selected' % hwpack)
# hwpack may still be falsy if the user cancelled the dialog.
if hwpack:
    sel = psidialogs.multi_choice(
        board_names(hwpack),
        'select boards to remove from %s!' % boards_txt(hwpack),
        title='remove boards')
    print('%s selected' % sel)
    if sel:
        for board in sel:
            remove_board(board)
            print('%s was removed' % board)
<SYSTEM_TASK:> An element maker with a single namespace that uses that namespace as the default <END_TASK> <USER_TASK:> Description: def single(C, namespace=None): """An element maker with a single namespace that uses that namespace as the default"""
if namespace is None: B = C()._ else: B = C(default=namespace, _=namespace)._ return B
<SYSTEM_TASK:> Intersect Point and Bathymetry <END_TASK> <USER_TASK:> Description: def intersect(self, **kwargs): """ Intersect Point and Bathymetry returns bool """
end_point = kwargs.pop('end_point') depth = self.get_depth(location=end_point) # Bathymetry and a particle's depth are both negative down if depth < 0 and depth > end_point.depth: inter = True else: inter = False return inter
<SYSTEM_TASK:> The time of reaction is ignored here <END_TASK> <USER_TASK:> Description: def react(self, **kwargs): """ The time of reaction is ignored here and should be handled by whatever called this function. """
react_type = kwargs.get("type", self._type) if react_type == 'hover': return self.__hover(**kwargs) elif react_type == 'stick': pass elif react_type == 'reverse': return self.__reverse(**kwargs) else: raise ValueError("Bathymetry interaction type not supported")
<SYSTEM_TASK:> This hovers the particle 1m above the bathymetry WHERE IT WOULD HAVE ENDED UP. <END_TASK> <USER_TASK:> Description: def __hover(self, **kwargs): """ This hovers the particle 1m above the bathymetry WHERE IT WOULD HAVE ENDED UP. This is WRONG and we need to compute the location that it actually hit the bathymetry and hover 1m above THAT. """
end_point = kwargs.pop('end_point') # The location argument here should be the point that intersected the bathymetry, # not the end_point that is "through" the bathymetry. depth = self.get_depth(location=end_point) return Location4D(latitude=end_point.latitude, longitude=end_point.longitude, depth=(depth + 1.))
<SYSTEM_TASK:> If we hit the bathymetry, set the location to where we came from. <END_TASK> <USER_TASK:> Description: def __reverse(self, **kwargs): """ If we hit the bathymetry, set the location to where we came from. """
start_point = kwargs.pop('start_point') return Location4D(latitude=start_point.latitude, longitude=start_point.longitude, depth=start_point.depth)
<SYSTEM_TASK:> Parse the command line options and launch the requested command. <END_TASK> <USER_TASK:> Description: def main(): # type: () -> typing.Any """Parse the command line options and launch the requested command. If the command is 'help' then print the help message for the subcommand; if no subcommand is given, print the standard help message. """
# Enable ANSI colour handling; wrap stdout/stderr only on Python 3.
colorama.init(wrap=six.PY3)
doc = usage.get_primary_command_usage()
# If the usage string declares a <command> placeholder this program
# dispatches to subcommands, so stop option parsing at the first
# positional argument.
allow_subcommands = '<command>' in doc
args = docopt(doc, version=settings.version,
              options_first=allow_subcommands)
# Install the logging excepthook only if nothing has already replaced
# the interpreter default.
if sys.excepthook is sys.__excepthook__:
    sys.excepthook = log.excepthook
try:
    log.enable_logging(log.get_log_level(args))
    # Arguments following the (optional) subcommand on the real command
    # line; used when docopt did not capture <args>.
    default_args = sys.argv[2 if args.get('<command>') else 1:]
    # Special-case 'help' unless a catch-all subcommand (key None) is
    # registered to handle it instead.
    if (args.get('<command>') == 'help' and
            None not in settings.subcommands):
        subcommand = next(iter(args.get('<args>', default_args)), None)
        return usage.get_help_usage(subcommand)
    argv = [args.get('<command>')] + args.get('<args>', default_args)
    return _run_command(argv)
except exc.InvalidCliValueError as e:
    # Return the message so the caller can use it as the exit status.
    return str(e)
<SYSTEM_TASK:> Return the function for the specified subcommand. <END_TASK> <USER_TASK:> Description: def _get_subcommand(name): # type: (str) -> config.RcliEntryPoint """Return the function for the specified subcommand. Args: name: The name of a subcommand. Returns: The loadable object from the entry point represented by the subcommand. """
_LOGGER.debug('Accessing subcommand "%s".', name) if name not in settings.subcommands: raise ValueError( '"{subcommand}" is not a {command} command. \'{command} help -a\' ' 'lists all available subcommands.'.format( command=settings.command, subcommand=name) ) return settings.subcommands[name]
<SYSTEM_TASK:> Run the command with the given CLI options and exit. <END_TASK> <USER_TASK:> Description: def _run_command(argv): # type: (typing.List[str]) -> typing.Any """Run the command with the given CLI options and exit. Command functions are expected to have a __doc__ string that is parseable by docopt. Args: argv: The list of command line arguments supplied for a command. The first argument is expected to be the name of the command to be run. Note that this is different than the full arguments parsed by docopt for the entire program. Raises: ValueError: Raised if the user attempted to run an invalid command. """
command_name, argv = _get_command_and_argv(argv) _LOGGER.info('Running command "%s %s" with args: %s', settings.command, command_name, argv) subcommand = _get_subcommand(command_name) func = call.get_callable(subcommand) doc = usage.format_usage(subcommand.__doc__) args = _get_parsed_args(command_name, doc, argv) return call.call(func, args) or 0
<SYSTEM_TASK:> Extract the command name and arguments to pass to docopt. <END_TASK> <USER_TASK:> Description: def _get_command_and_argv(argv): # type: (typing.List[str]) -> typing.Tuple[str, typing.List[str]] """Extract the command name and arguments to pass to docopt. Args: argv: The argument list being used to run the command. Returns: A tuple containing the name of the command and the arguments to pass to docopt. """
command_name = argv[0] if not command_name: argv = argv[1:] elif command_name == settings.command: argv.remove(command_name) return command_name, argv
<SYSTEM_TASK:> Parse the docstring with docopt. <END_TASK> <USER_TASK:> Description: def _get_parsed_args(command_name, doc, argv): # type: (str, str, typing.List[str]) -> typing.Dict[str, typing.Any] """Parse the docstring with docopt. Args: command_name: The name of the subcommand to parse. doc: A docopt-parseable string. argv: The list of arguments to pass to docopt during parsing. Returns: The docopt results dictionary. If the subcommand has the same name as the primary command, the subcommand value will be added to the dictionary. """
_LOGGER.debug('Parsing docstring: """%s""" with arguments %s.', doc, argv) args = docopt(doc, argv=argv) if command_name == settings.command: args[command_name] = True return args
<SYSTEM_TASK:> Print a trace message to stderr if environment variable is set. <END_TASK> <USER_TASK:> Description: def trace(msg): """Print a trace message to stderr if environment variable is set. """
# Tracing is opt-in: only emit when JARN_TRACE=1 in the environment.
enabled = os.environ.get('JARN_TRACE') == '1'
if enabled:
    print('TRACE:', msg, file=sys.stderr)
<SYSTEM_TASK:> Enable every module that isn't marked as disabled in the modules folder. <END_TASK> <USER_TASK:> Description: def enable_modules_from_last_session(seashcommanddict): """ Enable every module that isn't marked as disabled in the modules folder. This function is meant to be called when seash is initializing and nowhere else. A module is marked as disabled when there is a modulename.disabled file. """
successfully_enabled_modules = [] modules_to_enable = get_enabled_modules() for modulename in modules_to_enable: # There are no bad side effects to seash's state when we do this # The only thing that should happen is that the modulename.disabled file # gets created (temporarily) disable(seashcommanddict, modulename) try: enable(seashcommanddict, modulename) successfully_enabled_modules.append(modulename) except seash_exceptions.ModuleConflictError, e: print "Failed to enable the '"+modulename+"' module due to the following conflicting command:" print str(e) # We mark this module as disabled by adding a modulename.disabled file. open(MODULES_FOLDER_PATH + os.sep + modulename + ".disabled", 'w') except seash_exceptions.InitializeError, e: print "Failed to enable the '"+modulename+"' module." disable(seashcommanddict, modulename) successfully_enabled_modules.sort() print 'Enabled modules:', ', '.join(successfully_enabled_modules), '\n'