text_prompt: stringlengths 100 to 17.7k
code_prompt: stringlengths 7 to 9.86k
<SYSTEM_TASK:> Aggregates district-level estimates for each table within the country. <END_TASK>
<USER_TASK:> Description:
def aggregate_national_estimates_by_district(self):
    """
    Aggregates district-level estimates for each table within the country.

    Creates data structure designed for an export in this format:
    ...{series}/{year}/{table}/districts.json
    """
    data = {}
    fips = "00"
    aggregated_labels = []
    states = Division.objects.filter(level=self.DISTRICT_LEVEL)
    estimates = CensusEstimate.objects.filter(
        division__level=self.DISTRICT_LEVEL
    )
    for estimate in estimates:
        series = estimate.variable.table.series
        year = estimate.variable.table.year
        table = estimate.variable.table.code
        label = estimate.variable.label.label
        table_label = "{}{}".format(table, label)
        code = estimate.variable.code
        if series not in data:
            data[series] = {}
        if year not in data[series]:
            data[series][year] = {}
        if table not in data[series][year]:
            data[series][year][table] = {}
        if fips not in data[series][year][table]:
            data[series][year][table][fips] = {}
        if label is not None:
            if table_label not in aggregated_labels:
                aggregated_labels.append(table_label)
                data[series][year][table][fips][label] = [
                    self.aggregate_variable(estimate, division.id)
                    for division in states
                    if len(
                        CensusEstimate.objects.filter(
                            variable=estimate.variable,
                            division=division.id,
                        )
                    ) > 0
                ]
        else:
            if code in data[series][year][table][fips]:
                data[series][year][table][fips][code].append(
                    estimate.estimate
                )
            else:
                data[series][year][table][fips][code] = [estimate.estimate]
    return data
<SYSTEM_TASK:> Aggregates county-level estimates for each table within a given state. <END_TASK>
<USER_TASK:> Description:
def aggregate_state_estimates_by_county(self, parent):
    """
    Aggregates county-level estimates for each table within a given state.

    Creates data structure designed for an export in this format:
    ...{series}/{year}/{table}/{state_fips}/counties.json
    """
    data = {}
    for division in tqdm(
        Division.objects.filter(level=self.COUNTY_LEVEL, parent=parent)
    ):
        fips = division.code
        id = division.id
        aggregated_labels = []  # Keep track of already agg'ed variables
        for estimate in division.census_estimates.all():
            series = estimate.variable.table.series
            year = estimate.variable.table.year
            table = estimate.variable.table.code
            label = estimate.variable.label.label
            table_label = "{}{}".format(table, label)
            code = estimate.variable.code
            if series not in data:
                data[series] = {}
            if year not in data[series]:
                data[series][year] = {}
            if table not in data[series][year]:
                data[series][year][table] = {}
            if fips not in data[series][year][table]:
                data[series][year][table][fips] = {}
            if label is not None:
                if table_label not in aggregated_labels:
                    aggregated_labels.append(table_label)
                    data[series][year][table][fips][
                        label
                    ] = self.aggregate_variable(estimate, id)
            else:
                data[series][year][table][fips][
                    code
                ] = estimate.estimate
    return data
<SYSTEM_TASK:> return the endnotes from the docx, keyed to string id. <END_TASK>
<USER_TASK:> Description:
def endnotemap(self, cache=True):
    """return the endnotes from the docx, keyed to string id."""
    if self.__endnotemap is not None and cache:
        return self.__endnotemap
    else:
        x = self.xml(src='word/endnotes.xml')
        d = Dict()
        if x is None:
            return d
        for endnote in x.root.xpath("w:endnote", namespaces=self.NS):
            id = endnote.get("{%(w)s}id" % self.NS)
            typ = endnote.get("{%(w)s}type" % self.NS)
            d[id] = Dict(id=id, type=typ, elem=endnote)
        if cache:
            self.__endnotemap = d
        return d
<SYSTEM_TASK:> return the footnotes from the docx, keyed to string id. <END_TASK>
<USER_TASK:> Description:
def footnotemap(self, cache=True):
    """return the footnotes from the docx, keyed to string id."""
    if self.__footnotemap is not None and cache:
        return self.__footnotemap
    else:
        x = self.xml(src='word/footnotes.xml')
        d = Dict()
        if x is None:
            return d
        for footnote in x.root.xpath("w:footnote", namespaces=self.NS):
            id = footnote.get("{%(w)s}id" % self.NS)
            typ = footnote.get("{%(w)s}type" % self.NS)
            d[id] = Dict(id=id, type=typ, elem=footnote)
        if cache:
            self.__footnotemap = d
        return d
<SYSTEM_TASK:> return the comments from the docx, keyed to string id. <END_TASK>
<USER_TASK:> Description:
def commentmap(self, cache=True):
    """return the comments from the docx, keyed to string id."""
    if self.__commentmap is not None and cache:
        return self.__commentmap
    else:
        x = self.xml(src='word/comments.xml')
        d = Dict()
        if x is None:
            return d
        for comment in x.root.xpath("w:comment", namespaces=self.NS):
            id = comment.get("{%(w)s}id" % self.NS)
            typ = comment.get("{%(w)s}type" % self.NS)
            d[id] = Dict(id=id, type=typ, elem=comment)
        if cache:
            self.__commentmap = d
        return d
<SYSTEM_TASK:> return the selector for the given stylemap style <END_TASK>
<USER_TASK:> Description:
def selector(C, style):
    """return the selector for the given stylemap style"""
    clas = C.classname(style.name)
    if style.type == 'paragraph':
        # heading outline levels are 0..7 internally, indicating h1..h8
        outlineLvl = int(
            (style.properties.get('outlineLvl') or {}).get('val') or 8) + 1
        if outlineLvl < 9:
            tag = 'h%d' % outlineLvl
        else:
            tag = 'p'
    elif style.type == 'character':
        tag = 'span'
    elif style.type == 'table':
        tag = 'table'
    elif style.type == 'numbering':
        tag = 'ol'
    return "%s.%s" % (tag, clas)
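A standalone sketch of just the tag-selection rule above (not the library's API), showing how the internal 0-based outline level maps to h1..h8 and how a missing level falls back to a plain paragraph:

def heading_tag(outline_lvl=None):
    # outline levels are 0..7 internally, indicating h1..h8
    lvl = (outline_lvl if outline_lvl is not None else 8) + 1
    return 'h%d' % lvl if lvl < 9 else 'p'

assert heading_tag(0) == 'h1'   # "Heading 1"
assert heading_tag(7) == 'h8'   # deepest heading level
assert heading_tag() == 'p'     # no outline level: body text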
<SYSTEM_TASK:> Creates a new collection for the registered resource and calls <END_TASK>
<USER_TASK:> Description:
def load_collection_from_stream(resource, stream, content_type):
    """
    Creates a new collection for the registered resource and calls
    `load_into_collection_from_stream` with it.
    """
    coll = create_staging_collection(resource)
    load_into_collection_from_stream(coll, stream, content_type)
    return coll
<SYSTEM_TASK:> Loads resources from the specified file into the given collection <END_TASK>
<USER_TASK:> Description:
def load_into_collection_from_file(collection, filename, content_type=None):
    """
    Loads resources from the specified file into the given collection
    resource.

    If no content type is provided, an attempt is made to look up the
    extension of the given filename in the MIME content type registry.
    """
    if content_type is None:
        ext = os.path.splitext(filename)[1]
        try:
            content_type = MimeTypeRegistry.get_type_for_extension(ext)
        except KeyError:
            raise ValueError('Could not infer MIME type for file extension '
                             '"%s".' % ext)
    load_into_collection_from_stream(collection, open(filename, 'rU'),
                                     content_type)
<SYSTEM_TASK:> Loads resources from the representation contained in the given URL into <END_TASK>
<USER_TASK:> Description:
def load_into_collection_from_url(collection, url, content_type=None):
    """
    Loads resources from the representation contained in the given URL into
    the given collection resource.

    :returns: collection resource
    """
    parsed = urlparse.urlparse(url)
    scheme = parsed.scheme  # pylint: disable=E1101
    if scheme == 'file':
        # Assume a local path.
        load_into_collection_from_file(collection,
                                       parsed.path,  # pylint: disable=E1101
                                       content_type=content_type)
    else:
        raise ValueError('Unsupported URL scheme "%s".' % scheme)
<SYSTEM_TASK:> Creates a new collection for the registered resource and calls <END_TASK>
<USER_TASK:> Description:
def load_collection_from_url(resource, url, content_type=None):
    """
    Creates a new collection for the registered resource and calls
    `load_into_collection_from_url` with it.
    """
    coll = create_staging_collection(resource)
    load_into_collection_from_url(coll, url, content_type=content_type)
    return coll
<SYSTEM_TASK:> Loads resources contained in the given ZIP archive into each of the <END_TASK>
<USER_TASK:> Description:
def load_into_collections_from_zipfile(collections, zipfile):
    """
    Loads resources contained in the given ZIP archive into each of the
    given collections.

    The ZIP file is expected to contain a list of file names obtained with
    the :func:`get_collection_filename` function, each pointing to a file
    of zipped collection resource data.

    :param collections: sequence of collection resources
    :param str zipfile: ZIP file name
    """
    with ZipFile(zipfile) as zipf:
        names = zipf.namelist()
        name_map = dict([(os.path.splitext(name)[0], index)
                         for (index, name) in enumerate(names)])
        for coll in collections:
            coll_name = get_collection_name(coll)
            index = name_map.get(coll_name)
            if index is None:
                continue
            coll_fn = names[index]
            ext = os.path.splitext(coll_fn)[1]
            try:
                content_type = \
                    MimeTypeRegistry.get_type_for_extension(ext)
            except KeyError:
                raise ValueError('Could not infer MIME type for file '
                                 'extension "%s".' % ext)
            # Strings are always written as UTF-8 encoded byte strings when
            # the zip file is created, so we have to wrap the iterator into
            # a decoding step.
            coll_data = DecodingStream(zipf.open(coll_fn, 'r'))
            load_into_collection_from_stream(coll, coll_data, content_type)
<SYSTEM_TASK:> Builds a graph of dependencies among the given resource classes. <END_TASK>
<USER_TASK:> Description:
def build_resource_dependency_graph(resource_classes,
                                    include_backrefs=False):
    """
    Builds a graph of dependencies among the given resource classes.

    The dependency graph is a directed graph with member resource classes
    as nodes. An edge between two nodes represents a member or collection
    attribute.

    :param resource_classes: resource classes to determine interdependencies
      of.
    :type resource_classes: sequence of registered resources.
    :param bool include_backrefs: flag indicating if dependencies introduced
      by back-references (e.g., a child resource referencing its parent)
      should be included in the dependency graph.
    """
    def visit(mb_cls, grph, path, incl_backrefs):
        for attr_name in get_resource_class_attribute_names(mb_cls):
            if is_resource_class_terminal_attribute(mb_cls, attr_name):
                continue
            child_descr = getattr(mb_cls, attr_name)
            child_mb_cls = get_member_class(child_descr.attr_type)
            # We do not follow cyclic references back to a resource class
            # that is last in the path.
            if len(path) > 0 and child_mb_cls is path[-1] \
               and not incl_backrefs:
                continue
            if not grph.has_node(child_mb_cls):
                grph.add_node(child_mb_cls)
                path.append(mb_cls)
                visit(child_mb_cls, grph, path, incl_backrefs)
                path.pop()
            if not grph.has_edge((mb_cls, child_mb_cls)):
                grph.add_edge((mb_cls, child_mb_cls))
    dep_grph = digraph()
    for resource_class in resource_classes:
        mb_cls = get_member_class(resource_class)
        if not dep_grph.has_node(mb_cls):
            dep_grph.add_node(mb_cls)
            visit(mb_cls, dep_grph, [], include_backrefs)
    return dep_grph
<SYSTEM_TASK:> Traverses the graph of resources that is reachable from the given <END_TASK>
<USER_TASK:> Description:
def build_resource_graph(resource, dependency_graph=None):
    """
    Traverses the graph of resources that is reachable from the given
    resource.

    If a resource dependency graph is given, links to other resources are
    only followed if the dependency graph has an edge connecting the two
    corresponding resource classes; otherwise, a default graph is built
    which ignores all direct cyclic resource references.

    :resource: a :class:`everest.resources.MemberResource` instance.
    :returns: a :class:`ResourceGraph` instance representing the graph of
      resources reachable from the given resource.
    """
    def visit(rc, grph, dep_grph):
        mb_cls = type(rc)
        attr_map = get_resource_class_attributes(mb_cls)
        for attr_name, attr in iteritems_(attr_map):
            if is_resource_class_terminal_attribute(mb_cls, attr_name):
                continue
            # Only follow the resource attribute if the dependency graph
            # has an edge here.
            child_mb_cls = get_member_class(attr.attr_type)
            if not dep_grph.has_edge((mb_cls, child_mb_cls)):
                continue
            child_rc = getattr(rc, attr_name)
            if is_resource_class_collection_attribute(mb_cls, attr_name):
                for child_mb in child_rc:
                    if not grph.has_node(child_mb):
                        # Ignore cyclic references.
                        grph.add_node(child_mb)
                        grph.add_edge((rc, child_mb))
                        visit(child_mb, grph, dep_grph)
            else:
                # Member.
                if not grph.has_node(child_rc):
                    # Ignore cyclic references.
                    grph.add_node(child_rc)
                    grph.add_edge((rc, child_rc))
                    visit(child_rc, grph, dep_grph)
    if dependency_graph is None:
        dependency_graph = build_resource_dependency_graph(
            [get_member_class(resource)])
    graph = ResourceGraph()
    if provides_member_resource(resource):
        rcs = [resource]
    else:
        rcs = resource
    for rc in rcs:
        graph.add_node(rc)
        visit(rc, graph, dependency_graph)
    return graph
<SYSTEM_TASK:> Collects all resources connected to the given resource and returns a <END_TASK>
<USER_TASK:> Description:
def find_connected_resources(resource, dependency_graph=None):
    """
    Collects all resources connected to the given resource and returns a
    dictionary mapping member resource classes to new collections containing
    the members found.
    """
    # Build a resource_graph.
    resource_graph = \
        build_resource_graph(resource, dependency_graph=dependency_graph)
    entity_map = OrderedDict()
    for mb in topological_sorting(resource_graph):
        mb_cls = get_member_class(mb)
        ents = entity_map.get(mb_cls)
        if ents is None:
            ents = []
            entity_map[mb_cls] = ents
        ents.append(mb.get_entity())
    return entity_map
<SYSTEM_TASK:> Dumps the given resource and all resources linked to it into a set of <END_TASK>
<USER_TASK:> Description:
def to_files(self, resource, directory):
    """
    Dumps the given resource and all resources linked to it into a set of
    representation files in the given directory.
    """
    collections = self.__collect(resource)
    for (mb_cls, coll) in iteritems_(collections):
        fn = get_write_collection_path(mb_cls,
                                       self.__content_type,
                                       directory=directory)
        with open_text(os.path.join(directory, fn)) as strm:
            dump_resource(coll, strm, content_type=self.__content_type)
<SYSTEM_TASK:> Dumps the given resource and all resources linked to it into the given <END_TASK>
<USER_TASK:> Description:
def to_zipfile(self, resource, zipfile):
    """
    Dumps the given resource and all resources linked to it into the given
    ZIP file.
    """
    rpr_map = self.to_strings(resource)
    with ZipFile(zipfile, 'w') as zipf:
        for (mb_cls, rpr_string) in iteritems_(rpr_map):
            fn = get_collection_filename(mb_cls, self.__content_type)
            zipf.writestr(fn, rpr_string, compress_type=ZIP_DEFLATED)
<SYSTEM_TASK:> Returns True if batch_id exists in the history. <END_TASK>
<USER_TASK:> Description:
def exists(self, batch_id=None):
    """Returns True if batch_id exists in the history.
    """
    try:
        self.model.objects.get(batch_id=batch_id)
    except self.model.DoesNotExist:
        return False
    return True
<SYSTEM_TASK:> Populates the batch with unsaved model instances <END_TASK>
<USER_TASK:> Description:
def populate(self, deserialized_txs=None, filename=None, retry=None):
    """Populates the batch with unsaved model instances
    from a generator of deserialized objects.
    """
    if not deserialized_txs:
        raise BatchError("Failed to populate batch. There are no objects to add.")
    self.filename = filename
    if not self.filename:
        raise BatchError("Invalid filename. Got None")
    try:
        # Peek at the first item to validate the batch header; since
        # `deserialized_txs` is a generator, the second loop resumes
        # right after the first item.
        for deserialized_tx in deserialized_txs:
            self.peek(deserialized_tx)
            self.objects.append(deserialized_tx.object)
            break
        for deserialized_tx in deserialized_txs:
            self.objects.append(deserialized_tx.object)
    except DeserializationError as e:
        raise BatchDeserializationError(e) from e
    except JSONFileError as e:
        raise BatchDeserializationError(e) from e
<SYSTEM_TASK:> Peeks into first tx and sets self attrs or raise. <END_TASK>
<USER_TASK:> Description:
def peek(self, deserialized_tx):
    """Peeks into first tx and sets self attrs or raise.
    """
    self.batch_id = deserialized_tx.object.batch_id
    self.prev_batch_id = deserialized_tx.object.prev_batch_id
    self.producer = deserialized_tx.object.producer
    if self.batch_history.exists(batch_id=self.batch_id):
        raise BatchAlreadyProcessed(
            f"Batch {self.batch_id} has already been processed"
        )
    if self.prev_batch_id != self.batch_id:
        if not self.batch_history.exists(batch_id=self.prev_batch_id):
            raise InvalidBatchSequence(
                f"Invalid import sequence. History does not exist for prev_batch_id. "
                f"Got file='{self.filename}', prev_batch_id="
                f"{self.prev_batch_id}, batch_id={self.batch_id}."
            )
<SYSTEM_TASK:> Saves all model instances in the batch as model. <END_TASK>
<USER_TASK:> Description:
def save(self):
    """Saves all model instances in the batch as model.
    """
    saved = 0
    if not self.objects:
        raise BatchError("Save failed. Batch is empty")
    for deserialized_tx in self.objects:
        try:
            self.model.objects.get(pk=deserialized_tx.pk)
        except self.model.DoesNotExist:
            data = {}
            for field in self.model._meta.get_fields():
                try:
                    data.update({field.name: getattr(deserialized_tx,
                                                     field.name)})
                except AttributeError:
                    pass
            self.model.objects.create(**data)
            saved += 1
    return saved
<SYSTEM_TASK:> Imports the batch of outgoing transactions into <END_TASK>
<USER_TASK:> Description:
def import_batch(self, filename):
    """Imports the batch of outgoing transactions into
    model IncomingTransaction.
    """
    batch = self.batch_cls()
    json_file = self.json_file_cls(name=filename, path=self.path)
    try:
        deserialized_txs = json_file.deserialized_objects
    except JSONFileError as e:
        raise TransactionImporterError(e) from e
    try:
        batch.populate(deserialized_txs=deserialized_txs,
                       filename=json_file.name)
    except (
        BatchDeserializationError,
        InvalidBatchSequence,
        BatchAlreadyProcessed,
    ) as e:
        raise TransactionImporterError(e) from e
    batch.save()
    batch.update_history()
    return batch
<SYSTEM_TASK:> Iterator that returns N steps of <END_TASK>
<USER_TASK:> Description:
def _populateBuffer(self, stream, n):
    """
    Iterator that returns N steps of the genshi stream.

    Found that performance really sucks for n = 1 (0.5 requests/second
    for the root resources versus 80 requests/second for a blocking
    algorithm). Hopefully increasing the number of steps per timeslice
    will significantly improve performance.
    """
    try:
        for x in xrange(n):
            output = stream.next()
            self._buffer.write(output)
    except StopIteration, e:
        self._deferred.callback(None)
    except Exception, e:
        self._deferred.errback(e)
    else:
        self.delayedCall = reactor.callLater(CALL_DELAY,
                                             self._populateBuffer,
                                             stream, n)
<SYSTEM_TASK:> Create message, containing meta and data in df-envelope format. <END_TASK>
<USER_TASK:> Description:
def create_message(json_meta, data, data_type=0, version=b'\x00\x01@\x00'):
    """Create message, containing meta and data in df-envelope format.

    @json_meta - metadata
    @data - binary data
    @data_type - data type code for binary data
    @version - version of machine header

    @return - message as bytearray
    """
    __check_data(data)
    meta = __prepare_meta(json_meta)
    data = __compress(json_meta, data)
    header = __create_machine_header(
        json_meta, data, data_type, version)
    return header + meta + data
<SYSTEM_TASK:> Parse df message from bytearray. <END_TASK>
<USER_TASK:> Description:
def parse_message(message, nodata=False):
    """Parse df message from bytearray.

    @message - message data
    @nodata - do not load data

    @return - [binary header, metadata, binary data]
    """
    header = read_machine_header(message)
    h_len = __get_machine_header_length(header)
    meta_raw = message[h_len:h_len + header['meta_len']]
    meta = __parse_meta(meta_raw, header)
    data_start = h_len + header['meta_len']
    data = b''
    if not nodata:
        data = __decompress(
            meta,
            message[data_start:data_start + header['data_len']]
        )
    return header, meta, data
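A minimal round-trip sketch, assuming create_message and parse_message above are importable from the same module; the metadata dict and payload here are made up for illustration:

meta = {"type": "point"}           # illustrative metadata
payload = b"\x00\x01\x02\x03"      # illustrative binary data

message = create_message(meta, payload)            # header + meta + data
header, parsed_meta, parsed_data = parse_message(message)
assert parsed_data == payload      # compression round-trips the payload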
<SYSTEM_TASK:> Parse binary header. <END_TASK>
<USER_TASK:> Description:
def read_machine_header(data):
    """Parse binary header.

    @data - bytearray, contains binary header of file opened in 'rb' mode

    @return - parsed binary header
    """
    if isinstance(data, (bytes, bytearray)):
        stream = io.BytesIO(data)
    elif isinstance(data, io.BufferedReader):
        stream = data
    else:
        raise ValueError("data should be either bytearray or file 'rb' mode.")
    header = dict()
    header_type = stream.read(6)
    if header_type == b"#!\x00\x01@\x00":
        header['type'] = header_type[2:6]
        header['time'] = struct.unpack('>I', stream.read(4))[0]
        header['meta_type'] = struct.unpack('>I', stream.read(4))[0]
        header['meta_len'] = struct.unpack('>I', stream.read(4))[0]
        header['data_type'] = struct.unpack('>I', stream.read(4))[0]
        header['data_len'] = struct.unpack('>I', stream.read(4))[0]
        stream.read(4)
    elif header_type == b"#~DF02":
        header['type'] = header_type[2:6]
        header['meta_type'] = stream.read(2)
        header['meta_len'] = struct.unpack('>I', stream.read(4))[0]
        header['data_len'] = struct.unpack('>I', stream.read(4))[0]
        stream.read(4)
    else:
        raise NotImplementedError(
            "Parser for machine header %s not implemented" %
            (header_type.decode()))
    return header
<SYSTEM_TASK:> Extract complete messages from stream and cut them out of the stream. <END_TASK>
<USER_TASK:> Description:
def get_messages_from_stream(data):
    """Extract complete messages from stream and cut them out of the stream.

    @data - stream binary data

    @return - [list of messages, chopped stream data]
    """
    messages = []
    iterator = HEADER_RE.finditer(data)
    last_pos = 0
    for match in iterator:
        pos = match.span()[0]
        header = read_machine_header(data[pos:])
        h_len = __get_machine_header_length(header)
        cur_last_pos = pos + h_len + header['meta_len'] + header['data_len']
        if cur_last_pos > len(data):
            break
        header, meta, bin_data = parse_message(data[pos:])
        messages.append({'header': header, 'meta': meta, 'data': bin_data})
        last_pos = cur_last_pos
    data = data[last_pos:]
    return messages, data
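A sketch of incremental use under the same module-level assumptions: accumulate network chunks in a buffer and drain complete messages as they arrive. The chunk source and handler are hypothetical:

buffer = b""
for chunk in read_chunks():        # hypothetical source, e.g. socket.recv()
    buffer += chunk
    messages, buffer = get_messages_from_stream(buffer)
    for msg in messages:
        handle(msg["meta"], msg["data"])   # hypothetical handler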
<SYSTEM_TASK:> Returns a clone of this mapping that is configured with the given <END_TASK>
<USER_TASK:> Description:
def clone(self, options=None, attribute_options=None):
    """
    Returns a clone of this mapping that is configured with the given
    option and attribute option dictionaries.

    :param dict options: Maps representer options to their values.
    :param dict attribute_options: Maps attribute names to dictionaries
      mapping attribute options to their values.
    """
    copied_cfg = self.__configurations[-1].copy()
    upd_cfg = type(copied_cfg)(options=options,
                               attribute_options=attribute_options)
    copied_cfg.update(upd_cfg)
    return self.__class__(self.__mp_reg, self.__mapped_cls, self.__de_cls,
                          copied_cfg)
<SYSTEM_TASK:> Updates this mapping with the given option and attribute option maps. <END_TASK>
<USER_TASK:> Description:
def update(self, options=None, attribute_options=None):
    """
    Updates this mapping with the given option and attribute option maps.

    :param dict options: Maps representer options to their values.
    :param dict attribute_options: Maps attribute names to dictionaries
      mapping attribute options to their values.
    """
    attr_map = self.__get_attribute_map(self.__mapped_cls, None, 0)
    for attributes in attribute_options:
        for attr_name in attributes:
            if not attr_name in attr_map:
                raise AttributeError('Trying to configure non-existing '
                                     'resource attribute "%s"'
                                     % (attr_name))
    cfg = RepresenterConfiguration(options=options,
                                   attribute_options=attribute_options)
    self.configuration.update(cfg)
<SYSTEM_TASK:> Returns an ordered map of the mapped attributes for the given mapped <END_TASK>
<USER_TASK:> Description:
def get_attribute_map(self, mapped_class=None, key=None):
    """
    Returns an ordered map of the mapped attributes for the given mapped
    class and attribute key.

    :param key: Tuple of attribute names specifying a path to a nested
      attribute in a resource tree. If this is not given, all attributes
      in this mapping will be returned.
    """
    if mapped_class is None:
        mapped_class = self.__mapped_cls
    if key is None:
        key = MappedAttributeKey(())
    return OrderedDict([(attr.resource_attr, attr)
                        for attr in self._attribute_iterator(mapped_class,
                                                             key)])
<SYSTEM_TASK:> Returns a new data element for the given mapped class. <END_TASK>
<USER_TASK:> Description:
def create_data_element(self, mapped_class=None):
    """
    Returns a new data element for the given mapped class.

    :returns: object implementing :class:`IResourceDataElement`.
    """
    if not mapped_class is None and mapped_class != self.__mapped_cls:
        mp = self.__mp_reg.find_or_create_mapping(mapped_class)
        data_el = mp.create_data_element()
    else:
        data_el = self.__de_cls.create()
    return data_el
<SYSTEM_TASK:> Returns a new linked data element for the given url and kind. <END_TASK>
<USER_TASK:> Description:
def create_linked_data_element(self, url, kind,
                               id=None,  # pylint: disable=W0622
                               relation=None, title=None):
    """
    Returns a new linked data element for the given url and kind.

    :param str url: URL to assign to the linked data element.
    :param str kind: kind of the resource that is linked. One of the
      constants defined by :class:`everest.constants.RESOURCE_KINDS`.
    :returns: object implementing :class:`ILinkedDataElement`.
    """
    mp = self.__mp_reg.find_or_create_mapping(Link)
    return mp.data_element_class.create(url, kind, id=id,
                                        relation=relation, title=title)
<SYSTEM_TASK:> Maps the given data element to a new resource or updates the given <END_TASK>
<USER_TASK:> Description:
def map_to_resource(self, data_element, resource=None):
    """
    Maps the given data element to a new resource or updates the given
    resource.

    :raises ValueError: If :param:`data_element` does not provide
      :class:`everest.representers.interfaces.IDataElement`.
    """
    if not IDataElement.providedBy(data_element):  # pylint:disable=E1101
        raise ValueError('Expected data element, got %s.' % data_element)
    if resource is None:
        coll = \
            create_staging_collection(data_element.mapping.mapped_class)
        agg = coll.get_aggregate()
        agg.add(data_element)
        if IMemberDataElement.providedBy(data_element):  # pylint: disable=E1101
            ent = next(iter(agg))
            resource = \
                data_element.mapping.mapped_class.create_from_entity(ent)
        else:
            resource = coll
    else:
        resource.update(data_element)
    return resource
<SYSTEM_TASK:> Maps the given resource to a data element tree. <END_TASK>
<USER_TASK:> Description:
def map_to_data_element(self, resource):
    """
    Maps the given resource to a data element tree.
    """
    trv = ResourceTreeTraverser(resource, self.as_pruning())
    visitor = DataElementBuilderResourceTreeVisitor(self)
    trv.run(visitor)
    return visitor.data_element
<SYSTEM_TASK:> Pushes the given configuration object on the stack of configurations <END_TASK>
<USER_TASK:> Description:
def push_configuration(self, configuration):
    """
    Pushes the given configuration object on the stack of configurations
    managed by this mapping and makes it the active configuration.
    """
    self.__mapped_attr_cache.clear()
    self.__configurations.append(configuration)
<SYSTEM_TASK:> Pops the currently active configuration from the stack of <END_TASK>
<USER_TASK:> Description:
def pop_configuration(self):
    """
    Pops the currently active configuration from the stack of
    configurations managed by this mapping.

    :raises IndexError: If there is only one configuration in the stack.
    """
    if len(self.__configurations) == 1:
        raise IndexError('Can not pop the last configuration from the '
                         'stack of configurations.')
    self.__configurations.pop()
    self.__mapped_attr_cache.clear()
<SYSTEM_TASK:> Returns a context in which this mapping is updated with the given <END_TASK>
<USER_TASK:> Description:
def with_updated_configuration(self, options=None, attribute_options=None):
    """
    Returns a context in which this mapping is updated with the given
    options and attribute options.
    """
    new_cfg = self.__configurations[-1].copy()
    if not options is None:
        for o_name, o_value in iteritems_(options):
            new_cfg.set_option(o_name, o_value)
    if not attribute_options is None:
        for attr_name, ao_opts in iteritems_(attribute_options):
            for ao_name, ao_value in iteritems_(ao_opts):
                new_cfg.set_attribute_option(attr_name, ao_name, ao_value)
    return MappingConfigurationContext(self, new_cfg)
<SYSTEM_TASK:> Returns an iterator over the attributes in this mapping for the <END_TASK>
<USER_TASK:> Description:
def _attribute_iterator(self, mapped_class, key):
    """
    Returns an iterator over the attributes in this mapping for the
    given mapped class and attribute key.

    If this is a pruning mapping, attributes that are ignored because of
    a custom configuration or because of the default ignore rules are
    skipped.
    """
    for attr in \
            itervalues_(self.__get_attribute_map(mapped_class, key, 0)):
        if self.is_pruning:
            do_ignore = attr.should_ignore(key)
        else:
            do_ignore = False
        if not do_ignore:
            yield attr
<SYSTEM_TASK:> Creates a new mapping for the given mapped class and representer <END_TASK>
<USER_TASK:> Description:
def create_mapping(self, mapped_class, configuration=None):
    """
    Creates a new mapping for the given mapped class and representer
    configuration.

    :param configuration: configuration for the new data element class.
    :type configuration: :class:`RepresenterConfiguration`
    :returns: newly created instance of :class:`Mapping`
    """
    cfg = self.__configuration.copy()
    if not configuration is None:
        cfg.update(configuration)
    provided_ifcs = provided_by(object.__new__(mapped_class))
    if IMemberResource in provided_ifcs:
        base_data_element_class = self.member_data_element_base_class
    elif ICollectionResource in provided_ifcs:
        base_data_element_class = self.collection_data_element_base_class
    elif IResourceLink in provided_ifcs:
        base_data_element_class = self.linked_data_element_base_class
    else:
        raise ValueError('Mapped class for data element class does not '
                         'implement one of the required interfaces.')
    name = "%s%s" % (mapped_class.__name__,
                     base_data_element_class.__name__)
    de_cls = type(name, (base_data_element_class,), {})
    mp = self.mapping_class(self, mapped_class, de_cls, cfg)
    # Set the data element class' mapping.
    # FIXME: This looks like a hack.
    de_cls.mapping = mp
    return mp
<SYSTEM_TASK:> Returns the mapping registered for the given mapped class or any of <END_TASK>
<USER_TASK:> Description:
def find_mapping(self, mapped_class):
    """
    Returns the mapping registered for the given mapped class or any of
    its base classes. Returns `None` if no mapping can be found.

    :param mapped_class: mapped type
    :type mapped_class: type
    :returns: instance of :class:`Mapping` or `None`
    """
    if not self.__is_initialized:
        self.__is_initialized = True
        self._initialize()
    mapping = None
    for base_cls in mapped_class.__mro__:
        try:
            mapping = self.__mappings[base_cls]
        except KeyError:
            continue
        else:
            break
    return mapping
<SYSTEM_TASK:> Class decorator that wraps every single method in its own method decorator <END_TASK>
<USER_TASK:> Description:
def eachMethod(decorator, methodFilter=lambda fName: True):
    """
    Class decorator that wraps every single method in its own method
    decorator.

    methodFilter: a function which accepts a function name and should
    return True if the method is one which we want to decorate, False if
    we want to leave this method alone.

    methodFilter can also be simply a string prefix. If it is a string,
    it is assumed to be the prefix we're looking for.
    """
    if isinstance(methodFilter, basestring):
        # It is a string, so change it into a function that takes a string.
        prefix = methodFilter
        methodFilter = lambda fName: fName.startswith(prefix)
    ismethod = lambda fn: inspect.ismethod(fn) or inspect.isfunction(fn)
    def innerDeco(cls):
        assert inspect.isclass(cls), \
            "eachMethod is designed to be used only on classes"
        for fName, fn in inspect.getmembers(cls):
            if methodFilter(fName):
                if ismethod(fn):
                    # We attempt to avoid decorating staticmethods by
                    # looking for an arg named cls or self; this is a
                    # kludge, but there's no other way to tell, and
                    # staticmethods do not work correctly with eachMethod.
                    if getargspec(fn).args[0] not in ['cls', 'self']:
                        continue
                    setattr(cls, fName, decorator(fn))
        return cls
    return innerDeco
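A usage sketch, assuming eachMethod is importable; the logged decorator and Widget class are made up for illustration:

def logged(fn):
    def wrapper(self, *args, **kwargs):
        print("calling %s" % fn.__name__)
        return fn(self, *args, **kwargs)
    return wrapper

@eachMethod(logged, 'render_')
class Widget(object):
    def render_header(self):
        return '<h1>'

    def helper(self):
        # untouched: the name does not start with the 'render_' prefix
        return None

Widget().render_header()   # prints "calling render_header"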
<SYSTEM_TASK:> Return the path to a sibling of a file in the filesystem. <END_TASK>
<USER_TASK:> Description:
def _sibpath(path, sibling):
    """
    Return the path to a sibling of a file in the filesystem.

    This is useful in conjunction with the special C{__file__} attribute
    that Python provides for modules, so modules can load associated
    resource files.

    (Stolen from twisted.python.util)
    """
    return os.path.join(os.path.dirname(os.path.abspath(path)), sibling)
<SYSTEM_TASK:> Global cache decorator <END_TASK>
<USER_TASK:> Description:
def cache(cls, func):
    """
    Global cache decorator

    :param func: the function to be decorated
    :return: the decorator
    """
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        func_key = cls.get_key(func)
        val_cache = cls.get_cache(func_key)
        lock = cls.get_cache_lock(func_key)
        return cls._get_value_from_cache(
            func, val_cache, lock, *args, **kwargs)
    return func_wrapper
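A usage sketch, assuming the enclosing class is exposed under the hypothetical name Cache with cache bound as a classmethod decorator as above:

class RateApi(object):
    @Cache.cache
    def fetch(self, currency):
        print("computing %s..." % currency)   # runs once per distinct args
        return {"EUR": 1.08}.get(currency)

api = RateApi()
api.fetch("EUR")   # computes and stores in the shared per-function cache
api.fetch("EUR")   # served from the cache; nothing printed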
<SYSTEM_TASK:> Save the cache to `self` <END_TASK>
<USER_TASK:> Description:
def instance_cache(cls, func):
    """
    Save the cache to `self`

    This decorator takes it for granted that the decorated function is a
    method. The first argument of the function is `self`.

    :param func: function to decorate
    :return: the decorator
    """
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        if not args:
            raise ValueError('`self` is not available.')
        else:
            the_self = args[0]
        func_key = cls.get_key(func)
        val_cache = cls.get_self_cache(the_self, func_key)
        lock = cls.get_self_cache_lock(the_self, func_key)
        return cls._get_value_from_cache(
            func, val_cache, lock, *args, **kwargs)
    return func_wrapper
<SYSTEM_TASK:> clear the instance cache <END_TASK>
<USER_TASK:> Description:
def clear_instance_cache(cls, func):
    """
    clear the instance cache

    Decorate a method of a class; the first parameter is supposed to be
    `self`. It clears all items cached by the `instance_cache` decorator.

    :param func: function to decorate
    """
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        if not args:
            raise ValueError('`self` is not available.')
        else:
            the_self = args[0]
        cls.clear_self_cache(the_self)
        return func(*args, **kwargs)
    return func_wrapper
<SYSTEM_TASK:> Cache the return of the function for given time. <END_TASK>
<USER_TASK:> Description:
def persisted(cls, seconds=0, minutes=0, hours=0, days=0, weeks=0):
    """
    Cache the return of the function for given time. Default to 1 day.

    :param weeks: as name
    :param seconds: as name
    :param minutes: as name
    :param hours: as name
    :param days: as name
    :return: return of the function decorated
    """
    days += weeks * 7
    hours += days * 24
    minutes += hours * 60
    seconds += minutes * 60
    if seconds == 0:
        # default to 1 day
        seconds = 24 * 60 * 60

    def get_persisted_file(hash_number):
        folder = cls.get_persist_folder()
        if not os.path.exists(folder):
            os.makedirs(folder)
        return os.path.join(folder, '{}.pickle'.format(hash_number))

    def is_expired(filename):
        if os.path.exists(filename):
            file_age = cls.get_file_age(filename)
            if file_age > seconds:
                log.debug('persisted cache expired: {}'.format(filename))
                ret = True
            else:
                ret = False
        else:
            ret = True
        return ret

    def decorator(func):
        def func_wrapper(*args, **kwargs):
            def _key_gen():
                string = '{}-{}-{}-{}'.format(
                    func.__module__,
                    func.__name__,
                    args,
                    kwargs.items()
                )
                return hashlib.sha256(string.encode('utf-8')).hexdigest()

            key = _key_gen()
            persisted_file = get_persisted_file(key)
            if is_expired(persisted_file):
                ret = func(*args, **kwargs)
                with open(persisted_file, 'wb') as f:
                    pickle.dump(ret, f)
            else:
                with open(persisted_file, 'rb') as f:
                    ret = pickle.load(f)
            return ret
        return func_wrapper
    return decorator
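A usage sketch for the disk-backed variant, under the same hypothetical Cache naming; the slow function is made up for illustration:

@Cache.persisted(hours=6)
def fetch_report(day):
    # stand-in for a slow network fetch; the pickled result is reused
    # for six hours, keyed on (module, name, args, kwargs)
    return {"day": day, "rows": []}

fetch_report("2023-01-01")   # computed and pickled to the persist folder
fetch_report("2023-01-01")   # loaded from the pickle until it expires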
<SYSTEM_TASK:> Get all repository events, following paging, until the end <END_TASK>
<USER_TASK:> Description:
def getEvents(self, repo_user, repo_name, until_id=None):
    """Get all repository events, following paging, until the end
    or until UNTIL_ID is seen.

    Returns a Deferred."""
    # The yield/defer.returnValue pattern implies this method is wrapped
    # with defer.inlineCallbacks.
    done = False
    page = 0
    events = []
    while not done:
        new_events = yield self.api.makeRequest(
            ['repos', repo_user, repo_name, 'events'],
            page)
        # terminate if we find a matching ID
        if new_events:
            for event in new_events:
                if event['id'] == until_id:
                    done = True
                    break
                events.append(event)
        else:
            done = True
        page += 1
    defer.returnValue(events)
<SYSTEM_TASK:> Utility for finding a virtualenv location based on a project path <END_TASK>
<USER_TASK:> Description:
def from_project_path(cls, path):
    """Utility for finding a virtualenv location based on a project path"""
    path = vistir.compat.Path(path)
    if path.name == 'Pipfile':
        pipfile_path = path
        path = path.parent
    else:
        pipfile_path = path / 'Pipfile'
    pipfile_location = cls.normalize_path(pipfile_path)
    venv_path = path / '.venv'
    if venv_path.exists():
        if not venv_path.is_dir():
            possible_path = vistir.compat.Path(venv_path.read_text().strip())
            if possible_path.exists():
                return cls(possible_path.as_posix())
        else:
            if venv_path.joinpath('lib').exists():
                return cls(venv_path.as_posix())
    sanitized = re.sub(r'[ $`!*@"\\\r\n\t]', "_", path.name)[0:42]
    hash_ = hashlib.sha256(pipfile_location.encode()).digest()[:6]
    encoded_hash = base64.urlsafe_b64encode(hash_).decode()
    hash_fragment = encoded_hash[:8]
    venv_name = "{0}-{1}".format(sanitized, hash_fragment)
    return cls(cls.get_workon_home().joinpath(venv_name).as_posix())
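The venv-name derivation at the end can be illustrated standalone; this mirrors the hashing scheme above (sanitized project name plus the first eight characters of a urlsafe-base64 SHA-256 of the normalized Pipfile path; the example path is made up):

import base64
import hashlib

pipfile_location = "/home/user/myproject/Pipfile"   # example path
sanitized = "myproject"[0:42]                       # truncated project name
hash_ = hashlib.sha256(pipfile_location.encode()).digest()[:6]
hash_fragment = base64.urlsafe_b64encode(hash_).decode()[:8]
print("{0}-{1}".format(sanitized, hash_fragment))   # "myproject-" + 8-char fragment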
<SYSTEM_TASK:> Get setup.py install args for installing the supplied package in the virtualenv <END_TASK>
<USER_TASK:> Description:
def get_setup_install_args(self, pkgname, setup_py, develop=False):
    """Get setup.py install args for installing the supplied package in the virtualenv

    :param str pkgname: The name of the package to install
    :param str setup_py: The path to the setup file of the package
    :param bool develop: Whether the package is in development mode
    :return: The installation arguments to pass to the interpreter when
        installing
    :rtype: list
    """
    headers = self.base_paths["headers"]
    headers = headers / "python{0}".format(self.python_version) / pkgname
    install_arg = "install" if not develop else "develop"
    return [
        self.python, "-u", "-c", SETUPTOOLS_SHIM % setup_py,
        install_arg,
        "--single-version-externally-managed",
        "--install-headers={0}".format(self.base_paths["headers"]),
        "--install-purelib={0}".format(self.base_paths["purelib"]),
        "--install-platlib={0}".format(self.base_paths["platlib"]),
        "--install-scripts={0}".format(self.base_paths["scripts"]),
        "--install-data={0}".format(self.base_paths["data"]),
    ]
<SYSTEM_TASK:> Install an sdist or an editable package into the virtualenv <END_TASK>
<USER_TASK:> Description:
def setuptools_install(self, chdir_to, pkg_name, setup_py_path=None,
                       editable=False):
    """Install an sdist or an editable package into the virtualenv

    :param str chdir_to: The location to change to
    :param str setup_py_path: The path to the setup.py, if applicable,
        defaults to None
    :param bool editable: Whether the package is editable, defaults to False
    """
    install_options = ["--prefix={0}".format(self.prefix.as_posix())]
    with vistir.contextmanagers.cd(chdir_to):
        c = self.run(
            self.get_setup_install_args(pkg_name, setup_py_path,
                                        develop=editable) + install_options,
            cwd=chdir_to
        )
    return c.returncode
<SYSTEM_TASK:> Install a package into the virtualenv <END_TASK>
<USER_TASK:> Description:
def install(self, req, editable=False, sources=[]):
    """Install a package into the virtualenv

    :param req: A requirement to install
    :type req: :class:`requirementslib.models.requirement.Requirement`
    :param bool editable: Whether the requirement is editable, defaults to
        False
    :param list sources: A list of pip sources to consult, defaults to []
    :return: A return code, 0 if successful
    :rtype: int
    """
    try:
        packagebuilder = self.safe_import("packagebuilder")
    except ImportError:
        packagebuilder = None
    with self.activated(include_extras=False):
        if not packagebuilder:
            return 2
        ireq = req.as_ireq()
        sources = self.filter_sources(req, sources)
        cache_dir = os.environ.get(
            'PASSA_CACHE_DIR',
            os.environ.get(
                'PIPENV_CACHE_DIR',
                vistir.path.create_tracked_tempdir(prefix="passabuild")
            )
        )
        built = packagebuilder.build.build(ireq, sources, cache_dir)
        if isinstance(built, distlib.wheel.Wheel):
            maker = distlib.scripts.ScriptMaker(None, None)
            built.install(self.paths, maker)
        else:
            path = vistir.compat.Path(built.path)
            cd_path = path.parent
            setup_py = cd_path.joinpath("setup.py")
            return self.setuptools_install(
                cd_path.as_posix(), req.name, setup_py.as_posix(),
                editable=req.editable
            )
        return 0
<SYSTEM_TASK:> A context manager which activates the virtualenv. <END_TASK>
<USER_TASK:> Description:
def activated(self, include_extras=True, extra_dists=[]):
    """A context manager which activates the virtualenv.

    :param list extra_dists: Paths added to the context after the
        virtualenv is activated.

    This context manager sets the following environment variables:
        * `PYTHONUSERBASE`
        * `VIRTUAL_ENV`
        * `PYTHONIOENCODING`
        * `PYTHONDONTWRITEBYTECODE`

    In addition, it activates the virtualenv inline by calling
    `activate_this.py`.
    """
    original_path = sys.path
    original_prefix = sys.prefix
    original_user_base = os.environ.get("PYTHONUSERBASE", None)
    original_venv = os.environ.get("VIRTUAL_ENV", None)
    parent_path = vistir.compat.Path(__file__).absolute().parent.parent.as_posix()
    prefix = self.prefix.as_posix()
    with vistir.contextmanagers.temp_environ(), \
            vistir.contextmanagers.temp_path():
        os.environ["PATH"] = os.pathsep.join([
            vistir.compat.fs_str(self.scripts_dir),
            vistir.compat.fs_str(self.prefix.as_posix()),
            os.environ.get("PATH", "")
        ])
        os.environ["PYTHONIOENCODING"] = vistir.compat.fs_str("utf-8")
        os.environ["PYTHONDONTWRITEBYTECODE"] = vistir.compat.fs_str("1")
        os.environ["PATH"] = self.base_paths["PATH"]
        os.environ["PYTHONPATH"] = self.base_paths["PYTHONPATH"]
        if self.is_venv:
            os.environ["VIRTUAL_ENV"] = vistir.compat.fs_str(prefix)
        sys.path = self.sys_path
        sys.prefix = self.sys_prefix
        site.addsitedir(self.base_paths["purelib"])
        if include_extras:
            site.addsitedir(parent_path)
            extra_dists = list(self.extra_dists) + extra_dists
            for extra_dist in extra_dists:
                if extra_dist not in self.get_working_set():
                    extra_dist.activate(self.sys_path)
            sys.modules["recursive_monkey_patch"] = self.recursive_monkey_patch
        try:
            yield
        finally:
            del os.environ["VIRTUAL_ENV"]
            if original_user_base:
                os.environ["PYTHONUSERBASE"] = original_user_base
            if original_venv:
                os.environ["VIRTUAL_ENV"] = original_venv
            sys.path = original_path
            sys.prefix = original_prefix
            six.moves.reload_module(pkg_resources)
<SYSTEM_TASK:> Returns a monkeypatched `UninstallPathset` for using to uninstall packages from the virtualenv <END_TASK>
<USER_TASK:> Description:
def get_monkeypatched_pathset(self):
    """Returns a monkeypatched `UninstallPathset` for using to uninstall
    packages from the virtualenv

    :return: A patched `UninstallPathset` which enables uninstallation of
        venv packages
    :rtype: :class:`pip._internal.req.req_uninstall.UninstallPathset`
    """
    from pip_shims.shims import InstallRequirement
    # Determine the path to the uninstall module name based on the
    # install module name.
    uninstall_path = InstallRequirement.__module__.replace(
        "req_install", "req_uninstall"
    )
    req_uninstall = self.safe_import(uninstall_path)
    self.recursive_monkey_patch.monkey_patch(
        PatchedUninstaller, req_uninstall.UninstallPathSet
    )
    return req_uninstall.UninstallPathSet
<SYSTEM_TASK:> A context manager which allows uninstallation of packages from the virtualenv <END_TASK>
<USER_TASK:> Description:
def uninstall(self, pkgname, *args, **kwargs):
    """A context manager which allows uninstallation of packages from the
    virtualenv

    :param str pkgname: The name of a package to uninstall

    >>> venv = VirtualEnv("/path/to/venv/root")
    >>> with venv.uninstall("pytz", auto_confirm=True, verbose=False) as uninstaller:
            cleaned = uninstaller.paths
    >>> if cleaned:
            print("uninstalled packages: %s" % cleaned)
    """
    auto_confirm = kwargs.pop("auto_confirm", True)
    verbose = kwargs.pop("verbose", False)
    with self.activated():
        pathset_base = self.get_monkeypatched_pathset()
        dist = next(
            iter(filter(lambda d: d.project_name == pkgname,
                        self.get_working_set())),
            None
        )
        pathset = pathset_base.from_dist(dist)
        if pathset is not None:
            pathset.remove(auto_confirm=auto_confirm, verbose=verbose)
        try:
            yield pathset
        except Exception as e:
            if pathset is not None:
                pathset.rollback()
        else:
            if pathset is not None:
                pathset.commit()
        if pathset is None:
            return
<SYSTEM_TASK:> Used by Ajax calls <END_TASK>
<USER_TASK:> Description:
def upload_progress(request):
    """
    Used by Ajax calls

    Return the upload progress and total length values
    """
    progress_id = None  # guard against neither header nor GET param being set
    if 'X-Progress-ID' in request.GET:
        progress_id = request.GET['X-Progress-ID']
    elif 'X-Progress-ID' in request.META:
        progress_id = request.META['X-Progress-ID']
    if progress_id:
        cache_key = "%s_%s" % (request.META['REMOTE_ADDR'], progress_id)
        data = cache.get(cache_key)
        return HttpResponse(simplejson.dumps(data))
<SYSTEM_TASK:> Updates username created on ADD only. <END_TASK>
<USER_TASK:> Description:
def pre_save(self, model_instance, add):
    """Updates username created on ADD only."""
    value = super(UserField, self).pre_save(model_instance, add)
    if not value and not add:
        # fall back to OS user if not accessing through browser;
        # better than nothing ...
        value = self.get_os_username()
        setattr(model_instance, self.attname, value)
        return value
    return value
<SYSTEM_TASK:> Returns this site-package esri toolbox directory. <END_TASK>
<USER_TASK:> Description:
def sys_toolbox_dir():
    """
    Returns this site-package esri toolbox directory.
    """
    return os.path.join(
        os.path.dirname(os.path.abspath(__file__)), 'esri', 'toolboxes')
<SYSTEM_TASK:> Generate a list of elements from the markov chain. <END_TASK>
<USER_TASK:> Description:
def random_output(self, max=100):
    """
    Generate a list of elements from the markov chain.

    The `max` value is in place in order to prevent excessive iteration.
    """
    output = []
    item1 = item2 = MarkovChain.START
    for i in range(max - 3):
        item3 = self[(item1, item2)].roll()
        if item3 is MarkovChain.END:
            break
        output.append(item3)
        item1 = item2
        item2 = item3
    return output
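A generation sketch, assuming `chain` is an already-trained MarkovChain keyed on (item, item) pairs whose values expose roll() as implied above; the training API is not shown in this record:

words = chain.random_output(max=50)   # END is never included in the output
print(' '.join(str(w) for w in words))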
<SYSTEM_TASK:> Start the periodic runner <END_TASK>
<USER_TASK:> Description:
def start(self):
    """
    Start the periodic runner
    """
    if self._isRunning:
        return
    if self._cease.is_set():
        self._cease.clear()  # restart

    class Runner(threading.Thread):
        @classmethod
        def run(cls):
            nextRunAt = cls.setNextRun()
            while not self._cease.is_set():
                if datetime.now() >= nextRunAt:
                    self._run()
                    nextRunAt = cls.setNextRun()

        @classmethod
        def setNextRun(cls):
            return self._interval.nextRunAt()

    runner = Runner()
    runner.setDaemon(True)
    runner.start()
    self._isRunning = True
<SYSTEM_TASK:> Change parameter of the callback function. <END_TASK>
<USER_TASK:> Description:
def useThis(self, *args, **kwargs):
    """
    Change parameter of the callback function.

    :param *args, **kwargs: parameter(s) to use when executing the
        callback function.
    """
    self._callback = functools.partial(self._callback, *args, **kwargs)
<SYSTEM_TASK:> Stop the periodic runner <END_TASK>
<USER_TASK:> Description:
def stop(self):
    """
    Stop the periodic runner
    """
    self._cease.set()
    time.sleep(0.1)  # let the thread close correctly
    self._isRunning = False
<SYSTEM_TASK:> check JWT, then check session for validity <END_TASK>
<USER_TASK:> Description:
def condition(self) -> bool:
    """ check JWT, then check session for validity """
    jwt = JWT()
    if jwt.verify_http_auth_token():
        if not current_app.config['AUTH']['FAST_SESSIONS']:
            session = SessionModel.where_session_id(
                jwt.data['session_id'])
            if session is None:
                return False
        Session.set_current_session(jwt.data['session_id'])
        return True
    return False
<SYSTEM_TASK:> render as hidden widget <END_TASK>
<USER_TASK:> Description:
def render_hidden(name, value):
    """ render as hidden widget """
    if isinstance(value, list):
        return MultipleHiddenInput().render(name, value)
    return HiddenInput().render(name, value)
<SYSTEM_TASK:> Deserializes all transactions for this batch and <END_TASK>
<USER_TASK:> Description:
def next_task(self, item, raise_exceptions=None, **kwargs):
    """Deserializes all transactions for this batch and
    archives the file.
    """
    filename = os.path.basename(item)
    batch = self.get_batch(filename)
    tx_deserializer = self.tx_deserializer_cls(
        allow_self=self.allow_self, override_role=self.override_role
    )
    try:
        tx_deserializer.deserialize_transactions(
            transactions=batch.saved_transactions
        )
    except (DeserializationError, TransactionDeserializerError) as e:
        raise TransactionsFileQueueError(e) from e
    else:
        batch.close()
        self.archive(filename)
<SYSTEM_TASK:> Returns a batch instance given the filename. <END_TASK>
<USER_TASK:> Description:
def get_batch(self, filename=None):
    """Returns a batch instance given the filename.
    """
    try:
        history = self.history_model.objects.get(filename=filename)
    except self.history_model.DoesNotExist as e:
        raise TransactionsFileQueueError(
            f"Batch history not found for '{filename}'."
        ) from e
    if history.consumed:
        raise TransactionsFileQueueError(
            f"Batch closed for '{filename}'. Got consumed=True"
        )
    batch = self.batch_cls()
    batch.batch_id = history.batch_id
    batch.filename = history.filename
    return batch
<SYSTEM_TASK:> Get state, county, zipcode, address code from lists page. <END_TASK>
<USER_TASK:> Description:
def get_items(self, html):
    """
    Get state, county, zipcode, address code from lists page.

    Example: target url: http://www.zillow.com/browse/homes/md/
    data: ``[(href, name), ...]``
    """
    captcha_patterns = [
        "https://www.google.com/recaptcha/api.js",
        "I'm not a robot",
    ]
    for captcha_pattern in captcha_patterns:
        if captcha_pattern in html:
            raise exc.CaptchaError("Found %r in html!" % captcha_pattern)
    data = list()
    soup = self.to_soup(html)
    div = soup.find("div", class_="zsg-lg-1-2 zsg-sm-1-1")
    for li in div.find_all("li"):
        a = li.find_all("a")[0]
        href = a["href"]
        name = a.text.strip()
        data.append((href, name))
    return data
<SYSTEM_TASK:> Based on a Location4D object and this Diel object, calculate <END_TASK>
<USER_TASK:> Description:
def get_time(self, loc4d=None):
    """
    Based on a Location4D object and this Diel object, calculate
    the time at which this Diel migration is actually happening
    """
    if loc4d is None:
        raise ValueError("Location4D object can not be None")

    if self.pattern == self.PATTERN_CYCLE:
        c = SunCycles.cycles(loc=loc4d)
        if self.cycle == self.CYCLE_SUNRISE:
            r = c[SunCycles.RISING]
        elif self.cycle == self.CYCLE_SUNSET:
            r = c[SunCycles.SETTING]
        td = timedelta(hours=self.time_delta)
        if self.plus_or_minus == self.HOURS_PLUS:
            r = r + td
        elif self.plus_or_minus == self.HOURS_MINUS:
            r = r - td
        return r
    elif self.pattern == self.PATTERN_SPECIFICTIME:
        return self._time.replace(year=loc4d.time.year,
                                  month=loc4d.time.month,
                                  day=loc4d.time.day)
<SYSTEM_TASK:> This only works if min is less than max. <END_TASK>
<USER_TASK:> Description:
def move(self, particle, u, v, w, modelTimestep, **kwargs):
    # If the particle is settled, don't move it anywhere
    if particle.settled:
        return { 'u': 0, 'v': 0, 'w': 0 }

    # If the particle is halted (but not settled), don't move it anywhere
    if particle.halted:
        return { 'u': 0, 'v': 0, 'w': 0 }

    # How far could I move?  We don't want to overshoot our desired depth.
    vertical_potential = w * modelTimestep

    """
    This only works if min is less than max.
    No checks are done here, so it should be done before calling this
    function.
    """

    """
    I'm below my desired max depth, so I need to go up
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    -------------------------------------- min

    -------------------------------------- max
        x  me
    ______________________________________
    """
    if particle.location.depth < self.max_depth:
        logger.debug("DIEL: %s - Moving UP to desired depth from %f"
                     % (self.logstring(), particle.location.depth))
        # If we are going to overshoot the desired minimum depth,
        # calculate a new w to land in the middle of the range.
        overshoot_distance = abs(particle.location.depth - self.min_depth)
        if overshoot_distance < abs(vertical_potential):
            halfway_distance = abs((self.max_depth - self.min_depth) / 2)
            w = ((overshoot_distance - halfway_distance) / modelTimestep)
        return { 'u': u, 'v': v, 'w': w }

    """
    I'm above my desired min depth, so I need to go down
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        x  me
    -------------------------------------- min

    -------------------------------------- max

    ______________________________________
    """
    if particle.location.depth > self.min_depth:
        logger.debug("DIEL: %s - Moving DOWN to desired depth from %f"
                     % (self.logstring(), particle.location.depth))
        # If we are going to overshoot the desired maximum depth,
        # calculate a new w to land in the middle of the range.
        overshoot_distance = abs(particle.location.depth - self.max_depth)
        if overshoot_distance < abs(vertical_potential):
            halfway_distance = abs((self.max_depth - self.min_depth) / 2)
            w = ((overshoot_distance - halfway_distance) / modelTimestep)
        return { 'u': u, 'v': v, 'w': -w }

    """
    I'm in my desired depth range, so I'm just gonna chill here
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    -------------------------------------- min
        x  me
    -------------------------------------- max

    ______________________________________
    """
    return { 'u': u, 'v': v, 'w': 0 }
<SYSTEM_TASK:> Returns a new relationship aggregate for the given relationship. <END_TASK>
<USER_TASK:> Description:
def make_relationship_aggregate(self, relationship):
    """
    Returns a new relationship aggregate for the given relationship.

    :param relationship: Instance of
      :class:`everest.entities.relationship.DomainRelationship`.
    """
    if not self._session.IS_MANAGING_BACKREFERENCES:
        relationship.direction &= ~RELATIONSHIP_DIRECTIONS.REVERSE
    return RelationshipAggregate(self, relationship)
<SYSTEM_TASK:> Registers the given entity for the given class as NEW. <END_TASK>
<USER_TASK:> Description:
def register_new(self, entity_class, entity):
    """
    Registers the given entity for the given class as NEW.

    :raises ValueError: If the given entity already holds state that was
      created by another Unit Of Work.
    """
    EntityState.manage(entity, self)
    EntityState.get_state(entity).status = ENTITY_STATUS.NEW
    self.__entity_set_map[entity_class].add(entity)
<SYSTEM_TASK:> Registers the given entity for the given class as CLEAN. <END_TASK>
<USER_TASK:> Description:
def register_clean(self, entity_class, entity):
    """
    Registers the given entity for the given class as CLEAN.

    :returns: Cloned entity.
    """
    EntityState.manage(entity, self)
    EntityState.get_state(entity).status = ENTITY_STATUS.CLEAN
    self.__entity_set_map[entity_class].add(entity)
<SYSTEM_TASK:> Registers the given entity for the given class as DELETED. <END_TASK>
<USER_TASK:> Description:
def register_deleted(self, entity_class, entity):
    """
    Registers the given entity for the given class as DELETED.

    :raises ValueError: If the given entity already holds state that was
      created by another Unit Of Work.
    """
    EntityState.manage(entity, self)
    EntityState.get_state(entity).status = ENTITY_STATUS.DELETED
    self.__entity_set_map[entity_class].add(entity)
<SYSTEM_TASK:> Unregisters the given entity for the given class and discards its <END_TASK>
<USER_TASK:> Description:
def unregister(self, entity_class, entity):
    """
    Unregisters the given entity for the given class and discards its
    state information.
    """
    EntityState.release(entity, self)
    self.__entity_set_map[entity_class].remove(entity)
<SYSTEM_TASK:> Checks if the given entity is marked with status NEW. Returns `False` <END_TASK>
<USER_TASK:> Description:
def is_marked_new(self, entity):
    """
    Checks if the given entity is marked with status NEW. Returns `False`
    if the entity has no state information.
    """
    try:
        result = EntityState.get_state(entity).status == ENTITY_STATUS.NEW
    except ValueError:
        result = False
    return result
<SYSTEM_TASK:> Checks if the given entity is marked with status DELETED. Returns <END_TASK>
<USER_TASK:> Description:
def is_marked_deleted(self, entity):
    """
    Checks if the given entity is marked with status DELETED. Returns
    `False` if the entity has no state information.
    """
    try:
        result = EntityState.get_state(entity).status \
                 == ENTITY_STATUS.DELETED
    except ValueError:
        result = False
    return result
<SYSTEM_TASK:> Marks the given entity as CLEAN. <END_TASK>
<USER_TASK:> Description:
def mark_clean(self, entity):
    """
    Marks the given entity as CLEAN.

    This is done when an entity is loaded fresh from the repository or
    after a commit.
    """
    state = EntityState.get_state(entity)
    state.status = ENTITY_STATUS.CLEAN
    state.is_persisted = True
<SYSTEM_TASK:> Returns an iterator over all entity states held by this Unit Of Work. <END_TASK>
<USER_TASK:> Description:
def iterator(self):
    """
    Returns an iterator over all entity states held by this Unit Of Work.
    """
    # FIXME: There is no dependency tracking; objects are iterated in
    #        random order.
    for ent_cls in list(self.__entity_set_map.keys()):
        for ent in self.__entity_set_map[ent_cls]:
            yield EntityState.get_state(ent)
<SYSTEM_TASK:> Easy save of a file <END_TASK>
<USER_TASK:> Description:
def file_save(self, name, filename=None, folder="", keep_ext=True) -> bool:
    """ Easy save of a file """
    if name in self.files:
        file_object = self.files[name]
        clean_filename = secure_filename(file_object.filename)
        if filename is not None and keep_ext:
            clean_filename = filename + ".%s" % \
                (clean_filename.rsplit('.', 1)[1].lower())
        elif filename is not None and not keep_ext:
            clean_filename = filename
        file_object.save(os.path.join(
            current_app.config['UPLOADS']['FOLDER'], folder,
            clean_filename))
        return True
    return False
<SYSTEM_TASK:> Parse text fields and file fields for values and files <END_TASK> <USER_TASK:> Description: def parse(self, fail_callback=None): """ Parse text fields and file fields for values and files """
# get text fields
        for field in self.field_arguments:
            self.values[field['name']] = self.__get_value(field['name'])
            if self.values[field['name']] is None and field['required']:
                if fail_callback is not None:
                    fail_callback()
                self.__invalid_request(field['error'])
        # get file fields
        for file in self.file_arguments:
            self.files[file['name']] = self.__get_file(file)
            if self.files[file['name']] is None and file['required']:
                if fail_callback is not None:
                    fail_callback()
                self.__invalid_request(file['error'])
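The parser reads its specs from `field_arguments` and `file_arguments`; from the lookups above, each spec appears to be a dict with at least `name`, `required`, and `error` keys (plus `extensions` for files). A hedged sketch of what a caller might wire up — the constructor is not shown here, so `RequestParser()` is a hypothetical name:

# Hypothetical setup; only the dict keys are grounded in the code above.
parser = RequestParser()
parser.field_arguments = [
    {'name': 'title', 'required': True, 'error': 'title is required'},
]
parser.file_arguments = [
    {'name': 'avatar', 'required': False,
     'extensions': {'png', 'jpg'}, 'error': 'bad avatar upload'},
]
parser.parse()
title = parser.values['title']     # populated by parse()
avatar = parser.files['avatar']    # None if no valid file was sent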
<SYSTEM_TASK:> Get request Json value by field name <END_TASK> <USER_TASK:> Description: def __get_value(self, field_name): """ Get request Json value by field name """
value = request.values.get(field_name)
        if value is None and self.json_form_data is not None \
                and field_name in self.json_form_data:
            value = self.json_form_data[field_name]
        return value
<SYSTEM_TASK:> Get request file and do a security check <END_TASK> <USER_TASK:> Description: def __get_file(self, file): """ Get request file and do a security check """
file_object = None
        if file['name'] in request.files:
            file_object = request.files[file['name']]
            clean_filename = secure_filename(file_object.filename)
            if clean_filename == '':
                # Browsers submit an empty filename when no file was chosen.
                return None
            if self.__allowed_extension(clean_filename, file['extensions']):
                return file_object
            # Extension check failed; treat the upload as missing rather
            # than returning it anyway.
            return None
        return file_object
<SYSTEM_TASK:> Check allowed file extensions <END_TASK> <USER_TASK:> Description: def __allowed_extension(self, filename, extensions): """ Check allowed file extensions """
allowed_extensions = current_app.config['UPLOADS']['EXTENSIONS']
        if extensions is not None:
            allowed_extensions = extensions
        return '.' in filename and \
            filename.rsplit('.', 1)[1].lower() in allowed_extensions
<SYSTEM_TASK:> Error response on failure <END_TASK> <USER_TASK:> Description: def __invalid_request(self, error): """ Error response on failure """
# TODO: make this modifiable
        error = {
            'error': {
                'message': error
            }
        }
        abort(JsonResponse(status_code=400, data=error))
<SYSTEM_TASK:> Returns the first value unpacked from the byte array via Python's struct module. <END_TASK> <USER_TASK:> Description: def get_field(self, offset, length, format): """Returns the first value unpacked from the byte array via Python's struct module. Args: offset (int): offset to byte array within structure length (int): how many bytes to unpack format (str): Python struct format string for unpacking See Also: https://docs.python.org/2/library/struct.html#format-characters """
return struct.unpack(format, self.data[offset:offset + length])[0]
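`get_field` is a thin wrapper over `struct.unpack` on a slice, so the pattern is easy to verify standalone. A self-contained sketch with a made-up 8-byte header:

import struct

# Hypothetical header: little-endian 4-byte magic, 2-byte version, 2-byte flags.
data = struct.pack('<IHH', 0xCAFEBABE, 3, 0)

# Same slice-then-unpack pattern as get_field(offset, length, format) above.
magic = struct.unpack('<I', data[0:4])[0]
version = struct.unpack('<H', data[4:6])[0]
assert magic == 0xCAFEBABE and version == 3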
<SYSTEM_TASK:> Exports byte array to specified destination <END_TASK> <USER_TASK:> Description: def export(self, filename, offset=0, length=None): """Exports byte array to specified destination Args: filename (str): destination to output file offset (int): byte offset (default: 0) length (int): number of bytes to export (default: None, meaning through the end of the data) """
self.__validate_offset(filename=filename, offset=offset, length=length)
        # Binary mode: self.data is a byte array.
        with open(filename, 'wb') as f:
            if length is None:
                length = len(self.data) - offset
            # The slice end is offset + length, not length itself.
            output = self.data[offset:offset + length]
            f.write(output)
<SYSTEM_TASK:> Create a tempdir context for the cwd and remove it after. <END_TASK> <USER_TASK:> Description: def tmpdir(): """ Create a tempdir context for the cwd and remove it after. """
target = None
    try:
        with _tmpdir_extant() as target:
            yield target
    finally:
        if target is not None:
            shutil.rmtree(target, ignore_errors=True)
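Typical use, assuming `tmpdir` above is importable:

import os

# Work inside a throwaway directory; it is removed even if the body raises.
with tmpdir() as scratch:
    path = os.path.join(scratch, 'artifact.txt')
    with open(path, 'w') as f:
        f.write('temporary work')
# scratch no longer exists here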
<SYSTEM_TASK:> Run a shell command. Capture the stdout and stderr as a single stream. <END_TASK> <USER_TASK:> Description: def run(command, verbose=False): """ Run a shell command. Capture the stdout and stderr as a single stream. Capture the status code. If verbose=True, then print command and the output to the terminal as it comes in. """
def do_nothing(*args, **kwargs):
        return None
    v_print = print if verbose else do_nothing
    p = subprocess.Popen(
        command,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
    )
    v_print("run:", command)

    def log_and_yield(line):
        if six.PY2:
            # If not unicode, try to decode it first
            if isinstance(line, str):
                line = line.decode('utf8', 'replace')
        v_print(line)
        return line

    output = ''.join(map(log_and_yield, p.stdout))
    status_code = p.wait()
    return CommandResult(command, output, status_code)
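Since stdout and stderr are merged and the exit code is captured, callers branch on `status_code` and log `output`. A short sketch, assuming `run` and its `CommandResult` (fields `command`, `output`, `status_code`, as constructed above) are importable:

result = run('echo hello && nonexistent-cmd', verbose=True)
if result.status_code != 0:
    # stderr lines are interleaved with stdout in result.output
    print('failed:', result.command)
    print(result.output)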
<SYSTEM_TASK:> Asks the current host what version of LXC it has. Returns it as a <END_TASK> <USER_TASK:> Description: def get_lxc_version(): """ Asks the current host what version of LXC it has. Returns it as a string. If LXC is not installed, raises subprocess.CalledProcessError"""
runner = functools.partial(
        subprocess.check_output,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
    )
    # Old LXC had an lxc-version executable, and prefixed its result with
    # "lxc version: "
    try:
        result = runner(['lxc-version']).rstrip()
        return parse_version(result.replace("lxc version: ", ""))
    except (OSError, subprocess.CalledProcessError):
        pass
    # New LXC instead has a --version option on most installed executables.
    return parse_version(runner(['lxc-start', '--version']).rstrip())
<SYSTEM_TASK:> Simulate the next state <END_TASK> <USER_TASK:> Description: def simulate(s0, transmat, steps=1): """Simulate the next state Parameters ---------- s0 : ndarray Vector with state variables at t=0 transmat : ndarray The estimated transition/stochastic matrix. steps : int (Default: 1) The number of steps to simulate model outputs ahead. If steps>1, a Multi-Step Simulation is triggered. Returns ------- out : ndarray (steps=1) Vector with the simulated state variables. (steps>1) Matrix whose rows out[step, :] hold the state vector after each step (C order). The first row is the initial state vector out[0, :] = s0 for algorithmic reasons. """
# Single-Step simulation
    if steps == 1:
        return np.dot(s0, transmat)
    # Multi-Step simulation
    out = np.zeros(shape=(steps + 1, len(s0)), order='C')
    out[0, :] = s0
    for i in range(1, steps + 1):
        out[i, :] = np.dot(out[i - 1, :], transmat)
    return out
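A quick worked example: for a two-state chain the multi-step output converges to the stationary distribution, which for the matrix below is [5/6, 1/6] (solve pi = pi * P):

import numpy as np

# Rows sum to 1; row i gives P(next state | current state i).
transmat = np.array([[0.9, 0.1],
                     [0.5, 0.5]])
s0 = np.array([1.0, 0.0])          # start fully in state 0

out = simulate(s0, transmat, steps=50)
print(out[1])    # one step ahead: [0.9, 0.1]
print(out[-1])   # ~[0.8333, 0.1667], the stationary distribution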
<SYSTEM_TASK:> Verify that `path` has a supported numpy file format <END_TASK> <USER_TASK:> Description: def verify(path): """Verify that `path` has a supported numpy file format"""
path = pathlib.Path(path)
    valid = False
    if path.suffix == ".npy":
        try:
            nf = np.load(str(path), mmap_mode="r", allow_pickle=False)
        except (OSError, ValueError, IsADirectoryError):
            pass
        else:
            if len(nf.shape) == 2:
                valid = True
    return valid
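A self-check, assuming `verify` above is importable — only a 2-D `.npy` file passes:

import numpy as np

np.save('valid.npy', np.zeros((4, 3)))     # 2-D: accepted
np.save('flat.npy', np.zeros(4))           # 1-D: rejected
assert verify('valid.npy') is True
assert verify('flat.npy') is False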
<SYSTEM_TASK:> Return the name of the file without its extension. <END_TASK> <USER_TASK:> Description: def _get_stem(self): """ Return the name of the file without its extension. """
filename = os.path.basename(self.src_path)
        stem, ext = os.path.splitext(filename)
        return "index" if stem in ("index", "README", "__init__") else stem
<SYSTEM_TASK:> Checks that a given bit of data conforms to the type provided <END_TASK> <USER_TASK:> Description: def type_check(thetype, data, bindings = None): """ Checks that a given bit of data conforms to the type provided """
if not bindings:
        bindings = Bindings()
    if isinstance(thetype, core.RecordType):
        for name, child in zip(thetype.child_names, thetype.child_types):
            value = data[name]
            type_check(child, value, bindings)
    elif isinstance(thetype, core.TupleType):
        assert isinstance(data, tuple)
        assert len(data) == len(thetype.child_types)
        for value, child_type in zip(data, thetype.child_types):
            type_check(child_type, value, bindings)
    elif isinstance(thetype, core.UnionType):
        # It is the *data*, not the type, that must be a dict here.
        assert isinstance(data, dict)
        children = [(name, child)
                    for name, child in zip(thetype.child_names,
                                           thetype.child_types)
                    if name in data]
        assert len(children) == 1, "0 or more than 1 entry in Union"
        child_name, child_type = children[0]
        type_check(child_type, data[child_name], bindings)
    elif isinstance(thetype, core.TypeApp):
        # Type applications are tricky.  These will "affect" bindings.
        bindings.push()
        for k, v in thetype.param_values.items():
            bindings[k] = v
        type_check(thetype.root_type, data, bindings)
        bindings.pop()
    elif isinstance(thetype, core.TypeVar):
        # Find the binding for this type variable
        bound_type = bindings[thetype.name]
        if bound_type is None:
            raise errors.ValidationError(
                "TypeVar(%s) is not bound to a type." % thetype.name)
        type_check(bound_type, data, bindings)
    elif isinstance(thetype, core.NativeType):
        # Native types can be plain (Int, Float) or generic (Array<T>,
        # Map<K,V>).  Plain types are fine, but to type-check the *contents*
        # of a generic native type we need the type to expose a mapper
        # functor that applies our checker to each contained value.
        if thetype.args and thetype.mapper_functor:
            def type_check_functor(*values):
                for arg, value in zip(thetype.args, values):
                    bound_type = bindings[arg]
                    if bound_type is None:
                        raise errors.ValidationError(
                            "Arg(%s) is not bound to a type." % arg)
                    type_check(bound_type, value)
            thetype.mapper_functor(type_check_functor, data)
    # Finally apply any other validators that were nominated
    # specifically for that particular type
    if thetype.validator:
        thetype.validator(thetype, data, bindings)
<SYSTEM_TASK:> install programmer in programmers.txt. <END_TASK> <USER_TASK:> Description: def install_programmer(programmer_id, programmer_options, replace_existing=False): """install programmer in programmers.txt. :param programmer_id: string identifier :param programmer_options: dict like :param replace_existing: bool :rtype: None """
doaction = 0
    if programmer_id in programmers().keys():
        log.debug('programmer already exists: %s', programmer_id)
        if replace_existing:
            log.debug('remove programmer: %s', programmer_id)
            remove_programmer(programmer_id)
            doaction = 1
    else:
        doaction = 1
    if doaction:
        lines = bunch2properties(programmer_id, programmer_options)
        programmers_txt().write_lines([''] + lines, append=1)
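A usage sketch; the keys below (`name`, `communication`, `protocol`) follow the Arduino programmers.txt property naming, but the exact set a given board needs may differ:

# Hedged example: register a custom USBasp entry, replacing any existing one.
install_programmer(
    'usbasp_custom',
    {
        'name': 'USBasp (custom)',
        'communication': 'usb',
        'protocol': 'usbasp',
    },
    replace_existing=True,
)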
<SYSTEM_TASK:> Schedule job to run at when, given in microseconds since the UNIX epoch. <END_TASK> <USER_TASK:> Description: def schedule(self, job, when): """ Schedule job to run at `when`, given in microseconds since the UNIX epoch (the other schedule_* methods compute it as seconds * 1e6)."""
pjob = pickle.dumps(job)
        self._redis.zadd('ss:scheduled', when, pjob)
<SYSTEM_TASK:> Schedule job to run a given datetime.timedelta from now. <END_TASK> <USER_TASK:> Description: def schedule_in(self, job, timedelta): """ Schedule job to run a given datetime.timedelta from now."""
now = long(self._now() * 1e6)
        when = now + timedelta.total_seconds() * 1e6
        self.schedule(job, when)
<SYSTEM_TASK:> Schedule job to run as soon as possible. <END_TASK> <USER_TASK:> Description: def schedule_now(self, job): """ Schedule job to run as soon as possible."""
now = long(self._now() * 1e6)
        self.schedule(job, now)
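All three entry points funnel into `schedule`, which stores pickled jobs in the `ss:scheduled` sorted set keyed by timestamp. A hedged sketch — `scheduler` is an instance of the class above and `job` is any picklable object:

import datetime
import time

scheduler.schedule_now(job)                                 # run ASAP
scheduler.schedule_in(job, datetime.timedelta(minutes=5))   # run in 5 minutes
scheduler.schedule(job, int(time.time() * 1e6))             # absolute microsecond timestamp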
<SYSTEM_TASK:> Returns the appropriate value for attribute aria-autocomplete of field. <END_TASK> <USER_TASK:> Description: def _get_aria_autocomplete(self, field): """ Returns the appropriate value for attribute aria-autocomplete of field. :param field: The field. :type field: hatemile.util.html.htmldomelement.HTMLDOMElement :return: The ARIA value of field. :rtype: str """
tag_name = field.get_tag_name()
        input_type = None
        if field.has_attribute('type'):
            input_type = field.get_attribute('type').lower()
        if (
            (tag_name == 'TEXTAREA')
            or (
                (tag_name == 'INPUT')
                and (not (
                    (input_type == 'button')
                    or (input_type == 'submit')
                    or (input_type == 'reset')
                    or (input_type == 'image')
                    or (input_type == 'file')
                    or (input_type == 'checkbox')
                    or (input_type == 'radio')
                    or (input_type == 'hidden')
                ))
            )
        ):
            value = None
            if field.has_attribute('autocomplete'):
                value = field.get_attribute('autocomplete').lower()
            else:
                form = self.parser.find(field).find_ancestors(
                    'form'
                ).first_result()
                if (form is None) and (field.has_attribute('form')):
                    form = self.parser.find(
                        '#' + field.get_attribute('form')
                    ).first_result()
                if (
                    (form is not None)
                    and (form.has_attribute('autocomplete'))
                ):
                    value = form.get_attribute('autocomplete').lower()
            if value == 'on':
                return 'both'
            elif (
                (field.has_attribute('list'))
                and (self.parser.find(
                    'datalist[id="' + field.get_attribute('list') + '"]'
                ).first_result() is not None)
            ):
                return 'list'
            elif value == 'off':
                return 'none'
        return None
<SYSTEM_TASK:> Validate the field when its value changes. <END_TASK> <USER_TASK:> Description: def _validate(self, field, list_attribute): """ Validate the field when its value changes. :param field: The field. :param list_attribute: The list attribute of field with validation. """
if not self.scripts_added:
            self._generate_validation_scripts()
        self.id_generator.generate_id(field)
        self.script_list_fields_with_validation.append_text(
            'hatemileValidationList.' + list_attribute
            + '.push("' + field.get_attribute('id') + '");'
        )
<SYSTEM_TASK:> Delete item, whether it's a file, a folder, or a folder <END_TASK> <USER_TASK:> Description: def remove(item): """ Delete item, whether it's a file, a folder, or a folder full of other files and folders. """
if os.path.isdir(item):
        shutil.rmtree(item)
    else:
        # Assume it's a file. error if not.
        os.remove(item)
<SYSTEM_TASK:> Given a root path, read any .slugignore file inside and return a list of <END_TASK> <USER_TASK:> Description: def get_slugignores(root, fname='.slugignore'): """ Given a root path, read any .slugignore file inside and return a list of patterns that should be removed prior to slug compilation. Return empty list if file does not exist. """
try:
        with open(os.path.join(root, fname)) as f:
            return [l.rstrip('\n') for l in f]
    except IOError:
        return []
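For example, given a `.slugignore` in `/app` containing `*.pyc` and `docs` on separate lines:

patterns = get_slugignores('/app')
# -> ['*.pyc', 'docs']; an empty list if the file does not exist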
<SYSTEM_TASK:> Given a path, delete anything specified in .slugignore. <END_TASK> <USER_TASK:> Description: def clean_slug_dir(root): """ Given a path, delete anything specified in .slugignore. """
if not root.endswith('/'):
        root += '/'
    for pattern in get_slugignores(root):
        remove_pattern(root, pattern)