text_prompt
stringlengths
100
17.7k
code_prompt
stringlengths
7
9.86k
<SYSTEM_TASK:> Adapts the given registered resource to its configured repository. <END_TASK> <USER_TASK:> Description: def as_repository(resource): """ Adapts the given registered resource to its configured repository. :return: object implementing :class:`everest.repositories.interfaces.IRepository`. """
# Look up the component registry for the current thread/application.
reg = get_current_registry()
# If an interface (rather than a resource instance/class) was passed,
# resolve it to its registered collection class first.
if IInterface in provided_by(resource):
    resource = reg.getUtility(resource, name='collection-class')
# Adapt the resource to its configured repository.
return reg.getAdapter(resource, IRepository)
<SYSTEM_TASK:> Strict commit veto to use with the transaction manager. <END_TASK> <USER_TASK:> Description: def commit_veto(request, response): # unused request arg pylint: disable=W0613 """ Strict commit veto to use with the transaction manager. Unlike the default commit veto supplied with the transaction manager, this will veto all commits for HTTP status codes other than 2xx unless a commit is explicitly requested by setting the "x-tm" response header to "commit". As with the default commit veto, the commit is always vetoed if the "x-tm" response header is set to anything other than "commit". """
def commit_veto(request, response): # unused request arg pylint: disable=W0613
    """
    Strict commit veto to use with the transaction manager.

    Unlike the default commit veto supplied with the transaction manager,
    this will veto all commits for HTTP status codes other than 2xx unless
    a commit is explicitly requested by setting the "x-tm" response
    header to "commit". As with the default commit veto, the commit is
    always vetoed if the "x-tm" response header is set to anything other
    than "commit".

    :return: ``True`` if the transaction should be aborted.
    """
    tm_header = response.headers.get('x-tm')
    if tm_header is not None:
        # An explicit "x-tm" header wins: veto unless it requests a commit.
        return tm_header != 'commit'
    # No header: veto every response outside the 2xx success range.
    # (The original also tested ``tm_header == 'commit'`` here, but in this
    # branch the header is known to be None, so that clause was dead code.)
    return not response.status.startswith('2')
<SYSTEM_TASK:> Sets the given object as global object for the given key. <END_TASK> <USER_TASK:> Description: def set(cls, key, obj): """ Sets the given object as global object for the given key. """
def set(cls, key, obj):
    """
    Register *obj* as the global object for *key*.

    Raises ``ValueError`` when *key* already holds a (non-None) object.
    """
    with cls._lock:
        # Refuse to overwrite an existing entry.
        if cls._globs.get(key) is not None:
            raise ValueError('Duplicate key "%s".' % key)
        cls._globs[key] = obj
    return cls._globs[key]
<SYSTEM_TASK:> Adapts the given resource and content type to a representer. <END_TASK> <USER_TASK:> Description: def as_representer(resource, content_type): """ Adapts the given resource and content type to a representer. :param resource: resource to adapt. :param str content_type: content (MIME) type to obtain a representer for. """
# The representer registry is registered as a utility in the current
# component registry.
reg = get_current_registry()
rpr_reg = reg.queryUtility(IRepresenterRegistry)
# NOTE(review): queryUtility may return None if no registry was registered,
# which would raise AttributeError here -- confirm registration is guaranteed.
# Create a representer for the resource's class and the requested MIME type.
return rpr_reg.create(type(resource), content_type)
<SYSTEM_TASK:> Creates a string representation of the given data element tree. <END_TASK> <USER_TASK:> Description: def data_element_tree_to_string(data_element): """ Creates a string representation of the given data element tree. """
# FIXME: rewrite this as a visitor to use the data element tree traverser. def __dump(data_el, stream, offset): name = data_el.__class__.__name__ stream.write("%s%s" % (' ' * offset, name)) offset += 2 ifcs = provided_by(data_el) if ICollectionDataElement in ifcs: stream.write("[") first_member = True for member_data_el in data_el.get_members(): if first_member: stream.write('%s' % os.linesep + ' ' * offset) first_member = False else: stream.write(',%s' % os.linesep + ' ' * offset) __dump(member_data_el, stream, offset) stream.write("]") else: stream.write("(") if ILinkedDataElement in ifcs: stream.write("url=%s, kind=%s, relation=%s" % (data_el.get_url(), data_el.get_kind(), data_el.get_relation())) else: first_attr = True for attr_name, attr_value in iteritems_(data_el.data): if first_attr: first_attr = False else: stream.write(',%s' % os.linesep + ' ' * (offset + len(name) + 1)) if attr_value is None: continue if not IResourceDataElement in provided_by(attr_value): stream.write("%s=%s" % (attr_name, attr_value)) else: stream.write("%s=" % attr_name) __dump(attr_value, stream, offset) stream.write(')') stream = NativeIO() __dump(data_element, stream, 0) return stream.getvalue()
<SYSTEM_TASK:> make the consumer_state ready for the next MC path <END_TASK> <USER_TASK:> Description: def initialize_path(self, path_num=None): """ make the consumer_state ready for the next MC path :param int path_num: """
def initialize_path(self, path_num=None):
    """Prepare every wrapped consumer for the next Monte Carlo path.

    :param int path_num: index of the path about to be simulated.
    """
    # Delegate to each consumer, then snapshot their states.
    for consumer in self.consumers:
        consumer.initialize_path(path_num)
    self.state = [consumer.state for consumer in self.consumers]
<SYSTEM_TASK:> finalize path and populate result for ConsumerConsumer <END_TASK> <USER_TASK:> Description: def finalize_path(self, path_num=None): """finalize path and populate result for ConsumerConsumer"""
def finalize_path(self, path_num=None):
    """Finalize the given path on every consumer and collect their results.

    :param int path_num: index of the path that just finished.
    """
    for consumer in self.consumers:
        consumer.finalize_path(path_num)
    self.result = [consumer.result for consumer in self.consumers]
<SYSTEM_TASK:> get to given consumer states. <END_TASK> <USER_TASK:> Description: def get(self, queue_get): """ get to given consumer states. This function is used for merging of results of parallelized MC. The first state is used for merging in place. The states must be disjoint. :param object queue_get: second consumer state """
# NOTE(review): ``izip`` is the Python-2 itertools name; under Python 3 this
# relies on a compatibility import elsewhere in the module -- confirm.
for (c, cs) in izip(self.consumers, queue_get):
    # Merge the partial state ``cs`` (from a parallel worker) into consumer c.
    c.get(cs)
# Refresh the aggregated result list after merging.
self.result = [c.result for c in self.consumers]
<SYSTEM_TASK:> Determines attribute type at the offset and returns \ <END_TASK> <USER_TASK:> Description: def _get_attribute(self, offset): """Determines attribute type at the offset and returns \ initialized attribute object. Returns: MftAttr: One of the attribute objects \ (eg. :class:`~.mft_attribute.MftAttrFilename`). None: If atttribute type does not mach any one of the supported \ attribute types. """
attr_type = self.get_uint_le(offset) # Attribute length is in header @ offset 0x4 length = self.get_uint_le(offset + 0x04) data = self.get_chunk(offset, length) return MftAttr.factory(attr_type, data)
<SYSTEM_TASK:> construct the email message <END_TASK> <USER_TASK:> Description: def construct_message(self, email=None): """ construct the email message """
# add subject, from and to self.multipart['Subject'] = self.subject self.multipart['From'] = self.config['EMAIL'] self.multipart['Date'] = formatdate(localtime=True) if email is None and self.send_as_one: self.multipart['To'] = ", ".join(self.addresses) elif email is not None and self.send_as_one is False: self.multipart['To'] = email # add ccs if self.ccs is not None and self.ccs: self.multipart['Cc'] = ", ".join(self.ccs) # add html and text body html = MIMEText(self.html, 'html') alt_text = MIMEText(self.text, 'plain') self.multipart.attach(html) self.multipart.attach(alt_text) for file in self.files: self.multipart.attach(file)
<SYSTEM_TASK:> main function to construct and send email <END_TASK> <USER_TASK:> Description: def create_email(self): """ main function to construct and send email """
# Open the SMTP connection once for the whole batch.
self.connect()
if self.send_as_one:
    # One message addressed to all recipients at once.
    self.construct_message()
    self.send()
elif self.send_as_one is False:
    # One message per recipient.
    for email in self.addresses:
        self.construct_message(email)
        self.send(email)
self.disconnect()
<SYSTEM_TASK:> Returns the dictionary definitions for the given word, retrieved from the Wordnik API. <END_TASK> <USER_TASK:> Description: def get_definition(query): """Returns the dictionary definitions for the given word, as retrieved from the Wordnik API."""
def get_definition(query):
    """Look up dictionary definitions for *query* via the Wordnik API.

    :param str query: word to define.
    :return: parsed definition data as returned by ``get_definition_api``.
    :raises: whatever ``get_definition_api`` raises.
    """
    # Delegate to the API helper; errors propagate to the caller.
    # (An unreachable inline re-implementation of the HTTP call -- dead code
    # behind the unconditional return/raise, and containing a hard-coded
    # API key -- has been removed.)
    return get_definition_api(query)
<SYSTEM_TASK:> Add session_id to flask globals for current request <END_TASK> <USER_TASK:> Description: def set_current_session(session_id) -> bool: """ Add session_id to flask globals for current request """
try:
    # Stash the session id on Flask's per-request globals object.
    g.session_id = session_id
    return True
except (Exception, BaseException) as error:  # catch all on config update
    # Outside an application/request context ``g`` raises; report the
    # error in debug mode and signal failure instead of propagating.
    if current_app.config['DEBUG']:
        print(error)
    return False
<SYSTEM_TASK:> Visits a member node in a resource data tree. <END_TASK> <USER_TASK:> Description: def visit_member(self, attribute_key, attribute, member_node, member_data, is_link_node, parent_data, index=None): """ Visits a member node in a resource data tree. :param tuple attribute_key: tuple containing the attribute tokens identifying the member node's position in the resource data tree. :param attribute: mapped attribute holding information about the member node's name (in the parent) and type etc. :type attribute: :class:`everest.representers.attributes.MappedAttribute` :param member_node: the node holding resource data. This is either a resource instance (when using a :class:`ResourceTreeTraverser` on a tree of resources) or a data element instance (when using a :class:`DataElementTreeTraverser` on a data element tree. :param dict member_data: dictionary holding all member data extracted during traversal (with mapped attributes as keys). :param bool is_link_node: indicates if the given member node is a link. :param dict parent_data: dictionary holding all parent data extracted during traversal (with mapped attributes as keys). :param int index: this indicates a member node's index in a collection parent node. If the parent node is a member node, it will be `None`. """
# Abstract hook: concrete traverser visitors override this to process
# member nodes.
raise NotImplementedError('Abstract method.')
<SYSTEM_TASK:> Returns the domain relationship object for the given resource <END_TASK> <USER_TASK:> Description: def get_relationship(self, attribute): """ Returns the domain relationship object for the given resource attribute. """
# Cache relationships per entity attribute so repeated lookups return the
# same (lazily initialized) relationship object.
rel = self.__relationships.get(attribute.entity_attr)
if rel is None:
    rel = LazyDomainRelationship(self, attribute,
                                 direction=
                                 self.relationship_direction)
    self.__relationships[attribute.entity_attr] = rel
return rel
<SYSTEM_TASK:> Publishes a simple notification. <END_TASK> <USER_TASK:> Description: def simple_notification(connection, queue_name, exchange_name, routing_key, text_body): """ Publishes a simple notification. Inputs: - connection: A rabbitmq connection object. - queue_name: The name of the queue to be checked or created. - exchange_name: The name of the notification exchange. - routing_key: The routing key for the exchange-queue binding. - text_body: The text to be published. """
channel = connection.channel()
# Declare the queue; PreconditionFailed means it already exists with
# different settings, which we tolerate.
try:
    channel.queue_declare(queue_name, durable=True, exclusive=False,
                          auto_delete=False)
except PreconditionFailed:
    pass
# Same tolerance for a pre-existing exchange.
try:
    channel.exchange_declare(exchange_name, type="fanout", durable=True,
                             auto_delete=False)
except PreconditionFailed:
    pass
# Bind the queue to the exchange and publish the message body.
channel.queue_bind(queue_name, exchange_name, routing_key=routing_key)
message = Message(text_body)
channel.basic_publish(message, exchange_name, routing_key)
<SYSTEM_TASK:> Returns the reversed url given a string or dict and prints errors if MENU_DEBUG is enabled <END_TASK> <USER_TASK:> Description: def get_url(self, url_or_dict): """ Returns the reversed url given a string or dict and prints errors if MENU_DEBUG is enabled """
# NOTE(review): this is Python-2-only code (``basestring`` and the
# ``print >>stderr`` statement); it will not compile under Python 3.
if isinstance(url_or_dict, basestring):
    # A bare view name is wrapped into reverse() keyword arguments.
    url_or_dict = {'viewname': url_or_dict}
try:
    return reverse(**url_or_dict)
except NoReverseMatch:
    # Falls through and returns None implicitly when reversing fails.
    if MENU_DEBUG:
        print >>stderr,'Unable to reverse URL with kwargs %s' % url_or_dict
<SYSTEM_TASK:> Time of the TIFF file <END_TASK> <USER_TASK:> Description: def get_time(self): """Time of the TIFF file Currently, only the file modification time is supported. Note that the modification time of the TIFF file is dependent on the file system and may have temporal resolution as low as 3 seconds. """
def get_time(self):
    """Time of the TIFF file.

    Currently, only the file modification time is supported. Note that
    the modification time of the TIFF file is dependent on the file
    system and may have temporal resolution as low as 3 seconds.
    """
    # Only genuine filesystem paths carry a usable mtime; anything else
    # (e.g. an in-memory stream) yields NaN.
    if not isinstance(self.path, pathlib.Path):
        return np.nan
    return self.path.stat().st_mtime
<SYSTEM_TASK:> Verify that `path` is a valid TIFF file <END_TASK> <USER_TASK:> Description: def verify(path): """Verify that `path` is a valid TIFF file"""
valid = False
try:
    # _get_tif raises ValueError for non-TIFF files and IsADirectoryError
    # for directories.
    tf = SingleTifHolo._get_tif(path)
except (ValueError, IsADirectoryError):
    pass
else:
    # Only single-page TIFFs qualify as a single hologram.
    if len(tf) == 1:
        valid = True
return valid
<SYSTEM_TASK:> Store a string representation of content_object as target <END_TASK> <USER_TASK:> Description: def save(self, *args, **kwargs): """ Store a string representation of content_object as target and actor name for fast retrieval and sorting. """
def save(self, *args, **kwargs):
    """
    Store a string representation of content_object as target
    and actor name for fast retrieval and sorting.
    """
    # Cache display strings only once, on first save.
    if not self.target:
        self.target = str(self.content_object)
    if not self.actor_name:
        self.actor_name = str(self.actor)
    # BUG FIX: forward *args/**kwargs (e.g. using=..., update_fields=...)
    # to the parent save(); they were silently dropped before.
    super(Activity, self).save(*args, **kwargs)
<SYSTEM_TASK:> Given a ``frame``, obtain the version number of the module running there. <END_TASK> <USER_TASK:> Description: def version_from_frame(frame): """ Given a ``frame``, obtain the version number of the module running there. """
module = getmodule(frame)
if module is None:
    # Frame without an importable module (e.g. exec'd code): report the
    # file/line instead of a version.
    s = "<unknown from {0}:{1}>"
    return s.format(frame.f_code.co_filename, frame.f_lineno)
module_name = module.__name__
# An environment variable such as AUTOVERSION_MYPKG overrides detection.
variable = "AUTOVERSION_{}".format(module_name.upper())
override = os.environ.get(variable, None)
if override is not None:
    return override
while True:
    try:
        # Probe whether a distribution exists under this (package) name.
        get_distribution(module_name)
    except DistributionNotFound:
        # Look at what's to the left of "."
        module_name, dot, _ = module_name.partition(".")
        if dot == "":
            # There is no dot, nothing more we can do.
            break
    else:
        return getversion(module_name)
return None
<SYSTEM_TASK:> Return ``n`` as an integer if it is numeric, otherwise return the input <END_TASK> <USER_TASK:> Description: def try_fix_num(n): """ Return ``n`` as an integer if it is numeric, otherwise return the input """
def try_fix_num(n):
    """
    Return ``n`` converted to ``int`` if it is purely numeric, otherwise
    return the input unchanged.

    :param str n: candidate numeric string.
    """
    # int() already ignores leading zeros, so the manual lstrip("0")
    # dance in the original was redundant.
    return int(n) if n.isdigit() else n
<SYSTEM_TASK:> Split ``version`` into a lexicographically comparable tuple. <END_TASK> <USER_TASK:> Description: def tupleize_version(version): """ Split ``version`` into a lexicographically comparable tuple. "1.0.3" -> ((1, 0, 3),) "1.0.3-dev" -> ((1, 0, 3), ("dev",)) "1.0.3-rc-5" -> ((1, 0, 3), ("rc",), (5,)) """
def tupleize_version(version):
    """
    Split ``version`` into a lexicographically comparable tuple.

    "1.0.3"      -> ((1, 0, 3),)
    "1.0.3-dev"  -> ((1, 0, 3), ("dev",))
    "1.0.3-rc-5" -> ((1, 0, 3), ("rc",), (5,))
    """
    # Unknown / unparseable versions compare as a single "unknown" group.
    if version is None:
        return (("unknown",),)
    if version.startswith("<unknown"):
        return (("unknown",),)
    # Split on "." (discarded) and "-" (captured, to mark group boundaries).
    # FIX: raw string -- "\." in a plain string is an invalid escape
    # (DeprecationWarning, SyntaxError in future Python versions).
    split = re.split(r"(?:\.|(-))", version)
    parsed = tuple(try_fix_num(x) for x in split if x)

    # Put the tuples in groups by "-"
    def is_dash(s):
        return s == "-"
    grouped = groupby(parsed, is_dash)
    return tuple(tuple(group) for dash, group in grouped if not dash)
<SYSTEM_TASK:> Return a string describing the version of the repository at ``path`` if <END_TASK> <USER_TASK:> Description: def get_version(cls, path, memo={}): """ Return a string describing the version of the repository at ``path`` if possible, otherwise throws ``subprocess.CalledProcessError``. (Note: memoizes the result in the ``memo`` parameter) """
# ``memo`` is a deliberate mutable default argument: results are cached
# per path for the lifetime of the process.
if path not in memo:
    memo[path] = subprocess.check_output(
        "git describe --tags --dirty 2> /dev/null",
        shell=True, cwd=path).strip().decode("utf-8")
    v = re.search("-[0-9]+-", memo[path])
    if v is not None:
        # Replace -n- with -branchname-n-
        branch = r"-{0}-\1-".format(cls.get_branch(path))
        (memo[path], _) = re.subn("-([0-9]+)-", branch, memo[path], 1)
return memo[path]
<SYSTEM_TASK:> Return ``True`` if ``path`` is a source controlled repository. <END_TASK> <USER_TASK:> Description: def is_repo_instance(cls, path): """ Return ``True`` if ``path`` is a source controlled repository. """
def is_repo_instance(cls, path):
    """
    Return ``True`` if ``path`` is a source controlled repository.
    """
    try:
        cls.get_version(path)
    except subprocess.CalledProcessError:
        # Git returns non-zero status
        return False
    except OSError:
        # Git unavailable?
        return False
    return True
<SYSTEM_TASK:> Always sort `index` or `README` as first filename in list. <END_TASK> <USER_TASK:> Description: def _sort_modules(mods): """ Always sort `index` or `README` as first filename in list. """
def compare(x, y): x = x[1] y = y[1] if x == y: return 0 if y.stem == "__init__.py": return 1 if x.stem == "__init__.py" or x < y: return -1 return 1 return sorted(mods, key=cmp_to_key(compare))
<SYSTEM_TASK:> Generate a References section. <END_TASK> <USER_TASK:> Description: def refs_section(doc): """ Generate a References section. Parameters ---------- doc : dict Dictionary produced by numpydoc Returns ------- list of str Markdown for references section """
def refs_section(doc):
    """
    Generate a References section.

    Parameters
    ----------
    doc : dict
        Dictionary produced by numpydoc

    Returns
    -------
    list of str
        Markdown for references section
    """
    lines = []
    if "References" in doc and len(doc["References"]) > 0:
        for ref in doc["References"]:
            # Each numpydoc reference looks like "[1] Author, Title ...";
            # pull out the number and re-emit it as a markdown footnote.
            # FIX: raw string for the regex (invalid "\[" escape in a plain
            # string); commented-out debug prints removed.
            ref_num = re.findall(r"\[([0-9]+)\]", ref)[0]
            ref_body = " ".join(ref.split(" ")[2:])
            lines.append(f"[^{ref_num}]: {ref_body}" + "\n\n")
    return lines
<SYSTEM_TASK:> Generate markdown for Examples section. <END_TASK> <USER_TASK:> Description: def examples_section(doc, header_level): """ Generate markdown for Examples section. Parameters ---------- doc : dict Dict from numpydoc header_level : int Number of `#`s to use for header Returns ------- list of str Markdown for examples section """
def examples_section(doc, header_level):
    """
    Generate markdown for Examples section.

    Parameters
    ----------
    doc : dict
        Dict from numpydoc
    header_level : int
        Number of `#`s to use for header

    Returns
    -------
    list of str
        Markdown for examples section
    """
    lines = []
    if "Examples" in doc and len(doc["Examples"]) > 0:
        lines.append(f"{'#'*(header_level+1)} Examples \n")
        # FIX: an unused intermediate ``egs = "\n".join(...)`` was removed.
        lines += mangle_examples(doc["Examples"])
    return lines
<SYSTEM_TASK:> Generate markdown for Returns section. <END_TASK> <USER_TASK:> Description: def returns_section(thing, doc, header_level): """ Generate markdown for Returns section. Parameters ---------- thing : function Function to produce returns for doc : dict Dict from numpydoc header_level : int Number of `#`s to use for header Returns ------- list of str Markdown for examples section """
lines = []
return_type = None
# Resolve the annotated return type; for properties look through ``fget``.
try:
    return_type = thing.__annotations__["return"]
except AttributeError:
    try:
        return_type = thing.fget.__annotations__["return"]
    except:
        # NOTE(review): bare except silently ignores any failure here.
        pass
except KeyError:
    pass
if return_type is None:
    return_type = ""
else:
    # Render the type as "name" for builtins, "module.name" otherwise;
    # typing generics without __name__ fall back to str().
    try:
        return_type = (
            f"{return_type.__name__}"
            if return_type.__module__ == "builtins"
            else f"{return_type.__module__}.{return_type.__name__}"
        )
    except AttributeError:
        return_type = str(return_type)
try:
    # NOTE(review): relies on ``and`` binding tighter than ``or`` here.
    if "Returns" in doc and len(doc["Returns"]) > 0 or return_type != "":
        lines.append(f"{'#'*(header_level+1)} Returns\n")
        if return_type != "" and len(doc["Returns"]) == 1:
            # Single documented return value plus an annotation: the
            # annotation wins for the type text.
            name, typ, desc = doc["Returns"][0]
            if typ != "":
                lines.append(f"- `{name}`: ``{return_type}``")
            else:
                lines.append(f"- ``{return_type}``")
            lines.append("\n\n")
            if desc != "":
                lines.append(f" {' '.join(desc)}\n\n")
        elif return_type != "":
            # Annotation only, no (single) docstring entry.
            lines.append(f"- ``{return_type}``")
            lines.append("\n\n")
        else:
            # Docstring entries only.
            for name, typ, desc in doc["Returns"]:
                if ":" in name:
                    name, typ = name.split(":")
                if typ != "":
                    line = f"- `{name}`: {mangle_types(typ)}"
                else:
                    line = f"- {mangle_types(name)}"
                line += "\n\n"
                lines.append(line)
                lines.append(f" {' '.join(desc)}\n\n")
except Exception as e:
    # NOTE(review): broad catch degrades to whatever was built so far --
    # confirm this best-effort behavior is intended.
    pass
return lines
<SYSTEM_TASK:> Generate markdown for summary section. <END_TASK> <USER_TASK:> Description: def summary(doc): """ Generate markdown for summary section. Parameters ---------- doc : dict Output from numpydoc Returns ------- list of str Markdown strings """
def summary(doc):
    """
    Generate markdown for summary section.

    Parameters
    ----------
    doc : dict
        Output from numpydoc

    Returns
    -------
    list of str
        Markdown strings
    """
    lines = []
    # Emit the short summary followed by the extended summary, each as one
    # footnote-fixed paragraph plus a blank-line separator.
    for key in ("Summary", "Extended Summary"):
        if key in doc and len(doc[key]) > 0:
            lines.append(fix_footnotes(" ".join(doc[key])))
            lines.append("\n")
    return lines
<SYSTEM_TASK:> Generate markdown for Parameters section. <END_TASK> <USER_TASK:> Description: def params_section(thing, doc, header_level): """ Generate markdown for Parameters section. Parameters ---------- thing : functuon Function to produce parameters from doc : dict Dict from numpydoc header_level : int Number of `#`s to use for header Returns ------- list of str Markdown for examples section """
def params_section(thing, doc, header_level):
    """
    Generate markdown for Parameters section.

    Parameters
    ----------
    thing : function
        Function to produce parameters from
    doc : dict
        Dict from numpydoc
    header_level : int
        Number of `#`s to use for header

    Returns
    -------
    list of str
        Markdown for examples section
    """
    # Delegate to the shared type_list renderer; annotations on *thing*'s
    # signature take precedence over docstring types.
    # FIX: an unused local ``lines = []`` was removed.
    return type_list(
        inspect.signature(thing),
        doc["Parameters"],
        "#" * (header_level + 1) + " Parameters\n\n",
    )
<SYSTEM_TASK:> Construct a string representation of a type annotation. <END_TASK> <USER_TASK:> Description: def string_annotation(typ, default): """ Construct a string representation of a type annotation. Parameters ---------- typ : type Type to turn into a string default : any Default value (if any) of the type Returns ------- str String version of the type annotation """
def string_annotation(typ, default):
    """
    Construct a string representation of a type annotation.

    Parameters
    ----------
    typ : type
        Type to turn into a string
    default : any
        Default value (if any) of the type

    Returns
    -------
    str
        String version of the type annotation
    """
    # Builtins render bare ("int"); everything else fully qualified.
    try:
        if typ.__module__ == "builtins":
            type_string = f"`{typ.__name__}`"
        else:
            type_string = f"`{typ.__module__}.{typ.__name__}`"
    except AttributeError:
        # typing generics have no __name__; fall back to str().
        type_string = f"`{str(typ)}`"
    if default is None:
        return f"{type_string}, default ``None``"
    if default == inspect._empty:
        return type_string
    return f"{type_string}, default ``{default}``"
<SYSTEM_TASK:> Construct a list of types, preferring type annotations to <END_TASK> <USER_TASK:> Description: def type_list(signature, doc, header): """ Construct a list of types, preferring type annotations to docstrings if they are available. Parameters ---------- signature : Signature Signature of thing doc : list of tuple Numpydoc's type list section Returns ------- list of str Markdown formatted type list """
lines = []
# Names already emitted from the docstring pass.
docced = set()
lines.append(header)
try:
    # First pass: parameters documented in the docstring; signature
    # annotations take precedence over the docstring types.
    for names, types, description in doc:
        names, types = _get_names(names, types)
        unannotated = []
        for name in names:
            docced.add(name)
            try:
                typ = signature.parameters[name].annotation
                if typ == inspect._empty:
                    raise AttributeError
                default = signature.parameters[name].default
                type_string = string_annotation(typ, default)
                lines.append(f"- `{name}`: {type_string}")
                lines.append("\n\n")
            except (AttributeError, KeyError):
                unannotated.append(name)  # No annotation
        # Unannotated names fall back to the docstring's type text.
        if len(unannotated) > 0:
            lines.append("- ")
            lines.append(", ".join(f"`{name}`" for name in unannotated))
        if types != "" and len(unannotated) > 0:
            lines.append(f": {mangle_types(types)}")
        lines.append("\n\n")
        lines.append(f" {' '.join(description)}\n\n")
    # Second pass: names not seen in the first pass.
    # NOTE(review): the first pass adds *every* docstring name to
    # ``docced``, so this pass appears to be dead code -- confirm.
    for names, types, description in doc:
        names, types = _get_names(names, types)
        for name in names:
            if name not in docced:
                try:
                    typ = signature.parameters[name].annotation
                    default = signature.parameters[name].default
                    type_string = string_annotation(typ, default)
                    lines.append(f"- `{name}`: {type_string}")
                    lines.append("\n\n")
                except (AttributeError, KeyError):
                    lines.append(f"- `{name}`")
                    lines.append("\n\n")
except Exception as e:
    # NOTE(review): broad catch degrades to a partial list with only a
    # print -- confirm this best-effort behavior is intended.
    print(e)
# Only the header means no content: return the empty list instead.
return lines if len(lines) > 1 else []
<SYSTEM_TASK:> Generate an attributes section for classes. <END_TASK> <USER_TASK:> Description: def attributes_section(thing, doc, header_level): """ Generate an attributes section for classes. Prefers type annotations, if they are present. Parameters ---------- thing : class Class to document doc : dict Numpydoc output header_level : int Number of `#`s to use for header Returns ------- list of str Markdown formatted attribute list """
# Get Attributes if not inspect.isclass(thing): return [] props, class_doc = _split_props(thing, doc["Attributes"]) tl = type_list(inspect.signature(thing), class_doc, "\n### Attributes\n\n") if len(tl) == 0 and len(props) > 0: tl.append("\n### Attributes\n\n") for prop in props: tl.append(f"- [`{prop}`](#{prop})\n\n") return tl
<SYSTEM_TASK:> Generate markdown for an enum <END_TASK> <USER_TASK:> Description: def enum_doc(name, enum, header_level, source_location): """ Generate markdown for an enum Parameters ---------- name : str Name of the thing being documented enum : EnumMeta Enum to document header_level : int Heading level source_location : str URL of repo containing source code """
def enum_doc(name, enum, header_level, source_location):
    """
    Generate markdown for an enum

    Parameters
    ----------
    name : str
        Name of the thing being documented
    enum : EnumMeta
        Enum to document
    header_level : int
        Heading level
    source_location : str
        URL of repo containing source code
    """
    lines = [f"{'#'*header_level} Enum **{name}**\n\n"]
    lines.append(f"```python\n{name}\n```\n")
    lines.append(get_source_link(enum, source_location))
    try:
        # BUG FIX: this referenced an undefined name ``thing`` (the
        # NameError was silently swallowed), so enum summaries were never
        # emitted; use the ``enum`` parameter instead.
        doc = NumpyDocString(inspect.getdoc(enum))._parsed_data
        lines += summary(doc)
    except Exception:
        # No (parseable) docstring -- skip the summary.
        pass
    lines.append(f"{'#'*(header_level + 1)} Members\n\n")
    lines += [f"- `{str(v).split('.').pop()}`: `{v.value}` \n\n" for v in enum]
    return lines
<SYSTEM_TASK:> Generate markdown for a class or function <END_TASK> <USER_TASK:> Description: def to_doc(name, thing, header_level, source_location): """ Generate markdown for a class or function Parameters ---------- name : str Name of the thing being documented thing : class or function Class or function to document header_level : int Heading level source_location : str URL of repo containing source code """
# Enums get their own dedicated renderer.
if type(thing) is enum.EnumMeta:
    return enum_doc(name, thing, header_level, source_location)
if inspect.isclass(thing):
    header = f"{'#'*header_level} Class **{name}**\n\n"
else:
    header = f"{'#'*header_level} {name}\n\n"
lines = [
    header,
    get_signature(name, thing),
    get_source_link(thing, source_location),
]
try:
    # Parse the numpydoc docstring and append each rendered section.
    doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
    lines += summary(doc)
    lines += attributes_section(thing, doc, header_level)
    lines += params_section(thing, doc, header_level)
    lines += returns_section(thing, doc, header_level)
    lines += examples_section(doc, header_level)
    lines += notes_section(doc)
    lines += refs_section(doc)
except Exception as e:
    # Missing or unparseable docstring: emit header/signature/link only.
    # print(f"No docstring for {name}, src {source_location}: {e}")
    pass
return lines
<SYSTEM_TASK:> Set the current colors. <END_TASK> <USER_TASK:> Description: def set_color(fg=None, bg=None): """Set the current colors. If no arguments are given, sets default colors. """
def set_color(fg=None, bg=None):
    """Set the current colors.

    If no arguments are given, sets default colors.
    """
    # Either color given (truthy): apply it; otherwise reset to defaults.
    if not (fg or bg):
        _color_manager.set_defaults()
    else:
        _color_manager.set_color(fg, bg)
<SYSTEM_TASK:> Print a colored string to the target handle. <END_TASK> <USER_TASK:> Description: def cprint(string, fg=None, bg=None, end='\n', target=sys.stdout): """Print a colored string to the target handle. fg and bg specify foreground- and background colors, respectively. The remaining keyword arguments are the same as for Python's built-in print function. Colors are returned to their defaults before the function returns. """
# Switch colors, emit the string, then restore defaults so later output
# is unaffected.
_color_manager.set_color(fg, bg)
target.write(string + end)
target.flush()  # Needed for Python 3.x
_color_manager.set_defaults()
<SYSTEM_TASK:> Parse and print a colored and perhaps formatted string. <END_TASK> <USER_TASK:> Description: def fprint(fmt, *args, **kwargs): """Parse and print a colored and perhaps formatted string. The remaining keyword arguments are the same as for Python's built-in print function. Colors are returning to their defaults before the function returns. """
if not fmt:
    return
hascolor = False
target = kwargs.get("target", sys.stdout)

# Format the string before feeding it to the parser
fmt = fmt.format(*args, **kwargs)
for txt, markups in _color_format_parser.parse(fmt):
    if markups != (None, None):
        # A color markup: activate it for the following text.
        _color_manager.set_color(*markups)
        hascolor = True
    else:
        # Plain text: drop back to defaults if a markup was active.
        if hascolor:
            _color_manager.set_defaults()
            hascolor = False
    target.write(txt)
    target.flush()  # Needed for Python 3.x
# Restore defaults before and after the trailing end-string.
_color_manager.set_defaults()
target.write(kwargs.get('end', '\n'))
_color_manager.set_defaults()
<SYSTEM_TASK:> Wrap color syntax around a string and return it. <END_TASK> <USER_TASK:> Description: def formatcolor(string, fg=None, bg=None): """Wrap color syntax around a string and return it. fg and bg specify foreground- and background colors, respectively. """
if fg is bg is None:
    # Nothing to wrap.
    return string
# Build the markup body (e.g. "fg=red" joined with "bg=blue") using the
# parser's own tokens so format and parse stay in sync.
temp = (['fg='+fg] if fg else []) +\
       (['bg='+bg] if bg else [])
fmt = _color_format_parser._COLOR_DELIM.join(temp)
return _color_format_parser._START_TOKEN + fmt +\
       _color_format_parser._FMT_TOKEN + string +\
       _color_format_parser._STOP_TOKEN
<SYSTEM_TASK:> Wrap color syntax around characters using indices and return it. <END_TASK> <USER_TASK:> Description: def formatbyindex(string, fg=None, bg=None, indices=[]): """Wrap color syntax around characters using indices and return it. fg and bg specify foreground- and background colors, respectively. """
if not string or not indices or (fg is bg is None):
    return string
result, p = '', 0
# Group sorted indices into runs of consecutive values (index minus
# enumeration position is constant within a run); each run is wrapped
# as one colored span.
# The lambda syntax is necessary to support both Python 2 and 3
for k, g in itertools.groupby(enumerate(sorted(indices)),
                              lambda x: x[0]-x[1]):
    tmp = list(map(operator.itemgetter(1), g))
    s, e = tmp[0], tmp[-1]+1
    if s < len(string):
        # Copy uncolored text up to the run, then the colored run itself.
        result += string[p:s]
        result += formatcolor(string[s:e], fg, bg)
        p = e
# Trailing uncolored text.
if p < len(string):
    result += string[p:]
return result
<SYSTEM_TASK:> Highlight characters using indices and print it to the target handle. <END_TASK> <USER_TASK:> Description: def highlight(string, fg=None, bg=None, indices=[], end='\n', target=sys.stdout): """Highlight characters using indices and print it to the target handle. fg and bg specify foreground- and background colors, respectively. The remaining keyword arguments are the same as for Python's built-in print function. """
if not string or not indices or (fg is bg is None):
    return
p = 0
# Group sorted indices into runs of consecutive values (same trick as
# formatbyindex) and write each run in color.
# The lambda syntax is necessary to support both Python 2 and 3
for k, g in itertools.groupby(enumerate(sorted(indices)),
                              lambda x: x[0]-x[1]):
    tmp = list(map(operator.itemgetter(1), g))
    s, e = tmp[0], tmp[-1]+1
    # Plain text before the run.
    target.write(string[p:s])
    target.flush()  # Needed for Python 3.x
    # Colored run, then restore defaults.
    _color_manager.set_color(fg, bg)
    target.write(string[s:e])
    target.flush()  # Needed for Python 3.x
    _color_manager.set_defaults()
    p = e
# Trailing plain text and the end-string.
if p < len(string):
    target.write(string[p:])
target.write(end)
<SYSTEM_TASK:> Builds the code block for the GPTool GetParameterInfo method based on the input task_params. <END_TASK> <USER_TASK:> Description: def create_param_info(task_params, parameter_map): """ Builds the code block for the GPTool GetParameterInfo method based on the input task_params. :param task_params: A list of task parameters to map to GPTool parameters. :return: A string representing the code block to the GPTool GetParameterInfo method. """
def create_param_info(task_params, parameter_map):
    """
    Builds the code block for the GPTool GetParameterInfo method based on
    the input task_params.

    :param task_params: A list of task parameters to map to GPTool parameters.
    :param parameter_map: Mapping from task data-type names to template objects.
    :return: A string representing the code block to the GPTool
        GetParameterInfo method.
    :raises UnknownDataTypeError: for multi-dimensional arrays or unmapped
        data types.
    """
    gp_params = []
    gp_param_list = []
    gp_param_idx_list = []
    gp_param_idx = 0
    for task_param in task_params:
        # Setup the gp_param dictionary used to substitute against the
        # parameter info template.
        gp_param = {}
        # Convert DataType; one-dimensional arrays get an ARRAY suffix.
        data_type = task_param['type'].upper()
        if 'dimensions' in task_param:
            if len(task_param['dimensions'].split(',')) > 1:
                raise UnknownDataTypeError('Only one-dimensional arrays are supported.')
            data_type += 'ARRAY'
        if data_type in parameter_map:
            gp_param['dataType'] = parameter_map[data_type].data_type
        else:
            # No Mapping exists for this data type!
            raise UnknownDataTypeError('Unable to map task datatype: ' + data_type + '. A template must be created.')
        gp_param['name'] = task_param['name']
        gp_param['displayName'] = task_param['display_name']
        gp_param['direction'] = _DIRECTION_MAP[task_param['direction']]
        gp_param['paramType'] = 'Required' if task_param['required'] else 'Optional'
        # ENVI/IDL output type translates to a derived output type in Arc.
        # BUG FIX: was ``is 'Output'`` -- identity comparison with a string
        # literal is implementation-dependent; use equality.
        if gp_param['direction'] == 'Output':
            gp_param['paramType'] = 'Derived'
        gp_param['multiValue'] = True if 'dimensions' in task_param else False
        # Substitute values into the template.
        gp_params.append(parameter_map[data_type].get_parameter(task_param).substitute(gp_param))
        # Convert the default value.
        if 'default_value' in task_param:
            gp_param['defaultValue'] = task_param['default_value']
            gp_params.append(parameter_map[data_type].default_value().substitute(gp_param))
        # Convert any choicelist.
        if 'choice_list' in task_param:
            gp_param['choiceList'] = task_param['choice_list']
            gp_params.append(_CHOICELIST_TEMPLATE.substitute(gp_param))
        # Construct the parameter list and indices for future reference.
        for param_name in parameter_map[data_type].parameter_names(task_param):
            gp_param_list.append(param_name.substitute(gp_param))
            gp_param_idx_list.append(_PARAM_INDEX_TEMPLATE.substitute(
                {'name': param_name.substitute(gp_param), 'idx': gp_param_idx}))
            gp_param_idx += 1
    # Construct the final parameter string.
    gp_params.append(_PARAM_RETURN_TEMPLATE.substitute({'paramList': convert_list(gp_param_list)}))
    return ''.join((''.join(gp_params), ''.join(gp_param_idx_list)))
<SYSTEM_TASK:> Builds the code block for the GPTool UpdateParameter method based on the input task_params. <END_TASK> <USER_TASK:> Description: def create_update_parameter(task_params, parameter_map): """ Builds the code block for the GPTool UpdateParameter method based on the input task_params. :param task_params: A list of task parameters from the task info structure. :return: A string representing the code block to the GPTool UpdateParameter method. """
def create_update_parameter(task_params, parameter_map):
    """
    Builds the code block for the GPTool UpdateParameter method based on
    the input task_params.

    :param task_params: A list of task parameters from the task info structure.
    :param parameter_map: Mapping from task data-type names to template objects.
    :return: A string representing the code block to the GPTool
        UpdateParameter method.
    """
    blocks = []
    for param in task_params:
        # Output parameters never need update-parameter logic.
        if param['direction'].upper() == 'OUTPUT':
            continue
        # Convert DataType; arrays get an ARRAY suffix.
        data_type = param['type'].upper()
        if 'dimensions' in param:
            data_type += 'ARRAY'
        if data_type in parameter_map:
            blocks.append(parameter_map[data_type].update_parameter().substitute(param))
    return ''.join(blocks)
<SYSTEM_TASK:> Builds the code block for the GPTool Execute method before the job is <END_TASK> <USER_TASK:> Description: def create_pre_execute(task_params, parameter_map): """ Builds the code block for the GPTool Execute method before the job is submitted based on the input task_params. :param task_params: A list of task parameters from the task info structure. :return: A string representing the code block to the GPTool Execute method. """
# Start with the shared initialization boilerplate for the Execute method.
gp_params = [_PRE_EXECUTE_INIT_TEMPLATE]
for task_param in task_params:
    # Only input parameters need marshalling before the job is submitted.
    if task_param['direction'].upper() == 'OUTPUT':
        continue
    # Convert DataType
    data_type = task_param['type'].upper()
    # Array-valued parameters are registered under '<TYPE>ARRAY'.
    if 'dimensions' in task_param:
        data_type += 'ARRAY'
    # NOTE(review): unmapped data types are silently skipped -- confirm intended.
    if data_type in parameter_map:
        gp_params.append(parameter_map[data_type].pre_execute().substitute(task_param))
# Append the shared cleanup boilerplate after all parameters are handled.
gp_params.append(_PRE_EXECUTE_CLEANUP_TEMPLATE)
return ''.join(gp_params)
def create_post_execute(task_params, parameter_map):
    """
    Builds the code block for the GPTool Execute method after the job is
    submitted based on the input task_params.

    :param task_params: A list of task parameters from the task info structure.
    :param parameter_map: Mapping from upper-cased data-type names to template
        providers exposing ``post_execute()``.
    :return: A string representing the code block to the GPTool Execute method.
    """
    snippets = []
    for task_param in task_params:
        # Post-execute code only unpacks results, so inputs are skipped.
        if task_param['direction'].upper() == 'INPUT':
            continue
        type_key = task_param['type'].upper()
        # Array-valued parameters are registered under '<TYPE>ARRAY'.
        if 'dimensions' in task_param:
            type_key += 'ARRAY'
        # Data types without a registered template are silently ignored.
        if type_key in parameter_map:
            snippets.append(
                parameter_map[type_key].post_execute().substitute(task_param))
    return ''.join(snippets)
<SYSTEM_TASK:> remove hardware package. <END_TASK> <USER_TASK:> Description: def remove_hwpack(name): """remove hardware package. :param name: hardware package name (e.g. 'Sanguino') :rtype: None """
# Resolve the hardware package directory under the hwpack root (path.py object).
targ_dlib = hwpack_dir() / name
log.debug('remove %s', targ_dlib)
# Recursively delete the whole package directory tree.
targ_dlib.rmtree()
<SYSTEM_TASK:> Add autodetected commands as entry points. <END_TASK> <USER_TASK:> Description: def setup_keyword(dist, _, value): # type: (setuptools.dist.Distribution, str, bool) -> None """Add autodetected commands as entry points. Args: dist: The distutils Distribution object for the project being installed. _: The keyword used in the setup function. Unused. value: The value set to the keyword in the setup function. If the value is not True, this function will do nothing. """
# The keyword is opt-in: anything other than True disables autodetection.
if value is not True:
    return
dist.entry_points = _ensure_entry_points_is_dict(dist.entry_points)
for command, subcommands in six.iteritems(_get_commands(dist)):
    # Every detected primary command dispatches through rcli's main().
    entry_point = '{command} = rcli.dispatcher:main'.format(
        command=command)
    entry_points = dist.entry_points.setdefault('console_scripts', [])
    # Avoid duplicate console_scripts entries on repeated setup() runs.
    if entry_point not in entry_points:
        entry_points.append(entry_point)
    # Subcommand entry points are registered under the dedicated 'rcli' group.
    dist.entry_points.setdefault('rcli', []).extend(subcommands)
<SYSTEM_TASK:> Read rcli configuration and write it out to the egg info. <END_TASK> <USER_TASK:> Description: def egg_info_writer(cmd, basename, filename): # type: (setuptools.command.egg_info.egg_info, str, str) -> None """Read rcli configuration and write it out to the egg info. Args: cmd: An egg info command instance to use for writing. basename: The basename of the file to write. filename: The full path of the file to write into the egg info. """
# Locate the project's setup.cfg; without one there is nothing to record.
setupcfg = next((f for f in setuptools.findall()
                 if os.path.basename(f) == 'setup.cfg'), None)
if not setupcfg:
    return
parser = six.moves.configparser.ConfigParser()  # type: ignore
parser.read(setupcfg)
# Only write egg info when an [rcli] section with content exists.
if not parser.has_section('rcli') or not parser.items('rcli'):
    return
config = dict(parser.items('rcli'))  # type: typing.Dict[str, typing.Any]
# Coerce ini string values: booleans first, then JSON, else leave the raw str.
for k, v in six.iteritems(config):
    if v.lower() in ('y', 'yes', 'true'):
        config[k] = True
    elif v.lower() in ('n', 'no', 'false'):
        config[k] = False
    else:
        try:
            config[k] = json.loads(v)
        except ValueError:
            pass
cmd.write_file(basename, filename, json.dumps(config))
<SYSTEM_TASK:> Find all commands belonging to the given distribution. <END_TASK> <USER_TASK:> Description: def _get_commands(dist # type: setuptools.dist.Distribution ): # type: (...) -> typing.Dict[str, typing.Set[str]] """Find all commands belonging to the given distribution. Args: dist: The Distribution to search for docopt-compatible docstrings that can be used to generate command entry points. Returns: A dictionary containing a mapping of primary commands to sets of subcommands. """
# Restrict the scan to .py files that belong to the distribution's packages.
py_files = (f for f in setuptools.findall()
            if os.path.splitext(f)[1].lower() == '.py')
pkg_files = (f for f in py_files
             if _get_package_name(f) in dist.packages)
commands = {}  # type: typing.Dict[str, typing.Set[str]]
for file_name in pkg_files:
    with open(file_name) as py_file:
        # Parse the source so docstrings can be inspected without importing.
        module = typing.cast(ast.Module, ast.parse(py_file.read()))
    module_name = _get_module_name(file_name)
    # Collect module-level, class-based and function-based commands alike.
    _append_commands(commands, module_name, _get_module_commands(module))
    _append_commands(commands, module_name, _get_class_commands(module))
    _append_commands(commands, module_name, _get_function_commands(module))
return commands
<SYSTEM_TASK:> Append entry point strings representing the given Command objects. <END_TASK> <USER_TASK:> Description: def _append_commands(dct, # type: typing.Dict[str, typing.Set[str]] module_name, # type: str commands # type:typing.Iterable[_EntryPoint] ): # type: (...) -> None """Append entry point strings representing the given Command objects. Args: dct: The dictionary to append with entry point strings. Each key will be a primary command with a value containing a list of entry point strings representing a Command. module_name: The name of the module in which the command object resides. commands: A list of Command objects to convert to entry point strings. """
for command in commands:
    # Entry point syntax: 'name[:subcommand] = module[:callable]'.
    entry_point = '{command}{subcommand} = {module}{callable}'.format(
        command=command.command,
        subcommand=(':{}'.format(command.subcommand)
                    if command.subcommand else ''),
        module=module_name,
        callable=(':{}'.format(command.callable)
                  if command.callable else ''),
    )
    # Group entry points by their primary command; the set dedupes repeats.
    dct.setdefault(command.command, set()).add(entry_point)
<SYSTEM_TASK:> Yield all Command objects represented by the python module. <END_TASK> <USER_TASK:> Description: def _get_module_commands(module): # type: (ast.Module) -> typing.Generator[_EntryPoint, None, None] """Yield all Command objects represented by the python module. Module commands consist of a docopt-style module docstring and a callable Command class. Args: module: An ast.Module object used to retrieve docopt-style commands. Yields: Command objects that represent entry points to append to setup.py. """
# A module command requires a class literally named 'Command' ...
cls = next((n for n in module.body
            if isinstance(n, ast.ClassDef) and n.name == 'Command'), None)
if not cls:
    return
# ... that is callable, i.e. defines __call__ (generator is consumed once).
methods = (n.name for n in cls.body if isinstance(n, ast.FunctionDef))
if '__call__' not in methods:
    return
docstring = ast.get_docstring(module)
# The module docstring supplies the docopt usage; no callable name is needed
# because the Command class itself is the entry point.
for commands, _ in usage.parse_commands(docstring):
    yield _EntryPoint(commands[0], next(iter(commands[1:]), None), None)
<SYSTEM_TASK:> Yield all Command objects represented by python functions in the module. <END_TASK> <USER_TASK:> Description: def _get_function_commands(module): # type: (ast.Module) -> typing.Generator[_EntryPoint, None, None] """Yield all Command objects represented by python functions in the module. Function commands consist of all top-level functions that contain docopt-style docstrings. Args: module: An ast.Module object used to retrieve docopt-style commands. Yields: Command objects that represent entry points to append to setup.py. """
# Only top-level functions are considered; nested defs are ignored.
nodes = (n for n in module.body if isinstance(n, ast.FunctionDef))
for func in nodes:
    docstring = ast.get_docstring(func)
    # Each docopt usage line becomes an entry point bound to the function.
    for commands, _ in usage.parse_commands(docstring):
        yield _EntryPoint(commands[0], next(iter(commands[1:]), None), func.name)
<SYSTEM_TASK:> Simply re-saves all objects from models listed in settings.TIMELINE_MODELS. Since the timeline <END_TASK> <USER_TASK:> Description: def handle(self, **kwargs): """ Simply re-saves all objects from models listed in settings.TIMELINE_MODELS. Since the timeline app is now following these models, it will register each item as it is re-saved. The purpose of this script is to register content in your database that existed prior to installing the timeline app. """
# Re-save every instance of each monitored model so the activity monitor's
# post-save handlers register rows that existed before the app was installed.
for item in settings.ACTIVITY_MONITOR_MODELS:
    app_label, model = item['model'].split('.', 1)
    content_type = ContentType.objects.get(app_label=app_label, model=model)
    model = content_type.model_class()
    objects = model.objects.all()
    # NOTE(review): 'object' shadows the builtin; harmless here but confusing.
    for object in objects:
        try:
            object.save()
        except Exception as e:
            # Best-effort: report and continue with the remaining objects.
            print("Error saving: {}".format(e))
<SYSTEM_TASK:> Converts the given filter specification to a CQL filter expression. <END_TASK> <USER_TASK:> Description: def make_filter_string(cls, filter_specification): """ Converts the given filter specification to a CQL filter expression. """
# Look up the CQL filter visitor from the component registry and let the
# specification drive it (visitor pattern) to build the expression.
registry = get_current_registry()
visitor_cls = registry.getUtility(IFilterSpecificationVisitor,
                                  name=EXPRESSION_KINDS.CQL)
visitor = visitor_cls()
filter_specification.accept(visitor)
return str(visitor.expression)
<SYSTEM_TASK:> Converts the given order specification to a CQL order expression. <END_TASK> <USER_TASK:> Description: def make_order_string(cls, order_specification): """ Converts the given order specification to a CQL order expression. """
# Look up the CQL order visitor from the component registry and let the
# specification drive it (visitor pattern) to build the expression.
registry = get_current_registry()
visitor_cls = registry.getUtility(IOrderSpecificationVisitor,
                                  name=EXPRESSION_KINDS.CQL)
visitor = visitor_cls()
order_specification.accept(visitor)
return str(visitor.expression)
def make_slice_key(cls, start_string, size_string):
    """
    Converts the given start and size query parts to a slice key.

    Raises ValueError when either part is not a number, when start is
    negative, or when size is not positive.

    :return: slice key
    :rtype: slice
    """
    def _to_int(text, name):
        # Shared conversion so both parameters report errors the same way.
        try:
            return int(text)
        except ValueError:
            raise ValueError('Query parameter "%s" must be a number.' % name)
    start = _to_int(start_string, 'start')
    if start < 0:
        raise ValueError('Query parameter "start" must be zero or '
                         'a positive number.')
    size = _to_int(size_string, 'size')
    if size < 1:
        raise ValueError('Query parameter "size" must be a positive '
                         'number.')
    return slice(start, start + size)
def make_slice_strings(cls, slice_key):
    """
    Converts the given slice key to start and size query parts.

    :param slice_key: slice whose start/stop delimit the requested window.
    :return: tuple of (start, size) as strings.
    """
    length = slice_key.stop - slice_key.start
    return (str(slice_key.start), str(length))
<SYSTEM_TASK:> decorator that allows us to match by expression or by xpath for each transformation method <END_TASK> <USER_TASK:> Description: def match(self, expression=None, xpath=None, namespaces=None): """decorator that allows us to match by expression or by xpath for each transformation method"""
# Lightweight record bundling a matching rule with its handler function.
class MatchObject(Dict):
    pass

def _match(function):
    # Register the handler together with its rule; evaluated later by get_match().
    self.matches.append(
        MatchObject(expression=expression, xpath=xpath, function=function,
                    namespaces=namespaces))
    def wrapper(self, *args, **params):
        return function(self, *args, **params)
    return wrapper
return _match
<SYSTEM_TASK:> for the given elem, return the @match function that will be applied <END_TASK> <USER_TASK:> Description: def get_match(self, elem): """for the given elem, return the @match function that will be applied"""
# Rules are tried in registration order; the first match wins.
for m in self.matches:
    # SECURITY NOTE(review): expressions run through eval() with 'elem' in
    # scope -- only trust rules registered by the application itself.
    if (m.expression is not None and eval(m.expression)==True) \
    or (m.xpath is not None and len(elem.xpath(m.xpath, namespaces=m.namespaces)) > 0):
        LOG.debug("=> match: %r" % m.expression)
        return m
<SYSTEM_TASK:> Ensure that the input element is immutable by the transformation. Returns a single element. <END_TASK> <USER_TASK:> Description: def Element(self, elem, **params): """Ensure that the input element is immutable by the transformation. Returns a single element."""
# Work on a deep copy so the caller's element tree is never mutated.
res = self.__call__(deepcopy(elem), **params)
# The transformation may yield several nodes; Element() returns only the first.
if len(res) > 0:
    return res[0]
else:
    return None
<SYSTEM_TASK:> read properties file into bunch. <END_TASK> <USER_TASK:> Description: def read_properties(filename): """read properties file into bunch. :param filename: string :rtype: bunch (dict like and object like) """
s = path(filename).text()
# RawConfigParser requires a section header; prepend a dummy one so plain
# key=value properties files can be parsed.
dummy_section = 'xxx'
cfgparser = configparser.RawConfigParser()
# avoid converting options to lower case
cfgparser.optionxform = str
cfgparser.readfp(StringIO('[%s]\n' % dummy_section + s))
bunch = AutoBunch()
# Copy every option into the bunch as attribute-style entries.
for x in cfgparser.options(dummy_section):
    setattr(bunch, x, cfgparser.get(dummy_section, str(x)))
return bunch
def create_request_url(self, interface, method, version, parameters):
    """Create the URL to submit to the Steam Web API

    interface: Steam Web API interface containing methods.
    method: The method to call.
    version: The version of the method.
    parameters: Parameters to supply to the method.
    """
    # A caller-supplied 'format' wins; otherwise fall back to the instance
    # default. The API key is always attached. Note: mutates 'parameters'.
    if 'format' in parameters:
        parameters['key'] = self.apikey
    else:
        parameters['key'] = self.apikey
        parameters['format'] = self.format
    version_part = 'v%04d' % version
    query = urlencode(parameters)
    return 'http://api.steampowered.com/{}/{}/{}/?{}'.format(
        interface, method, version_part, query)
<SYSTEM_TASK:> Open the given url and decode and return the response <END_TASK> <USER_TASK:> Description: def retrieve_request(self, url): """Open the given url and decode and return the response url: The url to open. """
try:
    data = urlopen(url)
except:
    # NOTE(review): bare except + sys.exit(2) aborts the whole process on any
    # failure (network error, KeyboardInterrupt, ...) -- consider narrowing
    # the exception type and raising instead of exiting.
    print("Error Retrieving Data from Steam")
    sys.exit(2)
# Responses are decoded eagerly; callers receive a text payload.
return data.read().decode('utf-8')
def return_data(self, data, format=None):
    """Format and return data appropriate to the requested API format.

    data: The data retured by the api request
    format: Optional override for the instance-level response format.
    """
    effective = self.format if format is None else format
    # Only JSON payloads are parsed; xml/vdf responses pass through verbatim.
    return json.loads(data) if effective == "json" else data
<SYSTEM_TASK:> Request the friends list of a given steam ID filtered by role. <END_TASK> <USER_TASK:> Description: def get_friends_list(self, steamID, relationship='all', format=None): """Request the friends list of a given steam ID filtered by role. steamID: The user ID relationship: Type of friend to request (all, friend) format: Return format. None defaults to json. (json, xml, vdf) """
parameters = {'steamid' : steamID, 'relationship' : relationship}
# A caller-supplied format overrides the instance default.
if format is not None:
    parameters['format'] = format
url = self.create_request_url(self.interface, 'GetFriendsList', 1, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
<SYSTEM_TASK:> Request the communities a steam id is banned in. <END_TASK> <USER_TASK:> Description: def get_player_bans(self, steamIDS, format=None): """Request the communities a steam id is banned in. steamIDS: Comma-delimited list of SteamIDs format: Return format. None defaults to json. (json, xml, vdf) """
# 'steamids' takes a comma-delimited list of ids in a single request.
parameters = {'steamids' : steamIDS}
if format is not None:
    parameters['format'] = format
url = self.create_request_url(self.interface, 'GetPlayerBans', 1, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
<SYSTEM_TASK:> Request a list of groups a user is subscribed to. <END_TASK> <USER_TASK:> Description: def get_user_group_list(self, steamID, format=None): """Request a list of groups a user is subscribed to. steamID: User ID format: Return format. None defaults to json. (json, xml, vdf) """
parameters = {'steamid' : steamID}
# A caller-supplied format overrides the instance default.
if format is not None:
    parameters['format'] = format
url = self.create_request_url(self.interface, 'GetUserGroupList', 1, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
<SYSTEM_TASK:> Request the steam id associated with a vanity url. <END_TASK> <USER_TASK:> Description: def resolve_vanity_url(self, vanityURL, url_type=1, format=None): """Request the steam id associated with a vanity url. vanityURL: The users vanity URL url_type: The type of vanity URL. 1 (default): Individual profile, 2: Group, 3: Official game group format: Return format. None defaults to json. (json, xml, vdf) """
# url_type: 1 = individual profile, 2 = group, 3 = official game group.
parameters = {'vanityurl' : vanityURL, "url_type" : url_type}
if format is not None:
    parameters['format'] = format
url = self.create_request_url(self.interface, 'ResolveVanityUrl', 1, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
<SYSTEM_TASK:> Request statistics showing global achievements that have been <END_TASK> <USER_TASK:> Description: def get_global_achievement_percentages_for_app(self, gameID, format=None): """Request statistics showing global achievements that have been unlocked. gameID: The id of the game. format: Return format. None defaults to json. (json, xml, vdf) """
parameters = {'gameid' : gameID}
# A caller-supplied format overrides the instance default.
if format is not None:
    parameters['format'] = format
# Note: this endpoint is version 2.
url = self.create_request_url(self.interface, 'GetGlobalAchievementPercentagesForApp', 2, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def get_global_stats_for_game(self, appID, count, names, startdate, enddate,
                              format=None):
    """Request global stats for a give game.

    appID: The app ID
    count: Number of stats to get.
    names: A list of names of stats to get.
    startdate: The start time to gather stats. Unix timestamp
    enddate: The end time to gather stats. Unix timestamp
    format: Return format. None defaults to json. (json, xml, vdf)
    """
    parameters = {
        'appid' : appID,
        'count' : count,
        'startdate' : startdate,
        'enddate' : enddate
    }
    # Stat names are passed as indexed query parameters: name[0], name[1], ...
    # (The previous code reused the ``count`` argument as its loop counter,
    # shadowing the parameter; enumerate avoids that.)
    for index, name in enumerate(names):
        parameters['name[%d]' % index] = name
    # A caller-supplied format overrides the instance default.
    if format is not None:
        parameters['format'] = format
    url = self.create_request_url(self.interface, 'GetGlobalStatsForGame',
                                  1, parameters)
    data = self.retrieve_request(url)
    return self.return_data(data, format=format)
<SYSTEM_TASK:> Request the current number of players for a given app. <END_TASK> <USER_TASK:> Description: def get_number_of_current_players(self, appID, format=None): """Request the current number of players for a given app. appID: The app ID format: Return format. None defaults to json. (json, xml, vdf) """
parameters = {'appid' : appID}
# A caller-supplied format overrides the instance default.
if format is not None:
    parameters['format'] = format
url = self.create_request_url(self.interface, 'GetNumberOfCurrentPlayers', 1, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
<SYSTEM_TASK:> Request the achievements for a given app and steam id. <END_TASK> <USER_TASK:> Description: def get_player_achievements(self, steamID, appID, language=None, format=None): """Request the achievements for a given app and steam id. steamID: Users steam ID appID: The app id language: The language to return the results in. None uses default. format: Return format. None defaults to json. (json, xml, vdf) """
parameters = {'steamid' : steamID, 'appid' : appID}
if format is not None:
    parameters['format'] = format
# 'l' selects the localization; fall back to the client-wide language.
if language is not None:
    parameters['l'] = language
else:
    parameters['l'] = self.language
url = self.create_request_url(self.interface, 'GetPlayerAchievements', 1, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
<SYSTEM_TASK:> Request the available achievements and stats for a game. <END_TASK> <USER_TASK:> Description: def get_schema_for_game(self, appID, language=None, format=None): """Request the available achievements and stats for a game. appID: The app id language: The language to return the results in. None uses default. format: Return format. None defaults to json. (json, xml, vdf) """
parameters = {'appid' : appID}
if format is not None:
    parameters['format'] = format
# 'l' selects the localization; fall back to the client-wide language.
if language is not None:
    parameters['l'] = language
else:
    parameters['l'] = self.language
# Note: this endpoint is version 2.
url = self.create_request_url(self.interface, 'GetSchemaForGame', 2, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
<SYSTEM_TASK:> Request the user stats for a given game. <END_TASK> <USER_TASK:> Description: def get_user_stats_for_game(self, steamID, appID, format=None): """Request the user stats for a given game. steamID: The users ID appID: The app id format: Return format. None defaults to json. (json, xml, vdf) """
parameters = {'steamid' : steamID, 'appid' : appID}
# A caller-supplied format overrides the instance default.
if format is not None:
    parameters['format'] = format
# Note: this endpoint is version 2.
url = self.create_request_url(self.interface, 'GetUserStatsForGame', 2, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
<SYSTEM_TASK:> Request a list of recently played games by a given steam id. <END_TASK> <USER_TASK:> Description: def get_recently_played_games(self, steamID, count=0, format=None): """Request a list of recently played games by a given steam id. steamID: The users ID count: Number of games to return. (0 is all recent games.) format: Return format. None defaults to json. (json, xml, vdf) """
# count == 0 requests all recently played games.
parameters = {'steamid' : steamID, 'count' : count}
if format is not None:
    parameters['format'] = format
url = self.create_request_url(self.interface, 'GetRecentlyPlayedGames', 1, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
<SYSTEM_TASK:> Request a list of games owned by a given steam id. <END_TASK> <USER_TASK:> Description: def get_owned_games(self, steamID, include_appinfo=1, include_played_free_games=0, appids_filter=None, format=None): """Request a list of games owned by a given steam id. steamID: The users id include_appinfo: boolean. include_played_free_games: boolean. appids_filter: a json encoded list of app ids. format: Return format. None defaults to json. (json, xml, vdf) """
parameters = {
    'steamid' : steamID,
    'include_appinfo' : include_appinfo,
    'include_played_free_games' : include_played_free_games
}
if format is not None:
    parameters['format'] = format
# appids_filter must already be a JSON-encoded list of app ids.
if appids_filter is not None:
    parameters['appids_filter'] = appids_filter
url = self.create_request_url(self.interface, 'GetOwnedGames', 1, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
<SYSTEM_TASK:> Gets all the quests needed to get the specified badge, and which are completed. <END_TASK> <USER_TASK:> Description: def get_community_badge_progress(self, steamID, badgeID, format=None): """Gets all the quests needed to get the specified badge, and which are completed. steamID: The users ID badgeID: The badge we're asking about format: Return format. None defaults to json. (json, xml, vdf) """
parameters = {'steamid' : steamID, 'badgeid' : badgeID}
# A caller-supplied format overrides the instance default.
if format is not None:
    parameters['format'] = format
url = self.create_request_url(self.interface, 'GetCommunityBadgeProgress', 1, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
<SYSTEM_TASK:> Returns valid lender SteamID if game currently played is borrowed. <END_TASK> <USER_TASK:> Description: def is_playing_shared_game(self, steamID, appid_playing, format=None): """Returns valid lender SteamID if game currently played is borrowed. steamID: The users ID appid_playing: The game player is currently playing format: Return format. None defaults to json. (json, xml, vdf) """
parameters = {'steamid' : steamID, 'appid_playing' : appid_playing}
# A caller-supplied format overrides the instance default.
if format is not None:
    parameters['format'] = format
url = self.create_request_url(self.interface, 'IsPlayingSharedGame', 1, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
<SYSTEM_TASK:> Request the Steam Web API status and time. <END_TASK> <USER_TASK:> Description: def get_server_info(self, format=None): """Request the Steam Web API status and time. format: Return format. None defaults to json. (json, xml, vdf) """
# GetServerInfo takes no arguments beyond the optional response format.
parameters = {}
if format is not None:
    parameters['format'] = format
url = self.create_request_url(self.interface, 'GetServerInfo', 1, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
def create_request_url(self, profile_type, steamID):
    """Create the url to submit to the Steam Community XML feed.

    profile_type: self.USER or self.GROUP, selecting the feed type.
    steamID: A 64-bit SteamID string (17+ digits) or a vanity/custom name.

    Raises ValueError for an unrecognized profile_type (previously this
    surfaced as an UnboundLocalError).
    """
    # 17+ digit strings are numeric 64-bit SteamIDs; anything else is a
    # vanity/custom URL name. (Raw string fixes the '\d' escape warning.)
    regex = re.compile(r'^\d{17,}$')
    if regex.match(steamID):
        if profile_type == self.USER:
            url = "http://steamcommunity.com/profiles/%s/?xml=1" % (steamID)
        elif profile_type == self.GROUP:
            url = "http://steamcommunity.com/gid/%s/memberslistxml/?xml=1" % (steamID)
        else:
            raise ValueError('Unknown profile type: %r' % (profile_type,))
    else:
        if profile_type == self.USER:
            url = "http://steamcommunity.com/id/%s/?xml=1" % (steamID)
        elif profile_type == self.GROUP:
            url = "http://steamcommunity.com/groups/%s/memberslistxml/?xml=1" % (steamID)
        else:
            raise ValueError('Unknown profile type: %r' % (profile_type,))
    return url
<SYSTEM_TASK:> Request the Steam Community XML feed for a specific user. <END_TASK> <USER_TASK:> Description: def get_user_info(self, steamID): """Request the Steam Community XML feed for a specific user."""
# The community feed is XML-only, so the format is fixed here.
url = self.create_request_url(self.USER, steamID)
data = self.retrieve_request(url)
return self.return_data(data, format='xml')
<SYSTEM_TASK:> Request the Steam Community XML feed for a specific group. <END_TASK> <USER_TASK:> Description: def get_group_info(self, steamID): """Request the Steam Community XML feed for a specific group."""
# The community feed is XML-only, so the format is fixed here.
url = self.create_request_url(self.GROUP, steamID)
data = self.retrieve_request(url)
return self.return_data(data, format='xml')
def export(cls, folder, particles, datetimes):
    """
    Export particle and datetime data to Pickled objects.

    This can be used to debug or to generate different output in the future.

    :param folder: destination directory; created if it does not exist.
    :param particles: picklable particle data, written to particles.pickle.
    :param datetimes: picklable datetime data, written to datetimes.pickle.
    """
    if not os.path.exists(folder):
        os.makedirs(folder)
    # Context managers guarantee the files are closed even if pickling fails
    # (the previous explicit open/close leaked the handle on error).
    particle_path = os.path.join(folder, 'particles.pickle')
    with open(particle_path, "wb") as f:
        pickle.dump(particles, f)
    datetimes_path = os.path.join(folder, 'datetimes.pickle')
    with open(datetimes_path, "wb") as f:
        pickle.dump(datetimes, f)
<SYSTEM_TASK:> Return the class pointed to be an app setting variable. <END_TASK> <USER_TASK:> Description: def import_settings_class(setting_name): """ Return the class pointed to be an app setting variable. """
# Fetch the dotted path from Django settings; fail loudly when unset.
config_value = getattr(settings, setting_name)
if config_value is None:
    raise ImproperlyConfigured("Required setting not found: {0}".format(setting_name))
# Delegate the actual import so error messages mention the setting name.
return import_class(config_value, setting_name)
<SYSTEM_TASK:> Import a class by name. <END_TASK> <USER_TASK:> Description: def import_class(import_path, setting_name=None): """ Import a class by name. """
mod_name, class_name = import_path.rsplit('.', 1) # import module mod = import_module_or_none(mod_name) if mod is not None: # Loaded module, get attribute try: return getattr(mod, class_name) except AttributeError: pass # For ImportError and AttributeError, raise the same exception. if setting_name: raise ImproperlyConfigured("{0} does not point to an existing class: {1}".format(setting_name, import_path)) else: raise ImproperlyConfigured("Class not found: {0}".format(import_path))
<SYSTEM_TASK:> Imports the module with the given name. <END_TASK> <USER_TASK:> Description: def import_module_or_none(module_label): """ Imports the module with the given name. Returns None if the module doesn't exist, but it does propagates import errors in deeper modules. """
try:
    # On Python 3, importlib has much more functionality compared to Python 2.
    return importlib.import_module(module_label)
except ImportError:
    # Based on code from django-oscar:
    # There are 2 reasons why there could be an ImportError:
    #
    # 1. Module does not exist. In that case, we ignore the import and return None
    # 2. Module exists but another ImportError occurred when trying to import the module.
    #    In that case, it is important to propagate the error.
    #
    # ImportError does not provide easy way to distinguish those two cases.
    # Fortunately, the traceback of the ImportError starts at __import__
    # statement. If the traceback has more than one frame, it means that
    # application was found and ImportError originates within the local app
    __, __, exc_traceback = sys.exc_info()
    frames = traceback.extract_tb(exc_traceback)
    # Drop frames that belong to the import machinery itself (importlib,
    # gevent import hooks, pydev debugger) before counting traceback depth.
    frames = [f for f in frames
              if f[0] != "<frozen importlib._bootstrap>" and  # Python 3.6
              f[0] != IMPORT_PATH_IMPORTLIB and
              not f[0].endswith(IMPORT_PATH_GEVENT) and
              not IMPORT_PATH_PYDEV in f[0]]
    if len(frames) > 1:
        raise
    return None
def check_suspension(user_twitter_id_list):
    """
    Looks up a list of user ids and checks whether they are currently suspended.

    Input:  - user_twitter_id_list: A python list of Twitter user ids in
              integer format to be looked-up.
    Outputs: - suspended_user_twitter_id_list: A python list of suspended
               Twitter user ids in integer format.
             - non_suspended_user_twitter_id_list: A python list of non
               suspended Twitter user ids in integer format.
             - unknown_status_user_twitter_id_list: A python list of unknown
               status Twitter user ids in integer format.
    """
    # Log into my application.
    twitter = login()
    # Initialize look-up lists.
    suspended_user_twitter_id_list = list()
    non_suspended_user_twitter_id_list = list()
    unknown_status_user_twitter_id_list = list()
    append_suspended_twitter_user = suspended_user_twitter_id_list.append
    append_non_suspended_twitter_user = non_suspended_user_twitter_id_list.append
    extend_unknown_status_twitter_user = unknown_status_user_twitter_id_list.extend
    # Split the id list into sub-lists of length 100 (the Twitter API limit).
    user_lookup_counter = 0
    user_lookup_time_window_start = time.perf_counter()
    for hundred_length_sub_list in chunks(list(user_twitter_id_list), 100):
        # Make safe twitter request.
        try:
            api_result, user_lookup_counter, user_lookup_time_window_start \
                = safe_twitter_request_handler(twitter_api_func=twitter.lookup_user,
                                               call_rate_limit=60,
                                               call_counter=user_lookup_counter,
                                               time_window_start=user_lookup_time_window_start,
                                               max_retries=10,
                                               wait_period=2,
                                               parameters=hundred_length_sub_list)
            hundred_length_sub_list = set(hundred_length_sub_list)
            # BUG FIX: users/lookup omits suspended/deactivated accounts from
            # its response, so every returned id is non-suspended and the
            # suspended ids are exactly the requested ids that were NOT
            # returned. The previous code compared returned ids against the
            # request list (always a subset), so the suspended list stayed
            # empty forever.
            returned_ids = set()
            for hydrated_user_object in api_result:
                hydrated_twitter_user_id = hydrated_user_object["id"]
                returned_ids.add(hydrated_twitter_user_id)
                append_non_suspended_twitter_user(hydrated_twitter_user_id)
            for missing_twitter_user_id in hundred_length_sub_list - returned_ids:
                append_suspended_twitter_user(missing_twitter_user_id)
        except (twython.TwythonError, URLError, BadStatusLine):
            # If the call is unsuccesful, we do not know about the status of
            # the users in this batch.
            extend_unknown_status_twitter_user(hundred_length_sub_list)
    return suspended_user_twitter_id_list, non_suspended_user_twitter_id_list, unknown_status_user_twitter_id_list
def asdict(self):
    """
    Copy the data back out of here and into a dict. Then return it.

    Nested DictionaryObject values are converted recursively, so libraries
    that check specifically for dict objects (such as ``json``) can consume
    the result directly.
    """
    result = {}
    for key in self._items:
        value = self._items[key]
        # Recurse into nested wrappers; plain values pass through untouched.
        result[key] = value.asdict() if isinstance(value, DictionaryObject) else value
    return result
<SYSTEM_TASK:> Return a jokes from one of the random services. <END_TASK> <USER_TASK:> Description: def get_joke(): """Return a jokes from one of the random services."""
# Keep retrying random services until one returns a joke string; services
# return None on failure, so this loops until a request succeeds.
joke = None
while joke is None:
    service_num = randint(1, NUM_SERVICES)
    joke = load_joke(service_num)
return joke
def load_joke(service_num=1):
    """Pulls the joke from the service based on the argument.

    It is expected that all services used will return a string when
    successful or None otherwise.
    """
    # Map service numbers to the service *functions* rather than their
    # results: the previous dict literal invoked all four services eagerly
    # (four network requests) just to pick one value.
    services = {
        1: ronswanson.get_joke,
        2: chucknorris.get_joke,
        3: catfacts.get_joke,
        4: dadjokes.get_joke,
    }
    service = services.get(service_num)
    # Unknown service numbers yield None, matching the previous behavior.
    return service() if service is not None else None
<SYSTEM_TASK:> Create CalibCurve instance from Bacon curve file <END_TASK> <USER_TASK:> Description: def read_14c(fl): """Create CalibCurve instance from Bacon curve file """
indata = pd.read_csv(fl, index_col=None, skiprows=11, header=None, names=['calbp', 'c14age', 'error', 'delta14c', 'sigma']) outcurve = CalibCurve(calbp=indata['calbp'], c14age=indata['c14age'], error=indata['error'], delta14c=indata['delta14c'], sigma=indata['sigma']) return outcurve
<SYSTEM_TASK:> Read a file to create a proxy record instance <END_TASK> <USER_TASK:> Description: def read_proxy(fl): """Read a file to create a proxy record instance """
outcore = ProxyRecord(data=pd.read_csv(fl, sep=r'\s*\,\s*', index_col=None, engine='python')) return outcore
<SYSTEM_TASK:> Return the "Authorization" HTTP header value to use for this URL. <END_TASK> <USER_TASK:> Description: def negotiate_header(url): """ Return the "Authorization" HTTP header value to use for this URL. """
# Kerberos service principals are keyed on the target host name.
hostname = urlparse(url).hostname
# Open a GSSAPI client context for the 'HTTP' service on that host;
# the first tuple element (a result code) is deliberately ignored.
_, krb_context = kerberos.authGSSClientInit('HTTP@%s' % hostname)
# authGSSClientStep goes over the network to the KDC (ie blocking).
yield threads.deferToThread(kerberos.authGSSClientStep, krb_context, '')
# Pull the base64 token produced by the successful GSSAPI step.
negotiate_details = kerberos.authGSSClientResponse(krb_context)
# inlineCallbacks-style return of the complete header value.
defer.returnValue('Negotiate ' + negotiate_details)
<SYSTEM_TASK:> Read the specification provided. It can either be a url or a file <END_TASK> <USER_TASK:> Description: def _get_specification(self, specification): """ Read the specification provided. It can either be a url or a file location. """
result = six.moves.urllib.parse.urlparse(specification) # If the specification has an http or an https scheme we can # retrieve it via an HTTP get request, else we try to open it # as a file. if result.scheme in ['http', 'https']: response = requests.get(specification) spec_json = response.json() else: with open(specification, 'r') as spec_file: spec_json = json.load(spec_file) return spec_json
<SYSTEM_TASK:> Get CSV file headers from the provided resource. <END_TASK> <USER_TASK:> Description: def _get_headers(self, resource): """ Get CSV file headers from the provided resource. """
# If the resource is a file we just open it up with the csv # reader (after being sure we're reading from the beginning # of the file if type(resource) == file: resource.seek(0) reader = csv.reader(resource) # If the resource is a basestring it is either a url or a file # location, so similarly to the specification mechanism we either # access it with an HTTP get request or by opening the file. elif isinstance(resource, basestring): result = six.moves.urllib.parse.urlparse(resource) if result.scheme in ['http', 'https']: with closing(requests.get(resource, stream=True)) as response: # Headers are alway the first row of a CSV file # so it's enought to just get the first line and # hopefully save bandwidth header_row = response.iter_lines().next() else: # It may seem weird to open up a csv file, read its header row # and then StringIO that into a new csv reader but this file # we want to close and we want the same interface for all with open(resource) as resource_file: reader = csv.reader(resource_file) header_row = reader.next() reader = csv.reader(cStringIO.StringIO(header_row)) else: raise IOError('Resource type not supported') return reader.next()
<SYSTEM_TASK:> The generated budget data package schema for this resource. <END_TASK> <USER_TASK:> Description: def schema(self): """ The generated budget data package schema for this resource. If the resource has any fields that do not conform to the provided specification this will raise a NotABudgetDataPackageException. """
if self.headers is None: raise exceptions.NoResourceLoadedException( 'Resource must be loaded to find schema') try: fields = self.specification.get('fields', {}) parsed = { 'primaryKey': 'id', 'fields': [{ 'name': header, 'type': fields[header]['type'], 'description': fields[header]['description'] } for header in self.headers] } except KeyError: raise exceptions.NotABudgetDataPackageException( 'Includes other fields than the Budget Data Package fields') return parsed
<SYSTEM_TASK:> Return the time of the tif data since the epoch <END_TASK> <USER_TASK:> Description: def get_time(self, idx=0): """Return the time of the tif data since the epoch The time is stored in the "61238" tag. """
timestr = SingleTifPhasics._get_meta_data(path=self.path, section="acquisition info", name="date & heure") if timestr is not None: timestr = timestr.split(".") # '2016-04-29_17h31m35s.00827' structtime = time.strptime(timestr[0], "%Y-%m-%d_%Hh%Mm%Ss") fracsec = float(timestr[1]) * 1e-5 # use calendar, because we need UTC thetime = calendar.timegm(structtime) + fracsec else: thetime = np.nan return thetime
<SYSTEM_TASK:> Not a pure generator expression as this has the side effect of <END_TASK> <USER_TASK:> Description: def _generate_and_store_mold_id_map(self, template_map, molds): """ Not a pure generator expression as this has the side effect of storing the resulting id and map it into a local dict. Produces a list of all valid mold_ids from the input template_keys. Internal function; NOT meant to be used outside of this class. """
# The required template filename every valid mold must end with.
name = self.req_tmpl_name
# NOTE(review): keys are visited in reverse-sorted order; presumably
# this controls which duplicate entry wins -- confirm before relying
# on it.
for key in sorted(template_map.keys(), reverse=True):
    # A valid mold key has exactly three '/'-separated segments and
    # ends with the required template name.
    if len(key.split('/')) == 3 and key.endswith(name):
        # Strip the leading text prefix and the trailing
        # '/<req_tmpl_name>' to recover the bare mold id.
        mold_id = key[len(self.text_prefix):-len(name) - 1]
        # Side effect: record the mold's path (also minus the
        # trailing '/<req_tmpl_name>') in the caller-supplied dict.
        molds[mold_id] = template_map[key][:-len(name) - 1]
        yield mold_id
<SYSTEM_TASK:> Lookup the filesystem path of a mold identifier. <END_TASK> <USER_TASK:> Description: def mold_id_to_path(self, mold_id, default=_marker): """ Lookup the filesystem path of a mold identifier. """
def handle_default(debug_msg=None): if debug_msg: logger.debug('mold_id_to_path:' + debug_msg, mold_id) if default is _marker: raise KeyError( 'Failed to lookup mold_id %s to a path' % mold_id) return default result = self.molds.get(mold_id) if result: return result if not self.tracked_entry_points: return handle_default() try: prefix, mold_basename = mold_id.split('/') except ValueError: return handle_default( 'mold_id %s not found and not in standard format') entry_point = self.tracked_entry_points.get(prefix) if entry_point is None: return handle_default() return join(self._entry_point_to_path(entry_point), mold_basename)