Dataset schema (per-column type and observed value statistics):

Column            Type     Values
repo              string   lengths 7 to 54
path              string   lengths 4 to 192
url               string   lengths 87 to 284
code              string   lengths 78 to 104k
code_tokens       list     -
docstring         string   lengths 1 to 46.9k
docstring_tokens  list     -
language          string   1 class
partition         string   3 classes
pydanny/watdarepo
watdarepo/main.py
https://github.com/pydanny/watdarepo/blob/e252a7ed7e6d9888b127c8865d28230f41847897/watdarepo/main.py#L90-L124
def watdarepo(repo_url, mode='d', guess=False, repo_aliases=REPO_ALIASES,
              hosting_services=HOSTING_SERVICES):
    """
    Gets vcs and hosting service for repo_urls

    :param repo_url: Repo URL of unknown type.
    :param mode: Return dictionary (default) or object
    :param guess: Whether or not to make guesses
    :returns: Hosting service or raises UnknownHostingService exception.
    """
    repo_url = unicode(repo_url)

    # Set the repo_url
    repo_data = {'repo_url': repo_url}

    # Get the VCS type
    try:
        repo_data['vcs'] = identify_vcs(repo_url, repo_aliases=repo_aliases)
    except UnknownVCS:
        repo_data['vcs'] = None

    # Get the hosting service
    try:
        repo_data['hosting_service'] = identify_hosting_service(
            repo_url, hosting_services=hosting_services)
    except UnknownHostingService:
        repo_data['hosting_service'] = None

    # If mode is 'c' or 'o', return an object representation of data.
    if mode in ('c', 'o'):
        # Define the c/o response class
        Repo = type(str('Repo'), (object,), repo_data)
        # Return the c/o response object
        return Repo()

    # return dictionary representation of data.
    return repo_data
[ "def", "watdarepo", "(", "repo_url", ",", "mode", "=", "'d'", ",", "guess", "=", "False", ",", "repo_aliases", "=", "REPO_ALIASES", ",", "hosting_services", "=", "HOSTING_SERVICES", ")", ":", "repo_url", "=", "unicode", "(", "repo_url", ")", "# Set the repo_url", "repo_data", "=", "{", "'repo_url'", ":", "repo_url", "}", "# Get the VCS type", "try", ":", "repo_data", "[", "'vcs'", "]", "=", "identify_vcs", "(", "repo_url", ",", "repo_aliases", "=", "repo_aliases", ")", "except", "UnknownVCS", ":", "repo_data", "[", "'vcs'", "]", "=", "None", "# Get the hosting service", "try", ":", "repo_data", "[", "'hosting_service'", "]", "=", "identify_hosting_service", "(", "repo_url", ",", "hosting_services", "=", "hosting_services", ")", "except", "UnknownHostingService", ":", "repo_data", "[", "'hosting_service'", "]", "=", "None", "# If mode is 'c' or 'o', return an object representation of data.", "if", "mode", "in", "(", "'c'", ",", "'o'", ")", ":", "# Define the c/o response class", "Repo", "=", "type", "(", "str", "(", "'Repo'", ")", ",", "(", "object", ",", ")", ",", "repo_data", ")", "# Return the c/o response object", "return", "Repo", "(", ")", "# return dictionary representation of data.", "return", "repo_data" ]
Gets vcs and hosting service for repo_urls

:param repo_url: Repo URL of unknown type.
:param mode: Return dictionary (default) or object
:param guess: Whether or not to make guesses
:returns: Hosting service or raises UnknownHostingService exception.
[ "Gets", "vcs", "and", "hosting", "service", "for", "repo_urls", ":", "param", "repo_url", ":", "Repo", "URL", "of", "unknown", "type", ".", ":", "param", "mode", ":", "Return", "dictionary", "(", "default", ")", "or", "object", ":", "param", "guess", ":", "Whether", "or", "not", "to", "make", "guesses", ":", "returns", ":", "Hosting", "service", "or", "raises", "UnknownHostingService", "exception", "." ]
python
train
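For context only (this note and sketch are not part of the dataset row): the function above returns a plain dict by default and a throwaway class instance for mode 'c'/'o'. A hedged usage sketch, assuming the package exports `watdarepo` at the top level and a Python 2 style environment (the body calls `unicode`):

# Illustrative only; the import path and environment are assumptions.
from watdarepo import watdarepo

data = watdarepo('https://github.com/pydanny/watdarepo')           # mode 'd': dict
print(data['vcs'], data['hosting_service'])

obj = watdarepo('https://github.com/pydanny/watdarepo', mode='o')  # object mode
print(obj.vcs, obj.hosting_service)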
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_qos.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_qos.py#L374-L387
def qos_map_cos_traffic_class_cos5(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    qos = ET.SubElement(config, "qos", xmlns="urn:brocade.com:mgmt:brocade-qos")
    map = ET.SubElement(qos, "map")
    cos_traffic_class = ET.SubElement(map, "cos-traffic-class")
    name_key = ET.SubElement(cos_traffic_class, "name")
    name_key.text = kwargs.pop('name')
    cos5 = ET.SubElement(cos_traffic_class, "cos5")
    cos5.text = kwargs.pop('cos5')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "qos_map_cos_traffic_class_cos5", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "qos", "=", "ET", ".", "SubElement", "(", "config", ",", "\"qos\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-qos\"", ")", "map", "=", "ET", ".", "SubElement", "(", "qos", ",", "\"map\"", ")", "cos_traffic_class", "=", "ET", ".", "SubElement", "(", "map", ",", "\"cos-traffic-class\"", ")", "name_key", "=", "ET", ".", "SubElement", "(", "cos_traffic_class", ",", "\"name\"", ")", "name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'name'", ")", "cos5", "=", "ET", ".", "SubElement", "(", "cos_traffic_class", ",", "\"cos5\"", ")", "cos5", ".", "text", "=", "kwargs", ".", "pop", "(", "'cos5'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
bcbio/bcbio-nextgen
bcbio/utils.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L782-L789
def get_perl_exports(tmpdir=None):
    """Environmental exports to use conda installed perl.
    """
    perl_path = os.path.dirname(perl_cmd())
    out = "unset PERL5LIB && export PATH=%s:\"$PATH\"" % (perl_path)
    if tmpdir:
        out += " && export TMPDIR=%s" % (tmpdir)
    return out
[ "def", "get_perl_exports", "(", "tmpdir", "=", "None", ")", ":", "perl_path", "=", "os", ".", "path", ".", "dirname", "(", "perl_cmd", "(", ")", ")", "out", "=", "\"unset PERL5LIB && export PATH=%s:\\\"$PATH\\\"\"", "%", "(", "perl_path", ")", "if", "tmpdir", ":", "out", "+=", "\" && export TMPDIR=%s\"", "%", "(", "tmpdir", ")", "return", "out" ]
Environmental exports to use conda installed perl.
[ "Environmental", "exports", "to", "use", "conda", "installed", "perl", "." ]
python
train
jut-io/jut-python-tools
jut/api/integrations.py
https://github.com/jut-io/jut-python-tools/blob/65574d23f51a7bbced9bb25010d02da5ca5d906f/jut/api/integrations.py#L11-L33
def get_webhook_url(deployment_name,
                    space='default',
                    data_source='webhook',
                    token_manager=None,
                    app_url=defaults.APP_URL,
                    **fields):
    """
    return the webhook URL for posting webhook data to
    """
    import_url = data_engine.get_import_data_url(deployment_name,
                                                 app_url=app_url,
                                                 token_manager=token_manager)
    api_key = deployments.get_apikey(deployment_name,
                                     token_manager=token_manager,
                                     app_url=app_url)

    fields_string = '&'.join(['%s=%s' % (key, value)
                              for (key, value) in fields.items()])

    return '%s/api/v1/import/webhook/?space=%s&data_source=%s&apikey=%s&%s' % \
           (import_url, space, data_source, api_key, fields_string)
[ "def", "get_webhook_url", "(", "deployment_name", ",", "space", "=", "'default'", ",", "data_source", "=", "'webhook'", ",", "token_manager", "=", "None", ",", "app_url", "=", "defaults", ".", "APP_URL", ",", "*", "*", "fields", ")", ":", "import_url", "=", "data_engine", ".", "get_import_data_url", "(", "deployment_name", ",", "app_url", "=", "app_url", ",", "token_manager", "=", "token_manager", ")", "api_key", "=", "deployments", ".", "get_apikey", "(", "deployment_name", ",", "token_manager", "=", "token_manager", ",", "app_url", "=", "app_url", ")", "fields_string", "=", "'&'", ".", "join", "(", "[", "'%s=%s'", "%", "(", "key", ",", "value", ")", "for", "(", "key", ",", "value", ")", "in", "fields", ".", "items", "(", ")", "]", ")", "return", "'%s/api/v1/import/webhook/?space=%s&data_source=%sk&apikey=%s&%s'", "%", "(", "import_url", ",", "space", ",", "data_source", ",", "api_key", ",", "fields_string", ")" ]
return the webhook URL for posting webhook data to
[ "return", "the", "webhook", "URL", "for", "posting", "webhook", "data", "to" ]
python
train
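The return statement above is plain string interpolation over a query string. A self-contained sketch of the same assembly with placeholder values (the URL and key below are made up for illustration):

# Self-contained illustration of the query-string assembly used above.
import_url = 'https://example-import.jut.io'   # placeholder endpoint
space, data_source, api_key = 'default', 'webhook', 'APIKEY123'
fields = {'event': 'deploy', 'env': 'prod'}

fields_string = '&'.join('%s=%s' % (k, v) for k, v in fields.items())
url = '%s/api/v1/import/webhook/?space=%s&data_source=%s&apikey=%s&%s' % (
    import_url, space, data_source, api_key, fields_string)
print(url)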
docker/docker-py
docker/models/volumes.py
https://github.com/docker/docker-py/blob/613d6aad83acc9931ff2ecfd6a6c7bd8061dc125/docker/models/volumes.py#L14-L25
def remove(self, force=False):
    """
    Remove this volume.

    Args:
        force (bool): Force removal of volumes that were already removed
            out of band by the volume driver plugin.

    Raises:
        :py:class:`docker.errors.APIError`
            If volume failed to remove.
    """
    return self.client.api.remove_volume(self.id, force=force)
[ "def", "remove", "(", "self", ",", "force", "=", "False", ")", ":", "return", "self", ".", "client", ".", "api", ".", "remove_volume", "(", "self", ".", "id", ",", "force", "=", "force", ")" ]
Remove this volume.

Args:
    force (bool): Force removal of volumes that were already removed
        out of band by the volume driver plugin.

Raises:
    :py:class:`docker.errors.APIError`
        If volume failed to remove.
[ "Remove", "this", "volume", "." ]
python
train
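A short usage sketch for the method above; `docker.from_env()` and `client.volumes.get()` are standard docker SDK entry points, though the volume name here is hypothetical and a running daemon is assumed:

# Usage sketch; requires a reachable Docker daemon and the docker SDK installed.
import docker

client = docker.from_env()
volume = client.volumes.get('my-volume')  # hypothetical volume name
volume.remove(force=True)                 # force past out-of-band driver removals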
iotile/coretools
iotilecore/iotile/core/hw/transport/adapterstream.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/transport/adapterstream.py#L232-L245
def disconnect(self):
    """Disconnect from the device that we are currently connected to."""

    if not self.connected:
        raise HardwareError("Cannot disconnect when we are not connected")

    # Close the streaming and tracing interfaces when we disconnect
    self._reports = None
    self._traces = None

    self._loop.run_coroutine(self.adapter.disconnect(0))

    self.connected = False
    self.connection_interrupted = False
    self.connection_string = None
[ "def", "disconnect", "(", "self", ")", ":", "if", "not", "self", ".", "connected", ":", "raise", "HardwareError", "(", "\"Cannot disconnect when we are not connected\"", ")", "# Close the streaming and tracing interfaces when we disconnect", "self", ".", "_reports", "=", "None", "self", ".", "_traces", "=", "None", "self", ".", "_loop", ".", "run_coroutine", "(", "self", ".", "adapter", ".", "disconnect", "(", "0", ")", ")", "self", ".", "connected", "=", "False", "self", ".", "connection_interrupted", "=", "False", "self", ".", "connection_string", "=", "None" ]
Disconnect from the device that we are currently connected to.
[ "Disconnect", "from", "the", "device", "that", "we", "are", "currently", "connected", "to", "." ]
python
train
ibis-project/ibis
ibis/expr/api.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/expr/api.py#L2449-L2479
def _array_slice(array, index):
    """Slice or index `array` at `index`.

    Parameters
    ----------
    index : int or ibis.expr.types.IntegerValue or slice

    Returns
    -------
    sliced_array : ibis.expr.types.ValueExpr
        If `index` is an ``int`` or :class:`~ibis.expr.types.IntegerValue`
        then the return type is the element type of `array`. If `index` is
        a ``slice`` then the return type is the same type as the input.
    """
    if isinstance(index, slice):
        start = index.start
        stop = index.stop
        if (start is not None and start < 0) or (
            stop is not None and stop < 0
        ):
            raise ValueError('negative slicing not yet supported')

        step = index.step

        if step is not None and step != 1:
            raise NotImplementedError('step can only be 1')

        op = ops.ArraySlice(array, start if start is not None else 0, stop)
    else:
        op = ops.ArrayIndex(array, index)
    return op.to_expr()
[ "def", "_array_slice", "(", "array", ",", "index", ")", ":", "if", "isinstance", "(", "index", ",", "slice", ")", ":", "start", "=", "index", ".", "start", "stop", "=", "index", ".", "stop", "if", "(", "start", "is", "not", "None", "and", "start", "<", "0", ")", "or", "(", "stop", "is", "not", "None", "and", "stop", "<", "0", ")", ":", "raise", "ValueError", "(", "'negative slicing not yet supported'", ")", "step", "=", "index", ".", "step", "if", "step", "is", "not", "None", "and", "step", "!=", "1", ":", "raise", "NotImplementedError", "(", "'step can only be 1'", ")", "op", "=", "ops", ".", "ArraySlice", "(", "array", ",", "start", "if", "start", "is", "not", "None", "else", "0", ",", "stop", ")", "else", ":", "op", "=", "ops", ".", "ArrayIndex", "(", "array", ",", "index", ")", "return", "op", ".", "to_expr", "(", ")" ]
Slice or index `array` at `index`.

Parameters
----------
index : int or ibis.expr.types.IntegerValue or slice

Returns
-------
sliced_array : ibis.expr.types.ValueExpr
    If `index` is an ``int`` or :class:`~ibis.expr.types.IntegerValue` then
    the return type is the element type of `array`. If `index` is a
    ``slice`` then the return type is the same type as the input.
[ "Slice", "or", "index", "array", "at", "index", "." ]
python
train
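The notable logic above is the normalization of a Python `slice` before the op is built: negative bounds are rejected, steps other than 1 are unsupported, and a missing `start` becomes 0. A plain-Python sketch of just that validation, independent of ibis (the helper name is made up):

def normalize_slice(index):
    """Mirror the validation in _array_slice: reject negatives and steps != 1."""
    start, stop, step = index.start, index.stop, index.step
    if (start is not None and start < 0) or (stop is not None and stop < 0):
        raise ValueError('negative slicing not yet supported')
    if step is not None and step != 1:
        raise NotImplementedError('step can only be 1')
    return (start if start is not None else 0, stop)

print(normalize_slice(slice(2, 5)))     # (2, 5)
print(normalize_slice(slice(None, 3)))  # (0, 3)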
uw-it-cte/uw-restclients-wheniwork
uw_wheniwork/__init__.py
https://github.com/uw-it-cte/uw-restclients-wheniwork/blob/0d3ca09d5bbe808fec12e5f943596570d33a1731/uw_wheniwork/__init__.py#L57-L70
def _post_resource(self, url, body):
    """
    When I Work POST method.
    """
    headers = {"Content-Type": "application/json",
               "Accept": "application/json"}

    if self.token:
        headers["W-Token"] = "%s" % self.token

    response = WhenIWork_DAO().postURL(url, headers, json.dumps(body))

    if not (response.status == 200 or response.status == 204):
        raise DataFailureException(url, response.status, response.data)

    return json.loads(response.data)
[ "def", "_post_resource", "(", "self", ",", "url", ",", "body", ")", ":", "headers", "=", "{", "\"Content-Type\"", ":", "\"application/json\"", ",", "\"Accept\"", ":", "\"application/json\"", "}", "if", "self", ".", "token", ":", "headers", "[", "\"W-Token\"", "]", "=", "\"%s\"", "%", "self", ".", "token", "response", "=", "WhenIWork_DAO", "(", ")", ".", "postURL", "(", "url", ",", "headers", ",", "json", ".", "dumps", "(", "body", ")", ")", "if", "not", "(", "response", ".", "status", "==", "200", "or", "response", ".", "status", "==", "204", ")", ":", "raise", "DataFailureException", "(", "url", ",", "response", ".", "status", ",", "response", ".", "data", ")", "return", "json", ".", "loads", "(", "response", ".", "data", ")" ]
When I Work POST method.
[ "When", "I", "Work", "POST", "method", "." ]
python
valid
barrust/mediawiki
mediawiki/utilities.py
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/utilities.py#L78-L83
def str_or_unicode(text):
    """ handle python 3 unicode and python 2.7 byte strings """
    encoding = sys.stdout.encoding
    if sys.version_info > (3, 0):
        return text.encode(encoding).decode(encoding)
    return text.encode(encoding)
[ "def", "str_or_unicode", "(", "text", ")", ":", "encoding", "=", "sys", ".", "stdout", ".", "encoding", "if", "sys", ".", "version_info", ">", "(", "3", ",", "0", ")", ":", "return", "text", ".", "encode", "(", "encoding", ")", ".", "decode", "(", "encoding", ")", "return", "text", ".", "encode", "(", "encoding", ")" ]
handle python 3 unicode and python 2.7 byte strings
[ "handle", "python", "3", "unicode", "and", "python", "2", ".", "7", "byte", "strings" ]
python
train
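On Python 3 the function above encodes and immediately decodes, so it hands back an equal `str`; on Python 2.7 it returns the encoded byte string. A minimal check of the Python 3 branch (hard-coding the encoding, since `sys.stdout.encoding` varies by terminal):

# Minimal demonstration of the Python 3 branch of str_or_unicode.
encoding = 'utf-8'  # stand-in for sys.stdout.encoding
text = 'naïve café'
assert text.encode(encoding).decode(encoding) == text  # round-trip returns an equal str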
joke2k/faker
faker/providers/__init__.py
https://github.com/joke2k/faker/blob/965824b61132e52d92d1a6ce470396dbbe01c96c/faker/providers/__init__.py#L96-L105
def random_int(self, min=0, max=9999, step=1):
    """
    Returns a random integer between two values.

    :param min: lower bound value (inclusive; default=0)
    :param max: upper bound value (inclusive; default=9999)
    :param step: range step (default=1)
    :returns: random integer between min and max
    """
    return self.generator.random.randrange(min, max + 1, step)
[ "def", "random_int", "(", "self", ",", "min", "=", "0", ",", "max", "=", "9999", ",", "step", "=", "1", ")", ":", "return", "self", ".", "generator", ".", "random", ".", "randrange", "(", "min", ",", "max", "+", "1", ",", "step", ")" ]
Returns a random integer between two values.

:param min: lower bound value (inclusive; default=0)
:param max: upper bound value (inclusive; default=9999)
:param step: range step (default=1)
:returns: random integer between min and max
[ "Returns", "a", "random", "integer", "between", "two", "values", "." ]
python
train
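Both bounds are inclusive because the call passes `max + 1` as the exclusive upper bound of `randrange`. A standalone check with the stdlib generator:

import random

# randrange(min, max + 1, step) mirrors faker's random_int: both ends inclusive.
samples = {random.randrange(0, 9 + 1, 3) for _ in range(1000)}
print(sorted(samples))  # a subset of [0, 3, 6, 9], with 9 reachable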
wakatime/wakatime
wakatime/stats.py
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/stats.py#L136-L162
def guess_lexer_using_modeline(text):
    """Guess lexer for given text using Vim modeline.

    Returns a tuple of (lexer, accuracy).
    """
    lexer, accuracy = None, None

    file_type = None
    try:
        file_type = get_filetype_from_buffer(text)
    except:  # pragma: nocover
        log.traceback(logging.DEBUG)

    if file_type is not None:
        try:
            lexer = get_lexer_by_name(file_type)
        except ClassNotFound:
            log.traceback(logging.DEBUG)

    if lexer is not None:
        try:
            accuracy = lexer.analyse_text(text)
        except:  # pragma: nocover
            log.traceback(logging.DEBUG)

    return lexer, accuracy
[ "def", "guess_lexer_using_modeline", "(", "text", ")", ":", "lexer", ",", "accuracy", "=", "None", ",", "None", "file_type", "=", "None", "try", ":", "file_type", "=", "get_filetype_from_buffer", "(", "text", ")", "except", ":", "# pragma: nocover", "log", ".", "traceback", "(", "logging", ".", "DEBUG", ")", "if", "file_type", "is", "not", "None", ":", "try", ":", "lexer", "=", "get_lexer_by_name", "(", "file_type", ")", "except", "ClassNotFound", ":", "log", ".", "traceback", "(", "logging", ".", "DEBUG", ")", "if", "lexer", "is", "not", "None", ":", "try", ":", "accuracy", "=", "lexer", ".", "analyse_text", "(", "text", ")", "except", ":", "# pragma: nocover", "log", ".", "traceback", "(", "logging", ".", "DEBUG", ")", "return", "lexer", ",", "accuracy" ]
Guess lexer for given text using Vim modeline. Returns a tuple of (lexer, accuracy).
[ "Guess", "lexer", "for", "given", "text", "using", "Vim", "modeline", "." ]
python
train
justquick/django-native-tags
native_tags/nodes.py
https://github.com/justquick/django-native-tags/blob/d40b976ee1cb13faeb04f0dedf02933d4274abf2/native_tags/nodes.py#L55-L77
def get_signature(token, contextable=False, comparison=False):
    """
    Gets the signature tuple for any native tag

    contextable searchs for ``as`` variable to update context
    comparison if true uses ``negate`` (p) to ``not`` the result (~p)

    returns (``tag_name``, ``args``, ``kwargs``)
    """
    bits = split(token.contents)
    args, kwargs = (), {}
    if comparison and bits[-1] == 'negate':
        kwargs['negate'] = True
        bits = bits[:-1]
    if contextable and len(bits) > 2 and bits[-2] == 'as':
        kwargs['varname'] = bits[-1]
        bits = bits[:-2]
    kwarg_re = re.compile(r'^([-\w]+)\=(.*)$')
    for bit in bits[1:]:
        match = kwarg_re.match(bit)
        if match:
            kwargs[str(match.group(1))] = force_unicode(match.group(2))
        else:
            args += (bit,)
    return bits[0], args, kwargs
[ "def", "get_signature", "(", "token", ",", "contextable", "=", "False", ",", "comparison", "=", "False", ")", ":", "bits", "=", "split", "(", "token", ".", "contents", ")", "args", ",", "kwargs", "=", "(", ")", ",", "{", "}", "if", "comparison", "and", "bits", "[", "-", "1", "]", "==", "'negate'", ":", "kwargs", "[", "'negate'", "]", "=", "True", "bits", "=", "bits", "[", ":", "-", "1", "]", "if", "contextable", "and", "len", "(", "bits", ")", ">", "2", "and", "bits", "[", "-", "2", "]", "==", "'as'", ":", "kwargs", "[", "'varname'", "]", "=", "bits", "[", "-", "1", "]", "bits", "=", "bits", "[", ":", "-", "2", "]", "kwarg_re", "=", "re", ".", "compile", "(", "r'^([-\\w]+)\\=(.*)$'", ")", "for", "bit", "in", "bits", "[", "1", ":", "]", ":", "match", "=", "kwarg_re", ".", "match", "(", "bit", ")", "if", "match", ":", "kwargs", "[", "str", "(", "match", ".", "group", "(", "1", ")", ")", "]", "=", "force_unicode", "(", "match", ".", "group", "(", "2", ")", ")", "else", ":", "args", "+=", "(", "bit", ",", ")", "return", "bits", "[", "0", "]", ",", "args", ",", "kwargs" ]
Gets the signature tuple for any native tag

contextable searchs for ``as`` variable to update context
comparison if true uses ``negate`` (p) to ``not`` the result (~p)

returns (``tag_name``, ``args``, ``kwargs``)
[ "Gets", "the", "signature", "tuple", "for", "any", "native", "tag", "contextable", "searchs", "for", "as", "variable", "to", "update", "context", "comparison", "if", "true", "uses", "negate", "(", "p", ")", "to", "not", "the", "result", "(", "~p", ")", "returns", "(", "tag_name", "args", "kwargs", ")" ]
python
train
Kortemme-Lab/klab
klab/bio/pdb.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pdb.py#L1014-L1056
def get_DB_references(self):
    ''' "The DBREF record provides cross-reference links between PDB sequences (what appears in SEQRES record) and a corresponding database sequence." - http://www.wwpdb.org/documentation/format33/sect3.html#DBREF
    '''
    _database_names = {
        'GB'    : 'GenBank',
        'PDB'   : 'Protein Data Bank',
        'UNP'   : 'UNIPROT',
        'NORINE': 'Norine',
        'TREMBL': 'UNIPROT',
    }
    DBref = {}
    for l in self.parsed_lines["DBREF "]:  # [l for l in self.lines if l.startswith('DBREF')]
        pdb_id = l[7:11]
        chain_id = l[12]
        seqBegin = int(l[14:18])
        insertBegin = l[18]
        seqEnd = int(l[20:24])
        insertEnd = l[24]
        database = _database_names[l[26:32].strip()]
        dbAccession = l[33:41].strip()
        dbIdCode = l[42:54].strip()
        dbseqBegin = int(l[55:60])
        idbnsBeg = l[60]
        dbseqEnd = int(l[62:67])
        dbinsEnd = l[67]

        DBref[pdb_id] = DBref.get(pdb_id, {})
        DBref[pdb_id][database] = DBref[pdb_id].get(database, {})
        if DBref[pdb_id][database].get(chain_id):
            if not(DBref[pdb_id][database][chain_id]['dbAccession'] == dbAccession and DBref[pdb_id][database][chain_id]['dbIdCode'] == dbIdCode):
                raise PDBParsingException('This code needs to be generalized. dbIdCode should really be a list to handle chimera cases.')
        else:
            DBref[pdb_id][database][chain_id] = {'dbAccession' : dbAccession, 'dbIdCode' : dbIdCode, 'PDBtoDB_mapping' : []}

        DBref[pdb_id][database][chain_id]['PDBtoDB_mapping'].append(
            {'PDBRange' : ("%d%s" % (seqBegin, insertBegin), "%d%s" % (seqEnd, insertEnd)),
             'dbRange'  : ("%d%s" % (dbseqBegin, idbnsBeg), "%d%s" % (dbseqEnd, dbinsEnd)),
            }
        )
    return DBref
[ "def", "get_DB_references", "(", "self", ")", ":", "_database_names", "=", "{", "'GB'", ":", "'GenBank'", ",", "'PDB'", ":", "'Protein Data Bank'", ",", "'UNP'", ":", "'UNIPROT'", ",", "'NORINE'", ":", "'Norine'", ",", "'TREMBL'", ":", "'UNIPROT'", ",", "}", "DBref", "=", "{", "}", "for", "l", "in", "self", ".", "parsed_lines", "[", "\"DBREF \"", "]", ":", "# [l for l in self.lines if l.startswith('DBREF')]", "pdb_id", "=", "l", "[", "7", ":", "11", "]", "chain_id", "=", "l", "[", "12", "]", "seqBegin", "=", "int", "(", "l", "[", "14", ":", "18", "]", ")", "insertBegin", "=", "l", "[", "18", "]", "seqEnd", "=", "int", "(", "l", "[", "20", ":", "24", "]", ")", "insertEnd", "=", "l", "[", "24", "]", "database", "=", "_database_names", "[", "l", "[", "26", ":", "32", "]", ".", "strip", "(", ")", "]", "dbAccession", "=", "l", "[", "33", ":", "41", "]", ".", "strip", "(", ")", "dbIdCode", "=", "l", "[", "42", ":", "54", "]", ".", "strip", "(", ")", "dbseqBegin", "=", "int", "(", "l", "[", "55", ":", "60", "]", ")", "idbnsBeg", "=", "l", "[", "60", "]", "dbseqEnd", "=", "int", "(", "l", "[", "62", ":", "67", "]", ")", "dbinsEnd", "=", "l", "[", "67", "]", "DBref", "[", "pdb_id", "]", "=", "DBref", ".", "get", "(", "pdb_id", ",", "{", "}", ")", "DBref", "[", "pdb_id", "]", "[", "database", "]", "=", "DBref", "[", "pdb_id", "]", ".", "get", "(", "database", ",", "{", "}", ")", "if", "DBref", "[", "pdb_id", "]", "[", "database", "]", ".", "get", "(", "chain_id", ")", ":", "if", "not", "(", "DBref", "[", "pdb_id", "]", "[", "database", "]", "[", "chain_id", "]", "[", "'dbAccession'", "]", "==", "dbAccession", "and", "DBref", "[", "pdb_id", "]", "[", "database", "]", "[", "chain_id", "]", "[", "'dbIdCode'", "]", "==", "dbIdCode", ")", ":", "raise", "PDBParsingException", "(", "'This code needs to be generalized. dbIdCode should really be a list to handle chimera cases.'", ")", "else", ":", "DBref", "[", "pdb_id", "]", "[", "database", "]", "[", "chain_id", "]", "=", "{", "'dbAccession'", ":", "dbAccession", ",", "'dbIdCode'", ":", "dbIdCode", ",", "'PDBtoDB_mapping'", ":", "[", "]", "}", "DBref", "[", "pdb_id", "]", "[", "database", "]", "[", "chain_id", "]", "[", "'PDBtoDB_mapping'", "]", ".", "append", "(", "{", "'PDBRange'", ":", "(", "\"%d%s\"", "%", "(", "seqBegin", ",", "insertBegin", ")", ",", "\"%d%s\"", "%", "(", "seqEnd", ",", "insertEnd", ")", ")", ",", "'dbRange'", ":", "(", "\"%d%s\"", "%", "(", "dbseqBegin", ",", "idbnsBeg", ")", ",", "\"%d%s\"", "%", "(", "dbseqEnd", ",", "dbinsEnd", ")", ")", ",", "}", ")", "return", "DBref" ]
"The DBREF record provides cross-reference links between PDB sequences (what appears in SEQRES record) and a corresponding database sequence." - http://www.wwpdb.org/documentation/format33/sect3.html#DBREF
[ "The", "DBREF", "record", "provides", "cross", "-", "reference", "links", "between", "PDB", "sequences", "(", "what", "appears", "in", "SEQRES", "record", ")", "and", "a", "corresponding", "database", "sequence", ".", "-", "http", ":", "//", "www", ".", "wwpdb", ".", "org", "/", "documentation", "/", "format33", "/", "sect3", ".", "html#DBREF" ]
python
train
mosdef-hub/mbuild
mbuild/compound.py
https://github.com/mosdef-hub/mbuild/blob/dcb80a2becd5d0e6a7e3e7bcb1b59793c46a2dd3/mbuild/compound.py#L775-L804
def _remove_references(self, removed_part):
    """Remove labels pointing to this part and vice versa. """
    removed_part.parent = None

    # Remove labels in the hierarchy pointing to this part.
    referrers_to_remove = set()
    for referrer in removed_part.referrers:
        if removed_part not in referrer.ancestors():
            for label, referred_part in list(referrer.labels.items()):
                if referred_part is removed_part:
                    del referrer.labels[label]
                    referrers_to_remove.add(referrer)
    removed_part.referrers -= referrers_to_remove

    # Remove labels in this part pointing into the hierarchy.
    labels_to_delete = []
    if isinstance(removed_part, Compound):
        for label, part in list(removed_part.labels.items()):
            if not isinstance(part, Compound):
                for p in part:
                    self._remove_references(p)
            elif removed_part not in part.ancestors():
                try:
                    part.referrers.discard(removed_part)
                except KeyError:
                    pass
                else:
                    labels_to_delete.append(label)
    for label in labels_to_delete:
        removed_part.labels.pop(label, None)
[ "def", "_remove_references", "(", "self", ",", "removed_part", ")", ":", "removed_part", ".", "parent", "=", "None", "# Remove labels in the hierarchy pointing to this part.", "referrers_to_remove", "=", "set", "(", ")", "for", "referrer", "in", "removed_part", ".", "referrers", ":", "if", "removed_part", "not", "in", "referrer", ".", "ancestors", "(", ")", ":", "for", "label", ",", "referred_part", "in", "list", "(", "referrer", ".", "labels", ".", "items", "(", ")", ")", ":", "if", "referred_part", "is", "removed_part", ":", "del", "referrer", ".", "labels", "[", "label", "]", "referrers_to_remove", ".", "add", "(", "referrer", ")", "removed_part", ".", "referrers", "-=", "referrers_to_remove", "# Remove labels in this part pointing into the hierarchy.", "labels_to_delete", "=", "[", "]", "if", "isinstance", "(", "removed_part", ",", "Compound", ")", ":", "for", "label", ",", "part", "in", "list", "(", "removed_part", ".", "labels", ".", "items", "(", ")", ")", ":", "if", "not", "isinstance", "(", "part", ",", "Compound", ")", ":", "for", "p", "in", "part", ":", "self", ".", "_remove_references", "(", "p", ")", "elif", "removed_part", "not", "in", "part", ".", "ancestors", "(", ")", ":", "try", ":", "part", ".", "referrers", ".", "discard", "(", "removed_part", ")", "except", "KeyError", ":", "pass", "else", ":", "labels_to_delete", ".", "append", "(", "label", ")", "for", "label", "in", "labels_to_delete", ":", "removed_part", ".", "labels", ".", "pop", "(", "label", ",", "None", ")" ]
Remove labels pointing to this part and vice versa.
[ "Remove", "labels", "pointing", "to", "this", "part", "and", "vice", "versa", "." ]
python
train
abourget/gevent-socketio
socketio/namespace.py
https://github.com/abourget/gevent-socketio/blob/1cdb1594a315326987a17ce0924ea448a82fab01/socketio/namespace.py#L365-L378
def error(self, error_name, error_message, msg_id=None, quiet=False):
    """Use this to use the configured ``error_handler`` yield an
    error message to your application.

    :param error_name: is a short string, to associate messages to recovery
                       methods
    :param error_message: is some human-readable text, describing the error
    :param msg_id: is used to associate with a request
    :param quiet: specific to error_handlers. The default doesn't send a
                  message to the user, but shows a debug message on the
                  developer console.
    """
    self.socket.error(error_name, error_message,
                      endpoint=self.ns_name, msg_id=msg_id, quiet=quiet)
[ "def", "error", "(", "self", ",", "error_name", ",", "error_message", ",", "msg_id", "=", "None", ",", "quiet", "=", "False", ")", ":", "self", ".", "socket", ".", "error", "(", "error_name", ",", "error_message", ",", "endpoint", "=", "self", ".", "ns_name", ",", "msg_id", "=", "msg_id", ",", "quiet", "=", "quiet", ")" ]
Use this to use the configured ``error_handler`` yield an error message
to your application.

:param error_name: is a short string, to associate messages to recovery
                   methods
:param error_message: is some human-readable text, describing the error
:param msg_id: is used to associate with a request
:param quiet: specific to error_handlers. The default doesn't send a
              message to the user, but shows a debug message on the
              developer console.
[ "Use", "this", "to", "use", "the", "configured", "error_handler", "yield", "an", "error", "message", "to", "your", "application", "." ]
python
valid
contentful/contentful-management.py
contentful_management/webhook.py
https://github.com/contentful/contentful-management.py/blob/707dd30883b98a10c7ff0f7f5bdb8edbdc1d8df0/contentful_management/webhook.py#L62-L76
def calls(self):
    """
    Provides access to call overview for the given webhook.

    API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/webhook-calls

    :return: :class:`WebhookWebhooksCallProxy <contentful_management.webhook_webhooks_call_proxy.WebhookWebhooksCallProxy>` object.
    :rtype: contentful.webhook_webhooks_call_proxy.WebhookWebhooksCallProxy

    Usage:

        >>> webhook_webhooks_call_proxy = webhook.calls()
        <WebhookWebhooksCallProxy space_id="cfexampleapi" webhook_id="my_webhook">
    """
    return WebhookWebhooksCallProxy(self._client, self.sys['space'].id, self.sys['id'])
[ "def", "calls", "(", "self", ")", ":", "return", "WebhookWebhooksCallProxy", "(", "self", ".", "_client", ",", "self", ".", "sys", "[", "'space'", "]", ".", "id", ",", "self", ".", "sys", "[", "'id'", "]", ")" ]
Provides access to call overview for the given webhook.

API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/webhook-calls

:return: :class:`WebhookWebhooksCallProxy <contentful_management.webhook_webhooks_call_proxy.WebhookWebhooksCallProxy>` object.
:rtype: contentful.webhook_webhooks_call_proxy.WebhookWebhooksCallProxy

Usage:

    >>> webhook_webhooks_call_proxy = webhook.calls()
    <WebhookWebhooksCallProxy space_id="cfexampleapi" webhook_id="my_webhook">
[ "Provides", "access", "to", "call", "overview", "for", "the", "given", "webhook", "." ]
python
train
openstates/billy
billy/web/public/views/bills.py
https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/web/public/views/bills.py#L29-L123
def get_context_data(self, *args, **kwargs):
    '''
    Context:
        If GET parameters are given:
        - search_text
        - form (FilterBillsForm)
        - long_description
        - description
        - get_params
        Otherwise, the only context item is an unbound FilterBillsForm.

    Templates:
        - Are specified in subclasses.
    '''
    context = super(RelatedBillsList, self).get_context_data(*args, **kwargs)
    metadata = context['metadata']
    FilterBillsForm = get_filter_bills_form(metadata)

    if self.request.GET:
        form = FilterBillsForm(self.request.GET)
        search_text = form.data.get('search_text')
        context.update(search_text=search_text)
        context.update(form=FilterBillsForm(self.request.GET))

        # human readable description of search
        description = []
        if metadata:
            description.append(metadata['name'])
        else:
            description = ['Search All']
        long_description = []
        chamber = form.data.get('chamber')
        session = form.data.get('session')
        type = form.data.get('type')
        status = form.data.getlist('status')
        subjects = form.data.getlist('subjects')
        sponsor = form.data.get('sponsor__leg_id')
        if chamber:
            if metadata:
                description.append(metadata['chambers'][chamber]['name'])
            else:
                description.extend([chamber.title(), 'Chamber'])
        description.append((type or 'Bill') + 's')
        if session:
            description.append(
                '(%s)' % metadata['session_details'][session]['display_name'])

        if 'signed' in status:
            long_description.append('which have been signed into law')
        elif 'passed_upper' in status and 'passed_lower' in status:
            long_description.append('which have passed both chambers')
        elif 'passed_lower' in status:
            chamber_name = (metadata['chambers']['lower']['name']
                            if metadata else 'lower chamber')
            long_description.append('which have passed the ' + chamber_name)
        elif 'passed_upper' in status:
            chamber_name = (metadata['chambers']['upper']['name']
                            if metadata else 'upper chamber')
            long_description.append('which have passed the ' + chamber_name)
        if sponsor:
            leg = db.legislators.find_one({'_all_ids': sponsor},
                                          fields=('full_name', '_id'))
            leg = leg['full_name']
            long_description.append('sponsored by ' + leg)
        if subjects:
            long_description.append('related to ' + ', '.join(subjects))
        if search_text:
            long_description.append(u'containing the term "{0}"'.format(
                search_text))
        context.update(long_description=long_description)
    else:
        if metadata:
            description = [metadata['name'], 'Bills']
        else:
            description = ['All Bills']
        context.update(form=FilterBillsForm())
    context.update(description=' '.join(description))

    # Add the correct path to paginated links.
    params = list(self.request.GET.lists())
    for k, v in params[:]:
        if k == 'page':
            params.remove((k, v))
    get_params = urllib.urlencode(params, doseq=True)
    context['get_params'] = get_params

    # Add the abbr.
    context['abbr'] = self.kwargs['abbr']
    return context
[ "def", "get_context_data", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "context", "=", "super", "(", "RelatedBillsList", ",", "self", ")", ".", "get_context_data", "(", "*", "args", ",", "*", "*", "kwargs", ")", "metadata", "=", "context", "[", "'metadata'", "]", "FilterBillsForm", "=", "get_filter_bills_form", "(", "metadata", ")", "if", "self", ".", "request", ".", "GET", ":", "form", "=", "FilterBillsForm", "(", "self", ".", "request", ".", "GET", ")", "search_text", "=", "form", ".", "data", ".", "get", "(", "'search_text'", ")", "context", ".", "update", "(", "search_text", "=", "search_text", ")", "context", ".", "update", "(", "form", "=", "FilterBillsForm", "(", "self", ".", "request", ".", "GET", ")", ")", "# human readable description of search", "description", "=", "[", "]", "if", "metadata", ":", "description", ".", "append", "(", "metadata", "[", "'name'", "]", ")", "else", ":", "description", "=", "[", "'Search All'", "]", "long_description", "=", "[", "]", "chamber", "=", "form", ".", "data", ".", "get", "(", "'chamber'", ")", "session", "=", "form", ".", "data", ".", "get", "(", "'session'", ")", "type", "=", "form", ".", "data", ".", "get", "(", "'type'", ")", "status", "=", "form", ".", "data", ".", "getlist", "(", "'status'", ")", "subjects", "=", "form", ".", "data", ".", "getlist", "(", "'subjects'", ")", "sponsor", "=", "form", ".", "data", ".", "get", "(", "'sponsor__leg_id'", ")", "if", "chamber", ":", "if", "metadata", ":", "description", ".", "append", "(", "metadata", "[", "'chambers'", "]", "[", "chamber", "]", "[", "'name'", "]", ")", "else", ":", "description", ".", "extend", "(", "[", "chamber", ".", "title", "(", ")", ",", "'Chamber'", "]", ")", "description", ".", "append", "(", "(", "type", "or", "'Bill'", ")", "+", "'s'", ")", "if", "session", ":", "description", ".", "append", "(", "'(%s)'", "%", "metadata", "[", "'session_details'", "]", "[", "session", "]", "[", "'display_name'", "]", ")", "if", "'signed'", "in", "status", ":", "long_description", ".", "append", "(", "'which have been signed into law'", ")", "elif", "'passed_upper'", "in", "status", "and", "'passed_lower'", "in", "status", ":", "long_description", ".", "append", "(", "'which have passed both chambers'", ")", "elif", "'passed_lower'", "in", "status", ":", "chamber_name", "=", "(", "metadata", "[", "'chambers'", "]", "[", "'lower'", "]", "[", "'name'", "]", "if", "metadata", "else", "'lower chamber'", ")", "long_description", ".", "append", "(", "'which have passed the '", "+", "chamber_name", ")", "elif", "'passed_upper'", "in", "status", ":", "chamber_name", "=", "(", "metadata", "[", "'chambers'", "]", "[", "'upper'", "]", "[", "'name'", "]", "if", "metadata", "else", "'upper chamber'", ")", "long_description", ".", "append", "(", "'which have passed the '", "+", "chamber_name", ")", "if", "sponsor", ":", "leg", "=", "db", ".", "legislators", ".", "find_one", "(", "{", "'_all_ids'", ":", "sponsor", "}", ",", "fields", "=", "(", "'full_name'", ",", "'_id'", ")", ")", "leg", "=", "leg", "[", "'full_name'", "]", "long_description", ".", "append", "(", "'sponsored by '", "+", "leg", ")", "if", "subjects", ":", "long_description", ".", "append", "(", "'related to '", "+", "', '", ".", "join", "(", "subjects", ")", ")", "if", "search_text", ":", "long_description", ".", "append", "(", "u'containing the term \"{0}\"'", ".", "format", "(", "search_text", ")", ")", "context", ".", "update", "(", "long_description", "=", "long_description", ")", "else", ":", "if", 
"metadata", ":", "description", "=", "[", "metadata", "[", "'name'", "]", ",", "'Bills'", "]", "else", ":", "description", "=", "[", "'All Bills'", "]", "context", ".", "update", "(", "form", "=", "FilterBillsForm", "(", ")", ")", "context", ".", "update", "(", "description", "=", "' '", ".", "join", "(", "description", ")", ")", "# Add the correct path to paginated links.", "params", "=", "list", "(", "self", ".", "request", ".", "GET", ".", "lists", "(", ")", ")", "for", "k", ",", "v", "in", "params", "[", ":", "]", ":", "if", "k", "==", "'page'", ":", "params", ".", "remove", "(", "(", "k", ",", "v", ")", ")", "get_params", "=", "urllib", ".", "urlencode", "(", "params", ",", "doseq", "=", "True", ")", "context", "[", "'get_params'", "]", "=", "get_params", "# Add the abbr.", "context", "[", "'abbr'", "]", "=", "self", ".", "kwargs", "[", "'abbr'", "]", "return", "context" ]
Context:
    If GET parameters are given:
    - search_text
    - form (FilterBillsForm)
    - long_description
    - description
    - get_params
    Otherwise, the only context item is an unbound FilterBillsForm.

Templates:
    - Are specified in subclasses.
[ "Context", ":", "If", "GET", "parameters", "are", "given", ":", "-", "search_text", "-", "form", "(", "FilterBillsForm", ")", "-", "long_description", "-", "description", "-", "get_params", "Otherwise", "the", "only", "context", "item", "is", "an", "unbound", "FilterBillsForm", "." ]
python
train
orbingol/NURBS-Python
geomdl/compatibility.py
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/compatibility.py#L257-L281
def generate_ctrlptsw2d_file(file_in='', file_out='ctrlptsw.txt'):
    """ Generates weighted control points from unweighted ones in 2-D.

    This function

    #. Takes in a 2-D control points file whose coordinates are organized in (x, y, z, w) format
    #. Converts into (x*w, y*w, z*w, w) format
    #. Saves the result to a file

    Therefore, the resultant file could be a direct input of the NURBS.Surface class.

    :param file_in: name of the input file (to be read)
    :type file_in: str
    :param file_out: name of the output file (to be saved)
    :type file_out: str
    :raises IOError: an error occurred reading or writing the file
    """
    # Read control points
    ctrlpts2d, size_u, size_v = _read_ctrltps2d_file(file_in)

    # Multiply control points by weight
    new_ctrlpts2d = generate_ctrlptsw2d(ctrlpts2d)

    # Save new control points
    _save_ctrlpts2d_file(new_ctrlpts2d, size_u, size_v, file_out)
[ "def", "generate_ctrlptsw2d_file", "(", "file_in", "=", "''", ",", "file_out", "=", "'ctrlptsw.txt'", ")", ":", "# Read control points", "ctrlpts2d", ",", "size_u", ",", "size_v", "=", "_read_ctrltps2d_file", "(", "file_in", ")", "# Multiply control points by weight", "new_ctrlpts2d", "=", "generate_ctrlptsw2d", "(", "ctrlpts2d", ")", "# Save new control points", "_save_ctrlpts2d_file", "(", "new_ctrlpts2d", ",", "size_u", ",", "size_v", ",", "file_out", ")" ]
Generates weighted control points from unweighted ones in 2-D.

This function

#. Takes in a 2-D control points file whose coordinates are organized in (x, y, z, w) format
#. Converts into (x*w, y*w, z*w, w) format
#. Saves the result to a file

Therefore, the resultant file could be a direct input of the NURBS.Surface class.

:param file_in: name of the input file (to be read)
:type file_in: str
:param file_out: name of the output file (to be saved)
:type file_out: str
:raises IOError: an error occurred reading or writing the file
[ "Generates", "weighted", "control", "points", "from", "unweighted", "ones", "in", "2", "-", "D", "." ]
python
train
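The file helpers in this entry are internal to geomdl, but the underlying transform is simple: multiply each Cartesian coordinate by its weight. A small in-memory sketch of that conversion (the helper name below is made up for illustration):

def weight_ctrlpts(ctrlpts2d):
    """Convert rows of (x, y, z, w) into (x*w, y*w, z*w, w)."""
    return [[(x * w, y * w, z * w, w) for (x, y, z, w) in row]
            for row in ctrlpts2d]

grid = [[(1.0, 2.0, 0.0, 0.5), (3.0, 1.0, 0.0, 1.0)]]
print(weight_ctrlpts(grid))  # [[(0.5, 1.0, 0.0, 0.5), (3.0, 1.0, 0.0, 1.0)]]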
mozilla/elasticutils
elasticutils/__init__.py
https://github.com/mozilla/elasticutils/blob/b880cc5d51fb1079b0581255ec664c1ec934656e/elasticutils/__init__.py#L1320-L1371
def _process_query(self, query):
    """Takes a key/val pair and returns the Elasticsearch code for it"""
    key, val = query
    field_name, field_action = split_field_action(key)

    # Boost by name__action overrides boost by name.
    boost = self.field_boosts.get(key)
    if boost is None:
        boost = self.field_boosts.get(field_name)

    handler_name = 'process_query_{0}'.format(field_action)

    if field_action and hasattr(self, handler_name):
        return getattr(self, handler_name)(field_name, val, field_action)

    elif field_action in QUERY_ACTION_MAP:
        return {
            QUERY_ACTION_MAP[field_action]: _boosted_value(
                field_name, field_action, key, val, boost)
        }

    elif field_action == 'query_string':
        # query_string has different syntax, so it's handled
        # differently.
        #
        # Note: query_string queries are not boosted with
        # .boost()---they're boosted in the query text itself.
        return {
            'query_string': {'default_field': field_name, 'query': val}
        }

    elif field_action in RANGE_ACTIONS:
        # Ranges are special and have a different syntax, so
        # we handle them separately.
        return {
            'range': {field_name: _boosted_value(
                field_action, field_action, key, val, boost)}
        }

    elif field_action == 'range':
        lower, upper = val
        value = {
            'gte': lower,
            'lte': upper,
        }
        if boost:
            value['boost'] = boost
        return {'range': {field_name: value}}

    raise InvalidFieldActionError(
        '%s is not a valid field action' % field_action)
[ "def", "_process_query", "(", "self", ",", "query", ")", ":", "key", ",", "val", "=", "query", "field_name", ",", "field_action", "=", "split_field_action", "(", "key", ")", "# Boost by name__action overrides boost by name.", "boost", "=", "self", ".", "field_boosts", ".", "get", "(", "key", ")", "if", "boost", "is", "None", ":", "boost", "=", "self", ".", "field_boosts", ".", "get", "(", "field_name", ")", "handler_name", "=", "'process_query_{0}'", ".", "format", "(", "field_action", ")", "if", "field_action", "and", "hasattr", "(", "self", ",", "handler_name", ")", ":", "return", "getattr", "(", "self", ",", "handler_name", ")", "(", "field_name", ",", "val", ",", "field_action", ")", "elif", "field_action", "in", "QUERY_ACTION_MAP", ":", "return", "{", "QUERY_ACTION_MAP", "[", "field_action", "]", ":", "_boosted_value", "(", "field_name", ",", "field_action", ",", "key", ",", "val", ",", "boost", ")", "}", "elif", "field_action", "==", "'query_string'", ":", "# query_string has different syntax, so it's handled", "# differently.", "#", "# Note: query_string queries are not boosted with", "# .boost()---they're boosted in the query text itself.", "return", "{", "'query_string'", ":", "{", "'default_field'", ":", "field_name", ",", "'query'", ":", "val", "}", "}", "elif", "field_action", "in", "RANGE_ACTIONS", ":", "# Ranges are special and have a different syntax, so", "# we handle them separately.", "return", "{", "'range'", ":", "{", "field_name", ":", "_boosted_value", "(", "field_action", ",", "field_action", ",", "key", ",", "val", ",", "boost", ")", "}", "}", "elif", "field_action", "==", "'range'", ":", "lower", ",", "upper", "=", "val", "value", "=", "{", "'gte'", ":", "lower", ",", "'lte'", ":", "upper", ",", "}", "if", "boost", ":", "value", "[", "'boost'", "]", "=", "boost", "return", "{", "'range'", ":", "{", "field_name", ":", "value", "}", "}", "raise", "InvalidFieldActionError", "(", "'%s is not a valid field action'", "%", "field_action", ")" ]
Takes a key/val pair and returns the Elasticsearch code for it
[ "Takes", "a", "key", "/", "val", "pair", "and", "returns", "the", "Elasticsearch", "code", "for", "it" ]
python
train
nicolargo/glances
glances/plugins/glances_sensors.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_sensors.py#L168-L217
def msg_curse(self, args=None, max_width=None):
    """Return the dict to display in the curse interface."""
    # Init the return message
    ret = []

    # Only process if stats exist and display plugin enable...
    if not self.stats or self.is_disable():
        return ret

    # Max size for the interface name
    name_max_width = max_width - 12

    # Header
    msg = '{:{width}}'.format('SENSORS', width=name_max_width)
    ret.append(self.curse_add_line(msg, "TITLE"))

    # Stats
    for i in self.stats:
        # Do not display anything if no battery are detected
        if i['type'] == 'battery' and i['value'] == []:
            continue
        # New line
        ret.append(self.curse_new_line())
        msg = '{:{width}}'.format(i["label"][:name_max_width],
                                  width=name_max_width)
        ret.append(self.curse_add_line(msg))
        if i['value'] in (b'ERR', b'SLP', b'UNK', b'NOS'):
            msg = '{:>13}'.format(i['value'])
            ret.append(self.curse_add_line(
                msg, self.get_views(item=i[self.get_key()],
                                    key='value',
                                    option='decoration')))
        else:
            if (args.fahrenheit and i['type'] != 'battery' and
                    i['type'] != 'fan_speed'):
                value = to_fahrenheit(i['value'])
                unit = 'F'
            else:
                value = i['value']
                unit = i['unit']
            try:
                msg = '{:>13.0f}{}'.format(value, unit)
                ret.append(self.curse_add_line(
                    msg, self.get_views(item=i[self.get_key()],
                                        key='value',
                                        option='decoration')))
            except (TypeError, ValueError):
                pass

    return ret
[ "def", "msg_curse", "(", "self", ",", "args", "=", "None", ",", "max_width", "=", "None", ")", ":", "# Init the return message", "ret", "=", "[", "]", "# Only process if stats exist and display plugin enable...", "if", "not", "self", ".", "stats", "or", "self", ".", "is_disable", "(", ")", ":", "return", "ret", "# Max size for the interface name", "name_max_width", "=", "max_width", "-", "12", "# Header", "msg", "=", "'{:{width}}'", ".", "format", "(", "'SENSORS'", ",", "width", "=", "name_max_width", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "\"TITLE\"", ")", ")", "# Stats", "for", "i", "in", "self", ".", "stats", ":", "# Do not display anything if no battery are detected", "if", "i", "[", "'type'", "]", "==", "'battery'", "and", "i", "[", "'value'", "]", "==", "[", "]", ":", "continue", "# New line", "ret", ".", "append", "(", "self", ".", "curse_new_line", "(", ")", ")", "msg", "=", "'{:{width}}'", ".", "format", "(", "i", "[", "\"label\"", "]", "[", ":", "name_max_width", "]", ",", "width", "=", "name_max_width", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "if", "i", "[", "'value'", "]", "in", "(", "b'ERR'", ",", "b'SLP'", ",", "b'UNK'", ",", "b'NOS'", ")", ":", "msg", "=", "'{:>13}'", ".", "format", "(", "i", "[", "'value'", "]", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "self", ".", "get_views", "(", "item", "=", "i", "[", "self", ".", "get_key", "(", ")", "]", ",", "key", "=", "'value'", ",", "option", "=", "'decoration'", ")", ")", ")", "else", ":", "if", "(", "args", ".", "fahrenheit", "and", "i", "[", "'type'", "]", "!=", "'battery'", "and", "i", "[", "'type'", "]", "!=", "'fan_speed'", ")", ":", "value", "=", "to_fahrenheit", "(", "i", "[", "'value'", "]", ")", "unit", "=", "'F'", "else", ":", "value", "=", "i", "[", "'value'", "]", "unit", "=", "i", "[", "'unit'", "]", "try", ":", "msg", "=", "'{:>13.0f}{}'", ".", "format", "(", "value", ",", "unit", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "self", ".", "get_views", "(", "item", "=", "i", "[", "self", ".", "get_key", "(", ")", "]", ",", "key", "=", "'value'", ",", "option", "=", "'decoration'", ")", ")", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "pass", "return", "ret" ]
Return the dict to display in the curse interface.
[ "Return", "the", "dict", "to", "display", "in", "the", "curse", "interface", "." ]
python
train
glyph/automat
automat/_core.py
https://github.com/glyph/automat/blob/80c6eb925eeef120443f4f9c81398bea629884b8/automat/_core.py#L69-L83
def addTransition(self, inState, inputSymbol, outState, outputSymbols):
    """
    Add the given transition to the outputSymbol. Raise ValueError if
    there is already a transition with the same inState and inputSymbol.
    """
    # keeping self._transitions in a flat list makes addTransition
    # O(n^2), but state machines don't tend to have hundreds of
    # transitions.
    for (anInState, anInputSymbol, anOutState, _) in self._transitions:
        if (anInState == inState and anInputSymbol == inputSymbol):
            raise ValueError(
                "already have transition from {} via {}".format(inState, inputSymbol))
    self._transitions.add(
        (inState, inputSymbol, outState, tuple(outputSymbols))
    )
[ "def", "addTransition", "(", "self", ",", "inState", ",", "inputSymbol", ",", "outState", ",", "outputSymbols", ")", ":", "# keeping self._transitions in a flat list makes addTransition", "# O(n^2), but state machines don't tend to have hundreds of", "# transitions.", "for", "(", "anInState", ",", "anInputSymbol", ",", "anOutState", ",", "_", ")", "in", "self", ".", "_transitions", ":", "if", "(", "anInState", "==", "inState", "and", "anInputSymbol", "==", "inputSymbol", ")", ":", "raise", "ValueError", "(", "\"already have transition from {} via {}\"", ".", "format", "(", "inState", ",", "inputSymbol", ")", ")", "self", ".", "_transitions", ".", "add", "(", "(", "inState", ",", "inputSymbol", ",", "outState", ",", "tuple", "(", "outputSymbols", ")", ")", ")" ]
Add the given transition to the outputSymbol. Raise ValueError if there is already a transition with the same inState and inputSymbol.
[ "Add", "the", "given", "transition", "to", "the", "outputSymbol", ".", "Raise", "ValueError", "if", "there", "is", "already", "a", "transition", "with", "the", "same", "inState", "and", "inputSymbol", "." ]
python
train
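The guard above keys only on the (inState, inputSymbol) pair, so one state may carry many transitions as long as their input symbols differ. A stripped-down, stand-alone version of just that guard (the real Automaton class carries more machinery):

# Simplified stand-in for the duplicate-transition guard in automat's Automaton.
transitions = set()

def add_transition(in_state, input_symbol, out_state, output_symbols):
    for (s, i, _, _) in transitions:
        if s == in_state and i == input_symbol:
            raise ValueError(
                "already have transition from {} via {}".format(in_state, input_symbol))
    transitions.add((in_state, input_symbol, out_state, tuple(output_symbols)))

add_transition('idle', 'start', 'running', ['begin'])
add_transition('running', 'stop', 'idle', ['end'])
# add_transition('idle', 'start', 'paused', [])  # would raise ValueError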
joke2k/django-environ
environ/environ.py
https://github.com/joke2k/django-environ/blob/c2620021614557abe197578f99deeef42af3e082/environ/environ.py#L163-L167
def float(self, var, default=NOTSET):
    """
    :rtype: float
    """
    return self.get_value(var, cast=float, default=default)
[ "def", "float", "(", "self", ",", "var", ",", "default", "=", "NOTSET", ")", ":", "return", "self", ".", "get_value", "(", "var", ",", "cast", "=", "float", ",", "default", "=", "default", ")" ]
:rtype: float
[ ":", "rtype", ":", "float" ]
python
train
Gandi/gandi.cli
gandi/cli/modules/webacc.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/webacc.py#L211-L217
def vhost_remove(cls, name):
    """ Delete a vhost in a webaccelerator """
    oper = cls.call('hosting.rproxy.vhost.delete', name)
    cls.echo('Deleting your virtual host %s' % name)
    cls.display_progress(oper)
    cls.echo('Your virtual host have been removed')
    return oper
[ "def", "vhost_remove", "(", "cls", ",", "name", ")", ":", "oper", "=", "cls", ".", "call", "(", "'hosting.rproxy.vhost.delete'", ",", "name", ")", "cls", ".", "echo", "(", "'Deleting your virtual host %s'", "%", "name", ")", "cls", ".", "display_progress", "(", "oper", ")", "cls", ".", "echo", "(", "'Your virtual host have been removed'", ")", "return", "oper" ]
Delete a vhost in a webaccelerator
[ "Delete", "a", "vhost", "in", "a", "webaccelerator" ]
python
train
salu133445/pypianoroll
pypianoroll/track.py
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/track.py#L239-L250
def pad(self, pad_length):
    """
    Pad the pianoroll with zeros at the end along the time axis.

    Parameters
    ----------
    pad_length : int
        The length to pad with zeros along the time axis.
    """
    self.pianoroll = np.pad(
        self.pianoroll, ((0, pad_length), (0, 0)), 'constant')
[ "def", "pad", "(", "self", ",", "pad_length", ")", ":", "self", ".", "pianoroll", "=", "np", ".", "pad", "(", "self", ".", "pianoroll", ",", "(", "(", "0", ",", "pad_length", ")", ",", "(", "0", ",", "0", ")", ")", ",", "'constant'", ")" ]
Pad the pianoroll with zeros at the end along the time axis.

Parameters
----------
pad_length : int
    The length to pad with zeros along the time axis.
[ "Pad", "the", "pianoroll", "with", "zeros", "at", "the", "end", "along", "the", "time", "axis", "." ]
python
train
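`np.pad` takes a per-axis `(before, after)` spec, so `((0, pad_length), (0, 0))` appends zero rows along the time axis (axis 0) and leaves the pitch axis untouched. A quick standalone check:

import numpy as np

roll = np.ones((4, 128), dtype=np.uint8)  # 4 time steps, 128 pitches
padded = np.pad(roll, ((0, 3), (0, 0)), 'constant')
print(padded.shape)       # (7, 128)
print(padded[-3:].sum())  # 0 -- the appended rows are all zeros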
davidsoncasey/quiver
plotter/plotter.py
https://github.com/davidsoncasey/quiver/blob/030153ba56d03ef9a500b4cee52a52e0f7cdc6a9/plotter/plotter.py#L172-L186
def make_plot(self):
    """Draw the plot on the figure attribute

    Uses matplotlib to draw and format the chart
    """
    X, Y, DX, DY = self._calc_partials()

    # Plot the values
    self.figure = plt.Figure()
    axes = self.figure.add_subplot(1, 1, 1)
    axes.quiver(X, Y, DX, DY, angles='xy', color='b', edgecolors=('k',))
    axes.axhline(color='black')
    axes.axvline(color='black')
    latex = sympy.latex(self.equation)
    axes.set_title(r'Direction field for $\frac{dy}{dx} = %s$' % latex,
                   y=1.01)
[ "def", "make_plot", "(", "self", ")", ":", "X", ",", "Y", ",", "DX", ",", "DY", "=", "self", ".", "_calc_partials", "(", ")", "# Plot the values", "self", ".", "figure", "=", "plt", ".", "Figure", "(", ")", "axes", "=", "self", ".", "figure", ".", "add_subplot", "(", "1", ",", "1", ",", "1", ")", "axes", ".", "quiver", "(", "X", ",", "Y", ",", "DX", ",", "DY", ",", "angles", "=", "'xy'", ",", "color", "=", "'b'", ",", "edgecolors", "=", "(", "'k'", ",", ")", ")", "axes", ".", "axhline", "(", "color", "=", "'black'", ")", "axes", ".", "axvline", "(", "color", "=", "'black'", ")", "latex", "=", "sympy", ".", "latex", "(", "self", ".", "equation", ")", "axes", ".", "set_title", "(", "r'Direction field for $\\frac{dy}{dx} = %s$'", "%", "latex", ",", "y", "=", "1.01", ")" ]
Draw the plot on the figure attribute.

Uses matplotlib to draw and format the chart
[ "Draw", "the", "plot", "on", "the", "figure", "attribute", "Uses", "matplotlib", "to", "draw", "and", "format", "the", "chart" ]
python
train
cedricbonhomme/Stegano
stegano/tools.py
https://github.com/cedricbonhomme/Stegano/blob/502e6303791d348e479290c22108551ba3be254f/stegano/tools.py#L71-L74
def bs(s: int) -> str:
    """Converts an int to its bits representation as a string of 0's and 1's.
    """
    return str(s) if s <= 1 else bs(s >> 1) + str(s & 1)
[ "def", "bs", "(", "s", ":", "int", ")", "->", "str", ":", "return", "str", "(", "s", ")", "if", "s", "<=", "1", "else", "bs", "(", "s", ">>", "1", ")", "+", "str", "(", "s", "&", "1", ")" ]
Converts an int to its bits representation as a string of 0's and 1's.
[ "Converts", "an", "int", "to", "its", "bits", "representation", "as", "a", "string", "of", "0", "s", "and", "1", "s", "." ]
python
train
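The recursion strips the lowest bit with `s & 1` while shifting right, so digits are appended most-significant-first as the calls unwind. For non-negative inputs it agrees with the built-in binary formatting:

def bs(s: int) -> str:
    """Converts an int to its bits representation as a string of 0's and 1's."""
    return str(s) if s <= 1 else bs(s >> 1) + str(s & 1)

for n in (0, 1, 5, 10, 255):
    assert bs(n) == format(n, 'b')
print(bs(10))  # 1010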
osrg/ryu
ryu/services/protocols/bgp/processor.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/processor.py#L292-L316
def _cmp_by_local_pref(path1, path2):
    """Selects a path with highest local-preference.

    Unlike the weight attribute, which is only relevant to the local
    router, local preference is an attribute that routers exchange in the
    same AS. Highest local-pref is preferred. If we cannot decide, we
    return None.
    """
    # TODO(PH): Revisit this when BGPS has concept of policy to be applied to
    # in-bound NLRIs.
    # Default local-pref values is 100
    lp1 = path1.get_pattr(BGP_ATTR_TYPE_LOCAL_PREF)
    lp2 = path2.get_pattr(BGP_ATTR_TYPE_LOCAL_PREF)
    if not (lp1 and lp2):
        return None

    # Highest local-preference value is preferred.
    lp1 = lp1.value
    lp2 = lp2.value
    if lp1 > lp2:
        return path1
    elif lp2 > lp1:
        return path2
    else:
        return None
[ "def", "_cmp_by_local_pref", "(", "path1", ",", "path2", ")", ":", "# TODO(PH): Revisit this when BGPS has concept of policy to be applied to", "# in-bound NLRIs.", "# Default local-pref values is 100", "lp1", "=", "path1", ".", "get_pattr", "(", "BGP_ATTR_TYPE_LOCAL_PREF", ")", "lp2", "=", "path2", ".", "get_pattr", "(", "BGP_ATTR_TYPE_LOCAL_PREF", ")", "if", "not", "(", "lp1", "and", "lp2", ")", ":", "return", "None", "# Highest local-preference value is preferred.", "lp1", "=", "lp1", ".", "value", "lp2", "=", "lp2", ".", "value", "if", "lp1", ">", "lp2", ":", "return", "path1", "elif", "lp2", ">", "lp1", ":", "return", "path2", "else", ":", "return", "None" ]
Selects a path with highest local-preference.

Unlike the weight attribute, which is only relevant to the local router,
local preference is an attribute that routers exchange in the same AS.
Highest local-pref is preferred. If we cannot decide, we return None.
[ "Selects", "a", "path", "with", "highest", "local", "-", "preference", "." ]
python
train
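A minimal sketch of the tie-break, assuming the _cmp_by_local_pref function above is in scope; FakePath and the Pref namedtuple are hypothetical stand-ins for Ryu's path and attribute objects (5 is the standard LOCAL_PREF path-attribute type code):

from collections import namedtuple

BGP_ATTR_TYPE_LOCAL_PREF = 5
Pref = namedtuple('Pref', 'value')

class FakePath:
    def __init__(self, local_pref):
        self._lp = None if local_pref is None else Pref(local_pref)

    def get_pattr(self, attr_type):
        return self._lp

p1, p2 = FakePath(200), FakePath(100)
assert _cmp_by_local_pref(p1, p2) is p1                 # higher local-pref wins
assert _cmp_by_local_pref(p1, FakePath(None)) is None   # undecidable without both values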
PyCQA/pylint
pylint/message/message_handler_mix_in.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/message/message_handler_mix_in.py#L350-L400
def print_full_documentation(self, stream=None): """output a full documentation in ReST format""" if not stream: stream = sys.stdout print("Pylint global options and switches", file=stream) print("----------------------------------", file=stream) print("", file=stream) print("Pylint provides global options and switches.", file=stream) print("", file=stream) by_checker = {} for checker in self.get_checkers(): if checker.name == "master": if checker.options: for section, options in checker.options_by_section(): if section is None: title = "General options" else: title = "%s options" % section.capitalize() print(title, file=stream) print("~" * len(title), file=stream) _rest_format_section(stream, None, options) print("", file=stream) else: name = checker.name try: by_checker[name]["options"] += checker.options_and_values() by_checker[name]["msgs"].update(checker.msgs) by_checker[name]["reports"] += checker.reports except KeyError: by_checker[name] = { "options": list(checker.options_and_values()), "msgs": dict(checker.msgs), "reports": list(checker.reports), } print("Pylint checkers' options and switches", file=stream) print("-------------------------------------", file=stream) print("", file=stream) print("Pylint checkers can provide three set of features:", file=stream) print("", file=stream) print("* options that control their execution,", file=stream) print("* messages that they can raise,", file=stream) print("* reports that they can generate.", file=stream) print("", file=stream) print("Below is a list of all checkers and their features.", file=stream) print("", file=stream) for checker, info in sorted(by_checker.items()): self._print_checker_doc(checker, info, stream=stream)
[ "def", "print_full_documentation", "(", "self", ",", "stream", "=", "None", ")", ":", "if", "not", "stream", ":", "stream", "=", "sys", ".", "stdout", "print", "(", "\"Pylint global options and switches\"", ",", "file", "=", "stream", ")", "print", "(", "\"----------------------------------\"", ",", "file", "=", "stream", ")", "print", "(", "\"\"", ",", "file", "=", "stream", ")", "print", "(", "\"Pylint provides global options and switches.\"", ",", "file", "=", "stream", ")", "print", "(", "\"\"", ",", "file", "=", "stream", ")", "by_checker", "=", "{", "}", "for", "checker", "in", "self", ".", "get_checkers", "(", ")", ":", "if", "checker", ".", "name", "==", "\"master\"", ":", "if", "checker", ".", "options", ":", "for", "section", ",", "options", "in", "checker", ".", "options_by_section", "(", ")", ":", "if", "section", "is", "None", ":", "title", "=", "\"General options\"", "else", ":", "title", "=", "\"%s options\"", "%", "section", ".", "capitalize", "(", ")", "print", "(", "title", ",", "file", "=", "stream", ")", "print", "(", "\"~\"", "*", "len", "(", "title", ")", ",", "file", "=", "stream", ")", "_rest_format_section", "(", "stream", ",", "None", ",", "options", ")", "print", "(", "\"\"", ",", "file", "=", "stream", ")", "else", ":", "name", "=", "checker", ".", "name", "try", ":", "by_checker", "[", "name", "]", "[", "\"options\"", "]", "+=", "checker", ".", "options_and_values", "(", ")", "by_checker", "[", "name", "]", "[", "\"msgs\"", "]", ".", "update", "(", "checker", ".", "msgs", ")", "by_checker", "[", "name", "]", "[", "\"reports\"", "]", "+=", "checker", ".", "reports", "except", "KeyError", ":", "by_checker", "[", "name", "]", "=", "{", "\"options\"", ":", "list", "(", "checker", ".", "options_and_values", "(", ")", ")", ",", "\"msgs\"", ":", "dict", "(", "checker", ".", "msgs", ")", ",", "\"reports\"", ":", "list", "(", "checker", ".", "reports", ")", ",", "}", "print", "(", "\"Pylint checkers' options and switches\"", ",", "file", "=", "stream", ")", "print", "(", "\"-------------------------------------\"", ",", "file", "=", "stream", ")", "print", "(", "\"\"", ",", "file", "=", "stream", ")", "print", "(", "\"Pylint checkers can provide three set of features:\"", ",", "file", "=", "stream", ")", "print", "(", "\"\"", ",", "file", "=", "stream", ")", "print", "(", "\"* options that control their execution,\"", ",", "file", "=", "stream", ")", "print", "(", "\"* messages that they can raise,\"", ",", "file", "=", "stream", ")", "print", "(", "\"* reports that they can generate.\"", ",", "file", "=", "stream", ")", "print", "(", "\"\"", ",", "file", "=", "stream", ")", "print", "(", "\"Below is a list of all checkers and their features.\"", ",", "file", "=", "stream", ")", "print", "(", "\"\"", ",", "file", "=", "stream", ")", "for", "checker", ",", "info", "in", "sorted", "(", "by_checker", ".", "items", "(", ")", ")", ":", "self", ".", "_print_checker_doc", "(", "checker", ",", "info", ",", "stream", "=", "stream", ")" ]
output a full documentation in ReST format
[ "output", "a", "full", "documentation", "in", "ReST", "format" ]
python
test
obriencj/python-javatools
javatools/__init__.py
https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/__init__.py#L1376-L1389
def get_arg_type_descriptors(self): """ The parameter type descriptor list for a method, or an empty tuple for a field. Type descriptors are shorthand identifiers for the builtin java types. """ if not self.is_method: return tuple() tp = _typeseq(self.get_descriptor()) tp = _typeseq(tp[0][1:-1]) return tp
[ "def", "get_arg_type_descriptors", "(", "self", ")", ":", "if", "not", "self", ".", "is_method", ":", "return", "tuple", "(", ")", "tp", "=", "_typeseq", "(", "self", ".", "get_descriptor", "(", ")", ")", "tp", "=", "_typeseq", "(", "tp", "[", "0", "]", "[", "1", ":", "-", "1", "]", ")", "return", "tp" ]
The parameter type descriptor list for a method, or an empty tuple for a field. Type descriptors are shorthand identifiers for the builtin java types.
[ "The", "parameter", "type", "descriptor", "list", "for", "a", "method", "or", "None", "for", "a", "field", ".", "Type", "descriptors", "are", "shorthand", "identifiers", "for", "the", "builtin", "java", "types", "." ]
python
train
tango-controls/pytango
tango/server.py
https://github.com/tango-controls/pytango/blob/9cf78c517c9cdc1081ff6d080a9646a740cc1d36/tango/server.py#L220-L237
def __patch_attr_methods(tango_device_klass, attribute): """ Checks if the read and write methods have the correct signature. If a read/write method doesn't have a parameter (the traditional Attribute), then the method is wrapped into another method to make this work. :param tango_device_klass: a DeviceImpl class :type tango_device_klass: class :param attribute: the attribute data information :type attribute: AttrData """ if attribute.attr_write in (AttrWriteType.READ, AttrWriteType.READ_WRITE): __patch_read_method(tango_device_klass, attribute) if attribute.attr_write in (AttrWriteType.WRITE, AttrWriteType.READ_WRITE): __patch_write_method(tango_device_klass, attribute)
[ "def", "__patch_attr_methods", "(", "tango_device_klass", ",", "attribute", ")", ":", "if", "attribute", ".", "attr_write", "in", "(", "AttrWriteType", ".", "READ", ",", "AttrWriteType", ".", "READ_WRITE", ")", ":", "__patch_read_method", "(", "tango_device_klass", ",", "attribute", ")", "if", "attribute", ".", "attr_write", "in", "(", "AttrWriteType", ".", "WRITE", ",", "AttrWriteType", ".", "READ_WRITE", ")", ":", "__patch_write_method", "(", "tango_device_klass", ",", "attribute", ")" ]
Checks if the read and write methods have the correct signature. If a read/write method doesn't have a parameter (the traditional Attribute), then the method is wrapped into another method to make this work. :param tango_device_klass: a DeviceImpl class :type tango_device_klass: class :param attribute: the attribute data information :type attribute: AttrData
[ "Checks", "if", "the", "read", "and", "write", "methods", "have", "the", "correct", "signature", ".", "If", "a", "read", "/", "write", "method", "doesn", "t", "have", "a", "parameter", "(", "the", "traditional", "Attribute", ")", "then", "the", "method", "is", "wrapped", "into", "another", "method", "to", "make", "this", "work", "." ]
python
train
mitsei/dlkit
dlkit/json_/authorization/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/authorization/sessions.py#L1823-L1837
def use_plenary_vault_view(self): """A complete view of the ``Authorization`` and ``Vault`` returns is desired. Methods will return what is requested or result in an error. This view is used when greater precision is desired at the expense of interoperability. *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinLookupSession.use_plenary_bin_view self._catalog_view = PLENARY if self._catalog_session is not None: self._catalog_session.use_plenary_catalog_view()
[ "def", "use_plenary_vault_view", "(", "self", ")", ":", "# Implemented from template for", "# osid.resource.BinLookupSession.use_plenary_bin_view", "self", ".", "_catalog_view", "=", "PLENARY", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "self", ".", "_catalog_session", ".", "use_plenary_catalog_view", "(", ")" ]
A complete view of the ``Authorization`` and ``Vault`` returns is desired. Methods will return what is requested or result in an error. This view is used when greater precision is desired at the expense of interoperability. *compliance: mandatory -- This method must be implemented.*
[ "A", "complete", "view", "of", "the", "Authorization", "and", "Vault", "returns", "is", "desired", "." ]
python
train
molmod/molmod
molmod/binning.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/binning.py#L116-L152
def _setup_grid(self, cutoff, unit_cell, grid): """Choose a proper grid for the binning process""" if grid is None: # automatically choose a decent grid if unit_cell is None: grid = cutoff/2.9 else: # The following would be faster, but it is not reliable # enough yet. #grid = unit_cell.get_optimal_subcell(cutoff/2.0) divisions = np.ceil(unit_cell.spacings/cutoff) divisions[divisions<1] = 1 grid = unit_cell/divisions if isinstance(grid, float): grid_cell = UnitCell(np.array([ [grid, 0, 0], [0, grid, 0], [0, 0, grid] ])) elif isinstance(grid, UnitCell): grid_cell = grid else: raise TypeError("Grid must be None, a float or a UnitCell instance.") if unit_cell is not None: # The columns of integer_matrix are the unit cell vectors in # fractional coordinates of the grid cell. integer_matrix = grid_cell.to_fractional(unit_cell.matrix.transpose()).transpose() if abs((integer_matrix - np.round(integer_matrix))*self.unit_cell.active).max() > 1e-6: raise ValueError("The unit cell vectors are not an integer linear combination of grid cell vectors.") integer_matrix = integer_matrix.round() integer_cell = UnitCell(integer_matrix, unit_cell.active) else: integer_cell = None return grid_cell, integer_cell
[ "def", "_setup_grid", "(", "self", ",", "cutoff", ",", "unit_cell", ",", "grid", ")", ":", "if", "grid", "is", "None", ":", "# automatically choose a decent grid", "if", "unit_cell", "is", "None", ":", "grid", "=", "cutoff", "/", "2.9", "else", ":", "# The following would be faster, but it is not reliable", "# enough yet.", "#grid = unit_cell.get_optimal_subcell(cutoff/2.0)", "divisions", "=", "np", ".", "ceil", "(", "unit_cell", ".", "spacings", "/", "cutoff", ")", "divisions", "[", "divisions", "<", "1", "]", "=", "1", "grid", "=", "unit_cell", "/", "divisions", "if", "isinstance", "(", "grid", ",", "float", ")", ":", "grid_cell", "=", "UnitCell", "(", "np", ".", "array", "(", "[", "[", "grid", ",", "0", ",", "0", "]", ",", "[", "0", ",", "grid", ",", "0", "]", ",", "[", "0", ",", "0", ",", "grid", "]", "]", ")", ")", "elif", "isinstance", "(", "grid", ",", "UnitCell", ")", ":", "grid_cell", "=", "grid", "else", ":", "raise", "TypeError", "(", "\"Grid must be None, a float or a UnitCell instance.\"", ")", "if", "unit_cell", "is", "not", "None", ":", "# The columns of integer_matrix are the unit cell vectors in", "# fractional coordinates of the grid cell.", "integer_matrix", "=", "grid_cell", ".", "to_fractional", "(", "unit_cell", ".", "matrix", ".", "transpose", "(", ")", ")", ".", "transpose", "(", ")", "if", "abs", "(", "(", "integer_matrix", "-", "np", ".", "round", "(", "integer_matrix", ")", ")", "*", "self", ".", "unit_cell", ".", "active", ")", ".", "max", "(", ")", ">", "1e-6", ":", "raise", "ValueError", "(", "\"The unit cell vectors are not an integer linear combination of grid cell vectors.\"", ")", "integer_matrix", "=", "integer_matrix", ".", "round", "(", ")", "integer_cell", "=", "UnitCell", "(", "integer_matrix", ",", "unit_cell", ".", "active", ")", "else", ":", "integer_cell", "=", "None", "return", "grid_cell", ",", "integer_cell" ]
Choose a proper grid for the binning process
[ "Choose", "a", "proper", "grid", "for", "the", "binning", "process" ]
python
train
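The automatic grid choice in the periodic branch above reduces to ceil(spacings / cutoff), clamped to at least one division per axis; a small numeric illustration with made-up spacings:

import numpy as np

cutoff = 3.0
spacings = np.array([10.0, 7.5, 2.0])  # hypothetical unit-cell spacings

divisions = np.ceil(spacings / cutoff)
divisions[divisions < 1] = 1
print(divisions)  # [4. 3. 1.] -> the cell is subdivided into 4 x 3 x 1 grid cells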
google/grr
grr/server/grr_response_server/hunts/implementation.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/hunts/implementation.py#L899-L909
def Pause(self): """Pauses the hunt (removes Foreman rules, does not touch expiry time).""" if not self.IsHuntStarted(): return self._RemoveForemanRule() self.hunt_obj.Set(self.hunt_obj.Schema.STATE("PAUSED")) self.hunt_obj.Flush() self._CreateAuditEvent("HUNT_PAUSED")
[ "def", "Pause", "(", "self", ")", ":", "if", "not", "self", ".", "IsHuntStarted", "(", ")", ":", "return", "self", ".", "_RemoveForemanRule", "(", ")", "self", ".", "hunt_obj", ".", "Set", "(", "self", ".", "hunt_obj", ".", "Schema", ".", "STATE", "(", "\"PAUSED\"", ")", ")", "self", ".", "hunt_obj", ".", "Flush", "(", ")", "self", ".", "_CreateAuditEvent", "(", "\"HUNT_PAUSED\"", ")" ]
Pauses the hunt (removes Foreman rules, does not touch expiry time).
[ "Pauses", "the", "hunt", "(", "removes", "Foreman", "rules", "does", "not", "touch", "expiry", "time", ")", "." ]
python
train
franciscogarate/pyliferisk
pyliferisk/__init__.py
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L268-L270
def Cx(mt, x): """ Return the Cx """ return ((1 / (1 + mt.i)) ** (x + 1)) * mt.dx[x] * ((1 + mt.i) ** 0.5)
[ "def", "Cx", "(", "mt", ",", "x", ")", ":", "return", "(", "(", "1", "/", "(", "1", "+", "mt", ".", "i", ")", ")", "**", "(", "x", "+", "1", ")", ")", "*", "mt", ".", "dx", "[", "x", "]", "*", "(", "(", "1", "+", "mt", ".", "i", ")", "**", "0.5", ")" ]
Return the Cx
[ "Return", "the", "Cx" ]
python
train
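The one-liner above computes the commutation function C_x = v^(x+1) * d_x * (1+i)^(1/2) with discount factor v = 1/(1+i). A numeric check, assuming Cx from the record is in scope; FakeTable is a hypothetical stand-in for a pyliferisk mortality table:

class FakeTable:
    i = 0.02                  # technical interest rate
    dx = [100.0, 95.0, 90.0]  # deaths d_x at ages 0, 1, 2

mt, x = FakeTable(), 1
v = 1 / (1 + mt.i)
expected = v ** (x + 1) * mt.dx[x] * (1 + mt.i) ** 0.5
assert abs(Cx(mt, x) - expected) < 1e-12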
cltk/cltk
cltk/inflection/old_norse/nouns.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/inflection/old_norse/nouns.py#L494-L601
def decline_strong_neuter_noun(ns: str, gs: str, np: str): """ Gives the full declension of strong neuter nouns. a-stem Most of strong neuter nouns follow the declensions of skip, land and herað. >>> decline_strong_neuter_noun("skip", "skips", "skip") skip skip skipi skips skip skip skipum skipa >>> decline_strong_neuter_noun("land", "lands", "lönd") land land landi lands lönd lönd löndum landa >>> decline_strong_neuter_noun("herað", "heraðs", "heruð") herað herað heraði heraðs heruð heruð heruðum heraða # >>> decline_strong_neuter_noun("kyn", "kyns", "kyn") # kyn # kyn # kyni # kyns # kyn # kyn # kynjum # kynja # # >>> decline_strong_neuter_noun("högg", "höggs", "högg") # högg # högg # höggvi # höggs # högg # högg # höggum # höggva >>> decline_strong_neuter_noun("kvæði", "kvæðis", "kvæði") kvæði kvæði kvæði kvæðis kvæði kvæði kvæðum kvæða :param ns: nominative singular :param gs: genitive singular :param np: nominative plural :return: """ # nominative singular print(ns) # accusative singular print(ns) # dative singular if ns[-1] == "i": print(ns) # TODO +"vi" else: print(ns+"i") # genitive singular print(gs) # nominative plural print(np) # accusative plural print(np) # dative plural if ns[-1] in CONSONANTS: print(apply_u_umlaut(np)+"um") else: print(apply_u_umlaut(np[:-1]) + "um") # TODO +"vum" # genitive plural if ns[-1] in CONSONANTS: print(ns+"a") # TODO + "va" else: print(ns[:-1]+"a")
[ "def", "decline_strong_neuter_noun", "(", "ns", ":", "str", ",", "gs", ":", "str", ",", "np", ":", "str", ")", ":", "# nominative singular", "print", "(", "ns", ")", "# accusative singular", "print", "(", "ns", ")", "# dative singular", "if", "ns", "[", "-", "1", "]", "==", "\"i\"", ":", "print", "(", "ns", ")", "# TODO +\"vi\"", "else", ":", "print", "(", "ns", "+", "\"i\"", ")", "# genitive singular", "print", "(", "gs", ")", "# nominative plural", "print", "(", "np", ")", "# accusative plural", "print", "(", "np", ")", "# dative plural", "if", "ns", "[", "-", "1", "]", "in", "CONSONANTS", ":", "print", "(", "apply_u_umlaut", "(", "np", ")", "+", "\"um\"", ")", "else", ":", "print", "(", "apply_u_umlaut", "(", "np", "[", ":", "-", "1", "]", ")", "+", "\"um\"", ")", "# TODO +\"vum\"", "# genitive plural", "if", "ns", "[", "-", "1", "]", "in", "CONSONANTS", ":", "print", "(", "ns", "+", "\"a\"", ")", "# TODO + \"va\"", "else", ":", "print", "(", "ns", "[", ":", "-", "1", "]", "+", "\"a\"", ")" ]
Gives the full declension of strong neuter nouns. a-stem Most of strong neuter nouns follow the declensions of skip, land and herað. >>> decline_strong_neuter_noun("skip", "skips", "skip") skip skip skipi skips skip skip skipum skipa >>> decline_strong_neuter_noun("land", "lands", "lönd") land land landi lands lönd lönd löndum landa >>> decline_strong_neuter_noun("herað", "heraðs", "heruð") herað herað heraði heraðs heruð heruð heruðum heraða # >>> decline_strong_neuter_noun("kyn", "kyns", "kyn") # kyn # kyn # kyni # kyns # kyn # kyn # kynjum # kynja # # >>> decline_strong_neuter_noun("högg", "höggs", "högg") # högg # högg # höggvi # höggs # högg # högg # höggum # höggva >>> decline_strong_neuter_noun("kvæði", "kvæðis", "kvæði") kvæði kvæði kvæði kvæðis kvæði kvæði kvæðum kvæða :param ns: nominative singular :param gs: genitive singular :param np: nominative plural :return:
[ "Gives", "the", "full", "declension", "of", "strong", "neuter", "nouns", "." ]
python
train
lowandrew/OLCTools
spadespipeline/mMLST.py
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/mMLST.py#L748-L809
def reprofiler(self, header, genome, sample): """ Creates and appends new profiles as required :param header: :param genome: :param sample: """ # Iterate through mlstseqtype - it contains genomes with partial matches to current reference profiles # Reset :newprofile newprofile = "" # Find the last profile entry in the dictionary of profiles # Opens uses the command line tool 'tail' to look at the last line of the file (-1). This last line # is split on tabs, and only the first entry (the sequence type number) is captured if sample[self.analysistype].supplementalprofile != 'NA': if os.path.isfile(sample[self.analysistype].supplementalprofile): try: lastentry = int( subprocess.check_output(['tail', '-1', sample[self.analysistype].supplementalprofile]) .split("\t")[0]) + 1 except ValueError: lastentry = 1000000 else: open(sample[self.analysistype].supplementalprofile, 'w').close() lastentry = 1000000 # As there can be multiple profiles in MLSTSeqType, this loop only needs to be performed once. seqcount = 0 # Go through the sequence types try: sequencetype = list(self.mlstseqtype[genome].keys())[0] except IndexError: sequencetype = '' seqcount = 1 # Only do this once if seqcount == 0: # Set the :newprofile string to start with the new profile name (e.g. 1000000_CFIA) newprofile = str(lastentry) # The number of matches to the reference profile nummatches = list(self.mlstseqtype[genome][sequencetype].keys())[0] # The genes in geneList - should be in the correct order for gene in sorted(sample[self.analysistype].allelenames): # The allele for each gene in the query genome allele = list(self.mlstseqtype[genome][sequencetype][nummatches][gene].keys())[0] # Append the allele to newprofile newprofile += '\t{}'.format(allele) # Add the MLST results for the query genome as well as the new profile data # to resultProfile self.resultprofile[genome]['{}(new)'.format(str(lastentry))][header][gene][allele] = \ list(self.mlstseqtype[genome][sequencetype][nummatches][gene][allele].values())[0] seqcount += 1 sample[self.analysistype].mismatchestosequencetype = 'NA' sample[self.analysistype].matchestosequencetype = header # Only perform the next loop if :newprofile exists if newprofile: # Open the profile file to append with open(sample[self.analysistype].supplementalprofile, 'a') as appendfile: # Append the new profile to the end of the profile file appendfile.write('{}\n'.format(newprofile)) # Re-run profiler with the updated files self.profiler() else: sample[self.analysistype].mismatchestosequencetype = 'NA' sample[self.analysistype].matchestosequencetype = 'NA'
[ "def", "reprofiler", "(", "self", ",", "header", ",", "genome", ",", "sample", ")", ":", "# Iterate through mlstseqtype - it contains genomes with partial matches to current reference profiles", "# Reset :newprofile", "newprofile", "=", "\"\"", "# Find the last profile entry in the dictionary of profiles", "# Opens uses the command line tool 'tail' to look at the last line of the file (-1). This last line", "# is split on tabs, and only the first entry (the sequence type number) is captured", "if", "sample", "[", "self", ".", "analysistype", "]", ".", "supplementalprofile", "!=", "'NA'", ":", "if", "os", ".", "path", ".", "isfile", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "supplementalprofile", ")", ":", "try", ":", "lastentry", "=", "int", "(", "subprocess", ".", "check_output", "(", "[", "'tail'", ",", "'-1'", ",", "sample", "[", "self", ".", "analysistype", "]", ".", "supplementalprofile", "]", ")", ".", "split", "(", "\"\\t\"", ")", "[", "0", "]", ")", "+", "1", "except", "ValueError", ":", "lastentry", "=", "1000000", "else", ":", "open", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "supplementalprofile", ",", "'w'", ")", ".", "close", "(", ")", "lastentry", "=", "1000000", "# As there can be multiple profiles in MLSTSeqType, this loop only needs to be performed once.", "seqcount", "=", "0", "# Go through the sequence types", "try", ":", "sequencetype", "=", "list", "(", "self", ".", "mlstseqtype", "[", "genome", "]", ".", "keys", "(", ")", ")", "[", "0", "]", "except", "IndexError", ":", "sequencetype", "=", "''", "seqcount", "=", "1", "# Only do this once", "if", "seqcount", "==", "0", ":", "# Set the :newprofile string to start with the new profile name (e.g. 1000000_CFIA)", "newprofile", "=", "str", "(", "lastentry", ")", "# The number of matches to the reference profile", "nummatches", "=", "list", "(", "self", ".", "mlstseqtype", "[", "genome", "]", "[", "sequencetype", "]", ".", "keys", "(", ")", ")", "[", "0", "]", "# The genes in geneList - should be in the correct order", "for", "gene", "in", "sorted", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "allelenames", ")", ":", "# The allele for each gene in the query genome", "allele", "=", "list", "(", "self", ".", "mlstseqtype", "[", "genome", "]", "[", "sequencetype", "]", "[", "nummatches", "]", "[", "gene", "]", ".", "keys", "(", ")", ")", "[", "0", "]", "# Append the allele to newprofile", "newprofile", "+=", "'\\t{}'", ".", "format", "(", "allele", ")", "# Add the MLST results for the query genome as well as the new profile data", "# to resultProfile", "self", ".", "resultprofile", "[", "genome", "]", "[", "'{}(new)'", ".", "format", "(", "str", "(", "lastentry", ")", ")", "]", "[", "header", "]", "[", "gene", "]", "[", "allele", "]", "=", "list", "(", "self", ".", "mlstseqtype", "[", "genome", "]", "[", "sequencetype", "]", "[", "nummatches", "]", "[", "gene", "]", "[", "allele", "]", ".", "values", "(", ")", ")", "[", "0", "]", "seqcount", "+=", "1", "sample", "[", "self", ".", "analysistype", "]", ".", "mismatchestosequencetype", "=", "'NA'", "sample", "[", "self", ".", "analysistype", "]", ".", "matchestosequencetype", "=", "header", "# Only perform the next loop if :newprofile exists", "if", "newprofile", ":", "# Open the profile file to append", "with", "open", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "supplementalprofile", ",", "'a'", ")", "as", "appendfile", ":", "# Append the new profile to the end of the profile file", "appendfile", ".", "write", 
"(", "'{}\\n'", ".", "format", "(", "newprofile", ")", ")", "# Re-run profiler with the updated files", "self", ".", "profiler", "(", ")", "else", ":", "sample", "[", "self", ".", "analysistype", "]", ".", "mismatchestosequencetype", "=", "'NA'", "sample", "[", "self", ".", "analysistype", "]", ".", "matchestosequencetype", "=", "'NA'" ]
Creates and appends new profiles as required :param header: :param genome: :param sample:
[ "Creates", "and", "appends", "new", "profiles", "as", "required", ":", "param", "header", ":", ":", "param", "genome", ":", ":", "param", "sample", ":" ]
python
train
requests/requests-kerberos
requests_kerberos/kerberos_.py
https://github.com/requests/requests-kerberos/blob/d459afcd20d921f18bc435e8df0f120f3d2ea6a2/requests_kerberos/kerberos_.py#L374-L413
def handle_response(self, response, **kwargs): """Takes the given response and tries kerberos-auth, as needed.""" num_401s = kwargs.pop('num_401s', 0) # Check if we have already tried to get the CBT data value if not self.cbt_binding_tried and self.send_cbt: # If we haven't tried, try getting it now cbt_application_data = _get_channel_bindings_application_data(response) if cbt_application_data: # Only the latest version of pykerberos has this method available try: self.cbt_struct = kerberos.channelBindings(application_data=cbt_application_data) except AttributeError: # Using older version set to None self.cbt_struct = None # Regardless of the result, set tried to True so we don't waste time next time self.cbt_binding_tried = True if self.pos is not None: # Rewind the file position indicator of the body to where # it was to resend the request. response.request.body.seek(self.pos) if response.status_code == 401 and num_401s < 2: # 401 Unauthorized. Handle it, and if it still comes back as 401, # that means authentication failed. _r = self.handle_401(response, **kwargs) log.debug("handle_response(): returning %s", _r) log.debug("handle_response() has seen %d 401 responses", num_401s) num_401s += 1 return self.handle_response(_r, num_401s=num_401s, **kwargs) elif response.status_code == 401 and num_401s >= 2: # Still receiving 401 responses after attempting to handle them. # Authentication has failed. Return the 401 response. log.debug("handle_response(): returning 401 %s", response) return response else: _r = self.handle_other(response) log.debug("handle_response(): returning %s", _r) return _r
[ "def", "handle_response", "(", "self", ",", "response", ",", "*", "*", "kwargs", ")", ":", "num_401s", "=", "kwargs", ".", "pop", "(", "'num_401s'", ",", "0", ")", "# Check if we have already tried to get the CBT data value", "if", "not", "self", ".", "cbt_binding_tried", "and", "self", ".", "send_cbt", ":", "# If we haven't tried, try getting it now", "cbt_application_data", "=", "_get_channel_bindings_application_data", "(", "response", ")", "if", "cbt_application_data", ":", "# Only the latest version of pykerberos has this method available", "try", ":", "self", ".", "cbt_struct", "=", "kerberos", ".", "channelBindings", "(", "application_data", "=", "cbt_application_data", ")", "except", "AttributeError", ":", "# Using older version set to None", "self", ".", "cbt_struct", "=", "None", "# Regardless of the result, set tried to True so we don't waste time next time", "self", ".", "cbt_binding_tried", "=", "True", "if", "self", ".", "pos", "is", "not", "None", ":", "# Rewind the file position indicator of the body to where", "# it was to resend the request.", "response", ".", "request", ".", "body", ".", "seek", "(", "self", ".", "pos", ")", "if", "response", ".", "status_code", "==", "401", "and", "num_401s", "<", "2", ":", "# 401 Unauthorized. Handle it, and if it still comes back as 401,", "# that means authentication failed.", "_r", "=", "self", ".", "handle_401", "(", "response", ",", "*", "*", "kwargs", ")", "log", ".", "debug", "(", "\"handle_response(): returning %s\"", ",", "_r", ")", "log", ".", "debug", "(", "\"handle_response() has seen %d 401 responses\"", ",", "num_401s", ")", "num_401s", "+=", "1", "return", "self", ".", "handle_response", "(", "_r", ",", "num_401s", "=", "num_401s", ",", "*", "*", "kwargs", ")", "elif", "response", ".", "status_code", "==", "401", "and", "num_401s", ">=", "2", ":", "# Still receiving 401 responses after attempting to handle them.", "# Authentication has failed. Return the 401 response.", "log", ".", "debug", "(", "\"handle_response(): returning 401 %s\"", ",", "response", ")", "return", "response", "else", ":", "_r", "=", "self", ".", "handle_other", "(", "response", ")", "log", ".", "debug", "(", "\"handle_response(): returning %s\"", ",", "_r", ")", "return", "_r" ]
Takes the given response and tries kerberos-auth, as needed.
[ "Takes", "the", "given", "response", "and", "tries", "kerberos", "-", "auth", "as", "needed", "." ]
python
train
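The retry budget above travels through kwargs so the recursive calls share one counter; a stripped-down sketch of that pattern, with plain dicts standing in for HTTP responses:

def handle(response, **kwargs):
    attempts = kwargs.pop('attempts', 0)
    if response['status'] == 401 and attempts < 2:
        retried = {'status': 401}  # stand-in for re-sending the request with fresh auth
        return handle(retried, attempts=attempts + 1, **kwargs)
    return response  # success, or giving up once the budget is spent

assert handle({'status': 200})['status'] == 200
assert handle({'status': 401})['status'] == 401  # returned unchanged after two retries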
ikumen/flask-cfg
flask_cfg/core.py
https://github.com/ikumen/flask-cfg/blob/28f3370121419d22b6a5a3713ab3cb8bb2da6e43/flask_cfg/core.py#L48-L77
def process_loaded_configs(self, values): """Takes the loaded config values (from YAML files) and performs the following clean up steps: 1. remove all value keys that are not uppercase 2. resolve any keys with missing values Note: resolving missing values does not fail fast, we will collect all missing values and report it to a post handler, then finally fail. @param values dictionary of raw, newly loaded config values """ unresolved_value_keys = self._process_config_values([], values, []) if len(unresolved_value_keys) > 0: msg = "Unresolved values for: {}".format(unresolved_value_keys) # Even though we will fail, there might be a situation when we want to # do something with the list of missing values, so pass it to a handler. self.on_process_loaded_configs_failure(values, unresolved_value_keys) if self.ignore_errors: # If we're ignoring errors, at least log it logging.warn(msg) else: # end program raise LookupError(msg) # All the config values were checked and everything looks good, # let's inform post handler for any additional work. self.on_process_loaded_configs_complete(values) return values
[ "def", "process_loaded_configs", "(", "self", ",", "values", ")", ":", "unresolved_value_keys", "=", "self", ".", "_process_config_values", "(", "[", "]", ",", "values", ",", "[", "]", ")", "if", "len", "(", "unresolved_value_keys", ")", ">", "0", ":", "msg", "=", "\"Unresolved values for: {}\"", ".", "format", "(", "unresolved_value_keys", ")", "# Even though we will fail, there might be a situation when we want to ", "# do something with the list of missing values, so pass it to a handler.", "self", ".", "on_process_loaded_configs_failure", "(", "values", ",", "unresolved_value_keys", ")", "if", "self", ".", "ignore_errors", ":", "# If we're ignoring errors, at least log it", "logging", ".", "warn", "(", "msg", ")", "else", ":", "# end program", "raise", "LookupError", "(", "msg", ")", "# All the config values were checked and everything looks good, ", "# let's inform post handler for any additional work.", "self", ".", "on_process_loaded_configs_complete", "(", "values", ")", "return", "values" ]
Takes the loaded config values (from YAML files) and performs the following clean up steps: 1. remove all value keys that are not uppercase 2. resolve any keys with missing values Note: resolving missing values does not fail fast, we will collect all missing values and report it to a post handler, then finally fail. @param values dictionary of raw, newly loaded config values
[ "Takes", "the", "loaded", "config", "values", "(", "from", "YAML", "files", ")", "and", "performs", "the", "following", "clean", "up", "steps", ":", "1", ".", "remove", "all", "value", "keys", "that", "are", "not", "uppercase", "2", ".", "resolve", "any", "keys", "with", "missing", "values" ]
python
train
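A sketch of the collect-then-report approach the method above takes: walk the loaded values, gather every unresolved key path, and only then decide whether to warn or raise. The nested dict and the None-as-missing convention are illustrative assumptions, not flask-cfg internals:

import logging

def find_unresolved(values, prefix=()):
    # Recursively collect dotted paths whose value is still missing (None here).
    missing = []
    for key, val in values.items():
        path = prefix + (key,)
        if isinstance(val, dict):
            missing.extend(find_unresolved(val, path))
        elif val is None:
            missing.append('.'.join(path))
    return missing

values = {'DB': {'HOST': 'localhost', 'PASSWORD': None}, 'DEBUG': True}
unresolved = find_unresolved(values)
if unresolved:
    logging.warning('Unresolved values for: %s', unresolved)  # or raise LookupError(...)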
nmdp-bioinformatics/SeqAnn
seqann/gfe.py
https://github.com/nmdp-bioinformatics/SeqAnn/blob/5ce91559b0a4fbe4fb7758e034eb258202632463/seqann/gfe.py#L100-L119
def load_features(self): """ Loads all the known features from the feature service """ # Loading all loci that # are in self.loci variable defined # when the pyGFE object is created for loc in self.loci: if self.verbose: self.logger.info(self.logname + "Loading features for " + loc) # Loading all features for loc from feature service self.all_feats.update({loc: self.locus_features(loc)}) if self.verbose: self.logger.info(self.logname + "Finished loading features for " + loc) if self.verbose: mem = "{:4.4f}".format(sys.getsizeof(self.all_feats) / 1000000) self.logger.info(self.logname + "Finished loading all features * all_feats = " + mem + " MB *")
[ "def", "load_features", "(", "self", ")", ":", "# Loading all loci that", "# are in self.loci variable defined", "# when the pyGFE object is created", "for", "loc", "in", "self", ".", "loci", ":", "if", "self", ".", "verbose", ":", "self", ".", "logger", ".", "info", "(", "self", ".", "logname", "+", "\"Loading features for \"", "+", "loc", ")", "# Loading all features for loc from feature service", "self", ".", "all_feats", ".", "update", "(", "{", "loc", ":", "self", ".", "locus_features", "(", "loc", ")", "}", ")", "if", "self", ".", "verbose", ":", "self", ".", "logger", ".", "info", "(", "self", ".", "logname", "+", "\"Finished loading features for \"", "+", "loc", ")", "if", "self", ".", "verbose", ":", "mem", "=", "\"{:4.4f}\"", ".", "format", "(", "sys", ".", "getsizeof", "(", "self", ".", "all_feats", ")", "/", "1000000", ")", "self", ".", "logger", ".", "info", "(", "self", ".", "logname", "+", "\"Finished loading all features * all_feats = \"", "+", "mem", "+", "\" MB *\"", ")" ]
Loads all the known features from the feature service
[ "Loads", "all", "the", "known", "features", "from", "the", "feature", "service" ]
python
train
MrYsLab/pymata-aio
pymata_aio/pymata3.py
https://github.com/MrYsLab/pymata-aio/blob/015081a4628b9d47dfe3f8d6c698ff903f107810/pymata_aio/pymata3.py#L277-L298
def get_capability_report(self, raw=True, cb=None): """ This method retrieves the Firmata capability report :param raw: If True, it either stores or provides the callback with a report as list. If False, prints a formatted report to the console :param cb: Optional callback reference to receive a raw report :returns: capability report """ task = asyncio.ensure_future(self.core.get_capability_report()) report = self.loop.run_until_complete(task) if raw: if cb: cb(report) else: return report else: # noinspection PyProtectedMember self.core._format_capability_report(report)
[ "def", "get_capability_report", "(", "self", ",", "raw", "=", "True", ",", "cb", "=", "None", ")", ":", "task", "=", "asyncio", ".", "ensure_future", "(", "self", ".", "core", ".", "get_capability_report", "(", ")", ")", "report", "=", "self", ".", "loop", ".", "run_until_complete", "(", "task", ")", "if", "raw", ":", "if", "cb", ":", "cb", "(", "report", ")", "else", ":", "return", "report", "else", ":", "# noinspection PyProtectedMember", "self", ".", "core", ".", "_format_capability_report", "(", "report", ")" ]
This method retrieves the Firmata capability report :param raw: If True, it either stores or provides the callback with a report as list. If False, prints a formatted report to the console :param cb: Optional callback reference to receive a raw report :returns: capability report
[ "This", "method", "retrieves", "the", "Firmata", "capability", "report" ]
python
train
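A hypothetical usage sketch, assuming a Firmata-flashed board is attached and PyMata3's default constructor can find it:

from pymata_aio.pymata3 import PyMata3

board = PyMata3()
report = board.get_capability_report(raw=True)   # returns the raw report list
print(report[:8])
board.get_capability_report(raw=False)           # prints a formatted report instead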
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_qos.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_qos.py#L88-L104
def qos_map_dscp_traffic_class_mark_to(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") qos = ET.SubElement(config, "qos", xmlns="urn:brocade.com:mgmt:brocade-qos") map = ET.SubElement(qos, "map") dscp_traffic_class = ET.SubElement(map, "dscp-traffic-class") dscp_traffic_class_map_name_key = ET.SubElement(dscp_traffic_class, "dscp-traffic-class-map-name") dscp_traffic_class_map_name_key.text = kwargs.pop('dscp_traffic_class_map_name') mark = ET.SubElement(dscp_traffic_class, "mark") dscp_in_values_key = ET.SubElement(mark, "dscp-in-values") dscp_in_values_key.text = kwargs.pop('dscp_in_values') to = ET.SubElement(mark, "to") to.text = kwargs.pop('to') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "qos_map_dscp_traffic_class_mark_to", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "qos", "=", "ET", ".", "SubElement", "(", "config", ",", "\"qos\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-qos\"", ")", "map", "=", "ET", ".", "SubElement", "(", "qos", ",", "\"map\"", ")", "dscp_traffic_class", "=", "ET", ".", "SubElement", "(", "map", ",", "\"dscp-traffic-class\"", ")", "dscp_traffic_class_map_name_key", "=", "ET", ".", "SubElement", "(", "dscp_traffic_class", ",", "\"dscp-traffic-class-map-name\"", ")", "dscp_traffic_class_map_name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'dscp_traffic_class_map_name'", ")", "mark", "=", "ET", ".", "SubElement", "(", "dscp_traffic_class", ",", "\"mark\"", ")", "dscp_in_values_key", "=", "ET", ".", "SubElement", "(", "mark", ",", "\"dscp-in-values\"", ")", "dscp_in_values_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'dscp_in_values'", ")", "to", "=", "ET", ".", "SubElement", "(", "mark", ",", "\"to\"", ")", "to", ".", "text", "=", "kwargs", ".", "pop", "(", "'to'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
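The generated method is plain ElementTree assembly; the same nesting shown standalone, with made-up map names and values:

import xml.etree.ElementTree as ET

config = ET.Element('config')
qos = ET.SubElement(config, 'qos', xmlns='urn:brocade.com:mgmt:brocade-qos')
dscp_map = ET.SubElement(ET.SubElement(qos, 'map'), 'dscp-traffic-class')
ET.SubElement(dscp_map, 'dscp-traffic-class-map-name').text = 'example-map'  # illustrative
mark = ET.SubElement(dscp_map, 'mark')
ET.SubElement(mark, 'dscp-in-values').text = '10'
ET.SubElement(mark, 'to').text = '3'
print(ET.tostring(config, encoding='unicode'))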
mikusjelly/apkutils
apkutils/__init__.py
https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/__init__.py#L214-L246
def _init_methods(self, limit=10000): """Initialize methods. Some APKs may contain a large number of methods, which can be quite time-consuming, so a limit is applied as needed. Args: limit (int, optional): Cap on the number of methods; if exceeded, methods are not collected. Returns: TYPE: set of methods """ methods = set() if not self.dex_files: self._init_dex_files() count = 0 for dex_file in self.dex_files: count += dex_file.method_ids.size if limit < count: return for dex_file in self.dex_files: for dexClass in dex_file.classes: try: dexClass.parseData() except IndexError: continue for method in dexClass.data.methods: clsname = method.id.cname.decode() mtdname = method.id.name.decode() methods.add(clsname + '/' + mtdname) self.methods = sorted(methods)
[ "def", "_init_methods", "(", "self", ",", "limit", "=", "10000", ")", ":", "methods", "=", "set", "(", ")", "if", "not", "self", ".", "dex_files", ":", "self", ".", "_init_dex_files", "(", ")", "count", "=", "0", "for", "dex_file", "in", "self", ".", "dex_files", ":", "count", "+=", "dex_file", ".", "method_ids", ".", "size", "if", "limit", "<", "count", ":", "return", "for", "dex_file", "in", "self", ".", "dex_files", ":", "for", "dexClass", "in", "dex_file", ".", "classes", ":", "try", ":", "dexClass", ".", "parseData", "(", ")", "except", "IndexError", ":", "continue", "for", "method", "in", "dexClass", ".", "data", ".", "methods", ":", "clsname", "=", "method", ".", "id", ".", "cname", ".", "decode", "(", ")", "mtdname", "=", "method", ".", "id", ".", "name", ".", "decode", "(", ")", "methods", ".", "add", "(", "clsname", "+", "'/'", "+", "mtdname", ")", "self", ".", "methods", "=", "sorted", "(", "methods", ")" ]
Initialize methods. Some APKs may contain a large number of methods, which can be quite time-consuming, so a limit is applied as needed. Args: limit (int, optional): Cap on the number of methods; if exceeded, methods are not collected. Returns: TYPE: set of methods
[ "Initialize", "methods" ]
python
train
meyersj/geotweet
geotweet/osm.py
https://github.com/meyersj/geotweet/blob/1a6b55f98adf34d1b91f172d9187d599616412d9/geotweet/osm.py#L67-L81
def extract(self, pbf, output): """ extract POI nodes from osm pbf extract """ logging.info("Extracting POI nodes from {0} to {1}".format(pbf, output)) with open(output, 'w') as f: # define callback for each node that is processed def nodes_callback(nodes): for node in nodes: node_id, tags, coordinates = node # if any tags have a matching key then write record if any([t in tags for t in POI_TAGS]): f.write(json.dumps(dict(tags=tags, coordinates=coordinates))) f.write('\n') parser = OSMParser(concurrency=4, nodes_callback=nodes_callback) parser.parse(pbf) return output
[ "def", "extract", "(", "self", ",", "pbf", ",", "output", ")", ":", "logging", ".", "info", "(", "\"Extracting POI nodes from {0} to {1}\"", ".", "format", "(", "pbf", ",", "output", ")", ")", "with", "open", "(", "output", ",", "'w'", ")", "as", "f", ":", "# define callback for each node that is processed", "def", "nodes_callback", "(", "nodes", ")", ":", "for", "node", "in", "nodes", ":", "node_id", ",", "tags", ",", "coordinates", "=", "node", "# if any tags have a matching key then write record", "if", "any", "(", "[", "t", "in", "tags", "for", "t", "in", "POI_TAGS", "]", ")", ":", "f", ".", "write", "(", "json", ".", "dumps", "(", "dict", "(", "tags", "=", "tags", ",", "coordinates", "=", "coordinates", ")", ")", ")", "f", ".", "write", "(", "'\\n'", ")", "parser", "=", "OSMParser", "(", "concurrency", "=", "4", ",", "nodes_callback", "=", "nodes_callback", ")", "parser", ".", "parse", "(", "pbf", ")", "return", "output" ]
extract POI nodes from osm pbf extract
[ "extract", "POI", "nodes", "from", "osm", "pbf", "extract" ]
python
train
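The same callback pattern standalone, assuming imposm.parser (the library the method above takes OSMParser from) is installed; POI_TAGS and the file path are illustrative:

import json
from imposm.parser import OSMParser

POI_TAGS = ('amenity', 'shop', 'tourism')  # hypothetical tag keys of interest

def nodes_callback(nodes):
    for node_id, tags, coordinates in nodes:
        # Keep only nodes carrying at least one tag key of interest.
        if any(t in tags for t in POI_TAGS):
            print(json.dumps(dict(tags=tags, coordinates=coordinates)))

parser = OSMParser(concurrency=4, nodes_callback=nodes_callback)
parser.parse('extract.osm.pbf')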
hyperledger/indy-sdk
vcx/wrappers/python3/vcx/api/issuer_credential.py
https://github.com/hyperledger/indy-sdk/blob/55240dc170308d7883c48f03f308130a6d077be6/vcx/wrappers/python3/vcx/api/issuer_credential.py#L132-L160
async def send_offer(self, connection: Connection): """ Sends an offer to a prover. Once accepted, a request will be received. :param connection: vcx.api.connection.Connection :return: None Example: source_id = '1' cred_def_id = 'cred_def_id1' attrs = {'key': 'value', 'key2': 'value2', 'key3': 'value3'} name = 'Credential Name' issuer_did = '8XFh8yBzrpJQmNyZzgoTqB' phone_number = '8019119191' price = 1 issuer_credential = await IssuerCredential.create(source_id, attrs, cred_def_id, name, price) connection = await Connection.create(source_id) issuer_credential.send_offer(connection) """ if not hasattr(IssuerCredential.send_offer, "cb"): self.logger.debug("vcx_issuer_send_credential_offer: Creating callback") IssuerCredential.send_offer.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32)) c_credential_handle = c_uint32(self.handle) c_connection_handle = c_uint32(connection.handle) await do_call('vcx_issuer_send_credential_offer', c_credential_handle, c_connection_handle, IssuerCredential.send_offer.cb)
[ "async", "def", "send_offer", "(", "self", ",", "connection", ":", "Connection", ")", ":", "if", "not", "hasattr", "(", "IssuerCredential", ".", "send_offer", ",", "\"cb\"", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"vcx_issuer_send_credential_offer: Creating callback\"", ")", "IssuerCredential", ".", "send_offer", ".", "cb", "=", "create_cb", "(", "CFUNCTYPE", "(", "None", ",", "c_uint32", ",", "c_uint32", ")", ")", "c_credential_handle", "=", "c_uint32", "(", "self", ".", "handle", ")", "c_connection_handle", "=", "c_uint32", "(", "connection", ".", "handle", ")", "await", "do_call", "(", "'vcx_issuer_send_credential_offer'", ",", "c_credential_handle", ",", "c_connection_handle", ",", "IssuerCredential", ".", "send_offer", ".", "cb", ")" ]
Sends an offer to a prover. Once accepted, a request will be received. :param connection: vcx.api.connection.Connection :return: None Example: source_id = '1' cred_def_id = 'cred_def_id1' attrs = {'key': 'value', 'key2': 'value2', 'key3': 'value3'} name = 'Credential Name' issuer_did = '8XFh8yBzrpJQmNyZzgoTqB' phone_number = '8019119191' price = 1 issuer_credential = await IssuerCredential.create(source_id, attrs, cred_def_id, name, price) connection = await Connection.create(source_id) issuer_credential.send_offer(connection)
[ "Sends", "an", "offer", "to", "a", "prover", ".", "Once", "accepted", "a", "request", "will", "be", "recieved", ".", ":", "param", "connection", ":", "vcx", ".", "api", ".", "connection", ".", "Connection", ":", "return", ":", "None" ]
python
train
fabioz/PyDev.Debugger
pydevd.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd.py#L586-L610
def get_file_type(self, abs_real_path_and_basename, _cache_file_type=_CACHE_FILE_TYPE): ''' :param abs_real_path_and_basename: The result from get_abs_path_real_path_and_base_from_file or get_abs_path_real_path_and_base_from_frame. :return _pydevd_bundle.pydevd_dont_trace_files.PYDEV_FILE: If it's a file internal to the debugger which shouldn't be traced nor shown to the user. _pydevd_bundle.pydevd_dont_trace_files.LIB_FILE: If it's a file in a library which shouldn't be traced. None: If it's a regular user file which should be traced. ''' try: return _cache_file_type[abs_real_path_and_basename[0]] except: file_type = self._internal_get_file_type(abs_real_path_and_basename) if file_type is None: file_type = PYDEV_FILE if self.dont_trace_external_files(abs_real_path_and_basename[0]) else None _cache_file_type[abs_real_path_and_basename[0]] = file_type return file_type
[ "def", "get_file_type", "(", "self", ",", "abs_real_path_and_basename", ",", "_cache_file_type", "=", "_CACHE_FILE_TYPE", ")", ":", "try", ":", "return", "_cache_file_type", "[", "abs_real_path_and_basename", "[", "0", "]", "]", "except", ":", "file_type", "=", "self", ".", "_internal_get_file_type", "(", "abs_real_path_and_basename", ")", "if", "file_type", "is", "None", ":", "file_type", "=", "PYDEV_FILE", "if", "self", ".", "dont_trace_external_files", "(", "abs_real_path_and_basename", "[", "0", "]", ")", "else", "None", "_cache_file_type", "[", "abs_real_path_and_basename", "[", "0", "]", "]", "=", "file_type", "return", "file_type" ]
:param abs_real_path_and_basename: The result from get_abs_path_real_path_and_base_from_file or get_abs_path_real_path_and_base_from_frame. :return _pydevd_bundle.pydevd_dont_trace_files.PYDEV_FILE: If it's a file internal to the debugger which shouldn't be traced nor shown to the user. _pydevd_bundle.pydevd_dont_trace_files.LIB_FILE: If it's a file in a library which shouldn't be traced. None: If it's a regular user file which should be traced.
[ ":", "param", "abs_real_path_and_basename", ":", "The", "result", "from", "get_abs_path_real_path_and_base_from_file", "or", "get_abs_path_real_path_and_base_from_frame", "." ]
python
train
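The method above leans on a module-level dict bound as a default argument, so the cache persists across calls without an attribute lookup; a generic sketch of that trick with stand-in classification logic:

_CACHE = {}

def classify(path, _cache=_CACHE):
    # The dict is bound once at definition time and shared by every call.
    try:
        return _cache[path]
    except KeyError:
        result = 'library' if '/site-packages/' in path else 'user'  # stand-in logic
        _cache[path] = result
        return result

classify('/proj/app.py')  # computed and stored
classify('/proj/app.py')  # served from the cache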
MozillaSecurity/laniakea
laniakea/core/providers/gce/manager.py
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/providers/gce/manager.py#L274-L297
def start(self, nodes=None): """Start one or many nodes. :param nodes: Nodes to be started. :type nodes: ``list`` """ if not self.is_connected(): return None nodes = nodes or self.nodes result = [] for node in nodes: if node.state == 'running': logging.warning('Node %s is already "running".', node.name) continue try: status = self.gce.ex_start_node(node) if status: result.append(node) except InvalidRequestError as err: raise ComputeEngineManagerException(err) return result
[ "def", "start", "(", "self", ",", "nodes", "=", "None", ")", ":", "if", "not", "self", ".", "is_connected", "(", ")", ":", "return", "None", "nodes", "=", "nodes", "or", "self", ".", "nodes", "result", "=", "[", "]", "for", "node", "in", "nodes", ":", "if", "node", ".", "state", "==", "'running'", ":", "logging", ".", "warning", "(", "'Node %s is already \"running\".'", ",", "node", ".", "name", ")", "continue", "try", ":", "status", "=", "self", ".", "gce", ".", "ex_start_node", "(", "node", ")", "if", "status", ":", "result", ".", "append", "(", "node", ")", "except", "InvalidRequestError", "as", "err", ":", "raise", "ComputeEngineManagerException", "(", "err", ")", "return", "result" ]
Start one or many nodes. :param nodes: Nodes to be started. :type nodes: ``list``
[ "Start", "one", "or", "many", "nodes", "." ]
python
train
lk-geimfari/mimesis
mimesis/providers/base.py
https://github.com/lk-geimfari/mimesis/blob/4b16ee7a8dba6281a904654a88dbb4b052869fc5/mimesis/providers/base.py#L178-L197
def override_locale(self, locale: str = locales.EN, ) -> Generator['BaseDataProvider', None, None]: """Context manager which allows overriding current locale. Temporarily overrides current locale for locale-dependent providers. :param locale: Locale. :return: Provider with overridden locale. """ try: origin_locale = self.locale self._override_locale(locale) try: yield self finally: self._override_locale(origin_locale) except AttributeError: raise ValueError('«{}» has not locale dependent'.format( self.__class__.__name__))
[ "def", "override_locale", "(", "self", ",", "locale", ":", "str", "=", "locales", ".", "EN", ",", ")", "->", "Generator", "[", "'BaseDataProvider'", ",", "None", ",", "None", "]", ":", "try", ":", "origin_locale", "=", "self", ".", "locale", "self", ".", "_override_locale", "(", "locale", ")", "try", ":", "yield", "self", "finally", ":", "self", ".", "_override_locale", "(", "origin_locale", ")", "except", "AttributeError", ":", "raise", "ValueError", "(", "'«{}» has not locale dependent'.f", "o", "rmat(", "", "self", ".", "__class__", ".", "__name__", ")", ")" ]
Context manager which allows overriding current locale. Temporarily overrides current locale for locale-dependent providers. :param locale: Locale. :return: Provider with overridden locale.
[ "Context", "manager", "which", "allows", "overriding", "current", "locale", "." ]
python
train
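The save/override/restore shape above generalizes to any attribute; a minimal sketch using contextlib.contextmanager (the mimesis internals behind _override_locale are not reproduced here):

from contextlib import contextmanager

class Provider:
    locale = 'en'

    @contextmanager
    def override_locale(self, locale):
        origin = self.locale
        self.locale = locale
        try:
            yield self
        finally:
            self.locale = origin  # restored even if the body raises

p = Provider()
with p.override_locale('de'):
    assert p.locale == 'de'
assert p.locale == 'en'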
atmos-python/atmos
atmos/solve.py
https://github.com/atmos-python/atmos/blob/f4af8eaca23cce881bde979599d15d322fc1935e/atmos/solve.py#L217-L257
def _get_module_methods(module): ''' Returns a methods list corresponding to the equations in the given module. Each entry is a dictionary with keys 'output', 'args', and 'func' corresponding to the output, arguments, and function of the method. The entries may optionally include 'assumptions' and 'overridden_by_assumptions' as keys, stating which assumptions are required to use the method, and which assumptions mean the method should not be used because it is overridden. ''' # Set up the methods dict we will eventually return methods = [] funcs = [] for item in inspect.getmembers(equations): if (item[0][0] != '_' and '_from_' in item[0]): func = item[1] output = item[0][:item[0].find('_from_')] # avoid returning duplicates if func in funcs: continue else: funcs.append(func) args = tuple(getfullargspec(func).args) try: assumptions = tuple(func.assumptions) except AttributeError: raise NotImplementedError('function {0} in equations module has no' ' assumption ' 'definition'.format(func.__name__)) try: overridden_by_assumptions = func.overridden_by_assumptions except AttributeError: overridden_by_assumptions = () methods.append({ 'func': func, 'args': args, 'output': output, 'assumptions': assumptions, 'overridden_by_assumptions': overridden_by_assumptions, }) return methods
[ "def", "_get_module_methods", "(", "module", ")", ":", "# Set up the methods dict we will eventually return", "methods", "=", "[", "]", "funcs", "=", "[", "]", "for", "item", "in", "inspect", ".", "getmembers", "(", "equations", ")", ":", "if", "(", "item", "[", "0", "]", "[", "0", "]", "!=", "'_'", "and", "'_from_'", "in", "item", "[", "0", "]", ")", ":", "func", "=", "item", "[", "1", "]", "output", "=", "item", "[", "0", "]", "[", ":", "item", "[", "0", "]", ".", "find", "(", "'_from_'", ")", "]", "# avoid returning duplicates", "if", "func", "in", "funcs", ":", "continue", "else", ":", "funcs", ".", "append", "(", "func", ")", "args", "=", "tuple", "(", "getfullargspec", "(", "func", ")", ".", "args", ")", "try", ":", "assumptions", "=", "tuple", "(", "func", ".", "assumptions", ")", "except", "AttributeError", ":", "raise", "NotImplementedError", "(", "'function {0} in equations module has no'", "' assumption '", "'definition'", ".", "format", "(", "func", ".", "__name__", ")", ")", "try", ":", "overridden_by_assumptions", "=", "func", ".", "overridden_by_assumptions", "except", "AttributeError", ":", "overridden_by_assumptions", "=", "(", ")", "methods", ".", "append", "(", "{", "'func'", ":", "func", ",", "'args'", ":", "args", ",", "'output'", ":", "output", ",", "'assumptions'", ":", "assumptions", ",", "'overridden_by_assumptions'", ":", "overridden_by_assumptions", ",", "}", ")", "return", "methods" ]
Returns a methods list corresponding to the equations in the given module. Each entry is a dictionary with keys 'output', 'args', and 'func' corresponding to the output, arguments, and function of the method. The entries may optionally include 'assumptions' and 'overridden_by_assumptions' as keys, stating which assumptions are required to use the method, and which assumptions mean the method should not be used because it is overridden.
[ "Returns", "a", "methods", "list", "corresponding", "to", "the", "equations", "in", "the", "given", "module", ".", "Each", "entry", "is", "a", "dictionary", "with", "keys", "output", "args", "and", "func", "corresponding", "to", "the", "output", "arguments", "and", "function", "of", "the", "method", ".", "The", "entries", "may", "optionally", "include", "assumptions", "and", "overridden_by_assumptions", "as", "keys", "stating", "which", "assumptions", "are", "required", "to", "use", "the", "method", "and", "which", "assumptions", "mean", "the", "method", "should", "not", "be", "used", "because", "it", "is", "overridden", "." ]
python
train
sdispater/cachy
cachy/repository.py
https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/repository.py#L90-L113
def add(self, key, val, minutes): """ Store an item in the cache if it does not exist. :param key: The cache key :type key: str :param val: The cache value :type val: mixed :param minutes: The lifetime in minutes of the cached value :type minutes: int|datetime :rtype: bool """ if hasattr(self._store, 'add'): return self._store.add(key, val, self._get_minutes(minutes)) if not self.has(key): self.put(key, val, minutes) return True return False
[ "def", "add", "(", "self", ",", "key", ",", "val", ",", "minutes", ")", ":", "if", "hasattr", "(", "self", ".", "_store", ",", "'add'", ")", ":", "return", "self", ".", "_store", ".", "add", "(", "key", ",", "val", ",", "self", ".", "_get_minutes", "(", "minutes", ")", ")", "if", "not", "self", ".", "has", "(", "key", ")", ":", "self", ".", "put", "(", "key", ",", "val", ",", "minutes", ")", "return", "True", "return", "False" ]
Store an item in the cache if it does not exist. :param key: The cache key :type key: str :param val: The cache value :type val: mixed :param minutes: The lifetime in minutes of the cached value :type minutes: int|datetime :rtype: bool
[ "Store", "an", "item", "in", "the", "cache", "if", "it", "does", "not", "exist", "." ]
python
train
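A dict-backed stand-in with the same add-if-absent contract, to show the semantics; the real Repository delegates to its store and honors lifetimes, which this sketch ignores:

class DictRepo:
    def __init__(self):
        self._data = {}

    def has(self, key):
        return key in self._data

    def put(self, key, val, minutes):
        self._data[key] = val  # lifetime ignored in this sketch

    def add(self, key, val, minutes):
        # Store only if the key is absent; report whether a write happened.
        if not self.has(key):
            self.put(key, val, minutes)
            return True
        return False

repo = DictRepo()
assert repo.add('user:1', 'Ada', 10) is True
assert repo.add('user:1', 'Bob', 10) is False  # existing value is left untouched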
sammchardy/python-binance
binance/client.py
https://github.com/sammchardy/python-binance/blob/31c0d0a32f9edd528c6c2c1dd3044d9a34ce43cc/binance/client.py#L1312-L1336
def order_market_sell(self, **params): """Send in a new market sell order :param symbol: required :type symbol: str :param quantity: required :type quantity: decimal :param newClientOrderId: A unique id for the order. Automatically generated if not sent. :type newClientOrderId: str :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT. :type newOrderRespType: str :param recvWindow: the number of milliseconds the request is valid for :type recvWindow: int :returns: API response See order endpoint for full response options :raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException """ params.update({ 'side': self.SIDE_SELL }) return self.order_market(**params)
[ "def", "order_market_sell", "(", "self", ",", "*", "*", "params", ")", ":", "params", ".", "update", "(", "{", "'side'", ":", "self", ".", "SIDE_SELL", "}", ")", "return", "self", ".", "order_market", "(", "*", "*", "params", ")" ]
Send in a new market sell order :param symbol: required :type symbol: str :param quantity: required :type quantity: decimal :param newClientOrderId: A unique id for the order. Automatically generated if not sent. :type newClientOrderId: str :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT. :type newOrderRespType: str :param recvWindow: the number of milliseconds the request is valid for :type recvWindow: int :returns: API response See order endpoint for full response options :raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
[ "Send", "in", "a", "new", "market", "sell", "order" ]
python
train
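The method above only injects the SELL side and defers everything else to a generic market-order helper. A standalone sketch of that delegation, with stand-in functions rather than the library's signed HTTP layer:

SIDE_SELL = "SELL"

def order_market(**params):
    # stand-in for the generic market-order helper, which in the real
    # client signs and POSTs the request to the exchange
    params.setdefault("type", "MARKET")
    return params

def order_market_sell(**params):
    params.update({"side": SIDE_SELL})
    return order_market(**params)

print(order_market_sell(symbol="BNBBTC", quantity=100))
# {'symbol': 'BNBBTC', 'quantity': 100, 'side': 'SELL', 'type': 'MARKET'}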
MartinThoma/hwrt
hwrt/preprocessing.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/preprocessing.py#L382-L426
def _space(self, hwr_obj, stroke, kind): """Do the interpolation of 'kind' for 'stroke'""" new_stroke = [] stroke = sorted(stroke, key=lambda p: p['time']) x, y, t = [], [], [] for point in stroke: x.append(point['x']) y.append(point['y']) t.append(point['time']) x, y = numpy.array(x), numpy.array(y) failed = False try: fx = interp1d(t, x, kind=kind) fy = interp1d(t, y, kind=kind) except Exception as e: # pylint: disable=W0703 if hwr_obj.raw_data_id is not None: logging.debug("spline failed for raw_data_id %i", hwr_obj.raw_data_id) else: logging.debug("spline failed") logging.debug(e) failed = True tnew = numpy.linspace(t[0], t[-1], self.number) # linear interpolation fallback due to # https://github.com/scipy/scipy/issues/3868 if failed: try: fx = interp1d(t, x, kind='linear') fy = interp1d(t, y, kind='linear') failed = False except Exception as e: logging.debug("len(stroke) = %i", len(stroke)) logging.debug("len(x) = %i", len(x)) logging.debug("len(y) = %i", len(y)) logging.debug("stroke=%s", stroke) raise e for x, y, t in zip(fx(tnew), fy(tnew), tnew): new_stroke.append({'x': x, 'y': y, 'time': t}) return new_stroke
[ "def", "_space", "(", "self", ",", "hwr_obj", ",", "stroke", ",", "kind", ")", ":", "new_stroke", "=", "[", "]", "stroke", "=", "sorted", "(", "stroke", ",", "key", "=", "lambda", "p", ":", "p", "[", "'time'", "]", ")", "x", ",", "y", ",", "t", "=", "[", "]", ",", "[", "]", ",", "[", "]", "for", "point", "in", "stroke", ":", "x", ".", "append", "(", "point", "[", "'x'", "]", ")", "y", ".", "append", "(", "point", "[", "'y'", "]", ")", "t", ".", "append", "(", "point", "[", "'time'", "]", ")", "x", ",", "y", "=", "numpy", ".", "array", "(", "x", ")", ",", "numpy", ".", "array", "(", "y", ")", "failed", "=", "False", "try", ":", "fx", "=", "interp1d", "(", "t", ",", "x", ",", "kind", "=", "kind", ")", "fy", "=", "interp1d", "(", "t", ",", "y", ",", "kind", "=", "kind", ")", "except", "Exception", "as", "e", ":", "# pylint: disable=W0703", "if", "hwr_obj", ".", "raw_data_id", "is", "not", "None", ":", "logging", ".", "debug", "(", "\"spline failed for raw_data_id %i\"", ",", "hwr_obj", ".", "raw_data_id", ")", "else", ":", "logging", ".", "debug", "(", "\"spline failed\"", ")", "logging", ".", "debug", "(", "e", ")", "failed", "=", "True", "tnew", "=", "numpy", ".", "linspace", "(", "t", "[", "0", "]", ",", "t", "[", "-", "1", "]", ",", "self", ".", "number", ")", "# linear interpolation fallback due to", "# https://github.com/scipy/scipy/issues/3868", "if", "failed", ":", "try", ":", "fx", "=", "interp1d", "(", "t", ",", "x", ",", "kind", "=", "'linear'", ")", "fy", "=", "interp1d", "(", "t", ",", "y", ",", "kind", "=", "'linear'", ")", "failed", "=", "False", "except", "Exception", "as", "e", ":", "logging", ".", "debug", "(", "\"len(stroke) = %i\"", ",", "len", "(", "stroke", ")", ")", "logging", ".", "debug", "(", "\"len(x) = %i\"", ",", "len", "(", "x", ")", ")", "logging", ".", "debug", "(", "\"len(y) = %i\"", ",", "len", "(", "y", ")", ")", "logging", ".", "debug", "(", "\"stroke=%s\"", ",", "stroke", ")", "raise", "e", "for", "x", ",", "y", ",", "t", "in", "zip", "(", "fx", "(", "tnew", ")", ",", "fy", "(", "tnew", ")", ",", "tnew", ")", ":", "new_stroke", ".", "append", "(", "{", "'x'", ":", "x", ",", "'y'", ":", "y", ",", "'time'", ":", "t", "}", ")", "return", "new_stroke" ]
Do the interpolation of 'kind' for 'stroke'
[ "Do", "the", "interpolation", "of", "kind", "for", "stroke" ]
python
train
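The core of _space is resampling x(t) and y(t) with scipy's interp1d and falling back to linear interpolation when the requested spline kind cannot be fitted. A self-contained sketch of that idea (resample_stroke is a hypothetical stand-in, not the hwrt API):

import numpy
from scipy.interpolate import interp1d

def resample_stroke(stroke, number=10, kind="cubic"):
    stroke = sorted(stroke, key=lambda p: p["time"])
    t = [p["time"] for p in stroke]
    x = numpy.array([p["x"] for p in stroke])
    y = numpy.array([p["y"] for p in stroke])
    try:
        fx, fy = interp1d(t, x, kind=kind), interp1d(t, y, kind=kind)
    except ValueError:  # e.g. too few points for the requested spline
        fx, fy = interp1d(t, x, kind="linear"), interp1d(t, y, kind="linear")
    tnew = numpy.linspace(t[0], t[-1], number)
    return [{"x": float(px), "y": float(py), "time": float(pt)}
            for px, py, pt in zip(fx(tnew), fy(tnew), tnew)]

points = [{"x": i, "y": i * i, "time": i} for i in range(4)]
print(resample_stroke(points, number=6))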
pyviz/holoviews
holoviews/plotting/mpl/chart.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/mpl/chart.py#L495-L540
def _update_plot(self, key, element, bars, lims, ranges): """ Process the bars and draw the offset line as necessary. If a color map is set in the style of the 'main' ViewableElement object, color the bars appropriately, respecting the required normalization settings. """ main = self.adjoined.main _, y1 = element.range(1) offset = self.offset * y1 range_item, main_range, dim = get_sideplot_ranges(self, element, main, ranges) # Check if plot is colormapped plot_type = Store.registry['matplotlib'].get(type(range_item)) if isinstance(plot_type, PlotSelector): plot_type = plot_type.get_plot_class(range_item) opts = self.lookup_options(range_item, 'plot') if plot_type and issubclass(plot_type, ColorbarPlot): cidx = opts.options.get('color_index', None) if cidx is None: opts = self.lookup_options(range_item, 'style') cidx = opts.kwargs.get('color', None) if cidx not in range_item: cidx = None cdim = None if cidx is None else range_item.get_dimension(cidx) else: cdim = None # Get colormapping options if isinstance(range_item, (HeatMap, Raster)) or cdim: style = self.lookup_options(range_item, 'style')[self.cyclic_index] cmap = cm.get_cmap(style.get('cmap')) main_range = style.get('clims', main_range) else: cmap = None if offset and ('offset_line' not in self.handles): self.handles['offset_line'] = self.offset_linefn(offset, linewidth=1.0, color='k') elif offset: self._update_separator(offset) if cmap is not None: self._colorize_bars(cmap, bars, element, main_range, dim) return bars
[ "def", "_update_plot", "(", "self", ",", "key", ",", "element", ",", "bars", ",", "lims", ",", "ranges", ")", ":", "main", "=", "self", ".", "adjoined", ".", "main", "_", ",", "y1", "=", "element", ".", "range", "(", "1", ")", "offset", "=", "self", ".", "offset", "*", "y1", "range_item", ",", "main_range", ",", "dim", "=", "get_sideplot_ranges", "(", "self", ",", "element", ",", "main", ",", "ranges", ")", "# Check if plot is colormapped", "plot_type", "=", "Store", ".", "registry", "[", "'matplotlib'", "]", ".", "get", "(", "type", "(", "range_item", ")", ")", "if", "isinstance", "(", "plot_type", ",", "PlotSelector", ")", ":", "plot_type", "=", "plot_type", ".", "get_plot_class", "(", "range_item", ")", "opts", "=", "self", ".", "lookup_options", "(", "range_item", ",", "'plot'", ")", "if", "plot_type", "and", "issubclass", "(", "plot_type", ",", "ColorbarPlot", ")", ":", "cidx", "=", "opts", ".", "options", ".", "get", "(", "'color_index'", ",", "None", ")", "if", "cidx", "is", "None", ":", "opts", "=", "self", ".", "lookup_options", "(", "range_item", ",", "'style'", ")", "cidx", "=", "opts", ".", "kwargs", ".", "get", "(", "'color'", ",", "None", ")", "if", "cidx", "not", "in", "range_item", ":", "cidx", "=", "None", "cdim", "=", "None", "if", "cidx", "is", "None", "else", "range_item", ".", "get_dimension", "(", "cidx", ")", "else", ":", "cdim", "=", "None", "# Get colormapping options", "if", "isinstance", "(", "range_item", ",", "(", "HeatMap", ",", "Raster", ")", ")", "or", "cdim", ":", "style", "=", "self", ".", "lookup_options", "(", "range_item", ",", "'style'", ")", "[", "self", ".", "cyclic_index", "]", "cmap", "=", "cm", ".", "get_cmap", "(", "style", ".", "get", "(", "'cmap'", ")", ")", "main_range", "=", "style", ".", "get", "(", "'clims'", ",", "main_range", ")", "else", ":", "cmap", "=", "None", "if", "offset", "and", "(", "'offset_line'", "not", "in", "self", ".", "handles", ")", ":", "self", ".", "handles", "[", "'offset_line'", "]", "=", "self", ".", "offset_linefn", "(", "offset", ",", "linewidth", "=", "1.0", ",", "color", "=", "'k'", ")", "elif", "offset", ":", "self", ".", "_update_separator", "(", "offset", ")", "if", "cmap", "is", "not", "None", ":", "self", ".", "_colorize_bars", "(", "cmap", ",", "bars", ",", "element", ",", "main_range", ",", "dim", ")", "return", "bars" ]
Process the bars and draw the offset line as necessary. If a color map is set in the style of the 'main' ViewableElement object, color the bars appropriately, respecting the required normalization settings.
[ "Process", "the", "bars", "and", "draw", "the", "offset", "line", "as", "necessary", ".", "If", "a", "color", "map", "is", "set", "in", "the", "style", "of", "the", "main", "ViewableElement", "object", "color", "the", "bars", "appropriately", "respecting", "the", "required", "normalization", "settings", "." ]
python
train
numenta/htmresearch
htmresearch/regions/TemporalPoolerRegion.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/regions/TemporalPoolerRegion.py#L439-L451
def getSpec(cls): """ Return the Spec for TemporalPoolerRegion. The parameters collection is constructed based on the parameters specified by the various components (poolerSpec and otherSpec) """ spec = cls.getBaseSpec() p, o = _getAdditionalSpecs() spec["parameters"].update(p) spec["parameters"].update(o) return spec
[ "def", "getSpec", "(", "cls", ")", ":", "spec", "=", "cls", ".", "getBaseSpec", "(", ")", "p", ",", "o", "=", "_getAdditionalSpecs", "(", ")", "spec", "[", "\"parameters\"", "]", ".", "update", "(", "p", ")", "spec", "[", "\"parameters\"", "]", ".", "update", "(", "o", ")", "return", "spec" ]
Return the Spec for TemporalPoolerRegion. The parameters collection is constructed based on the parameters specified by the various components (poolerSpec and otherSpec)
[ "Return", "the", "Spec", "for", "TemporalPoolerRegion", "." ]
python
train
jilljenn/tryalgo
tryalgo/trie.py
https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/trie.py#L46-L60
def spell_check(T, w): """Spellchecker :param T: trie encoding the dictionary :param w: given word :returns: a closest word from the dictionary :complexity: linear if distance was constant """ assert T is not None dist = 0 while True: # Try increasing distances u = search(T, dist, w) if u is not None: return u dist += 1
[ "def", "spell_check", "(", "T", ",", "w", ")", ":", "assert", "T", "is", "not", "None", "dist", "=", "0", "while", "True", ":", "# Try increasing distances", "u", "=", "search", "(", "T", ",", "dist", ",", "w", ")", "if", "u", "is", "not", "None", ":", "return", "u", "dist", "+=", "1" ]
Spellchecker :param T: trie encoding the dictionary :param w: given word :returns: a closest word from the dictionary :complexity: linear if distance was constant
[ "Spellchecker" ]
python
train
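The spellchecker is iterative deepening over edit distance: try distance 0, then 1, and so on until some dictionary word becomes reachable. The same loop works with a brute-force distance check in place of the trie search; edit_distance and the word list below are illustrative, not tryalgo's API:

def edit_distance(a, b):
    # classic Levenshtein DP with a rolling row
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1, cur[j - 1] + 1,
                           prev[j - 1] + (ca != cb)))
        prev = cur
    return prev[-1]

def spell_check_bruteforce(words, w):
    dist = 0
    while True:  # try increasing distances, exactly as above
        for u in words:
            if edit_distance(u, w) <= dist:
                return u
        dist += 1

print(spell_check_bruteforce(["cat", "cart", "card"], "cort"))  # -> 'cart'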
pantsbuild/pants
src/python/pants/pantsd/service/pants_service.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/pantsd/service/pants_service.py#L190-L196
def mark_running(self): """Moves the service to the Running state. Raises if the service is not currently in the Paused state. """ with self._lock: self._set_state(self._RUNNING, self._PAUSED)
[ "def", "mark_running", "(", "self", ")", ":", "with", "self", ".", "_lock", ":", "self", ".", "_set_state", "(", "self", ".", "_RUNNING", ",", "self", ".", "_PAUSED", ")" ]
Moves the service to the Running state. Raises if the service is not currently in the Paused state.
[ "Moves", "the", "service", "to", "the", "Running", "state", "." ]
python
train
twilio/twilio-python
twilio/rest/taskrouter/v1/workspace/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/taskrouter/v1/workspace/__init__.py#L369-L378
def task_queues(self): """ Access the task_queues :returns: twilio.rest.taskrouter.v1.workspace.task_queue.TaskQueueList :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.TaskQueueList """ if self._task_queues is None: self._task_queues = TaskQueueList(self._version, workspace_sid=self._solution['sid'], ) return self._task_queues
[ "def", "task_queues", "(", "self", ")", ":", "if", "self", ".", "_task_queues", "is", "None", ":", "self", ".", "_task_queues", "=", "TaskQueueList", "(", "self", ".", "_version", ",", "workspace_sid", "=", "self", ".", "_solution", "[", "'sid'", "]", ",", ")", "return", "self", ".", "_task_queues" ]
Access the task_queues :returns: twilio.rest.taskrouter.v1.workspace.task_queue.TaskQueueList :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.TaskQueueList
[ "Access", "the", "task_queues" ]
python
train
GNS3/gns3-server
gns3server/compute/vmware/vmware_vm.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/vmware/vmware_vm.py#L426-L472
def start(self): """ Starts this VMware VM. """ if self.status == "started": return if (yield from self.is_running()): raise VMwareError("The VM is already running in VMware") ubridge_path = self.ubridge_path if not ubridge_path or not os.path.isfile(ubridge_path): raise VMwareError("ubridge is necessary to start a VMware VM") yield from self._start_ubridge() self._read_vmx_file() # check if there is enough RAM to run if "memsize" in self._vmx_pairs: self.check_available_ram(int(self._vmx_pairs["memsize"])) self._set_network_options() self._set_serial_console() self._write_vmx_file() if self._headless: yield from self._control_vm("start", "nogui") else: yield from self._control_vm("start") try: if self._ubridge_hypervisor: for adapter_number in range(0, self._adapters): nio = self._ethernet_adapters[adapter_number].get_nio(0) if nio: yield from self._add_ubridge_connection(nio, adapter_number) yield from self._start_console() except VMwareError: yield from self.stop() raise if self._get_vmx_setting("vhv.enable", "TRUE"): self._hw_virtualization = True self._started = True self.status = "started" log.info("VMware VM '{name}' [{id}] started".format(name=self.name, id=self.id))
[ "def", "start", "(", "self", ")", ":", "if", "self", ".", "status", "==", "\"started\"", ":", "return", "if", "(", "yield", "from", "self", ".", "is_running", "(", ")", ")", ":", "raise", "VMwareError", "(", "\"The VM is already running in VMware\"", ")", "ubridge_path", "=", "self", ".", "ubridge_path", "if", "not", "ubridge_path", "or", "not", "os", ".", "path", ".", "isfile", "(", "ubridge_path", ")", ":", "raise", "VMwareError", "(", "\"ubridge is necessary to start a VMware VM\"", ")", "yield", "from", "self", ".", "_start_ubridge", "(", ")", "self", ".", "_read_vmx_file", "(", ")", "# check if there is enough RAM to run", "if", "\"memsize\"", "in", "self", ".", "_vmx_pairs", ":", "self", ".", "check_available_ram", "(", "int", "(", "self", ".", "_vmx_pairs", "[", "\"memsize\"", "]", ")", ")", "self", ".", "_set_network_options", "(", ")", "self", ".", "_set_serial_console", "(", ")", "self", ".", "_write_vmx_file", "(", ")", "if", "self", ".", "_headless", ":", "yield", "from", "self", ".", "_control_vm", "(", "\"start\"", ",", "\"nogui\"", ")", "else", ":", "yield", "from", "self", ".", "_control_vm", "(", "\"start\"", ")", "try", ":", "if", "self", ".", "_ubridge_hypervisor", ":", "for", "adapter_number", "in", "range", "(", "0", ",", "self", ".", "_adapters", ")", ":", "nio", "=", "self", ".", "_ethernet_adapters", "[", "adapter_number", "]", ".", "get_nio", "(", "0", ")", "if", "nio", ":", "yield", "from", "self", ".", "_add_ubridge_connection", "(", "nio", ",", "adapter_number", ")", "yield", "from", "self", ".", "_start_console", "(", ")", "except", "VMwareError", ":", "yield", "from", "self", ".", "stop", "(", ")", "raise", "if", "self", ".", "_get_vmx_setting", "(", "\"vhv.enable\"", ",", "\"TRUE\"", ")", ":", "self", ".", "_hw_virtualization", "=", "True", "self", ".", "_started", "=", "True", "self", ".", "status", "=", "\"started\"", "log", ".", "info", "(", "\"VMware VM '{name}' [{id}] started\"", ".", "format", "(", "name", "=", "self", ".", "name", ",", "id", "=", "self", ".", "id", ")", ")" ]
Starts this VMware VM.
[ "Starts", "this", "VMware", "VM", "." ]
python
train
cjdrake/pyeda
pyeda/boolalg/boolfunc.py
https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/boolalg/boolfunc.py#L268-L277
def iter_terms(fs, conj=False): """Iterate through all min/max terms in an N-dimensional Boolean space. The *fs* argument is a sequence of :math:`N` Boolean functions. If *conj* is ``False``, yield minterms. Otherwise, yield maxterms. """ for num in range(1 << len(fs)): yield num2term(num, fs, conj)
[ "def", "iter_terms", "(", "fs", ",", "conj", "=", "False", ")", ":", "for", "num", "in", "range", "(", "1", "<<", "len", "(", "fs", ")", ")", ":", "yield", "num2term", "(", "num", ",", "fs", ",", "conj", ")" ]
Iterate through all min/max terms in an N-dimensional Boolean space. The *fs* argument is a sequence of :math:`N` Boolean functions. If *conj* is ``False``, yield minterms. Otherwise, yield maxterms.
[ "Iterate", "through", "all", "min", "/", "max", "terms", "in", "an", "N", "-", "dimensional", "Boolean", "space", "." ]
python
train
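Each integer below 1 << N encodes one term: bit i selects whether function i appears complemented or not. A standalone, string-based illustration of minterm enumeration, independent of pyeda's expression types:

def iter_minterm_patterns(names):
    n = len(names)
    for num in range(1 << n):
        # bit i of num decides whether variable i is complemented
        yield tuple(name if (num >> i) & 1 else "~" + name
                    for i, name in enumerate(names))

for term in iter_minterm_patterns(["a", "b"]):
    print(" & ".join(term))
# ~a & ~b
# a & ~b
# ~a & b
# a & b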
graphql-python/graphql-core-next
graphql/type/scalars.py
https://github.com/graphql-python/graphql-core-next/blob/073dce3f002f897d40f9348ffd8f107815160540/graphql/type/scalars.py#L113-L117
def parse_float_literal(ast, _variables=None): """Parse a float value node in the AST.""" if isinstance(ast, (FloatValueNode, IntValueNode)): return float(ast.value) return INVALID
[ "def", "parse_float_literal", "(", "ast", ",", "_variables", "=", "None", ")", ":", "if", "isinstance", "(", "ast", ",", "(", "FloatValueNode", ",", "IntValueNode", ")", ")", ":", "return", "float", "(", "ast", ".", "value", ")", "return", "INVALID" ]
Parse a float value node in the AST.
[ "Parse", "a", "float", "value", "node", "in", "the", "AST", "." ]
python
train
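The rule is simply: accept Float and Int literal nodes, coerce to float, and return the INVALID sentinel for everything else. A self-contained sketch with stand-in node classes (the real ones live in graphql.language.ast, and INVALID is the library's own sentinel):

class IntValueNode:  # stand-in for the real AST node class
    def __init__(self, value):
        self.value = value

class FloatValueNode:  # stand-in for the real AST node class
    def __init__(self, value):
        self.value = value

INVALID = object()  # sentinel, standing in for graphql's INVALID

def parse_float_literal(ast, _variables=None):
    if isinstance(ast, (FloatValueNode, IntValueNode)):
        return float(ast.value)
    return INVALID

assert parse_float_literal(FloatValueNode("3.14")) == 3.14
assert parse_float_literal(IntValueNode("2")) == 2.0
assert parse_float_literal("not a node") is INVALID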
ceph/ceph-deploy
ceph_deploy/pkg.py
https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/pkg.py#L60-L86
def make(parser): """ Manage packages on remote hosts. """ action = parser.add_mutually_exclusive_group() action.add_argument( '--install', metavar='PKG(s)', help='Comma-separated package(s) to install', ) action.add_argument( '--remove', metavar='PKG(s)', help='Comma-separated package(s) to remove', ) parser.add_argument( 'hosts', nargs='+', ) parser.set_defaults( func=pkg, )
[ "def", "make", "(", "parser", ")", ":", "action", "=", "parser", ".", "add_mutually_exclusive_group", "(", ")", "action", ".", "add_argument", "(", "'--install'", ",", "metavar", "=", "'PKG(s)'", ",", "help", "=", "'Comma-separated package(s) to install'", ",", ")", "action", ".", "add_argument", "(", "'--remove'", ",", "metavar", "=", "'PKG(s)'", ",", "help", "=", "'Comma-separated package(s) to remove'", ",", ")", "parser", ".", "add_argument", "(", "'hosts'", ",", "nargs", "=", "'+'", ",", ")", "parser", ".", "set_defaults", "(", "func", "=", "pkg", ",", ")" ]
Manage packages on remote hosts.
[ "Manage", "packages", "on", "remote", "hosts", "." ]
python
train
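The parser wiring is plain argparse: a mutually exclusive --install/--remove pair, positional hosts, and a default func dispatched later. A runnable reconstruction with a placeholder handler in place of ceph-deploy's real pkg task:

import argparse

def pkg(args):  # placeholder handler, not ceph-deploy's implementation
    print("install:", args.install, "remove:", args.remove, "hosts:", args.hosts)

parser = argparse.ArgumentParser()
action = parser.add_mutually_exclusive_group()
action.add_argument("--install", metavar="PKG(s)")
action.add_argument("--remove", metavar="PKG(s)")
parser.add_argument("hosts", nargs="+")
parser.set_defaults(func=pkg)

args = parser.parse_args(["--install", "vim,git", "node1", "node2"])
args.func(args)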
hotdoc/hotdoc
hotdoc/utils/setup_utils.py
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/utils/setup_utils.py#L75-L79
def _update_submodules(repo_dir): """update submodules in a repo""" subprocess.check_call("git submodule init", cwd=repo_dir, shell=True) subprocess.check_call( "git submodule update --recursive", cwd=repo_dir, shell=True)
[ "def", "_update_submodules", "(", "repo_dir", ")", ":", "subprocess", ".", "check_call", "(", "\"git submodule init\"", ",", "cwd", "=", "repo_dir", ",", "shell", "=", "True", ")", "subprocess", ".", "check_call", "(", "\"git submodule update --recursive\"", ",", "cwd", "=", "repo_dir", ",", "shell", "=", "True", ")" ]
update submodules in a repo
[ "update", "submodules", "in", "a", "repo" ]
python
train
horazont/aioxmpp
aioxmpp/disco/service.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/disco/service.py#L812-L821
def set_info_cache(self, jid, node, info): """ This is a wrapper around :meth:`set_info_future` which creates a future and immediately assigns `info` as its result. .. versionadded:: 0.5 """ fut = asyncio.Future() fut.set_result(info) self.set_info_future(jid, node, fut)
[ "def", "set_info_cache", "(", "self", ",", "jid", ",", "node", ",", "info", ")", ":", "fut", "=", "asyncio", ".", "Future", "(", ")", "fut", ".", "set_result", "(", "info", ")", "self", ".", "set_info_future", "(", "jid", ",", "node", ",", "fut", ")" ]
This is a wrapper around :meth:`set_info_future` which creates a future and immediately assigns `info` as its result. .. versionadded:: 0.5
[ "This", "is", "a", "wrapper", "around", ":", "meth", ":", "set_info_future", "which", "creates", "a", "future", "and", "immediately", "assigns", "info", "as", "its", "result", "." ]
python
train
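Wrapping an already-known value in a pre-resolved future lets cache hits flow through the same await-based code path as pending lookups. A minimal standalone sketch of that pattern:

import asyncio

async def main():
    fut = asyncio.get_running_loop().create_future()
    fut.set_result({"features": ["jabber:iq:version"]})  # the cached "info"
    # consumers can now await it exactly like an in-flight request
    print(await fut)

asyncio.run(main())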
KelSolaar/Umbra
umbra/exceptions.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/exceptions.py#L53-L67
def notify_exception_handler(*args): """ Provides a notifier exception handler. :param \*args: Arguments. :type \*args: \* :return: Definition success. :rtype: bool """ callback = RuntimeGlobals.components_manager["factory.script_editor"].restore_development_layout foundations.exceptions.base_exception_handler(*args) cls, instance = foundations.exceptions.extract_exception(*args)[:2] RuntimeGlobals.notifications_manager.exceptify(message="{0}".format(instance), notification_clicked_slot=callback) return True
[ "def", "notify_exception_handler", "(", "*", "args", ")", ":", "callback", "=", "RuntimeGlobals", ".", "components_manager", "[", "\"factory.script_editor\"", "]", ".", "restore_development_layout", "foundations", ".", "exceptions", ".", "base_exception_handler", "(", "*", "args", ")", "cls", ",", "instance", "=", "foundations", ".", "exceptions", ".", "extract_exception", "(", "*", "args", ")", "[", ":", "2", "]", "RuntimeGlobals", ".", "notifications_manager", ".", "exceptify", "(", "message", "=", "\"{0}\"", ".", "format", "(", "instance", ")", ",", "notification_clicked_slot", "=", "callback", ")", "return", "True" ]
Provides a notifier exception handler. :param \*args: Arguments. :type \*args: \* :return: Definition success. :rtype: bool
[ "Provides", "a", "notifier", "exception", "handler", "." ]
python
train
konstantint/PassportEye
passporteye/util/pipeline.py
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/pipeline.py#L68-L78
def remove_component(self, name): """Removes an existing component with a given name, invalidating all the values computed by the previous component.""" if name not in self.components: raise Exception("No component named %s" % name) del self.components[name] del self.depends[name] for p in self.provides[name]: del self.whoprovides[p] self.invalidate(p) del self.provides[name]
[ "def", "remove_component", "(", "self", ",", "name", ")", ":", "if", "name", "not", "in", "self", ".", "components", ":", "raise", "Exception", "(", "\"No component named %s\"", "%", "name", ")", "del", "self", ".", "components", "[", "name", "]", "del", "self", ".", "depends", "[", "name", "]", "for", "p", "in", "self", ".", "provides", "[", "name", "]", ":", "del", "self", ".", "whoprovides", "[", "p", "]", "self", ".", "invalidate", "(", "p", ")", "del", "self", ".", "provides", "[", "name", "]" ]
Removes an existing component with a given name, invalidating all the values computed by the previous component.
[ "Removes", "an", "existing", "component", "with", "a", "given", "name", "invalidating", "all", "the", "values", "computed", "by", "the", "previous", "component", "." ]
python
train
tomekwojcik/envelopes
envelopes/conn.py
https://github.com/tomekwojcik/envelopes/blob/8ad190a55d0d8b805b6ae545b896e719467253b7/envelopes/conn.py#L53-L61
def is_connected(self): """Returns *True* if the SMTP connection is initialized and connected. Otherwise returns *False*""" try: self._conn.noop() except (AttributeError, smtplib.SMTPServerDisconnected): return False else: return True
[ "def", "is_connected", "(", "self", ")", ":", "try", ":", "self", ".", "_conn", ".", "noop", "(", ")", "except", "(", "AttributeError", ",", "smtplib", ".", "SMTPServerDisconnected", ")", ":", "return", "False", "else", ":", "return", "True" ]
Returns *True* if the SMTP connection is initialized and connected. Otherwise returns *False*
[ "Returns", "*", "True", "*", "if", "the", "SMTP", "connection", "is", "initialized", "and", "connected", ".", "Otherwise", "returns", "*", "False", "*" ]
python
train
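The liveness probe is SMTP NOOP: a disconnected or never-initialized connection raises, a live one answers. The same check, written as a standalone function over any smtplib connection object:

import smtplib

def is_connected(conn):
    """Return True if `conn` is an initialized, live SMTP connection."""
    try:
        conn.noop()
    except (AttributeError, smtplib.SMTPServerDisconnected):
        return False
    return True

print(is_connected(None))  # False: .noop() on None raises AttributeError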
timmahrt/ProMo
promo/f0_morph.py
https://github.com/timmahrt/ProMo/blob/99d9f5cc01ff328a62973c5a5da910cc905ae4d5/promo/f0_morph.py#L35-L43
def getPitchForIntervals(data, tgFN, tierName): ''' Preps data for use in f0Morph ''' tg = tgio.openTextgrid(tgFN) data = tg.tierDict[tierName].getValuesInIntervals(data) data = [dataList for _, dataList in data] return data
[ "def", "getPitchForIntervals", "(", "data", ",", "tgFN", ",", "tierName", ")", ":", "tg", "=", "tgio", ".", "openTextgrid", "(", "tgFN", ")", "data", "=", "tg", ".", "tierDict", "[", "tierName", "]", ".", "getValuesInIntervals", "(", "data", ")", "data", "=", "[", "dataList", "for", "_", ",", "dataList", "in", "data", "]", "return", "data" ]
Preps data for use in f0Morph
[ "Preps", "data", "for", "use", "in", "f0Morph" ]
python
train
kovacsbalu/WazeRouteCalculator
WazeRouteCalculator/WazeRouteCalculator.py
https://github.com/kovacsbalu/WazeRouteCalculator/blob/13ddb064571bb2bc0ceec51b5b317640b2bc3fb2/WazeRouteCalculator/WazeRouteCalculator.py#L76-L80
def already_coords(self, address):
        """test used to see if we have coordinates or address"""

        m = re.search(self.COORD_MATCH, address)
        return m is not None
[ "def", "already_coords", "(", "self", ",", "address", ")", ":", "m", "=", "re", ".", "search", "(", "self", ".", "COORD_MATCH", ",", "address", ")", "return", "m", "is", "not", "None" ]
test used to see if we have coordinates or address
[ "test", "used", "to", "see", "if", "we", "have", "coordinates", "or", "address" ]
python
train
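COORD_MATCH itself is defined elsewhere on the class, so the pattern below is a hypothetical latitude,longitude regex used only to show the intent:

import re

COORD_MATCH = r"^-?\d+(\.\d+)?,\s*-?\d+(\.\d+)?$"  # assumed pattern

def already_coords(address):
    return re.search(COORD_MATCH, address) is not None

print(already_coords("47.4979, 19.0402"))   # True: looks like coordinates
print(already_coords("Budapest, Hungary"))  # False: a street-style address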
libtcod/python-tcod
tdl/__init__.py
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tdl/__init__.py#L593-L611
def get_cursor(self):
        """Return the virtual cursor position.

        The cursor can be moved with the :any:`move` method.

        Returns:
            Tuple[int, int]: The (x, y) coordinate of where :any:`print_str`
                will continue from.

        .. seealso:: :any:`move`
        """
        x, y = self._cursor
        width, height = self.parent.get_size()
        while x >= width:
            x -= width
            y += 1
        if y >= height and self.scrollMode == 'scroll':
            y = height - 1
        return x, y
[ "def", "get_cursor", "(", "self", ")", ":", "x", ",", "y", "=", "self", ".", "_cursor", "width", ",", "height", "=", "self", ".", "parent", ".", "get_size", "(", ")", "while", "x", ">=", "width", ":", "x", "-=", "width", "y", "+=", "1", "if", "y", ">=", "height", "and", "self", ".", "scrollMode", "==", "'scroll'", ":", "y", "=", "height", "-", "1", "return", "x", ",", "y" ]
Return the virtual cursor position.

        The cursor can be moved with the :any:`move` method.

        Returns:
            Tuple[int, int]: The (x, y) coordinate of where :any:`print_str`
                will continue from.

        .. seealso:: :any:`move`
[ "Return", "the", "virtual", "cursor", "position", "." ]
python
train
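The arithmetic is pure bookkeeping: x overflow wraps to the next row, and in 'scroll' mode y is clamped to the last row. A standalone version of just that math:

def normalize_cursor(x, y, width, height, scroll_mode="scroll"):
    while x >= width:   # wrap horizontal overflow onto following rows
        x -= width
        y += 1
    if y >= height and scroll_mode == "scroll":
        y = height - 1  # clamp to the last row when scrolling
    return x, y

print(normalize_cursor(25, 0, width=10, height=5))  # (5, 2)
print(normalize_cursor(5, 9, width=10, height=5))   # (5, 4), clamped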
bcbio/bcbio-nextgen
bcbio/structural/regions.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/regions.py#L215-L229
def _calculate_sv_coverage_cnvkit(data, work_dir):
    """Calculate coverage in a CNVkit ready format using mosdepth.
    """
    from bcbio.variation import coverage
    from bcbio.structural import annotate
    out_target_file = os.path.join(work_dir, "%s-target-coverage.cnn" % dd.get_sample_name(data))
    out_anti_file = os.path.join(work_dir, "%s-antitarget-coverage.cnn" % dd.get_sample_name(data))
    if ((not utils.file_exists(out_target_file) or not utils.file_exists(out_anti_file))
          and (dd.get_align_bam(data) or dd.get_work_bam(data))):
        target_cov = coverage.run_mosdepth(data, "target", tz.get_in(["regions", "bins", "target"], data))
        anti_cov = coverage.run_mosdepth(data, "antitarget", tz.get_in(["regions", "bins", "antitarget"], data))
        target_cov_genes = annotate.add_genes(target_cov.regions, data, max_distance=0)
        out_target_file = _add_log2_depth(target_cov_genes, out_target_file, data)
        out_anti_file = _add_log2_depth(anti_cov.regions, out_anti_file, data)
    return out_target_file, out_anti_file
[ "def", "_calculate_sv_coverage_cnvkit", "(", "data", ",", "work_dir", ")", ":", "from", "bcbio", ".", "variation", "import", "coverage", "from", "bcbio", ".", "structural", "import", "annotate", "out_target_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"%s-target-coverage.cnn\"", "%", "dd", ".", "get_sample_name", "(", "data", ")", ")", "out_anti_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"%s-antitarget-coverage.cnn\"", "%", "dd", ".", "get_sample_name", "(", "data", ")", ")", "if", "(", "(", "not", "utils", ".", "file_exists", "(", "out_target_file", ")", "or", "not", "utils", ".", "file_exists", "(", "out_anti_file", ")", ")", "and", "(", "dd", ".", "get_align_bam", "(", "data", ")", "or", "dd", ".", "get_work_bam", "(", "data", ")", ")", ")", ":", "target_cov", "=", "coverage", ".", "run_mosdepth", "(", "data", ",", "\"target\"", ",", "tz", ".", "get_in", "(", "[", "\"regions\"", ",", "\"bins\"", ",", "\"target\"", "]", ",", "data", ")", ")", "anti_cov", "=", "coverage", ".", "run_mosdepth", "(", "data", ",", "\"antitarget\"", ",", "tz", ".", "get_in", "(", "[", "\"regions\"", ",", "\"bins\"", ",", "\"antitarget\"", "]", ",", "data", ")", ")", "target_cov_genes", "=", "annotate", ".", "add_genes", "(", "target_cov", ".", "regions", ",", "data", ",", "max_distance", "=", "0", ")", "out_target_file", "=", "_add_log2_depth", "(", "target_cov_genes", ",", "out_target_file", ",", "data", ")", "out_anti_file", "=", "_add_log2_depth", "(", "anti_cov", ".", "regions", ",", "out_anti_file", ",", "data", ")", "return", "out_target_file", ",", "out_anti_file" ]
Calculate coverage in a CNVkit ready format using mosdepth.
[ "Calculate", "coverage", "in", "a", "CNVkit", "ready", "format", "using", "mosdepth", "." ]
python
train
Rapptz/discord.py
discord/iterators.py
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/iterators.py#L325-L333
async def _retrieve_messages_before_strategy(self, retrieve): """Retrieve messages using before parameter.""" before = self.before.id if self.before else None data = await self.logs_from(self.channel.id, retrieve, before=before) if len(data): if self.limit is not None: self.limit -= retrieve self.before = Object(id=int(data[-1]['id'])) return data
[ "async", "def", "_retrieve_messages_before_strategy", "(", "self", ",", "retrieve", ")", ":", "before", "=", "self", ".", "before", ".", "id", "if", "self", ".", "before", "else", "None", "data", "=", "await", "self", ".", "logs_from", "(", "self", ".", "channel", ".", "id", ",", "retrieve", ",", "before", "=", "before", ")", "if", "len", "(", "data", ")", ":", "if", "self", ".", "limit", "is", "not", "None", ":", "self", ".", "limit", "-=", "retrieve", "self", ".", "before", "=", "Object", "(", "id", "=", "int", "(", "data", "[", "-", "1", "]", "[", "'id'", "]", ")", ")", "return", "data" ]
Retrieve messages using before parameter.
[ "Retrieve", "messages", "using", "before", "parameter", "." ]
python
train
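This is classic cursor pagination: each request asks for messages strictly older than the last id seen, and the remaining limit shrinks by the page size. A self-contained sketch over a list of integer ids, where fetch_page stands in for the HTTP call:

def fetch_page(items, before, size):
    older = [i for i in items if before is None or i < before]
    return sorted(older, reverse=True)[:size]

items = list(range(1, 11))  # message ids 1..10, newest is 10
before, limit, out = None, 7, []
while limit > 0:
    page = fetch_page(items, before, min(limit, 3))
    if not page:
        break
    out += page
    limit -= len(page)
    before = page[-1]  # oldest id of this page becomes the next cursor
print(out)  # [10, 9, 8, 7, 6, 5, 4]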
MrYsLab/pymata-aio
pymata_aio/pymata3.py
https://github.com/MrYsLab/pymata-aio/blob/015081a4628b9d47dfe3f8d6c698ff903f107810/pymata_aio/pymata3.py#L575-L593
def set_pin_mode(self, pin_number, pin_state, callback=None, cb_type=None): """ This method sets the pin mode for the specified pin. :param pin_number: Arduino Pin Number :param pin_state: INPUT/OUTPUT/ANALOG/PWM/PULLUP - for SERVO use servo_config() :param callback: Optional: A reference to a call back function to be called when pin data value changes :param cb_type: Constants.CB_TYPE_DIRECT = direct call or Constants.CB_TYPE_ASYNCIO = asyncio coroutine :returns: No return value """ task = asyncio.ensure_future(self.core.set_pin_mode(pin_number, pin_state, callback, cb_type)) self.loop.run_until_complete(task)
[ "def", "set_pin_mode", "(", "self", ",", "pin_number", ",", "pin_state", ",", "callback", "=", "None", ",", "cb_type", "=", "None", ")", ":", "task", "=", "asyncio", ".", "ensure_future", "(", "self", ".", "core", ".", "set_pin_mode", "(", "pin_number", ",", "pin_state", ",", "callback", ",", "cb_type", ")", ")", "self", ".", "loop", ".", "run_until_complete", "(", "task", ")" ]
This method sets the pin mode for the specified pin. :param pin_number: Arduino Pin Number :param pin_state: INPUT/OUTPUT/ANALOG/PWM/PULLUP - for SERVO use servo_config() :param callback: Optional: A reference to a call back function to be called when pin data value changes :param cb_type: Constants.CB_TYPE_DIRECT = direct call or Constants.CB_TYPE_ASYNCIO = asyncio coroutine :returns: No return value
[ "This", "method", "sets", "the", "pin", "mode", "for", "the", "specified", "pin", "." ]
python
train
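PyMata3 exposes a synchronous facade over an asyncio core by driving each coroutine to completion on its own loop. A minimal sketch of that pattern, where Core is a stand-in and the scheduling is simplified to a direct run_until_complete:

import asyncio

class Core:
    async def set_pin_mode(self, pin_number, pin_state):
        await asyncio.sleep(0)  # stands in for serial I/O to the board
        print("pin %d -> %s" % (pin_number, pin_state))

class Board:
    def __init__(self):
        self.core = Core()
        self.loop = asyncio.new_event_loop()

    def set_pin_mode(self, pin_number, pin_state):
        # block the caller until the coroutine finishes
        self.loop.run_until_complete(
            self.core.set_pin_mode(pin_number, pin_state))

Board().set_pin_mode(13, "OUTPUT")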
pkkid/python-plexapi
plexapi/server.py
https://github.com/pkkid/python-plexapi/blob/9efbde96441c2bfbf410eacfb46e811e108e8bbc/plexapi/server.py#L151-L157
def _headers(self, **kwargs): """ Returns dict containing base headers for all requests to the server. """ headers = BASE_HEADERS.copy() if self._token: headers['X-Plex-Token'] = self._token headers.update(kwargs) return headers
[ "def", "_headers", "(", "self", ",", "*", "*", "kwargs", ")", ":", "headers", "=", "BASE_HEADERS", ".", "copy", "(", ")", "if", "self", ".", "_token", ":", "headers", "[", "'X-Plex-Token'", "]", "=", "self", ".", "_token", "headers", ".", "update", "(", "kwargs", ")", "return", "headers" ]
Returns dict containing base headers for all requests to the server.
[ "Returns", "dict", "containing", "base", "headers", "for", "all", "requests", "to", "the", "server", "." ]
python
train
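The merging rule is: start from the base headers, add the auth token when present, and let caller kwargs win last. A standalone sketch (BASE_HEADERS here is a placeholder; the real dict in plexapi carries more fields):

BASE_HEADERS = {"X-Plex-Platform": "Python", "Accept": "application/json"}

def build_headers(token=None, **kwargs):
    headers = BASE_HEADERS.copy()
    if token:
        headers["X-Plex-Token"] = token
    headers.update(kwargs)  # per-call overrides take precedence
    return headers

print(build_headers(token="abc123", Accept="application/xml"))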
edx/edx-django-utils
edx_django_utils/monitoring/middleware.py
https://github.com/edx/edx-django-utils/blob/16cb4ac617e53c572bf68ccd19d24afeff1ca769/edx_django_utils/monitoring/middleware.py#L66-L78
def _batch_report(cls, request): """ Report the collected custom metrics to New Relic. """ if not newrelic: return metrics_cache = cls._get_metrics_cache() try: newrelic.agent.add_custom_parameter('user_id', request.user.id) except AttributeError: pass for key, value in metrics_cache.data.items(): newrelic.agent.add_custom_parameter(key, value)
[ "def", "_batch_report", "(", "cls", ",", "request", ")", ":", "if", "not", "newrelic", ":", "return", "metrics_cache", "=", "cls", ".", "_get_metrics_cache", "(", ")", "try", ":", "newrelic", ".", "agent", ".", "add_custom_parameter", "(", "'user_id'", ",", "request", ".", "user", ".", "id", ")", "except", "AttributeError", ":", "pass", "for", "key", ",", "value", "in", "metrics_cache", ".", "data", ".", "items", "(", ")", ":", "newrelic", ".", "agent", ".", "add_custom_parameter", "(", "key", ",", "value", ")" ]
Report the collected custom metrics to New Relic.
[ "Report", "the", "collected", "custom", "metrics", "to", "New", "Relic", "." ]
python
train
20c/twentyc.database
twentyc/database/couchdb/client.py
https://github.com/20c/twentyc.database/blob/c6b7184d66dddafb306c94c4f98234bef1df1291/twentyc/database/couchdb/client.py#L433-L447
def result(self, couchdb_response_text): """ Return whether a REST couchdb operation was successful or not. On error will raise a RESTException """ result = json.loads(couchdb_response_text) if result.get("ok"): return True elif result.get("error"): raise RESTException( "%s: %s" % (result.get("error"), result.get("reason")) ) return result
[ "def", "result", "(", "self", ",", "couchdb_response_text", ")", ":", "result", "=", "json", ".", "loads", "(", "couchdb_response_text", ")", "if", "result", ".", "get", "(", "\"ok\"", ")", ":", "return", "True", "elif", "result", ".", "get", "(", "\"error\"", ")", ":", "raise", "RESTException", "(", "\"%s: %s\"", "%", "(", "result", ".", "get", "(", "\"error\"", ")", ",", "result", ".", "get", "(", "\"reason\"", ")", ")", ")", "return", "result" ]
Return whether a REST couchdb operation was successful or not. On error will raise a RESTException
[ "Return", "whether", "a", "REST", "couchdb", "operation", "was", "successful", "or", "not", "." ]
python
train
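Interpreting the CouchDB JSON reply can be exercised with canned response strings; RESTException below is a stand-in for the library's exception class:

import json

class RESTException(Exception):  # stand-in for the library's class
    pass

def result(response_text):
    data = json.loads(response_text)
    if data.get("ok"):
        return True
    if data.get("error"):
        raise RESTException("%s: %s" % (data.get("error"), data.get("reason")))
    return data

print(result('{"ok": true}'))  # True
try:
    result('{"error": "conflict", "reason": "Document update conflict."}')
except RESTException as exc:
    print(exc)  # conflict: Document update conflict.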
ConsenSys/mythril-classic
mythril/laser/ethereum/svm.py
https://github.com/ConsenSys/mythril-classic/blob/27af71c34b2ce94f4fae5613ec457f93df1a8f56/mythril/laser/ethereum/svm.py#L483-L500
def register_laser_hooks(self, hook_type: str, hook: Callable): """registers the hook with this Laser VM""" if hook_type == "add_world_state": self._add_world_state_hooks.append(hook) elif hook_type == "execute_state": self._execute_state_hooks.append(hook) elif hook_type == "start_sym_exec": self._start_sym_exec_hooks.append(hook) elif hook_type == "stop_sym_exec": self._stop_sym_exec_hooks.append(hook) elif hook_type == "start_sym_trans": self._start_sym_trans_hooks.append(hook) elif hook_type == "stop_sym_trans": self._stop_sym_trans_hooks.append(hook) else: raise ValueError( "Invalid hook type %s. Must be one of {add_world_state}", hook_type )
[ "def", "register_laser_hooks", "(", "self", ",", "hook_type", ":", "str", ",", "hook", ":", "Callable", ")", ":", "if", "hook_type", "==", "\"add_world_state\"", ":", "self", ".", "_add_world_state_hooks", ".", "append", "(", "hook", ")", "elif", "hook_type", "==", "\"execute_state\"", ":", "self", ".", "_execute_state_hooks", ".", "append", "(", "hook", ")", "elif", "hook_type", "==", "\"start_sym_exec\"", ":", "self", ".", "_start_sym_exec_hooks", ".", "append", "(", "hook", ")", "elif", "hook_type", "==", "\"stop_sym_exec\"", ":", "self", ".", "_stop_sym_exec_hooks", ".", "append", "(", "hook", ")", "elif", "hook_type", "==", "\"start_sym_trans\"", ":", "self", ".", "_start_sym_trans_hooks", ".", "append", "(", "hook", ")", "elif", "hook_type", "==", "\"stop_sym_trans\"", ":", "self", ".", "_stop_sym_trans_hooks", ".", "append", "(", "hook", ")", "else", ":", "raise", "ValueError", "(", "\"Invalid hook type %s. Must be one of {add_world_state}\"", ",", "hook_type", ")" ]
registers the hook with this Laser VM
[ "registers", "the", "hook", "with", "this", "Laser", "VM" ]
python
train
mitsei/dlkit
dlkit/json_/repository/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/managers.py#L1762-L1779
def get_composition_repository_session(self, proxy): """Gets the session for retrieving composition to repository mappings. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.repository.CompositionRepositorySession) - a ``CompositionRepositorySession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_composition_repository()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_composition_repository()`` is ``true``.* """ if not self.supports_composition_repository(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.CompositionRepositorySession(proxy=proxy, runtime=self._runtime)
[ "def", "get_composition_repository_session", "(", "self", ",", "proxy", ")", ":", "if", "not", "self", ".", "supports_composition_repository", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "# pylint: disable=no-member", "return", "sessions", ".", "CompositionRepositorySession", "(", "proxy", "=", "proxy", ",", "runtime", "=", "self", ".", "_runtime", ")" ]
Gets the session for retrieving composition to repository mappings. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.repository.CompositionRepositorySession) - a ``CompositionRepositorySession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_composition_repository()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_composition_repository()`` is ``true``.*
[ "Gets", "the", "session", "for", "retrieving", "composition", "to", "repository", "mappings", "." ]
python
train
ella/ella
ella/core/managers.py
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/core/managers.py#L83-L97
def collect_related(self, finder_funcs, obj, count, *args, **kwargs): """ Collects objects related to ``obj`` using a list of ``finder_funcs``. Stops when required count is collected or the function list is exhausted. """ collected = [] for func in finder_funcs: gathered = func(obj, count, collected, *args, **kwargs) if gathered: collected += gathered if len(collected) >= count: return collected[:count] return collected
[ "def", "collect_related", "(", "self", ",", "finder_funcs", ",", "obj", ",", "count", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "collected", "=", "[", "]", "for", "func", "in", "finder_funcs", ":", "gathered", "=", "func", "(", "obj", ",", "count", ",", "collected", ",", "*", "args", ",", "*", "*", "kwargs", ")", "if", "gathered", ":", "collected", "+=", "gathered", "if", "len", "(", "collected", ")", ">=", "count", ":", "return", "collected", "[", ":", "count", "]", "return", "collected" ]
Collects objects related to ``obj`` using a list of ``finder_funcs``. Stops when required count is collected or the function list is exhausted.
[ "Collects", "objects", "related", "to", "obj", "using", "a", "list", "of", "finder_funcs", ".", "Stops", "when", "required", "count", "is", "collected", "or", "the", "function", "list", "is", "exhausted", "." ]
python
train
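Stripped of the Django context, the collector runs finder functions in order until enough related objects are gathered, passing along what was found so far so finders can avoid duplicates. A self-contained sketch with toy finders:

def collect_related(finder_funcs, obj, count):
    collected = []
    for func in finder_funcs:
        gathered = func(obj, count, collected)
        if gathered:
            collected += gathered
        if len(collected) >= count:
            return collected[:count]
    return collected

def by_tag(obj, count, collected):       # toy finder
    return ["t1", "t2"]

def by_category(obj, count, collected):  # toy fallback finder
    return ["c1", "c2", "c3"]

print(collect_related([by_tag, by_category], None, 3))  # ['t1', 't2', 'c1']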
yyuu/botornado
boto/ec2/volume.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/ec2/volume.py#L122-L144
def detach(self, force=False): """ Detach this EBS volume from an EC2 instance. :type force: bool :param force: Forces detachment if the previous detachment attempt did not occur cleanly. This option can lead to data loss or a corrupted file system. Use this option only as a last resort to detach a volume from a failed instance. The instance will not have an opportunity to flush file system caches nor file system meta data. If you use this option, you must perform file system check and repair procedures. :rtype: bool :return: True if successful """ instance_id = None if self.attach_data: instance_id = self.attach_data.instance_id device = None if self.attach_data: device = self.attach_data.device return self.connection.detach_volume(self.id, instance_id, device, force)
[ "def", "detach", "(", "self", ",", "force", "=", "False", ")", ":", "instance_id", "=", "None", "if", "self", ".", "attach_data", ":", "instance_id", "=", "self", ".", "attach_data", ".", "instance_id", "device", "=", "None", "if", "self", ".", "attach_data", ":", "device", "=", "self", ".", "attach_data", ".", "device", "return", "self", ".", "connection", ".", "detach_volume", "(", "self", ".", "id", ",", "instance_id", ",", "device", ",", "force", ")" ]
Detach this EBS volume from an EC2 instance. :type force: bool :param force: Forces detachment if the previous detachment attempt did not occur cleanly. This option can lead to data loss or a corrupted file system. Use this option only as a last resort to detach a volume from a failed instance. The instance will not have an opportunity to flush file system caches nor file system meta data. If you use this option, you must perform file system check and repair procedures. :rtype: bool :return: True if successful
[ "Detach", "this", "EBS", "volume", "from", "an", "EC2", "instance", "." ]
python
train
pkgw/pwkit
pwkit/environments/casa/util.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/environments/casa/util.py#L221-L306
def forkandlog(function, filter='INFO5', debug=False): """Fork a child process and read its CASA log output. function A function to run in the child process filter The CASA log level filter to apply in the child process: less urgent messages will not be shown. Valid values are strings: "DEBUG1", "INFO5", ... "INFO1", "INFO", "WARN", "SEVERE". debug If true, the standard output and error of the child process are *not* redirected to /dev/null. Some CASA tools produce important results that are *only* provided via log messages. This is a problem for automation, since there’s no way for Python code to intercept those log messages and extract the results of interest. This function provides a framework for working around this limitation: by forking a child process and sending its log output to a pipe, the parent process can capture the log messages. This function is a generator. It yields lines from the child process’ CASA log output. Because the child process is a fork of the parent, it inherits a complete clone of the parent’s state at the time of forking. That means that the *function* argument you pass it can do just about anything you’d do in a regular program. The child process’ standard output and error streams are redirected to ``/dev/null`` unless the *debug* argument is true. Note that the CASA log output is redirected to a pipe that is neither of these streams. So, if the function raises an unhandled Python exception, the Python traceback will not pollute the CASA log output. But, by the same token, the calling program will not be able to detect that the exception occurred except by its impact on the expected log output. """ import sys, os readfd, writefd = os.pipe() pid = os.fork() if pid == 0: # Child process. We never leave this branch. # # Log messages of priority >WARN are sent to stderr regardless of the # status of log.showconsole(). The idea is for this subprocess to be # something super lightweight and constrained, so it seems best to # nullify stderr, and stdout, to not pollute the output of the calling # process. # # I thought of using the default logger() setup and dup2'ing stderr to # the pipe fd, but then if anything else gets printed to stderr (e.g. # Python exception info), it'll get sent along the pipe too. The # caller would have to be much more complex to be able to detect and # handle such output. os.close(readfd) if not debug: f = open(os.devnull, 'w') os.dup2(f.fileno(), 1) os.dup2(f.fileno(), 2) sink = logger(filter=filter) sink.setlogfile(b'/dev/fd/%d' % writefd) function(sink) sys.exit(0) # Original process. os.close(writefd) with os.fdopen(readfd) as readhandle: for line in readhandle: yield line info = os.waitpid(pid, 0) if info[1]: # Because we're a generator, this is the only way for us to signal if # the process died. We could be rewritten as a context manager. e = RuntimeError('logging child process PID %d exited ' 'with error code %d' % tuple(info)) e.pid, e.exitcode = info raise e
[ "def", "forkandlog", "(", "function", ",", "filter", "=", "'INFO5'", ",", "debug", "=", "False", ")", ":", "import", "sys", ",", "os", "readfd", ",", "writefd", "=", "os", ".", "pipe", "(", ")", "pid", "=", "os", ".", "fork", "(", ")", "if", "pid", "==", "0", ":", "# Child process. We never leave this branch.", "#", "# Log messages of priority >WARN are sent to stderr regardless of the", "# status of log.showconsole(). The idea is for this subprocess to be", "# something super lightweight and constrained, so it seems best to", "# nullify stderr, and stdout, to not pollute the output of the calling", "# process.", "#", "# I thought of using the default logger() setup and dup2'ing stderr to", "# the pipe fd, but then if anything else gets printed to stderr (e.g.", "# Python exception info), it'll get sent along the pipe too. The", "# caller would have to be much more complex to be able to detect and", "# handle such output.", "os", ".", "close", "(", "readfd", ")", "if", "not", "debug", ":", "f", "=", "open", "(", "os", ".", "devnull", ",", "'w'", ")", "os", ".", "dup2", "(", "f", ".", "fileno", "(", ")", ",", "1", ")", "os", ".", "dup2", "(", "f", ".", "fileno", "(", ")", ",", "2", ")", "sink", "=", "logger", "(", "filter", "=", "filter", ")", "sink", ".", "setlogfile", "(", "b'/dev/fd/%d'", "%", "writefd", ")", "function", "(", "sink", ")", "sys", ".", "exit", "(", "0", ")", "# Original process.", "os", ".", "close", "(", "writefd", ")", "with", "os", ".", "fdopen", "(", "readfd", ")", "as", "readhandle", ":", "for", "line", "in", "readhandle", ":", "yield", "line", "info", "=", "os", ".", "waitpid", "(", "pid", ",", "0", ")", "if", "info", "[", "1", "]", ":", "# Because we're a generator, this is the only way for us to signal if", "# the process died. We could be rewritten as a context manager.", "e", "=", "RuntimeError", "(", "'logging child process PID %d exited '", "'with error code %d'", "%", "tuple", "(", "info", ")", ")", "e", ".", "pid", ",", "e", ".", "exitcode", "=", "info", "raise", "e" ]
Fork a child process and read its CASA log output. function A function to run in the child process filter The CASA log level filter to apply in the child process: less urgent messages will not be shown. Valid values are strings: "DEBUG1", "INFO5", ... "INFO1", "INFO", "WARN", "SEVERE". debug If true, the standard output and error of the child process are *not* redirected to /dev/null. Some CASA tools produce important results that are *only* provided via log messages. This is a problem for automation, since there’s no way for Python code to intercept those log messages and extract the results of interest. This function provides a framework for working around this limitation: by forking a child process and sending its log output to a pipe, the parent process can capture the log messages. This function is a generator. It yields lines from the child process’ CASA log output. Because the child process is a fork of the parent, it inherits a complete clone of the parent’s state at the time of forking. That means that the *function* argument you pass it can do just about anything you’d do in a regular program. The child process’ standard output and error streams are redirected to ``/dev/null`` unless the *debug* argument is true. Note that the CASA log output is redirected to a pipe that is neither of these streams. So, if the function raises an unhandled Python exception, the Python traceback will not pollute the CASA log output. But, by the same token, the calling program will not be able to detect that the exception occurred except by its impact on the expected log output.
[ "Fork", "a", "child", "process", "and", "read", "its", "CASA", "log", "output", "." ]
python
train
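Stripped of the CASA specifics, the mechanism is a pipe plus fork: the child writes log lines into the write end, the parent generator yields them from the read end and then reaps the child. A minimal POSIX-only sketch (os.fork is unavailable on Windows):

import os

def forkandlog(function):
    readfd, writefd = os.pipe()
    pid = os.fork()
    if pid == 0:  # child: run the function with a writable log handle
        os.close(readfd)
        with os.fdopen(writefd, "w") as sink:
            function(sink)
        os._exit(0)
    os.close(writefd)  # parent: read until the child closes its end
    with os.fdopen(readfd) as readhandle:
        for line in readhandle:
            yield line
    os.waitpid(pid, 0)

for line in forkandlog(lambda sink: sink.write("INFO hello\nINFO bye\n")):
    print(line.rstrip())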
MillionIntegrals/vel
vel/rl/models/q_noisy_model.py
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/models/q_noisy_model.py#L67-L75
def instantiate(self, **extra_args): """ Instantiate the model """ input_block = self.input_block.instantiate() backbone = self.backbone.instantiate(**extra_args) return NoisyQModel( input_block, backbone, extra_args['action_space'], initial_std_dev=self.initial_std_dev, factorized_noise=self.factorized_noise )
[ "def", "instantiate", "(", "self", ",", "*", "*", "extra_args", ")", ":", "input_block", "=", "self", ".", "input_block", ".", "instantiate", "(", ")", "backbone", "=", "self", ".", "backbone", ".", "instantiate", "(", "*", "*", "extra_args", ")", "return", "NoisyQModel", "(", "input_block", ",", "backbone", ",", "extra_args", "[", "'action_space'", "]", ",", "initial_std_dev", "=", "self", ".", "initial_std_dev", ",", "factorized_noise", "=", "self", ".", "factorized_noise", ")" ]
Instantiate the model
[ "Instantiate", "the", "model" ]
python
train
peterjc/flake8-rst-docstrings
flake8_rst_docstrings.py
https://github.com/peterjc/flake8-rst-docstrings/blob/b8b17d0317fc6728d5586553ab29a7d97e6417fd/flake8_rst_docstrings.py#L265-L288
def dequote_docstring(text): """Remove the quotes delimiting a docstring.""" # TODO: Process escaped characters unless raw mode? text = text.strip() if len(text) > 6 and text[:3] == text[-3:] == '"""': # Standard case, """...""" return text[3:-3] if len(text) > 7 and text[:4] in ('u"""', 'r"""') and text[-3:] == '"""': # Unicode, u"""...""", or raw r"""...""" return text[4:-3] # Other flake8 tools will report atypical quotes: if len(text) > 6 and text[:3] == text[-3:] == "'''": return text[3:-3] if len(text) > 7 and text[:4] in ("u'''", "r'''") and text[-3:] == "'''": return text[4:-3] if len(text) > 2 and text[0] == text[-1] == '"': return text[1:-1] if len(text) > 3 and text[:2] in ('u"', 'r"') and text[-1] == '"': return text[2:-1] if len(text) > 2 and text[0] == text[-1] == "'": return text[1:-1] if len(text) > 3 and text[:2] in ("u'", "r'") and text[-1] == "'": return text[2:-1] raise ValueError("Bad quotes!")
[ "def", "dequote_docstring", "(", "text", ")", ":", "# TODO: Process escaped characters unless raw mode?", "text", "=", "text", ".", "strip", "(", ")", "if", "len", "(", "text", ")", ">", "6", "and", "text", "[", ":", "3", "]", "==", "text", "[", "-", "3", ":", "]", "==", "'\"\"\"'", ":", "# Standard case, \"\"\"...\"\"\"", "return", "text", "[", "3", ":", "-", "3", "]", "if", "len", "(", "text", ")", ">", "7", "and", "text", "[", ":", "4", "]", "in", "(", "'u\"\"\"'", ",", "'r\"\"\"'", ")", "and", "text", "[", "-", "3", ":", "]", "==", "'\"\"\"'", ":", "# Unicode, u\"\"\"...\"\"\", or raw r\"\"\"...\"\"\"", "return", "text", "[", "4", ":", "-", "3", "]", "# Other flake8 tools will report atypical quotes:", "if", "len", "(", "text", ")", ">", "6", "and", "text", "[", ":", "3", "]", "==", "text", "[", "-", "3", ":", "]", "==", "\"'''\"", ":", "return", "text", "[", "3", ":", "-", "3", "]", "if", "len", "(", "text", ")", ">", "7", "and", "text", "[", ":", "4", "]", "in", "(", "\"u'''\"", ",", "\"r'''\"", ")", "and", "text", "[", "-", "3", ":", "]", "==", "\"'''\"", ":", "return", "text", "[", "4", ":", "-", "3", "]", "if", "len", "(", "text", ")", ">", "2", "and", "text", "[", "0", "]", "==", "text", "[", "-", "1", "]", "==", "'\"'", ":", "return", "text", "[", "1", ":", "-", "1", "]", "if", "len", "(", "text", ")", ">", "3", "and", "text", "[", ":", "2", "]", "in", "(", "'u\"'", ",", "'r\"'", ")", "and", "text", "[", "-", "1", "]", "==", "'\"'", ":", "return", "text", "[", "2", ":", "-", "1", "]", "if", "len", "(", "text", ")", ">", "2", "and", "text", "[", "0", "]", "==", "text", "[", "-", "1", "]", "==", "\"'\"", ":", "return", "text", "[", "1", ":", "-", "1", "]", "if", "len", "(", "text", ")", ">", "3", "and", "text", "[", ":", "2", "]", "in", "(", "\"u'\"", ",", "\"r'\"", ")", "and", "text", "[", "-", "1", "]", "==", "\"'\"", ":", "return", "text", "[", "2", ":", "-", "1", "]", "raise", "ValueError", "(", "\"Bad quotes!\"", ")" ]
Remove the quotes delimiting a docstring.
[ "Remove", "the", "quotes", "delimiting", "a", "docstring", "." ]
python
valid
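Since dequote_docstring above is self-contained (it only needs the text), it can be exercised directly; expected outputs are shown in comments:

print(dequote_docstring('"""Summary line."""'))    # Summary line.
print(dequote_docstring("r'''Raw docstring.'''"))  # Raw docstring.
print(dequote_docstring('"short"'))                # short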
senaite/senaite.core
bika/lims/browser/worksheet/views/results.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/worksheet/views/results.py#L117-L124
def is_assignment_allowed(self): """Check if analyst assignment is allowed """ if not self.is_manage_allowed(): return False review_state = api.get_workflow_status_of(self.context) edit_states = ["open", "attachment_due", "to_be_verified"] return review_state in edit_states
[ "def", "is_assignment_allowed", "(", "self", ")", ":", "if", "not", "self", ".", "is_manage_allowed", "(", ")", ":", "return", "False", "review_state", "=", "api", ".", "get_workflow_status_of", "(", "self", ".", "context", ")", "edit_states", "=", "[", "\"open\"", ",", "\"attachment_due\"", ",", "\"to_be_verified\"", "]", "return", "review_state", "in", "edit_states" ]
Check if analyst assignment is allowed
[ "Check", "if", "analyst", "assignment", "is", "allowed" ]
python
train
pavelsof/ipalint
ipalint/read.py
https://github.com/pavelsof/ipalint/blob/763e5979ede6980cbfc746b06fd007b379762eeb/ipalint/read.py#L136-L174
def get_dialect(self): """ Returns a Dialect named tuple or None if the dataset file comprises a single column of data. If the dialect is not already known, then tries to determine it. Raises ValueError if it fails in the latter case. """ if self.is_single_col: return None if self.delimiter and self.quotechar: return Dialect(self.delimiter, self.quotechar, True if self.escapechar is None else False, self.escapechar) ext = os.path.basename(self.file_path).rsplit('.', maxsplit=1) ext = ext[1].lower() if len(ext) > 1 else None if ext in TSV_EXTENSIONS: self.delimiter = '\t' self.quotechar = '"' else: f = self._open() lines = f.read().splitlines() f.close() if lines: dialect = self._determine_dialect(lines) else: dialect = None if dialect is None: self.is_single_col = True else: self.delimiter = dialect.delimiter self.quotechar = dialect.quotechar self.escapechar = dialect.escapechar return self.get_dialect()
[ "def", "get_dialect", "(", "self", ")", ":", "if", "self", ".", "is_single_col", ":", "return", "None", "if", "self", ".", "delimiter", "and", "self", ".", "quotechar", ":", "return", "Dialect", "(", "self", ".", "delimiter", ",", "self", ".", "quotechar", ",", "True", "if", "self", ".", "escapechar", "is", "None", "else", "False", ",", "self", ".", "escapechar", ")", "ext", "=", "os", ".", "path", ".", "basename", "(", "self", ".", "file_path", ")", ".", "rsplit", "(", "'.'", ",", "maxsplit", "=", "1", ")", "ext", "=", "ext", "[", "1", "]", ".", "lower", "(", ")", "if", "len", "(", "ext", ")", ">", "1", "else", "None", "if", "ext", "in", "TSV_EXTENSIONS", ":", "self", ".", "delimiter", "=", "'\\t'", "self", ".", "quotechar", "=", "'\"'", "else", ":", "f", "=", "self", ".", "_open", "(", ")", "lines", "=", "f", ".", "read", "(", ")", ".", "splitlines", "(", ")", "f", ".", "close", "(", ")", "if", "lines", ":", "dialect", "=", "self", ".", "_determine_dialect", "(", "lines", ")", "else", ":", "dialect", "=", "None", "if", "dialect", "is", "None", ":", "self", ".", "is_single_col", "=", "True", "else", ":", "self", ".", "delimiter", "=", "dialect", ".", "delimiter", "self", ".", "quotechar", "=", "dialect", ".", "quotechar", "self", ".", "escapechar", "=", "dialect", ".", "escapechar", "return", "self", ".", "get_dialect", "(", ")" ]
Returns a Dialect named tuple or None if the dataset file comprises a single column of data. If the dialect is not already known, then tries to determine it. Raises ValueError if it fails in the latter case.
[ "Returns", "a", "Dialect", "named", "tuple", "or", "None", "if", "the", "dataset", "file", "comprises", "a", "single", "column", "of", "data", ".", "If", "the", "dialect", "is", "not", "already", "known", "then", "tries", "to", "determine", "it", ".", "Raises", "ValueError", "if", "it", "fails", "in", "the", "latter", "case", "." ]
python
train
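As a point of comparison with the record above, the standard library's csv.Sniffer implements the same delimiter-guessing idea; this is a hedged stand-in, not ipalint's own _determine_dialect:

    import csv

    sample = "name;ipa\nfoo;fu\nbar;ba\n"
    try:
        dialect = csv.Sniffer().sniff(sample)
        print(dialect.delimiter, dialect.quotechar)  # -> ; "
    except csv.Error:
        # Sniffing failed, e.g. a single-column file.
        print("treating input as a single column")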
SmartTeleMax/iktomi
iktomi/db/sqla/declarative.py
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/db/sqla/declarative.py#L66-L118
def TableArgsMeta(table_args): '''Declarative metaclass automatically adding (merging) __table_args__ to mapped classes. Example: Meta = TableArgsMeta({ 'mysql_engine': 'InnoDB', 'mysql_default charset': 'utf8', } Base = declarative_base(name='Base', metaclass=Meta) class MyClass(Base): … is equivalent to Base = declarative_base(name='Base') class MyClass(Base): __table_args__ = { 'mysql_engine': 'InnoDB', 'mysql_default charset': 'utf8', } … ''' class _TableArgsMeta(declarative.DeclarativeMeta): def __init__(cls, name, bases, dict_): if ( # Do not extend base class '_decl_class_registry' not in cls.__dict__ and # Missing __tablename_ or equal to None means single table # inheritance — no table for it (columns go to table of # base class) cls.__dict__.get('__tablename__') and # Abstract class — no table for it (columns go to table[s] # of subclass[es] not cls.__dict__.get('__abstract__', False)): ta = getattr(cls, '__table_args__', {}) if isinstance(ta, dict): ta = dict(table_args, **ta) cls.__table_args__ = ta else: assert isinstance(ta, tuple) if ta and isinstance(ta[-1], dict): tad = dict(table_args, **ta[-1]) ta = ta[:-1] else: tad = dict(table_args) cls.__table_args__ = ta + (tad,) super(_TableArgsMeta, cls).__init__(name, bases, dict_) return _TableArgsMeta
[ "def", "TableArgsMeta", "(", "table_args", ")", ":", "class", "_TableArgsMeta", "(", "declarative", ".", "DeclarativeMeta", ")", ":", "def", "__init__", "(", "cls", ",", "name", ",", "bases", ",", "dict_", ")", ":", "if", "(", "# Do not extend base class", "'_decl_class_registry'", "not", "in", "cls", ".", "__dict__", "and", "# Missing __tablename_ or equal to None means single table", "# inheritance — no table for it (columns go to table of", "# base class)", "cls", ".", "__dict__", ".", "get", "(", "'__tablename__'", ")", "and", "# Abstract class — no table for it (columns go to table[s]", "# of subclass[es]", "not", "cls", ".", "__dict__", ".", "get", "(", "'__abstract__'", ",", "False", ")", ")", ":", "ta", "=", "getattr", "(", "cls", ",", "'__table_args__'", ",", "{", "}", ")", "if", "isinstance", "(", "ta", ",", "dict", ")", ":", "ta", "=", "dict", "(", "table_args", ",", "*", "*", "ta", ")", "cls", ".", "__table_args__", "=", "ta", "else", ":", "assert", "isinstance", "(", "ta", ",", "tuple", ")", "if", "ta", "and", "isinstance", "(", "ta", "[", "-", "1", "]", ",", "dict", ")", ":", "tad", "=", "dict", "(", "table_args", ",", "*", "*", "ta", "[", "-", "1", "]", ")", "ta", "=", "ta", "[", ":", "-", "1", "]", "else", ":", "tad", "=", "dict", "(", "table_args", ")", "cls", ".", "__table_args__", "=", "ta", "+", "(", "tad", ",", ")", "super", "(", "_TableArgsMeta", ",", "cls", ")", ".", "__init__", "(", "name", ",", "bases", ",", "dict_", ")", "return", "_TableArgsMeta" ]
Declarative metaclass automatically adding (merging) __table_args__ to mapped classes. Example: Meta = TableArgsMeta({ 'mysql_engine': 'InnoDB', 'mysql_default charset': 'utf8', } Base = declarative_base(name='Base', metaclass=Meta) class MyClass(Base): … is equivalent to Base = declarative_base(name='Base') class MyClass(Base): __table_args__ = { 'mysql_engine': 'InnoDB', 'mysql_default charset': 'utf8', } …
[ "Declarative", "metaclass", "automatically", "adding", "(", "merging", ")", "__table_args__", "to", "mapped", "classes", ".", "Example", ":" ]
python
train
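A hedged usage sketch mirroring the docstring example above (it assumes a SQLAlchemy version where declarative_base still lives in sqlalchemy.ext.declarative and accepts a metaclass argument, and that TableArgsMeta is importable from the record's module path):

    from sqlalchemy import Column, Integer
    from sqlalchemy.ext.declarative import declarative_base
    from iktomi.db.sqla.declarative import TableArgsMeta

    Meta = TableArgsMeta({'mysql_engine': 'InnoDB'})
    Base = declarative_base(name='Base', metaclass=Meta)

    class MyClass(Base):
        __tablename__ = 'my_class'
        id = Column(Integer, primary_key=True)

    print(MyClass.__table_args__)  # expected: {'mysql_engine': 'InnoDB'}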
teepark/greenhouse
greenhouse/scheduler.py
https://github.com/teepark/greenhouse/blob/8fd1be4f5443ba090346b5ec82fdbeb0a060d956/greenhouse/scheduler.py#L662-L682
def global_exception_handler(handler): """add a callback for when an exception goes uncaught in any greenlet :param handler: the callback function. must be a function taking 3 arguments: - ``klass`` the exception class - ``exc`` the exception instance - ``tb`` the traceback object :type handler: function Note also that the callback is only held by a weakref, so if all other refs to the function are lost it will stop handling greenlets' exceptions """ if not hasattr(handler, "__call__"): raise TypeError("exception handlers must be callable") log.info("setting a new global exception handler") state.global_exception_handlers.append(weakref.ref(handler)) return handler
[ "def", "global_exception_handler", "(", "handler", ")", ":", "if", "not", "hasattr", "(", "handler", ",", "\"__call__\"", ")", ":", "raise", "TypeError", "(", "\"exception handlers must be callable\"", ")", "log", ".", "info", "(", "\"setting a new global exception handler\"", ")", "state", ".", "global_exception_handlers", ".", "append", "(", "weakref", ".", "ref", "(", "handler", ")", ")", "return", "handler" ]
add a callback for when an exception goes uncaught in any greenlet :param handler: the callback function. must be a function taking 3 arguments: - ``klass`` the exception class - ``exc`` the exception instance - ``tb`` the traceback object :type handler: function Note also that the callback is only held by a weakref, so if all other refs to the function are lost it will stop handling greenlets' exceptions
[ "add", "a", "callback", "for", "when", "an", "exception", "goes", "uncaught", "in", "any", "greenlet" ]
python
train
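A minimal registration sketch (assuming the function is importable from greenhouse's scheduler module as the record's path suggests). Note the module-level name keeps a strong reference to the handler, which matters because, as the docstring warns, it is only held by a weakref:

    import traceback
    from greenhouse import scheduler

    @scheduler.global_exception_handler
    def log_uncaught(klass, exc, tb):
        # Called for any exception that escapes a greenlet.
        print("uncaught %s: %s" % (klass.__name__, exc))
        traceback.print_tb(tb)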
payu-org/payu
payu/cli.py
https://github.com/payu-org/payu/blob/1442a9a226012eff248b8097cc1eaabc3e224867/payu/cli.py#L124-L200
def submit_job(pbs_script, pbs_config, pbs_vars=None): """Submit a userscript the scheduler.""" # Initialisation if pbs_vars is None: pbs_vars = {} pbs_flags = [] pbs_queue = pbs_config.get('queue', 'normal') pbs_flags.append('-q {queue}'.format(queue=pbs_queue)) pbs_project = pbs_config.get('project', os.environ['PROJECT']) pbs_flags.append('-P {project}'.format(project=pbs_project)) pbs_resources = ['walltime', 'ncpus', 'mem', 'jobfs'] for res_key in pbs_resources: res_flags = [] res_val = pbs_config.get(res_key) if res_val: res_flags.append('{key}={val}'.format(key=res_key, val=res_val)) if res_flags: pbs_flags.append('-l {res}'.format(res=','.join(res_flags))) # TODO: Need to pass lab.config_path somehow... pbs_jobname = pbs_config.get('jobname', os.path.basename(os.getcwd())) if pbs_jobname: # PBSPro has a 15-character jobname limit pbs_flags.append('-N {name}'.format(name=pbs_jobname[:15])) pbs_priority = pbs_config.get('priority') if pbs_priority: pbs_flags.append('-p {priority}'.format(priority=pbs_priority)) pbs_flags.append('-l wd') pbs_join = pbs_config.get('join', 'n') if pbs_join not in ('oe', 'eo', 'n'): print('payu: error: unknown qsub IO stream join setting.') sys.exit(-1) else: pbs_flags.append('-j {join}'.format(join=pbs_join)) # Append environment variables to qsub command # TODO: Support full export of environment variables: `qsub -V` pbs_vstring = ','.join('{0}={1}'.format(k, v) for k, v in pbs_vars.items()) pbs_flags.append('-v ' + pbs_vstring) # Append any additional qsub flags here pbs_flags_extend = pbs_config.get('qsub_flags') if pbs_flags_extend: pbs_flags.append(pbs_flags_extend) if not os.path.isabs(pbs_script): # NOTE: PAYU_PATH is always set if `set_env_vars` was always called. # This is currently always true, but is not explicitly enforced. # So this conditional check is a bit redundant. payu_bin = pbs_vars.get('PAYU_PATH', os.path.dirname(sys.argv[0])) pbs_script = os.path.join(payu_bin, pbs_script) assert os.path.isfile(pbs_script) # Set up environment modules here for PBS. envmod.setup() envmod.module('load', 'pbs') # Construct job submission command cmd = 'qsub {flags} -- {python} {script}'.format( flags=' '.join(pbs_flags), python=sys.executable, script=pbs_script ) print(cmd) subprocess.check_call(shlex.split(cmd))
[ "def", "submit_job", "(", "pbs_script", ",", "pbs_config", ",", "pbs_vars", "=", "None", ")", ":", "# Initialisation", "if", "pbs_vars", "is", "None", ":", "pbs_vars", "=", "{", "}", "pbs_flags", "=", "[", "]", "pbs_queue", "=", "pbs_config", ".", "get", "(", "'queue'", ",", "'normal'", ")", "pbs_flags", ".", "append", "(", "'-q {queue}'", ".", "format", "(", "queue", "=", "pbs_queue", ")", ")", "pbs_project", "=", "pbs_config", ".", "get", "(", "'project'", ",", "os", ".", "environ", "[", "'PROJECT'", "]", ")", "pbs_flags", ".", "append", "(", "'-P {project}'", ".", "format", "(", "project", "=", "pbs_project", ")", ")", "pbs_resources", "=", "[", "'walltime'", ",", "'ncpus'", ",", "'mem'", ",", "'jobfs'", "]", "for", "res_key", "in", "pbs_resources", ":", "res_flags", "=", "[", "]", "res_val", "=", "pbs_config", ".", "get", "(", "res_key", ")", "if", "res_val", ":", "res_flags", ".", "append", "(", "'{key}={val}'", ".", "format", "(", "key", "=", "res_key", ",", "val", "=", "res_val", ")", ")", "if", "res_flags", ":", "pbs_flags", ".", "append", "(", "'-l {res}'", ".", "format", "(", "res", "=", "','", ".", "join", "(", "res_flags", ")", ")", ")", "# TODO: Need to pass lab.config_path somehow...", "pbs_jobname", "=", "pbs_config", ".", "get", "(", "'jobname'", ",", "os", ".", "path", ".", "basename", "(", "os", ".", "getcwd", "(", ")", ")", ")", "if", "pbs_jobname", ":", "# PBSPro has a 15-character jobname limit", "pbs_flags", ".", "append", "(", "'-N {name}'", ".", "format", "(", "name", "=", "pbs_jobname", "[", ":", "15", "]", ")", ")", "pbs_priority", "=", "pbs_config", ".", "get", "(", "'priority'", ")", "if", "pbs_priority", ":", "pbs_flags", ".", "append", "(", "'-p {priority}'", ".", "format", "(", "priority", "=", "pbs_priority", ")", ")", "pbs_flags", ".", "append", "(", "'-l wd'", ")", "pbs_join", "=", "pbs_config", ".", "get", "(", "'join'", ",", "'n'", ")", "if", "pbs_join", "not", "in", "(", "'oe'", ",", "'eo'", ",", "'n'", ")", ":", "print", "(", "'payu: error: unknown qsub IO stream join setting.'", ")", "sys", ".", "exit", "(", "-", "1", ")", "else", ":", "pbs_flags", ".", "append", "(", "'-j {join}'", ".", "format", "(", "join", "=", "pbs_join", ")", ")", "# Append environment variables to qsub command", "# TODO: Support full export of environment variables: `qsub -V`", "pbs_vstring", "=", "','", ".", "join", "(", "'{0}={1}'", ".", "format", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "pbs_vars", ".", "items", "(", ")", ")", "pbs_flags", ".", "append", "(", "'-v '", "+", "pbs_vstring", ")", "# Append any additional qsub flags here", "pbs_flags_extend", "=", "pbs_config", ".", "get", "(", "'qsub_flags'", ")", "if", "pbs_flags_extend", ":", "pbs_flags", ".", "append", "(", "pbs_flags_extend", ")", "if", "not", "os", ".", "path", ".", "isabs", "(", "pbs_script", ")", ":", "# NOTE: PAYU_PATH is always set if `set_env_vars` was always called.", "# This is currently always true, but is not explicitly enforced.", "# So this conditional check is a bit redundant.", "payu_bin", "=", "pbs_vars", ".", "get", "(", "'PAYU_PATH'", ",", "os", ".", "path", ".", "dirname", "(", "sys", ".", "argv", "[", "0", "]", ")", ")", "pbs_script", "=", "os", ".", "path", ".", "join", "(", "payu_bin", ",", "pbs_script", ")", "assert", "os", ".", "path", ".", "isfile", "(", "pbs_script", ")", "# Set up environment modules here for PBS.", "envmod", ".", "setup", "(", ")", "envmod", ".", "module", "(", "'load'", ",", "'pbs'", ")", "# Construct job submission command", "cmd", "=", "'qsub 
{flags} -- {python} {script}'", ".", "format", "(", "flags", "=", "' '", ".", "join", "(", "pbs_flags", ")", ",", "python", "=", "sys", ".", "executable", ",", "script", "=", "pbs_script", ")", "print", "(", "cmd", ")", "subprocess", ".", "check_call", "(", "shlex", ".", "split", "(", "cmd", ")", ")" ]
Submit a userscript to the scheduler.
[ "Submit", "a", "userscript", "the", "scheduler", "." ]
python
train
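The resource-flag assembly step inside submit_job, isolated into a few runnable lines (the config values are invented for illustration):

    pbs_config = {'walltime': '02:00:00', 'ncpus': 16, 'mem': '32GB'}
    res_flags = ['{0}={1}'.format(key, pbs_config[key])
                 for key in ('walltime', 'ncpus', 'mem', 'jobfs')
                 if pbs_config.get(key)]
    print('-l ' + ','.join(res_flags))  # -> -l walltime=02:00:00,ncpus=16,mem=32GB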
PrefPy/prefpy
prefpy/gmm_mixpl.py
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/gmm_mixpl.py#L33-L49
def calcMomentsMatlabEmpirical(params): """Top 3 alternatives 20 empirical moment conditions""" alpha = params[0] a = params[1:5] b = params[5:] p1 = alpha*a+(1-alpha)*b p21 = alpha*a[0]*a[1:]/(1-a[0])+(1-alpha)*b[0]*b[1:]/(1-b[0]) p22 = alpha*a[1]*np.hstack((a[0],a[2:]))/(1-a[1])+(1-alpha)*b[1]*np.hstack((b[0],b[2:]))/(1-b[1]) p23 = alpha*a[2]*np.hstack((a[:2],a[3]))/(1-a[2])+(1-alpha)*b[2]*np.hstack((b[:2],b[3]))/(1-b[2]) p24 = alpha*a[3]*a[:3]/(1-a[3])+(1-alpha)*b[3]*b[:3]/(1-b[3]) p3 = np.array([ alpha*a[0]*a[2]*a[3]/(1-a[2])/(a[0]+a[1])+(1-alpha)*b[0]*b[2]*b[3]/(1-b[2])/(b[0]+b[1]), alpha*a[0]*a[1]*a[3]/(1-a[3])/(a[1]+a[2])+(1-alpha)*b[0]*b[1]*b[3]/(1-b[3])/(b[1]+b[2]), alpha*a[0]*a[1]*a[2]/(1-a[0])/(a[3]+a[2])+(1-alpha)*b[0]*b[1]*b[2]/(1-b[0])/(b[3]+b[2]), alpha*a[2]*a[1]*a[3]/(1-a[1])/(a[0]+a[3])+(1-alpha)*b[2]*b[1]*b[3]/(1-b[1])/(b[0]+b[3]) ]) return np.concatenate((p1,p21,p22,p23,p24,p3))
[ "def", "calcMomentsMatlabEmpirical", "(", "params", ")", ":", "alpha", "=", "params", "[", "0", "]", "a", "=", "params", "[", "1", ":", "5", "]", "b", "=", "params", "[", "5", ":", "]", "p1", "=", "alpha", "*", "a", "+", "(", "1", "-", "alpha", ")", "*", "b", "p21", "=", "alpha", "*", "a", "[", "0", "]", "*", "a", "[", "1", ":", "]", "/", "(", "1", "-", "a", "[", "0", "]", ")", "+", "(", "1", "-", "alpha", ")", "*", "b", "[", "0", "]", "*", "b", "[", "1", ":", "]", "/", "(", "1", "-", "b", "[", "0", "]", ")", "p22", "=", "alpha", "*", "a", "[", "1", "]", "*", "np", ".", "hstack", "(", "(", "a", "[", "0", "]", ",", "a", "[", "2", ":", "]", ")", ")", "/", "(", "1", "-", "a", "[", "1", "]", ")", "+", "(", "1", "-", "alpha", ")", "*", "b", "[", "1", "]", "*", "np", ".", "hstack", "(", "(", "b", "[", "0", "]", ",", "b", "[", "2", ":", "]", ")", ")", "/", "(", "1", "-", "b", "[", "1", "]", ")", "p23", "=", "alpha", "*", "a", "[", "2", "]", "*", "np", ".", "hstack", "(", "(", "a", "[", ":", "2", "]", ",", "a", "[", "3", "]", ")", ")", "/", "(", "1", "-", "a", "[", "2", "]", ")", "+", "(", "1", "-", "alpha", ")", "*", "b", "[", "2", "]", "*", "np", ".", "hstack", "(", "(", "b", "[", ":", "2", "]", ",", "b", "[", "3", "]", ")", ")", "/", "(", "1", "-", "b", "[", "2", "]", ")", "p24", "=", "alpha", "*", "a", "[", "3", "]", "*", "a", "[", ":", "3", "]", "/", "(", "1", "-", "a", "[", "3", "]", ")", "+", "(", "1", "-", "alpha", ")", "*", "b", "[", "3", "]", "*", "b", "[", ":", "3", "]", "/", "(", "1", "-", "b", "[", "3", "]", ")", "p3", "=", "np", ".", "array", "(", "[", "alpha", "*", "a", "[", "0", "]", "*", "a", "[", "2", "]", "*", "a", "[", "3", "]", "/", "(", "1", "-", "a", "[", "2", "]", ")", "/", "(", "a", "[", "0", "]", "+", "a", "[", "1", "]", ")", "+", "(", "1", "-", "alpha", ")", "*", "b", "[", "0", "]", "*", "b", "[", "2", "]", "*", "b", "[", "3", "]", "/", "(", "1", "-", "b", "[", "2", "]", ")", "/", "(", "b", "[", "0", "]", "+", "b", "[", "1", "]", ")", ",", "alpha", "*", "a", "[", "0", "]", "*", "a", "[", "1", "]", "*", "a", "[", "3", "]", "/", "(", "1", "-", "a", "[", "3", "]", ")", "/", "(", "a", "[", "1", "]", "+", "a", "[", "2", "]", ")", "+", "(", "1", "-", "alpha", ")", "*", "b", "[", "0", "]", "*", "b", "[", "1", "]", "*", "b", "[", "3", "]", "/", "(", "1", "-", "b", "[", "3", "]", ")", "/", "(", "b", "[", "1", "]", "+", "b", "[", "2", "]", ")", ",", "alpha", "*", "a", "[", "0", "]", "*", "a", "[", "1", "]", "*", "a", "[", "2", "]", "/", "(", "1", "-", "a", "[", "0", "]", ")", "/", "(", "a", "[", "3", "]", "+", "a", "[", "2", "]", ")", "+", "(", "1", "-", "alpha", ")", "*", "b", "[", "0", "]", "*", "b", "[", "1", "]", "*", "b", "[", "2", "]", "/", "(", "1", "-", "b", "[", "0", "]", ")", "/", "(", "b", "[", "3", "]", "+", "b", "[", "2", "]", ")", ",", "alpha", "*", "a", "[", "2", "]", "*", "a", "[", "1", "]", "*", "a", "[", "3", "]", "/", "(", "1", "-", "a", "[", "1", "]", ")", "/", "(", "a", "[", "0", "]", "+", "a", "[", "3", "]", ")", "+", "(", "1", "-", "alpha", ")", "*", "b", "[", "2", "]", "*", "b", "[", "1", "]", "*", "b", "[", "3", "]", "/", "(", "1", "-", "b", "[", "1", "]", ")", "/", "(", "b", "[", "0", "]", "+", "b", "[", "3", "]", ")", "]", ")", "return", "np", ".", "concatenate", "(", "(", "p1", ",", "p21", ",", "p22", ",", "p23", ",", "p24", ",", "p3", ")", ")" ]
Top 3 alternatives 20 empirical moment conditions
[ "Top", "3", "alternatives", "20", "empirical", "moment", "conditions" ]
python
train
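A quick numeric smoke test of the moment computation above (assumes prefpy is installed and the module path matches the record; the parameter vector is alpha followed by two length-4 Plackett-Luce components, each summing to 1):

    import numpy as np
    from prefpy.gmm_mixpl import calcMomentsMatlabEmpirical

    params = np.array([0.5,                  # mixing weight alpha
                       0.4, 0.3, 0.2, 0.1,   # first PL component
                       0.1, 0.2, 0.3, 0.4])  # second PL component
    moments = calcMomentsMatlabEmpirical(params)
    print(moments.shape)  # -> (20,), the 20 moment conditions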
awslabs/sockeye
docs/tutorials/cpu_process_per_core_translation.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/docs/tutorials/cpu_process_per_core_translation.py#L37-L58
def benchmark(cores, args): """ benchmark is used for Processing per core translation. Each core translates the whole input file. Return after all translations done. :param cores: the number of cores used for translation, each core will launch a thread to translate :param args: input parameters """ model = args.module fileInput = args.input_file fileOutput = args.output_file batchsize = args.batch_size thread = [] for i in range(cores): command = "taskset -c %d-%d python3 -m sockeye.translate -m %s -i %s -o %s --batch-size %d --output-type benchmark --use-cpu > /dev/null 2>&1 " % (i, i, model, fileInput, fileOutput, batchsize) t = threading.Thread(target = task, args=(command,)) thread.append(t) t.start() for t in thread: t.join()
[ "def", "benchmark", "(", "cores", ",", "args", ")", ":", "model", "=", "args", ".", "module", "fileInput", "=", "args", ".", "input_file", "fileOutput", "=", "args", ".", "output_file", "batchsize", "=", "args", ".", "batch_size", "thread", "=", "[", "]", "for", "i", "in", "range", "(", "cores", ")", ":", "command", "=", "\"taskset -c %d-%d python3 -m sockeye.translate -m %s -i %s -o %s --batch-size %d --output-type benchmark --use-cpu > /dev/null 2>&1 \"", "%", "(", "i", ",", "i", ",", "model", ",", "fileInput", ",", "fileOutput", ",", "batchsize", ")", "t", "=", "threading", ".", "Thread", "(", "target", "=", "task", ",", "args", "=", "(", "command", ",", ")", ")", "thread", ".", "append", "(", "t", ")", "t", ".", "start", "(", ")", "for", "t", "in", "thread", ":", "t", ".", "join", "(", ")" ]
benchmark is used for Processing per core translation. Each core translates the whole input file. Return after all translations done. :param cores: the number of cores used for translation, each core will launch a thread to translate :param args: input parameters
[ "benchmark", "is", "used", "for", "Processing", "per", "core", "translation", ".", "Each", "core", "translates", "the", "whole", "input", "file", ".", "Return", "after", "all", "translations", "done", "." ]
python
train
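The fan-out pattern above, reduced to a harmless runnable sketch (Linux-only because of taskset; echo stands in for the real sockeye.translate invocation):

    import subprocess
    import threading

    def task(command):
        subprocess.call(command, shell=True)

    threads = []
    for i in range(2):
        cmd = "taskset -c %d-%d echo core-%d" % (i, i, i)
        t = threading.Thread(target=task, args=(cmd,))
        threads.append(t)
        t.start()
    for t in threads:
        t.join()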
jmgilman/Neolib
neolib/pyamf/remoting/client/__init__.py
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/remoting/client/__init__.py#L253-L260
def addHeader(self, name, value, must_understand=False): """ Sets a persistent header to send with each request. @param name: Header name. """ self.headers[name] = value self.headers.set_required(name, must_understand)
[ "def", "addHeader", "(", "self", ",", "name", ",", "value", ",", "must_understand", "=", "False", ")", ":", "self", ".", "headers", "[", "name", "]", "=", "value", "self", ".", "headers", ".", "set_required", "(", "name", ",", "must_understand", ")" ]
Sets a persistent header to send with each request. @param name: Header name.
[ "Sets", "a", "persistent", "header", "to", "send", "with", "each", "request", "." ]
python
train
apriha/lineage
src/lineage/snps.py
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/snps.py#L597-L613
def get_chromosomes(snps): """ Get the chromosomes of SNPs. Parameters ---------- snps : pandas.DataFrame Returns ------- list list of str chromosomes (e.g., ['1', '2', '3', 'MT'], empty list if no chromosomes """ if isinstance(snps, pd.DataFrame): return list(pd.unique(snps["chrom"])) else: return []
[ "def", "get_chromosomes", "(", "snps", ")", ":", "if", "isinstance", "(", "snps", ",", "pd", ".", "DataFrame", ")", ":", "return", "list", "(", "pd", ".", "unique", "(", "snps", "[", "\"chrom\"", "]", ")", ")", "else", ":", "return", "[", "]" ]
Get the chromosomes of SNPs. Parameters ---------- snps : pandas.DataFrame Returns ------- list list of str chromosomes (e.g., ['1', '2', '3', 'MT'], empty list if no chromosomes
[ "Get", "the", "chromosomes", "of", "SNPs", "." ]
python
train
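A hedged usage sketch (assumes pandas and that the function is importable from lineage.snps, as the record's path suggests):

    import pandas as pd
    from lineage.snps import get_chromosomes

    snps = pd.DataFrame({"chrom": ["1", "1", "2", "MT"], "pos": [101, 202, 303, 404]})
    print(get_chromosomes(snps))        # -> ['1', '2', 'MT']
    print(get_chromosomes("not a df"))  # -> []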
bwohlberg/sporco
sporco/admm/admm.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/admm.py#L604-L616
def display_status(self, fmtstr, itst): """Display current iteration status as selection of fields from iteration stats tuple. """ if self.opt['Verbose']: hdrtxt = type(self).hdrtxt() hdrval = type(self).hdrval() itdsp = tuple([getattr(itst, hdrval[col]) for col in hdrtxt]) if not self.opt['AutoRho', 'Enabled']: itdsp = itdsp[0:-1] print(fmtstr % itdsp)
[ "def", "display_status", "(", "self", ",", "fmtstr", ",", "itst", ")", ":", "if", "self", ".", "opt", "[", "'Verbose'", "]", ":", "hdrtxt", "=", "type", "(", "self", ")", ".", "hdrtxt", "(", ")", "hdrval", "=", "type", "(", "self", ")", ".", "hdrval", "(", ")", "itdsp", "=", "tuple", "(", "[", "getattr", "(", "itst", ",", "hdrval", "[", "col", "]", ")", "for", "col", "in", "hdrtxt", "]", ")", "if", "not", "self", ".", "opt", "[", "'AutoRho'", ",", "'Enabled'", "]", ":", "itdsp", "=", "itdsp", "[", "0", ":", "-", "1", "]", "print", "(", "fmtstr", "%", "itdsp", ")" ]
Display current iteration status as selection of fields from iteration stats tuple.
[ "Display", "current", "iteration", "status", "as", "selection", "of", "fields", "from", "iteration", "stats", "tuple", "." ]
python
train
globocom/GloboNetworkAPI-client-python
networkapiclient/Usuario.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/Usuario.py#L121-L146
def list_with_usergroup(self): """List all users and their user groups. is_more -If more than 3 of groups of users or no, to control expansion Screen. :return: Dictionary with the following structure: :: {'usuario': [{'nome': < nome >, 'id': < id >, 'pwd': < pwd >, 'user': < user >, 'ativo': < ativo >, 'email': < email >, 'is_more': <True ou False>, 'grupos': [nome_grupo, ...more user groups...]}, ...more user...]} :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response. """ url = 'usuario/get/' code, xml = self.submit(None, 'GET', url) return self.response(code, xml)
[ "def", "list_with_usergroup", "(", "self", ")", ":", "url", "=", "'usuario/get/'", "code", ",", "xml", "=", "self", ".", "submit", "(", "None", ",", "'GET'", ",", "url", ")", "return", "self", ".", "response", "(", "code", ",", "xml", ")" ]
List all users and their user groups. is_more -If more than 3 of groups of users or no, to control expansion Screen. :return: Dictionary with the following structure: :: {'usuario': [{'nome': < nome >, 'id': < id >, 'pwd': < pwd >, 'user': < user >, 'ativo': < ativo >, 'email': < email >, 'is_more': <True ou False>, 'grupos': [nome_grupo, ...more user groups...]}, ...more user...]} :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
[ "List", "all", "users", "and", "their", "user", "groups", ".", "is_more", "-", "If", "more", "than", "3", "of", "groups", "of", "users", "or", "no", "to", "control", "expansion", "Screen", "." ]
python
train
nakagami/pyfirebirdsql
firebirdsql/srp.py
https://github.com/nakagami/pyfirebirdsql/blob/5ce366c2fc8318510444f4a89801442f3e9e52ca/firebirdsql/srp.py#L218-L249
def client_session(user, password, salt, A, B, a): """ Client session secret Both: u = H(A, B) User: x = H(s, p) (user enters password) User: S = (B - kg^x) ^ (a + ux) (computes session key) User: K = H(S) """ N, g, k = get_prime() u = get_scramble(A, B) x = getUserHash(salt, user, password) # x gx = pow(g, x, N) # g^x kgx = (k * gx) % N # kg^x diff = (B - kgx) % N # B - kg^x ux = (u * x) % N aux = (a + ux) % N session_secret = pow(diff, aux, N) # (B - kg^x) ^ (a + ux) K = hash_digest(hashlib.sha1, session_secret) if DEBUG_PRINT: print('B=', binascii.b2a_hex(long2bytes(B)), end='\n') print('u=', binascii.b2a_hex(long2bytes(u)), end='\n') print('x=', binascii.b2a_hex(long2bytes(x)), end='\n') print('gx=', binascii.b2a_hex(long2bytes(gx)), end='\n') print('kgx=', binascii.b2a_hex(long2bytes(kgx)), end='\n') print('diff=', binascii.b2a_hex(long2bytes(diff)), end='\n') print('ux=', binascii.b2a_hex(long2bytes(ux)), end='\n') print('aux=', binascii.b2a_hex(long2bytes(aux)), end='\n') print('session_secret=', binascii.b2a_hex(long2bytes(session_secret)), end='\n') print('session_key:K=', binascii.b2a_hex(K)) return K
[ "def", "client_session", "(", "user", ",", "password", ",", "salt", ",", "A", ",", "B", ",", "a", ")", ":", "N", ",", "g", ",", "k", "=", "get_prime", "(", ")", "u", "=", "get_scramble", "(", "A", ",", "B", ")", "x", "=", "getUserHash", "(", "salt", ",", "user", ",", "password", ")", "# x", "gx", "=", "pow", "(", "g", ",", "x", ",", "N", ")", "# g^x", "kgx", "=", "(", "k", "*", "gx", ")", "%", "N", "# kg^x", "diff", "=", "(", "B", "-", "kgx", ")", "%", "N", "# B - kg^x", "ux", "=", "(", "u", "*", "x", ")", "%", "N", "aux", "=", "(", "a", "+", "ux", ")", "%", "N", "session_secret", "=", "pow", "(", "diff", ",", "aux", ",", "N", ")", "# (B - kg^x) ^ (a + ux)", "K", "=", "hash_digest", "(", "hashlib", ".", "sha1", ",", "session_secret", ")", "if", "DEBUG_PRINT", ":", "print", "(", "'B='", ",", "binascii", ".", "b2a_hex", "(", "long2bytes", "(", "B", ")", ")", ",", "end", "=", "'\\n'", ")", "print", "(", "'u='", ",", "binascii", ".", "b2a_hex", "(", "long2bytes", "(", "u", ")", ")", ",", "end", "=", "'\\n'", ")", "print", "(", "'x='", ",", "binascii", ".", "b2a_hex", "(", "long2bytes", "(", "x", ")", ")", ",", "end", "=", "'\\n'", ")", "print", "(", "'gx='", ",", "binascii", ".", "b2a_hex", "(", "long2bytes", "(", "gx", ")", ")", ",", "end", "=", "'\\n'", ")", "print", "(", "'kgx='", ",", "binascii", ".", "b2a_hex", "(", "long2bytes", "(", "kgx", ")", ")", ",", "end", "=", "'\\n'", ")", "print", "(", "'diff='", ",", "binascii", ".", "b2a_hex", "(", "long2bytes", "(", "diff", ")", ")", ",", "end", "=", "'\\n'", ")", "print", "(", "'ux='", ",", "binascii", ".", "b2a_hex", "(", "long2bytes", "(", "ux", ")", ")", ",", "end", "=", "'\\n'", ")", "print", "(", "'aux='", ",", "binascii", ".", "b2a_hex", "(", "long2bytes", "(", "aux", ")", ")", ",", "end", "=", "'\\n'", ")", "print", "(", "'session_secret='", ",", "binascii", ".", "b2a_hex", "(", "long2bytes", "(", "session_secret", ")", ")", ",", "end", "=", "'\\n'", ")", "print", "(", "'session_key:K='", ",", "binascii", ".", "b2a_hex", "(", "K", ")", ")", "return", "K" ]
Client session secret Both: u = H(A, B) User: x = H(s, p) (user enters password) User: S = (B - kg^x) ^ (a + ux) (computes session key) User: K = H(S)
[ "Client", "session", "secret", "Both", ":", "u", "=", "H", "(", "A", "B", ")" ]
python
train
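The algebra behind client_session, checked on toy numbers (illustrative only; real SRP uses a large safe prime N): the client's (B - k*g^x)^(a + u*x) mod N must equal the server's (A * v^u)^b mod N, since both reduce to g^(b*(a + u*x)) mod N.

    N, g, k = 23, 5, 3               # toy group parameters
    x, a, b, u = 6, 4, 3, 2          # password hash, client/server secrets, scramble

    v = pow(g, x, N)                 # verifier stored by the server
    A = pow(g, a, N)                 # client public value
    B = (k * v + pow(g, b, N)) % N   # server public value

    client_S = pow((B - k * pow(g, x, N)) % N, a + u * x, N)
    server_S = pow(A * pow(v, u, N), b, N)
    assert client_S == server_S      # both sides derive the same session secret
    print(client_S)                  # -> 4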
mushkevych/scheduler
synergy/system/time_helper.py
https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/system/time_helper.py#L138-L155
def cast_to_time_qualifier(time_qualifier, timeperiod): """ method casts given timeperiod accordingly to time qualifier. For example, will cast session time format of 20100101193412 to 2010010119 with QUALIFIER_HOURLY """ if time_qualifier == QUALIFIER_HOURLY: date_format = SYNERGY_HOURLY_PATTERN elif time_qualifier == QUALIFIER_DAILY: date_format = SYNERGY_DAILY_PATTERN elif time_qualifier == QUALIFIER_MONTHLY: date_format = SYNERGY_MONTHLY_PATTERN elif time_qualifier == QUALIFIER_YEARLY: date_format = SYNERGY_YEARLY_PATTERN else: raise ValueError('unknown time qualifier: {0}'.format(time_qualifier)) pattern = define_pattern(timeperiod) t = datetime.strptime(timeperiod, pattern) return t.strftime(date_format)
[ "def", "cast_to_time_qualifier", "(", "time_qualifier", ",", "timeperiod", ")", ":", "if", "time_qualifier", "==", "QUALIFIER_HOURLY", ":", "date_format", "=", "SYNERGY_HOURLY_PATTERN", "elif", "time_qualifier", "==", "QUALIFIER_DAILY", ":", "date_format", "=", "SYNERGY_DAILY_PATTERN", "elif", "time_qualifier", "==", "QUALIFIER_MONTHLY", ":", "date_format", "=", "SYNERGY_MONTHLY_PATTERN", "elif", "time_qualifier", "==", "QUALIFIER_YEARLY", ":", "date_format", "=", "SYNERGY_YEARLY_PATTERN", "else", ":", "raise", "ValueError", "(", "'unknown time qualifier: {0}'", ".", "format", "(", "time_qualifier", ")", ")", "pattern", "=", "define_pattern", "(", "timeperiod", ")", "t", "=", "datetime", ".", "strptime", "(", "timeperiod", ",", "pattern", ")", "return", "t", ".", "strftime", "(", "date_format", ")" ]
method casts given timeperiod accordingly to time qualifier. For example, will cast session time format of 20100101193412 to 2010010119 with QUALIFIER_HOURLY
[ "method", "casts", "given", "timeperiod", "accordingly", "to", "time", "qualifier", ".", "For", "example", "will", "cast", "session", "time", "format", "of", "20100101193412", "to", "2010010119", "with", "QUALIFIER_HOURLY" ]
python
train
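The docstring example as a standalone round-trip (the pattern strings here are assumptions matching common strptime formats, not the module's SYNERGY_* constants):

    from datetime import datetime

    t = datetime.strptime('20100101193412', '%Y%m%d%H%M%S')
    print(t.strftime('%Y%m%d%H'))  # -> 2010010119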
zeaphoo/reston
reston/core/dvm.py
https://github.com/zeaphoo/reston/blob/96502487b2259572df55237c9526f92627465088/reston/core/dvm.py#L6655-L6662
def show(self, m_a): """ Display (with a pretty print) this object :param m_a: :class:`MethodAnalysis` object """ bytecode.PrettyShow(m_a, m_a.basic_blocks.gets(), self.notes) bytecode.PrettyShowEx(m_a.exceptions.gets())
[ "def", "show", "(", "self", ",", "m_a", ")", ":", "bytecode", ".", "PrettyShow", "(", "m_a", ",", "m_a", ".", "basic_blocks", ".", "gets", "(", ")", ",", "self", ".", "notes", ")", "bytecode", ".", "PrettyShowEx", "(", "m_a", ".", "exceptions", ".", "gets", "(", ")", ")" ]
Display (with a pretty print) this object :param m_a: :class:`MethodAnalysis` object
[ "Display", "(", "with", "a", "pretty", "print", ")", "this", "object" ]
python
train
RRZE-HPC/kerncraft
kerncraft/kerncraft.py
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kerncraft.py#L205-L305
def run(parser, args, output_file=sys.stdout): """Run command line interface.""" # Try loading results file (if requested) result_storage = {} if args.store: args.store.seek(0) try: result_storage = pickle.load(args.store) except EOFError: pass args.store.close() # machine information # Read machine description machine = MachineModel(args.machine.name, args=args) # process kernel if not args.kernel_description: code = str(args.code_file.read()) code = clean_code(code) kernel = KernelCode(code, filename=args.code_file.name, machine=machine, keep_intermediates=not args.clean_intermediates) else: description = str(args.code_file.read()) kernel = KernelDescription(yaml.load(description, Loader=yaml.Loader), machine=machine) # if no defines were given, guess suitable defines in-mem # TODO support in-cache # TODO broaden cases to n-dimensions # TODO make configurable (no hardcoded 512MB/1GB/min. 3 iteration ...) # works only for up to 3 dimensions required_consts = [v[1] for v in kernel.variables.values() if v[1] is not None] required_consts += [[l['start'], l['stop']] for l in kernel.get_loop_stack()] # split into individual consts required_consts = [i for l in required_consts for i in l] required_consts = set([i for l in required_consts for i in l.free_symbols]) if len(required_consts) > 0: # build defines permutations define_dict = {} for name, values in args.define: if name not in define_dict: define_dict[name] = [[name, v] for v in values] continue for v in values: if v not in define_dict[name]: define_dict[name].append([name, v]) define_product = list(itertools.product(*list(define_dict.values()))) # Check that all consts have been defined if set(required_consts).difference(set([symbol_pos_int(k) for k in define_dict.keys()])): raise ValueError("Not all constants have been defined. Required are: {}".format( required_consts)) else: define_product = [{}] for define in define_product: # Reset state of kernel kernel.clear_state() # Add constants from define arguments for k, v in define: kernel.set_constant(k, v) for model_name in uniquify(args.pmodel): # print header print('{:^80}'.format(' kerncraft '), file=output_file) print('{:<40}{:>40}'.format(args.code_file.name, '-m ' + args.machine.name), file=output_file) print(' '.join(['-D {} {}'.format(k, v) for k, v in define]), file=output_file) print('{:-^80}'.format(' ' + model_name + ' '), file=output_file) if args.verbose > 1: if not args.kernel_description: kernel.print_kernel_code(output_file=output_file) print('', file=output_file) kernel.print_variables_info(output_file=output_file) kernel.print_kernel_info(output_file=output_file) if args.verbose > 0: kernel.print_constants_info(output_file=output_file) model = getattr(models, model_name)(kernel, machine, args, parser) model.analyze() model.report(output_file=output_file) # Add results to storage kernel_name = os.path.split(args.code_file.name)[1] if kernel_name not in result_storage: result_storage[kernel_name] = {} if tuple(kernel.constants.items()) not in result_storage[kernel_name]: result_storage[kernel_name][tuple(kernel.constants.items())] = {} result_storage[kernel_name][tuple(kernel.constants.items())][model_name] = \ model.results print('', file=output_file) # Save storage to file (if requested) if args.store: temp_name = args.store.name + '.tmp' with open(temp_name, 'wb+') as f: pickle.dump(result_storage, f) shutil.move(temp_name, args.store.name)
[ "def", "run", "(", "parser", ",", "args", ",", "output_file", "=", "sys", ".", "stdout", ")", ":", "# Try loading results file (if requested)", "result_storage", "=", "{", "}", "if", "args", ".", "store", ":", "args", ".", "store", ".", "seek", "(", "0", ")", "try", ":", "result_storage", "=", "pickle", ".", "load", "(", "args", ".", "store", ")", "except", "EOFError", ":", "pass", "args", ".", "store", ".", "close", "(", ")", "# machine information", "# Read machine description", "machine", "=", "MachineModel", "(", "args", ".", "machine", ".", "name", ",", "args", "=", "args", ")", "# process kernel", "if", "not", "args", ".", "kernel_description", ":", "code", "=", "str", "(", "args", ".", "code_file", ".", "read", "(", ")", ")", "code", "=", "clean_code", "(", "code", ")", "kernel", "=", "KernelCode", "(", "code", ",", "filename", "=", "args", ".", "code_file", ".", "name", ",", "machine", "=", "machine", ",", "keep_intermediates", "=", "not", "args", ".", "clean_intermediates", ")", "else", ":", "description", "=", "str", "(", "args", ".", "code_file", ".", "read", "(", ")", ")", "kernel", "=", "KernelDescription", "(", "yaml", ".", "load", "(", "description", ",", "Loader", "=", "yaml", ".", "Loader", ")", ",", "machine", "=", "machine", ")", "# if no defines were given, guess suitable defines in-mem", "# TODO support in-cache", "# TODO broaden cases to n-dimensions", "# TODO make configurable (no hardcoded 512MB/1GB/min. 3 iteration ...)", "# works only for up to 3 dimensions", "required_consts", "=", "[", "v", "[", "1", "]", "for", "v", "in", "kernel", ".", "variables", ".", "values", "(", ")", "if", "v", "[", "1", "]", "is", "not", "None", "]", "required_consts", "+=", "[", "[", "l", "[", "'start'", "]", ",", "l", "[", "'stop'", "]", "]", "for", "l", "in", "kernel", ".", "get_loop_stack", "(", ")", "]", "# split into individual consts", "required_consts", "=", "[", "i", "for", "l", "in", "required_consts", "for", "i", "in", "l", "]", "required_consts", "=", "set", "(", "[", "i", "for", "l", "in", "required_consts", "for", "i", "in", "l", ".", "free_symbols", "]", ")", "if", "len", "(", "required_consts", ")", ">", "0", ":", "# build defines permutations", "define_dict", "=", "{", "}", "for", "name", ",", "values", "in", "args", ".", "define", ":", "if", "name", "not", "in", "define_dict", ":", "define_dict", "[", "name", "]", "=", "[", "[", "name", ",", "v", "]", "for", "v", "in", "values", "]", "continue", "for", "v", "in", "values", ":", "if", "v", "not", "in", "define_dict", "[", "name", "]", ":", "define_dict", "[", "name", "]", ".", "append", "(", "[", "name", ",", "v", "]", ")", "define_product", "=", "list", "(", "itertools", ".", "product", "(", "*", "list", "(", "define_dict", ".", "values", "(", ")", ")", ")", ")", "# Check that all consts have been defined", "if", "set", "(", "required_consts", ")", ".", "difference", "(", "set", "(", "[", "symbol_pos_int", "(", "k", ")", "for", "k", "in", "define_dict", ".", "keys", "(", ")", "]", ")", ")", ":", "raise", "ValueError", "(", "\"Not all constants have been defined. 
Required are: {}\"", ".", "format", "(", "required_consts", ")", ")", "else", ":", "define_product", "=", "[", "{", "}", "]", "for", "define", "in", "define_product", ":", "# Reset state of kernel", "kernel", ".", "clear_state", "(", ")", "# Add constants from define arguments", "for", "k", ",", "v", "in", "define", ":", "kernel", ".", "set_constant", "(", "k", ",", "v", ")", "for", "model_name", "in", "uniquify", "(", "args", ".", "pmodel", ")", ":", "# print header", "print", "(", "'{:^80}'", ".", "format", "(", "' kerncraft '", ")", ",", "file", "=", "output_file", ")", "print", "(", "'{:<40}{:>40}'", ".", "format", "(", "args", ".", "code_file", ".", "name", ",", "'-m '", "+", "args", ".", "machine", ".", "name", ")", ",", "file", "=", "output_file", ")", "print", "(", "' '", ".", "join", "(", "[", "'-D {} {}'", ".", "format", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "define", "]", ")", ",", "file", "=", "output_file", ")", "print", "(", "'{:-^80}'", ".", "format", "(", "' '", "+", "model_name", "+", "' '", ")", ",", "file", "=", "output_file", ")", "if", "args", ".", "verbose", ">", "1", ":", "if", "not", "args", ".", "kernel_description", ":", "kernel", ".", "print_kernel_code", "(", "output_file", "=", "output_file", ")", "print", "(", "''", ",", "file", "=", "output_file", ")", "kernel", ".", "print_variables_info", "(", "output_file", "=", "output_file", ")", "kernel", ".", "print_kernel_info", "(", "output_file", "=", "output_file", ")", "if", "args", ".", "verbose", ">", "0", ":", "kernel", ".", "print_constants_info", "(", "output_file", "=", "output_file", ")", "model", "=", "getattr", "(", "models", ",", "model_name", ")", "(", "kernel", ",", "machine", ",", "args", ",", "parser", ")", "model", ".", "analyze", "(", ")", "model", ".", "report", "(", "output_file", "=", "output_file", ")", "# Add results to storage", "kernel_name", "=", "os", ".", "path", ".", "split", "(", "args", ".", "code_file", ".", "name", ")", "[", "1", "]", "if", "kernel_name", "not", "in", "result_storage", ":", "result_storage", "[", "kernel_name", "]", "=", "{", "}", "if", "tuple", "(", "kernel", ".", "constants", ".", "items", "(", ")", ")", "not", "in", "result_storage", "[", "kernel_name", "]", ":", "result_storage", "[", "kernel_name", "]", "[", "tuple", "(", "kernel", ".", "constants", ".", "items", "(", ")", ")", "]", "=", "{", "}", "result_storage", "[", "kernel_name", "]", "[", "tuple", "(", "kernel", ".", "constants", ".", "items", "(", ")", ")", "]", "[", "model_name", "]", "=", "model", ".", "results", "print", "(", "''", ",", "file", "=", "output_file", ")", "# Save storage to file (if requested)", "if", "args", ".", "store", ":", "temp_name", "=", "args", ".", "store", ".", "name", "+", "'.tmp'", "with", "open", "(", "temp_name", ",", "'wb+'", ")", "as", "f", ":", "pickle", ".", "dump", "(", "result_storage", ",", "f", ")", "shutil", ".", "move", "(", "temp_name", ",", "args", ".", "store", ".", "name", ")" ]
Run command line interface.
[ "Run", "command", "line", "interface", "." ]
python
test
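The define-permutation step inside run(), isolated: each constant name maps to its candidate [name, value] pairs, and itertools.product enumerates one value per constant (the sample values are made up):

    import itertools

    define_dict = {'N': [['N', 128], ['N', 256]], 'M': [['M', 64]]}
    for combo in itertools.product(*define_dict.values()):
        print(dict(combo))
    # -> {'N': 128, 'M': 64}
    # -> {'N': 256, 'M': 64}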
apple/turicreate
src/unity/python/turicreate/_sys_util.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/_sys_util.py#L452-L463
def get_library_name(): """ Returns either sframe or turicreate depending on which library this file is bundled with. """ from os.path import split, abspath __lib_name = split(split(abspath(sys.modules[__name__].__file__))[0])[1] assert __lib_name in ["sframe", "turicreate"] return __lib_name
[ "def", "get_library_name", "(", ")", ":", "from", "os", ".", "path", "import", "split", ",", "abspath", "__lib_name", "=", "split", "(", "split", "(", "abspath", "(", "sys", ".", "modules", "[", "__name__", "]", ".", "__file__", ")", ")", "[", "0", "]", ")", "[", "1", "]", "assert", "__lib_name", "in", "[", "\"sframe\"", ",", "\"turicreate\"", "]", "return", "__lib_name" ]
Returns either sframe or turicreate depending on which library this file is bundled with.
[ "Returns", "either", "sframe", "or", "turicreate", "depending", "on", "which", "library", "this", "file", "is", "bundled", "with", "." ]
python
train
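The same parent-directory trick demonstrated on a stdlib package (json's module file lives in a directory named after the package, so the inner split recovers the name):

    from os.path import abspath, split
    import json

    print(split(split(abspath(json.__file__))[0])[1])  # -> 'json'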
Erotemic/utool
utool/util_hash.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_hash.py#L511-L655
def hashstr(data, hashlen=HASH_LEN, alphabet=ALPHABET): """ python -c "import utool as ut; print(ut.hashstr('abcd'))" Args: data (hashable): hashlen (int): (default = 16) alphabet (list): list of characters: Returns: str: hashstr CommandLine: python -m utool.util_hash --test-hashstr python3 -m utool.util_hash --test-hashstr python3 -m utool.util_hash --test-hashstr:2 python -m utool.util_hash hashstr:3 python3 -m utool.util_hash hashstr:3 Example0: >>> # ENABLE_DOCTEST >>> from utool.util_hash import * # NOQA >>> data = 'foobar' >>> hashlen = 16 >>> alphabet = ALPHABET_41 >>> text = hashstr(data, hashlen, alphabet) >>> result = ('text = %s' % (str(text),)) >>> print(result) text = mi5yum60mbxhyp+x Example1: >>> # ENABLE_DOCTEST >>> from utool.util_hash import * # NOQA >>> data = '' >>> hashlen = 16 >>> alphabet = ALPHABET_41 >>> text = hashstr(data, hashlen, alphabet) >>> result = ('text = %s' % (str(text),)) >>> print(result) text = 0000000000000000 Example2: >>> # ENABLE_DOCTEST >>> from utool.util_hash import * # NOQA >>> import numpy as np >>> data = np.array([1, 2, 3]) >>> hashlen = 16 >>> alphabet = ALPHABET_41 >>> text = hashstr(data, hashlen, alphabet) >>> result = ('text = %s' % (str(text),)) >>> print(result) text = z5lqw0bzt4dmb9yy Example2: >>> # ENABLE_DOCTEST >>> from utool.util_hash import * # NOQA >>> import numpy as np >>> from uuid import UUID >>> data = (UUID('7cd0197b-1394-9d16-b1eb-0d8d7a60aedc'), UUID('c76b54a5-adb6-7f16-f0fb-190ab99409f8')) >>> hashlen = 16 >>> alphabet = ALPHABET_41 >>> text = hashstr_arr(data, 'label') >>> result = ('text = %s' % (str(text),)) >>> print(result) Example3: >>> # DISABLE_DOCTEST >>> # UNSTABLE_DOCTEST >>> from utool.util_hash import * # NOQA >>> import numpy as np >>> data = np.array(['a', 'b'], dtype=object) >>> text = hashstr(data, alphabet=ALPHABET_27) >>> result = ('text = %s' % (str(text),)) >>> print(result) Ignore: data = np.array(['a', 'b'], dtype=object) data.tobytes() data = np.array(['a', 'b']) data = ['a', 'b'] data = np.array([1, 2, 3]) import hashlib from six.moves import cPickle as pickle pickle.dumps(data, protocol=2) python2 -c "import hashlib, numpy; print(hashlib.sha1('ab').hexdigest())" python3 -c "import hashlib, numpy; print(hashlib.sha1('ab'.encode('utf8')).hexdigest())" python2 -c "import hashlib, numpy; print(hashlib.sha1('ab').hexdigest())" python3 -c "import hashlib, numpy; print(hashlib.sha1(b'ab').hexdigest())" python2 -c "import hashlib, numpy; print(hashlib.sha1(numpy.array([1, 2])).hexdigest())" python3 -c "import hashlib, numpy; print(hashlib.sha1(numpy.array([1, 2])).hexdigest())" # TODO: numpy arrays of strings must be encoded to bytes first in python3 python2 -c "import hashlib, numpy; print(hashlib.sha1(numpy.array(['a', 'b'])).hexdigest())" python3 -c "import hashlib, numpy; print(hashlib.sha1(numpy.array([b'a', b'b'])).hexdigest())" python -c "import hashlib, numpy; print(hashlib.sha1(numpy.array(['a', 'b'], dtype=object)).hexdigest())" python -c "import hashlib, numpy; print(hashlib.sha1(numpy.array(['a', 'b'], dtype=object)).hexdigest())" """ if util_type.HAVE_NUMPY and isinstance(data, np.ndarray): if data.dtype.kind == 'O': msg = '[ut] hashing ndarrays with dtype=object is unstable' warnings.warn(msg, RuntimeWarning) # but tobytes is ok, but differs between python 2 and 3 for objects data = data.dumps() # data = data.tobytes() if isinstance(data, tuple): # should instead do if False: hasher = hashlib.sha512() items = data for item in items: if isinstance(item, uuid.UUID): hasher.update(item.bytes) else: hasher.update(item) text = hasher.hexdigest() hashstr2 = convert_hexstr_to_bigbase(text, alphabet, bigbase=len(alphabet)) # Truncate text = hashstr2[:hashlen] return text else: msg = '[ut] hashing tuples with repr is not a good idea. FIXME' # warnings.warn(msg, RuntimeWarning) data = repr(data) # Hack? # convert unicode into raw bytes if isinstance(data, six.text_type): data = data.encode('utf-8') if isinstance(data, stringlike) and len(data) == 0: # Make a special hash for empty data text = (alphabet[0] * hashlen) else: # Get a 128 character hex string text = hashlib.sha512(data).hexdigest() # Shorten length of string (by increasing base) hashstr2 = convert_hexstr_to_bigbase(text, alphabet, bigbase=len(alphabet)) # Truncate text = hashstr2[:hashlen] return text
[ "def", "hashstr", "(", "data", ",", "hashlen", "=", "HASH_LEN", ",", "alphabet", "=", "ALPHABET", ")", ":", "if", "util_type", ".", "HAVE_NUMPY", "and", "isinstance", "(", "data", ",", "np", ".", "ndarray", ")", ":", "if", "data", ".", "dtype", ".", "kind", "==", "'O'", ":", "msg", "=", "'[ut] hashing ndarrays with dtype=object is unstable'", "warnings", ".", "warn", "(", "msg", ",", "RuntimeWarning", ")", "# but tobytes is ok, but differs between python 2 and 3 for objects", "data", "=", "data", ".", "dumps", "(", ")", "# data = data.tobytes()", "if", "isinstance", "(", "data", ",", "tuple", ")", ":", "# should instead do", "if", "False", ":", "hasher", "=", "hashlib", ".", "sha512", "(", ")", "items", "=", "data", "for", "item", "in", "items", ":", "if", "isinstance", "(", "item", ",", "uuid", ".", "UUID", ")", ":", "hasher", ".", "update", "(", "item", ".", "bytes", ")", "else", ":", "hasher", ".", "update", "(", "item", ")", "text", "=", "hasher", ".", "hexdigest", "(", ")", "hashstr2", "=", "convert_hexstr_to_bigbase", "(", "text", ",", "alphabet", ",", "bigbase", "=", "len", "(", "alphabet", ")", ")", "# Truncate", "text", "=", "hashstr2", "[", ":", "hashlen", "]", "return", "text", "else", ":", "msg", "=", "'[ut] hashing tuples with repr is not a good idea. FIXME'", "# warnings.warn(msg, RuntimeWarning)", "data", "=", "repr", "(", "data", ")", "# Hack?", "# convert unicode into raw bytes", "if", "isinstance", "(", "data", ",", "six", ".", "text_type", ")", ":", "data", "=", "data", ".", "encode", "(", "'utf-8'", ")", "if", "isinstance", "(", "data", ",", "stringlike", ")", "and", "len", "(", "data", ")", "==", "0", ":", "# Make a special hash for empty data", "text", "=", "(", "alphabet", "[", "0", "]", "*", "hashlen", ")", "else", ":", "# Get a 128 character hex string", "text", "=", "hashlib", ".", "sha512", "(", "data", ")", ".", "hexdigest", "(", ")", "# Shorten length of string (by increasing base)", "hashstr2", "=", "convert_hexstr_to_bigbase", "(", "text", ",", "alphabet", ",", "bigbase", "=", "len", "(", "alphabet", ")", ")", "# Truncate", "text", "=", "hashstr2", "[", ":", "hashlen", "]", "return", "text" ]
python -c "import utool as ut; print(ut.hashstr('abcd'))" Args: data (hashable): hashlen (int): (default = 16) alphabet (list): list of characters: Returns: str: hashstr CommandLine: python -m utool.util_hash --test-hashstr python3 -m utool.util_hash --test-hashstr python3 -m utool.util_hash --test-hashstr:2 python -m utool.util_hash hashstr:3 python3 -m utool.util_hash hashstr:3 Example0: >>> # ENABLE_DOCTEST >>> from utool.util_hash import * # NOQA >>> data = 'foobar' >>> hashlen = 16 >>> alphabet = ALPHABET_41 >>> text = hashstr(data, hashlen, alphabet) >>> result = ('text = %s' % (str(text),)) >>> print(result) text = mi5yum60mbxhyp+x Example1: >>> # ENABLE_DOCTEST >>> from utool.util_hash import * # NOQA >>> data = '' >>> hashlen = 16 >>> alphabet = ALPHABET_41 >>> text = hashstr(data, hashlen, alphabet) >>> result = ('text = %s' % (str(text),)) >>> print(result) text = 0000000000000000 Example2: >>> # ENABLE_DOCTEST >>> from utool.util_hash import * # NOQA >>> import numpy as np >>> data = np.array([1, 2, 3]) >>> hashlen = 16 >>> alphabet = ALPHABET_41 >>> text = hashstr(data, hashlen, alphabet) >>> result = ('text = %s' % (str(text),)) >>> print(result) text = z5lqw0bzt4dmb9yy Example2: >>> # ENABLE_DOCTEST >>> from utool.util_hash import * # NOQA >>> import numpy as np >>> from uuid import UUID >>> data = (UUID('7cd0197b-1394-9d16-b1eb-0d8d7a60aedc'), UUID('c76b54a5-adb6-7f16-f0fb-190ab99409f8')) >>> hashlen = 16 >>> alphabet = ALPHABET_41 >>> text = hashstr_arr(data, 'label') >>> result = ('text = %s' % (str(text),)) >>> print(result) Example3: >>> # DISABLE_DOCTEST >>> # UNSTABLE_DOCTEST >>> from utool.util_hash import * # NOQA >>> import numpy as np >>> data = np.array(['a', 'b'], dtype=object) >>> text = hashstr(data, alphabet=ALPHABET_27) >>> result = ('text = %s' % (str(text),)) >>> print(result) Ignore: data = np.array(['a', 'b'], dtype=object) data.tobytes() data = np.array(['a', 'b']) data = ['a', 'b'] data = np.array([1, 2, 3]) import hashlib from six.moves import cPickle as pickle pickle.dumps(data, protocol=2) python2 -c "import hashlib, numpy; print(hashlib.sha1('ab').hexdigest())" python3 -c "import hashlib, numpy; print(hashlib.sha1('ab'.encode('utf8')).hexdigest())" python2 -c "import hashlib, numpy; print(hashlib.sha1('ab').hexdigest())" python3 -c "import hashlib, numpy; print(hashlib.sha1(b'ab').hexdigest())" python2 -c "import hashlib, numpy; print(hashlib.sha1(numpy.array([1, 2])).hexdigest())" python3 -c "import hashlib, numpy; print(hashlib.sha1(numpy.array([1, 2])).hexdigest())" # TODO: numpy arrays of strings must be encoded to bytes first in python3 python2 -c "import hashlib, numpy; print(hashlib.sha1(numpy.array(['a', 'b'])).hexdigest())" python3 -c "import hashlib, numpy; print(hashlib.sha1(numpy.array([b'a', b'b'])).hexdigest())" python -c "import hashlib, numpy; print(hashlib.sha1(numpy.array(['a', 'b'], dtype=object)).hexdigest())" python -c "import hashlib, numpy; print(hashlib.sha1(numpy.array(['a', 'b'], dtype=object)).hexdigest())"
[ "python", "-", "c", "import", "utool", "as", "ut", ";", "print", "(", "ut", ".", "hashstr", "(", "abcd", "))" ]
python
train
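The non-tuple path of hashstr, sketched self-contained: sha512 the bytes, then shorten the hex digest by re-encoding it in a larger alphabet and truncating (the alphabet and conversion helper here are stand-ins, not utool's own):

    import hashlib

    ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyz'

    def to_bigbase(hexstr, alphabet):
        # Re-encode a hex string in base len(alphabet).
        n = int(hexstr, 16)
        if n == 0:
            return alphabet[0]
        digits = []
        while n:
            n, r = divmod(n, len(alphabet))
            digits.append(alphabet[r])
        return ''.join(reversed(digits))

    hexdigest = hashlib.sha512(b'foobar').hexdigest()
    print(to_bigbase(hexdigest, ALPHABET)[:16])  # 16-character short hash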
tcalmant/ipopo
pelix/rsa/__init__.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/rsa/__init__.py#L860-L888
def fromimportupdate(cls, bundle, import_reg): # type: (Bundle, ImportRegistration) -> RemoteServiceAdminEvent """ Creates a RemoteServiceAdminEvent object from the update of an ImportRegistration """ exc = import_reg.get_exception() if exc: return RemoteServiceAdminEvent( RemoteServiceAdminEvent.IMPORT_ERROR, bundle, import_reg.get_import_container_id(), import_reg.get_remoteservice_id(), None, None, exc, import_reg.get_description(), ) return RemoteServiceAdminEvent( RemoteServiceAdminEvent.IMPORT_UPDATE, bundle, import_reg.get_import_container_id(), import_reg.get_remoteservice_id(), import_reg.get_import_reference(), None, None, import_reg.get_description(), )
[ "def", "fromimportupdate", "(", "cls", ",", "bundle", ",", "import_reg", ")", ":", "# type: (Bundle, ImportRegistration) -> RemoteServiceAdminEvent", "exc", "=", "import_reg", ".", "get_exception", "(", ")", "if", "exc", ":", "return", "RemoteServiceAdminEvent", "(", "RemoteServiceAdminEvent", ".", "IMPORT_ERROR", ",", "bundle", ",", "import_reg", ".", "get_import_container_id", "(", ")", ",", "import_reg", ".", "get_remoteservice_id", "(", ")", ",", "None", ",", "None", ",", "exc", ",", "import_reg", ".", "get_description", "(", ")", ",", ")", "return", "RemoteServiceAdminEvent", "(", "RemoteServiceAdminEvent", ".", "IMPORT_UPDATE", ",", "bundle", ",", "import_reg", ".", "get_import_container_id", "(", ")", ",", "import_reg", ".", "get_remoteservice_id", "(", ")", ",", "import_reg", ".", "get_import_reference", "(", ")", ",", "None", ",", "None", ",", "import_reg", ".", "get_description", "(", ")", ",", ")" ]
Creates a RemoteServiceAdminEvent object from the update of an ImportRegistration
[ "Creates", "a", "RemoteServiceAdminEvent", "object", "from", "the", "update", "of", "an", "ImportRegistration" ]
python
train
pystorm/pystorm
pystorm/bolt.py
https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/bolt.py#L217-L226
def _handle_run_exception(self, exc): """Process an exception encountered while running the ``run()`` loop. Called right before program exits. """ if len(self._current_tups) == 1: tup = self._current_tups[0] self.raise_exception(exc, tup) if self.auto_fail: self.fail(tup)
[ "def", "_handle_run_exception", "(", "self", ",", "exc", ")", ":", "if", "len", "(", "self", ".", "_current_tups", ")", "==", "1", ":", "tup", "=", "self", ".", "_current_tups", "[", "0", "]", "self", ".", "raise_exception", "(", "exc", ",", "tup", ")", "if", "self", ".", "auto_fail", ":", "self", ".", "fail", "(", "tup", ")" ]
Process an exception encountered while running the ``run()`` loop. Called right before program exits.
[ "Process", "an", "exception", "encountered", "while", "running", "the", "run", "()", "loop", "." ]
python
train