Dataset schema:

  repo              string  (lengths 7 to 54)
  path              string  (lengths 4 to 192)
  url               string  (lengths 87 to 284)
  code              string  (lengths 78 to 104k)
  code_tokens       list
  docstring         string  (lengths 1 to 46.9k)
  docstring_tokens  list
  language          string  (1 distinct value)
  partition         string  (3 distinct values)
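A minimal sketch of iterating over records with this schema using the Hugging Face datasets library; the file path is a placeholder, since the dump does not say where the rows are stored:

    from datasets import load_dataset

    # "data/python_train.jsonl" is a hypothetical path to rows with the schema above.
    ds = load_dataset("json", data_files="data/python_train.jsonl", split="train")
    for row in ds.select(range(3)):
        print(row["repo"], row["path"], row["partition"])
        print(row["docstring"][:80])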
allianceauth/allianceauth
allianceauth/groupmanagement/managers.py
https://github.com/allianceauth/allianceauth/blob/6585b07e96571a99a4d6dc03cc03f9b8c8f690ca/allianceauth/groupmanagement/managers.py#L31-L42
def can_manage_groups(cls, user):
    """
    For use with user_passes_test decorator.
    Check if the user can manage groups. Either has the auth.group_management
    permission or is a leader of at least one group and is also a Member.
    :param user: django.contrib.auth.models.User for the request
    :return: bool True if user can manage groups, False otherwise
    """
    if user.is_authenticated:
        return cls.has_management_permission(user) or user.leads_groups.all()
    return False
[ "def", "can_manage_groups", "(", "cls", ",", "user", ")", ":", "if", "user", ".", "is_authenticated", ":", "return", "cls", ".", "has_management_permission", "(", "user", ")", "or", "user", ".", "leads_groups", ".", "all", "(", ")", "return", "False" ]
For use with user_passes_test decorator. Check if the user can manage groups. Either has the auth.group_management permission or is a leader of at least one group and is also a Member. :param user: django.contrib.auth.models.User for the request :return: bool True if user can manage groups, False otherwise
[ "For", "use", "with", "user_passes_test", "decorator", ".", "Check", "if", "the", "user", "can", "manage", "groups", ".", "Either", "has", "the", "auth", ".", "group_management", "permission", "or", "is", "a", "leader", "of", "at", "least", "one", "group", "and", "is", "also", "a", "Member", ".", ":", "param", "user", ":", "django", ".", "contrib", ".", "auth", ".", "models", ".", "User", "for", "the", "request", ":", "return", ":", "bool", "True", "if", "user", "can", "manage", "groups", "False", "otherwise" ]
python
train
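A sketch of how a check like can_manage_groups above is typically wired into a Django view with user_passes_test; the GroupManager class name and view are assumptions for illustration, not part of the record:

    from django.contrib.auth.decorators import user_passes_test
    # Assumed import path for the class holding the classmethod shown above.
    from allianceauth.groupmanagement.managers import GroupManager

    @user_passes_test(GroupManager.can_manage_groups)
    def group_management(request):
        ...  # only users passing the check reach this view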
mdickinson/bigfloat
bigfloat/core.py
https://github.com/mdickinson/bigfloat/blob/e5fdd1048615191ed32a2b7460e14b3b3ff24662/bigfloat/core.py#L1831-L1841
def tanh(x, context=None):
    """
    Return the hyperbolic tangent of x.

    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_tanh,
        (BigFloat._implicit_convert(x),),
        context,
    )
[ "def", "tanh", "(", "x", ",", "context", "=", "None", ")", ":", "return", "_apply_function_in_current_context", "(", "BigFloat", ",", "mpfr", ".", "mpfr_tanh", ",", "(", "BigFloat", ".", "_implicit_convert", "(", "x", ")", ",", ")", ",", "context", ",", ")" ]
Return the hyperbolic tangent of x.
[ "Return", "the", "hyperbolic", "tangent", "of", "x", "." ]
python
train
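A short usage sketch for the tanh wrapper above, assuming the public bigfloat API (the precision context manager) behaves as documented:

    from bigfloat import tanh, precision

    # Evaluate tanh at 200 bits of precision; 0.5 is implicitly converted to BigFloat.
    with precision(200):
        print(tanh(0.5))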
lltk/lltk
lltk/de/scrapers/pons.py
https://github.com/lltk/lltk/blob/d171de55c1b97695fddedf4b02401ae27bf1d634/lltk/de/scrapers/pons.py#L20-L27
def _normalize(self, string):
    ''' Returns a sanitized string. '''
    string = string.replace(u'\xb7', '')
    string = string.replace(u'\u0331', '')
    string = string.replace(u'\u0323', '')
    string = string.strip(' \n\rI.')
    return string
[ "def", "_normalize", "(", "self", ",", "string", ")", ":", "string", "=", "string", ".", "replace", "(", "u'\\xb7'", ",", "''", ")", "string", "=", "string", ".", "replace", "(", "u'\\u0331'", ",", "''", ")", "string", "=", "string", ".", "replace", "(", "u'\\u0323'", ",", "''", ")", "string", "=", "string", ".", "strip", "(", "' \\n\\rI.'", ")", "return", "string" ]
Returns a sanitized string.
[ "Returns", "a", "sanitized", "string", "." ]
python
train
tonybaloney/retox
retox/ui.py
https://github.com/tonybaloney/retox/blob/4635e31001d2ac083423f46766249ac8daca7c9c/retox/ui.py#L281-L288
def reset(self):
    '''
    Reset the frame between jobs
    '''
    self.palette['title'] = (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_BLUE)
    self._completed_view.options = []
    self._task_view.options = []
    self.refresh()
[ "def", "reset", "(", "self", ")", ":", "self", ".", "palette", "[", "'title'", "]", "=", "(", "Screen", ".", "COLOUR_WHITE", ",", "Screen", ".", "A_BOLD", ",", "Screen", ".", "COLOUR_BLUE", ")", "self", ".", "_completed_view", ".", "options", "=", "[", "]", "self", ".", "_task_view", ".", "options", "=", "[", "]", "self", ".", "refresh", "(", ")" ]
Reset the frame between jobs
[ "Reset", "the", "frame", "between", "jobs" ]
python
train
cltk/cltk
cltk/prosody/latin/metrical_validator.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/metrical_validator.py#L157-L167
def closest_hexameter_patterns(self, scansion: str) -> List[str]:
    """
    Find the closest group of matching valid hexameter patterns.

    :return: list of the closest valid hexameter patterns; only candidates with
    a matching length/number of syllables are considered.

    >>> print(MetricalValidator().closest_hexameter_patterns('-UUUUU-----UU--'))
    ['-UU-UU-----UU--']
    """
    return self._closest_patterns(self.VALID_HEXAMETERS, scansion)
[ "def", "closest_hexameter_patterns", "(", "self", ",", "scansion", ":", "str", ")", "->", "List", "[", "str", "]", ":", "return", "self", ".", "_closest_patterns", "(", "self", ".", "VALID_HEXAMETERS", ",", "scansion", ")" ]
Find the closest group of matching valid hexameter patterns. :return: list of the closest valid hexameter patterns; only candidates with a matching length/number of syllables are considered. >>> print(MetricalValidator().closest_hexameter_patterns('-UUUUU-----UU--')) ['-UU-UU-----UU--']
[ "Find", "the", "closest", "group", "of", "matching", "valid", "hexameter", "patterns", "." ]
python
train
openid/JWTConnect-Python-CryptoJWT
src/cryptojwt/key_jar.py
https://github.com/openid/JWTConnect-Python-CryptoJWT/blob/8863cfbfe77ca885084870b234a66b55bd52930c/src/cryptojwt/key_jar.py#L735-L848
def init_key_jar(public_path='', private_path='', key_defs='', owner='', read_only=True):
    """
    A number of cases here:

    1. A private path is given

       a. The file exists and a JWKS is found there.
          From that JWKS a KeyJar instance is built.
       b. If the private path file doesn't exit the key definitions are used to
          build a KeyJar instance. A JWKS with the private keys are written to
          the file named in private_path.

       If a public path is also provided a JWKS with public keys are written
       to that file.

    2. A public path is given but no private path.

       a. If the public path file exists then the JWKS in that file is used to
          construct a KeyJar.
       b. If no such file exists then a KeyJar will be built based on the
          key_defs specification and a JWKS with the public keys will be
          written to the public path file.

    3. If neither a public path nor a private path is given then a KeyJar is
       built based on the key_defs specification and no JWKS will be written
       to file.

    In all cases a KeyJar instance is returned
    The keys stored in the KeyJar will be stored under the '' identifier.

    :param public_path: A file path to a file that contains a JWKS with public keys
    :param private_path: A file path to a file that contains a JWKS with private keys.
    :param key_defs: A definition of what keys should be created if they are not
        already available
    :param owner: The owner of the keys
    :param read_only: This function should not attempt to write anything to a
        file system.
    :return: An instantiated :py:class;`oidcmsg.key_jar.KeyJar` instance
    """
    if private_path:
        if os.path.isfile(private_path):
            _jwks = open(private_path, 'r').read()
            _kj = KeyJar()
            _kj.import_jwks(json.loads(_jwks), owner)
            if key_defs:
                _kb = _kj.issuer_keys[owner][0]
                _diff = key_diff(_kb, key_defs)
                if _diff:
                    if read_only:
                        logger.error('Not allowed to write to disc!')
                    else:
                        update_key_bundle(_kb, _diff)
                        _kj.issuer_keys[owner] = [_kb]
                        jwks = _kj.export_jwks(private=True, issuer=owner)
                        fp = open(private_path, 'w')
                        fp.write(json.dumps(jwks))
                        fp.close()
        else:
            _kj = build_keyjar(key_defs, owner=owner)
            if not read_only:
                jwks = _kj.export_jwks(private=True, issuer=owner)
                head, tail = os.path.split(private_path)
                if head and not os.path.isdir(head):
                    os.makedirs(head)
                fp = open(private_path, 'w')
                fp.write(json.dumps(jwks))
                fp.close()

        if public_path and not read_only:
            jwks = _kj.export_jwks(issuer=owner)  # public part
            head, tail = os.path.split(public_path)
            if head and not os.path.isdir(head):
                os.makedirs(head)
            fp = open(public_path, 'w')
            fp.write(json.dumps(jwks))
            fp.close()
    elif public_path:
        if os.path.isfile(public_path):
            _jwks = open(public_path, 'r').read()
            _kj = KeyJar()
            _kj.import_jwks(json.loads(_jwks), owner)
            if key_defs:
                _kb = _kj.issuer_keys[owner][0]
                _diff = key_diff(_kb, key_defs)
                if _diff:
                    if read_only:
                        logger.error('Not allowed to write to disc!')
                    else:
                        update_key_bundle(_kb, _diff)
                        _kj.issuer_keys[owner] = [_kb]
                        jwks = _kj.export_jwks(issuer=owner)
                        fp = open(private_path, 'w')
                        fp.write(json.dumps(jwks))
                        fp.close()
        else:
            _kj = build_keyjar(key_defs, owner=owner)
            if not read_only:
                _jwks = _kj.export_jwks(issuer=owner)
                head, tail = os.path.split(public_path)
                if head and not os.path.isdir(head):
                    os.makedirs(head)
                fp = open(public_path, 'w')
                fp.write(json.dumps(_jwks))
                fp.close()
    else:
        _kj = build_keyjar(key_defs, owner=owner)

    return _kj
[ "def", "init_key_jar", "(", "public_path", "=", "''", ",", "private_path", "=", "''", ",", "key_defs", "=", "''", ",", "owner", "=", "''", ",", "read_only", "=", "True", ")", ":", "if", "private_path", ":", "if", "os", ".", "path", ".", "isfile", "(", "private_path", ")", ":", "_jwks", "=", "open", "(", "private_path", ",", "'r'", ")", ".", "read", "(", ")", "_kj", "=", "KeyJar", "(", ")", "_kj", ".", "import_jwks", "(", "json", ".", "loads", "(", "_jwks", ")", ",", "owner", ")", "if", "key_defs", ":", "_kb", "=", "_kj", ".", "issuer_keys", "[", "owner", "]", "[", "0", "]", "_diff", "=", "key_diff", "(", "_kb", ",", "key_defs", ")", "if", "_diff", ":", "if", "read_only", ":", "logger", ".", "error", "(", "'Not allowed to write to disc!'", ")", "else", ":", "update_key_bundle", "(", "_kb", ",", "_diff", ")", "_kj", ".", "issuer_keys", "[", "owner", "]", "=", "[", "_kb", "]", "jwks", "=", "_kj", ".", "export_jwks", "(", "private", "=", "True", ",", "issuer", "=", "owner", ")", "fp", "=", "open", "(", "private_path", ",", "'w'", ")", "fp", ".", "write", "(", "json", ".", "dumps", "(", "jwks", ")", ")", "fp", ".", "close", "(", ")", "else", ":", "_kj", "=", "build_keyjar", "(", "key_defs", ",", "owner", "=", "owner", ")", "if", "not", "read_only", ":", "jwks", "=", "_kj", ".", "export_jwks", "(", "private", "=", "True", ",", "issuer", "=", "owner", ")", "head", ",", "tail", "=", "os", ".", "path", ".", "split", "(", "private_path", ")", "if", "head", "and", "not", "os", ".", "path", ".", "isdir", "(", "head", ")", ":", "os", ".", "makedirs", "(", "head", ")", "fp", "=", "open", "(", "private_path", ",", "'w'", ")", "fp", ".", "write", "(", "json", ".", "dumps", "(", "jwks", ")", ")", "fp", ".", "close", "(", ")", "if", "public_path", "and", "not", "read_only", ":", "jwks", "=", "_kj", ".", "export_jwks", "(", "issuer", "=", "owner", ")", "# public part", "head", ",", "tail", "=", "os", ".", "path", ".", "split", "(", "public_path", ")", "if", "head", "and", "not", "os", ".", "path", ".", "isdir", "(", "head", ")", ":", "os", ".", "makedirs", "(", "head", ")", "fp", "=", "open", "(", "public_path", ",", "'w'", ")", "fp", ".", "write", "(", "json", ".", "dumps", "(", "jwks", ")", ")", "fp", ".", "close", "(", ")", "elif", "public_path", ":", "if", "os", ".", "path", ".", "isfile", "(", "public_path", ")", ":", "_jwks", "=", "open", "(", "public_path", ",", "'r'", ")", ".", "read", "(", ")", "_kj", "=", "KeyJar", "(", ")", "_kj", ".", "import_jwks", "(", "json", ".", "loads", "(", "_jwks", ")", ",", "owner", ")", "if", "key_defs", ":", "_kb", "=", "_kj", ".", "issuer_keys", "[", "owner", "]", "[", "0", "]", "_diff", "=", "key_diff", "(", "_kb", ",", "key_defs", ")", "if", "_diff", ":", "if", "read_only", ":", "logger", ".", "error", "(", "'Not allowed to write to disc!'", ")", "else", ":", "update_key_bundle", "(", "_kb", ",", "_diff", ")", "_kj", ".", "issuer_keys", "[", "owner", "]", "=", "[", "_kb", "]", "jwks", "=", "_kj", ".", "export_jwks", "(", "issuer", "=", "owner", ")", "fp", "=", "open", "(", "private_path", ",", "'w'", ")", "fp", ".", "write", "(", "json", ".", "dumps", "(", "jwks", ")", ")", "fp", ".", "close", "(", ")", "else", ":", "_kj", "=", "build_keyjar", "(", "key_defs", ",", "owner", "=", "owner", ")", "if", "not", "read_only", ":", "_jwks", "=", "_kj", ".", "export_jwks", "(", "issuer", "=", "owner", ")", "head", ",", "tail", "=", "os", ".", "path", ".", "split", "(", "public_path", ")", "if", "head", "and", "not", "os", ".", "path", ".", "isdir", "(", "head", ")", ":", 
"os", ".", "makedirs", "(", "head", ")", "fp", "=", "open", "(", "public_path", ",", "'w'", ")", "fp", ".", "write", "(", "json", ".", "dumps", "(", "_jwks", ")", ")", "fp", ".", "close", "(", ")", "else", ":", "_kj", "=", "build_keyjar", "(", "key_defs", ",", "owner", "=", "owner", ")", "return", "_kj" ]
A number of cases here: 1. A private path is given a. The file exists and a JWKS is found there. From that JWKS a KeyJar instance is built. b. If the private path file doesn't exit the key definitions are used to build a KeyJar instance. A JWKS with the private keys are written to the file named in private_path. If a public path is also provided a JWKS with public keys are written to that file. 2. A public path is given but no private path. a. If the public path file exists then the JWKS in that file is used to construct a KeyJar. b. If no such file exists then a KeyJar will be built based on the key_defs specification and a JWKS with the public keys will be written to the public path file. 3. If neither a public path nor a private path is given then a KeyJar is built based on the key_defs specification and no JWKS will be written to file. In all cases a KeyJar instance is returned The keys stored in the KeyJar will be stored under the '' identifier. :param public_path: A file path to a file that contains a JWKS with public keys :param private_path: A file path to a file that contains a JWKS with private keys. :param key_defs: A definition of what keys should be created if they are not already available :param owner: The owner of the keys :param read_only: This function should not attempt to write anything to a file system. :return: An instantiated :py:class;`oidcmsg.key_jar.KeyJar` instance
[ "A", "number", "of", "cases", "here", ":" ]
python
train
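A usage sketch for init_key_jar above; the key_defs format follows the usual cryptojwt key specification, but the exact paths and key definitions here are assumptions:

    from cryptojwt.key_jar import init_key_jar

    # Hypothetical key definitions: one RSA signing key and one P-256 EC signing key.
    KEY_DEFS = [
        {"type": "RSA", "use": ["sig"]},
        {"type": "EC", "crv": "P-256", "use": ["sig"]},
    ]

    key_jar = init_key_jar(
        private_path="private/jwks.json",  # created from KEY_DEFS if it does not exist
        public_path="static/jwks.json",    # public counterpart
        key_defs=KEY_DEFS,
        read_only=False,
    )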
hyperledger/indy-plenum
stp_core/crypto/nacl_wrappers.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/stp_core/crypto/nacl_wrappers.py#L357-L388
def encrypt(self, plaintext, nonce, encoder=encoding.RawEncoder):
    """
    Encrypts the plaintext message using the given `nonce` and returns
    the ciphertext encoded with the encoder.

    .. warning:: It is **VITALLY** important that the nonce is a nonce,
        i.e. it is a number used only once for any given key. If you fail
        to do this, you compromise the privacy of the messages encrypted.

    :param plaintext: [:class:`bytes`] The plaintext message to encrypt
    :param nonce: [:class:`bytes`] The nonce to use in the encryption
    :param encoder: The encoder to use to encode the ciphertext
    :rtype: [:class:`nacl.utils.EncryptedMessage`]
    """
    if len(nonce) != self.NONCE_SIZE:
        raise ValueError("The nonce must be exactly %s bytes long" %
                         self.NONCE_SIZE)

    ciphertext = libnacl.crypto_box_afternm(
        plaintext,
        nonce,
        self._shared_key,
    )

    encoded_nonce = encoder.encode(nonce)
    encoded_ciphertext = encoder.encode(ciphertext)

    return EncryptedMessage._from_parts(
        encoded_nonce,
        encoded_ciphertext,
        encoder.encode(nonce + ciphertext),
    )
[ "def", "encrypt", "(", "self", ",", "plaintext", ",", "nonce", ",", "encoder", "=", "encoding", ".", "RawEncoder", ")", ":", "if", "len", "(", "nonce", ")", "!=", "self", ".", "NONCE_SIZE", ":", "raise", "ValueError", "(", "\"The nonce must be exactly %s bytes long\"", "%", "self", ".", "NONCE_SIZE", ")", "ciphertext", "=", "libnacl", ".", "crypto_box_afternm", "(", "plaintext", ",", "nonce", ",", "self", ".", "_shared_key", ",", ")", "encoded_nonce", "=", "encoder", ".", "encode", "(", "nonce", ")", "encoded_ciphertext", "=", "encoder", ".", "encode", "(", "ciphertext", ")", "return", "EncryptedMessage", ".", "_from_parts", "(", "encoded_nonce", ",", "encoded_ciphertext", ",", "encoder", ".", "encode", "(", "nonce", "+", "ciphertext", ")", ",", ")" ]
Encrypts the plaintext message using the given `nonce` and returns the ciphertext encoded with the encoder. .. warning:: It is **VITALLY** important that the nonce is a nonce, i.e. it is a number used only once for any given key. If you fail to do this, you compromise the privacy of the messages encrypted. :param plaintext: [:class:`bytes`] The plaintext message to encrypt :param nonce: [:class:`bytes`] The nonce to use in the encryption :param encoder: The encoder to use to encode the ciphertext :rtype: [:class:`nacl.utils.EncryptedMessage`]
[ "Encrypts", "the", "plaintext", "message", "using", "the", "given", "nonce", "and", "returns", "the", "ciphertext", "encoded", "with", "the", "encoder", "." ]
python
train
rosenbrockc/ci
pyci/server.py
https://github.com/rosenbrockc/ci/blob/4d5a60291424a83124d1d962d17fb4c7718cde2b/pyci/server.py#L509-L530
def _fields_common(self):
    """Returns a dictionary of fields and values that are common to all events
    for which fields dictionaries are created.
    """
    result = {}
    if not self.testmode:
        result["__reponame__"] = self.repo.repo.full_name
        result["__repodesc__"] = self.repo.repo.description
        result["__repourl__"] = self.repo.repo.html_url
        result["__repodir__"] = self.repodir

        if self.organization is not None:
            owner = self.repo.organization
        else:
            owner = self.repo.user

        result["__username__"] = owner.name
        result["__userurl__"] = owner.html_url
        result["__useravatar__"] = owner.avatar_url
        result["__useremail__"] = owner.email

    return result
[ "def", "_fields_common", "(", "self", ")", ":", "result", "=", "{", "}", "if", "not", "self", ".", "testmode", ":", "result", "[", "\"__reponame__\"", "]", "=", "self", ".", "repo", ".", "repo", ".", "full_name", "result", "[", "\"__repodesc__\"", "]", "=", "self", ".", "repo", ".", "repo", ".", "description", "result", "[", "\"__repourl__\"", "]", "=", "self", ".", "repo", ".", "repo", ".", "html_url", "result", "[", "\"__repodir__\"", "]", "=", "self", ".", "repodir", "if", "self", ".", "organization", "is", "not", "None", ":", "owner", "=", "self", ".", "repo", ".", "organization", "else", ":", "owner", "=", "self", ".", "repo", ".", "user", "result", "[", "\"__username__\"", "]", "=", "owner", ".", "name", "result", "[", "\"__userurl__\"", "]", "=", "owner", ".", "html_url", "result", "[", "\"__useravatar__\"", "]", "=", "owner", ".", "avatar_url", "result", "[", "\"__useremail__\"", "]", "=", "owner", ".", "email", "return", "result" ]
Returns a dictionary of fields and values that are common to all events for which fields dictionaries are created.
[ "Returns", "a", "dictionary", "of", "fields", "and", "values", "that", "are", "common", "to", "all", "events", "for", "which", "fields", "dictionaries", "are", "created", "." ]
python
train
pgmpy/pgmpy
pgmpy/inference/mplp.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/inference/mplp.py#L270-L289
def find_triangles(self):
    """
    Finds all the triangles present in the given model

    Examples
    --------
    >>> from pgmpy.models import MarkovModel
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> from pgmpy.inference import Mplp
    >>> mm = MarkovModel()
    >>> mm.add_nodes_from(['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7'])
    >>> mm.add_edges_from([('x1', 'x3'), ('x1', 'x4'), ('x2', 'x4'),
    ...                    ('x2', 'x5'), ('x3', 'x6'), ('x4', 'x6'),
    ...                    ('x4', 'x7'), ('x5', 'x7')])
    >>> phi = [DiscreteFactor(edge, [2, 2], np.random.rand(4)) for edge in mm.edges()]
    >>> mm.add_factors(*phi)
    >>> mplp = Mplp(mm)
    >>> mplp.find_triangles()
    """
    return list(filter(lambda x: len(x) == 3, nx.find_cliques(self.model)))
[ "def", "find_triangles", "(", "self", ")", ":", "return", "list", "(", "filter", "(", "lambda", "x", ":", "len", "(", "x", ")", "==", "3", ",", "nx", ".", "find_cliques", "(", "self", ".", "model", ")", ")", ")" ]
Finds all the triangles present in the given model Examples -------- >>> from pgmpy.models import MarkovModel >>> from pgmpy.factors.discrete import DiscreteFactor >>> from pgmpy.inference import Mplp >>> mm = MarkovModel() >>> mm.add_nodes_from(['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7']) >>> mm.add_edges_from([('x1', 'x3'), ('x1', 'x4'), ('x2', 'x4'), ... ('x2', 'x5'), ('x3', 'x6'), ('x4', 'x6'), ... ('x4', 'x7'), ('x5', 'x7')]) >>> phi = [DiscreteFactor(edge, [2, 2], np.random.rand(4)) for edge in mm.edges()] >>> mm.add_factors(*phi) >>> mplp = Mplp(mm) >>> mplp.find_triangles()
[ "Finds", "all", "the", "triangles", "present", "in", "the", "given", "model" ]
python
train
gwastro/pycbc
pycbc/inference/models/base.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/models/base.py#L166-L183
def apply(self, samples, inverse=False):
    """Applies the sampling transforms to the given samples.

    Parameters
    ----------
    samples : dict or FieldArray
        The samples to apply the transforms to.
    inverse : bool, optional
        Whether to apply the inverse transforms (i.e., go from the sampling
        args to the ``variable_params``). Default is False.

    Returns
    -------
    dict or FieldArray
        The transformed samples, along with the original samples.
    """
    return transforms.apply_transforms(samples, self.sampling_transforms,
                                       inverse=inverse)
[ "def", "apply", "(", "self", ",", "samples", ",", "inverse", "=", "False", ")", ":", "return", "transforms", ".", "apply_transforms", "(", "samples", ",", "self", ".", "sampling_transforms", ",", "inverse", "=", "inverse", ")" ]
Applies the sampling transforms to the given samples. Parameters ---------- samples : dict or FieldArray The samples to apply the transforms to. inverse : bool, optional Whether to apply the inverse transforms (i.e., go from the sampling args to the ``variable_params``). Default is False. Returns ------- dict or FieldArray The transformed samples, along with the original samples.
[ "Applies", "the", "sampling", "transforms", "to", "the", "given", "samples", "." ]
python
train
ThreatConnect-Inc/tcex
tcex/tcex_logger.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_logger.py#L53-L58
def emit(self, record):
    """Emit the log record."""
    self.entries.append(self.format(record))
    if len(self.entries) > self.flush_limit and not self.session.auth.renewing:
        self.log_to_api()
        self.entries = []
[ "def", "emit", "(", "self", ",", "record", ")", ":", "self", ".", "entries", ".", "append", "(", "self", ".", "format", "(", "record", ")", ")", "if", "len", "(", "self", ".", "entries", ")", ">", "self", ".", "flush_limit", "and", "not", "self", ".", "session", ".", "auth", ".", "renewing", ":", "self", ".", "log_to_api", "(", ")", "self", ".", "entries", "=", "[", "]" ]
Emit the log record.
[ "Emit", "the", "log", "record", "." ]
python
train
jurismarches/chopper
chopper/css/extractor.py
https://github.com/jurismarches/chopper/blob/53c5489a53e3a5d205a5cb207df751c09633e7ce/chopper/css/extractor.py#L225-L236
def _selector_as_string(self, selector):
    """
    Returns a selector as a CSS string

    :param selector: A list of tinycss Tokens
    :type selector: list

    :returns: The CSS string for the selector
    :rtype: str
    """
    return ','.join(
        ''.join(token.as_css() for token in strip_whitespace(token_list))
        for token_list in split_on_comma(selector))
[ "def", "_selector_as_string", "(", "self", ",", "selector", ")", ":", "return", "','", ".", "join", "(", "''", ".", "join", "(", "token", ".", "as_css", "(", ")", "for", "token", "in", "strip_whitespace", "(", "token_list", ")", ")", "for", "token_list", "in", "split_on_comma", "(", "selector", ")", ")" ]
Returns a selector as a CSS string :param selector: A list of tinycss Tokens :type selector: list :returns: The CSS string for the selector :rtype: str
[ "Returns", "a", "selector", "as", "a", "CSS", "string" ]
python
train
workforce-data-initiative/skills-utils
skills_utils/metta.py
https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/metta.py#L27-L45
def metta_config(quarter, num_dimensions):
    """Returns metta metadata for a quarter's SOC code classifier matrix

    Args:
        quarter (str) quarter, in format '2015Q1'
        num_dimensions (int) Number of features in matrix

    Returns: (dict) metadata suitable for metta.archive_train_test
    """
    first_day, last_day = quarter_boundaries(quarter)
    return {
        'start_time': first_day,
        'end_time': last_day,
        'prediction_window': 3,  # ???
        'label_name': 'onet_soc_code',
        'label_type': 'categorical',
        'matrix_id': 'job_postings_{}'.format(quarter),
        'feature_names': ['doc2vec_{}'.format(i) for i in range(num_dimensions)],
    }
[ "def", "metta_config", "(", "quarter", ",", "num_dimensions", ")", ":", "first_day", ",", "last_day", "=", "quarter_boundaries", "(", "quarter", ")", "return", "{", "'start_time'", ":", "first_day", ",", "'end_time'", ":", "last_day", ",", "'prediction_window'", ":", "3", ",", "# ???", "'label_name'", ":", "'onet_soc_code'", ",", "'label_type'", ":", "'categorical'", ",", "'matrix_id'", ":", "'job_postings_{}'", ".", "format", "(", "quarter", ")", ",", "'feature_names'", ":", "[", "'doc2vec_{}'", ".", "format", "(", "i", ")", "for", "i", "in", "range", "(", "num_dimensions", ")", "]", ",", "}" ]
Returns metta metadata for a quarter's SOC code classifier matrix Args: quarter (str) quarter, in format '2015Q1' num_dimensions (int) Number of features in matrix Returns: (dict) metadata suitable for metta.archive_train_test
[ "Returns", "metta", "metadata", "for", "a", "quarter", "s", "SOC", "code", "classifier", "matrix" ]
python
train
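A quick usage sketch of metta_config above; apart from quarter_boundaries the function is self-contained, so the call shape follows directly from the record:

    # Build metadata for 2015Q1 with a 100-dimensional doc2vec feature space.
    config = metta_config('2015Q1', 100)
    print(config['matrix_id'])           # 'job_postings_2015Q1'
    print(len(config['feature_names']))  # 100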
RedFantom/ttkwidgets
ttkwidgets/checkboxtreeview.py
https://github.com/RedFantom/ttkwidgets/blob/02150322060f867b6e59a175522ef84b09168019/ttkwidgets/checkboxtreeview.py#L104-L119
def change_state(self, item, state):
    """
    Replace the current state of the item.

    i.e. replace the current state tag but keeps the other tags.

    :param item: item id
    :type item: str
    :param state: "checked", "unchecked" or "tristate": new state of the item
    :type state: str
    """
    tags = self.item(item, "tags")
    states = ("checked", "unchecked", "tristate")
    new_tags = [t for t in tags if t not in states]
    new_tags.append(state)
    self.item(item, tags=tuple(new_tags))
[ "def", "change_state", "(", "self", ",", "item", ",", "state", ")", ":", "tags", "=", "self", ".", "item", "(", "item", ",", "\"tags\"", ")", "states", "=", "(", "\"checked\"", ",", "\"unchecked\"", ",", "\"tristate\"", ")", "new_tags", "=", "[", "t", "for", "t", "in", "tags", "if", "t", "not", "in", "states", "]", "new_tags", ".", "append", "(", "state", ")", "self", ".", "item", "(", "item", ",", "tags", "=", "tuple", "(", "new_tags", ")", ")" ]
Replace the current state of the item. i.e. replace the current state tag but keeps the other tags. :param item: item id :type item: str :param state: "checked", "unchecked" or "tristate": new state of the item :type state: str
[ "Replace", "the", "current", "state", "of", "the", "item", "." ]
python
train
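A minimal tkinter sketch for change_state above; CheckboxTreeview is the ttkwidgets class this method belongs to, and the item id is made up for illustration:

    import tkinter as tk
    from ttkwidgets import CheckboxTreeview

    root = tk.Tk()
    tree = CheckboxTreeview(root)
    tree.pack()
    tree.insert("", "end", "item1", text="First item")
    # Swap the state tag while keeping any other tags on the item.
    tree.change_state("item1", "checked")
    root.mainloop()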
MartinThoma/hwrt
bin/merge.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/bin/merge.py#L26-L34
def read_raw(data_path):
    """
    Parameters
    ----------
    data_path : str
    """
    with open(data_path, 'rb') as f:
        data = pickle.load(f)
    return data
[ "def", "read_raw", "(", "data_path", ")", ":", "with", "open", "(", "data_path", ",", "'rb'", ")", "as", "f", ":", "data", "=", "pickle", ".", "load", "(", "f", ")", "return", "data" ]
Parameters ---------- data_path : str
[ "Parameters", "----------", "data_path", ":", "str" ]
python
train
trailofbits/manticore
manticore/native/memory.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/memory.py#L579-L614
def _search(self, size, start=None, counter=0):
    """
    Recursively searches the address space for enough free space to allocate C{size} bytes.

    :param size: the size in bytes to allocate.
    :param start: an address from where to start the search.
    :param counter: internal parameter to know if all the memory was already scanned.
    :return: the address of an available space to map C{size} bytes.
    :raises MemoryException: if there is no space available to allocate the desired memory.
    :rtype: int

    todo: Document what happens when you try to allocate something that goes
    round the address 32/64 bit representation.
    """
    assert size & self.page_mask == 0
    if start is None:
        end = {32: 0xf8000000, 64: 0x0000800000000000}[self.memory_bit_size]
        start = end - size
    else:
        if start > self.memory_size - size:
            start = self.memory_size - size
        end = start + size

    consecutive_free = 0
    for p in range(self._page(end - 1), -1, -1):
        if p not in self._page2map:
            consecutive_free += 0x1000
        else:
            consecutive_free = 0
        if consecutive_free >= size:
            return p << self.page_bit_size
        counter += 1
        if counter >= self.memory_size // self.page_size:
            raise MemoryException('Not enough memory')

    return self._search(size, self.memory_size - size, counter)
[ "def", "_search", "(", "self", ",", "size", ",", "start", "=", "None", ",", "counter", "=", "0", ")", ":", "assert", "size", "&", "self", ".", "page_mask", "==", "0", "if", "start", "is", "None", ":", "end", "=", "{", "32", ":", "0xf8000000", ",", "64", ":", "0x0000800000000000", "}", "[", "self", ".", "memory_bit_size", "]", "start", "=", "end", "-", "size", "else", ":", "if", "start", ">", "self", ".", "memory_size", "-", "size", ":", "start", "=", "self", ".", "memory_size", "-", "size", "end", "=", "start", "+", "size", "consecutive_free", "=", "0", "for", "p", "in", "range", "(", "self", ".", "_page", "(", "end", "-", "1", ")", ",", "-", "1", ",", "-", "1", ")", ":", "if", "p", "not", "in", "self", ".", "_page2map", ":", "consecutive_free", "+=", "0x1000", "else", ":", "consecutive_free", "=", "0", "if", "consecutive_free", ">=", "size", ":", "return", "p", "<<", "self", ".", "page_bit_size", "counter", "+=", "1", "if", "counter", ">=", "self", ".", "memory_size", "//", "self", ".", "page_size", ":", "raise", "MemoryException", "(", "'Not enough memory'", ")", "return", "self", ".", "_search", "(", "size", ",", "self", ".", "memory_size", "-", "size", ",", "counter", ")" ]
Recursively searches the address space for enough free space to allocate C{size} bytes. :param size: the size in bytes to allocate. :param start: an address from where to start the search. :param counter: internal parameter to know if all the memory was already scanned. :return: the address of an available space to map C{size} bytes. :raises MemoryException: if there is no space available to allocate the desired memory. :rtype: int todo: Document what happens when you try to allocate something that goes round the address 32/64 bit representation.
[ "Recursively", "searches", "the", "address", "space", "for", "enough", "free", "space", "to", "allocate", "C", "{", "size", "}", "bytes", "." ]
python
valid
JdeRobot/base
src/libs/comm_py/comm/ros/listenerLaser.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/libs/comm_py/comm/ros/listenerLaser.py#L86-L97
def getLaserData(self):
    '''
    Returns last LaserData.

    @return last JdeRobotTypes LaserData saved

    '''
    self.lock.acquire()
    laser = self.data
    self.lock.release()

    return laser
[ "def", "getLaserData", "(", "self", ")", ":", "self", ".", "lock", ".", "acquire", "(", ")", "laser", "=", "self", ".", "data", "self", ".", "lock", ".", "release", "(", ")", "return", "laser" ]
Returns last LaserData. @return last JdeRobotTypes LaserData saved
[ "Returns", "last", "LaserData", "." ]
python
train
Robpol86/libnl
libnl/list_.py
https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/list_.py#L101-L121
def nl_list_for_each_entry_safe(pos, n, head, member):
    """https://github.com/thom311/libnl/blob/libnl3_2_25/include/netlink/list.h#L84.

    Positional arguments:
    pos -- class instance holding an nl_list_head instance.
    n -- class instance holding an nl_list_head instance.
    head -- nl_list_head class instance.
    member -- attribute (string).

    Returns:
    Generator yielding a class instances.
    """
    pos = nl_list_entry(head.next_, type(pos), member)
    n = nl_list_entry(pos.member.next_, type(pos), member)
    while True:
        yield pos
        if getattr(pos, member) != head:
            pos = n
            n = nl_list_entry(n.member.next_, type(n), member)
            continue
        break
[ "def", "nl_list_for_each_entry_safe", "(", "pos", ",", "n", ",", "head", ",", "member", ")", ":", "pos", "=", "nl_list_entry", "(", "head", ".", "next_", ",", "type", "(", "pos", ")", ",", "member", ")", "n", "=", "nl_list_entry", "(", "pos", ".", "member", ".", "next_", ",", "type", "(", "pos", ")", ",", "member", ")", "while", "True", ":", "yield", "pos", "if", "getattr", "(", "pos", ",", "member", ")", "!=", "head", ":", "pos", "=", "n", "n", "=", "nl_list_entry", "(", "n", ".", "member", ".", "next_", ",", "type", "(", "n", ")", ",", "member", ")", "continue", "break" ]
https://github.com/thom311/libnl/blob/libnl3_2_25/include/netlink/list.h#L84. Positional arguments: pos -- class instance holding an nl_list_head instance. n -- class instance holding an nl_list_head instance. head -- nl_list_head class instance. member -- attribute (string). Returns: Generator yielding a class instances.
[ "https", ":", "//", "github", ".", "com", "/", "thom311", "/", "libnl", "/", "blob", "/", "libnl3_2_25", "/", "include", "/", "netlink", "/", "list", ".", "h#L84", "." ]
python
train
codelv/enaml-native
src/enamlnative/core/hotswap/autoreload.py
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/core/hotswap/autoreload.py#L494-L528
def aimport(self, parameter_s='', stream=None):
    """%aimport => Import modules for automatic reloading.

    %aimport
    List modules to automatically import and not to import.

    %aimport foo
    Import module 'foo' and mark it to be autoreloaded for %autoreload 1

    %aimport foo, bar
    Import modules 'foo', 'bar' and mark them to be autoreloaded for %autoreload 1

    %aimport -foo
    Mark module 'foo' to not be autoreloaded for %autoreload 1
    """
    modname = parameter_s
    if not modname:
        to_reload = sorted(self._reloader.modules.keys())
        to_skip = sorted(self._reloader.skip_modules.keys())
        if stream is None:
            stream = sys.stdout
        if self._reloader.check_all:
            stream.write("Modules to reload:\nall-except-skipped\n")
        else:
            stream.write("Modules to reload:\n%s\n" % ' '.join(to_reload))
        stream.write("\nModules to skip:\n%s\n" % ' '.join(to_skip))
    elif modname.startswith('-'):
        modname = modname[1:]
        self._reloader.mark_module_skipped(modname)
    else:
        for _module in ([_.strip() for _ in modname.split(',')]):
            top_module, top_name = self._reloader.aimport_module(_module)

            # Inject module to user namespace
            self.shell.push({top_name: top_module})
[ "def", "aimport", "(", "self", ",", "parameter_s", "=", "''", ",", "stream", "=", "None", ")", ":", "modname", "=", "parameter_s", "if", "not", "modname", ":", "to_reload", "=", "sorted", "(", "self", ".", "_reloader", ".", "modules", ".", "keys", "(", ")", ")", "to_skip", "=", "sorted", "(", "self", ".", "_reloader", ".", "skip_modules", ".", "keys", "(", ")", ")", "if", "stream", "is", "None", ":", "stream", "=", "sys", ".", "stdout", "if", "self", ".", "_reloader", ".", "check_all", ":", "stream", ".", "write", "(", "\"Modules to reload:\\nall-except-skipped\\n\"", ")", "else", ":", "stream", ".", "write", "(", "\"Modules to reload:\\n%s\\n\"", "%", "' '", ".", "join", "(", "to_reload", ")", ")", "stream", ".", "write", "(", "\"\\nModules to skip:\\n%s\\n\"", "%", "' '", ".", "join", "(", "to_skip", ")", ")", "elif", "modname", ".", "startswith", "(", "'-'", ")", ":", "modname", "=", "modname", "[", "1", ":", "]", "self", ".", "_reloader", ".", "mark_module_skipped", "(", "modname", ")", "else", ":", "for", "_module", "in", "(", "[", "_", ".", "strip", "(", ")", "for", "_", "in", "modname", ".", "split", "(", "','", ")", "]", ")", ":", "top_module", ",", "top_name", "=", "self", ".", "_reloader", ".", "aimport_module", "(", "_module", ")", "# Inject module to user namespace", "self", ".", "shell", ".", "push", "(", "{", "top_name", ":", "top_module", "}", ")" ]
%aimport => Import modules for automatic reloading. %aimport List modules to automatically import and not to import. %aimport foo Import module 'foo' and mark it to be autoreloaded for %autoreload 1 %aimport foo, bar Import modules 'foo', 'bar' and mark them to be autoreloaded for %autoreload 1 %aimport -foo Mark module 'foo' to not be autoreloaded for %autoreload 1
[ "%aimport", "=", ">", "Import", "modules", "for", "automatic", "reloading", "." ]
python
train
datastore/datastore
datastore/core/key.py
https://github.com/datastore/datastore/blob/7ccf0cd4748001d3dbf5e6dda369b0f63e0269d3/datastore/core/key.py#L115-L118
def instance(self, other):
    '''Returns an instance Key, by appending a name to the namespace.'''
    assert '/' not in str(other)
    return Key(str(self) + ':' + str(other))
[ "def", "instance", "(", "self", ",", "other", ")", ":", "assert", "'/'", "not", "in", "str", "(", "other", ")", "return", "Key", "(", "str", "(", "self", ")", "+", "':'", "+", "str", "(", "other", ")", ")" ]
Returns an instance Key, by appending a name to the namespace.
[ "Returns", "an", "instance", "Key", "by", "appending", "a", "name", "to", "the", "namespace", "." ]
python
train
cggh/scikit-allel
allel/chunked/core.py
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/chunked/core.py#L65-L92
def copy_table(tbl, start=0, stop=None, blen=None, storage=None, create='table',
               **kwargs):
    """Copy `tbl` block-wise into a new table."""

    # setup
    names, columns = _util.check_table_like(tbl)
    storage = _util.get_storage(storage)
    blen = _util.get_blen_table(tbl, blen)
    if stop is None:
        stop = len(columns[0])
    else:
        stop = min(stop, len(columns[0]))
    length = stop - start
    if length < 0:
        raise ValueError('invalid stop/start')

    # copy block-wise
    out = None
    for i in range(start, stop, blen):
        j = min(i+blen, stop)
        res = [c[i:j] for c in columns]
        if out is None:
            out = getattr(storage, create)(res, names=names,
                                           expectedlen=length, **kwargs)
        else:
            out.append(res)

    return out
[ "def", "copy_table", "(", "tbl", ",", "start", "=", "0", ",", "stop", "=", "None", ",", "blen", "=", "None", ",", "storage", "=", "None", ",", "create", "=", "'table'", ",", "*", "*", "kwargs", ")", ":", "# setup", "names", ",", "columns", "=", "_util", ".", "check_table_like", "(", "tbl", ")", "storage", "=", "_util", ".", "get_storage", "(", "storage", ")", "blen", "=", "_util", ".", "get_blen_table", "(", "tbl", ",", "blen", ")", "if", "stop", "is", "None", ":", "stop", "=", "len", "(", "columns", "[", "0", "]", ")", "else", ":", "stop", "=", "min", "(", "stop", ",", "len", "(", "columns", "[", "0", "]", ")", ")", "length", "=", "stop", "-", "start", "if", "length", "<", "0", ":", "raise", "ValueError", "(", "'invalid stop/start'", ")", "# copy block-wise", "out", "=", "None", "for", "i", "in", "range", "(", "start", ",", "stop", ",", "blen", ")", ":", "j", "=", "min", "(", "i", "+", "blen", ",", "stop", ")", "res", "=", "[", "c", "[", "i", ":", "j", "]", "for", "c", "in", "columns", "]", "if", "out", "is", "None", ":", "out", "=", "getattr", "(", "storage", ",", "create", ")", "(", "res", ",", "names", "=", "names", ",", "expectedlen", "=", "length", ",", "*", "*", "kwargs", ")", "else", ":", "out", ".", "append", "(", "res", ")", "return", "out" ]
Copy `tbl` block-wise into a new table.
[ "Copy", "tbl", "block", "-", "wise", "into", "a", "new", "table", "." ]
python
train
Dallinger/Dallinger
dallinger/deployment.py
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/deployment.py#L570-L579
def notify(self, message):
    """Monitor output from heroku process.

    This overrides the base class's `notify`
    to make sure that we stop if the status-monitoring thread
    has determined that the experiment is complete.
    """
    if self.complete:
        return HerokuLocalWrapper.MONITOR_STOP
    return super(DebugDeployment, self).notify(message)
[ "def", "notify", "(", "self", ",", "message", ")", ":", "if", "self", ".", "complete", ":", "return", "HerokuLocalWrapper", ".", "MONITOR_STOP", "return", "super", "(", "DebugDeployment", ",", "self", ")", ".", "notify", "(", "message", ")" ]
Monitor output from heroku process. This overrides the base class's `notify` to make sure that we stop if the status-monitoring thread has determined that the experiment is complete.
[ "Monitor", "output", "from", "heroku", "process", "." ]
python
train
aio-libs/aioredis
aioredis/commands/streams.py
https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/commands/streams.py#L193-L200
def xclaim(self, stream, group_name, consumer_name, min_idle_time, id, *ids):
    """Claim a message for a given consumer"""
    fut = self.execute(
        b'XCLAIM', stream, group_name, consumer_name, min_idle_time, id, *ids
    )
    return wait_convert(fut, parse_messages)
[ "def", "xclaim", "(", "self", ",", "stream", ",", "group_name", ",", "consumer_name", ",", "min_idle_time", ",", "id", ",", "*", "ids", ")", ":", "fut", "=", "self", ".", "execute", "(", "b'XCLAIM'", ",", "stream", ",", "group_name", ",", "consumer_name", ",", "min_idle_time", ",", "id", ",", "*", "ids", ")", "return", "wait_convert", "(", "fut", ",", "parse_messages", ")" ]
Claim a message for a given consumer
[ "Claim", "a", "message", "for", "a", "given", "consumer" ]
python
train
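A sketch of calling xclaim above, assuming the aioredis 1.x connection API; the stream, group, and message id values are placeholders:

    import asyncio
    import aioredis

    async def main():
        redis = await aioredis.create_redis('redis://localhost')
        # Claim a pending message for 'consumer-2' if it has been idle for 60 s.
        claimed = await redis.xclaim('mystream', 'mygroup', 'consumer-2',
                                     60000, '1526569495631-0')
        print(claimed)
        redis.close()
        await redis.wait_closed()

    asyncio.get_event_loop().run_until_complete(main())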
aichaos/rivescript-python
rivescript/rivescript.py
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L926-L947
def reply(self, user, msg, errors_as_replies=True):
    """Fetch a reply from the RiveScript brain.

    Arguments:
        user (str): A unique user ID for the person requesting a reply.
            This could be e.g. a screen name or nickname. It's used internally
            to store user variables (including topic and history), so if your
            bot has multiple users each one should have a unique ID.
        msg (str): The user's message. This is allowed to contain
            punctuation and such, but any extraneous data such as HTML tags
            should be removed in advance.
        errors_as_replies (bool): When errors are encountered (such as a
            deep recursion error, no reply matched, etc.) this will make the
            reply be a text representation of the error message. If you set
            this to ``False``, errors will instead raise an exception, such
            as a ``DeepRecursionError`` or ``NoReplyError``. By default, no
            exceptions are raised and errors are set in the reply instead.

    Returns:
        str: The reply output.
    """
    return self._brain.reply(user, msg, errors_as_replies)
[ "def", "reply", "(", "self", ",", "user", ",", "msg", ",", "errors_as_replies", "=", "True", ")", ":", "return", "self", ".", "_brain", ".", "reply", "(", "user", ",", "msg", ",", "errors_as_replies", ")" ]
Fetch a reply from the RiveScript brain. Arguments: user (str): A unique user ID for the person requesting a reply. This could be e.g. a screen name or nickname. It's used internally to store user variables (including topic and history), so if your bot has multiple users each one should have a unique ID. msg (str): The user's message. This is allowed to contain punctuation and such, but any extraneous data such as HTML tags should be removed in advance. errors_as_replies (bool): When errors are encountered (such as a deep recursion error, no reply matched, etc.) this will make the reply be a text representation of the error message. If you set this to ``False``, errors will instead raise an exception, such as a ``DeepRecursionError`` or ``NoReplyError``. By default, no exceptions are raised and errors are set in the reply instead. Returns: str: The reply output.
[ "Fetch", "a", "reply", "from", "the", "RiveScript", "brain", "." ]
python
train
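A minimal sketch of fetching a reply with the method above; the brain directory is a placeholder:

    from rivescript import RiveScript

    bot = RiveScript()
    bot.load_directory("./brain")   # hypothetical directory of .rive files
    bot.sort_replies()
    print(bot.reply("localuser", "Hello, bot!"))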
getnikola/coil
coil/tasks.py
https://github.com/getnikola/coil/blob/80ef1827460b0691cf2c98351a14d88e235c9899/coil/tasks.py#L122-L138
def orphans_single(default_exec=False):
    """Remove all orphans in the site, in the single user-mode."""
    if not default_exec and executable.endswith('uwsgi'):
        # default_exec => rq => sys.executable is sane
        _executable = executable[:-5] + 'python'
    else:
        _executable = executable

    p = subprocess.Popen([_executable, '-m', 'nikola', 'orphans'],
                         stdout=subprocess.PIPE)
    p.wait()
    files = [l.strip().decode('utf-8') for l in p.stdout.readlines()]
    for f in files:
        if f:
            os.unlink(f)

    out = '\n'.join(files)
    return p.returncode, out
[ "def", "orphans_single", "(", "default_exec", "=", "False", ")", ":", "if", "not", "default_exec", "and", "executable", ".", "endswith", "(", "'uwsgi'", ")", ":", "# default_exec => rq => sys.executable is sane", "_executable", "=", "executable", "[", ":", "-", "5", "]", "+", "'python'", "else", ":", "_executable", "=", "executable", "p", "=", "subprocess", ".", "Popen", "(", "[", "_executable", ",", "'-m'", ",", "'nikola'", ",", "'orphans'", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "p", ".", "wait", "(", ")", "files", "=", "[", "l", ".", "strip", "(", ")", ".", "decode", "(", "'utf-8'", ")", "for", "l", "in", "p", ".", "stdout", ".", "readlines", "(", ")", "]", "for", "f", "in", "files", ":", "if", "f", ":", "os", ".", "unlink", "(", "f", ")", "out", "=", "'\\n'", ".", "join", "(", "files", ")", "return", "p", ".", "returncode", ",", "out" ]
Remove all orphans in the site, in the single user-mode.
[ "Remove", "all", "orphans", "in", "the", "site", "in", "the", "single", "user", "-", "mode", "." ]
python
train
spotify/gordon
gordon/metrics/log.py
https://github.com/spotify/gordon/blob/8dbf54a032cfaa8f003264682456236b6a69c039/gordon/metrics/log.py#L89-L98
def log(self, metric):
    """Format and output metric.

    Args:
        metric (dict): Complete metric.
    """
    message = self.LOGFMT.format(**metric)
    if metric['context']:
        message += ' context: {context}'.format(context=metric['context'])
    self._logger.log(self.level, message)
[ "def", "log", "(", "self", ",", "metric", ")", ":", "message", "=", "self", ".", "LOGFMT", ".", "format", "(", "*", "*", "metric", ")", "if", "metric", "[", "'context'", "]", ":", "message", "+=", "' context: {context}'", ".", "format", "(", "context", "=", "metric", "[", "'context'", "]", ")", "self", ".", "_logger", ".", "log", "(", "self", ".", "level", ",", "message", ")" ]
Format and output metric. Args: metric (dict): Complete metric.
[ "Format", "and", "output", "metric", "." ]
python
train
recurly/recurly-client-python
recurly/__init__.py
https://github.com/recurly/recurly-client-python/blob/682217c4e85ec5c8d4e41519ee0620d2dc4d84d7/recurly/__init__.py#L1303-L1327
def refund(self, **kwargs):
    """Refund this transaction.

    Calling this method returns the refunded transaction (that is,
    ``self``) if the refund was successful, or raises a `ResponseError` if
    an error occurred requesting the refund. After a successful call to
    `refund()`, to retrieve the new transaction representing the refund,
    use the `get_refund_transaction()` method.
    """
    # Find the URL and method to refund the transaction.
    try:
        selfnode = self._elem
    except AttributeError:
        raise AttributeError('refund')
    url, method = None, None
    for anchor_elem in selfnode.findall('a'):
        if anchor_elem.attrib.get('name') == 'refund':
            url = anchor_elem.attrib['href']
            method = anchor_elem.attrib['method'].upper()
    if url is None or method is None:
        raise AttributeError("refund")  # should do something more specific probably

    actionator = self._make_actionator(url, method,
                                       extra_handler=self._handle_refund_accepted)
    return actionator(**kwargs)
[ "def", "refund", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# Find the URL and method to refund the transaction.", "try", ":", "selfnode", "=", "self", ".", "_elem", "except", "AttributeError", ":", "raise", "AttributeError", "(", "'refund'", ")", "url", ",", "method", "=", "None", ",", "None", "for", "anchor_elem", "in", "selfnode", ".", "findall", "(", "'a'", ")", ":", "if", "anchor_elem", ".", "attrib", ".", "get", "(", "'name'", ")", "==", "'refund'", ":", "url", "=", "anchor_elem", ".", "attrib", "[", "'href'", "]", "method", "=", "anchor_elem", ".", "attrib", "[", "'method'", "]", ".", "upper", "(", ")", "if", "url", "is", "None", "or", "method", "is", "None", ":", "raise", "AttributeError", "(", "\"refund\"", ")", "# should do something more specific probably", "actionator", "=", "self", ".", "_make_actionator", "(", "url", ",", "method", ",", "extra_handler", "=", "self", ".", "_handle_refund_accepted", ")", "return", "actionator", "(", "*", "*", "kwargs", ")" ]
Refund this transaction. Calling this method returns the refunded transaction (that is, ``self``) if the refund was successful, or raises a `ResponseError` if an error occurred requesting the refund. After a successful call to `refund()`, to retrieve the new transaction representing the refund, use the `get_refund_transaction()` method.
[ "Refund", "this", "transaction", "." ]
python
train
pyviz/holoviews
holoviews/core/io.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/io.py#L417-L466
def collect(self_or_cls, files, drop=[], metadata=True):
    """
    Given a list or NdMapping type containing file paths return
    a Layout of Collators, which can be called to load a given set
    of files using the current Importer.

    If supplied as a list each file is expected to disambiguate itself
    with contained metadata. If an NdMapping type is supplied additional
    key dimensions may be supplied as long as they do not clash with the
    file metadata. Any key dimension may be dropped by name by supplying
    a drop argument.
    """
    aslist = not isinstance(files, (NdMapping, Element))
    if isinstance(files, Element):
        files = Collator(files)
        file_kdims = files.kdims
    else:
        file_kdims = files.kdims

    drop_extra = files.drop if isinstance(files, Collator) else []
    mdata_dims = []
    if metadata:
        fnames = [fname[0] if isinstance(fname, tuple) else fname
                  for fname in files.values()]
        mdata_dims = {kdim for fname in fnames
                      for kdim in self_or_cls.key(fname).keys()}
    file_dims = set(files.dimensions('key', label=True))
    added_dims = set(mdata_dims) - file_dims
    overlap_dims = file_dims & set(mdata_dims)
    kwargs = dict(kdims=file_kdims + sorted(added_dims),
                  vdims=['filename', 'entries'],
                  value_transform=self_or_cls.loader,
                  drop=drop_extra + drop)
    layout_data = defaultdict(lambda: Collator(None, **kwargs))

    for key, fname in files.data.items():
        fname = fname[0] if isinstance(fname, tuple) else fname
        mdata = self_or_cls.key(fname) if metadata else {}
        for odim in overlap_dims:
            kval = key[files.get_dimension_index(odim)]
            if kval != mdata[odim]:
                raise KeyError("Metadata supplies inconsistent "
                               "value for dimension %s" % odim)
        mkey = tuple(mdata.get(d, None) for d in added_dims)
        key = mkey if aslist else key + mkey
        if isinstance(fname, tuple) and len(fname) == 1:
            (fname,) = fname
        for entry in self_or_cls.entries(fname):
            layout_data[entry][key] = (fname, [entry])

    return Layout(layout_data.items())
[ "def", "collect", "(", "self_or_cls", ",", "files", ",", "drop", "=", "[", "]", ",", "metadata", "=", "True", ")", ":", "aslist", "=", "not", "isinstance", "(", "files", ",", "(", "NdMapping", ",", "Element", ")", ")", "if", "isinstance", "(", "files", ",", "Element", ")", ":", "files", "=", "Collator", "(", "files", ")", "file_kdims", "=", "files", ".", "kdims", "else", ":", "file_kdims", "=", "files", ".", "kdims", "drop_extra", "=", "files", ".", "drop", "if", "isinstance", "(", "files", ",", "Collator", ")", "else", "[", "]", "mdata_dims", "=", "[", "]", "if", "metadata", ":", "fnames", "=", "[", "fname", "[", "0", "]", "if", "isinstance", "(", "fname", ",", "tuple", ")", "else", "fname", "for", "fname", "in", "files", ".", "values", "(", ")", "]", "mdata_dims", "=", "{", "kdim", "for", "fname", "in", "fnames", "for", "kdim", "in", "self_or_cls", ".", "key", "(", "fname", ")", ".", "keys", "(", ")", "}", "file_dims", "=", "set", "(", "files", ".", "dimensions", "(", "'key'", ",", "label", "=", "True", ")", ")", "added_dims", "=", "set", "(", "mdata_dims", ")", "-", "file_dims", "overlap_dims", "=", "file_dims", "&", "set", "(", "mdata_dims", ")", "kwargs", "=", "dict", "(", "kdims", "=", "file_kdims", "+", "sorted", "(", "added_dims", ")", ",", "vdims", "=", "[", "'filename'", ",", "'entries'", "]", ",", "value_transform", "=", "self_or_cls", ".", "loader", ",", "drop", "=", "drop_extra", "+", "drop", ")", "layout_data", "=", "defaultdict", "(", "lambda", ":", "Collator", "(", "None", ",", "*", "*", "kwargs", ")", ")", "for", "key", ",", "fname", "in", "files", ".", "data", ".", "items", "(", ")", ":", "fname", "=", "fname", "[", "0", "]", "if", "isinstance", "(", "fname", ",", "tuple", ")", "else", "fname", "mdata", "=", "self_or_cls", ".", "key", "(", "fname", ")", "if", "metadata", "else", "{", "}", "for", "odim", "in", "overlap_dims", ":", "kval", "=", "key", "[", "files", ".", "get_dimension_index", "(", "odim", ")", "]", "if", "kval", "!=", "mdata", "[", "odim", "]", ":", "raise", "KeyError", "(", "\"Metadata supplies inconsistent \"", "\"value for dimension %s\"", "%", "odim", ")", "mkey", "=", "tuple", "(", "mdata", ".", "get", "(", "d", ",", "None", ")", "for", "d", "in", "added_dims", ")", "key", "=", "mkey", "if", "aslist", "else", "key", "+", "mkey", "if", "isinstance", "(", "fname", ",", "tuple", ")", "and", "len", "(", "fname", ")", "==", "1", ":", "(", "fname", ",", ")", "=", "fname", "for", "entry", "in", "self_or_cls", ".", "entries", "(", "fname", ")", ":", "layout_data", "[", "entry", "]", "[", "key", "]", "=", "(", "fname", ",", "[", "entry", "]", ")", "return", "Layout", "(", "layout_data", ".", "items", "(", ")", ")" ]
Given a list or NdMapping type containing file paths return a Layout of Collators, which can be called to load a given set of files using the current Importer. If supplied as a list each file is expected to disambiguate itself with contained metadata. If an NdMapping type is supplied additional key dimensions may be supplied as long as they do not clash with the file metadata. Any key dimension may be dropped by name by supplying a drop argument.
[ "Given", "a", "list", "or", "NdMapping", "type", "containing", "file", "paths", "return", "a", "Layout", "of", "Collators", "which", "can", "be", "called", "to", "load", "a", "given", "set", "of", "files", "using", "the", "current", "Importer", "." ]
python
train
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/topology.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/topology.py#L418-L450
def _update_servers(self):
    """Sync our Servers from TopologyDescription.server_descriptions.

    Hold the lock while calling this.
    """
    for address, sd in self._description.server_descriptions().items():
        if address not in self._servers:
            monitor = self._settings.monitor_class(
                server_description=sd,
                topology=self,
                pool=self._create_pool_for_monitor(address),
                topology_settings=self._settings)

            weak = None
            if self._publish_server:
                weak = weakref.ref(self._events)
            server = Server(
                server_description=sd,
                pool=self._create_pool_for_server(address),
                monitor=monitor,
                topology_id=self._topology_id,
                listeners=self._listeners,
                events=weak)

            self._servers[address] = server
            server.open()
        else:
            self._servers[address].description = sd

    for address, server in list(self._servers.items()):
        if not self._description.has_server(address):
            server.close()
            self._servers.pop(address)
[ "def", "_update_servers", "(", "self", ")", ":", "for", "address", ",", "sd", "in", "self", ".", "_description", ".", "server_descriptions", "(", ")", ".", "items", "(", ")", ":", "if", "address", "not", "in", "self", ".", "_servers", ":", "monitor", "=", "self", ".", "_settings", ".", "monitor_class", "(", "server_description", "=", "sd", ",", "topology", "=", "self", ",", "pool", "=", "self", ".", "_create_pool_for_monitor", "(", "address", ")", ",", "topology_settings", "=", "self", ".", "_settings", ")", "weak", "=", "None", "if", "self", ".", "_publish_server", ":", "weak", "=", "weakref", ".", "ref", "(", "self", ".", "_events", ")", "server", "=", "Server", "(", "server_description", "=", "sd", ",", "pool", "=", "self", ".", "_create_pool_for_server", "(", "address", ")", ",", "monitor", "=", "monitor", ",", "topology_id", "=", "self", ".", "_topology_id", ",", "listeners", "=", "self", ".", "_listeners", ",", "events", "=", "weak", ")", "self", ".", "_servers", "[", "address", "]", "=", "server", "server", ".", "open", "(", ")", "else", ":", "self", ".", "_servers", "[", "address", "]", ".", "description", "=", "sd", "for", "address", ",", "server", "in", "list", "(", "self", ".", "_servers", ".", "items", "(", ")", ")", ":", "if", "not", "self", ".", "_description", ".", "has_server", "(", "address", ")", ":", "server", ".", "close", "(", ")", "self", ".", "_servers", ".", "pop", "(", "address", ")" ]
Sync our Servers from TopologyDescription.server_descriptions. Hold the lock while calling this.
[ "Sync", "our", "Servers", "from", "TopologyDescription", ".", "server_descriptions", "." ]
python
train
limodou/uliweb
uliweb/orm/__init__.py
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/orm/__init__.py#L916-L932
def set_model_config(model_name, config, replace=False): """ This function should be only used in initialization phrase :param model_name: model name it's should be string :param config: config should be dict. e.g. {'__mapping_only__', '__tablename__', '__ext_model__'} :param replace: if True, then replace original config, False will update """ assert isinstance(model_name, str) assert isinstance(config, dict) d = __models__.setdefault(model_name, {}) if replace: d['config'] = config else: c = d.setdefault('config', {}) c.update(config)
[ "def", "set_model_config", "(", "model_name", ",", "config", ",", "replace", "=", "False", ")", ":", "assert", "isinstance", "(", "model_name", ",", "str", ")", "assert", "isinstance", "(", "config", ",", "dict", ")", "d", "=", "__models__", ".", "setdefault", "(", "model_name", ",", "{", "}", ")", "if", "replace", ":", "d", "[", "'config'", "]", "=", "config", "else", ":", "c", "=", "d", ".", "setdefault", "(", "'config'", ",", "{", "}", ")", "c", ".", "update", "(", "config", ")" ]
This function should be only used in initialization phrase :param model_name: model name it's should be string :param config: config should be dict. e.g. {'__mapping_only__', '__tablename__', '__ext_model__'} :param replace: if True, then replace original config, False will update
[ "This", "function", "should", "be", "only", "used", "in", "initialization", "phrase", ":", "param", "model_name", ":", "model", "name", "it", "s", "should", "be", "string", ":", "param", "config", ":", "config", "should", "be", "dict", ".", "e", ".", "g", ".", "{", "__mapping_only__", "__tablename__", "__ext_model__", "}", ":", "param", "replace", ":", "if", "True", "then", "replace", "original", "config", "False", "will", "update" ]
python
train
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/device_directory/models/device_data_post_request.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/device_directory/models/device_data_post_request.py#L441-L452
def firmware_checksum(self, firmware_checksum): """ Sets the firmware_checksum of this DeviceDataPostRequest. The SHA256 checksum of the current firmware image. :param firmware_checksum: The firmware_checksum of this DeviceDataPostRequest. :type: str """ if firmware_checksum is not None and len(firmware_checksum) > 64: raise ValueError("Invalid value for `firmware_checksum`, length must be less than or equal to `64`") self._firmware_checksum = firmware_checksum
[ "def", "firmware_checksum", "(", "self", ",", "firmware_checksum", ")", ":", "if", "firmware_checksum", "is", "not", "None", "and", "len", "(", "firmware_checksum", ")", ">", "64", ":", "raise", "ValueError", "(", "\"Invalid value for `firmware_checksum`, length must be less than or equal to `64`\"", ")", "self", ".", "_firmware_checksum", "=", "firmware_checksum" ]
Sets the firmware_checksum of this DeviceDataPostRequest. The SHA256 checksum of the current firmware image. :param firmware_checksum: The firmware_checksum of this DeviceDataPostRequest. :type: str
[ "Sets", "the", "firmware_checksum", "of", "this", "DeviceDataPostRequest", ".", "The", "SHA256", "checksum", "of", "the", "current", "firmware", "image", "." ]
python
train
CellProfiler/centrosome
centrosome/cpmorphology.py
https://github.com/CellProfiler/centrosome/blob/7bd9350a2d4ae1b215b81eabcecfe560bbb1f32a/centrosome/cpmorphology.py#L2117-L2149
def ellipse_from_second_moments(image, labels, indexes, wants_compactness = False): """Calculate measurements of ellipses equivalent to the second moments of labels image - the intensity at each point labels - for each labeled object, derive an ellipse indexes - sequence of indexes to process returns the following arrays: coordinates of the center of the ellipse eccentricity major axis length minor axis length orientation compactness (if asked for) some definitions taken from "Image Moments-Based Structuring and Tracking of Objects", LOURENA ROCHA, LUIZ VELHO, PAULO CEZAR P. CARVALHO, http://sibgrapi.sid.inpe.br/col/sid.inpe.br/banon/2002/10.23.11.34/doc/35.pdf particularly equation 5 (which has some errors in it). These yield the rectangle with equivalent second moments. I translate to the ellipse by multiplying by 1.154701 which is Matlab's calculation of the major and minor axis length for a square of length X divided by the actual length of the side of a square of that length. eccentricity is the distance between foci divided by the major axis length orientation is the angle of the major axis with respect to the X axis compactness is the variance of the radial distribution normalized by the area """ if len(indexes) == 0: return (np.zeros((0,2)), np.zeros((0,)), np.zeros((0,)), np.zeros((0,)),np.zeros((0,))) i,j = np.argwhere(labels != 0).transpose() return ellipse_from_second_moments_ijv(i,j,image[i,j], labels[i,j], indexes, wants_compactness)
[ "def", "ellipse_from_second_moments", "(", "image", ",", "labels", ",", "indexes", ",", "wants_compactness", "=", "False", ")", ":", "if", "len", "(", "indexes", ")", "==", "0", ":", "return", "(", "np", ".", "zeros", "(", "(", "0", ",", "2", ")", ")", ",", "np", ".", "zeros", "(", "(", "0", ",", ")", ")", ",", "np", ".", "zeros", "(", "(", "0", ",", ")", ")", ",", "np", ".", "zeros", "(", "(", "0", ",", ")", ")", ",", "np", ".", "zeros", "(", "(", "0", ",", ")", ")", ")", "i", ",", "j", "=", "np", ".", "argwhere", "(", "labels", "!=", "0", ")", ".", "transpose", "(", ")", "return", "ellipse_from_second_moments_ijv", "(", "i", ",", "j", ",", "image", "[", "i", ",", "j", "]", ",", "labels", "[", "i", ",", "j", "]", ",", "indexes", ",", "wants_compactness", ")" ]
Calculate measurements of ellipses equivalent to the second moments of labels image - the intensity at each point labels - for each labeled object, derive an ellipse indexes - sequence of indexes to process returns the following arrays: coordinates of the center of the ellipse eccentricity major axis length minor axis length orientation compactness (if asked for) some definitions taken from "Image Moments-Based Structuring and Tracking of Objects", LOURENA ROCHA, LUIZ VELHO, PAULO CEZAR P. CARVALHO, http://sibgrapi.sid.inpe.br/col/sid.inpe.br/banon/2002/10.23.11.34/doc/35.pdf particularly equation 5 (which has some errors in it). These yield the rectangle with equivalent second moments. I translate to the ellipse by multiplying by 1.154701 which is Matlab's calculation of the major and minor axis length for a square of length X divided by the actual length of the side of a square of that length. eccentricity is the distance between foci divided by the major axis length orientation is the angle of the major axis with respect to the X axis compactness is the variance of the radial distribution normalized by the area
[ "Calculate", "measurements", "of", "ellipses", "equivalent", "to", "the", "second", "moments", "of", "labels", "image", "-", "the", "intensity", "at", "each", "point", "labels", "-", "for", "each", "labeled", "object", "derive", "an", "ellipse", "indexes", "-", "sequence", "of", "indexes", "to", "process", "returns", "the", "following", "arrays", ":", "coordinates", "of", "the", "center", "of", "the", "ellipse", "eccentricity", "major", "axis", "length", "minor", "axis", "length", "orientation", "compactness", "(", "if", "asked", "for", ")", "some", "definitions", "taken", "from", "Image", "Moments", "-", "Based", "Structuring", "and", "Tracking", "of", "Objects", "LOURENA", "ROCHA", "LUIZ", "VELHO", "PAULO", "CEZAR", "P", ".", "CARVALHO", "http", ":", "//", "sibgrapi", ".", "sid", ".", "inpe", ".", "br", "/", "col", "/", "sid", ".", "inpe", ".", "br", "/", "banon", "/", "2002", "/", "10", ".", "23", ".", "11", ".", "34", "/", "doc", "/", "35", ".", "pdf", "particularly", "equation", "5", "(", "which", "has", "some", "errors", "in", "it", ")", ".", "These", "yield", "the", "rectangle", "with", "equivalent", "second", "moments", ".", "I", "translate", "to", "the", "ellipse", "by", "multiplying", "by", "1", ".", "154701", "which", "is", "Matlab", "s", "calculation", "of", "the", "major", "and", "minor", "axis", "length", "for", "a", "square", "of", "length", "X", "divided", "by", "the", "actual", "length", "of", "the", "side", "of", "a", "square", "of", "that", "length", ".", "eccentricity", "is", "the", "distance", "between", "foci", "divided", "by", "the", "major", "axis", "length", "orientation", "is", "the", "angle", "of", "the", "major", "axis", "with", "respect", "to", "the", "X", "axis", "compactness", "is", "the", "variance", "of", "the", "radial", "distribution", "normalized", "by", "the", "area" ]
python
train
ambitioninc/django-query-builder
querybuilder/tables.py
https://github.com/ambitioninc/django-query-builder/blob/113a7d845d3ddc6a45621b9880308e756f87c5bf/querybuilder/tables.py#L213-L230
def remove_field(self, field): """ Removes a field from this table :param field: This can be a string of a field name, a dict of {'alias': field}, or a ``Field`` instance :type field: str or dict or :class:`Field <querybuilder.fields.Field>` """ new_field = FieldFactory( field, ) new_field.set_table(self) new_field_identifier = new_field.get_identifier() for field in self.fields: if field.get_identifier() == new_field_identifier: self.fields.remove(field) return field return None
[ "def", "remove_field", "(", "self", ",", "field", ")", ":", "new_field", "=", "FieldFactory", "(", "field", ",", ")", "new_field", ".", "set_table", "(", "self", ")", "new_field_identifier", "=", "new_field", ".", "get_identifier", "(", ")", "for", "field", "in", "self", ".", "fields", ":", "if", "field", ".", "get_identifier", "(", ")", "==", "new_field_identifier", ":", "self", ".", "fields", ".", "remove", "(", "field", ")", "return", "field", "return", "None" ]
Removes a field from this table :param field: This can be a string of a field name, a dict of {'alias': field}, or a ``Field`` instance :type field: str or dict or :class:`Field <querybuilder.fields.Field>`
[ "Removes", "a", "field", "from", "this", "table" ]
python
train
liip/requests_gpgauthlib
requests_gpgauthlib/gpgauth_session.py
https://github.com/liip/requests_gpgauthlib/blob/017711dfff6cc74cc4cb78ee05dec5e38564987e/requests_gpgauthlib/gpgauth_session.py#L208-L222
def is_authenticated_with_token(self): """ GPGAuth Stage 2 """ """ Send back the token to the server to get auth cookie """ server_login_response = post_log_in( self, keyid=self.user_fingerprint, user_token_result=self.user_auth_token ) if not check_server_login_stage2_response(server_login_response): raise GPGAuthStage2Exception("Login endpoint wrongly formatted") self.cookies.save(ignore_discard=True) logger.info('is_authenticated_with_token: OK') return True
[ "def", "is_authenticated_with_token", "(", "self", ")", ":", "\"\"\" Send back the token to the server to get auth cookie \"\"\"", "server_login_response", "=", "post_log_in", "(", "self", ",", "keyid", "=", "self", ".", "user_fingerprint", ",", "user_token_result", "=", "self", ".", "user_auth_token", ")", "if", "not", "check_server_login_stage2_response", "(", "server_login_response", ")", ":", "raise", "GPGAuthStage2Exception", "(", "\"Login endpoint wrongly formatted\"", ")", "self", ".", "cookies", ".", "save", "(", "ignore_discard", "=", "True", ")", "logger", ".", "info", "(", "'is_authenticated_with_token: OK'", ")", "return", "True" ]
GPGAuth Stage 2
[ "GPGAuth", "Stage", "2" ]
python
train
pyannote/pyannote-metrics
pyannote/metrics/base.py
https://github.com/pyannote/pyannote-metrics/blob/b433fec3bd37ca36fe026a428cd72483d646871a/pyannote/metrics/base.py#L296-L319
def confidence_interval(self, alpha=0.9): """Compute confidence interval on accumulated metric values Parameters ---------- alpha : float, optional Probability that the returned confidence interval contains the true metric value. Returns ------- (center, (lower, upper)) with center the mean of the conditional pdf of the metric value and (lower, upper) is a confidence interval centered on the median, containing the estimate to a probability alpha. See Also: --------- scipy.stats.bayes_mvs """ m, _, _ = scipy.stats.bayes_mvs( [r[self.metric_name_] for _, r in self.results_], alpha=alpha) return m
[ "def", "confidence_interval", "(", "self", ",", "alpha", "=", "0.9", ")", ":", "m", ",", "_", ",", "_", "=", "scipy", ".", "stats", ".", "bayes_mvs", "(", "[", "r", "[", "self", ".", "metric_name_", "]", "for", "_", ",", "r", "in", "self", ".", "results_", "]", ",", "alpha", "=", "alpha", ")", "return", "m" ]
Compute confidence interval on accumulated metric values Parameters ---------- alpha : float, optional Probability that the returned confidence interval contains the true metric value. Returns ------- (center, (lower, upper)) with center the mean of the conditional pdf of the metric value and (lower, upper) is a confidence interval centered on the median, containing the estimate to a probability alpha. See Also: --------- scipy.stats.bayes_mvs
[ "Compute", "confidence", "interval", "on", "accumulated", "metric", "values" ]
python
train
gwastro/pycbc
pycbc/transforms.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/transforms.py#L219-L239
def _copytoscratch(self, maps): """Copies the data in maps to the scratch space. If the maps contain arrays that are not the same shape as the scratch space, a new scratch space will be created. """ try: for p in self.inputs: self._scratch[p][:] = maps[p] except ValueError: # we'll get a ValueError if the scratch space isn't the same size # as the maps; in that case, re-create the scratch space with the # appropriate size and try again invals = maps[list(self.inputs)[0]] if isinstance(invals, numpy.ndarray): shape = invals.shape else: shape = len(invals) self._createscratch(shape) for p in self.inputs: self._scratch[p][:] = maps[p]
[ "def", "_copytoscratch", "(", "self", ",", "maps", ")", ":", "try", ":", "for", "p", "in", "self", ".", "inputs", ":", "self", ".", "_scratch", "[", "p", "]", "[", ":", "]", "=", "maps", "[", "p", "]", "except", "ValueError", ":", "# we'll get a ValueError if the scratch space isn't the same size", "# as the maps; in that case, re-create the scratch space with the", "# appropriate size and try again", "invals", "=", "maps", "[", "list", "(", "self", ".", "inputs", ")", "[", "0", "]", "]", "if", "isinstance", "(", "invals", ",", "numpy", ".", "ndarray", ")", ":", "shape", "=", "invals", ".", "shape", "else", ":", "shape", "=", "len", "(", "invals", ")", "self", ".", "_createscratch", "(", "shape", ")", "for", "p", "in", "self", ".", "inputs", ":", "self", ".", "_scratch", "[", "p", "]", "[", ":", "]", "=", "maps", "[", "p", "]" ]
Copies the data in maps to the scratch space. If the maps contain arrays that are not the same shape as the scratch space, a new scratch space will be created.
[ "Copies", "the", "data", "in", "maps", "to", "the", "scratch", "space", "." ]
python
train
pytorch/text
torchtext/data/dataset.py
https://github.com/pytorch/text/blob/26bfce6869dc704f1d86792f9a681d453d7e7bb8/torchtext/data/dataset.py#L201-L214
def filter_examples(self, field_names): """Remove unknown words from dataset examples with respect to given field. Arguments: field_names (list(str)): Within example only the parts with field names in field_names will have their unknown words deleted. """ for i, example in enumerate(self.examples): for field_name in field_names: vocab = set(self.fields[field_name].vocab.stoi) text = getattr(example, field_name) example_part = [word for word in text if word in vocab] setattr(example, field_name, example_part) self.examples[i] = example
[ "def", "filter_examples", "(", "self", ",", "field_names", ")", ":", "for", "i", ",", "example", "in", "enumerate", "(", "self", ".", "examples", ")", ":", "for", "field_name", "in", "field_names", ":", "vocab", "=", "set", "(", "self", ".", "fields", "[", "field_name", "]", ".", "vocab", ".", "stoi", ")", "text", "=", "getattr", "(", "example", ",", "field_name", ")", "example_part", "=", "[", "word", "for", "word", "in", "text", "if", "word", "in", "vocab", "]", "setattr", "(", "example", ",", "field_name", ",", "example_part", ")", "self", ".", "examples", "[", "i", "]", "=", "example" ]
Remove unknown words from dataset examples with respect to given field. Arguments: field_names (list(str)): Within example only the parts with field names in field_names will have their unknown words deleted.
[ "Remove", "unknown", "words", "from", "dataset", "examples", "with", "respect", "to", "given", "field", "." ]
python
train
saulpw/visidata
visidata/clipboard.py
https://github.com/saulpw/visidata/blob/32771e0cea6c24fc7902683d14558391395c591f/visidata/clipboard.py#L92-L104
def save(self, vs, filetype): 'Copy rows to the system clipboard.' # use NTF to generate filename and delete file on context exit with tempfile.NamedTemporaryFile(suffix='.'+filetype) as temp: saveSheets(temp.name, vs) sync(1) p = subprocess.Popen( self.command, stdin=open(temp.name, 'r', encoding=options.encoding), stdout=subprocess.DEVNULL, close_fds=True) p.communicate()
[ "def", "save", "(", "self", ",", "vs", ",", "filetype", ")", ":", "# use NTF to generate filename and delete file on context exit", "with", "tempfile", ".", "NamedTemporaryFile", "(", "suffix", "=", "'.'", "+", "filetype", ")", "as", "temp", ":", "saveSheets", "(", "temp", ".", "name", ",", "vs", ")", "sync", "(", "1", ")", "p", "=", "subprocess", ".", "Popen", "(", "self", ".", "command", ",", "stdin", "=", "open", "(", "temp", ".", "name", ",", "'r'", ",", "encoding", "=", "options", ".", "encoding", ")", ",", "stdout", "=", "subprocess", ".", "DEVNULL", ",", "close_fds", "=", "True", ")", "p", ".", "communicate", "(", ")" ]
Copy rows to the system clipboard.
[ "Copy", "rows", "to", "the", "system", "clipboard", "." ]
python
train
pyviz/holoviews
holoviews/core/spaces.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/spaces.py#L669-L683
def clone(self, callable=None, **overrides): """Clones the Callable optionally with new settings Args: callable: New callable function to wrap **overrides: Parameter overrides to apply Returns: Cloned Callable object """ old = {k: v for k, v in self.get_param_values() if k not in ['callable', 'name']} params = dict(old, **overrides) callable = self.callable if callable is None else callable return self.__class__(callable, **params)
[ "def", "clone", "(", "self", ",", "callable", "=", "None", ",", "*", "*", "overrides", ")", ":", "old", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "self", ".", "get_param_values", "(", ")", "if", "k", "not", "in", "[", "'callable'", ",", "'name'", "]", "}", "params", "=", "dict", "(", "old", ",", "*", "*", "overrides", ")", "callable", "=", "self", ".", "callable", "if", "callable", "is", "None", "else", "callable", "return", "self", ".", "__class__", "(", "callable", ",", "*", "*", "params", ")" ]
Clones the Callable optionally with new settings Args: callable: New callable function to wrap **overrides: Parameter overrides to apply Returns: Cloned Callable object
[ "Clones", "the", "Callable", "optionally", "with", "new", "settings" ]
python
train
apache/airflow
airflow/hooks/dbapi_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/dbapi_hook.py#L256-L272
def _serialize_cell(cell, conn=None): """ Returns the SQL literal of the cell as a string. :param cell: The cell to insert into the table :type cell: object :param conn: The database connection :type conn: connection object :return: The serialized cell :rtype: str """ if cell is None: return None if isinstance(cell, datetime): return cell.isoformat() return str(cell)
[ "def", "_serialize_cell", "(", "cell", ",", "conn", "=", "None", ")", ":", "if", "cell", "is", "None", ":", "return", "None", "if", "isinstance", "(", "cell", ",", "datetime", ")", ":", "return", "cell", ".", "isoformat", "(", ")", "return", "str", "(", "cell", ")" ]
Returns the SQL literal of the cell as a string. :param cell: The cell to insert into the table :type cell: object :param conn: The database connection :type conn: connection object :return: The serialized cell :rtype: str
[ "Returns", "the", "SQL", "literal", "of", "the", "cell", "as", "a", "string", "." ]
python
test
inveniosoftware/invenio-files-rest
invenio_files_rest/views.py
https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/views.py#L529-L566
def create_object(self, bucket, key): """Create a new object. :param bucket: The bucket (instance or id) to get the object from. :param key: The file key. :returns: A Flask response. """ # Initial validation of size based on Content-Length. # User can tamper with Content-Length, so this is just an initial up # front check. The storage subsystem must validate the size limit as # well. stream, content_length, content_md5, tags = \ current_files_rest.upload_factory() size_limit = bucket.size_limit if content_length and size_limit and content_length > size_limit: desc = 'File size limit exceeded.' \ if isinstance(size_limit, int) else size_limit.reason raise FileSizeError(description=desc) with db.session.begin_nested(): obj = ObjectVersion.create(bucket, key) obj.set_contents( stream, size=content_length, size_limit=size_limit) # Check add tags if tags: for key, value in tags.items(): ObjectVersionTag.create(obj, key, value) db.session.commit() return self.make_response( data=obj, context={ 'class': ObjectVersion, 'bucket': bucket, }, etag=obj.file.checksum )
[ "def", "create_object", "(", "self", ",", "bucket", ",", "key", ")", ":", "# Initial validation of size based on Content-Length.", "# User can tamper with Content-Length, so this is just an initial up", "# front check. The storage subsystem must validate the size limit as", "# well.", "stream", ",", "content_length", ",", "content_md5", ",", "tags", "=", "current_files_rest", ".", "upload_factory", "(", ")", "size_limit", "=", "bucket", ".", "size_limit", "if", "content_length", "and", "size_limit", "and", "content_length", ">", "size_limit", ":", "desc", "=", "'File size limit exceeded.'", "if", "isinstance", "(", "size_limit", ",", "int", ")", "else", "size_limit", ".", "reason", "raise", "FileSizeError", "(", "description", "=", "desc", ")", "with", "db", ".", "session", ".", "begin_nested", "(", ")", ":", "obj", "=", "ObjectVersion", ".", "create", "(", "bucket", ",", "key", ")", "obj", ".", "set_contents", "(", "stream", ",", "size", "=", "content_length", ",", "size_limit", "=", "size_limit", ")", "# Check add tags", "if", "tags", ":", "for", "key", ",", "value", "in", "tags", ".", "items", "(", ")", ":", "ObjectVersionTag", ".", "create", "(", "obj", ",", "key", ",", "value", ")", "db", ".", "session", ".", "commit", "(", ")", "return", "self", ".", "make_response", "(", "data", "=", "obj", ",", "context", "=", "{", "'class'", ":", "ObjectVersion", ",", "'bucket'", ":", "bucket", ",", "}", ",", "etag", "=", "obj", ".", "file", ".", "checksum", ")" ]
Create a new object. :param bucket: The bucket (instance or id) to get the object from. :param key: The file key. :returns: A Flask response.
[ "Create", "a", "new", "object", "." ]
python
train
f3at/feat
src/feat/extern/log/log.py
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/extern/log/log.py#L422-L452
def stderrHandler(level, object, category, file, line, message): """ A log handler that writes to stderr. @type level: string @type object: string (or None) @type category: string @type message: string """ o = "" if object: o = '"' + object + '"' where = "(%s:%d)" % (file, line) # level pid object cat time # 5 + 1 + 7 + 1 + 32 + 1 + 17 + 1 + 15 == 80 safeprintf(sys.stderr, '%s [%5d] %-32s %-17s %-15s ', getFormattedLevelName(level), os.getpid(), o, category, time.strftime("%b %d %H:%M:%S")) try: safeprintf(sys.stderr, '%-4s %s %s\n', "", message, where) except UnicodeEncodeError: # this can happen if message is a unicode object, convert it back into # a string using the UTF-8 encoding message = message.encode('UTF-8') safeprintf(sys.stderr, '%-4s %s %s\n', "", message, where) sys.stderr.flush()
[ "def", "stderrHandler", "(", "level", ",", "object", ",", "category", ",", "file", ",", "line", ",", "message", ")", ":", "o", "=", "\"\"", "if", "object", ":", "o", "=", "'\"'", "+", "object", "+", "'\"'", "where", "=", "\"(%s:%d)\"", "%", "(", "file", ",", "line", ")", "# level pid object cat time", "# 5 + 1 + 7 + 1 + 32 + 1 + 17 + 1 + 15 == 80", "safeprintf", "(", "sys", ".", "stderr", ",", "'%s [%5d] %-32s %-17s %-15s '", ",", "getFormattedLevelName", "(", "level", ")", ",", "os", ".", "getpid", "(", ")", ",", "o", ",", "category", ",", "time", ".", "strftime", "(", "\"%b %d %H:%M:%S\"", ")", ")", "try", ":", "safeprintf", "(", "sys", ".", "stderr", ",", "'%-4s %s %s\\n'", ",", "\"\"", ",", "message", ",", "where", ")", "except", "UnicodeEncodeError", ":", "# this can happen if message is a unicode object, convert it back into", "# a string using the UTF-8 encoding", "message", "=", "message", ".", "encode", "(", "'UTF-8'", ")", "safeprintf", "(", "sys", ".", "stderr", ",", "'%-4s %s %s\\n'", ",", "\"\"", ",", "message", ",", "where", ")", "sys", ".", "stderr", ".", "flush", "(", ")" ]
A log handler that writes to stderr. @type level: string @type object: string (or None) @type category: string @type message: string
[ "A", "log", "handler", "that", "writes", "to", "stderr", "." ]
python
train
wright-group/WrightTools
WrightTools/kit/_timestamp.py
https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/kit/_timestamp.py#L147-L174
def RFC3339(self): """RFC3339. `Link to RFC3339.`__ __ https://www.ietf.org/rfc/rfc3339.txt """ # get timezone offset delta_sec = time.timezone m, s = divmod(delta_sec, 60) h, m = divmod(m, 60) # timestamp format_string = "%Y-%m-%dT%H:%M:%S.%f" out = self.datetime.strftime(format_string) # timezone if delta_sec == 0.: out += "Z" else: if delta_sec > 0: sign = "+" elif delta_sec < 0: sign = "-" def as_string(num): return str(np.abs(int(num))).zfill(2) out += sign + as_string(h) + ":" + as_string(m) return out
[ "def", "RFC3339", "(", "self", ")", ":", "# get timezone offset", "delta_sec", "=", "time", ".", "timezone", "m", ",", "s", "=", "divmod", "(", "delta_sec", ",", "60", ")", "h", ",", "m", "=", "divmod", "(", "m", ",", "60", ")", "# timestamp", "format_string", "=", "\"%Y-%m-%dT%H:%M:%S.%f\"", "out", "=", "self", ".", "datetime", ".", "strftime", "(", "format_string", ")", "# timezone", "if", "delta_sec", "==", "0.", ":", "out", "+=", "\"Z\"", "else", ":", "if", "delta_sec", ">", "0", ":", "sign", "=", "\"+\"", "elif", "delta_sec", "<", "0", ":", "sign", "=", "\"-\"", "def", "as_string", "(", "num", ")", ":", "return", "str", "(", "np", ".", "abs", "(", "int", "(", "num", ")", ")", ")", ".", "zfill", "(", "2", ")", "out", "+=", "sign", "+", "as_string", "(", "h", ")", "+", "\":\"", "+", "as_string", "(", "m", ")", "return", "out" ]
RFC3339. `Link to RFC3339.`__ __ https://www.ietf.org/rfc/rfc3339.txt
[ "RFC3339", "." ]
python
train
openego/eDisGo
edisgo/grid/network.py
https://github.com/openego/eDisGo/blob/e6245bdaf236f9c49dbda5a18c1c458290f41e2b/edisgo/grid/network.py#L562-L651
def analyze(self, mode=None, timesteps=None): """Analyzes the grid by power flow analysis Analyze the grid for violations of hosting capacity. Means, perform a power flow analysis and obtain voltages at nodes (load, generator, stations/transformers and branch tees) and active/reactive power at lines. The power flow analysis can currently only be performed for both grid levels MV and LV. See ToDos section for more information. A static `non-linear power flow analysis is performed using PyPSA <https://www.pypsa.org/doc/power_flow.html#full-non-linear-power-flow>`_. The high-voltage to medium-voltage transformer are not included in the analysis. The slack bus is defined at secondary side of these transformers assuming an ideal tap changer. Hence, potential overloading of the transformers is not studied here. Parameters ---------- mode : str Allows to toggle between power flow analysis (PFA) on the whole grid topology (MV + LV), only MV or only LV. Defaults to None which equals power flow analysis for MV + LV which is the only implemented option at the moment. See ToDos section for more information. timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>` Timesteps specifies for which time steps to conduct the power flow analysis. It defaults to None in which case the time steps in timeseries.timeindex (see :class:`~.grid.network.TimeSeries`) are used. Notes ----- The current implementation always translates the grid topology representation to the PyPSA format and stores it to :attr:`self.network.pypsa`. ToDos ------ The option to export only the edisgo MV grid (mode = 'mv') to conduct a power flow analysis is implemented in :func:`~.tools.pypsa_io.to_pypsa` but NotImplementedError is raised since the rest of edisgo does not handle this option yet. The analyze function will throw an error since :func:`~.tools.pypsa_io.process_pfa_results` does not handle aggregated loads and generators in the LV grids. Also, grid reinforcement, pypsa update of time series, and probably other functionalities do not work when only the MV grid is analysed. Further ToDos are: * explain how power plants are modeled, if possible use a link * explain where to find and adjust power flow analysis defining parameters See Also -------- :func:`~.tools.pypsa_io.to_pypsa` Translator to PyPSA data format """ if timesteps is None: timesteps = self.network.timeseries.timeindex # check if timesteps is array-like, otherwise convert to list if not hasattr(timesteps, "__len__"): timesteps = [timesteps] if self.network.pypsa is None: # Translate eDisGo grid topology representation to PyPSA format self.network.pypsa = pypsa_io.to_pypsa( self.network, mode, timesteps) else: if self.network.pypsa.edisgo_mode is not mode: # Translate eDisGo grid topology representation to PyPSA format self.network.pypsa = pypsa_io.to_pypsa( self.network, mode, timesteps) # check if all timesteps are in pypsa.snapshots, if not update time # series if False in [True if _ in self.network.pypsa.snapshots else False for _ in timesteps]: pypsa_io.update_pypsa_timeseries(self.network, timesteps=timesteps) # run power flow analysis pf_results = self.network.pypsa.pf(timesteps) if all(pf_results['converged']['0'].tolist()): pypsa_io.process_pfa_results( self.network, self.network.pypsa, timesteps) else: raise ValueError("Power flow analysis did not converge.")
[ "def", "analyze", "(", "self", ",", "mode", "=", "None", ",", "timesteps", "=", "None", ")", ":", "if", "timesteps", "is", "None", ":", "timesteps", "=", "self", ".", "network", ".", "timeseries", ".", "timeindex", "# check if timesteps is array-like, otherwise convert to list", "if", "not", "hasattr", "(", "timesteps", ",", "\"__len__\"", ")", ":", "timesteps", "=", "[", "timesteps", "]", "if", "self", ".", "network", ".", "pypsa", "is", "None", ":", "# Translate eDisGo grid topology representation to PyPSA format", "self", ".", "network", ".", "pypsa", "=", "pypsa_io", ".", "to_pypsa", "(", "self", ".", "network", ",", "mode", ",", "timesteps", ")", "else", ":", "if", "self", ".", "network", ".", "pypsa", ".", "edisgo_mode", "is", "not", "mode", ":", "# Translate eDisGo grid topology representation to PyPSA format", "self", ".", "network", ".", "pypsa", "=", "pypsa_io", ".", "to_pypsa", "(", "self", ".", "network", ",", "mode", ",", "timesteps", ")", "# check if all timesteps are in pypsa.snapshots, if not update time", "# series", "if", "False", "in", "[", "True", "if", "_", "in", "self", ".", "network", ".", "pypsa", ".", "snapshots", "else", "False", "for", "_", "in", "timesteps", "]", ":", "pypsa_io", ".", "update_pypsa_timeseries", "(", "self", ".", "network", ",", "timesteps", "=", "timesteps", ")", "# run power flow analysis", "pf_results", "=", "self", ".", "network", ".", "pypsa", ".", "pf", "(", "timesteps", ")", "if", "all", "(", "pf_results", "[", "'converged'", "]", "[", "'0'", "]", ".", "tolist", "(", ")", ")", ":", "pypsa_io", ".", "process_pfa_results", "(", "self", ".", "network", ",", "self", ".", "network", ".", "pypsa", ",", "timesteps", ")", "else", ":", "raise", "ValueError", "(", "\"Power flow analysis did not converge.\"", ")" ]
Analyzes the grid by power flow analysis Analyze the grid for violations of hosting capacity. Means, perform a power flow analysis and obtain voltages at nodes (load, generator, stations/transformers and branch tees) and active/reactive power at lines. The power flow analysis can currently only be performed for both grid levels MV and LV. See ToDos section for more information. A static `non-linear power flow analysis is performed using PyPSA <https://www.pypsa.org/doc/power_flow.html#full-non-linear-power-flow>`_. The high-voltage to medium-voltage transformer are not included in the analysis. The slack bus is defined at secondary side of these transformers assuming an ideal tap changer. Hence, potential overloading of the transformers is not studied here. Parameters ---------- mode : str Allows to toggle between power flow analysis (PFA) on the whole grid topology (MV + LV), only MV or only LV. Defaults to None which equals power flow analysis for MV + LV which is the only implemented option at the moment. See ToDos section for more information. timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>` Timesteps specifies for which time steps to conduct the power flow analysis. It defaults to None in which case the time steps in timeseries.timeindex (see :class:`~.grid.network.TimeSeries`) are used. Notes ----- The current implementation always translates the grid topology representation to the PyPSA format and stores it to :attr:`self.network.pypsa`. ToDos ------ The option to export only the edisgo MV grid (mode = 'mv') to conduct a power flow analysis is implemented in :func:`~.tools.pypsa_io.to_pypsa` but NotImplementedError is raised since the rest of edisgo does not handle this option yet. The analyze function will throw an error since :func:`~.tools.pypsa_io.process_pfa_results` does not handle aggregated loads and generators in the LV grids. Also, grid reinforcement, pypsa update of time series, and probably other functionalities do not work when only the MV grid is analysed. Further ToDos are: * explain how power plants are modeled, if possible use a link * explain where to find and adjust power flow analysis defining parameters See Also -------- :func:`~.tools.pypsa_io.to_pypsa` Translator to PyPSA data format
[ "Analyzes", "the", "grid", "by", "power", "flow", "analysis" ]
python
train
mcieslik-mctp/papy
src/papy/util/func.py
https://github.com/mcieslik-mctp/papy/blob/708e50827b5db46bbea081982cb74b9b0e464064/src/papy/util/func.py#L379-L478
def load_item(inbox, type="string", remove=True, buffer=None): """ Loads data from a file. Determines the file type automatically ``"file"``, ``"fifo"``, ``"socket"``, but allows to specify the representation type ``"string"`` or ``"mmap"`` for memory mapped access to the file. Returns the loaded item as a ``str`` or ``mmap`` object. Internally creates an item from a ``file``. Arguments: - type(``"string"`` or ``"mmap"``) [default: ``"string"``] Determines the type of ``object`` the worker returns i.e. the ``file`` is read as a string or a memmory map. FIFOs cannot be memory mapped. - remove(``bool``) [default: ``True``] Should the file be removed from the filesystem? This is mandatory for FIFOs and sockets. Only Files can be used to store data persistantly. """ is_file, is_fifo, is_socket = False, False, False file = inbox[0] try: file_type = file[0] except: raise ValueError("invalid inbox item") if file_type == "file": is_file = os.path.exists(file[1]) elif file_type == "fifo": is_fifo = stat.S_ISFIFO(os.stat(file[1]).st_mode) elif file_type == "socket": # how to test is valid socket? is_socket = True else: raise ValueError("type: %s not undertood" % file_type) if (is_fifo or is_socket) and (type == 'mmap'): raise ValueError("mmap is not supported for FIFOs and sockets") if (is_fifo or is_socket) and not remove: raise ValueError("FIFOs and sockets have to be removed") # get a fd and start/stop start = 0 if is_fifo or is_file: stop = os.stat(file[1]).st_size - 1 fd = os.open(file[1], os.O_RDONLY) BUFFER = (buffer or PAPY_DEFAULTS['PIPE_BUF']) elif is_socket: host, port = socket.gethostbyname(file[1]), file[2] sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect((host, port)) stop = -1 fd = sock.fileno() BUFFER = (buffer or PAPY_DEFAULTS['TCP_RCVBUF']) else: raise ValueError("got unknown inbox: %s" % (repr(inbox))) # get the data if type == 'mmap': offset = start - (start % (getattr(mmap, 'ALLOCATIONGRANULARITY', None)\ or getattr(mmap, 'PAGESIZE'))) start = start - offset stop = stop - offset + 1 try: data = mmap.mmap(fd, stop, access=mmap.ACCESS_READ, offset=offset) except TypeError: # we're on Python 2.5 data = mmap.mmap(fd, stop, access=mmap.ACCESS_READ) data.seek(start) elif type == 'string': data = [] if stop == -1: while True: buffer_ = os.read(fd, BUFFER) if not buffer_: break data.append(buffer_) data = "".join(data) # data = sock.recv(socket.MSG_WAITALL) # this would read all the data from a socket else: os.lseek(fd, start, 0) data = os.read(fd, stop - start + 1) else: raise ValueError('type: %s not understood.' % type) # remove the file or close the socket if remove: if is_socket: # closes client socket sock.close() else: # pipes and files are just removed os.close(fd) os.unlink(file[1]) else: os.close(fd) # returns a string or mmap return data
[ "def", "load_item", "(", "inbox", ",", "type", "=", "\"string\"", ",", "remove", "=", "True", ",", "buffer", "=", "None", ")", ":", "is_file", ",", "is_fifo", ",", "is_socket", "=", "False", ",", "False", ",", "False", "file", "=", "inbox", "[", "0", "]", "try", ":", "file_type", "=", "file", "[", "0", "]", "except", ":", "raise", "ValueError", "(", "\"invalid inbox item\"", ")", "if", "file_type", "==", "\"file\"", ":", "is_file", "=", "os", ".", "path", ".", "exists", "(", "file", "[", "1", "]", ")", "elif", "file_type", "==", "\"fifo\"", ":", "is_fifo", "=", "stat", ".", "S_ISFIFO", "(", "os", ".", "stat", "(", "file", "[", "1", "]", ")", ".", "st_mode", ")", "elif", "file_type", "==", "\"socket\"", ":", "# how to test is valid socket?", "is_socket", "=", "True", "else", ":", "raise", "ValueError", "(", "\"type: %s not undertood\"", "%", "file_type", ")", "if", "(", "is_fifo", "or", "is_socket", ")", "and", "(", "type", "==", "'mmap'", ")", ":", "raise", "ValueError", "(", "\"mmap is not supported for FIFOs and sockets\"", ")", "if", "(", "is_fifo", "or", "is_socket", ")", "and", "not", "remove", ":", "raise", "ValueError", "(", "\"FIFOs and sockets have to be removed\"", ")", "# get a fd and start/stop", "start", "=", "0", "if", "is_fifo", "or", "is_file", ":", "stop", "=", "os", ".", "stat", "(", "file", "[", "1", "]", ")", ".", "st_size", "-", "1", "fd", "=", "os", ".", "open", "(", "file", "[", "1", "]", ",", "os", ".", "O_RDONLY", ")", "BUFFER", "=", "(", "buffer", "or", "PAPY_DEFAULTS", "[", "'PIPE_BUF'", "]", ")", "elif", "is_socket", ":", "host", ",", "port", "=", "socket", ".", "gethostbyname", "(", "file", "[", "1", "]", ")", ",", "file", "[", "2", "]", "sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "sock", ".", "connect", "(", "(", "host", ",", "port", ")", ")", "stop", "=", "-", "1", "fd", "=", "sock", ".", "fileno", "(", ")", "BUFFER", "=", "(", "buffer", "or", "PAPY_DEFAULTS", "[", "'TCP_RCVBUF'", "]", ")", "else", ":", "raise", "ValueError", "(", "\"got unknown inbox: %s\"", "%", "(", "repr", "(", "inbox", ")", ")", ")", "# get the data", "if", "type", "==", "'mmap'", ":", "offset", "=", "start", "-", "(", "start", "%", "(", "getattr", "(", "mmap", ",", "'ALLOCATIONGRANULARITY'", ",", "None", ")", "or", "getattr", "(", "mmap", ",", "'PAGESIZE'", ")", ")", ")", "start", "=", "start", "-", "offset", "stop", "=", "stop", "-", "offset", "+", "1", "try", ":", "data", "=", "mmap", ".", "mmap", "(", "fd", ",", "stop", ",", "access", "=", "mmap", ".", "ACCESS_READ", ",", "offset", "=", "offset", ")", "except", "TypeError", ":", "# we're on Python 2.5", "data", "=", "mmap", ".", "mmap", "(", "fd", ",", "stop", ",", "access", "=", "mmap", ".", "ACCESS_READ", ")", "data", ".", "seek", "(", "start", ")", "elif", "type", "==", "'string'", ":", "data", "=", "[", "]", "if", "stop", "==", "-", "1", ":", "while", "True", ":", "buffer_", "=", "os", ".", "read", "(", "fd", ",", "BUFFER", ")", "if", "not", "buffer_", ":", "break", "data", ".", "append", "(", "buffer_", ")", "data", "=", "\"\"", ".", "join", "(", "data", ")", "# data = sock.recv(socket.MSG_WAITALL) ", "# this would read all the data from a socket", "else", ":", "os", ".", "lseek", "(", "fd", ",", "start", ",", "0", ")", "data", "=", "os", ".", "read", "(", "fd", ",", "stop", "-", "start", "+", "1", ")", "else", ":", "raise", "ValueError", "(", "'type: %s not understood.'", "%", "type", ")", "# remove the file or close the socket", "if", "remove", ":", "if", 
"is_socket", ":", "# closes client socket", "sock", ".", "close", "(", ")", "else", ":", "# pipes and files are just removed", "os", ".", "close", "(", "fd", ")", "os", ".", "unlink", "(", "file", "[", "1", "]", ")", "else", ":", "os", ".", "close", "(", "fd", ")", "# returns a string or mmap", "return", "data" ]
Loads data from a file. Determines the file type automatically ``"file"``, ``"fifo"``, ``"socket"``, but allows to specify the representation type ``"string"`` or ``"mmap"`` for memory mapped access to the file. Returns the loaded item as a ``str`` or ``mmap`` object. Internally creates an item from a ``file``. Arguments: - type(``"string"`` or ``"mmap"``) [default: ``"string"``] Determines the type of ``object`` the worker returns i.e. the ``file`` is read as a string or a memmory map. FIFOs cannot be memory mapped. - remove(``bool``) [default: ``True``] Should the file be removed from the filesystem? This is mandatory for FIFOs and sockets. Only Files can be used to store data persistantly.
[ "Loads", "data", "from", "a", "file", ".", "Determines", "the", "file", "type", "automatically", "file", "fifo", "socket", "but", "allows", "to", "specify", "the", "representation", "type", "string", "or", "mmap", "for", "memory", "mapped", "access", "to", "the", "file", ".", "Returns", "the", "loaded", "item", "as", "a", "str", "or", "mmap", "object", ".", "Internally", "creates", "an", "item", "from", "a", "file", "." ]
python
train
inveniosoftware/invenio-access
invenio_access/utils.py
https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/utils.py#L16-L30
def get_identity(user): """Create an identity for a given user instance. Primarily useful for testing. """ identity = Identity(user.id) if hasattr(user, 'id'): identity.provides.add(UserNeed(user.id)) for role in getattr(user, 'roles', []): identity.provides.add(RoleNeed(role.name)) identity.user = user return identity
[ "def", "get_identity", "(", "user", ")", ":", "identity", "=", "Identity", "(", "user", ".", "id", ")", "if", "hasattr", "(", "user", ",", "'id'", ")", ":", "identity", ".", "provides", ".", "add", "(", "UserNeed", "(", "user", ".", "id", ")", ")", "for", "role", "in", "getattr", "(", "user", ",", "'roles'", ",", "[", "]", ")", ":", "identity", ".", "provides", ".", "add", "(", "RoleNeed", "(", "role", ".", "name", ")", ")", "identity", ".", "user", "=", "user", "return", "identity" ]
Create an identity for a given user instance. Primarily useful for testing.
[ "Create", "an", "identity", "for", "a", "given", "user", "instance", "." ]
python
train
tobgu/pyrsistent
pyrsistent/_helpers.py
https://github.com/tobgu/pyrsistent/blob/c84dab0daaa44973cbe83830d14888827b307632/pyrsistent/_helpers.py#L8-L39
def freeze(o): """ Recursively convert simple Python containers into pyrsistent versions of those containers. - list is converted to pvector, recursively - dict is converted to pmap, recursively on values (but not keys) - set is converted to pset, but not recursively - tuple is converted to tuple, recursively. Sets and dict keys are not recursively frozen because they do not contain mutable data by convention. The main exception to this rule is that dict keys and set elements are often instances of mutable objects that support hash-by-id, which this function can't convert anyway. >>> freeze(set([1, 2])) pset([1, 2]) >>> freeze([1, {'a': 3}]) pvector([1, pmap({'a': 3})]) >>> freeze((1, [])) (1, pvector([])) """ typ = type(o) if typ is dict: return pmap(dict((k, freeze(v)) for k, v in six.iteritems(o))) if typ is list: return pvector(map(freeze, o)) if typ is tuple: return tuple(map(freeze, o)) if typ is set: return pset(o) return o
[ "def", "freeze", "(", "o", ")", ":", "typ", "=", "type", "(", "o", ")", "if", "typ", "is", "dict", ":", "return", "pmap", "(", "dict", "(", "(", "k", ",", "freeze", "(", "v", ")", ")", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "o", ")", ")", ")", "if", "typ", "is", "list", ":", "return", "pvector", "(", "map", "(", "freeze", ",", "o", ")", ")", "if", "typ", "is", "tuple", ":", "return", "tuple", "(", "map", "(", "freeze", ",", "o", ")", ")", "if", "typ", "is", "set", ":", "return", "pset", "(", "o", ")", "return", "o" ]
Recursively convert simple Python containers into pyrsistent versions of those containers. - list is converted to pvector, recursively - dict is converted to pmap, recursively on values (but not keys) - set is converted to pset, but not recursively - tuple is converted to tuple, recursively. Sets and dict keys are not recursively frozen because they do not contain mutable data by convention. The main exception to this rule is that dict keys and set elements are often instances of mutable objects that support hash-by-id, which this function can't convert anyway. >>> freeze(set([1, 2])) pset([1, 2]) >>> freeze([1, {'a': 3}]) pvector([1, pmap({'a': 3})]) >>> freeze((1, [])) (1, pvector([]))
[ "Recursively", "convert", "simple", "Python", "containers", "into", "pyrsistent", "versions", "of", "those", "containers", "." ]
python
train
wandb/client
wandb/vendor/prompt_toolkit/layout/controls.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/controls.py#L324-L360
def mouse_handler(self, cli, mouse_event): """ Handle mouse events. (When the token list contained mouse handlers and the user clicked on on any of these, the matching handler is called. This handler can still return `NotImplemented` in case we want the `Window` to handle this particular event.) """ if self._tokens: # Read the generator. tokens_for_line = list(split_lines(self._tokens)) try: tokens = tokens_for_line[mouse_event.position.y] except IndexError: return NotImplemented else: # Find position in the token list. xpos = mouse_event.position.x # Find mouse handler for this character. count = 0 for item in tokens: count += len(item[1]) if count >= xpos: if len(item) >= 3: # Handler found. Call it. # (Handler can return NotImplemented, so return # that result.) handler = item[2] return handler(cli, mouse_event) else: break # Otherwise, don't handle here. return NotImplemented
[ "def", "mouse_handler", "(", "self", ",", "cli", ",", "mouse_event", ")", ":", "if", "self", ".", "_tokens", ":", "# Read the generator.", "tokens_for_line", "=", "list", "(", "split_lines", "(", "self", ".", "_tokens", ")", ")", "try", ":", "tokens", "=", "tokens_for_line", "[", "mouse_event", ".", "position", ".", "y", "]", "except", "IndexError", ":", "return", "NotImplemented", "else", ":", "# Find position in the token list.", "xpos", "=", "mouse_event", ".", "position", ".", "x", "# Find mouse handler for this character.", "count", "=", "0", "for", "item", "in", "tokens", ":", "count", "+=", "len", "(", "item", "[", "1", "]", ")", "if", "count", ">=", "xpos", ":", "if", "len", "(", "item", ")", ">=", "3", ":", "# Handler found. Call it.", "# (Handler can return NotImplemented, so return", "# that result.)", "handler", "=", "item", "[", "2", "]", "return", "handler", "(", "cli", ",", "mouse_event", ")", "else", ":", "break", "# Otherwise, don't handle here.", "return", "NotImplemented" ]
Handle mouse events. (When the token list contained mouse handlers and the user clicked on on any of these, the matching handler is called. This handler can still return `NotImplemented` in case we want the `Window` to handle this particular event.)
[ "Handle", "mouse", "events", "." ]
python
train
pypa/pipenv
pipenv/vendor/click_didyoumean/__init__.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/click_didyoumean/__init__.py#L25-L42
def resolve_command(self, ctx, args): """ Overrides clicks ``resolve_command`` method and appends *Did you mean ...* suggestions to the raised exception message. """ original_cmd_name = click.utils.make_str(args[0]) try: return super(DYMMixin, self).resolve_command(ctx, args) except click.exceptions.UsageError as error: error_msg = str(error) matches = difflib.get_close_matches(original_cmd_name, self.list_commands(ctx), self.max_suggestions, self.cutoff) if matches: error_msg += '\n\nDid you mean one of these?\n %s' % '\n '.join(matches) # pylint: disable=line-too-long raise click.exceptions.UsageError(error_msg, error.ctx)
[ "def", "resolve_command", "(", "self", ",", "ctx", ",", "args", ")", ":", "original_cmd_name", "=", "click", ".", "utils", ".", "make_str", "(", "args", "[", "0", "]", ")", "try", ":", "return", "super", "(", "DYMMixin", ",", "self", ")", ".", "resolve_command", "(", "ctx", ",", "args", ")", "except", "click", ".", "exceptions", ".", "UsageError", "as", "error", ":", "error_msg", "=", "str", "(", "error", ")", "matches", "=", "difflib", ".", "get_close_matches", "(", "original_cmd_name", ",", "self", ".", "list_commands", "(", "ctx", ")", ",", "self", ".", "max_suggestions", ",", "self", ".", "cutoff", ")", "if", "matches", ":", "error_msg", "+=", "'\\n\\nDid you mean one of these?\\n %s'", "%", "'\\n '", ".", "join", "(", "matches", ")", "# pylint: disable=line-too-long", "raise", "click", ".", "exceptions", ".", "UsageError", "(", "error_msg", ",", "error", ".", "ctx", ")" ]
Overrides clicks ``resolve_command`` method and appends *Did you mean ...* suggestions to the raised exception message.
[ "Overrides", "clicks", "resolve_command", "method", "and", "appends", "*", "Did", "you", "mean", "...", "*", "suggestions", "to", "the", "raised", "exception", "message", "." ]
python
train
Oneiroe/PySimpleAutomata
PySimpleAutomata/AFW.py
https://github.com/Oneiroe/PySimpleAutomata/blob/0f9f2705fd8ddd5d8118bc31552a640f5d00c359/PySimpleAutomata/AFW.py#L106-L120
def afw_word_acceptance(afw: dict, word: list) -> bool: """ Checks if a **word** is accepted by input AFW, returning True/False. The word w is accepted by a AFW if exists at least an accepting run on w. A run for AFWs is a tree and an alternating automaton can have multiple runs on a given input. A run is accepting if all the leaf nodes are accepting states. :param dict afw: input AFW; :param list word: list of symbols ∈ afw['alphabet']. :return: *(bool)*, True if the word is accepted, False otherwise. """ return __recursive_acceptance(afw, afw['initial_state'], word)
[ "def", "afw_word_acceptance", "(", "afw", ":", "dict", ",", "word", ":", "list", ")", "->", "bool", ":", "return", "__recursive_acceptance", "(", "afw", ",", "afw", "[", "'initial_state'", "]", ",", "word", ")" ]
Checks if a **word** is accepted by input AFW, returning True/False. The word w is accepted by a AFW if exists at least an accepting run on w. A run for AFWs is a tree and an alternating automaton can have multiple runs on a given input. A run is accepting if all the leaf nodes are accepting states. :param dict afw: input AFW; :param list word: list of symbols ∈ afw['alphabet']. :return: *(bool)*, True if the word is accepted, False otherwise.
[ "Checks", "if", "a", "**", "word", "**", "is", "accepted", "by", "input", "AFW", "returning", "True", "/", "False", "." ]
python
train
mdickinson/bigfloat
bigfloat/core.py
https://github.com/mdickinson/bigfloat/blob/e5fdd1048615191ed32a2b7460e14b3b3ff24662/bigfloat/core.py#L200-L225
def next_up(x, context=None): """next_up(x): return the least representable float that's strictly greater than x. This operation is quiet: flags are not affected. """ x = BigFloat._implicit_convert(x) # make sure we don't alter any flags with _saved_flags(): with (context if context is not None else EmptyContext): with RoundTowardPositive: # nan maps to itself if is_nan(x): return +x # round to current context; if value changes, we're done y = +x if y != x: return y # otherwise apply mpfr_nextabove bf = y.copy() mpfr.mpfr_nextabove(bf) # apply + one more time to deal with subnormals return +bf
[ "def", "next_up", "(", "x", ",", "context", "=", "None", ")", ":", "x", "=", "BigFloat", ".", "_implicit_convert", "(", "x", ")", "# make sure we don't alter any flags", "with", "_saved_flags", "(", ")", ":", "with", "(", "context", "if", "context", "is", "not", "None", "else", "EmptyContext", ")", ":", "with", "RoundTowardPositive", ":", "# nan maps to itself", "if", "is_nan", "(", "x", ")", ":", "return", "+", "x", "# round to current context; if value changes, we're done", "y", "=", "+", "x", "if", "y", "!=", "x", ":", "return", "y", "# otherwise apply mpfr_nextabove", "bf", "=", "y", ".", "copy", "(", ")", "mpfr", ".", "mpfr_nextabove", "(", "bf", ")", "# apply + one more time to deal with subnormals", "return", "+", "bf" ]
next_up(x): return the least representable float that's strictly greater than x. This operation is quiet: flags are not affected.
[ "next_up", "(", "x", ")", ":", "return", "the", "least", "representable", "float", "that", "s", "strictly", "greater", "than", "x", "." ]
python
train
gem/oq-engine
openquake/hazardlib/sourcewriter.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/sourcewriter.py#L271-L285
def build_hypo_depth_dist(hdd): """ Returns the hypocentral depth distribution as a Node instance :param hdd: Hypocentral depth distribution as an instance of :class: `openquake.hzardlib.pmf.PMF` :returns: Instance of :class:`openquake.baselib.node.Node` """ hdds = [] for (prob, depth) in hdd.data: hdds.append( Node("hypoDepth", {"depth": depth, "probability": prob})) return Node("hypoDepthDist", nodes=hdds)
[ "def", "build_hypo_depth_dist", "(", "hdd", ")", ":", "hdds", "=", "[", "]", "for", "(", "prob", ",", "depth", ")", "in", "hdd", ".", "data", ":", "hdds", ".", "append", "(", "Node", "(", "\"hypoDepth\"", ",", "{", "\"depth\"", ":", "depth", ",", "\"probability\"", ":", "prob", "}", ")", ")", "return", "Node", "(", "\"hypoDepthDist\"", ",", "nodes", "=", "hdds", ")" ]
Returns the hypocentral depth distribution as a Node instance :param hdd: Hypocentral depth distribution as an instance of :class: `openquake.hzardlib.pmf.PMF` :returns: Instance of :class:`openquake.baselib.node.Node`
[ "Returns", "the", "hypocentral", "depth", "distribution", "as", "a", "Node", "instance" ]
python
train
FujiMakoto/AgentML
agentml/__init__.py
https://github.com/FujiMakoto/AgentML/blob/c8cb64b460d876666bf29ea2c682189874c7c403/agentml/__init__.py#L546-L561
def interpreter(self): """ Launch an AML interpreter session for testing """ while True: message = input('[#] ') if message.lower().strip() == 'exit': break reply = self.get_reply('#interpreter#', message) if not reply: print('No reply received.', end='\n\n') continue # typewrite(reply, end='\n\n') TODO print(reply, end='\n\n')
[ "def", "interpreter", "(", "self", ")", ":", "while", "True", ":", "message", "=", "input", "(", "'[#] '", ")", "if", "message", ".", "lower", "(", ")", ".", "strip", "(", ")", "==", "'exit'", ":", "break", "reply", "=", "self", ".", "get_reply", "(", "'#interpreter#'", ",", "message", ")", "if", "not", "reply", ":", "print", "(", "'No reply received.'", ",", "end", "=", "'\\n\\n'", ")", "continue", "# typewrite(reply, end='\\n\\n') TODO", "print", "(", "reply", ",", "end", "=", "'\\n\\n'", ")" ]
Launch an AML interpreter session for testing
[ "Launch", "an", "AML", "interpreter", "session", "for", "testing" ]
python
train
CxAalto/gtfspy
gtfspy/routing/node_profile_analyzer_time.py
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/routing/node_profile_analyzer_time.py#L196-L206
def mean_temporal_distance(self): """ Get mean temporal distance (in seconds) to the target. Returns ------- mean_temporal_distance : float """ total_width = self.end_time_dep - self.start_time_dep total_area = sum([block.area() for block in self._profile_blocks]) return total_area / total_width
[ "def", "mean_temporal_distance", "(", "self", ")", ":", "total_width", "=", "self", ".", "end_time_dep", "-", "self", ".", "start_time_dep", "total_area", "=", "sum", "(", "[", "block", ".", "area", "(", ")", "for", "block", "in", "self", ".", "_profile_blocks", "]", ")", "return", "total_area", "/", "total_width" ]
Get mean temporal distance (in seconds) to the target. Returns ------- mean_temporal_distance : float
[ "Get", "mean", "temporal", "distance", "(", "in", "seconds", ")", "to", "the", "target", "." ]
python
valid
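A toy numeric check of the mean_temporal_distance formula above (total area divided by total width). The block areas are invented numbers, not real gtfspy profile blocks, and a one-hour departure window is assumed.

start_time_dep = 0
end_time_dep = 3600                      # a one-hour departure window, in seconds
# pretend each profile block contributes width_seconds * mean_distance_seconds of area
block_areas = [1800 * 600, 1800 * 900]   # two half-hour blocks at 10 and 15 minutes

total_width = end_time_dep - start_time_dep
total_area = sum(block_areas)
print(total_area / total_width)          # 750.0 seconds, i.e. 12.5 minutes on average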
dourvaris/nano-python
src/nano/rpc.py
https://github.com/dourvaris/nano-python/blob/f26b8bc895b997067780f925049a70e82c0c2479/src/nano/rpc.py#L2432-L2457
def wallet_frontiers(self, wallet): """ Returns a list of pairs of account and block hash representing the head block starting for accounts from **wallet** :param wallet: Wallet to return frontiers for :type wallet: str :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.wallet_frontiers( ... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F" ... ) { "xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000000": "000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F" } """ wallet = self._process_value(wallet, 'wallet') payload = {"wallet": wallet} resp = self.call('wallet_frontiers', payload) return resp.get('frontiers') or {}
[ "def", "wallet_frontiers", "(", "self", ",", "wallet", ")", ":", "wallet", "=", "self", ".", "_process_value", "(", "wallet", ",", "'wallet'", ")", "payload", "=", "{", "\"wallet\"", ":", "wallet", "}", "resp", "=", "self", ".", "call", "(", "'wallet_frontiers'", ",", "payload", ")", "return", "resp", ".", "get", "(", "'frontiers'", ")", "or", "{", "}" ]
Returns a list of pairs of account and block hash representing the head block starting for accounts from **wallet** :param wallet: Wallet to return frontiers for :type wallet: str :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.wallet_frontiers( ... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F" ... ) { "xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000000": "000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F" }
[ "Returns", "a", "list", "of", "pairs", "of", "account", "and", "block", "hash", "representing", "the", "head", "block", "starting", "for", "accounts", "from", "**", "wallet", "**" ]
python
train
zarr-developers/zarr
zarr/util.py
https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/util.py#L69-L112
def guess_chunks(shape, typesize): """ Guess an appropriate chunk layout for a dataset, given its shape and the size of each element in bytes. Will allocate chunks only as large as MAX_SIZE. Chunks are generally close to some power-of-2 fraction of each axis, slightly favoring bigger values for the last index. Undocumented and subject to change without warning. """ ndims = len(shape) # require chunks to have non-zero length for all dimensions chunks = np.maximum(np.array(shape, dtype='=f8'), 1) # Determine the optimal chunk size in bytes using a PyTables expression. # This is kept as a float. dset_size = np.product(chunks)*typesize target_size = CHUNK_BASE * (2**np.log10(dset_size/(1024.*1024))) if target_size > CHUNK_MAX: target_size = CHUNK_MAX elif target_size < CHUNK_MIN: target_size = CHUNK_MIN idx = 0 while True: # Repeatedly loop over the axes, dividing them by 2. Stop when: # 1a. We're smaller than the target chunk size, OR # 1b. We're within 50% of the target chunk size, AND # 2. The chunk is smaller than the maximum chunk size chunk_bytes = np.product(chunks)*typesize if (chunk_bytes < target_size or abs(chunk_bytes-target_size)/target_size < 0.5) and \ chunk_bytes < CHUNK_MAX: break if np.product(chunks) == 1: break # Element size larger than CHUNK_MAX chunks[idx % ndims] = np.ceil(chunks[idx % ndims] / 2.0) idx += 1 return tuple(int(x) for x in chunks)
[ "def", "guess_chunks", "(", "shape", ",", "typesize", ")", ":", "ndims", "=", "len", "(", "shape", ")", "# require chunks to have non-zero length for all dimensions", "chunks", "=", "np", ".", "maximum", "(", "np", ".", "array", "(", "shape", ",", "dtype", "=", "'=f8'", ")", ",", "1", ")", "# Determine the optimal chunk size in bytes using a PyTables expression.", "# This is kept as a float.", "dset_size", "=", "np", ".", "product", "(", "chunks", ")", "*", "typesize", "target_size", "=", "CHUNK_BASE", "*", "(", "2", "**", "np", ".", "log10", "(", "dset_size", "/", "(", "1024.", "*", "1024", ")", ")", ")", "if", "target_size", ">", "CHUNK_MAX", ":", "target_size", "=", "CHUNK_MAX", "elif", "target_size", "<", "CHUNK_MIN", ":", "target_size", "=", "CHUNK_MIN", "idx", "=", "0", "while", "True", ":", "# Repeatedly loop over the axes, dividing them by 2. Stop when:", "# 1a. We're smaller than the target chunk size, OR", "# 1b. We're within 50% of the target chunk size, AND", "# 2. The chunk is smaller than the maximum chunk size", "chunk_bytes", "=", "np", ".", "product", "(", "chunks", ")", "*", "typesize", "if", "(", "chunk_bytes", "<", "target_size", "or", "abs", "(", "chunk_bytes", "-", "target_size", ")", "/", "target_size", "<", "0.5", ")", "and", "chunk_bytes", "<", "CHUNK_MAX", ":", "break", "if", "np", ".", "product", "(", "chunks", ")", "==", "1", ":", "break", "# Element size larger than CHUNK_MAX", "chunks", "[", "idx", "%", "ndims", "]", "=", "np", ".", "ceil", "(", "chunks", "[", "idx", "%", "ndims", "]", "/", "2.0", ")", "idx", "+=", "1", "return", "tuple", "(", "int", "(", "x", ")", "for", "x", "in", "chunks", ")" ]
Guess an appropriate chunk layout for a dataset, given its shape and the size of each element in bytes. Will allocate chunks only as large as MAX_SIZE. Chunks are generally close to some power-of-2 fraction of each axis, slightly favoring bigger values for the last index. Undocumented and subject to change without warning.
[ "Guess", "an", "appropriate", "chunk", "layout", "for", "a", "dataset", "given", "its", "shape", "and", "the", "size", "of", "each", "element", "in", "bytes", ".", "Will", "allocate", "chunks", "only", "as", "large", "as", "MAX_SIZE", ".", "Chunks", "are", "generally", "close", "to", "some", "power", "-", "of", "-", "2", "fraction", "of", "each", "axis", "slightly", "favoring", "bigger", "values", "for", "the", "last", "index", ".", "Undocumented", "and", "subject", "to", "change", "without", "warning", "." ]
python
train
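A worked illustration of the target-size expression inside guess_chunks above. The CHUNK_* constants are assumed, illustrative values (the real ones live in zarr/util.py and may differ), so only the shape of the computation is meant to carry over.

import numpy as np

CHUNK_BASE = 256 * 1024        # assumed values for illustration only
CHUNK_MIN = 128 * 1024
CHUNK_MAX = 64 * 1024 * 1024

shape, typesize = (40000, 40000), 8      # a large float64 array
dset_size = np.prod(np.array(shape, dtype='=f8')) * typesize
target = CHUNK_BASE * (2 ** np.log10(dset_size / (1024. * 1024)))
target = min(max(target, CHUNK_MIN), CHUNK_MAX)
print(int(target))   # the per-chunk byte budget the halving loop then aims for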
cltrudeau/django-awl
awl/context_processors.py
https://github.com/cltrudeau/django-awl/blob/70d469ef9a161c1170b53aa017cf02d7c15eb90c/awl/context_processors.py#L4-L19
def extra_context(request): """Adds useful global items to the context for use in templates. * *request*: the request object * *HOST*: host name of server * *IN_ADMIN*: True if you are in the django admin area """ host = os.environ.get('DJANGO_LIVE_TEST_SERVER_ADDRESS', None) \ or request.get_host() d = { 'request':request, 'HOST':host, 'IN_ADMIN':request.path.startswith('/admin/'), } return d
[ "def", "extra_context", "(", "request", ")", ":", "host", "=", "os", ".", "environ", ".", "get", "(", "'DJANGO_LIVE_TEST_SERVER_ADDRESS'", ",", "None", ")", "or", "request", ".", "get_host", "(", ")", "d", "=", "{", "'request'", ":", "request", ",", "'HOST'", ":", "host", ",", "'IN_ADMIN'", ":", "request", ".", "path", ".", "startswith", "(", "'/admin/'", ")", ",", "}", "return", "d" ]
Adds useful global items to the context for use in templates. * *request*: the request object * *HOST*: host name of server * *IN_ADMIN*: True if you are in the django admin area
[ "Adds", "useful", "global", "items", "to", "the", "context", "for", "use", "in", "templates", "." ]
python
valid
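A hedged wiring example for the extra_context record above: a Django settings fragment that registers the processor with the template engine. The dotted path is inferred from the file path shown in the record, and a standard Django settings layout is assumed.

# settings.py (fragment)
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.request",
                # dotted path assumed from the record's file path
                "awl.context_processors.extra_context",
            ],
        },
    },
]
# Templates can then use {{ HOST }} and {{ IN_ADMIN }} directly.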
adewes/blitzdb
blitzdb/backends/base.py
https://github.com/adewes/blitzdb/blob/4b459e0bcde9e1f6224dd4e3bea74194586864b0/blitzdb/backends/base.py#L190-L300
def serialize(self, obj, convert_keys_to_str=False, embed_level=0, encoders=None, autosave=True, for_query=False,path = None): """ Serializes a given object, i.e. converts it to a representation that can be stored in the database. This usually involves replacing all `Document` instances by database references to them. :param obj: The object to serialize. :param convert_keys_to_str: If `True`, converts all dictionary keys to string (this is e.g. required for the MongoDB backend) :param embed_level: If `embed_level > 0`, instances of `Document` classes will be embedded instead of referenced. The value of the parameter will get decremented by 1 when calling `serialize` on child objects. :param autosave: Whether to automatically save embedded objects without a primary key to the database. :param for_query: If true, only the `pk` and `__collection__` attributes will be included in document references. :returns: The serialized object. """ if path is None: path = [] def get_value(obj,key): key_fragments = key.split(".") current_dict = obj for key_fragment in key_fragments: current_dict = current_dict[key_fragment] return current_dict serialize_with_opts = lambda value,*args,**kwargs : self.serialize(value,*args, encoders = encoders, convert_keys_to_str = convert_keys_to_str, autosave = autosave, for_query = for_query, **kwargs) if encoders is None: encoders = [] for encoder in self.standard_encoders+encoders: obj = encoder.encode(obj,path = path) def encode_as_str(obj): if six.PY3: return str(obj) else: if isinstance(obj,unicode): return obj elif isinstance(obj,str): return unicode(obj) else: return unicode(str(obj),errors='replace') if isinstance(obj, dict): output_obj = {} for key, value in obj.items(): new_path = path[:]+[key] try: output_obj[encode_as_str(key) if convert_keys_to_str else key] = serialize_with_opts(value, embed_level=embed_level,path = new_path) except DoNotSerialize: pass elif isinstance(obj,six.string_types): output_obj = encode_as_str(obj) elif isinstance(obj, (list,tuple)): try: output_obj = [serialize_with_opts(x, embed_level=embed_level,path = path[:]+[i]) for i,x in enumerate(obj)] except DoNotSerialize: pass elif isinstance(obj, Document): collection = self.get_collection_for_obj(obj) if embed_level > 0: try: output_obj = self.serialize(obj, embed_level=embed_level-1) except obj.DoesNotExist:#cannot load object, ignoring... 
output_obj = self.serialize(obj.lazy_attributes, embed_level=embed_level-1) except DoNotSerialize: pass elif obj.embed: output_obj = self.serialize(obj) else: if obj.pk == None and autosave: obj.save(self) if obj._lazy: # We make sure that all attributes that are already present get included in the reference output_obj = {} if obj.get_pk_name() in output_obj: del output_obj[obj.get_pk_name()] output_obj['pk'] = obj.pk output_obj['__collection__'] = self.classes[obj.__class__]['collection'] else: if for_query and not self._allow_documents_in_query: raise ValueError("Documents are not allowed in queries!") if for_query: output_obj = {'$elemMatch' : {'pk':obj.pk,'__collection__':self.classes[obj.__class__]['collection']}} else: ref = "%s:%s" % (self.classes[obj.__class__]['collection'],str(obj.pk)) output_obj = {'__ref__' : ref,'pk':obj.pk,'__collection__':self.classes[obj.__class__]['collection']} if hasattr(obj,'Meta') and hasattr(obj.Meta,'dbref_includes') and obj.Meta.dbref_includes: for include_key in obj.Meta.dbref_includes: try: value = get_value(obj,include_key) output_obj[include_key.replace(".","_")] = value except KeyError: continue else: output_obj = obj return output_obj
[ "def", "serialize", "(", "self", ",", "obj", ",", "convert_keys_to_str", "=", "False", ",", "embed_level", "=", "0", ",", "encoders", "=", "None", ",", "autosave", "=", "True", ",", "for_query", "=", "False", ",", "path", "=", "None", ")", ":", "if", "path", "is", "None", ":", "path", "=", "[", "]", "def", "get_value", "(", "obj", ",", "key", ")", ":", "key_fragments", "=", "key", ".", "split", "(", "\".\"", ")", "current_dict", "=", "obj", "for", "key_fragment", "in", "key_fragments", ":", "current_dict", "=", "current_dict", "[", "key_fragment", "]", "return", "current_dict", "serialize_with_opts", "=", "lambda", "value", ",", "*", "args", ",", "*", "*", "kwargs", ":", "self", ".", "serialize", "(", "value", ",", "*", "args", ",", "encoders", "=", "encoders", ",", "convert_keys_to_str", "=", "convert_keys_to_str", ",", "autosave", "=", "autosave", ",", "for_query", "=", "for_query", ",", "*", "*", "kwargs", ")", "if", "encoders", "is", "None", ":", "encoders", "=", "[", "]", "for", "encoder", "in", "self", ".", "standard_encoders", "+", "encoders", ":", "obj", "=", "encoder", ".", "encode", "(", "obj", ",", "path", "=", "path", ")", "def", "encode_as_str", "(", "obj", ")", ":", "if", "six", ".", "PY3", ":", "return", "str", "(", "obj", ")", "else", ":", "if", "isinstance", "(", "obj", ",", "unicode", ")", ":", "return", "obj", "elif", "isinstance", "(", "obj", ",", "str", ")", ":", "return", "unicode", "(", "obj", ")", "else", ":", "return", "unicode", "(", "str", "(", "obj", ")", ",", "errors", "=", "'replace'", ")", "if", "isinstance", "(", "obj", ",", "dict", ")", ":", "output_obj", "=", "{", "}", "for", "key", ",", "value", "in", "obj", ".", "items", "(", ")", ":", "new_path", "=", "path", "[", ":", "]", "+", "[", "key", "]", "try", ":", "output_obj", "[", "encode_as_str", "(", "key", ")", "if", "convert_keys_to_str", "else", "key", "]", "=", "serialize_with_opts", "(", "value", ",", "embed_level", "=", "embed_level", ",", "path", "=", "new_path", ")", "except", "DoNotSerialize", ":", "pass", "elif", "isinstance", "(", "obj", ",", "six", ".", "string_types", ")", ":", "output_obj", "=", "encode_as_str", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "(", "list", ",", "tuple", ")", ")", ":", "try", ":", "output_obj", "=", "[", "serialize_with_opts", "(", "x", ",", "embed_level", "=", "embed_level", ",", "path", "=", "path", "[", ":", "]", "+", "[", "i", "]", ")", "for", "i", ",", "x", "in", "enumerate", "(", "obj", ")", "]", "except", "DoNotSerialize", ":", "pass", "elif", "isinstance", "(", "obj", ",", "Document", ")", ":", "collection", "=", "self", ".", "get_collection_for_obj", "(", "obj", ")", "if", "embed_level", ">", "0", ":", "try", ":", "output_obj", "=", "self", ".", "serialize", "(", "obj", ",", "embed_level", "=", "embed_level", "-", "1", ")", "except", "obj", ".", "DoesNotExist", ":", "#cannot load object, ignoring...", "output_obj", "=", "self", ".", "serialize", "(", "obj", ".", "lazy_attributes", ",", "embed_level", "=", "embed_level", "-", "1", ")", "except", "DoNotSerialize", ":", "pass", "elif", "obj", ".", "embed", ":", "output_obj", "=", "self", ".", "serialize", "(", "obj", ")", "else", ":", "if", "obj", ".", "pk", "==", "None", "and", "autosave", ":", "obj", ".", "save", "(", "self", ")", "if", "obj", ".", "_lazy", ":", "# We make sure that all attributes that are already present get included in the reference", "output_obj", "=", "{", "}", "if", "obj", ".", "get_pk_name", "(", ")", "in", "output_obj", ":", "del", "output_obj", "[", "obj", ".", 
"get_pk_name", "(", ")", "]", "output_obj", "[", "'pk'", "]", "=", "obj", ".", "pk", "output_obj", "[", "'__collection__'", "]", "=", "self", ".", "classes", "[", "obj", ".", "__class__", "]", "[", "'collection'", "]", "else", ":", "if", "for_query", "and", "not", "self", ".", "_allow_documents_in_query", ":", "raise", "ValueError", "(", "\"Documents are not allowed in queries!\"", ")", "if", "for_query", ":", "output_obj", "=", "{", "'$elemMatch'", ":", "{", "'pk'", ":", "obj", ".", "pk", ",", "'__collection__'", ":", "self", ".", "classes", "[", "obj", ".", "__class__", "]", "[", "'collection'", "]", "}", "}", "else", ":", "ref", "=", "\"%s:%s\"", "%", "(", "self", ".", "classes", "[", "obj", ".", "__class__", "]", "[", "'collection'", "]", ",", "str", "(", "obj", ".", "pk", ")", ")", "output_obj", "=", "{", "'__ref__'", ":", "ref", ",", "'pk'", ":", "obj", ".", "pk", ",", "'__collection__'", ":", "self", ".", "classes", "[", "obj", ".", "__class__", "]", "[", "'collection'", "]", "}", "if", "hasattr", "(", "obj", ",", "'Meta'", ")", "and", "hasattr", "(", "obj", ".", "Meta", ",", "'dbref_includes'", ")", "and", "obj", ".", "Meta", ".", "dbref_includes", ":", "for", "include_key", "in", "obj", ".", "Meta", ".", "dbref_includes", ":", "try", ":", "value", "=", "get_value", "(", "obj", ",", "include_key", ")", "output_obj", "[", "include_key", ".", "replace", "(", "\".\"", ",", "\"_\"", ")", "]", "=", "value", "except", "KeyError", ":", "continue", "else", ":", "output_obj", "=", "obj", "return", "output_obj" ]
Serializes a given object, i.e. converts it to a representation that can be stored in the database. This usually involves replacing all `Document` instances by database references to them. :param obj: The object to serialize. :param convert_keys_to_str: If `True`, converts all dictionary keys to string (this is e.g. required for the MongoDB backend) :param embed_level: If `embed_level > 0`, instances of `Document` classes will be embedded instead of referenced. The value of the parameter will get decremented by 1 when calling `serialize` on child objects. :param autosave: Whether to automatically save embedded objects without a primary key to the database. :param for_query: If true, only the `pk` and `__collection__` attributes will be included in document references. :returns: The serialized object.
[ "Serializes", "a", "given", "object", "i", ".", "e", ".", "converts", "it", "to", "a", "representation", "that", "can", "be", "stored", "in", "the", "database", ".", "This", "usually", "involves", "replacing", "all", "Document", "instances", "by", "database", "references", "to", "them", "." ]
python
train
NLeSC/noodles
noodles/tutorial.py
https://github.com/NLeSC/noodles/blob/3759e24e6e54a3a1a364431309dbb1061f617c04/noodles/tutorial.py#L167-L192
def run_and_print_log(workflow, highlight=None): """Run workflow on multi-threaded worker cached with Sqlite3. :param workflow: workflow to evaluate. :param highlight: highlight these lines. """ from noodles.run.threading.sqlite3 import run_parallel from noodles import serial import io import logging log = io.StringIO() log_handler = logging.StreamHandler(log) formatter = logging.Formatter('%(asctime)s - %(message)s') log_handler.setFormatter(formatter) logger = logging.getLogger('noodles') logger.setLevel(logging.INFO) logger.handlers = [log_handler] result = run_parallel( workflow, n_threads=4, registry=serial.base, db_file='tutorial.db', always_cache=True, echo_log=False) display_text(log.getvalue(), highlight or [], split_at=40) return result
[ "def", "run_and_print_log", "(", "workflow", ",", "highlight", "=", "None", ")", ":", "from", "noodles", ".", "run", ".", "threading", ".", "sqlite3", "import", "run_parallel", "from", "noodles", "import", "serial", "import", "io", "import", "logging", "log", "=", "io", ".", "StringIO", "(", ")", "log_handler", "=", "logging", ".", "StreamHandler", "(", "log", ")", "formatter", "=", "logging", ".", "Formatter", "(", "'%(asctime)s - %(message)s'", ")", "log_handler", ".", "setFormatter", "(", "formatter", ")", "logger", "=", "logging", ".", "getLogger", "(", "'noodles'", ")", "logger", ".", "setLevel", "(", "logging", ".", "INFO", ")", "logger", ".", "handlers", "=", "[", "log_handler", "]", "result", "=", "run_parallel", "(", "workflow", ",", "n_threads", "=", "4", ",", "registry", "=", "serial", ".", "base", ",", "db_file", "=", "'tutorial.db'", ",", "always_cache", "=", "True", ",", "echo_log", "=", "False", ")", "display_text", "(", "log", ".", "getvalue", "(", ")", ",", "highlight", "or", "[", "]", ",", "split_at", "=", "40", ")", "return", "result" ]
Run workflow on multi-threaded worker cached with Sqlite3. :param workflow: workflow to evaluate. :param highlight: highlight these lines.
[ "Run", "workflow", "on", "multi", "-", "threaded", "worker", "cached", "with", "Sqlite3", "." ]
python
train
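The noodles-specific pieces of run_and_print_log above are hard to reproduce without a workflow and a Sqlite cache, but the log-capture trick it relies on is plain standard library. A minimal, self-contained sketch of that part:

import io
import logging

log = io.StringIO()
handler = logging.StreamHandler(log)
handler.setFormatter(logging.Formatter('%(asctime)s - %(message)s'))

logger = logging.getLogger('demo')
logger.setLevel(logging.INFO)
logger.handlers = [handler]

logger.info("hello from the captured logger")
print(log.getvalue())   # the formatted record, now available as an ordinary string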
trp07/messages
messages/cli.py
https://github.com/trp07/messages/blob/7789ebc960335a59ea5d319fceed3dd349023648/messages/cli.py#L20-L24
def get_body_from_file(kwds): """Reads message body if specified via filepath.""" if kwds["file"] and os.path.isfile(kwds["file"]): kwds["body"] = open(kwds["file"], "r").read() kwds["file"] = None
[ "def", "get_body_from_file", "(", "kwds", ")", ":", "if", "kwds", "[", "\"file\"", "]", "and", "os", ".", "path", ".", "isfile", "(", "kwds", "[", "\"file\"", "]", ")", ":", "kwds", "[", "\"body\"", "]", "=", "open", "(", "kwds", "[", "\"file\"", "]", ",", "\"r\"", ")", ".", "read", "(", ")", "kwds", "[", "\"file\"", "]", "=", "None" ]
Reads message body if specified via filepath.
[ "Reads", "message", "body", "if", "specified", "via", "filepath", "." ]
python
test
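A small usage sketch for get_body_from_file above. The kwds dict is a hypothetical stand-in for what the CLI collects, and the function itself is assumed to be in scope (or importable from messages.cli).

import os
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("Hello from a file")
    path = f.name

kwds = {"file": path, "body": None}      # hypothetical CLI keyword arguments
get_body_from_file(kwds)                 # assumes the function above is in scope
print(kwds["body"])                      # 'Hello from a file'
print(kwds["file"])                      # None -- the path is cleared after reading
os.remove(path)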
sdispater/poetry
poetry/utils/shell.py
https://github.com/sdispater/poetry/blob/2d27acd76c165dd49f11934520a7973de7a3762a/poetry/utils/shell.py#L27-L41
def get(cls): # type: () -> Shell """ Retrieve the current shell. """ if cls._shell is not None: return cls._shell try: name, path = detect_shell(os.getpid()) except (RuntimeError, ShellDetectionFailure): raise RuntimeError("Unable to detect the current shell.") cls._shell = cls(name, path) return cls._shell
[ "def", "get", "(", "cls", ")", ":", "# type: () -> Shell", "if", "cls", ".", "_shell", "is", "not", "None", ":", "return", "cls", ".", "_shell", "try", ":", "name", ",", "path", "=", "detect_shell", "(", "os", ".", "getpid", "(", ")", ")", "except", "(", "RuntimeError", ",", "ShellDetectionFailure", ")", ":", "raise", "RuntimeError", "(", "\"Unable to detect the current shell.\"", ")", "cls", ".", "_shell", "=", "cls", "(", "name", ",", "path", ")", "return", "cls", ".", "_shell" ]
Retrieve the current shell.
[ "Retrieve", "the", "current", "shell", "." ]
python
train
BDNYC/astrodbkit
astrodbkit/astrocat.py
https://github.com/BDNYC/astrodbkit/blob/02c03c5e91aa7c7b0f3b5fa95bcf71e33ffcee09/astrodbkit/astrocat.py#L246-L281
def SDSS_spectra_query(self, cat_name, ra, dec, radius, group=True, **kwargs): """ Use astroquery to search SDSS for sources within a search cone Parameters ---------- cat_name: str A name for the imported catalog (e.g. '2MASS') ra: astropy.units.quantity.Quantity The RA of the center of the cone search dec: astropy.units.quantity.Quantity The Dec of the center of the cone search radius: astropy.units.quantity.Quantity The radius of the cone search """ # Verify the cat_name if self._catalog_check(cat_name): # Prep the current catalog as an astropy.QTable tab = at.Table.from_pandas(self.catalog) # Cone search Vizier print("Searching SDSS for sources within {} of ({}, {}). Please be patient...".format(viz_cat, radius, ra, dec)) crds = coord.SkyCoord(ra=ra, dec=dec, frame='icrs') try: data = SDSS.query_region(crds, spectro=True, radius=radius) except: print("No data found in SDSS within {} of ({}, {}).".format(viz_cat, radius, ra, dec)) return # Ingest the data self.ingest_data(data, cat_name, 'id', ra_col=ra_col, dec_col=dec_col) # Regroup if len(self.catalogs)>1 and group: self.group_sources(self.xmatch_radius)
[ "def", "SDSS_spectra_query", "(", "self", ",", "cat_name", ",", "ra", ",", "dec", ",", "radius", ",", "group", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# Verify the cat_name", "if", "self", ".", "_catalog_check", "(", "cat_name", ")", ":", "# Prep the current catalog as an astropy.QTable", "tab", "=", "at", ".", "Table", ".", "from_pandas", "(", "self", ".", "catalog", ")", "# Cone search Vizier", "print", "(", "\"Searching SDSS for sources within {} of ({}, {}). Please be patient...\"", ".", "format", "(", "viz_cat", ",", "radius", ",", "ra", ",", "dec", ")", ")", "crds", "=", "coord", ".", "SkyCoord", "(", "ra", "=", "ra", ",", "dec", "=", "dec", ",", "frame", "=", "'icrs'", ")", "try", ":", "data", "=", "SDSS", ".", "query_region", "(", "crds", ",", "spectro", "=", "True", ",", "radius", "=", "radius", ")", "except", ":", "print", "(", "\"No data found in SDSS within {} of ({}, {}).\"", ".", "format", "(", "viz_cat", ",", "radius", ",", "ra", ",", "dec", ")", ")", "return", "# Ingest the data", "self", ".", "ingest_data", "(", "data", ",", "cat_name", ",", "'id'", ",", "ra_col", "=", "ra_col", ",", "dec_col", "=", "dec_col", ")", "# Regroup", "if", "len", "(", "self", ".", "catalogs", ")", ">", "1", "and", "group", ":", "self", ".", "group_sources", "(", "self", ".", "xmatch_radius", ")" ]
Use astroquery to search SDSS for sources within a search cone Parameters ---------- cat_name: str A name for the imported catalog (e.g. '2MASS') ra: astropy.units.quantity.Quantity The RA of the center of the cone search dec: astropy.units.quantity.Quantity The Dec of the center of the cone search radius: astropy.units.quantity.Quantity The radius of the cone search
[ "Use", "astroquery", "to", "search", "SDSS", "for", "sources", "within", "a", "search", "cone", "Parameters", "----------", "cat_name", ":", "str", "A", "name", "for", "the", "imported", "catalog", "(", "e", ".", "g", ".", "2MASS", ")", "ra", ":", "astropy", ".", "units", ".", "quantity", ".", "Quantity", "The", "RA", "of", "the", "center", "of", "the", "cone", "search", "dec", ":", "astropy", ".", "units", ".", "quantity", ".", "Quantity", "The", "Dec", "of", "the", "center", "of", "the", "cone", "search", "radius", ":", "astropy", ".", "units", ".", "quantity", ".", "Quantity", "The", "radius", "of", "the", "cone", "search" ]
python
train
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/core_v1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/core_v1_api.py#L5347-L5370
def create_namespace(self, body, **kwargs): # noqa: E501 """create_namespace # noqa: E501 create a Namespace # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespace(body, async_req=True) >>> result = thread.get() :param async_req bool :param V1Namespace body: (required) :param bool include_uninitialized: If IncludeUninitialized is specified, the object may be returned without completing initialization. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Namespace If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_namespace_with_http_info(body, **kwargs) # noqa: E501 else: (data) = self.create_namespace_with_http_info(body, **kwargs) # noqa: E501 return data
[ "def", "create_namespace", "(", "self", ",", "body", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "create_namespace_with_http_info", "(", "body", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "create_namespace_with_http_info", "(", "body", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
create_namespace # noqa: E501 create a Namespace # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespace(body, async_req=True) >>> result = thread.get() :param async_req bool :param V1Namespace body: (required) :param bool include_uninitialized: If IncludeUninitialized is specified, the object may be returned without completing initialization. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Namespace If the method is called asynchronously, returns the request thread.
[ "create_namespace", "#", "noqa", ":", "E501" ]
python
train
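A hedged end-to-end sketch for the create_namespace record above, assuming a reachable cluster, a local kubeconfig, and a recent kubernetes_asyncio release (the config-loading and context-manager details vary slightly between versions).

import asyncio
from kubernetes_asyncio import client, config

async def main():
    await config.load_kube_config()                      # async in kubernetes_asyncio
    async with client.ApiClient() as api_client:
        v1 = client.CoreV1Api(api_client)
        body = client.V1Namespace(
            metadata=client.V1ObjectMeta(name="demo-namespace"))  # hypothetical name
        ns = await v1.create_namespace(body)
        print(ns.metadata.name)

asyncio.run(main())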
ggravlingen/pytradfri
pytradfri/command.py
https://github.com/ggravlingen/pytradfri/blob/63750fa8fb27158c013d24865cdaa7fb82b3ab53/pytradfri/command.py#L65-L70
def result(self, value): """The result of the command.""" if self._process_result: self._result = self._process_result(value) self._raw_result = value
[ "def", "result", "(", "self", ",", "value", ")", ":", "if", "self", ".", "_process_result", ":", "self", ".", "_result", "=", "self", ".", "_process_result", "(", "value", ")", "self", ".", "_raw_result", "=", "value" ]
The result of the command.
[ "The", "result", "of", "the", "command", "." ]
python
train
mikedh/trimesh
trimesh/voxel.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/voxel.py#L669-L696
def indices_to_points(indices, pitch, origin): """ Convert indices of an (n,m,p) matrix into a set of voxel center points. Parameters ---------- indices: (q, 3) int, index of voxel matrix (n,m,p) pitch: float, what pitch was the voxel matrix computed with origin: (3,) float, what is the origin of the voxel matrix Returns ---------- points: (q, 3) float, list of points """ indices = np.asanyarray(indices, dtype=np.float64) origin = np.asanyarray(origin, dtype=np.float64) pitch = float(pitch) if indices.shape != (indices.shape[0], 3): from IPython import embed embed() raise ValueError('shape of indices must be (q, 3)') if origin.shape != (3,): raise ValueError('shape of origin must be (3,)') points = indices * pitch + origin return points
[ "def", "indices_to_points", "(", "indices", ",", "pitch", ",", "origin", ")", ":", "indices", "=", "np", ".", "asanyarray", "(", "indices", ",", "dtype", "=", "np", ".", "float64", ")", "origin", "=", "np", ".", "asanyarray", "(", "origin", ",", "dtype", "=", "np", ".", "float64", ")", "pitch", "=", "float", "(", "pitch", ")", "if", "indices", ".", "shape", "!=", "(", "indices", ".", "shape", "[", "0", "]", ",", "3", ")", ":", "from", "IPython", "import", "embed", "embed", "(", ")", "raise", "ValueError", "(", "'shape of indices must be (q, 3)'", ")", "if", "origin", ".", "shape", "!=", "(", "3", ",", ")", ":", "raise", "ValueError", "(", "'shape of origin must be (3,)'", ")", "points", "=", "indices", "*", "pitch", "+", "origin", "return", "points" ]
Convert indices of an (n,m,p) matrix into a set of voxel center points. Parameters ---------- indices: (q, 3) int, index of voxel matrix (n,m,p) pitch: float, what pitch was the voxel matrix computed with origin: (3,) float, what is the origin of the voxel matrix Returns ---------- points: (q, 3) float, list of points
[ "Convert", "indices", "of", "an", "(", "n", "m", "p", ")", "matrix", "into", "a", "set", "of", "voxel", "center", "points", "." ]
python
train
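A small numpy check of the arithmetic in indices_to_points above, with made-up indices, pitch and origin; it performs the same indices * pitch + origin step without going through trimesh.

import numpy as np

indices = np.array([[0, 0, 0], [1, 2, 3]], dtype=np.float64)
pitch = 0.5
origin = np.array([10.0, 0.0, -5.0])

points = indices * pitch + origin
print(points)
# [[10.   0.  -5. ]
#  [10.5  1.  -3.5]]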
pzs741/TEDT
TEDT/release_time.py
https://github.com/pzs741/TEDT/blob/6b6663227b755005fe1a1e3e807a05bdb521e066/TEDT/release_time.py#L173-L185
def check_time_extrator(self):
    """Convert the extracted time into the standard date format.

    Keyword arguments:
    string -- text that contains a time, of type str
    Return:
    release_time -- the publication time of the news item
    """
    if self.year_check and self.month_check and self.day_check:
        time = str(self.year) + '-' + str(self.month) + '-' + str(self.day)
        release_time = datetime.datetime.strptime(time, "%Y-%m-%d").date()
        return release_time
[ "def", "check_time_extrator", "(", "self", ")", ":", "if", "self", ".", "year_check", "and", "self", ".", "month_check", "and", "self", ".", "day_check", ":", "time", "=", "str", "(", "self", ".", "year", ")", "+", "'-'", "+", "str", "(", "self", ".", "month", ")", "+", "'-'", "+", "str", "(", "self", ".", "day", ")", "release_time", "=", "datetime", ".", "datetime", ".", "strptime", "(", "time", ",", "\"%Y-%m-%d\"", ")", ".", "date", "(", ")", "return", "release_time" ]
Convert the extracted time into the standard date format.

Keyword arguments:
string -- text that contains a time, of type str
Return:
release_time -- the publication time of the news item
[ "Convert", "the", "extracted", "time", "into", "the", "standard", "date", "format" ]
python
train
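A standalone sketch of the conversion step used by check_time_extrator above: joining year, month and day into a string and parsing it back with strptime. The numbers are arbitrary.

import datetime

year, month, day = 2021, 3, 7            # arbitrary example values
time = str(year) + '-' + str(month) + '-' + str(day)
release_time = datetime.datetime.strptime(time, "%Y-%m-%d").date()
print(release_time)                       # 2021-03-07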
manns/pyspread
pyspread/src/model/model.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/model/model.py#L568-L603
def _set_shape(self, shape): """Deletes all cells beyond new shape and sets dict_grid shape Parameters ---------- shape: 3-tuple of Integer \tTarget shape for grid """ # Delete each cell that is beyond new borders old_shape = self.shape deleted_cells = {} if any(new_axis < old_axis for new_axis, old_axis in zip(shape, old_shape)): for key in self.dict_grid.keys(): if any(key_ele >= new_axis for key_ele, new_axis in zip(key, shape)): deleted_cells[key] = self.pop(key) # Set dict_grid shape attribute self.dict_grid.shape = shape self._adjust_rowcol(0, 0, 0) self._adjust_cell_attributes(0, 0, 0) # Undo actions yield "_set_shape" self.shape = old_shape for key in deleted_cells: self[key] = deleted_cells[key]
[ "def", "_set_shape", "(", "self", ",", "shape", ")", ":", "# Delete each cell that is beyond new borders", "old_shape", "=", "self", ".", "shape", "deleted_cells", "=", "{", "}", "if", "any", "(", "new_axis", "<", "old_axis", "for", "new_axis", ",", "old_axis", "in", "zip", "(", "shape", ",", "old_shape", ")", ")", ":", "for", "key", "in", "self", ".", "dict_grid", ".", "keys", "(", ")", ":", "if", "any", "(", "key_ele", ">=", "new_axis", "for", "key_ele", ",", "new_axis", "in", "zip", "(", "key", ",", "shape", ")", ")", ":", "deleted_cells", "[", "key", "]", "=", "self", ".", "pop", "(", "key", ")", "# Set dict_grid shape attribute", "self", ".", "dict_grid", ".", "shape", "=", "shape", "self", ".", "_adjust_rowcol", "(", "0", ",", "0", ",", "0", ")", "self", ".", "_adjust_cell_attributes", "(", "0", ",", "0", ",", "0", ")", "# Undo actions", "yield", "\"_set_shape\"", "self", ".", "shape", "=", "old_shape", "for", "key", "in", "deleted_cells", ":", "self", "[", "key", "]", "=", "deleted_cells", "[", "key", "]" ]
Deletes all cells beyond new shape and sets dict_grid shape Parameters ---------- shape: 3-tuple of Integer \tTarget shape for grid
[ "Deletes", "all", "cells", "beyond", "new", "shape", "and", "sets", "dict_grid", "shape" ]
python
train
nerdvegas/rez
src/rez/vendor/pygraph/algorithms/accessibility.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pygraph/algorithms/accessibility.py#L143-L163
def _dfs(graph, visited, count, node): """ Depth-first search subfunction adapted for accessibility algorithms. @type graph: graph, digraph, hypergraph @param graph: Graph. @type visited: dictionary @param visited: List of nodes (visited nodes are marked non-zero). @type count: number @param count: Counter of connected components. @type node: node @param node: Node to be explored by DFS. """ visited[node] = count # Explore recursively the connected component for each in graph[node]: if (each not in visited): _dfs(graph, visited, count, each)
[ "def", "_dfs", "(", "graph", ",", "visited", ",", "count", ",", "node", ")", ":", "visited", "[", "node", "]", "=", "count", "# Explore recursively the connected component", "for", "each", "in", "graph", "[", "node", "]", ":", "if", "(", "each", "not", "in", "visited", ")", ":", "_dfs", "(", "graph", ",", "visited", ",", "count", ",", "each", ")" ]
Depth-first search subfunction adapted for accessibility algorithms. @type graph: graph, digraph, hypergraph @param graph: Graph. @type visited: dictionary @param visited: List of nodes (visited nodes are marked non-zero). @type count: number @param count: Counter of connected components. @type node: node @param node: Node to be explored by DFS.
[ "Depth", "-", "first", "search", "subfunction", "adapted", "for", "accessibility", "algorithms", ".", "@type", "graph", ":", "graph", "digraph", "hypergraph", "@param", "graph", ":", "Graph", "." ]
python
train
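A usage sketch for the _dfs helper above. A plain dict of adjacency lists stands in for pygraph's graph types (anything supporting graph[node] iteration works), and the function itself is assumed to be in scope.

graph = {
    "a": ["b", "c"],
    "b": ["a", "d"],
    "c": ["a"],
    "d": ["b"],
    "e": [],          # a second, disconnected component
}
visited = {}
_dfs(graph, visited, count=1, node="a")   # assumes _dfs from the record above is in scope
print(visited)        # {'a': 1, 'b': 1, 'd': 1, 'c': 1} -- 'e' stays unvisited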
trendels/gevent_inotifyx
example.py
https://github.com/trendels/gevent_inotifyx/blob/b1e531616d150e86b13aeca450a61c66f9bbc855/example.py#L16-L25
def watch_for_events(): """Wait for events and print them to stdout.""" fd = inotify.init() try: wd = inotify.add_watch(fd, '/tmp', inotify.IN_CLOSE_WRITE) while True: for event in inotify.get_events(fd): print("event:", event.name, event.get_mask_description()) finally: os.close(fd)
[ "def", "watch_for_events", "(", ")", ":", "fd", "=", "inotify", ".", "init", "(", ")", "try", ":", "wd", "=", "inotify", ".", "add_watch", "(", "fd", ",", "'/tmp'", ",", "inotify", ".", "IN_CLOSE_WRITE", ")", "while", "True", ":", "for", "event", "in", "inotify", ".", "get_events", "(", "fd", ")", ":", "print", "(", "\"event:\"", ",", "event", ".", "name", ",", "event", ".", "get_mask_description", "(", ")", ")", "finally", ":", "os", ".", "close", "(", "fd", ")" ]
Wait for events and print them to stdout.
[ "Wait", "for", "events", "and", "print", "them", "to", "stdout", "." ]
python
train
RetailMeNotSandbox/acky
acky/s3.py
https://github.com/RetailMeNotSandbox/acky/blob/fcd4d092c42892ede7c924cafc41e9cf4be3fb9f/acky/s3.py#L113-L125
def copy(self, src_url, dst_url): """Copy an S3 object to another S3 location.""" src_bucket, src_key = _parse_url(src_url) dst_bucket, dst_key = _parse_url(dst_url) if not dst_bucket: dst_bucket = src_bucket params = { 'copy_source': '/'.join((src_bucket, src_key)), 'bucket': dst_bucket, 'key': dst_key, } return self.call("CopyObject", **params)
[ "def", "copy", "(", "self", ",", "src_url", ",", "dst_url", ")", ":", "src_bucket", ",", "src_key", "=", "_parse_url", "(", "src_url", ")", "dst_bucket", ",", "dst_key", "=", "_parse_url", "(", "dst_url", ")", "if", "not", "dst_bucket", ":", "dst_bucket", "=", "src_bucket", "params", "=", "{", "'copy_source'", ":", "'/'", ".", "join", "(", "(", "src_bucket", ",", "src_key", ")", ")", ",", "'bucket'", ":", "dst_bucket", ",", "'key'", ":", "dst_key", ",", "}", "return", "self", ".", "call", "(", "\"CopyObject\"", ",", "*", "*", "params", ")" ]
Copy an S3 object to another S3 location.
[ "Copy", "an", "S3", "object", "to", "another", "S3", "location", "." ]
python
train
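An equivalent raw call sketched with boto3 rather than acky, for the copy record above: CopySource mirrors the 'bucket/key' string the wrapper builds. The bucket and key names are hypothetical, and real credentials plus an existing source object are assumed.

import boto3

s3 = boto3.client("s3")
s3.copy_object(
    CopySource="source-bucket/path/to/object",   # hypothetical source
    Bucket="destination-bucket",                 # hypothetical destination bucket
    Key="path/to/copy",
)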
romanz/trezor-agent
libagent/gpg/decode.py
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/decode.py#L284-L300
def load_by_keygrip(pubkey_bytes, keygrip): """Return public key and first user ID for specified keygrip.""" stream = io.BytesIO(pubkey_bytes) packets = list(parse_packets(stream)) packets_per_pubkey = [] for p in packets: if p['type'] == 'pubkey': # Add a new packet list for each pubkey. packets_per_pubkey.append([]) packets_per_pubkey[-1].append(p) for packets in packets_per_pubkey: user_ids = [p for p in packets if p['type'] == 'user_id'] for p in packets: if p.get('keygrip') == keygrip: return p, user_ids raise KeyError('{} keygrip not found'.format(util.hexlify(keygrip)))
[ "def", "load_by_keygrip", "(", "pubkey_bytes", ",", "keygrip", ")", ":", "stream", "=", "io", ".", "BytesIO", "(", "pubkey_bytes", ")", "packets", "=", "list", "(", "parse_packets", "(", "stream", ")", ")", "packets_per_pubkey", "=", "[", "]", "for", "p", "in", "packets", ":", "if", "p", "[", "'type'", "]", "==", "'pubkey'", ":", "# Add a new packet list for each pubkey.", "packets_per_pubkey", ".", "append", "(", "[", "]", ")", "packets_per_pubkey", "[", "-", "1", "]", ".", "append", "(", "p", ")", "for", "packets", "in", "packets_per_pubkey", ":", "user_ids", "=", "[", "p", "for", "p", "in", "packets", "if", "p", "[", "'type'", "]", "==", "'user_id'", "]", "for", "p", "in", "packets", ":", "if", "p", ".", "get", "(", "'keygrip'", ")", "==", "keygrip", ":", "return", "p", ",", "user_ids", "raise", "KeyError", "(", "'{} keygrip not found'", ".", "format", "(", "util", ".", "hexlify", "(", "keygrip", ")", ")", ")" ]
Return public key and first user ID for specified keygrip.
[ "Return", "public", "key", "and", "first", "user", "ID", "for", "specified", "keygrip", "." ]
python
train
sebdah/dynamic-dynamodb
dynamic_dynamodb/calculators.py
https://github.com/sebdah/dynamic-dynamodb/blob/bfd0ca806b1c3301e724696de90ef0f973410493/dynamic_dynamodb/calculators.py#L342-L358
def is_consumed_over_proposed( current_provisioning, proposed_provisioning, consumed_units_percent): """ Determines if the currently consumed capacity is over the proposed capacity for this table :type current_provisioning: int :param current_provisioning: The current provisioning :type proposed_provisioning: int :param proposed_provisioning: New provisioning :type consumed_units_percent: float :param consumed_units_percent: Percent of consumed units :returns: bool - if consumed is over max """ consumption_based_current_provisioning = \ int(math.ceil(current_provisioning*(consumed_units_percent/100))) return consumption_based_current_provisioning > proposed_provisioning
[ "def", "is_consumed_over_proposed", "(", "current_provisioning", ",", "proposed_provisioning", ",", "consumed_units_percent", ")", ":", "consumption_based_current_provisioning", "=", "int", "(", "math", ".", "ceil", "(", "current_provisioning", "*", "(", "consumed_units_percent", "/", "100", ")", ")", ")", "return", "consumption_based_current_provisioning", ">", "proposed_provisioning" ]
Determines if the currently consumed capacity is over the proposed capacity for this table :type current_provisioning: int :param current_provisioning: The current provisioning :type proposed_provisioning: int :param proposed_provisioning: New provisioning :type consumed_units_percent: float :param consumed_units_percent: Percent of consumed units :returns: bool - if consumed is over max
[ "Determines", "if", "the", "currently", "consumed", "capacity", "is", "over", "the", "proposed", "capacity", "for", "this", "table" ]
python
train
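A worked numeric example of the comparison in is_consumed_over_proposed above, using invented provisioning numbers.

import math

current_provisioning = 100        # currently provisioned units (hypothetical)
consumed_units_percent = 92.0     # 92% of the provisioned throughput is in use
proposed_provisioning = 80        # the scale-down target being evaluated

consumption = int(math.ceil(current_provisioning * (consumed_units_percent / 100)))
print(consumption)                             # 92
print(consumption > proposed_provisioning)     # True -> the proposal would under-provision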
dhermes/bezier
src/bezier/_curve_helpers.py
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_curve_helpers.py#L388-L408
def de_casteljau_one_round(nodes, lambda1, lambda2): """Perform one round of de Casteljau's algorithm. .. note:: This is a helper for :func:`_specialize_curve`. It does not have a Fortran speedup because it is **only** used by a function which has a Fortran speedup. The weights are assumed to sum to one. Args: nodes (numpy.ndarray): Control points for a curve. lambda1 (float): First barycentric weight on interval. lambda2 (float): Second barycentric weight on interval. Returns: numpy.ndarray: The nodes for a "blended" curve one degree lower. """ return np.asfortranarray(lambda1 * nodes[:, :-1] + lambda2 * nodes[:, 1:])
[ "def", "de_casteljau_one_round", "(", "nodes", ",", "lambda1", ",", "lambda2", ")", ":", "return", "np", ".", "asfortranarray", "(", "lambda1", "*", "nodes", "[", ":", ",", ":", "-", "1", "]", "+", "lambda2", "*", "nodes", "[", ":", ",", "1", ":", "]", ")" ]
Perform one round of de Casteljau's algorithm. .. note:: This is a helper for :func:`_specialize_curve`. It does not have a Fortran speedup because it is **only** used by a function which has a Fortran speedup. The weights are assumed to sum to one. Args: nodes (numpy.ndarray): Control points for a curve. lambda1 (float): First barycentric weight on interval. lambda2 (float): Second barycentric weight on interval. Returns: numpy.ndarray: The nodes for a "blended" curve one degree lower.
[ "Perform", "one", "round", "of", "de", "Casteljau", "s", "algorithm", "." ]
python
train
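A small numpy check of de_casteljau_one_round above, blending the control points of a quadratic curve at the midpoint. The node layout (one column per control point, Fortran order) follows the record's convention; the coordinates are made up.

import numpy as np

nodes = np.asfortranarray([
    [0.0, 1.0, 2.0],
    [0.0, 2.0, 0.0],
])
lambda1, lambda2 = 0.5, 0.5       # barycentric weights summing to one

blended = np.asfortranarray(lambda1 * nodes[:, :-1] + lambda2 * nodes[:, 1:])
print(blended)
# [[0.5 1.5]
#  [1.  1. ]]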
pkkid/python-plexapi
plexapi/utils.py
https://github.com/pkkid/python-plexapi/blob/9efbde96441c2bfbf410eacfb46e811e108e8bbc/plexapi/utils.py#L129-L143
def searchType(libtype): """ Returns the integer value of the library string type. Parameters: libtype (str): LibType to lookup (movie, show, season, episode, artist, album, track, collection) Raises: :class:`plexapi.exceptions.NotFound`: Unknown libtype """ libtype = compat.ustr(libtype) if libtype in [compat.ustr(v) for v in SEARCHTYPES.values()]: return libtype if SEARCHTYPES.get(libtype) is not None: return SEARCHTYPES[libtype] raise NotFound('Unknown libtype: %s' % libtype)
[ "def", "searchType", "(", "libtype", ")", ":", "libtype", "=", "compat", ".", "ustr", "(", "libtype", ")", "if", "libtype", "in", "[", "compat", ".", "ustr", "(", "v", ")", "for", "v", "in", "SEARCHTYPES", ".", "values", "(", ")", "]", ":", "return", "libtype", "if", "SEARCHTYPES", ".", "get", "(", "libtype", ")", "is", "not", "None", ":", "return", "SEARCHTYPES", "[", "libtype", "]", "raise", "NotFound", "(", "'Unknown libtype: %s'", "%", "libtype", ")" ]
Returns the integer value of the library string type. Parameters: libtype (str): LibType to lookup (movie, show, season, episode, artist, album, track, collection) Raises: :class:`plexapi.exceptions.NotFound`: Unknown libtype
[ "Returns", "the", "integer", "value", "of", "the", "library", "string", "type", "." ]
python
train
allenai/allennlp
allennlp/state_machines/transition_functions/transition_function.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/state_machines/transition_functions/transition_function.py#L23-L82
def take_step(self, state: StateType, max_actions: int = None, allowed_actions: List[Set] = None) -> List[StateType]: """ The main method in the ``TransitionFunction`` API. This function defines the computation done at each step of decoding and returns a ranked list of next states. The input state is `grouped`, to allow for efficient computation, but the output states should all have a ``group_size`` of 1, to make things easier on the decoding algorithm. They will get regrouped later as needed. Because of the way we handle grouping in the decoder states, constructing a new state is actually a relatively expensive operation. If you know a priori that only some of the states will be needed (either because you have a set of gold action sequences, or you have a fixed beam size), passing that information into this function will keep us from constructing more states than we need, which will greatly speed up your computation. IMPORTANT: This method `must` returns states already sorted by their score, otherwise ``BeamSearch`` and other methods will break. For efficiency, we do not perform an additional sort in those methods. ALSO IMPORTANT: When ``allowed_actions`` is given and ``max_actions`` is not, we assume you want to evaluate all possible states and do not need any sorting (e.g., this is true for maximum marginal likelihood training that does not use a beam search). In this case, we may skip the sorting step for efficiency reasons. Parameters ---------- state : ``State`` The current state of the decoder, which we will take a step `from`. We may be grouping together computation for several states here. Because we can have several states for each instance in the original batch being evaluated at the same time, we use ``group_size`` for this kind of batching, and ``batch_size`` for the `original` batch in ``model.forward.`` max_actions : ``int``, optional If you know that you will only need a certain number of states out of this (e.g., in a beam search), you can pass in the max number of actions that you need, and we will only construct that many states (for each `batch` instance - `not` for each `group` instance!). This can save a whole lot of computation if you have an action space that's much larger than your beam size. allowed_actions : ``List[Set]``, optional If the ``DecoderTrainer`` has constraints on which actions need to be evaluated (e.g., maximum marginal likelihood only needs to evaluate action sequences in a given set), you can pass those constraints here, to avoid constructing state objects unnecessarily. If there are no constraints from the trainer, passing a value of ``None`` here will allow all actions to be considered. This is a list because it is `batched` - every instance in the batch has a set of allowed actions. Note that the size of this list is the ``group_size`` in the ``State``, `not` the ``batch_size`` of ``model.forward``. The training algorithm needs to convert from the `batched` allowed action sequences that it has to a `grouped` allowed action sequence list. Returns ------- next_states : ``List[State]`` A list of next states, ordered by score. """ raise NotImplementedError
[ "def", "take_step", "(", "self", ",", "state", ":", "StateType", ",", "max_actions", ":", "int", "=", "None", ",", "allowed_actions", ":", "List", "[", "Set", "]", "=", "None", ")", "->", "List", "[", "StateType", "]", ":", "raise", "NotImplementedError" ]
The main method in the ``TransitionFunction`` API. This function defines the computation done at each step of decoding and returns a ranked list of next states. The input state is `grouped`, to allow for efficient computation, but the output states should all have a ``group_size`` of 1, to make things easier on the decoding algorithm. They will get regrouped later as needed. Because of the way we handle grouping in the decoder states, constructing a new state is actually a relatively expensive operation. If you know a priori that only some of the states will be needed (either because you have a set of gold action sequences, or you have a fixed beam size), passing that information into this function will keep us from constructing more states than we need, which will greatly speed up your computation. IMPORTANT: This method `must` returns states already sorted by their score, otherwise ``BeamSearch`` and other methods will break. For efficiency, we do not perform an additional sort in those methods. ALSO IMPORTANT: When ``allowed_actions`` is given and ``max_actions`` is not, we assume you want to evaluate all possible states and do not need any sorting (e.g., this is true for maximum marginal likelihood training that does not use a beam search). In this case, we may skip the sorting step for efficiency reasons. Parameters ---------- state : ``State`` The current state of the decoder, which we will take a step `from`. We may be grouping together computation for several states here. Because we can have several states for each instance in the original batch being evaluated at the same time, we use ``group_size`` for this kind of batching, and ``batch_size`` for the `original` batch in ``model.forward.`` max_actions : ``int``, optional If you know that you will only need a certain number of states out of this (e.g., in a beam search), you can pass in the max number of actions that you need, and we will only construct that many states (for each `batch` instance - `not` for each `group` instance!). This can save a whole lot of computation if you have an action space that's much larger than your beam size. allowed_actions : ``List[Set]``, optional If the ``DecoderTrainer`` has constraints on which actions need to be evaluated (e.g., maximum marginal likelihood only needs to evaluate action sequences in a given set), you can pass those constraints here, to avoid constructing state objects unnecessarily. If there are no constraints from the trainer, passing a value of ``None`` here will allow all actions to be considered. This is a list because it is `batched` - every instance in the batch has a set of allowed actions. Note that the size of this list is the ``group_size`` in the ``State``, `not` the ``batch_size`` of ``model.forward``. The training algorithm needs to convert from the `batched` allowed action sequences that it has to a `grouped` allowed action sequence list. Returns ------- next_states : ``List[State]`` A list of next states, ordered by score.
[ "The", "main", "method", "in", "the", "TransitionFunction", "API", ".", "This", "function", "defines", "the", "computation", "done", "at", "each", "step", "of", "decoding", "and", "returns", "a", "ranked", "list", "of", "next", "states", "." ]
python
train
googleapis/oauth2client
oauth2client/client.py
https://github.com/googleapis/oauth2client/blob/50d20532a748f18e53f7d24ccbe6647132c979a9/oauth2client/client.py#L1358-L1379
def _get_well_known_file(): """Get the well known file produced by command 'gcloud auth login'.""" # TODO(orestica): Revisit this method once gcloud provides a better way # of pinpointing the exact location of the file. default_config_dir = os.getenv(_CLOUDSDK_CONFIG_ENV_VAR) if default_config_dir is None: if os.name == 'nt': try: default_config_dir = os.path.join(os.environ['APPDATA'], _CLOUDSDK_CONFIG_DIRECTORY) except KeyError: # This should never happen unless someone is really # messing with things. drive = os.environ.get('SystemDrive', 'C:') default_config_dir = os.path.join(drive, '\\', _CLOUDSDK_CONFIG_DIRECTORY) else: default_config_dir = os.path.join(os.path.expanduser('~'), '.config', _CLOUDSDK_CONFIG_DIRECTORY) return os.path.join(default_config_dir, _WELL_KNOWN_CREDENTIALS_FILE)
[ "def", "_get_well_known_file", "(", ")", ":", "# TODO(orestica): Revisit this method once gcloud provides a better way", "# of pinpointing the exact location of the file.", "default_config_dir", "=", "os", ".", "getenv", "(", "_CLOUDSDK_CONFIG_ENV_VAR", ")", "if", "default_config_dir", "is", "None", ":", "if", "os", ".", "name", "==", "'nt'", ":", "try", ":", "default_config_dir", "=", "os", ".", "path", ".", "join", "(", "os", ".", "environ", "[", "'APPDATA'", "]", ",", "_CLOUDSDK_CONFIG_DIRECTORY", ")", "except", "KeyError", ":", "# This should never happen unless someone is really", "# messing with things.", "drive", "=", "os", ".", "environ", ".", "get", "(", "'SystemDrive'", ",", "'C:'", ")", "default_config_dir", "=", "os", ".", "path", ".", "join", "(", "drive", ",", "'\\\\'", ",", "_CLOUDSDK_CONFIG_DIRECTORY", ")", "else", ":", "default_config_dir", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", ",", "'.config'", ",", "_CLOUDSDK_CONFIG_DIRECTORY", ")", "return", "os", ".", "path", ".", "join", "(", "default_config_dir", ",", "_WELL_KNOWN_CREDENTIALS_FILE", ")" ]
Get the well known file produced by command 'gcloud auth login'.
[ "Get", "the", "well", "known", "file", "produced", "by", "command", "gcloud", "auth", "login", "." ]
python
valid
pecan/pecan
pecan/commands/shell.py
https://github.com/pecan/pecan/blob/833d0653fa0e6bbfb52545b091c30182105f4a82/pecan/commands/shell.py#L41-L66
def invoke(cls, ns, banner): # pragma: nocover """ :param ns: local namespace :param banner: interactive shell startup banner Embed an interactive ipython shell. Try the InteractiveShellEmbed API first, fall back on IPShellEmbed for older IPython versions. """ try: from IPython.frontend.terminal.embed import ( InteractiveShellEmbed ) # try and load their default profile from IPython.frontend.terminal.ipapp import ( load_default_config ) config = load_default_config() shell = InteractiveShellEmbed(config=config, banner2=banner) shell(local_ns=ns) except ImportError: # Support for the IPython <= 0.10 shell API from IPython.Shell import IPShellEmbed shell = IPShellEmbed(argv=[]) shell.set_banner(shell.IP.BANNER + '\n\n' + banner) shell(local_ns=ns, global_ns={})
[ "def", "invoke", "(", "cls", ",", "ns", ",", "banner", ")", ":", "# pragma: nocover", "try", ":", "from", "IPython", ".", "frontend", ".", "terminal", ".", "embed", "import", "(", "InteractiveShellEmbed", ")", "# try and load their default profile", "from", "IPython", ".", "frontend", ".", "terminal", ".", "ipapp", "import", "(", "load_default_config", ")", "config", "=", "load_default_config", "(", ")", "shell", "=", "InteractiveShellEmbed", "(", "config", "=", "config", ",", "banner2", "=", "banner", ")", "shell", "(", "local_ns", "=", "ns", ")", "except", "ImportError", ":", "# Support for the IPython <= 0.10 shell API", "from", "IPython", ".", "Shell", "import", "IPShellEmbed", "shell", "=", "IPShellEmbed", "(", "argv", "=", "[", "]", ")", "shell", ".", "set_banner", "(", "shell", ".", "IP", ".", "BANNER", "+", "'\\n\\n'", "+", "banner", ")", "shell", "(", "local_ns", "=", "ns", ",", "global_ns", "=", "{", "}", ")" ]
:param ns: local namespace :param banner: interactive shell startup banner Embed an interactive ipython shell. Try the InteractiveShellEmbed API first, fall back on IPShellEmbed for older IPython versions.
[ ":", "param", "ns", ":", "local", "namespace", ":", "param", "banner", ":", "interactive", "shell", "startup", "banner" ]
python
train
PythonCharmers/python-future
src/future/backports/urllib/request.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/urllib/request.py#L1878-L1892
def http_error(self, url, fp, errcode, errmsg, headers, data=None): """Handle http errors. Derived class can override this, or provide specific handlers named http_error_DDD where DDD is the 3-digit error code.""" # First check if there's a specific handler for this error name = 'http_error_%d' % errcode if hasattr(self, name): method = getattr(self, name) if data is None: result = method(url, fp, errcode, errmsg, headers) else: result = method(url, fp, errcode, errmsg, headers, data) if result: return result return self.http_error_default(url, fp, errcode, errmsg, headers)
[ "def", "http_error", "(", "self", ",", "url", ",", "fp", ",", "errcode", ",", "errmsg", ",", "headers", ",", "data", "=", "None", ")", ":", "# First check if there's a specific handler for this error", "name", "=", "'http_error_%d'", "%", "errcode", "if", "hasattr", "(", "self", ",", "name", ")", ":", "method", "=", "getattr", "(", "self", ",", "name", ")", "if", "data", "is", "None", ":", "result", "=", "method", "(", "url", ",", "fp", ",", "errcode", ",", "errmsg", ",", "headers", ")", "else", ":", "result", "=", "method", "(", "url", ",", "fp", ",", "errcode", ",", "errmsg", ",", "headers", ",", "data", ")", "if", "result", ":", "return", "result", "return", "self", ".", "http_error_default", "(", "url", ",", "fp", ",", "errcode", ",", "errmsg", ",", "headers", ")" ]
Handle http errors. Derived class can override this, or provide specific handlers named http_error_DDD where DDD is the 3-digit error code.
[ "Handle", "http", "errors", "." ]
python
train
getsentry/sentry-python
sentry_sdk/utils.py
https://github.com/getsentry/sentry-python/blob/a1d77722bdce0b94660ebf50b5c4a4645916d084/sentry_sdk/utils.py#L182-L189
def store_api_url(self): """Returns the API url for storing events.""" return "%s://%s%sapi/%s/store/" % ( self.scheme, self.host, self.path, self.project_id, )
[ "def", "store_api_url", "(", "self", ")", ":", "return", "\"%s://%s%sapi/%s/store/\"", "%", "(", "self", ".", "scheme", ",", "self", ".", "host", ",", "self", ".", "path", ",", "self", ".", "project_id", ",", ")" ]
Returns the API url for storing events.
[ "Returns", "the", "API", "url", "for", "storing", "events", "." ]
python
train
fabioz/PyDev.Debugger
_pydev_bundle/pydev_override.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/_pydev_bundle/pydev_override.py#L1-L21
def overrides(method): ''' Meant to be used as class B: @overrides(A.m1) def m1(self): pass ''' def wrapper(func): if func.__name__ != method.__name__: msg = "Wrong @override: %r expected, but overwriting %r." msg = msg % (func.__name__, method.__name__) raise AssertionError(msg) if func.__doc__ is None: func.__doc__ = method.__doc__ return func return wrapper
[ "def", "overrides", "(", "method", ")", ":", "def", "wrapper", "(", "func", ")", ":", "if", "func", ".", "__name__", "!=", "method", ".", "__name__", ":", "msg", "=", "\"Wrong @override: %r expected, but overwriting %r.\"", "msg", "=", "msg", "%", "(", "func", ".", "__name__", ",", "method", ".", "__name__", ")", "raise", "AssertionError", "(", "msg", ")", "if", "func", ".", "__doc__", "is", "None", ":", "func", ".", "__doc__", "=", "method", ".", "__doc__", "return", "func", "return", "wrapper" ]
Meant to be used as class B: @overrides(A.m1) def m1(self): pass
[ "Meant", "to", "be", "used", "as", "class", "B", ":" ]
python
train
ff0000/scarlet
scarlet/cms/sites.py
https://github.com/ff0000/scarlet/blob/6c37befd810916a2d7ffff2cdb2dab57bcb6d12e/scarlet/cms/sites.py#L126-L140
def unregister(self, slug): """ Unregisters the given url. If a slug isn't already registered, this will raise NotRegistered. """ if slug not in self._registry: raise NotRegistered('The slug %s is not registered' % slug) bundle = self._registry[slug] if bundle._meta.model and bundle._meta.primary_model_bundle: self.unregister_model(bundle._meta.model) del self._registry[slug] del self._order[slug]
[ "def", "unregister", "(", "self", ",", "slug", ")", ":", "if", "slug", "not", "in", "self", ".", "_registry", ":", "raise", "NotRegistered", "(", "'The slug %s is not registered'", "%", "slug", ")", "bundle", "=", "self", ".", "_registry", "[", "slug", "]", "if", "bundle", ".", "_meta", ".", "model", "and", "bundle", ".", "_meta", ".", "primary_model_bundle", ":", "self", ".", "unregister_model", "(", "bundle", ".", "_meta", ".", "model", ")", "del", "self", ".", "_registry", "[", "slug", "]", "del", "self", ".", "_order", "[", "slug", "]" ]
Unregisters the given url. If a slug isn't already registered, this will raise NotRegistered.
[ "Unregisters", "the", "given", "url", "." ]
python
train
Unidata/MetPy
metpy/calc/tools.py
https://github.com/Unidata/MetPy/blob/16f68a94919b9a82dcf9cada2169cf039129e67b/metpy/calc/tools.py#L1366-L1377
def _abbrieviate_direction(ext_dir_str): """Convert extended (non-abbrievated) directions to abbrieviation.""" return (ext_dir_str .upper() .replace('_', '') .replace('-', '') .replace(' ', '') .replace('NORTH', 'N') .replace('EAST', 'E') .replace('SOUTH', 'S') .replace('WEST', 'W') )
[ "def", "_abbrieviate_direction", "(", "ext_dir_str", ")", ":", "return", "(", "ext_dir_str", ".", "upper", "(", ")", ".", "replace", "(", "'_'", ",", "''", ")", ".", "replace", "(", "'-'", ",", "''", ")", ".", "replace", "(", "' '", ",", "''", ")", ".", "replace", "(", "'NORTH'", ",", "'N'", ")", ".", "replace", "(", "'EAST'", ",", "'E'", ")", ".", "replace", "(", "'SOUTH'", ",", "'S'", ")", ".", "replace", "(", "'WEST'", ",", "'W'", ")", ")" ]
Convert extended (non-abbrievated) directions to abbrieviation.
[ "Convert", "extended", "(", "non", "-", "abbrievated", ")", "directions", "to", "abbrieviation", "." ]
python
train
fumitoh/modelx
modelx/core/cells.py
https://github.com/fumitoh/modelx/blob/0180da34d052c44fb94dab9e115e218bbebfc9c3/modelx/core/cells.py#L28-L47
def convert_args(args, kwargs): """If args and kwargs contains Cells, Convert them to their values.""" found = False for arg in args: if isinstance(arg, Cells): found = True break if found: args = tuple( arg.value if isinstance(arg, Cells) else arg for arg in args ) if kwargs is not None: for key, arg in kwargs.items(): if isinstance(arg, Cells): kwargs[key] = arg.value return args, kwargs
[ "def", "convert_args", "(", "args", ",", "kwargs", ")", ":", "found", "=", "False", "for", "arg", "in", "args", ":", "if", "isinstance", "(", "arg", ",", "Cells", ")", ":", "found", "=", "True", "break", "if", "found", ":", "args", "=", "tuple", "(", "arg", ".", "value", "if", "isinstance", "(", "arg", ",", "Cells", ")", "else", "arg", "for", "arg", "in", "args", ")", "if", "kwargs", "is", "not", "None", ":", "for", "key", ",", "arg", "in", "kwargs", ".", "items", "(", ")", ":", "if", "isinstance", "(", "arg", ",", "Cells", ")", ":", "kwargs", "[", "key", "]", "=", "arg", ".", "value", "return", "args", ",", "kwargs" ]
If args and kwargs contains Cells, Convert them to their values.
[ "If", "args", "and", "kwargs", "contains", "Cells", "Convert", "them", "to", "their", "values", "." ]
python
valid
kubernetes-client/python
kubernetes/client/apis/rbac_authorization_v1_api.py
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/rbac_authorization_v1_api.py#L845-L871
def delete_collection_cluster_role_binding(self, **kwargs): """ delete collection of ClusterRoleBinding This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_cluster_role_binding(async_req=True) >>> result = thread.get() :param async_req bool :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_collection_cluster_role_binding_with_http_info(**kwargs) else: (data) = self.delete_collection_cluster_role_binding_with_http_info(**kwargs) return data
[ "def", "delete_collection_cluster_role_binding", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "delete_collection_cluster_role_binding_with_http_info", "(", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "delete_collection_cluster_role_binding_with_http_info", "(", "*", "*", "kwargs", ")", "return", "data" ]
delete collection of ClusterRoleBinding This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_cluster_role_binding(async_req=True) >>> result = thread.get() :param async_req bool :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread.
[ "delete", "collection", "of", "ClusterRoleBinding", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "pass", "async_req", "=", "True", ">>>", "thread", "=", "api", ".", "delete_collection_cluster_role_binding", "(", "async_req", "=", "True", ")", ">>>", "result", "=", "thread", ".", "get", "()" ]
python
train
poppy-project/pypot
pypot/vrep/remoteApiBindings/vrep.py
https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/remoteApiBindings/vrep.py#L735-L740
def simxReadCollision(clientID, collisionObjectHandle, operationMode): ''' Please have a look at the function description/documentation in the V-REP user manual ''' collisionState = ct.c_ubyte() return c_ReadCollision(clientID, collisionObjectHandle, ct.byref(collisionState), operationMode), bool(collisionState.value!=0)
[ "def", "simxReadCollision", "(", "clientID", ",", "collisionObjectHandle", ",", "operationMode", ")", ":", "collisionState", "=", "ct", ".", "c_ubyte", "(", ")", "return", "c_ReadCollision", "(", "clientID", ",", "collisionObjectHandle", ",", "ct", ".", "byref", "(", "collisionState", ")", ",", "operationMode", ")", ",", "bool", "(", "collisionState", ".", "value", "!=", "0", ")" ]
Please have a look at the function description/documentation in the V-REP user manual
[ "Please", "have", "a", "look", "at", "the", "function", "description", "/", "documentation", "in", "the", "V", "-", "REP", "user", "manual" ]
python
train
treycucco/pyebnf
pyebnf/compiler.py
https://github.com/treycucco/pyebnf/blob/3634ddabbe5d73508bcc20f4a591f86a46634e1d/pyebnf/compiler.py#L175-L196
def _get_rule_definition(self, rule): """Generates the source code for a rule.""" fmt = """def {rule_fxn_name}(self, text): {indent}\"\"\"{rule_source}\"\"\" {indent}self._attempting(text) {indent}return {rule_definition}(text){transform} """ fmt = self._clean_fmt(fmt) source = self._indent(self._ast_to_code(rule.expression), skip_first_line=True) # All the primitives will accept a string x in place of terminal(x). This is terminal shorthand. # However, if a rule is only a wrapper around a single terminal, we have to actually make a # terminal call. This handles that situation. if self.use_terminal_shorthand and len(source) == 1 and source[0].startswith(("'", '"')): source = ["terminal({})".format(source[0])] rule_source = fmt.format(rule_fxn_name=self._get_rule_fxn_name(rule.name), indent=self.indent, rule_source=self._get_rule_source(rule), rule_definition="\n".join(source), transform=self._get_rule_transform(rule)) return self._indent(rule_source, 1)
[ "def", "_get_rule_definition", "(", "self", ",", "rule", ")", ":", "fmt", "=", "\"\"\"def {rule_fxn_name}(self, text):\n {indent}\\\"\\\"\\\"{rule_source}\\\"\\\"\\\"\n {indent}self._attempting(text)\n {indent}return {rule_definition}(text){transform}\n \"\"\"", "fmt", "=", "self", ".", "_clean_fmt", "(", "fmt", ")", "source", "=", "self", ".", "_indent", "(", "self", ".", "_ast_to_code", "(", "rule", ".", "expression", ")", ",", "skip_first_line", "=", "True", ")", "# All the primitives will accept a string x in place of terminal(x). This is terminal shorthand.", "# However, if a rule is only a wrapper around a single terminal, we have to actually make a", "# terminal call. This handles that situation.", "if", "self", ".", "use_terminal_shorthand", "and", "len", "(", "source", ")", "==", "1", "and", "source", "[", "0", "]", ".", "startswith", "(", "(", "\"'\"", ",", "'\"'", ")", ")", ":", "source", "=", "[", "\"terminal({})\"", ".", "format", "(", "source", "[", "0", "]", ")", "]", "rule_source", "=", "fmt", ".", "format", "(", "rule_fxn_name", "=", "self", ".", "_get_rule_fxn_name", "(", "rule", ".", "name", ")", ",", "indent", "=", "self", ".", "indent", ",", "rule_source", "=", "self", ".", "_get_rule_source", "(", "rule", ")", ",", "rule_definition", "=", "\"\\n\"", ".", "join", "(", "source", ")", ",", "transform", "=", "self", ".", "_get_rule_transform", "(", "rule", ")", ")", "return", "self", ".", "_indent", "(", "rule_source", ",", "1", ")" ]
Generates the source code for a rule.
[ "Generates", "the", "source", "code", "for", "a", "rule", "." ]
python
test
wakatime/wakatime
wakatime/packages/pygments/lexers/__init__.py
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/__init__.py#L152-L192
def find_lexer_class_for_filename(_fn, code=None): """Get a lexer for a filename. If multiple lexers match the filename pattern, use ``analyse_text()`` to figure out which one is more appropriate. Returns None if not found. """ matches = [] fn = basename(_fn) for modname, name, _, filenames, _ in itervalues(LEXERS): for filename in filenames: if _fn_matches(fn, filename): if name not in _lexer_cache: _load_lexers(modname) matches.append((_lexer_cache[name], filename)) for cls in find_plugin_lexers(): for filename in cls.filenames: if _fn_matches(fn, filename): matches.append((cls, filename)) if sys.version_info > (3,) and isinstance(code, bytes): # decode it, since all analyse_text functions expect unicode code = guess_decode(code) def get_rating(info): cls, filename = info # explicit patterns get a bonus bonus = '*' not in filename and 0.5 or 0 # The class _always_ defines analyse_text because it's included in # the Lexer class. The default implementation returns None which # gets turned into 0.0. Run scripts/detect_missing_analyse_text.py # to find lexers which need it overridden. if code: return cls.analyse_text(code) + bonus, cls.__name__ return cls.priority + bonus, cls.__name__ if matches: matches.sort(key=get_rating) # print "Possible lexers, after sort:", matches return matches[-1][0]
[ "def", "find_lexer_class_for_filename", "(", "_fn", ",", "code", "=", "None", ")", ":", "matches", "=", "[", "]", "fn", "=", "basename", "(", "_fn", ")", "for", "modname", ",", "name", ",", "_", ",", "filenames", ",", "_", "in", "itervalues", "(", "LEXERS", ")", ":", "for", "filename", "in", "filenames", ":", "if", "_fn_matches", "(", "fn", ",", "filename", ")", ":", "if", "name", "not", "in", "_lexer_cache", ":", "_load_lexers", "(", "modname", ")", "matches", ".", "append", "(", "(", "_lexer_cache", "[", "name", "]", ",", "filename", ")", ")", "for", "cls", "in", "find_plugin_lexers", "(", ")", ":", "for", "filename", "in", "cls", ".", "filenames", ":", "if", "_fn_matches", "(", "fn", ",", "filename", ")", ":", "matches", ".", "append", "(", "(", "cls", ",", "filename", ")", ")", "if", "sys", ".", "version_info", ">", "(", "3", ",", ")", "and", "isinstance", "(", "code", ",", "bytes", ")", ":", "# decode it, since all analyse_text functions expect unicode", "code", "=", "guess_decode", "(", "code", ")", "def", "get_rating", "(", "info", ")", ":", "cls", ",", "filename", "=", "info", "# explicit patterns get a bonus", "bonus", "=", "'*'", "not", "in", "filename", "and", "0.5", "or", "0", "# The class _always_ defines analyse_text because it's included in", "# the Lexer class. The default implementation returns None which", "# gets turned into 0.0. Run scripts/detect_missing_analyse_text.py", "# to find lexers which need it overridden.", "if", "code", ":", "return", "cls", ".", "analyse_text", "(", "code", ")", "+", "bonus", ",", "cls", ".", "__name__", "return", "cls", ".", "priority", "+", "bonus", ",", "cls", ".", "__name__", "if", "matches", ":", "matches", ".", "sort", "(", "key", "=", "get_rating", ")", "# print \"Possible lexers, after sort:\", matches", "return", "matches", "[", "-", "1", "]", "[", "0", "]" ]
Get a lexer for a filename. If multiple lexers match the filename pattern, use ``analyse_text()`` to figure out which one is more appropriate. Returns None if not found.
[ "Get", "a", "lexer", "for", "a", "filename", "." ]
python
train
swimlane/swimlane-python
swimlane/core/fields/reference.py
https://github.com/swimlane/swimlane-python/blob/588fc503a76799bcdb5aecdf2f64a6ee05e3922d/swimlane/core/fields/reference.py#L138-L149
def get_swimlane(self): """Return list of record ids""" value = super(ReferenceField, self).get_swimlane() if value: ids = list(value.keys()) if self.multiselect: return ids return ids[0] return None
[ "def", "get_swimlane", "(", "self", ")", ":", "value", "=", "super", "(", "ReferenceField", ",", "self", ")", ".", "get_swimlane", "(", ")", "if", "value", ":", "ids", "=", "list", "(", "value", ".", "keys", "(", ")", ")", "if", "self", ".", "multiselect", ":", "return", "ids", "return", "ids", "[", "0", "]", "return", "None" ]
Return list of record ids
[ "Return", "list", "of", "record", "ids" ]
python
train
fhs/pyhdf
pyhdf/SD.py
https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1499-L1515
def nametoindex(self, sds_name): """Return the index number of a dataset given the dataset name. Args:: sds_name : dataset name Returns:: index number of the dataset C library equivalent : SDnametoindex """ sds_idx = _C.SDnametoindex(self._id, sds_name) _checkErr('nametoindex', sds_idx, 'non existent SDS') return sds_idx
[ "def", "nametoindex", "(", "self", ",", "sds_name", ")", ":", "sds_idx", "=", "_C", ".", "SDnametoindex", "(", "self", ".", "_id", ",", "sds_name", ")", "_checkErr", "(", "'nametoindex'", ",", "sds_idx", ",", "'non existent SDS'", ")", "return", "sds_idx" ]
Return the index number of a dataset given the dataset name. Args:: sds_name : dataset name Returns:: index number of the dataset C library equivalent : SDnametoindex
[ "Return", "the", "index", "number", "of", "a", "dataset", "given", "the", "dataset", "name", "." ]
python
train
DataONEorg/d1_python
gmn/src/d1_gmn/app/views/external.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/views/external.py#L530-L541
def post_generate_identifier(request): """MNStorage.generateIdentifier(session, scheme[, fragment]) → Identifier.""" d1_gmn.app.views.assert_db.post_has_mime_parts(request, (('field', 'scheme'),)) if request.POST['scheme'] != 'UUID': raise d1_common.types.exceptions.InvalidRequest( 0, 'Only the UUID scheme is currently supported' ) fragment = request.POST.get('fragment', None) while True: pid = (fragment if fragment else '') + uuid.uuid4().hex if not d1_gmn.app.models.ScienceObject.objects.filter(pid__did=pid).exists(): return pid
[ "def", "post_generate_identifier", "(", "request", ")", ":", "d1_gmn", ".", "app", ".", "views", ".", "assert_db", ".", "post_has_mime_parts", "(", "request", ",", "(", "(", "'field'", ",", "'scheme'", ")", ",", ")", ")", "if", "request", ".", "POST", "[", "'scheme'", "]", "!=", "'UUID'", ":", "raise", "d1_common", ".", "types", ".", "exceptions", ".", "InvalidRequest", "(", "0", ",", "'Only the UUID scheme is currently supported'", ")", "fragment", "=", "request", ".", "POST", ".", "get", "(", "'fragment'", ",", "None", ")", "while", "True", ":", "pid", "=", "(", "fragment", "if", "fragment", "else", "''", ")", "+", "uuid", ".", "uuid4", "(", ")", ".", "hex", "if", "not", "d1_gmn", ".", "app", ".", "models", ".", "ScienceObject", ".", "objects", ".", "filter", "(", "pid__did", "=", "pid", ")", ".", "exists", "(", ")", ":", "return", "pid" ]
MNStorage.generateIdentifier(session, scheme[, fragment]) → Identifier.
[ "MNStorage", ".", "generateIdentifier", "(", "session", "scheme", "[", "fragment", "]", ")", "→", "Identifier", "." ]
python
train
CityOfZion/neo-python-rpc
neorpc/Client.py
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L90-L101
def get_block(self, height_or_hash, id=None, endpoint=None): """ Look up a block by the height or hash of the block. Args: height_or_hash: (int or str) either the height of the desired block or its hash in the form '1e67372c158a4cfbb17b9ad3aaae77001a4247a00318e354c62e53b56af4006f' id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: block: a json object or the ``neorpc.Core.Block.Block`` object """ return self._call_endpoint(GET_BLOCK, params=[height_or_hash, 1], id=id, endpoint=endpoint)
[ "def", "get_block", "(", "self", ",", "height_or_hash", ",", "id", "=", "None", ",", "endpoint", "=", "None", ")", ":", "return", "self", ".", "_call_endpoint", "(", "GET_BLOCK", ",", "params", "=", "[", "height_or_hash", ",", "1", "]", ",", "id", "=", "id", ",", "endpoint", "=", "endpoint", ")" ]
Look up a block by the height or hash of the block. Args: height_or_hash: (int or str) either the height of the desired block or its hash in the form '1e67372c158a4cfbb17b9ad3aaae77001a4247a00318e354c62e53b56af4006f' id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: block: a json object or the ``neorpc.Core.Block.Block`` object
[ "Look", "up", "a", "block", "by", "the", "height", "or", "hash", "of", "the", "block", ".", "Args", ":", "height_or_hash", ":", "(", "int", "or", "str", ")", "either", "the", "height", "of", "the", "desired", "block", "or", "its", "hash", "in", "the", "form", "1e67372c158a4cfbb17b9ad3aaae77001a4247a00318e354c62e53b56af4006f", "id", ":", "(", "int", "optional", ")", "id", "to", "use", "for", "response", "tracking", "endpoint", ":", "(", "RPCEndpoint", "optional", ")", "endpoint", "to", "specify", "to", "use" ]
python
train
push-things/django-th
th_trello/my_trello.py
https://github.com/push-things/django-th/blob/86c999d16bcf30b6224206e5b40824309834ac8c/th_trello/my_trello.py#L171-L192
def auth(self, request): """ let's auth the user to the Service :param request: request object :return: callback url :rtype: string that contains the url to redirect after auth """ request_token = super(ServiceTrello, self).auth(request) callback_url = self.callback_url(request) # URL to redirect user to, to authorize your app auth_url_str = '{auth_url}?oauth_token={token}' auth_url_str += '&scope={scope}&name={name}' auth_url_str += '&expiration={expiry}&oauth_callback={callback_url}' auth_url = auth_url_str.format(auth_url=self.AUTH_URL, token=request_token['oauth_token'], scope=self.scope, name=self.app_name, expiry=self.expiry, callback_url=callback_url) return auth_url
[ "def", "auth", "(", "self", ",", "request", ")", ":", "request_token", "=", "super", "(", "ServiceTrello", ",", "self", ")", ".", "auth", "(", "request", ")", "callback_url", "=", "self", ".", "callback_url", "(", "request", ")", "# URL to redirect user to, to authorize your app", "auth_url_str", "=", "'{auth_url}?oauth_token={token}'", "auth_url_str", "+=", "'&scope={scope}&name={name}'", "auth_url_str", "+=", "'&expiration={expiry}&oauth_callback={callback_url}'", "auth_url", "=", "auth_url_str", ".", "format", "(", "auth_url", "=", "self", ".", "AUTH_URL", ",", "token", "=", "request_token", "[", "'oauth_token'", "]", ",", "scope", "=", "self", ".", "scope", ",", "name", "=", "self", ".", "app_name", ",", "expiry", "=", "self", ".", "expiry", ",", "callback_url", "=", "callback_url", ")", "return", "auth_url" ]
let's auth the user to the Service :param request: request object :return: callback url :rtype: string that contains the url to redirect after auth
[ "let", "s", "auth", "the", "user", "to", "the", "Service", ":", "param", "request", ":", "request", "object", ":", "return", ":", "callback", "url", ":", "rtype", ":", "string", "that", "contains", "the", "url", "to", "redirect", "after", "auth" ]
python
train
ArabellaTech/django-basic-cms
basic_cms/placeholders.py
https://github.com/ArabellaTech/django-basic-cms/blob/863f3c6098606f663994930cd8e7723ad0c07caf/basic_cms/placeholders.py#L156-L188
def save(self, page, language, data, change, extra_data=None): """Actually save the placeholder data into the Content object.""" # if this placeholder is untranslated, we save everything # in the default language if self.untranslated: language = settings.PAGE_DEFAULT_LANGUAGE # the page is being changed if change: # we need create a new content if revision is enabled if(settings.PAGE_CONTENT_REVISION and self.name not in settings.PAGE_CONTENT_REVISION_EXCLUDE_LIST): Content.objects.create_content_if_changed( page, language, self.name, data ) else: Content.objects.set_or_create_content( page, language, self.name, data ) # the page is being added else: Content.objects.set_or_create_content( page, language, self.name, data )
[ "def", "save", "(", "self", ",", "page", ",", "language", ",", "data", ",", "change", ",", "extra_data", "=", "None", ")", ":", "# if this placeholder is untranslated, we save everything", "# in the default language", "if", "self", ".", "untranslated", ":", "language", "=", "settings", ".", "PAGE_DEFAULT_LANGUAGE", "# the page is being changed", "if", "change", ":", "# we need create a new content if revision is enabled", "if", "(", "settings", ".", "PAGE_CONTENT_REVISION", "and", "self", ".", "name", "not", "in", "settings", ".", "PAGE_CONTENT_REVISION_EXCLUDE_LIST", ")", ":", "Content", ".", "objects", ".", "create_content_if_changed", "(", "page", ",", "language", ",", "self", ".", "name", ",", "data", ")", "else", ":", "Content", ".", "objects", ".", "set_or_create_content", "(", "page", ",", "language", ",", "self", ".", "name", ",", "data", ")", "# the page is being added", "else", ":", "Content", ".", "objects", ".", "set_or_create_content", "(", "page", ",", "language", ",", "self", ".", "name", ",", "data", ")" ]
Actually save the placeholder data into the Content object.
[ "Actually", "save", "the", "placeholder", "data", "into", "the", "Content", "object", "." ]
python
train
edx/bok-choy
bok_choy/query.py
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/query.py#L282-L309
def nth(self, index): """ Return a query that selects the element at `index` (starts from 0). If no elements are available, returns a query with no results. Example usage: .. code:: python >> q = Query(lambda: list(range(5))) >> q.nth(2).results [2] Args: index (int): The index of the element to select (starts from 0) Returns: Query """ def _transform(xs): # pylint: disable=missing-docstring, invalid-name try: return [next(islice(iter(xs), index, None))] # Gracefully handle (a) running out of elements, and (b) negative indices except (StopIteration, ValueError): return [] return self.transform(_transform, 'nth')
[ "def", "nth", "(", "self", ",", "index", ")", ":", "def", "_transform", "(", "xs", ")", ":", "# pylint: disable=missing-docstring, invalid-name", "try", ":", "return", "[", "next", "(", "islice", "(", "iter", "(", "xs", ")", ",", "index", ",", "None", ")", ")", "]", "# Gracefully handle (a) running out of elements, and (b) negative indices", "except", "(", "StopIteration", ",", "ValueError", ")", ":", "return", "[", "]", "return", "self", ".", "transform", "(", "_transform", ",", "'nth'", ")" ]
Return a query that selects the element at `index` (starts from 0). If no elements are available, returns a query with no results. Example usage: .. code:: python >> q = Query(lambda: list(range(5))) >> q.nth(2).results [2] Args: index (int): The index of the element to select (starts from 0) Returns: Query
[ "Return", "a", "query", "that", "selects", "the", "element", "at", "index", "(", "starts", "from", "0", ")", ".", "If", "no", "elements", "are", "available", "returns", "a", "query", "with", "no", "results", "." ]
python
train
nsavch/python-dpcolors
dpcolors/__init__.py
https://github.com/nsavch/python-dpcolors/blob/9bca11416a21eca1c5a84b7dcc852d231d911981/dpcolors/__init__.py#L328-L341
def to_irc(self, preserve_original=True): """ Convert to mIRC format :param preserve_original: if the current ColorString instance was created from mIRC text, then just return the original string :return: """ if preserve_original and self.original_type == 'irc': return self.original_bytes res = [] for i in self.parts: res.append(i.to_irc()) s = ''.join(res) + '\x03\x0f' return s.encode('utf8')
[ "def", "to_irc", "(", "self", ",", "preserve_original", "=", "True", ")", ":", "if", "preserve_original", "and", "self", ".", "original_type", "==", "'irc'", ":", "return", "self", ".", "original_bytes", "res", "=", "[", "]", "for", "i", "in", "self", ".", "parts", ":", "res", ".", "append", "(", "i", ".", "to_irc", "(", ")", ")", "s", "=", "''", ".", "join", "(", "res", ")", "+", "'\\x03\\x0f'", "return", "s", ".", "encode", "(", "'utf8'", ")" ]
Convert to mIRC format :param preserve_original: if the current ColorString instance was created from mIRC text, then just return the original string :return:
[ "Convert", "to", "mIRC", "format", ":", "param", "preserve_original", ":", "if", "the", "current", "ColorString", "instance", "was", "created", "from", "mIRC", "text", "then", "just", "return", "the", "original", "string", ":", "return", ":" ]
python
train
PyCQA/astroid
astroid/node_classes.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/node_classes.py#L1898-L1917
def postinit(self, target, annotation, simple, value=None): """Do some setup after initialisation. :param target: What is being assigned to. :type target: NodeNG :param annotation: The type annotation of what is being assigned to. :type: NodeNG :param simple: Whether :attr:`target` is a pure name or a complex statement. :type simple: int :param value: The value being assigned to the variables. :type: NodeNG or None """ self.target = target self.annotation = annotation self.value = value self.simple = simple
[ "def", "postinit", "(", "self", ",", "target", ",", "annotation", ",", "simple", ",", "value", "=", "None", ")", ":", "self", ".", "target", "=", "target", "self", ".", "annotation", "=", "annotation", "self", ".", "value", "=", "value", "self", ".", "simple", "=", "simple" ]
Do some setup after initialisation. :param target: What is being assigned to. :type target: NodeNG :param annotation: The type annotation of what is being assigned to. :type: NodeNG :param simple: Whether :attr:`target` is a pure name or a complex statement. :type simple: int :param value: The value being assigned to the variables. :type: NodeNG or None
[ "Do", "some", "setup", "after", "initialisation", "." ]
python
train
DavidMStraub/pylha
pylha/parse.py
https://github.com/DavidMStraub/pylha/blob/8d65074609321e5eaf97fe962c56f6d79a3ad2b6/pylha/parse.py#L6-L13
def numval(token): """Return the numerical value of token.value if it is a number""" if token.type == 'INTEGER': return int(token.value) elif token.type == 'FLOAT': return float(token.value) else: return token.value
[ "def", "numval", "(", "token", ")", ":", "if", "token", ".", "type", "==", "'INTEGER'", ":", "return", "int", "(", "token", ".", "value", ")", "elif", "token", ".", "type", "==", "'FLOAT'", ":", "return", "float", "(", "token", ".", "value", ")", "else", ":", "return", "token", ".", "value" ]
Return the numerical value of token.value if it is a number
[ "Return", "the", "numerical", "value", "of", "token", ".", "value", "if", "it", "is", "a", "number" ]
python
train
ArchiveTeam/wpull
wpull/processor/coprocessor/phantomjs.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/processor/coprocessor/phantomjs.py#L247-L268
def _add_warc_action_log(self, path, url): '''Add the action log to the WARC file.''' _logger.debug('Adding action log record.') actions = [] with open(path, 'r', encoding='utf-8', errors='replace') as file: for line in file: actions.append(json.loads(line)) log_data = json.dumps( {'actions': actions}, indent=4, ).encode('utf-8') self._action_warc_record = record = WARCRecord() record.set_common_fields('metadata', 'application/json') record.fields['WARC-Target-URI'] = 'urn:X-wpull:snapshot?url={0}' \ .format(wpull.url.percent_encode_query_value(url)) record.block_file = io.BytesIO(log_data) self._warc_recorder.set_length_and_maybe_checksums(record) self._warc_recorder.write_record(record)
[ "def", "_add_warc_action_log", "(", "self", ",", "path", ",", "url", ")", ":", "_logger", ".", "debug", "(", "'Adding action log record.'", ")", "actions", "=", "[", "]", "with", "open", "(", "path", ",", "'r'", ",", "encoding", "=", "'utf-8'", ",", "errors", "=", "'replace'", ")", "as", "file", ":", "for", "line", "in", "file", ":", "actions", ".", "append", "(", "json", ".", "loads", "(", "line", ")", ")", "log_data", "=", "json", ".", "dumps", "(", "{", "'actions'", ":", "actions", "}", ",", "indent", "=", "4", ",", ")", ".", "encode", "(", "'utf-8'", ")", "self", ".", "_action_warc_record", "=", "record", "=", "WARCRecord", "(", ")", "record", ".", "set_common_fields", "(", "'metadata'", ",", "'application/json'", ")", "record", ".", "fields", "[", "'WARC-Target-URI'", "]", "=", "'urn:X-wpull:snapshot?url={0}'", ".", "format", "(", "wpull", ".", "url", ".", "percent_encode_query_value", "(", "url", ")", ")", "record", ".", "block_file", "=", "io", ".", "BytesIO", "(", "log_data", ")", "self", ".", "_warc_recorder", ".", "set_length_and_maybe_checksums", "(", "record", ")", "self", ".", "_warc_recorder", ".", "write_record", "(", "record", ")" ]
Add the action log to the WARC file.
[ "Add", "the", "action", "log", "to", "the", "WARC", "file", "." ]
python
train
aliyun/aliyun-odps-python-sdk
odps/df/expr/merge.py
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/df/expr/merge.py#L897-L931
def concat(left, rights, distinct=False, axis=0): """ Concat collections. :param left: left collection :param rights: right collections, can be a DataFrame object or a list of DataFrames :param distinct: whether to remove duplicate entries. only available when axis == 0 :param axis: when axis == 0, the DataFrames are merged vertically, otherwise horizontally. :return: collection Note that axis==1 can only be used under Pandas DataFrames or XFlow. :Example: >>> df['name', 'id'].concat(df2['score'], axis=1) """ from ..utils import to_collection if isinstance(rights, Node): rights = [rights, ] if not rights: raise ValueError('At least one DataFrame should be provided.') if axis == 0: for right in rights: left = union(left, right, distinct=distinct) return left else: rights = [to_collection(r) for r in rights] ConcatCollectionExpr.validate_input(left, *rights) if hasattr(left, '_xflow_concat'): return left._xflow_concat(rights) else: return __horz_concat(left, rights)
[ "def", "concat", "(", "left", ",", "rights", ",", "distinct", "=", "False", ",", "axis", "=", "0", ")", ":", "from", ".", ".", "utils", "import", "to_collection", "if", "isinstance", "(", "rights", ",", "Node", ")", ":", "rights", "=", "[", "rights", ",", "]", "if", "not", "rights", ":", "raise", "ValueError", "(", "'At least one DataFrame should be provided.'", ")", "if", "axis", "==", "0", ":", "for", "right", "in", "rights", ":", "left", "=", "union", "(", "left", ",", "right", ",", "distinct", "=", "distinct", ")", "return", "left", "else", ":", "rights", "=", "[", "to_collection", "(", "r", ")", "for", "r", "in", "rights", "]", "ConcatCollectionExpr", ".", "validate_input", "(", "left", ",", "*", "rights", ")", "if", "hasattr", "(", "left", ",", "'_xflow_concat'", ")", ":", "return", "left", ".", "_xflow_concat", "(", "rights", ")", "else", ":", "return", "__horz_concat", "(", "left", ",", "rights", ")" ]
Concat collections. :param left: left collection :param rights: right collections, can be a DataFrame object or a list of DataFrames :param distinct: whether to remove duplicate entries. only available when axis == 0 :param axis: when axis == 0, the DataFrames are merged vertically, otherwise horizontally. :return: collection Note that axis==1 can only be used under Pandas DataFrames or XFlow. :Example: >>> df['name', 'id'].concat(df2['score'], axis=1)
[ "Concat", "collections", "." ]
python
train
OpenKMIP/PyKMIP
kmip/services/kmip_client.py
https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/services/kmip_client.py#L423-L503
def derive_key(self, object_type, unique_identifiers, derivation_method, derivation_parameters, template_attribute, credential=None): """ Derive a new key or secret data from an existing managed object. Args: object_type (ObjectType): An ObjectType enumeration specifying what type of object to create. Required. unique_identifiers (list): A list of strings specifying the unique IDs of the existing managed objects to use for key derivation. Required. derivation_method (DerivationMethod): A DerivationMethod enumeration specifying what key derivation method to use. Required. derivation_parameters (DerivationParameters): A DerivationParameters struct containing the settings and options to use for key derivation. template_attribute (TemplateAttribute): A TemplateAttribute struct containing the attributes to set on the newly derived object. credential (Credential): A Credential struct containing a set of authorization parameters for the operation. Optional, defaults to None. Returns: dict: The results of the derivation operation, containing the following key/value pairs: Key | Value ---------------------|----------------------------------------- 'unique_identifier' | (string) The unique ID of the newly | derived object. 'template_attribute' | (TemplateAttribute) A struct containing | any attributes set on the newly derived | object. 'result_status' | (ResultStatus) An enumeration indicating | the status of the operation result. 'result_reason' | (ResultReason) An enumeration providing | context for the result status. 'result_message' | (string) A message providing additional | context for the operation result. """ operation = Operation(OperationEnum.DERIVE_KEY) request_payload = payloads.DeriveKeyRequestPayload( object_type=object_type, unique_identifiers=unique_identifiers, derivation_method=derivation_method, derivation_parameters=derivation_parameters, template_attribute=template_attribute ) batch_item = messages.RequestBatchItem( operation=operation, request_payload=request_payload ) request = self._build_request_message(credential, [batch_item]) response = self._send_and_receive_message(request) batch_item = response.batch_items[0] payload = batch_item.response_payload result = {} if payload: result['unique_identifier'] = payload.unique_identifier result['template_attribute'] = payload.template_attribute result['result_status'] = batch_item.result_status.value try: result['result_reason'] = batch_item.result_reason.value except Exception: result['result_reason'] = batch_item.result_reason try: result['result_message'] = batch_item.result_message.value except Exception: result['result_message'] = batch_item.result_message return result
[ "def", "derive_key", "(", "self", ",", "object_type", ",", "unique_identifiers", ",", "derivation_method", ",", "derivation_parameters", ",", "template_attribute", ",", "credential", "=", "None", ")", ":", "operation", "=", "Operation", "(", "OperationEnum", ".", "DERIVE_KEY", ")", "request_payload", "=", "payloads", ".", "DeriveKeyRequestPayload", "(", "object_type", "=", "object_type", ",", "unique_identifiers", "=", "unique_identifiers", ",", "derivation_method", "=", "derivation_method", ",", "derivation_parameters", "=", "derivation_parameters", ",", "template_attribute", "=", "template_attribute", ")", "batch_item", "=", "messages", ".", "RequestBatchItem", "(", "operation", "=", "operation", ",", "request_payload", "=", "request_payload", ")", "request", "=", "self", ".", "_build_request_message", "(", "credential", ",", "[", "batch_item", "]", ")", "response", "=", "self", ".", "_send_and_receive_message", "(", "request", ")", "batch_item", "=", "response", ".", "batch_items", "[", "0", "]", "payload", "=", "batch_item", ".", "response_payload", "result", "=", "{", "}", "if", "payload", ":", "result", "[", "'unique_identifier'", "]", "=", "payload", ".", "unique_identifier", "result", "[", "'template_attribute'", "]", "=", "payload", ".", "template_attribute", "result", "[", "'result_status'", "]", "=", "batch_item", ".", "result_status", ".", "value", "try", ":", "result", "[", "'result_reason'", "]", "=", "batch_item", ".", "result_reason", ".", "value", "except", "Exception", ":", "result", "[", "'result_reason'", "]", "=", "batch_item", ".", "result_reason", "try", ":", "result", "[", "'result_message'", "]", "=", "batch_item", ".", "result_message", ".", "value", "except", "Exception", ":", "result", "[", "'result_message'", "]", "=", "batch_item", ".", "result_message", "return", "result" ]
Derive a new key or secret data from an existing managed object. Args: object_type (ObjectType): An ObjectType enumeration specifying what type of object to create. Required. unique_identifiers (list): A list of strings specifying the unique IDs of the existing managed objects to use for key derivation. Required. derivation_method (DerivationMethod): A DerivationMethod enumeration specifying what key derivation method to use. Required. derivation_parameters (DerivationParameters): A DerivationParameters struct containing the settings and options to use for key derivation. template_attribute (TemplateAttribute): A TemplateAttribute struct containing the attributes to set on the newly derived object. credential (Credential): A Credential struct containing a set of authorization parameters for the operation. Optional, defaults to None. Returns: dict: The results of the derivation operation, containing the following key/value pairs: Key | Value ---------------------|----------------------------------------- 'unique_identifier' | (string) The unique ID of the newly | derived object. 'template_attribute' | (TemplateAttribute) A struct containing | any attributes set on the newly derived | object. 'result_status' | (ResultStatus) An enumeration indicating | the status of the operation result. 'result_reason' | (ResultReason) An enumeration providing | context for the result status. 'result_message' | (string) A message providing additional | context for the operation result.
[ "Derive", "a", "new", "key", "or", "secret", "data", "from", "an", "existing", "managed", "object", "." ]
python
test