Record schema (one entry per function):

    repo              string, 7 to 54 characters
    path              string, 4 to 192 characters
    url               string, 87 to 284 characters
    code              string, 78 to 104k characters
    code_tokens       list
    docstring         string, 1 to 46.9k characters
    docstring_tokens  list
    language          string, 1 distinct value
    partition         string, 3 distinct values
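The columns above describe one record per function: the source (code, code_tokens), its documentation (docstring, docstring_tokens), repository metadata (repo, path, url), a language label, and a partition label. As a quick orientation, here is a minimal sketch of walking records of this shape; the field names come from the schema above, while the functions.jsonl filename and the helper names are purely illustrative assumptions.

    import json

    def iter_records(path="functions.jsonl"):
        # Hypothetical JSON Lines dump: one record per line with the fields listed above.
        with open(path, encoding="utf-8") as handle:
            for line in handle:
                yield json.loads(line)

    def summarize(record):
        # Pair the repository identifiers with the first sentence of the docstring.
        first_sentence = record["docstring"].split(". ")[0]
        return "{0}:{1} -- {2}".format(record["repo"], record["path"], first_sentence)

    if __name__ == "__main__":
        for record in iter_records():
            if record["language"] == "python" and record["partition"] == "train":
                print(summarize(record))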
knipknap/SpiffWorkflow
SpiffWorkflow/bpmn/serializer/Packager.py
https://github.com/knipknap/SpiffWorkflow/blob/f0af7f59a332e0619e4f3c00a7d4a3d230760e00/SpiffWorkflow/bpmn/serializer/Packager.py#L441-L459
def merge_options_and_config(cls, config, options, args): """ Override in subclass if required. """ if args: config.set(CONFIG_SECTION_NAME, 'input_files', ','.join(args)) elif config.has_option(CONFIG_SECTION_NAME, 'input_files'): for i in config.get(CONFIG_SECTION_NAME, 'input_files').split(','): if not os.path.isabs(i): i = os.path.abspath( os.path.join(os.path.dirname(options.config_file), i)) args.append(i) cls.merge_option_and_config_str('package_file', config, options) cls.merge_option_and_config_str('entry_point_process', config, options) cls.merge_option_and_config_str('target_engine', config, options) cls.merge_option_and_config_str( 'target_engine_version', config, options) cls.merge_option_and_config_str('editor', config, options)
[ "def", "merge_options_and_config", "(", "cls", ",", "config", ",", "options", ",", "args", ")", ":", "if", "args", ":", "config", ".", "set", "(", "CONFIG_SECTION_NAME", ",", "'input_files'", ",", "','", ".", "join", "(", "args", ")", ")", "elif", "config", ".", "has_option", "(", "CONFIG_SECTION_NAME", ",", "'input_files'", ")", ":", "for", "i", "in", "config", ".", "get", "(", "CONFIG_SECTION_NAME", ",", "'input_files'", ")", ".", "split", "(", "','", ")", ":", "if", "not", "os", ".", "path", ".", "isabs", "(", "i", ")", ":", "i", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "options", ".", "config_file", ")", ",", "i", ")", ")", "args", ".", "append", "(", "i", ")", "cls", ".", "merge_option_and_config_str", "(", "'package_file'", ",", "config", ",", "options", ")", "cls", ".", "merge_option_and_config_str", "(", "'entry_point_process'", ",", "config", ",", "options", ")", "cls", ".", "merge_option_and_config_str", "(", "'target_engine'", ",", "config", ",", "options", ")", "cls", ".", "merge_option_and_config_str", "(", "'target_engine_version'", ",", "config", ",", "options", ")", "cls", ".", "merge_option_and_config_str", "(", "'editor'", ",", "config", ",", "options", ")" ]
Override in subclass if required.
[ "Override", "in", "subclass", "if", "required", "." ]
python
valid
IBMStreams/pypi.streamsx
streamsx/rest_primitives.py
https://github.com/IBMStreams/pypi.streamsx/blob/abd67b4757120f6f805787fba390f53e9df9cdd8/streamsx/rest_primitives.py#L1320-L1328
def get_resource(self): """Get the :py:class:`Resource` of the resource allocation. Returns: Resource: Resource for this allocation. .. versionadded:: 1.9 """ return Resource(self.rest_client.make_request(self.resource), self.rest_client)
[ "def", "get_resource", "(", "self", ")", ":", "return", "Resource", "(", "self", ".", "rest_client", ".", "make_request", "(", "self", ".", "resource", ")", ",", "self", ".", "rest_client", ")" ]
Get the :py:class:`Resource` of the resource allocation. Returns: Resource: Resource for this allocation. .. versionadded:: 1.9
[ "Get", "the", ":", "py", ":", "class", ":", "Resource", "of", "the", "resource", "allocation", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/models/research/moe_experiments.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/moe_experiments.py#L30-L46
def xmoe_tr_dense_2k(): """Series of architectural experiments on Translation. # run on 8-core setup 119M params, einsum=0.95e13 Returns: a hparams """ hparams = mtf_transformer2.mtf_bitransformer_base() hparams.encoder_layers = ["self_att", "drd"] * 4 hparams.decoder_layers = ["self_att", "enc_att", "drd"] * 4 hparams.batch_size = 64 hparams.shared_embedding_and_softmax_weights = True hparams.mesh_shape = "batch:8" return hparams
[ "def", "xmoe_tr_dense_2k", "(", ")", ":", "hparams", "=", "mtf_transformer2", ".", "mtf_bitransformer_base", "(", ")", "hparams", ".", "encoder_layers", "=", "[", "\"self_att\"", ",", "\"drd\"", "]", "*", "4", "hparams", ".", "decoder_layers", "=", "[", "\"self_att\"", ",", "\"enc_att\"", ",", "\"drd\"", "]", "*", "4", "hparams", ".", "batch_size", "=", "64", "hparams", ".", "shared_embedding_and_softmax_weights", "=", "True", "hparams", ".", "mesh_shape", "=", "\"batch:8\"", "return", "hparams" ]
Series of architectural experiments on Translation. # run on 8-core setup 119M params, einsum=0.95e13 Returns: a hparams
[ "Series", "of", "architectural", "experiments", "on", "Translation", "." ]
python
train
walkr/nanoservice
nanoservice/reqrep.py
https://github.com/walkr/nanoservice/blob/e2098986b1baa5f283167ae487d14f3c6c21961a/nanoservice/reqrep.py#L79-L86
def parse(cls, payload): """ Parse client request """ try: method, args, ref = payload except Exception as exception: raise RequestParseError(exception) else: return method, args, ref
[ "def", "parse", "(", "cls", ",", "payload", ")", ":", "try", ":", "method", ",", "args", ",", "ref", "=", "payload", "except", "Exception", "as", "exception", ":", "raise", "RequestParseError", "(", "exception", ")", "else", ":", "return", "method", ",", "args", ",", "ref" ]
Parse client request
[ "Parse", "client", "request" ]
python
train
wheerd/multiset
multiset.py
https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L416-L443
def times(self, factor): """Return a new set with each element's multiplicity multiplied with the given scalar factor. >>> ms = Multiset('aab') >>> sorted(ms.times(2)) ['a', 'a', 'a', 'a', 'b', 'b'] You can also use the ``*`` operator for the same effect: >>> sorted(ms * 3) ['a', 'a', 'a', 'a', 'a', 'a', 'b', 'b', 'b'] For a variant of the operation which modifies the multiset in place see :meth:`times_update`. Args: factor: The factor to multiply each multiplicity with. """ if factor == 0: return self.__class__() if factor < 0: raise ValueError('The factor must no be negative.') result = self.__copy__() _elements = result._elements for element in _elements: _elements[element] *= factor result._total *= factor return result
[ "def", "times", "(", "self", ",", "factor", ")", ":", "if", "factor", "==", "0", ":", "return", "self", ".", "__class__", "(", ")", "if", "factor", "<", "0", ":", "raise", "ValueError", "(", "'The factor must no be negative.'", ")", "result", "=", "self", ".", "__copy__", "(", ")", "_elements", "=", "result", ".", "_elements", "for", "element", "in", "_elements", ":", "_elements", "[", "element", "]", "*=", "factor", "result", ".", "_total", "*=", "factor", "return", "result" ]
Return a new set with each element's multiplicity multiplied with the given scalar factor. >>> ms = Multiset('aab') >>> sorted(ms.times(2)) ['a', 'a', 'a', 'a', 'b', 'b'] You can also use the ``*`` operator for the same effect: >>> sorted(ms * 3) ['a', 'a', 'a', 'a', 'a', 'a', 'b', 'b', 'b'] For a variant of the operation which modifies the multiset in place see :meth:`times_update`. Args: factor: The factor to multiply each multiplicity with.
[ "Return", "a", "new", "set", "with", "each", "element", "s", "multiplicity", "multiplied", "with", "the", "given", "scalar", "factor", "." ]
python
train
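The doctests quoted in the docstring above already demonstrate the behaviour; for completeness, a small usage sketch, assuming the multiset package is installed and exposes Multiset as in the quoted module:

    from multiset import Multiset

    ms = Multiset('aab')        # multiplicities: 'a' -> 2, 'b' -> 1
    doubled = ms.times(2)       # new multiset with every multiplicity doubled
    print(sorted(doubled))      # ['a', 'a', 'a', 'a', 'b', 'b']
    print(sorted(ms * 3))       # the * operator applies the same scaling in one step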
kwikteam/phy
phy/cluster/supervisor.py
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/supervisor.py#L556-L566
def select(self, *cluster_ids): """Select a list of clusters.""" # HACK: allow for `select(1, 2, 3)` in addition to `select([1, 2, 3])` # This makes it more convenient to select multiple clusters with # the snippet: `:c 1 2 3` instead of `:c 1,2,3`. if cluster_ids and isinstance(cluster_ids[0], (tuple, list)): cluster_ids = list(cluster_ids[0]) + list(cluster_ids[1:]) # Remove non-existing clusters from the selection. cluster_ids = self._keep_existing_clusters(cluster_ids) # Update the cluster view selection. self.cluster_view.select(cluster_ids)
[ "def", "select", "(", "self", ",", "*", "cluster_ids", ")", ":", "# HACK: allow for `select(1, 2, 3)` in addition to `select([1, 2, 3])`", "# This makes it more convenient to select multiple clusters with", "# the snippet: `:c 1 2 3` instead of `:c 1,2,3`.", "if", "cluster_ids", "and", "isinstance", "(", "cluster_ids", "[", "0", "]", ",", "(", "tuple", ",", "list", ")", ")", ":", "cluster_ids", "=", "list", "(", "cluster_ids", "[", "0", "]", ")", "+", "list", "(", "cluster_ids", "[", "1", ":", "]", ")", "# Remove non-existing clusters from the selection.", "cluster_ids", "=", "self", ".", "_keep_existing_clusters", "(", "cluster_ids", ")", "# Update the cluster view selection.", "self", ".", "cluster_view", ".", "select", "(", "cluster_ids", ")" ]
Select a list of clusters.
[ "Select", "a", "list", "of", "clusters", "." ]
python
train
IdentityPython/pysaml2
src/saml2/validate.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/validate.py#L182-L202
def valid_string(val): """ Expects unicode Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF] """ for char in val: try: char = ord(char) except TypeError: raise NotValid("string") if char == 0x09 or char == 0x0A or char == 0x0D: continue elif 0x20 <= char <= 0xD7FF: continue elif 0xE000 <= char <= 0xFFFD: continue elif 0x10000 <= char <= 0x10FFFF: continue else: raise NotValid("string") return True
[ "def", "valid_string", "(", "val", ")", ":", "for", "char", "in", "val", ":", "try", ":", "char", "=", "ord", "(", "char", ")", "except", "TypeError", ":", "raise", "NotValid", "(", "\"string\"", ")", "if", "char", "==", "0x09", "or", "char", "==", "0x0A", "or", "char", "==", "0x0D", ":", "continue", "elif", "0x20", "<=", "char", "<=", "0xD7FF", ":", "continue", "elif", "0xE000", "<=", "char", "<=", "0xFFFD", ":", "continue", "elif", "0x10000", "<=", "char", "<=", "0x10FFFF", ":", "continue", "else", ":", "raise", "NotValid", "(", "\"string\"", ")", "return", "True" ]
Expects unicode Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
[ "Expects", "unicode", "Char", "::", "=", "#x9", "|", "#xA", "|", "#xD", "|", "[", "#x20", "-", "#xD7FF", "]", "|", "[", "#xE000", "-", "#xFFFD", "]", "|", "[", "#x10000", "-", "#x10FFFF", "]" ]
python
train
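Since the docstring only quotes the XML Char production, a brief sketch of the intended use may help; the import path is inferred from the record's file path and is an assumption, as is the example input:

    # Assumes pysaml2 is installed; valid_string returns True for strings made of
    # XML 1.0 Char code points and raises NotValid otherwise.
    from saml2.validate import NotValid, valid_string

    print(valid_string("plain text"))   # True
    try:
        valid_string("bad \x00 byte")   # NUL is outside the Char production
    except NotValid:
        print("control character rejected")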
napalm-automation/napalm-eos
napalm_eos/eos.py
https://github.com/napalm-automation/napalm-eos/blob/a3b37d6ee353e326ab9ea1a09ecc14045b12928b/napalm_eos/eos.py#L91-L118
def open(self): """Implementation of NAPALM method open.""" try: if self.transport in ('http', 'https'): connection = pyeapi.client.connect( transport=self.transport, host=self.hostname, username=self.username, password=self.password, port=self.port, timeout=self.timeout ) elif self.transport == 'socket': connection = pyeapi.client.connect(transport=self.transport) else: raise ConnectionException("Unknown transport: {}".format(self.transport)) if self.device is None: self.device = pyeapi.client.Node(connection, enablepwd=self.enablepwd) # does not raise an Exception if unusable # let's try to run a very simple command self.device.run_commands(['show clock'], encoding='text') except ConnectionError as ce: # and this is raised either if device not avaiable # either if HTTP(S) agent is not enabled # show management api http-commands raise ConnectionException(ce.message)
[ "def", "open", "(", "self", ")", ":", "try", ":", "if", "self", ".", "transport", "in", "(", "'http'", ",", "'https'", ")", ":", "connection", "=", "pyeapi", ".", "client", ".", "connect", "(", "transport", "=", "self", ".", "transport", ",", "host", "=", "self", ".", "hostname", ",", "username", "=", "self", ".", "username", ",", "password", "=", "self", ".", "password", ",", "port", "=", "self", ".", "port", ",", "timeout", "=", "self", ".", "timeout", ")", "elif", "self", ".", "transport", "==", "'socket'", ":", "connection", "=", "pyeapi", ".", "client", ".", "connect", "(", "transport", "=", "self", ".", "transport", ")", "else", ":", "raise", "ConnectionException", "(", "\"Unknown transport: {}\"", ".", "format", "(", "self", ".", "transport", ")", ")", "if", "self", ".", "device", "is", "None", ":", "self", ".", "device", "=", "pyeapi", ".", "client", ".", "Node", "(", "connection", ",", "enablepwd", "=", "self", ".", "enablepwd", ")", "# does not raise an Exception if unusable", "# let's try to run a very simple command", "self", ".", "device", ".", "run_commands", "(", "[", "'show clock'", "]", ",", "encoding", "=", "'text'", ")", "except", "ConnectionError", "as", "ce", ":", "# and this is raised either if device not avaiable", "# either if HTTP(S) agent is not enabled", "# show management api http-commands", "raise", "ConnectionException", "(", "ce", ".", "message", ")" ]
Implementation of NAPALM method open.
[ "Implementation", "of", "NAPALM", "method", "open", "." ]
python
train
sdss/tree
python/tree/misc/logger.py
https://github.com/sdss/tree/blob/f61fe0876c138ccb61874912d4b8590dadfa835c/python/tree/misc/logger.py#L58-L98
def colored_formatter(record): """Prints log messages with colours.""" colours = {'info': ('blue', 'normal'), 'debug': ('magenta', 'normal'), 'warning': ('yellow', 'normal'), 'print': ('green', 'normal'), 'error': ('red', 'bold')} levelname = record.levelname.lower() if levelname == 'error': return if levelname.lower() in colours: levelname_color = colours[levelname][0] header = color_text('[{}]: '.format(levelname.upper()), levelname_color) message = '{0}'.format(record.msg) warning_category = re.match(r'^(\w+Warning:).*', message) if warning_category is not None: warning_category_colour = color_text(warning_category.groups()[0], 'cyan') message = message.replace(warning_category.groups()[0], warning_category_colour) sub_level = re.match(r'(\[.+\]:)(.*)', message) if sub_level is not None: sub_level_name = color_text(sub_level.groups()[0], 'red') message = '{}{}'.format(sub_level_name, ''.join(sub_level.groups()[1:])) # if len(message) > 79: # tw = TextWrapper() # tw.width = 79 # tw.subsequent_indent = ' ' * (len(record.levelname) + 2) # tw.break_on_hyphens = False # message = '\n'.join(tw.wrap(message)) sys.__stdout__.write('{}{}\n'.format(header, message)) sys.__stdout__.flush() return
[ "def", "colored_formatter", "(", "record", ")", ":", "colours", "=", "{", "'info'", ":", "(", "'blue'", ",", "'normal'", ")", ",", "'debug'", ":", "(", "'magenta'", ",", "'normal'", ")", ",", "'warning'", ":", "(", "'yellow'", ",", "'normal'", ")", ",", "'print'", ":", "(", "'green'", ",", "'normal'", ")", ",", "'error'", ":", "(", "'red'", ",", "'bold'", ")", "}", "levelname", "=", "record", ".", "levelname", ".", "lower", "(", ")", "if", "levelname", "==", "'error'", ":", "return", "if", "levelname", ".", "lower", "(", ")", "in", "colours", ":", "levelname_color", "=", "colours", "[", "levelname", "]", "[", "0", "]", "header", "=", "color_text", "(", "'[{}]: '", ".", "format", "(", "levelname", ".", "upper", "(", ")", ")", ",", "levelname_color", ")", "message", "=", "'{0}'", ".", "format", "(", "record", ".", "msg", ")", "warning_category", "=", "re", ".", "match", "(", "r'^(\\w+Warning:).*'", ",", "message", ")", "if", "warning_category", "is", "not", "None", ":", "warning_category_colour", "=", "color_text", "(", "warning_category", ".", "groups", "(", ")", "[", "0", "]", ",", "'cyan'", ")", "message", "=", "message", ".", "replace", "(", "warning_category", ".", "groups", "(", ")", "[", "0", "]", ",", "warning_category_colour", ")", "sub_level", "=", "re", ".", "match", "(", "r'(\\[.+\\]:)(.*)'", ",", "message", ")", "if", "sub_level", "is", "not", "None", ":", "sub_level_name", "=", "color_text", "(", "sub_level", ".", "groups", "(", ")", "[", "0", "]", ",", "'red'", ")", "message", "=", "'{}{}'", ".", "format", "(", "sub_level_name", ",", "''", ".", "join", "(", "sub_level", ".", "groups", "(", ")", "[", "1", ":", "]", ")", ")", "# if len(message) > 79:", "# tw = TextWrapper()", "# tw.width = 79", "# tw.subsequent_indent = ' ' * (len(record.levelname) + 2)", "# tw.break_on_hyphens = False", "# message = '\\n'.join(tw.wrap(message))", "sys", ".", "__stdout__", ".", "write", "(", "'{}{}\\n'", ".", "format", "(", "header", ",", "message", ")", ")", "sys", ".", "__stdout__", ".", "flush", "(", ")", "return" ]
Prints log messages with colours.
[ "Prints", "log", "messages", "with", "colours", "." ]
python
train
Becksteinlab/GromacsWrapper
gromacs/scaling.py
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/scaling.py#L90-L134
def scale_impropers(mol, impropers, scale, banned_lines=None): """Scale improper dihedrals""" if banned_lines is None: banned_lines = [] new_impropers = [] for im in mol.impropers: atypes = (im.atom1.get_atomtype(), im.atom2.get_atomtype(), im.atom3.get_atomtype(), im.atom4.get_atomtype()) atypes = [a.replace("_", "").replace("=", "") for a in atypes] # special-case: this is a [ dihedral ] override in molecule block, continue and don't match if im.gromacs['param'] != []: for p in im.gromacs['param']: p['kpsi'] *= scale new_impropers.append(im) continue for iswitch in range(32): if (iswitch%2==0): a1=atypes[0]; a2=atypes[1]; a3=atypes[2]; a4=atypes[3]; else: a1=atypes[3]; a2=atypes[2]; a3=atypes[1]; a4=atypes[0]; if((iswitch//2)%2==1): a1="X"; if((iswitch//4)%2==1): a2="X"; if((iswitch//8)%2==1): a3="X"; if((iswitch//16)%2==1): a4="X"; key = "{0}-{1}-{2}-{3}-{4}".format(a1, a2, a3, a4, im.gromacs['func']) if (key in impropers): for i, imt in enumerate(impropers[key]): imA = copy.deepcopy(im) param = copy.deepcopy(imt.gromacs['param']) # Only check the first dihedral in a list if not impropers[key][0].line in banned_lines: for p in param: p['kpsi'] *= scale imA.gromacs['param'] = param if i == 0: imA.comment = "; banned lines {0} found={1}\n ; parameters for types {2}-{3}-{4}-{5}-9 at LINE({6})\n".format( " ".join(map(str, banned_lines)), 1 if imt.line in banned_lines else 0, imt.atype1, imt.atype2, imt.atype3, imt.atype4, imt.line) new_impropers.append(imA) break #assert(len(mol.impropers) == new_impropers) mol.impropers = new_impropers return mol
[ "def", "scale_impropers", "(", "mol", ",", "impropers", ",", "scale", ",", "banned_lines", "=", "None", ")", ":", "if", "banned_lines", "is", "None", ":", "banned_lines", "=", "[", "]", "new_impropers", "=", "[", "]", "for", "im", "in", "mol", ".", "impropers", ":", "atypes", "=", "(", "im", ".", "atom1", ".", "get_atomtype", "(", ")", ",", "im", ".", "atom2", ".", "get_atomtype", "(", ")", ",", "im", ".", "atom3", ".", "get_atomtype", "(", ")", ",", "im", ".", "atom4", ".", "get_atomtype", "(", ")", ")", "atypes", "=", "[", "a", ".", "replace", "(", "\"_\"", ",", "\"\"", ")", ".", "replace", "(", "\"=\"", ",", "\"\"", ")", "for", "a", "in", "atypes", "]", "# special-case: this is a [ dihedral ] override in molecule block, continue and don't match", "if", "im", ".", "gromacs", "[", "'param'", "]", "!=", "[", "]", ":", "for", "p", "in", "im", ".", "gromacs", "[", "'param'", "]", ":", "p", "[", "'kpsi'", "]", "*=", "scale", "new_impropers", ".", "append", "(", "im", ")", "continue", "for", "iswitch", "in", "range", "(", "32", ")", ":", "if", "(", "iswitch", "%", "2", "==", "0", ")", ":", "a1", "=", "atypes", "[", "0", "]", "a2", "=", "atypes", "[", "1", "]", "a3", "=", "atypes", "[", "2", "]", "a4", "=", "atypes", "[", "3", "]", "else", ":", "a1", "=", "atypes", "[", "3", "]", "a2", "=", "atypes", "[", "2", "]", "a3", "=", "atypes", "[", "1", "]", "a4", "=", "atypes", "[", "0", "]", "if", "(", "(", "iswitch", "//", "2", ")", "%", "2", "==", "1", ")", ":", "a1", "=", "\"X\"", "if", "(", "(", "iswitch", "//", "4", ")", "%", "2", "==", "1", ")", ":", "a2", "=", "\"X\"", "if", "(", "(", "iswitch", "//", "8", ")", "%", "2", "==", "1", ")", ":", "a3", "=", "\"X\"", "if", "(", "(", "iswitch", "//", "16", ")", "%", "2", "==", "1", ")", ":", "a4", "=", "\"X\"", "key", "=", "\"{0}-{1}-{2}-{3}-{4}\"", ".", "format", "(", "a1", ",", "a2", ",", "a3", ",", "a4", ",", "im", ".", "gromacs", "[", "'func'", "]", ")", "if", "(", "key", "in", "impropers", ")", ":", "for", "i", ",", "imt", "in", "enumerate", "(", "impropers", "[", "key", "]", ")", ":", "imA", "=", "copy", ".", "deepcopy", "(", "im", ")", "param", "=", "copy", ".", "deepcopy", "(", "imt", ".", "gromacs", "[", "'param'", "]", ")", "# Only check the first dihedral in a list", "if", "not", "impropers", "[", "key", "]", "[", "0", "]", ".", "line", "in", "banned_lines", ":", "for", "p", "in", "param", ":", "p", "[", "'kpsi'", "]", "*=", "scale", "imA", ".", "gromacs", "[", "'param'", "]", "=", "param", "if", "i", "==", "0", ":", "imA", ".", "comment", "=", "\"; banned lines {0} found={1}\\n ; parameters for types {2}-{3}-{4}-{5}-9 at LINE({6})\\n\"", ".", "format", "(", "\" \"", ".", "join", "(", "map", "(", "str", ",", "banned_lines", ")", ")", ",", "1", "if", "imt", ".", "line", "in", "banned_lines", "else", "0", ",", "imt", ".", "atype1", ",", "imt", ".", "atype2", ",", "imt", ".", "atype3", ",", "imt", ".", "atype4", ",", "imt", ".", "line", ")", "new_impropers", ".", "append", "(", "imA", ")", "break", "#assert(len(mol.impropers) == new_impropers)", "mol", ".", "impropers", "=", "new_impropers", "return", "mol" ]
Scale improper dihedrals
[ "Scale", "improper", "dihedrals" ]
python
valid
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/easy_logging.py
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/easy_logging.py#L16-L99
def setup_logging(**kwargs): # type: (Any) -> None """Setup logging configuration Args: **kwargs: See below logging_config_dict (dict): Logging configuration dictionary OR logging_config_json (str): Path to JSON Logging configuration OR logging_config_yaml (str): Path to YAML Logging configuration. Defaults to internal logging_configuration.yml. smtp_config_dict (dict): Email Logging configuration dictionary if using default logging configuration OR smtp_config_json (str): Path to JSON Email Logging configuration if using default logging configuration OR smtp_config_yaml (str): Path to YAML Email Logging configuration if using default logging configuration Returns: None """ smtp_config_found = False smtp_config_dict = kwargs.get('smtp_config_dict', None) if smtp_config_dict: smtp_config_found = True print('Loading smtp configuration customisations from dictionary') smtp_config_json = kwargs.get('smtp_config_json', '') if smtp_config_json: if smtp_config_found: raise LoggingError('More than one smtp configuration file given!') smtp_config_found = True print('Loading smtp configuration customisations from: %s' % smtp_config_json) smtp_config_dict = load_json(smtp_config_json) smtp_config_yaml = kwargs.get('smtp_config_yaml', '') if smtp_config_yaml: if smtp_config_found: raise LoggingError('More than one smtp configuration file given!') smtp_config_found = True print('Loading smtp configuration customisations from: %s' % smtp_config_yaml) smtp_config_dict = load_yaml(smtp_config_yaml) logging_smtp_config_dict = None logging_config_found = False logging_config_dict = kwargs.get('logging_config_dict', None) if logging_config_dict: logging_config_found = True print('Loading logging configuration from dictionary') logging_config_json = kwargs.get('logging_config_json', '') if logging_config_json: if logging_config_found: raise LoggingError('More than one logging configuration file given!') logging_config_found = True print('Loading logging configuration from: %s' % logging_config_json) logging_config_dict = load_json(logging_config_json) logging_config_yaml = kwargs.get('logging_config_yaml', '') if logging_config_found: if logging_config_yaml: raise LoggingError('More than one logging configuration file given!') else: if not logging_config_yaml: print('No logging configuration parameter. Using default.') logging_config_yaml = script_dir_plus_file('logging_configuration.yml', setup_logging) if smtp_config_found: logging_smtp_config_yaml = script_dir_plus_file('logging_smtp_configuration.yml', setup_logging) print('Loading base SMTP logging configuration from: %s' % logging_smtp_config_yaml) logging_smtp_config_dict = load_yaml(logging_smtp_config_yaml) print('Loading logging configuration from: %s' % logging_config_yaml) logging_config_dict = load_yaml(logging_config_yaml) if smtp_config_found: if logging_smtp_config_dict: logging_config_dict = merge_dictionaries([logging_config_dict, logging_smtp_config_dict, smtp_config_dict]) else: raise LoggingError('SMTP logging configuration file given but not using default logging configuration!') file_only = os.getenv('LOG_FILE_ONLY') if file_only is not None and file_only.lower() not in ['false', 'f', 'n', 'no', '0']: root = logging_config_dict.get('root') if root is not None: handlers = root.get('handlers', list()) for i, handler in enumerate(handlers): if handler.lower() == 'console': del handlers[i] break logging.config.dictConfig(logging_config_dict)
[ "def", "setup_logging", "(", "*", "*", "kwargs", ")", ":", "# type: (Any) -> None", "smtp_config_found", "=", "False", "smtp_config_dict", "=", "kwargs", ".", "get", "(", "'smtp_config_dict'", ",", "None", ")", "if", "smtp_config_dict", ":", "smtp_config_found", "=", "True", "print", "(", "'Loading smtp configuration customisations from dictionary'", ")", "smtp_config_json", "=", "kwargs", ".", "get", "(", "'smtp_config_json'", ",", "''", ")", "if", "smtp_config_json", ":", "if", "smtp_config_found", ":", "raise", "LoggingError", "(", "'More than one smtp configuration file given!'", ")", "smtp_config_found", "=", "True", "print", "(", "'Loading smtp configuration customisations from: %s'", "%", "smtp_config_json", ")", "smtp_config_dict", "=", "load_json", "(", "smtp_config_json", ")", "smtp_config_yaml", "=", "kwargs", ".", "get", "(", "'smtp_config_yaml'", ",", "''", ")", "if", "smtp_config_yaml", ":", "if", "smtp_config_found", ":", "raise", "LoggingError", "(", "'More than one smtp configuration file given!'", ")", "smtp_config_found", "=", "True", "print", "(", "'Loading smtp configuration customisations from: %s'", "%", "smtp_config_yaml", ")", "smtp_config_dict", "=", "load_yaml", "(", "smtp_config_yaml", ")", "logging_smtp_config_dict", "=", "None", "logging_config_found", "=", "False", "logging_config_dict", "=", "kwargs", ".", "get", "(", "'logging_config_dict'", ",", "None", ")", "if", "logging_config_dict", ":", "logging_config_found", "=", "True", "print", "(", "'Loading logging configuration from dictionary'", ")", "logging_config_json", "=", "kwargs", ".", "get", "(", "'logging_config_json'", ",", "''", ")", "if", "logging_config_json", ":", "if", "logging_config_found", ":", "raise", "LoggingError", "(", "'More than one logging configuration file given!'", ")", "logging_config_found", "=", "True", "print", "(", "'Loading logging configuration from: %s'", "%", "logging_config_json", ")", "logging_config_dict", "=", "load_json", "(", "logging_config_json", ")", "logging_config_yaml", "=", "kwargs", ".", "get", "(", "'logging_config_yaml'", ",", "''", ")", "if", "logging_config_found", ":", "if", "logging_config_yaml", ":", "raise", "LoggingError", "(", "'More than one logging configuration file given!'", ")", "else", ":", "if", "not", "logging_config_yaml", ":", "print", "(", "'No logging configuration parameter. 
Using default.'", ")", "logging_config_yaml", "=", "script_dir_plus_file", "(", "'logging_configuration.yml'", ",", "setup_logging", ")", "if", "smtp_config_found", ":", "logging_smtp_config_yaml", "=", "script_dir_plus_file", "(", "'logging_smtp_configuration.yml'", ",", "setup_logging", ")", "print", "(", "'Loading base SMTP logging configuration from: %s'", "%", "logging_smtp_config_yaml", ")", "logging_smtp_config_dict", "=", "load_yaml", "(", "logging_smtp_config_yaml", ")", "print", "(", "'Loading logging configuration from: %s'", "%", "logging_config_yaml", ")", "logging_config_dict", "=", "load_yaml", "(", "logging_config_yaml", ")", "if", "smtp_config_found", ":", "if", "logging_smtp_config_dict", ":", "logging_config_dict", "=", "merge_dictionaries", "(", "[", "logging_config_dict", ",", "logging_smtp_config_dict", ",", "smtp_config_dict", "]", ")", "else", ":", "raise", "LoggingError", "(", "'SMTP logging configuration file given but not using default logging configuration!'", ")", "file_only", "=", "os", ".", "getenv", "(", "'LOG_FILE_ONLY'", ")", "if", "file_only", "is", "not", "None", "and", "file_only", ".", "lower", "(", ")", "not", "in", "[", "'false'", ",", "'f'", ",", "'n'", ",", "'no'", ",", "'0'", "]", ":", "root", "=", "logging_config_dict", ".", "get", "(", "'root'", ")", "if", "root", "is", "not", "None", ":", "handlers", "=", "root", ".", "get", "(", "'handlers'", ",", "list", "(", ")", ")", "for", "i", ",", "handler", "in", "enumerate", "(", "handlers", ")", ":", "if", "handler", ".", "lower", "(", ")", "==", "'console'", ":", "del", "handlers", "[", "i", "]", "break", "logging", ".", "config", ".", "dictConfig", "(", "logging_config_dict", ")" ]
Setup logging configuration Args: **kwargs: See below logging_config_dict (dict): Logging configuration dictionary OR logging_config_json (str): Path to JSON Logging configuration OR logging_config_yaml (str): Path to YAML Logging configuration. Defaults to internal logging_configuration.yml. smtp_config_dict (dict): Email Logging configuration dictionary if using default logging configuration OR smtp_config_json (str): Path to JSON Email Logging configuration if using default logging configuration OR smtp_config_yaml (str): Path to YAML Email Logging configuration if using default logging configuration Returns: None
[ "Setup", "logging", "configuration" ]
python
train
limix/numpy-sugar
numpy_sugar/linalg/svd.py
https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/svd.py#L4-L30
def economic_svd(G, epsilon=sqrt(finfo(float).eps)): r"""Economic Singular Value Decomposition. Args: G (array_like): Matrix to be factorized. epsilon (float): Threshold on the square root of the eigen values. Default is ``sqrt(finfo(float).eps)``. Returns: :class:`numpy.ndarray`: Unitary matrix. :class:`numpy.ndarray`: Singular values. :class:`numpy.ndarray`: Unitary matrix. See Also -------- numpy.linalg.svd : Cholesky decomposition. scipy.linalg.svd : Cholesky decomposition. """ from scipy.linalg import svd G = asarray(G, float) (U, S, V) = svd(G, full_matrices=False, check_finite=False) ok = S >= epsilon S = S[ok] U = U[:, ok] V = V[ok, :] return (U, S, V)
[ "def", "economic_svd", "(", "G", ",", "epsilon", "=", "sqrt", "(", "finfo", "(", "float", ")", ".", "eps", ")", ")", ":", "from", "scipy", ".", "linalg", "import", "svd", "G", "=", "asarray", "(", "G", ",", "float", ")", "(", "U", ",", "S", ",", "V", ")", "=", "svd", "(", "G", ",", "full_matrices", "=", "False", ",", "check_finite", "=", "False", ")", "ok", "=", "S", ">=", "epsilon", "S", "=", "S", "[", "ok", "]", "U", "=", "U", "[", ":", ",", "ok", "]", "V", "=", "V", "[", "ok", ",", ":", "]", "return", "(", "U", ",", "S", ",", "V", ")" ]
r"""Economic Singular Value Decomposition. Args: G (array_like): Matrix to be factorized. epsilon (float): Threshold on the square root of the eigen values. Default is ``sqrt(finfo(float).eps)``. Returns: :class:`numpy.ndarray`: Unitary matrix. :class:`numpy.ndarray`: Singular values. :class:`numpy.ndarray`: Unitary matrix. See Also -------- numpy.linalg.svd : Cholesky decomposition. scipy.linalg.svd : Cholesky decomposition.
[ "r", "Economic", "Singular", "Value", "Decomposition", "." ]
python
train
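Because the docstring stops at the return values, a reconstruction check is a natural follow-up; this is a sketch assuming numpy_sugar re-exports economic_svd from numpy_sugar.linalg and that the inputs are ordinary NumPy arrays:

    import numpy as np
    from numpy_sugar.linalg import economic_svd

    G = np.random.RandomState(0).randn(5, 3)
    U, S, V = economic_svd(G)
    # Singular values below the epsilon threshold are dropped, so the product
    # U @ diag(S) @ V reproduces G up to that truncation.
    print(np.allclose(U @ np.diag(S) @ V, G))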
quikmile/trelliolibs
trelliolibs/pigeon/workers.py
https://github.com/quikmile/trelliolibs/blob/872e37d798523ef72380f504651b98ddd0762b0d/trelliolibs/pigeon/workers.py#L45-L61
def submit(self, method, method_args=(), method_kwargs={}, done_callback=None, done_kwargs={}, loop=None): ''' used to send async notifications :param method: :param method_args: :param method_kwargs: :param done_callback: :param done_kwargs: :param loop: :return: ''' _future = self.pool.submit(method, *method_args, **method_kwargs) self.current_id += 1 if done_callback: _future.add_done_callback(lambda _f:done_callback(_f,loop,**done_kwargs))#done kwargs, hardcoded kwargs self.request_pool[self.current_id] = _future return self.current_id, _future
[ "def", "submit", "(", "self", ",", "method", ",", "method_args", "=", "(", ")", ",", "method_kwargs", "=", "{", "}", ",", "done_callback", "=", "None", ",", "done_kwargs", "=", "{", "}", ",", "loop", "=", "None", ")", ":", "_future", "=", "self", ".", "pool", ".", "submit", "(", "method", ",", "*", "method_args", ",", "*", "*", "method_kwargs", ")", "self", ".", "current_id", "+=", "1", "if", "done_callback", ":", "_future", ".", "add_done_callback", "(", "lambda", "_f", ":", "done_callback", "(", "_f", ",", "loop", ",", "*", "*", "done_kwargs", ")", ")", "#done kwargs, hardcoded kwargs", "self", ".", "request_pool", "[", "self", ".", "current_id", "]", "=", "_future", "return", "self", ".", "current_id", ",", "_future" ]
used to send async notifications :param method: :param method_args: :param method_kwargs: :param done_callback: :param done_kwargs: :param loop: :return:
[ "used", "to", "send", "async", "notifications", ":", "param", "method", ":", ":", "param", "method_args", ":", ":", "param", "method_kwargs", ":", ":", "param", "done_callback", ":", ":", "param", "done_kwargs", ":", ":", "param", "loop", ":", ":", "return", ":" ]
python
train
ruipgil/TrackToTrip
tracktotrip/point.py
https://github.com/ruipgil/TrackToTrip/blob/5537c14ee9748091b5255b658ab528e1d6227f99/tracktotrip/point.py#L104-L116
def from_gpx(gpx_track_point): """ Creates a point from GPX representation Arguments: gpx_track_point (:obj:`gpxpy.GPXTrackPoint`) Returns: :obj:`Point` """ return Point( lat=gpx_track_point.latitude, lon=gpx_track_point.longitude, time=gpx_track_point.time )
[ "def", "from_gpx", "(", "gpx_track_point", ")", ":", "return", "Point", "(", "lat", "=", "gpx_track_point", ".", "latitude", ",", "lon", "=", "gpx_track_point", ".", "longitude", ",", "time", "=", "gpx_track_point", ".", "time", ")" ]
Creates a point from GPX representation Arguments: gpx_track_point (:obj:`gpxpy.GPXTrackPoint`) Returns: :obj:`Point`
[ "Creates", "a", "point", "from", "GPX", "representation" ]
python
train
mitsei/dlkit
dlkit/json_/assessment/assessment_utilities.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/assessment_utilities.py#L367-L374
def remove_from_parent_sequence_map(assessment_part_admin_session, assessment_part_id): """Updates the child map of a simple sequence assessment assessment part to remove child part""" apls = get_assessment_part_lookup_session(runtime=assessment_part_admin_session._runtime, proxy=assessment_part_admin_session._proxy) apls.use_federated_bank_view() apls.use_unsequestered_assessment_part_view() child_part = apls.get_assessment_part(assessment_part_id) update_parent_sequence_map(child_part, delete=True)
[ "def", "remove_from_parent_sequence_map", "(", "assessment_part_admin_session", ",", "assessment_part_id", ")", ":", "apls", "=", "get_assessment_part_lookup_session", "(", "runtime", "=", "assessment_part_admin_session", ".", "_runtime", ",", "proxy", "=", "assessment_part_admin_session", ".", "_proxy", ")", "apls", ".", "use_federated_bank_view", "(", ")", "apls", ".", "use_unsequestered_assessment_part_view", "(", ")", "child_part", "=", "apls", ".", "get_assessment_part", "(", "assessment_part_id", ")", "update_parent_sequence_map", "(", "child_part", ",", "delete", "=", "True", ")" ]
Updates the child map of a simple sequence assessment assessment part to remove child part
[ "Updates", "the", "child", "map", "of", "a", "simple", "sequence", "assessment", "assessment", "part", "to", "remove", "child", "part" ]
python
train
Sheeprider/BitBucket-api
bitbucket/bitbucket.py
https://github.com/Sheeprider/BitBucket-api/blob/be45515d506d87f14807a676f3c2f20d79674b75/bitbucket/bitbucket.py#L143-L170
def authorize(self, consumer_key, consumer_secret, callback_url=None, access_token=None, access_token_secret=None): """ Call this with your consumer key, secret and callback URL, to generate a token for verification. """ self.consumer_key = consumer_key self.consumer_secret = consumer_secret if not access_token and not access_token_secret: if not callback_url: return (False, "Callback URL required") oauth = OAuth1( consumer_key, client_secret=consumer_secret, callback_uri=callback_url) r = requests.post(self.url('REQUEST_TOKEN'), auth=oauth) if r.status_code == 200: creds = parse_qs(r.content) self.access_token = creds.get('oauth_token')[0] self.access_token_secret = creds.get('oauth_token_secret')[0] else: return (False, r.content) else: self.finalize_oauth(access_token, access_token_secret) return (True, None)
[ "def", "authorize", "(", "self", ",", "consumer_key", ",", "consumer_secret", ",", "callback_url", "=", "None", ",", "access_token", "=", "None", ",", "access_token_secret", "=", "None", ")", ":", "self", ".", "consumer_key", "=", "consumer_key", "self", ".", "consumer_secret", "=", "consumer_secret", "if", "not", "access_token", "and", "not", "access_token_secret", ":", "if", "not", "callback_url", ":", "return", "(", "False", ",", "\"Callback URL required\"", ")", "oauth", "=", "OAuth1", "(", "consumer_key", ",", "client_secret", "=", "consumer_secret", ",", "callback_uri", "=", "callback_url", ")", "r", "=", "requests", ".", "post", "(", "self", ".", "url", "(", "'REQUEST_TOKEN'", ")", ",", "auth", "=", "oauth", ")", "if", "r", ".", "status_code", "==", "200", ":", "creds", "=", "parse_qs", "(", "r", ".", "content", ")", "self", ".", "access_token", "=", "creds", ".", "get", "(", "'oauth_token'", ")", "[", "0", "]", "self", ".", "access_token_secret", "=", "creds", ".", "get", "(", "'oauth_token_secret'", ")", "[", "0", "]", "else", ":", "return", "(", "False", ",", "r", ".", "content", ")", "else", ":", "self", ".", "finalize_oauth", "(", "access_token", ",", "access_token_secret", ")", "return", "(", "True", ",", "None", ")" ]
Call this with your consumer key, secret and callback URL, to generate a token for verification.
[ "Call", "this", "with", "your", "consumer", "key", "secret", "and", "callback", "URL", "to", "generate", "a", "token", "for", "verification", "." ]
python
train
niklasf/python-chess
chess/__init__.py
https://github.com/niklasf/python-chess/blob/d91f986ca3e046b300a0d7d9ee2a13b07610fe1a/chess/__init__.py#L1888-L1924
def is_repetition(self, count: int = 3) -> bool: """ Checks if the current position has repeated 3 (or a given number of) times. Unlike :func:`~chess.Board.can_claim_threefold_repetition()`, this does not consider a repetition that can be played on the next move. Note that checking this can be slow: In the worst case the entire game has to be replayed because there is no incremental transposition table. """ transposition_key = self._transposition_key() switchyard = [] try: while True: if count <= 1: return True if not self.move_stack: break move = self.pop() switchyard.append(move) if self.is_irreversible(move): break if self._transposition_key() == transposition_key: count -= 1 finally: while switchyard: self.push(switchyard.pop()) return False
[ "def", "is_repetition", "(", "self", ",", "count", ":", "int", "=", "3", ")", "->", "bool", ":", "transposition_key", "=", "self", ".", "_transposition_key", "(", ")", "switchyard", "=", "[", "]", "try", ":", "while", "True", ":", "if", "count", "<=", "1", ":", "return", "True", "if", "not", "self", ".", "move_stack", ":", "break", "move", "=", "self", ".", "pop", "(", ")", "switchyard", ".", "append", "(", "move", ")", "if", "self", ".", "is_irreversible", "(", "move", ")", ":", "break", "if", "self", ".", "_transposition_key", "(", ")", "==", "transposition_key", ":", "count", "-=", "1", "finally", ":", "while", "switchyard", ":", "self", ".", "push", "(", "switchyard", ".", "pop", "(", ")", ")", "return", "False" ]
Checks if the current position has repeated 3 (or a given number of) times. Unlike :func:`~chess.Board.can_claim_threefold_repetition()`, this does not consider a repetition that can be played on the next move. Note that checking this can be slow: In the worst case the entire game has to be replayed because there is no incremental transposition table.
[ "Checks", "if", "the", "current", "position", "has", "repeated", "3", "(", "or", "a", "given", "number", "of", ")", "times", "." ]
python
train
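A short usage sketch for the repetition check, assuming a python-chess release that exposes Board.is_repetition as quoted above:

    import chess

    board = chess.Board()
    # Shuffling the knights out and back makes the starting position recur.
    for san in ["Nf3", "Nf6", "Ng1", "Ng8", "Nf3", "Nf6", "Ng1", "Ng8"]:
        board.push_san(san)
    print(board.is_repetition(3))   # True: the initial position has now occurred 3 times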
Metatab/metapack
metapack/util.py
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/util.py#L89-L104
def make_dir_structure(base_dir): """Make the build directory structure. """ def maybe_makedir(*args): p = join(base_dir, *args) if exists(p) and not isdir(p): raise IOError("File '{}' exists but is not a directory ".format(p)) if not exists(p): makedirs(p) maybe_makedir(DOWNLOAD_DIR) maybe_makedir(PACKAGE_DIR) maybe_makedir(OLD_DIR)
[ "def", "make_dir_structure", "(", "base_dir", ")", ":", "def", "maybe_makedir", "(", "*", "args", ")", ":", "p", "=", "join", "(", "base_dir", ",", "*", "args", ")", "if", "exists", "(", "p", ")", "and", "not", "isdir", "(", "p", ")", ":", "raise", "IOError", "(", "\"File '{}' exists but is not a directory \"", ".", "format", "(", "p", ")", ")", "if", "not", "exists", "(", "p", ")", ":", "makedirs", "(", "p", ")", "maybe_makedir", "(", "DOWNLOAD_DIR", ")", "maybe_makedir", "(", "PACKAGE_DIR", ")", "maybe_makedir", "(", "OLD_DIR", ")" ]
Make the build directory structure.
[ "Make", "the", "build", "directory", "structure", "." ]
python
train
ejeschke/ginga
ginga/rv/plugins/ChangeHistory.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/ChangeHistory.py#L269-L284
def delete_channel_cb(self, gshell, chinfo): """Called when a channel is deleted from the main interface. Parameter is chinfo (a bunch).""" chname = chinfo.name if chname not in self.name_dict: return del self.name_dict[chname] self.logger.debug('{0} removed from ChangeHistory'.format(chname)) if not self.gui_up: return False self.clear_selected_history() self.recreate_toc()
[ "def", "delete_channel_cb", "(", "self", ",", "gshell", ",", "chinfo", ")", ":", "chname", "=", "chinfo", ".", "name", "if", "chname", "not", "in", "self", ".", "name_dict", ":", "return", "del", "self", ".", "name_dict", "[", "chname", "]", "self", ".", "logger", ".", "debug", "(", "'{0} removed from ChangeHistory'", ".", "format", "(", "chname", ")", ")", "if", "not", "self", ".", "gui_up", ":", "return", "False", "self", ".", "clear_selected_history", "(", ")", "self", ".", "recreate_toc", "(", ")" ]
Called when a channel is deleted from the main interface. Parameter is chinfo (a bunch).
[ "Called", "when", "a", "channel", "is", "deleted", "from", "the", "main", "interface", ".", "Parameter", "is", "chinfo", "(", "a", "bunch", ")", "." ]
python
train
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L5137-L5162
def offset(self): """Return offset to series data in file, if any.""" if not self._pages: return None pos = 0 for page in self._pages: if page is None: return None if not page.is_final: return None if not pos: pos = page.is_contiguous[0] + page.is_contiguous[1] continue if pos != page.is_contiguous[0]: return None pos += page.is_contiguous[1] page = self._pages[0] offset = page.is_contiguous[0] if (page.is_imagej or page.is_shaped) and len(self._pages) == 1: # truncated files return offset if pos == offset + product(self.shape) * self.dtype.itemsize: return offset return None
[ "def", "offset", "(", "self", ")", ":", "if", "not", "self", ".", "_pages", ":", "return", "None", "pos", "=", "0", "for", "page", "in", "self", ".", "_pages", ":", "if", "page", "is", "None", ":", "return", "None", "if", "not", "page", ".", "is_final", ":", "return", "None", "if", "not", "pos", ":", "pos", "=", "page", ".", "is_contiguous", "[", "0", "]", "+", "page", ".", "is_contiguous", "[", "1", "]", "continue", "if", "pos", "!=", "page", ".", "is_contiguous", "[", "0", "]", ":", "return", "None", "pos", "+=", "page", ".", "is_contiguous", "[", "1", "]", "page", "=", "self", ".", "_pages", "[", "0", "]", "offset", "=", "page", ".", "is_contiguous", "[", "0", "]", "if", "(", "page", ".", "is_imagej", "or", "page", ".", "is_shaped", ")", "and", "len", "(", "self", ".", "_pages", ")", "==", "1", ":", "# truncated files", "return", "offset", "if", "pos", "==", "offset", "+", "product", "(", "self", ".", "shape", ")", "*", "self", ".", "dtype", ".", "itemsize", ":", "return", "offset", "return", "None" ]
Return offset to series data in file, if any.
[ "Return", "offset", "to", "series", "data", "in", "file", "if", "any", "." ]
python
train
programa-stic/barf-project
barf/analysis/codeanalyzer/codeanalyzer.py
https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/analysis/codeanalyzer/codeanalyzer.py#L78-L97
def get_register_expr(self, register_name, mode="post"): """Return a smt bit vector that represents an architectural (native) register. """ reg_info = self._arch_info.alias_mapper.get(register_name, None) if reg_info: var_base_name, offset = reg_info else: var_base_name = register_name var_name = self._get_var_name(var_base_name, mode) var_size = self._arch_info.registers_size[var_base_name] ret_val = self._translator.make_bitvec(var_size, var_name) if reg_info: ret_val = smtfunction.extract(ret_val, offset, self._arch_info.registers_size[register_name]) return ret_val
[ "def", "get_register_expr", "(", "self", ",", "register_name", ",", "mode", "=", "\"post\"", ")", ":", "reg_info", "=", "self", ".", "_arch_info", ".", "alias_mapper", ".", "get", "(", "register_name", ",", "None", ")", "if", "reg_info", ":", "var_base_name", ",", "offset", "=", "reg_info", "else", ":", "var_base_name", "=", "register_name", "var_name", "=", "self", ".", "_get_var_name", "(", "var_base_name", ",", "mode", ")", "var_size", "=", "self", ".", "_arch_info", ".", "registers_size", "[", "var_base_name", "]", "ret_val", "=", "self", ".", "_translator", ".", "make_bitvec", "(", "var_size", ",", "var_name", ")", "if", "reg_info", ":", "ret_val", "=", "smtfunction", ".", "extract", "(", "ret_val", ",", "offset", ",", "self", ".", "_arch_info", ".", "registers_size", "[", "register_name", "]", ")", "return", "ret_val" ]
Return a smt bit vector that represents an architectural (native) register.
[ "Return", "a", "smt", "bit", "vector", "that", "represents", "an", "architectural", "(", "native", ")", "register", "." ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/breakpoint.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/breakpoint.py#L3293-L3321
def enable_one_shot_process_breakpoints(self, dwProcessId): """ Enables for one shot all disabled breakpoints for the given process. @type dwProcessId: int @param dwProcessId: Process global ID. """ # enable code breakpoints for one shot for bp in self.get_process_code_breakpoints(dwProcessId): if bp.is_disabled(): self.enable_one_shot_code_breakpoint(dwProcessId, bp.get_address()) # enable page breakpoints for one shot for bp in self.get_process_page_breakpoints(dwProcessId): if bp.is_disabled(): self.enable_one_shot_page_breakpoint(dwProcessId, bp.get_address()) # enable hardware breakpoints for one shot if self.system.has_process(dwProcessId): aProcess = self.system.get_process(dwProcessId) else: aProcess = Process(dwProcessId) aProcess.scan_threads() for aThread in aProcess.iter_threads(): dwThreadId = aThread.get_tid() for bp in self.get_thread_hardware_breakpoints(dwThreadId): if bp.is_disabled(): self.enable_one_shot_hardware_breakpoint(dwThreadId, bp.get_address())
[ "def", "enable_one_shot_process_breakpoints", "(", "self", ",", "dwProcessId", ")", ":", "# enable code breakpoints for one shot", "for", "bp", "in", "self", ".", "get_process_code_breakpoints", "(", "dwProcessId", ")", ":", "if", "bp", ".", "is_disabled", "(", ")", ":", "self", ".", "enable_one_shot_code_breakpoint", "(", "dwProcessId", ",", "bp", ".", "get_address", "(", ")", ")", "# enable page breakpoints for one shot", "for", "bp", "in", "self", ".", "get_process_page_breakpoints", "(", "dwProcessId", ")", ":", "if", "bp", ".", "is_disabled", "(", ")", ":", "self", ".", "enable_one_shot_page_breakpoint", "(", "dwProcessId", ",", "bp", ".", "get_address", "(", ")", ")", "# enable hardware breakpoints for one shot", "if", "self", ".", "system", ".", "has_process", "(", "dwProcessId", ")", ":", "aProcess", "=", "self", ".", "system", ".", "get_process", "(", "dwProcessId", ")", "else", ":", "aProcess", "=", "Process", "(", "dwProcessId", ")", "aProcess", ".", "scan_threads", "(", ")", "for", "aThread", "in", "aProcess", ".", "iter_threads", "(", ")", ":", "dwThreadId", "=", "aThread", ".", "get_tid", "(", ")", "for", "bp", "in", "self", ".", "get_thread_hardware_breakpoints", "(", "dwThreadId", ")", ":", "if", "bp", ".", "is_disabled", "(", ")", ":", "self", ".", "enable_one_shot_hardware_breakpoint", "(", "dwThreadId", ",", "bp", ".", "get_address", "(", ")", ")" ]
Enables for one shot all disabled breakpoints for the given process. @type dwProcessId: int @param dwProcessId: Process global ID.
[ "Enables", "for", "one", "shot", "all", "disabled", "breakpoints", "for", "the", "given", "process", "." ]
python
train
pyviz/imagen
imagen/patterngenerator.py
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/patterngenerator.py#L524-L530
def state_pop(self): """ Pop the state of all generators """ super(Composite,self).state_pop() for gen in self.generators: gen.state_pop()
[ "def", "state_pop", "(", "self", ")", ":", "super", "(", "Composite", ",", "self", ")", ".", "state_pop", "(", ")", "for", "gen", "in", "self", ".", "generators", ":", "gen", ".", "state_pop", "(", ")" ]
Pop the state of all generators
[ "Pop", "the", "state", "of", "all", "generators" ]
python
train
tensorflow/tensor2tensor
tensor2tensor/utils/learning_rate.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/learning_rate.py#L141-L183
def _learning_rate_decay(hparams, warmup_steps=0): """Learning rate decay multiplier.""" scheme = hparams.learning_rate_decay_scheme warmup_steps = tf.to_float(warmup_steps) global_step = _global_step(hparams) if not scheme or scheme == "none": return tf.constant(1.) tf.logging.info("Applying learning rate decay: %s.", scheme) if scheme == "exp": decay_steps = hparams.learning_rate_decay_steps p = (global_step - warmup_steps) / decay_steps if hparams.learning_rate_decay_staircase: p = tf.floor(p) return tf.pow(hparams.learning_rate_decay_rate, p) if scheme == "piecewise": return _piecewise_learning_rate(global_step, hparams.learning_rate_boundaries, hparams.learning_rate_multiples) if scheme == "cosine": cycle_steps = hparams.learning_rate_cosine_cycle_steps cycle_position = global_step % (2 * cycle_steps) cycle_position = cycle_steps - tf.abs(cycle_steps - cycle_position) return 0.5 * (1 + tf.cos(np.pi * cycle_position / cycle_steps)) if scheme == "cyclelinear10x": # Cycle the rate linearly by 10x every warmup_steps, up and down. cycle_steps = warmup_steps cycle_position = global_step % (2 * cycle_steps) cycle_position = tf.to_float( # Normalize to the interval [-1, 1]. cycle_position - cycle_steps) / float(cycle_steps) cycle_position = 1.0 - tf.abs(cycle_position) # 0 to 1 and back to 0. return (cycle_position + 0.1) * 3.0 # 10x difference each cycle (0.3-3). if scheme == "sqrt": return _legacy_sqrt_decay(global_step - warmup_steps) raise ValueError("Unrecognized learning rate decay scheme: %s" % hparams.learning_rate_decay_scheme)
[ "def", "_learning_rate_decay", "(", "hparams", ",", "warmup_steps", "=", "0", ")", ":", "scheme", "=", "hparams", ".", "learning_rate_decay_scheme", "warmup_steps", "=", "tf", ".", "to_float", "(", "warmup_steps", ")", "global_step", "=", "_global_step", "(", "hparams", ")", "if", "not", "scheme", "or", "scheme", "==", "\"none\"", ":", "return", "tf", ".", "constant", "(", "1.", ")", "tf", ".", "logging", ".", "info", "(", "\"Applying learning rate decay: %s.\"", ",", "scheme", ")", "if", "scheme", "==", "\"exp\"", ":", "decay_steps", "=", "hparams", ".", "learning_rate_decay_steps", "p", "=", "(", "global_step", "-", "warmup_steps", ")", "/", "decay_steps", "if", "hparams", ".", "learning_rate_decay_staircase", ":", "p", "=", "tf", ".", "floor", "(", "p", ")", "return", "tf", ".", "pow", "(", "hparams", ".", "learning_rate_decay_rate", ",", "p", ")", "if", "scheme", "==", "\"piecewise\"", ":", "return", "_piecewise_learning_rate", "(", "global_step", ",", "hparams", ".", "learning_rate_boundaries", ",", "hparams", ".", "learning_rate_multiples", ")", "if", "scheme", "==", "\"cosine\"", ":", "cycle_steps", "=", "hparams", ".", "learning_rate_cosine_cycle_steps", "cycle_position", "=", "global_step", "%", "(", "2", "*", "cycle_steps", ")", "cycle_position", "=", "cycle_steps", "-", "tf", ".", "abs", "(", "cycle_steps", "-", "cycle_position", ")", "return", "0.5", "*", "(", "1", "+", "tf", ".", "cos", "(", "np", ".", "pi", "*", "cycle_position", "/", "cycle_steps", ")", ")", "if", "scheme", "==", "\"cyclelinear10x\"", ":", "# Cycle the rate linearly by 10x every warmup_steps, up and down.", "cycle_steps", "=", "warmup_steps", "cycle_position", "=", "global_step", "%", "(", "2", "*", "cycle_steps", ")", "cycle_position", "=", "tf", ".", "to_float", "(", "# Normalize to the interval [-1, 1].", "cycle_position", "-", "cycle_steps", ")", "/", "float", "(", "cycle_steps", ")", "cycle_position", "=", "1.0", "-", "tf", ".", "abs", "(", "cycle_position", ")", "# 0 to 1 and back to 0.", "return", "(", "cycle_position", "+", "0.1", ")", "*", "3.0", "# 10x difference each cycle (0.3-3).", "if", "scheme", "==", "\"sqrt\"", ":", "return", "_legacy_sqrt_decay", "(", "global_step", "-", "warmup_steps", ")", "raise", "ValueError", "(", "\"Unrecognized learning rate decay scheme: %s\"", "%", "hparams", ".", "learning_rate_decay_scheme", ")" ]
Learning rate decay multiplier.
[ "Learning", "rate", "decay", "multiplier", "." ]
python
train
twisted/txaws
txaws/s3/client.py
https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/s3/client.py#L320-L334
def get_bucket_website_config(self, bucket): """ Get the website configuration of a bucket. @param bucket: The name of the bucket. @return: A C{Deferred} that will fire with the bucket's website configuration. """ details = self._details( method=b"GET", url_context=self._url_context(bucket=bucket, object_name='?website'), ) d = self._submit(self._query_factory(details)) d.addCallback(self._parse_website_config) return d
[ "def", "get_bucket_website_config", "(", "self", ",", "bucket", ")", ":", "details", "=", "self", ".", "_details", "(", "method", "=", "b\"GET\"", ",", "url_context", "=", "self", ".", "_url_context", "(", "bucket", "=", "bucket", ",", "object_name", "=", "'?website'", ")", ",", ")", "d", "=", "self", ".", "_submit", "(", "self", ".", "_query_factory", "(", "details", ")", ")", "d", ".", "addCallback", "(", "self", ".", "_parse_website_config", ")", "return", "d" ]
Get the website configuration of a bucket. @param bucket: The name of the bucket. @return: A C{Deferred} that will fire with the bucket's website configuration.
[ "Get", "the", "website", "configuration", "of", "a", "bucket", "." ]
python
train
edx/edx-enterprise
enterprise/templatetags/enterprise.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/templatetags/enterprise.py#L48-L71
def course_modal(context, course=None): """ Django template tag that returns course information to display in a modal. You may pass in a particular course if you like. Otherwise, the modal will look for course context within the parent context. Usage: {% course_modal %} {% course_modal course %} """ if course: context.update({ 'course_image_uri': course.get('course_image_uri', ''), 'course_title': course.get('course_title', ''), 'course_level_type': course.get('course_level_type', ''), 'course_short_description': course.get('course_short_description', ''), 'course_effort': course.get('course_effort', ''), 'course_full_description': course.get('course_full_description', ''), 'expected_learning_items': course.get('expected_learning_items', []), 'staff': course.get('staff', []), 'premium_modes': course.get('premium_modes', []), }) return context
[ "def", "course_modal", "(", "context", ",", "course", "=", "None", ")", ":", "if", "course", ":", "context", ".", "update", "(", "{", "'course_image_uri'", ":", "course", ".", "get", "(", "'course_image_uri'", ",", "''", ")", ",", "'course_title'", ":", "course", ".", "get", "(", "'course_title'", ",", "''", ")", ",", "'course_level_type'", ":", "course", ".", "get", "(", "'course_level_type'", ",", "''", ")", ",", "'course_short_description'", ":", "course", ".", "get", "(", "'course_short_description'", ",", "''", ")", ",", "'course_effort'", ":", "course", ".", "get", "(", "'course_effort'", ",", "''", ")", ",", "'course_full_description'", ":", "course", ".", "get", "(", "'course_full_description'", ",", "''", ")", ",", "'expected_learning_items'", ":", "course", ".", "get", "(", "'expected_learning_items'", ",", "[", "]", ")", ",", "'staff'", ":", "course", ".", "get", "(", "'staff'", ",", "[", "]", ")", ",", "'premium_modes'", ":", "course", ".", "get", "(", "'premium_modes'", ",", "[", "]", ")", ",", "}", ")", "return", "context" ]
Django template tag that returns course information to display in a modal. You may pass in a particular course if you like. Otherwise, the modal will look for course context within the parent context. Usage: {% course_modal %} {% course_modal course %}
[ "Django", "template", "tag", "that", "returns", "course", "information", "to", "display", "in", "a", "modal", "." ]
python
valid
hyperledger/indy-plenum
plenum/server/observer/observer_node.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/observer/observer_node.py#L39-L46
async def serviceQueues(self, limit=None) -> int: """ Service at most `limit` messages from the inBox. :param limit: the maximum number of messages to service :return: the number of messages successfully processed """ return await self._inbox_router.handleAll(self._inbox, limit)
[ "async", "def", "serviceQueues", "(", "self", ",", "limit", "=", "None", ")", "->", "int", ":", "return", "await", "self", ".", "_inbox_router", ".", "handleAll", "(", "self", ".", "_inbox", ",", "limit", ")" ]
Service at most `limit` messages from the inBox. :param limit: the maximum number of messages to service :return: the number of messages successfully processed
[ "Service", "at", "most", "limit", "messages", "from", "the", "inBox", "." ]
python
train
opendatateam/udata
udata/tasks.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/tasks.py#L50-L70
def router(name, args, kwargs, options, task=None, **kw): ''' A celery router using the predeclared :class:`ContextTask` attributes (`router` or `default_queue` and/or `default routing_key`). ''' # Fetch task by name if necessary task = task or celery.tasks.get(name) if not task: return # Single route param override everything if task.route: queue = task.route.split('.', 1)[0] return {'queue': queue, 'routing_key': task.route} # queue parameter, routing_key computed if not present if task.default_queue: key = task.default_routing_key key = key or '{0.default_queue}.{0.name}'.format(task) return {'queue': task.default_queue, 'routing_key': key} # only routing_key, queue should not be returned to fallback on default elif task.default_routing_key: return {'routing_key': task.default_routing_key}
[ "def", "router", "(", "name", ",", "args", ",", "kwargs", ",", "options", ",", "task", "=", "None", ",", "*", "*", "kw", ")", ":", "# Fetch task by name if necessary", "task", "=", "task", "or", "celery", ".", "tasks", ".", "get", "(", "name", ")", "if", "not", "task", ":", "return", "# Single route param override everything", "if", "task", ".", "route", ":", "queue", "=", "task", ".", "route", ".", "split", "(", "'.'", ",", "1", ")", "[", "0", "]", "return", "{", "'queue'", ":", "queue", ",", "'routing_key'", ":", "task", ".", "route", "}", "# queue parameter, routing_key computed if not present", "if", "task", ".", "default_queue", ":", "key", "=", "task", ".", "default_routing_key", "key", "=", "key", "or", "'{0.default_queue}.{0.name}'", ".", "format", "(", "task", ")", "return", "{", "'queue'", ":", "task", ".", "default_queue", ",", "'routing_key'", ":", "key", "}", "# only routing_key, queue should not be returned to fallback on default", "elif", "task", ".", "default_routing_key", ":", "return", "{", "'routing_key'", ":", "task", ".", "default_routing_key", "}" ]
A celery router using the predeclared :class:`ContextTask` attributes (`router` or `default_queue` and/or `default routing_key`).
[ "A", "celery", "router", "using", "the", "predeclared", ":", "class", ":", "ContextTask", "attributes", "(", "router", "or", "default_queue", "and", "/", "or", "default", "routing_key", ")", "." ]
python
train
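The router record above maps a task's route, default_queue and default_routing_key attributes onto a Celery routing dict. Below is a minimal standalone sketch of those three rules, using an invented stub task object instead of the real Celery registry lookup; it illustrates the routing logic and is not udata's code.

class StubTask:
    """Invented stand-in for a registered Celery task."""
    def __init__(self, name, route=None, default_queue=None, default_routing_key=None):
        self.name = name
        self.route = route
        self.default_queue = default_queue
        self.default_routing_key = default_routing_key

def route_for(task):
    # A single `route` parameter overrides everything; its first dotted part is the queue.
    if task.route:
        return {'queue': task.route.split('.', 1)[0], 'routing_key': task.route}
    # A queue without a routing key gets "<queue>.<task name>" computed for it.
    if task.default_queue:
        key = task.default_routing_key or '{0.default_queue}.{0.name}'.format(task)
        return {'queue': task.default_queue, 'routing_key': key}
    # Only a routing key: no queue is returned, so Celery falls back to its default queue.
    if task.default_routing_key:
        return {'routing_key': task.default_routing_key}

print(route_for(StubTask('harvest', route='low.harvest')))
# {'queue': 'low', 'routing_key': 'low.harvest'}
print(route_for(StubTask('purge', default_queue='high')))
# {'queue': 'high', 'routing_key': 'high.purge'}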
wglass/lighthouse
lighthouse/service.py
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/service.py#L39-L50
def validate_config(cls, config): """ Runs a check on the given config to make sure that `port`/`ports` and `discovery` is defined. """ if "discovery" not in config: raise ValueError("No discovery method defined.") if not any([item in config for item in ["port", "ports"]]): raise ValueError("No port(s) defined.") cls.validate_check_configs(config)
[ "def", "validate_config", "(", "cls", ",", "config", ")", ":", "if", "\"discovery\"", "not", "in", "config", ":", "raise", "ValueError", "(", "\"No discovery method defined.\"", ")", "if", "not", "any", "(", "[", "item", "in", "config", "for", "item", "in", "[", "\"port\"", ",", "\"ports\"", "]", "]", ")", ":", "raise", "ValueError", "(", "\"No port(s) defined.\"", ")", "cls", ".", "validate_check_configs", "(", "config", ")" ]
Runs a check on the given config to make sure that `port`/`ports` and `discovery` is defined.
[ "Runs", "a", "check", "on", "the", "given", "config", "to", "make", "sure", "that", "port", "/", "ports", "and", "discovery", "is", "defined", "." ]
python
train
saltstack/salt
salt/modules/btrfs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/btrfs.py#L130-L171
def defragment(path): ''' Defragment mounted BTRFS filesystem. In order to defragment a filesystem, device should be properly mounted and writable. If passed a device name, then defragmented whole filesystem, mounted on in. If passed a moun tpoint of the filesystem, then only this mount point is defragmented. CLI Example: .. code-block:: bash salt '*' btrfs.defragment /dev/sda1 salt '*' btrfs.defragment /path/on/filesystem ''' is_device = salt.utils.fsutils._is_device(path) mounts = salt.utils.fsutils._get_mounts("btrfs") if is_device and not mounts.get(path): raise CommandExecutionError("Device \"{0}\" is not mounted".format(path)) result = [] if is_device: for mount_point in mounts[path]: result.append(_defragment_mountpoint(mount_point['mount_point'])) else: is_mountpoint = False for mountpoints in six.itervalues(mounts): for mpnt in mountpoints: if path == mpnt['mount_point']: is_mountpoint = True break d_res = _defragment_mountpoint(path) if not is_mountpoint and not d_res['passed'] and "range ioctl not supported" in d_res['log']: d_res['log'] = "Range ioctl defragmentation is not supported in this kernel." if not is_mountpoint: d_res['mount_point'] = False d_res['range'] = os.path.exists(path) and path or False result.append(d_res) return result
[ "def", "defragment", "(", "path", ")", ":", "is_device", "=", "salt", ".", "utils", ".", "fsutils", ".", "_is_device", "(", "path", ")", "mounts", "=", "salt", ".", "utils", ".", "fsutils", ".", "_get_mounts", "(", "\"btrfs\"", ")", "if", "is_device", "and", "not", "mounts", ".", "get", "(", "path", ")", ":", "raise", "CommandExecutionError", "(", "\"Device \\\"{0}\\\" is not mounted\"", ".", "format", "(", "path", ")", ")", "result", "=", "[", "]", "if", "is_device", ":", "for", "mount_point", "in", "mounts", "[", "path", "]", ":", "result", ".", "append", "(", "_defragment_mountpoint", "(", "mount_point", "[", "'mount_point'", "]", ")", ")", "else", ":", "is_mountpoint", "=", "False", "for", "mountpoints", "in", "six", ".", "itervalues", "(", "mounts", ")", ":", "for", "mpnt", "in", "mountpoints", ":", "if", "path", "==", "mpnt", "[", "'mount_point'", "]", ":", "is_mountpoint", "=", "True", "break", "d_res", "=", "_defragment_mountpoint", "(", "path", ")", "if", "not", "is_mountpoint", "and", "not", "d_res", "[", "'passed'", "]", "and", "\"range ioctl not supported\"", "in", "d_res", "[", "'log'", "]", ":", "d_res", "[", "'log'", "]", "=", "\"Range ioctl defragmentation is not supported in this kernel.\"", "if", "not", "is_mountpoint", ":", "d_res", "[", "'mount_point'", "]", "=", "False", "d_res", "[", "'range'", "]", "=", "os", ".", "path", ".", "exists", "(", "path", ")", "and", "path", "or", "False", "result", ".", "append", "(", "d_res", ")", "return", "result" ]
Defragment mounted BTRFS filesystem. In order to defragment a filesystem, device should be properly mounted and writable. If passed a device name, then defragmented whole filesystem, mounted on in. If passed a moun tpoint of the filesystem, then only this mount point is defragmented. CLI Example: .. code-block:: bash salt '*' btrfs.defragment /dev/sda1 salt '*' btrfs.defragment /path/on/filesystem
[ "Defragment", "mounted", "BTRFS", "filesystem", ".", "In", "order", "to", "defragment", "a", "filesystem", "device", "should", "be", "properly", "mounted", "and", "writable", "." ]
python
train
widdowquinn/pyani
bin/genbank_get_genomes_by_taxon.py
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/genbank_get_genomes_by_taxon.py#L234-L259
def get_asm_uids(taxon_uid): """Returns a set of NCBI UIDs associated with the passed taxon. This query at NCBI returns all assemblies for the taxon subtree rooted at the passed taxon_uid. """ query = "txid%s[Organism:exp]" % taxon_uid logger.info("Entrez ESearch with query: %s", query) # Perform initial search for assembly UIDs with taxon ID as query. # Use NCBI history for the search. handle = entrez_retry( Entrez.esearch, db="assembly", term=query, format="xml", usehistory="y") record = Entrez.read(handle, validate=False) result_count = int(record['Count']) logger.info("Entrez ESearch returns %d assembly IDs", result_count) # Recover assembly UIDs from the web history asm_ids = entrez_batch_webhistory( record, result_count, 250, db="assembly", retmode="xml") logger.info("Identified %d unique assemblies", len(asm_ids)) return asm_ids
[ "def", "get_asm_uids", "(", "taxon_uid", ")", ":", "query", "=", "\"txid%s[Organism:exp]\"", "%", "taxon_uid", "logger", ".", "info", "(", "\"Entrez ESearch with query: %s\"", ",", "query", ")", "# Perform initial search for assembly UIDs with taxon ID as query.", "# Use NCBI history for the search.", "handle", "=", "entrez_retry", "(", "Entrez", ".", "esearch", ",", "db", "=", "\"assembly\"", ",", "term", "=", "query", ",", "format", "=", "\"xml\"", ",", "usehistory", "=", "\"y\"", ")", "record", "=", "Entrez", ".", "read", "(", "handle", ",", "validate", "=", "False", ")", "result_count", "=", "int", "(", "record", "[", "'Count'", "]", ")", "logger", ".", "info", "(", "\"Entrez ESearch returns %d assembly IDs\"", ",", "result_count", ")", "# Recover assembly UIDs from the web history", "asm_ids", "=", "entrez_batch_webhistory", "(", "record", ",", "result_count", ",", "250", ",", "db", "=", "\"assembly\"", ",", "retmode", "=", "\"xml\"", ")", "logger", ".", "info", "(", "\"Identified %d unique assemblies\"", ",", "len", "(", "asm_ids", ")", ")", "return", "asm_ids" ]
Returns a set of NCBI UIDs associated with the passed taxon. This query at NCBI returns all assemblies for the taxon subtree rooted at the passed taxon_uid.
[ "Returns", "a", "set", "of", "NCBI", "UIDs", "associated", "with", "the", "passed", "taxon", "." ]
python
train
sentinel-hub/eo-learn
core/eolearn/core/plots.py
https://github.com/sentinel-hub/eo-learn/blob/b8c390b9f553c561612fe9eb64e720611633a035/core/eolearn/core/plots.py#L56-L63
def update(self): """Updates image to be displayed with new time frame.""" if self.single_channel: self.im.set_data(self.data[self.ind, :, :]) else: self.im.set_data(self.data[self.ind, :, :, :]) self.ax.set_ylabel('time frame %s' % self.ind) self.im.axes.figure.canvas.draw()
[ "def", "update", "(", "self", ")", ":", "if", "self", ".", "single_channel", ":", "self", ".", "im", ".", "set_data", "(", "self", ".", "data", "[", "self", ".", "ind", ",", ":", ",", ":", "]", ")", "else", ":", "self", ".", "im", ".", "set_data", "(", "self", ".", "data", "[", "self", ".", "ind", ",", ":", ",", ":", ",", ":", "]", ")", "self", ".", "ax", ".", "set_ylabel", "(", "'time frame %s'", "%", "self", ".", "ind", ")", "self", ".", "im", ".", "axes", ".", "figure", ".", "canvas", ".", "draw", "(", ")" ]
Updates image to be displayed with new time frame.
[ "Updates", "image", "to", "be", "displayed", "with", "new", "time", "frame", "." ]
python
train
rahul13ramesh/hidden_markov
hidden_markov/hmm_class.py
https://github.com/rahul13ramesh/hidden_markov/blob/6ba6012665f9e09c980ff70901604d051ba57dcc/hidden_markov/hmm_class.py#L194-L277
def viterbi(self,observations): """ The probability of occurence of the observation sequence **Arguments**: :param observations: The observation sequence, where each element belongs to 'observations' variable declared with __init__ object. :type observations: A list or tuple :return: Returns a list of hidden states. :rtype: list of states **Features**: Scaling applied here. This ensures that no underflow error occurs. **Example**: >>> states = ('s', 't') >>> possible_observation = ('A','B' ) >>> # Numpy arrays of the data >>> start_probability = np.matrix( '0.5 0.5 ') >>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ') >>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' ) >>> # Initialize class object >>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability) >>> observations = ('A', 'B','B','A') >>> print(test.viterbi(observations)) """ # Find total states,observations total_stages = len(observations) num_states = len(self.states) # initialize data # Path stores the state sequence giving maximum probability old_path = np.zeros( (total_stages, num_states) ) new_path = np.zeros( (total_stages, num_states) ) # Find initial delta # Map observation to an index # delta[s] stores the probability of most probable path ending in state 's' ob_ind = self.obs_map[ observations[0] ] delta = np.multiply ( np.transpose(self.em_prob[:,ob_ind]) , self.start_prob ) # Scale delta delta = delta /np.sum(delta) # initialize path old_path[0,:] = [i for i in range(num_states) ] # Find delta[t][x] for each state 'x' at the iteration 't' # delta[t][x] can be found using delta[t-1][x] and taking the maximum possible path for curr_t in range(1,total_stages): # Map observation to an index ob_ind = self.obs_map[ observations[curr_t] ] # Find temp and take max along each row to get delta temp = np.multiply (np.multiply(delta , self.trans_prob.transpose()) , self.em_prob[:, ob_ind] ) # Update delta and scale it delta = temp.max(axis = 1).transpose() delta = delta /np.sum(delta) # Find state which is most probable using argax # Convert to a list for easier processing max_temp = temp.argmax(axis=1).transpose() max_temp = np.ravel(max_temp).tolist() # Update path for s in range(num_states): new_path[:curr_t,s] = old_path[0:curr_t, max_temp[s] ] new_path[curr_t,:] = [i for i in range(num_states) ] old_path = new_path.copy() # Find the state in last stage, giving maximum probability final_max = np.argmax(np.ravel(delta)) best_path = old_path[:,final_max].tolist() best_path_map = [ self.state_map[i] for i in best_path] return best_path_map
[ "def", "viterbi", "(", "self", ",", "observations", ")", ":", "# Find total states,observations", "total_stages", "=", "len", "(", "observations", ")", "num_states", "=", "len", "(", "self", ".", "states", ")", "# initialize data", "# Path stores the state sequence giving maximum probability", "old_path", "=", "np", ".", "zeros", "(", "(", "total_stages", ",", "num_states", ")", ")", "new_path", "=", "np", ".", "zeros", "(", "(", "total_stages", ",", "num_states", ")", ")", "# Find initial delta", "# Map observation to an index", "# delta[s] stores the probability of most probable path ending in state 's' ", "ob_ind", "=", "self", ".", "obs_map", "[", "observations", "[", "0", "]", "]", "delta", "=", "np", ".", "multiply", "(", "np", ".", "transpose", "(", "self", ".", "em_prob", "[", ":", ",", "ob_ind", "]", ")", ",", "self", ".", "start_prob", ")", "# Scale delta", "delta", "=", "delta", "/", "np", ".", "sum", "(", "delta", ")", "# initialize path", "old_path", "[", "0", ",", ":", "]", "=", "[", "i", "for", "i", "in", "range", "(", "num_states", ")", "]", "# Find delta[t][x] for each state 'x' at the iteration 't'", "# delta[t][x] can be found using delta[t-1][x] and taking the maximum possible path", "for", "curr_t", "in", "range", "(", "1", ",", "total_stages", ")", ":", "# Map observation to an index", "ob_ind", "=", "self", ".", "obs_map", "[", "observations", "[", "curr_t", "]", "]", "# Find temp and take max along each row to get delta", "temp", "=", "np", ".", "multiply", "(", "np", ".", "multiply", "(", "delta", ",", "self", ".", "trans_prob", ".", "transpose", "(", ")", ")", ",", "self", ".", "em_prob", "[", ":", ",", "ob_ind", "]", ")", "# Update delta and scale it", "delta", "=", "temp", ".", "max", "(", "axis", "=", "1", ")", ".", "transpose", "(", ")", "delta", "=", "delta", "/", "np", ".", "sum", "(", "delta", ")", "# Find state which is most probable using argax", "# Convert to a list for easier processing", "max_temp", "=", "temp", ".", "argmax", "(", "axis", "=", "1", ")", ".", "transpose", "(", ")", "max_temp", "=", "np", ".", "ravel", "(", "max_temp", ")", ".", "tolist", "(", ")", "# Update path", "for", "s", "in", "range", "(", "num_states", ")", ":", "new_path", "[", ":", "curr_t", ",", "s", "]", "=", "old_path", "[", "0", ":", "curr_t", ",", "max_temp", "[", "s", "]", "]", "new_path", "[", "curr_t", ",", ":", "]", "=", "[", "i", "for", "i", "in", "range", "(", "num_states", ")", "]", "old_path", "=", "new_path", ".", "copy", "(", ")", "# Find the state in last stage, giving maximum probability", "final_max", "=", "np", ".", "argmax", "(", "np", ".", "ravel", "(", "delta", ")", ")", "best_path", "=", "old_path", "[", ":", ",", "final_max", "]", ".", "tolist", "(", ")", "best_path_map", "=", "[", "self", ".", "state_map", "[", "i", "]", "for", "i", "in", "best_path", "]", "return", "best_path_map" ]
The probability of occurence of the observation sequence **Arguments**: :param observations: The observation sequence, where each element belongs to 'observations' variable declared with __init__ object. :type observations: A list or tuple :return: Returns a list of hidden states. :rtype: list of states **Features**: Scaling applied here. This ensures that no underflow error occurs. **Example**: >>> states = ('s', 't') >>> possible_observation = ('A','B' ) >>> # Numpy arrays of the data >>> start_probability = np.matrix( '0.5 0.5 ') >>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ') >>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' ) >>> # Initialize class object >>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability) >>> observations = ('A', 'B','B','A') >>> print(test.viterbi(observations))
[ "The", "probability", "of", "occurence", "of", "the", "observation", "sequence" ]
python
train
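The viterbi record above recovers the most probable hidden-state path, rescaling delta at every step to avoid underflow. The following is a minimal independent sketch of the same recursion in log space, not the hidden_markov library's code, run on the toy two-state HMM from the record's docstring (state indices 0 and 1 correspond to 's' and 't').

import numpy as np

def viterbi_path(start_p, trans_p, emit_p, obs):
    """Most likely state index sequence for a list of observation indices."""
    n_states = len(start_p)
    n_steps = len(obs)
    # Best log-probability of any path ending in each state at the current step.
    delta = np.log(start_p) + np.log(emit_p[:, obs[0]])
    back = np.zeros((n_steps, n_states), dtype=int)  # backpointers
    for t in range(1, n_steps):
        scores = delta[:, None] + np.log(trans_p)    # scores[i, j]: end in i, move to j
        back[t] = np.argmax(scores, axis=0)
        delta = scores[back[t], np.arange(n_states)] + np.log(emit_p[:, obs[t]])
    path = [int(np.argmax(delta))]
    for t in range(n_steps - 1, 0, -1):              # follow backpointers
        path.append(int(back[t][path[-1]]))
    return path[::-1]

# Toy HMM from the record's docstring: states ('s', 't'), observations ('A', 'B').
start = np.array([0.5, 0.5])
trans = np.array([[0.6, 0.4], [0.3, 0.7]])
emit = np.array([[0.3, 0.7], [0.4, 0.6]])
observations = [0, 1, 1, 0]                          # 'A', 'B', 'B', 'A'
print(viterbi_path(start, trans, emit, observations))
# [1, 1, 1, 1], i.e. 't' at every step for this toy sequence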
mozilla/DeepSpeech
bin/import_gram_vaani.py
https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/bin/import_gram_vaani.py#L32-L79
def parse_args(args): """Parse command line parameters Args: args ([str]): Command line parameters as list of strings Returns: :obj:`argparse.Namespace`: command line parameters namespace """ parser = argparse.ArgumentParser( description="Imports GramVaani data for Deep Speech" ) parser.add_argument( "--version", action="version", version="GramVaaniImporter {ver}".format(ver=__version__), ) parser.add_argument( "-v", "--verbose", action="store_const", required=False, help="set loglevel to INFO", dest="loglevel", const=logging.INFO, ) parser.add_argument( "-vv", "--very-verbose", action="store_const", required=False, help="set loglevel to DEBUG", dest="loglevel", const=logging.DEBUG, ) parser.add_argument( "-c", "--csv_filename", required=True, help="Path to the GramVaani csv", dest="csv_filename", ) parser.add_argument( "-t", "--target_dir", required=True, help="Directory in which to save the importer GramVaani data", dest="target_dir", ) return parser.parse_args(args)
[ "def", "parse_args", "(", "args", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"Imports GramVaani data for Deep Speech\"", ")", "parser", ".", "add_argument", "(", "\"--version\"", ",", "action", "=", "\"version\"", ",", "version", "=", "\"GramVaaniImporter {ver}\"", ".", "format", "(", "ver", "=", "__version__", ")", ",", ")", "parser", ".", "add_argument", "(", "\"-v\"", ",", "\"--verbose\"", ",", "action", "=", "\"store_const\"", ",", "required", "=", "False", ",", "help", "=", "\"set loglevel to INFO\"", ",", "dest", "=", "\"loglevel\"", ",", "const", "=", "logging", ".", "INFO", ",", ")", "parser", ".", "add_argument", "(", "\"-vv\"", ",", "\"--very-verbose\"", ",", "action", "=", "\"store_const\"", ",", "required", "=", "False", ",", "help", "=", "\"set loglevel to DEBUG\"", ",", "dest", "=", "\"loglevel\"", ",", "const", "=", "logging", ".", "DEBUG", ",", ")", "parser", ".", "add_argument", "(", "\"-c\"", ",", "\"--csv_filename\"", ",", "required", "=", "True", ",", "help", "=", "\"Path to the GramVaani csv\"", ",", "dest", "=", "\"csv_filename\"", ",", ")", "parser", ".", "add_argument", "(", "\"-t\"", ",", "\"--target_dir\"", ",", "required", "=", "True", ",", "help", "=", "\"Directory in which to save the importer GramVaani data\"", ",", "dest", "=", "\"target_dir\"", ",", ")", "return", "parser", ".", "parse_args", "(", "args", ")" ]
Parse command line parameters Args: args ([str]): Command line parameters as list of strings Returns: :obj:`argparse.Namespace`: command line parameters namespace
[ "Parse", "command", "line", "parameters", "Args", ":", "args", "(", "[", "str", "]", ")", ":", "Command", "line", "parameters", "as", "list", "of", "strings", "Returns", ":", ":", "obj", ":", "argparse", ".", "Namespace", ":", "command", "line", "parameters", "namespace" ]
python
train
OpenKMIP/PyKMIP
kmip/core/objects.py
https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/core/objects.py#L3700-L3753
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_2_0): """ Read the data encoding the ObjectDefaults structure and decode it into its constituent parts. Args: input_buffer (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 2.0. Raises: InvalidKmipEncoding: Raised if the object type or attributes are missing from the encoding. VersionNotSupported: Raised when a KMIP version is provided that does not support the ObjectDefaults structure. """ if kmip_version < enums.KMIPVersion.KMIP_2_0: raise exceptions.VersionNotSupported( "KMIP {} does not support the ObjectDefaults object.".format( kmip_version.value ) ) super(ObjectDefaults, self).read( input_buffer, kmip_version=kmip_version ) local_buffer = utils.BytearrayStream(input_buffer.read(self.length)) if self.is_tag_next(enums.Tags.OBJECT_TYPE, local_buffer): self._object_type = primitives.Enumeration( enums.ObjectType, tag=enums.Tags.OBJECT_TYPE ) self._object_type.read(local_buffer, kmip_version=kmip_version) else: raise exceptions.InvalidKmipEncoding( "The ObjectDefaults encoding is missing the object type " "enumeration." ) if self.is_tag_next(enums.Tags.ATTRIBUTES, local_buffer): self._attributes = Attributes() self._attributes.read(local_buffer, kmip_version=kmip_version) else: raise exceptions.InvalidKmipEncoding( "The ObjectDefaults encoding is missing the attributes " "structure." ) self.is_oversized(local_buffer)
[ "def", "read", "(", "self", ",", "input_buffer", ",", "kmip_version", "=", "enums", ".", "KMIPVersion", ".", "KMIP_2_0", ")", ":", "if", "kmip_version", "<", "enums", ".", "KMIPVersion", ".", "KMIP_2_0", ":", "raise", "exceptions", ".", "VersionNotSupported", "(", "\"KMIP {} does not support the ObjectDefaults object.\"", ".", "format", "(", "kmip_version", ".", "value", ")", ")", "super", "(", "ObjectDefaults", ",", "self", ")", ".", "read", "(", "input_buffer", ",", "kmip_version", "=", "kmip_version", ")", "local_buffer", "=", "utils", ".", "BytearrayStream", "(", "input_buffer", ".", "read", "(", "self", ".", "length", ")", ")", "if", "self", ".", "is_tag_next", "(", "enums", ".", "Tags", ".", "OBJECT_TYPE", ",", "local_buffer", ")", ":", "self", ".", "_object_type", "=", "primitives", ".", "Enumeration", "(", "enums", ".", "ObjectType", ",", "tag", "=", "enums", ".", "Tags", ".", "OBJECT_TYPE", ")", "self", ".", "_object_type", ".", "read", "(", "local_buffer", ",", "kmip_version", "=", "kmip_version", ")", "else", ":", "raise", "exceptions", ".", "InvalidKmipEncoding", "(", "\"The ObjectDefaults encoding is missing the object type \"", "\"enumeration.\"", ")", "if", "self", ".", "is_tag_next", "(", "enums", ".", "Tags", ".", "ATTRIBUTES", ",", "local_buffer", ")", ":", "self", ".", "_attributes", "=", "Attributes", "(", ")", "self", ".", "_attributes", ".", "read", "(", "local_buffer", ",", "kmip_version", "=", "kmip_version", ")", "else", ":", "raise", "exceptions", ".", "InvalidKmipEncoding", "(", "\"The ObjectDefaults encoding is missing the attributes \"", "\"structure.\"", ")", "self", ".", "is_oversized", "(", "local_buffer", ")" ]
Read the data encoding the ObjectDefaults structure and decode it into its constituent parts. Args: input_buffer (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 2.0. Raises: InvalidKmipEncoding: Raised if the object type or attributes are missing from the encoding. VersionNotSupported: Raised when a KMIP version is provided that does not support the ObjectDefaults structure.
[ "Read", "the", "data", "encoding", "the", "ObjectDefaults", "structure", "and", "decode", "it", "into", "its", "constituent", "parts", "." ]
python
test
raiden-network/raiden
raiden/network/rpc/client.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/network/rpc/client.py#L154-L185
def geth_discover_next_available_nonce( web3: Web3, address: AddressHex, ) -> Nonce: """Returns the next available nonce for `address`.""" # The nonces of the mempool transactions are considered used, and it's # assumed these transactions are different from the ones currently pending # in the client. This is a simplification, otherwise it would be necessary # to filter the local pending transactions based on the mempool. pool = web3.txpool.inspect or {} # pool is roughly: # # {'queued': {'account1': {nonce1: ... nonce2: ...}, 'account2': ...}, 'pending': ...} # # Pending refers to the current block and if it contains transactions from # the user, these will be the younger transactions. Because this needs the # largest nonce, queued is checked first. address = to_checksum_address(address) queued = pool.get('queued', {}).get(address) if queued: return Nonce(max(int(k) for k in queued.keys()) + 1) pending = pool.get('pending', {}).get(address) if pending: return Nonce(max(int(k) for k in pending.keys()) + 1) # The first valid nonce is 0, therefore the count is already the next # available nonce return web3.eth.getTransactionCount(address, 'latest')
[ "def", "geth_discover_next_available_nonce", "(", "web3", ":", "Web3", ",", "address", ":", "AddressHex", ",", ")", "->", "Nonce", ":", "# The nonces of the mempool transactions are considered used, and it's", "# assumed these transactions are different from the ones currently pending", "# in the client. This is a simplification, otherwise it would be necessary", "# to filter the local pending transactions based on the mempool.", "pool", "=", "web3", ".", "txpool", ".", "inspect", "or", "{", "}", "# pool is roughly:", "#", "# {'queued': {'account1': {nonce1: ... nonce2: ...}, 'account2': ...}, 'pending': ...}", "#", "# Pending refers to the current block and if it contains transactions from", "# the user, these will be the younger transactions. Because this needs the", "# largest nonce, queued is checked first.", "address", "=", "to_checksum_address", "(", "address", ")", "queued", "=", "pool", ".", "get", "(", "'queued'", ",", "{", "}", ")", ".", "get", "(", "address", ")", "if", "queued", ":", "return", "Nonce", "(", "max", "(", "int", "(", "k", ")", "for", "k", "in", "queued", ".", "keys", "(", ")", ")", "+", "1", ")", "pending", "=", "pool", ".", "get", "(", "'pending'", ",", "{", "}", ")", ".", "get", "(", "address", ")", "if", "pending", ":", "return", "Nonce", "(", "max", "(", "int", "(", "k", ")", "for", "k", "in", "pending", ".", "keys", "(", ")", ")", "+", "1", ")", "# The first valid nonce is 0, therefore the count is already the next", "# available nonce", "return", "web3", ".", "eth", ".", "getTransactionCount", "(", "address", ",", "'latest'", ")" ]
Returns the next available nonce for `address`.
[ "Returns", "the", "next", "available", "nonce", "for", "address", "." ]
python
train
saltstack/salt
salt/cloud/clouds/xen.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L934-L957
def reboot(name, call=None, session=None): ''' Reboot a vm .. code-block:: bash salt-cloud -a reboot xenvm01 ''' if call == 'function': raise SaltCloudException( 'The show_instnce function must be called with -a or --action.' ) if session is None: session = _get_session() log.info('Starting VM %s', name) vm = _get_vm(name, session) power_state = session.xenapi.VM.get_power_state(vm) if power_state == 'Running': task = session.xenapi.Async.VM.clean_reboot(vm) _run_async_task(task, session) return show_instance(name) else: return '{} is not running to be rebooted'.format(name)
[ "def", "reboot", "(", "name", ",", "call", "=", "None", ",", "session", "=", "None", ")", ":", "if", "call", "==", "'function'", ":", "raise", "SaltCloudException", "(", "'The show_instnce function must be called with -a or --action.'", ")", "if", "session", "is", "None", ":", "session", "=", "_get_session", "(", ")", "log", ".", "info", "(", "'Starting VM %s'", ",", "name", ")", "vm", "=", "_get_vm", "(", "name", ",", "session", ")", "power_state", "=", "session", ".", "xenapi", ".", "VM", ".", "get_power_state", "(", "vm", ")", "if", "power_state", "==", "'Running'", ":", "task", "=", "session", ".", "xenapi", ".", "Async", ".", "VM", ".", "clean_reboot", "(", "vm", ")", "_run_async_task", "(", "task", ",", "session", ")", "return", "show_instance", "(", "name", ")", "else", ":", "return", "'{} is not running to be rebooted'", ".", "format", "(", "name", ")" ]
Reboot a vm .. code-block:: bash salt-cloud -a reboot xenvm01
[ "Reboot", "a", "vm" ]
python
train
rigetti/quantumflow
quantumflow/utils.py
https://github.com/rigetti/quantumflow/blob/13a66cabbe8aabf6e023cc675f4a4ebe6ccda8fb/quantumflow/utils.py#L192-L208
def symbolize(flt: float) -> sympy.Symbol: """Attempt to convert a real number into a simpler symbolic representation. Returns: A sympy Symbol. (Convert to string with str(sym) or to latex with sympy.latex(sym) Raises: ValueError: If cannot simplify float """ try: ratio = rationalize(flt) res = sympy.simplify(ratio) except ValueError: ratio = rationalize(flt/np.pi) res = sympy.simplify(ratio) * sympy.pi return res
[ "def", "symbolize", "(", "flt", ":", "float", ")", "->", "sympy", ".", "Symbol", ":", "try", ":", "ratio", "=", "rationalize", "(", "flt", ")", "res", "=", "sympy", ".", "simplify", "(", "ratio", ")", "except", "ValueError", ":", "ratio", "=", "rationalize", "(", "flt", "/", "np", ".", "pi", ")", "res", "=", "sympy", ".", "simplify", "(", "ratio", ")", "*", "sympy", ".", "pi", "return", "res" ]
Attempt to convert a real number into a simpler symbolic representation. Returns: A sympy Symbol. (Convert to string with str(sym) or to latex with sympy.latex(sym) Raises: ValueError: If cannot simplify float
[ "Attempt", "to", "convert", "a", "real", "number", "into", "a", "simpler", "symbolic", "representation", "." ]
python
train
kevin1024/vcrpy
vcr/stubs/__init__.py
https://github.com/kevin1024/vcrpy/blob/114fcd29b43c55896aaa6a6613bc7766f2707c8b/vcr/stubs/__init__.py#L216-L272
def getresponse(self, _=False, **kwargs): '''Retrieve the response''' # Check to see if the cassette has a response for this request. If so, # then return it if self.cassette.can_play_response_for(self._vcr_request): log.info( "Playing response for {} from cassette".format( self._vcr_request ) ) response = self.cassette.play_response(self._vcr_request) return VCRHTTPResponse(response) else: if self.cassette.write_protected and self.cassette.filter_request( self._vcr_request ): raise CannotOverwriteExistingCassetteException( "No match for the request (%r) was found. " "Can't overwrite existing cassette (%r) in " "your current record mode (%r)." % (self._vcr_request, self.cassette._path, self.cassette.record_mode) ) # Otherwise, we should send the request, then get the response # and return it. log.info( "{} not in cassette, sending to real server".format( self._vcr_request ) ) # This is imported here to avoid circular import. # TODO(@IvanMalison): Refactor to allow normal import. from vcr.patch import force_reset with force_reset(): self.real_connection.request( method=self._vcr_request.method, url=self._url(self._vcr_request.uri), body=self._vcr_request.body, headers=self._vcr_request.headers, ) # get the response response = self.real_connection.getresponse() # put the response into the cassette response = { 'status': { 'code': response.status, 'message': response.reason }, 'headers': serialize_headers(response), 'body': {'string': response.read()}, } self.cassette.append(self._vcr_request, response) return VCRHTTPResponse(response)
[ "def", "getresponse", "(", "self", ",", "_", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# Check to see if the cassette has a response for this request. If so,", "# then return it", "if", "self", ".", "cassette", ".", "can_play_response_for", "(", "self", ".", "_vcr_request", ")", ":", "log", ".", "info", "(", "\"Playing response for {} from cassette\"", ".", "format", "(", "self", ".", "_vcr_request", ")", ")", "response", "=", "self", ".", "cassette", ".", "play_response", "(", "self", ".", "_vcr_request", ")", "return", "VCRHTTPResponse", "(", "response", ")", "else", ":", "if", "self", ".", "cassette", ".", "write_protected", "and", "self", ".", "cassette", ".", "filter_request", "(", "self", ".", "_vcr_request", ")", ":", "raise", "CannotOverwriteExistingCassetteException", "(", "\"No match for the request (%r) was found. \"", "\"Can't overwrite existing cassette (%r) in \"", "\"your current record mode (%r).\"", "%", "(", "self", ".", "_vcr_request", ",", "self", ".", "cassette", ".", "_path", ",", "self", ".", "cassette", ".", "record_mode", ")", ")", "# Otherwise, we should send the request, then get the response", "# and return it.", "log", ".", "info", "(", "\"{} not in cassette, sending to real server\"", ".", "format", "(", "self", ".", "_vcr_request", ")", ")", "# This is imported here to avoid circular import.", "# TODO(@IvanMalison): Refactor to allow normal import.", "from", "vcr", ".", "patch", "import", "force_reset", "with", "force_reset", "(", ")", ":", "self", ".", "real_connection", ".", "request", "(", "method", "=", "self", ".", "_vcr_request", ".", "method", ",", "url", "=", "self", ".", "_url", "(", "self", ".", "_vcr_request", ".", "uri", ")", ",", "body", "=", "self", ".", "_vcr_request", ".", "body", ",", "headers", "=", "self", ".", "_vcr_request", ".", "headers", ",", ")", "# get the response", "response", "=", "self", ".", "real_connection", ".", "getresponse", "(", ")", "# put the response into the cassette", "response", "=", "{", "'status'", ":", "{", "'code'", ":", "response", ".", "status", ",", "'message'", ":", "response", ".", "reason", "}", ",", "'headers'", ":", "serialize_headers", "(", "response", ")", ",", "'body'", ":", "{", "'string'", ":", "response", ".", "read", "(", ")", "}", ",", "}", "self", ".", "cassette", ".", "append", "(", "self", ".", "_vcr_request", ",", "response", ")", "return", "VCRHTTPResponse", "(", "response", ")" ]
Retrieve the response
[ "Retrieve", "the", "response" ]
python
train
tensorflow/tensor2tensor
tensor2tensor/data_generators/multi_problem_v2.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem_v2.py#L378-L394
def encode_schedule(schedule): """Encodes a schedule tuple into a string. Args: schedule: A tuple containing (interpolation, steps, pmfs), where interpolation is a string specifying the interpolation strategy, steps is an int array_like of shape [N] specifying the global steps, and pmfs is an array_like of shape [N, M] where pmf[i] is the sampling distribution at global step steps[i]. N is the number of schedule requirements to interpolate and M is the size of the probability space. Returns: The string encoding of the schedule tuple. """ interpolation, steps, pmfs = schedule return interpolation + ' ' + ' '.join( '@' + str(s) + ' ' + ' '.join(map(str, p)) for s, p in zip(steps, pmfs))
[ "def", "encode_schedule", "(", "schedule", ")", ":", "interpolation", ",", "steps", ",", "pmfs", "=", "schedule", "return", "interpolation", "+", "' '", "+", "' '", ".", "join", "(", "'@'", "+", "str", "(", "s", ")", "+", "' '", "+", "' '", ".", "join", "(", "map", "(", "str", ",", "p", ")", ")", "for", "s", ",", "p", "in", "zip", "(", "steps", ",", "pmfs", ")", ")" ]
Encodes a schedule tuple into a string. Args: schedule: A tuple containing (interpolation, steps, pmfs), where interpolation is a string specifying the interpolation strategy, steps is an int array_like of shape [N] specifying the global steps, and pmfs is an array_like of shape [N, M] where pmf[i] is the sampling distribution at global step steps[i]. N is the number of schedule requirements to interpolate and M is the size of the probability space. Returns: The string encoding of the schedule tuple.
[ "Encodes", "a", "schedule", "tuple", "into", "a", "string", "." ]
python
train
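The encode_schedule record above flattens an (interpolation, steps, pmfs) tuple into a single string. A small standalone sketch of the encoding it produces; the function body is restated from the record so the snippet runs on its own, and the schedule values are invented.

def encode_schedule(schedule):
    """Encode (interpolation, steps, pmfs) as 'interp @step p0 p1 ... @step ...'."""
    interpolation, steps, pmfs = schedule
    return interpolation + ' ' + ' '.join(
        '@' + str(s) + ' ' + ' '.join(map(str, p)) for s, p in zip(steps, pmfs))

schedule = ('step', [0, 1000], [[1.0, 0.0], [0.5, 0.5]])
print(encode_schedule(schedule))
# step @0 1.0 0.0 @1000 0.5 0.5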
gem/oq-engine
openquake/server/views.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/server/views.py#L378-L400
def calc_remove(request, calc_id): """ Remove the calculation id """ # Only the owner can remove a job user = utils.get_user(request) try: message = logs.dbcmd('del_calc', calc_id, user) except dbapi.NotFound: return HttpResponseNotFound() if 'success' in message: return HttpResponse(content=json.dumps(message), content_type=JSON, status=200) elif 'error' in message: logging.error(message['error']) return HttpResponse(content=json.dumps(message), content_type=JSON, status=403) else: # This is an untrapped server error logging.error(message) return HttpResponse(content=message, content_type='text/plain', status=500)
[ "def", "calc_remove", "(", "request", ",", "calc_id", ")", ":", "# Only the owner can remove a job", "user", "=", "utils", ".", "get_user", "(", "request", ")", "try", ":", "message", "=", "logs", ".", "dbcmd", "(", "'del_calc'", ",", "calc_id", ",", "user", ")", "except", "dbapi", ".", "NotFound", ":", "return", "HttpResponseNotFound", "(", ")", "if", "'success'", "in", "message", ":", "return", "HttpResponse", "(", "content", "=", "json", ".", "dumps", "(", "message", ")", ",", "content_type", "=", "JSON", ",", "status", "=", "200", ")", "elif", "'error'", "in", "message", ":", "logging", ".", "error", "(", "message", "[", "'error'", "]", ")", "return", "HttpResponse", "(", "content", "=", "json", ".", "dumps", "(", "message", ")", ",", "content_type", "=", "JSON", ",", "status", "=", "403", ")", "else", ":", "# This is an untrapped server error", "logging", ".", "error", "(", "message", ")", "return", "HttpResponse", "(", "content", "=", "message", ",", "content_type", "=", "'text/plain'", ",", "status", "=", "500", ")" ]
Remove the calculation id
[ "Remove", "the", "calculation", "id" ]
python
train
HewlettPackard/python-hpOneView
hpOneView/oneview_client.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/oneview_client.py#L777-L786
def racks(self): """ Gets the Racks API client. Returns: Racks: """ if not self.__racks: self.__racks = Racks(self.__connection) return self.__racks
[ "def", "racks", "(", "self", ")", ":", "if", "not", "self", ".", "__racks", ":", "self", ".", "__racks", "=", "Racks", "(", "self", ".", "__connection", ")", "return", "self", ".", "__racks" ]
Gets the Racks API client. Returns: Racks:
[ "Gets", "the", "Racks", "API", "client", "." ]
python
train
h2oai/h2o-3
h2o-py/h2o/utils/typechecks.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/utils/typechecks.py#L230-L235
def name(self, src=None): """Return string representing the name of this type.""" if len(self._types) > 1: return "!(%s)" % str("|".join(_get_type_name(tt, src) for tt in self._types)) else: return "!" + _get_type_name(self._types[0], src)
[ "def", "name", "(", "self", ",", "src", "=", "None", ")", ":", "if", "len", "(", "self", ".", "_types", ")", ">", "1", ":", "return", "\"!(%s)\"", "%", "str", "(", "\"|\"", ".", "join", "(", "_get_type_name", "(", "tt", ",", "src", ")", "for", "tt", "in", "self", ".", "_types", ")", ")", "else", ":", "return", "\"!\"", "+", "_get_type_name", "(", "self", ".", "_types", "[", "0", "]", ",", "src", ")" ]
Return string representing the name of this type.
[ "Return", "string", "representing", "the", "name", "of", "this", "type", "." ]
python
test
tanghaibao/jcvi
jcvi/graphics/base.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/graphics/base.py#L328-L345
def print_colors(palette, outfile="Palette.png"): """ print color palette (a tuple) to a PNG file for quick check """ fig = plt.figure() ax = fig.add_subplot(111) xmax = 20 * (len(palette) + 1) x1s = np.arange(0, xmax, 20) xintervals = [10] * len(palette) xx = zip(x1s, xintervals) ax.broken_barh(xx, (5, 10), facecolors=palette) ax.set_ylim(0, 20) ax.set_xlim(0, xmax) ax.set_axis_off() savefig(outfile)
[ "def", "print_colors", "(", "palette", ",", "outfile", "=", "\"Palette.png\"", ")", ":", "fig", "=", "plt", ".", "figure", "(", ")", "ax", "=", "fig", ".", "add_subplot", "(", "111", ")", "xmax", "=", "20", "*", "(", "len", "(", "palette", ")", "+", "1", ")", "x1s", "=", "np", ".", "arange", "(", "0", ",", "xmax", ",", "20", ")", "xintervals", "=", "[", "10", "]", "*", "len", "(", "palette", ")", "xx", "=", "zip", "(", "x1s", ",", "xintervals", ")", "ax", ".", "broken_barh", "(", "xx", ",", "(", "5", ",", "10", ")", ",", "facecolors", "=", "palette", ")", "ax", ".", "set_ylim", "(", "0", ",", "20", ")", "ax", ".", "set_xlim", "(", "0", ",", "xmax", ")", "ax", ".", "set_axis_off", "(", ")", "savefig", "(", "outfile", ")" ]
print color palette (a tuple) to a PNG file for quick check
[ "print", "color", "palette", "(", "a", "tuple", ")", "to", "a", "PNG", "file", "for", "quick", "check" ]
python
train
angr/angr
angr/storage/file.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/storage/file.py#L479-L521
def write(self, pos, data, size=None, events=True, **kwargs): """ Write a packet to the stream. :param int pos: The packet number to write in the sequence of the stream. May be None to append to the stream. :param data: The data to write, as a string or bitvector. :param size: The optional size to write. May be symbolic; must be constrained to at most the size of data. :return: The next packet to use after this """ if events: self.state.history.add_event('fs_write', filename=self.name, data=data, size=size, pos=pos) # sanity check on read/write modes if self.write_mode is None: self.write_mode = True elif self.write_mode is False: raise SimFileError("Cannot read and write to the same SimPackets") data = _deps_unpack(data)[0] if type(data) is bytes: data = claripy.BVV(data) if size is None: size = len(data) // self.state.arch.byte_width if isinstance(data, claripy.Bits) else len(data) if type(size) is int: size = self.state.solver.BVV(size, self.state.arch.bits) # sanity check on packet number and determine if data is already present if pos < 0: raise SimFileError("SimPacket.write(%d): Negative packet number?" % pos) elif pos > len(self.content): raise SimFileError("SimPacket.write(%d): Packet number is past frontier of %d?" % (pos, len(self.content))) elif pos != len(self.content): realdata, realsize = self.content[pos] maxlen = max(len(realdata), len(data)) self.state.solver.add(realdata[maxlen-1:0] == data[maxlen-1:0]) self.state.solver.add(size == realsize) if not self.state.solver.satisfiable(): raise SimFileError("Packet write equality constraints made state unsatisfiable???") return pos+1 # write it out! self.content.append((_deps_unpack(data)[0], size)) return pos+1
[ "def", "write", "(", "self", ",", "pos", ",", "data", ",", "size", "=", "None", ",", "events", "=", "True", ",", "*", "*", "kwargs", ")", ":", "if", "events", ":", "self", ".", "state", ".", "history", ".", "add_event", "(", "'fs_write'", ",", "filename", "=", "self", ".", "name", ",", "data", "=", "data", ",", "size", "=", "size", ",", "pos", "=", "pos", ")", "# sanity check on read/write modes", "if", "self", ".", "write_mode", "is", "None", ":", "self", ".", "write_mode", "=", "True", "elif", "self", ".", "write_mode", "is", "False", ":", "raise", "SimFileError", "(", "\"Cannot read and write to the same SimPackets\"", ")", "data", "=", "_deps_unpack", "(", "data", ")", "[", "0", "]", "if", "type", "(", "data", ")", "is", "bytes", ":", "data", "=", "claripy", ".", "BVV", "(", "data", ")", "if", "size", "is", "None", ":", "size", "=", "len", "(", "data", ")", "//", "self", ".", "state", ".", "arch", ".", "byte_width", "if", "isinstance", "(", "data", ",", "claripy", ".", "Bits", ")", "else", "len", "(", "data", ")", "if", "type", "(", "size", ")", "is", "int", ":", "size", "=", "self", ".", "state", ".", "solver", ".", "BVV", "(", "size", ",", "self", ".", "state", ".", "arch", ".", "bits", ")", "# sanity check on packet number and determine if data is already present", "if", "pos", "<", "0", ":", "raise", "SimFileError", "(", "\"SimPacket.write(%d): Negative packet number?\"", "%", "pos", ")", "elif", "pos", ">", "len", "(", "self", ".", "content", ")", ":", "raise", "SimFileError", "(", "\"SimPacket.write(%d): Packet number is past frontier of %d?\"", "%", "(", "pos", ",", "len", "(", "self", ".", "content", ")", ")", ")", "elif", "pos", "!=", "len", "(", "self", ".", "content", ")", ":", "realdata", ",", "realsize", "=", "self", ".", "content", "[", "pos", "]", "maxlen", "=", "max", "(", "len", "(", "realdata", ")", ",", "len", "(", "data", ")", ")", "self", ".", "state", ".", "solver", ".", "add", "(", "realdata", "[", "maxlen", "-", "1", ":", "0", "]", "==", "data", "[", "maxlen", "-", "1", ":", "0", "]", ")", "self", ".", "state", ".", "solver", ".", "add", "(", "size", "==", "realsize", ")", "if", "not", "self", ".", "state", ".", "solver", ".", "satisfiable", "(", ")", ":", "raise", "SimFileError", "(", "\"Packet write equality constraints made state unsatisfiable???\"", ")", "return", "pos", "+", "1", "# write it out!", "self", ".", "content", ".", "append", "(", "(", "_deps_unpack", "(", "data", ")", "[", "0", "]", ",", "size", ")", ")", "return", "pos", "+", "1" ]
Write a packet to the stream. :param int pos: The packet number to write in the sequence of the stream. May be None to append to the stream. :param data: The data to write, as a string or bitvector. :param size: The optional size to write. May be symbolic; must be constrained to at most the size of data. :return: The next packet to use after this
[ "Write", "a", "packet", "to", "the", "stream", "." ]
python
train
SheffieldML/GPy
GPy/examples/dimensionality_reduction.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/examples/dimensionality_reduction.py#L257-L301
def _simulate_sincos(D1, D2, D3, N, num_inducing, plot_sim=False): """Simulate some data drawn from sine and cosine for use in demos of MRD""" _np.random.seed(1234) x = _np.linspace(0, 4 * _np.pi, N)[:, None] s1 = _np.vectorize(lambda x: _np.sin(x)) s2 = _np.vectorize(lambda x: _np.cos(x)) s3 = _np.vectorize(lambda x:-_np.exp(-_np.cos(2 * x))) sS = _np.vectorize(lambda x: _np.cos(x)) s1 = s1(x) s2 = s2(x) s3 = s3(x) sS = sS(x) s1 -= s1.mean(); s1 /= s1.std(0) s2 -= s2.mean(); s2 /= s2.std(0) s3 -= s3.mean(); s3 /= s3.std(0) sS -= sS.mean(); sS /= sS.std(0) Y1, Y2, Y3, S1, S2, S3 = _generate_high_dimensional_output(D1, D2, D3, s1, s2, s3, sS) slist = [sS, s1, s2, s3] slist_names = ["sS", "s1", "s2", "s3"] Ylist = [Y1, Y2, Y3] if plot_sim: from matplotlib import pyplot as plt import matplotlib.cm as cm import itertools fig = plt.figure("MRD Simulation Data", figsize=(8, 6)) fig.clf() ax = fig.add_subplot(2, 1, 1) labls = slist_names for S, lab in zip(slist, labls): ax.plot(S, label=lab) ax.legend() for i, Y in enumerate(Ylist): ax = fig.add_subplot(2, len(Ylist), len(Ylist) + 1 + i) ax.imshow(Y, aspect='auto', cmap=cm.gray) # @UndefinedVariable ax.set_title("Y{}".format(i + 1)) plt.draw() plt.tight_layout() return slist, [S1, S2, S3], Ylist
[ "def", "_simulate_sincos", "(", "D1", ",", "D2", ",", "D3", ",", "N", ",", "num_inducing", ",", "plot_sim", "=", "False", ")", ":", "_np", ".", "random", ".", "seed", "(", "1234", ")", "x", "=", "_np", ".", "linspace", "(", "0", ",", "4", "*", "_np", ".", "pi", ",", "N", ")", "[", ":", ",", "None", "]", "s1", "=", "_np", ".", "vectorize", "(", "lambda", "x", ":", "_np", ".", "sin", "(", "x", ")", ")", "s2", "=", "_np", ".", "vectorize", "(", "lambda", "x", ":", "_np", ".", "cos", "(", "x", ")", ")", "s3", "=", "_np", ".", "vectorize", "(", "lambda", "x", ":", "-", "_np", ".", "exp", "(", "-", "_np", ".", "cos", "(", "2", "*", "x", ")", ")", ")", "sS", "=", "_np", ".", "vectorize", "(", "lambda", "x", ":", "_np", ".", "cos", "(", "x", ")", ")", "s1", "=", "s1", "(", "x", ")", "s2", "=", "s2", "(", "x", ")", "s3", "=", "s3", "(", "x", ")", "sS", "=", "sS", "(", "x", ")", "s1", "-=", "s1", ".", "mean", "(", ")", "s1", "/=", "s1", ".", "std", "(", "0", ")", "s2", "-=", "s2", ".", "mean", "(", ")", "s2", "/=", "s2", ".", "std", "(", "0", ")", "s3", "-=", "s3", ".", "mean", "(", ")", "s3", "/=", "s3", ".", "std", "(", "0", ")", "sS", "-=", "sS", ".", "mean", "(", ")", "sS", "/=", "sS", ".", "std", "(", "0", ")", "Y1", ",", "Y2", ",", "Y3", ",", "S1", ",", "S2", ",", "S3", "=", "_generate_high_dimensional_output", "(", "D1", ",", "D2", ",", "D3", ",", "s1", ",", "s2", ",", "s3", ",", "sS", ")", "slist", "=", "[", "sS", ",", "s1", ",", "s2", ",", "s3", "]", "slist_names", "=", "[", "\"sS\"", ",", "\"s1\"", ",", "\"s2\"", ",", "\"s3\"", "]", "Ylist", "=", "[", "Y1", ",", "Y2", ",", "Y3", "]", "if", "plot_sim", ":", "from", "matplotlib", "import", "pyplot", "as", "plt", "import", "matplotlib", ".", "cm", "as", "cm", "import", "itertools", "fig", "=", "plt", ".", "figure", "(", "\"MRD Simulation Data\"", ",", "figsize", "=", "(", "8", ",", "6", ")", ")", "fig", ".", "clf", "(", ")", "ax", "=", "fig", ".", "add_subplot", "(", "2", ",", "1", ",", "1", ")", "labls", "=", "slist_names", "for", "S", ",", "lab", "in", "zip", "(", "slist", ",", "labls", ")", ":", "ax", ".", "plot", "(", "S", ",", "label", "=", "lab", ")", "ax", ".", "legend", "(", ")", "for", "i", ",", "Y", "in", "enumerate", "(", "Ylist", ")", ":", "ax", "=", "fig", ".", "add_subplot", "(", "2", ",", "len", "(", "Ylist", ")", ",", "len", "(", "Ylist", ")", "+", "1", "+", "i", ")", "ax", ".", "imshow", "(", "Y", ",", "aspect", "=", "'auto'", ",", "cmap", "=", "cm", ".", "gray", ")", "# @UndefinedVariable", "ax", ".", "set_title", "(", "\"Y{}\"", ".", "format", "(", "i", "+", "1", ")", ")", "plt", ".", "draw", "(", ")", "plt", ".", "tight_layout", "(", ")", "return", "slist", ",", "[", "S1", ",", "S2", ",", "S3", "]", ",", "Ylist" ]
Simulate some data drawn from sine and cosine for use in demos of MRD
[ "Simulate", "some", "data", "drawn", "from", "sine", "and", "cosine", "for", "use", "in", "demos", "of", "MRD" ]
python
train
apache/incubator-heron
heron/tools/tracker/src/python/handlers/exceptionsummaryhandler.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/tracker/src/python/handlers/exceptionsummaryhandler.py#L75-L129
def getComponentExceptionSummary(self, tmaster, component_name, instances=[], callback=None): """ Get the summary of exceptions for component_name and list of instances. Empty instance list will fetch all exceptions. """ if not tmaster or not tmaster.host or not tmaster.stats_port: return exception_request = tmaster_pb2.ExceptionLogRequest() exception_request.component_name = component_name if len(instances) > 0: exception_request.instances.extend(instances) request_str = exception_request.SerializeToString() port = str(tmaster.stats_port) host = tmaster.host url = "http://{0}:{1}/exceptionsummary".format(host, port) Log.debug("Creating request object.") request = tornado.httpclient.HTTPRequest(url, body=request_str, method='POST', request_timeout=5) Log.debug('Making HTTP call to fetch exceptionsummary url: %s', url) try: client = tornado.httpclient.AsyncHTTPClient() result = yield client.fetch(request) Log.debug("HTTP call complete.") except tornado.httpclient.HTTPError as e: raise Exception(str(e)) # Check the response code - error if it is in 400s or 500s responseCode = result.code if responseCode >= 400: message = "Error in getting exceptions from Tmaster, code: " + responseCode Log.error(message) raise tornado.gen.Return({ "message": message }) # Parse the response from tmaster. exception_response = tmaster_pb2.ExceptionLogResponse() exception_response.ParseFromString(result.body) if exception_response.status.status == common_pb2.NOTOK: if exception_response.status.HasField("message"): raise tornado.gen.Return({ "message": exception_response.status.message }) # Send response ret = [] for exception_log in exception_response.exceptions: ret.append({'class_name': exception_log.stacktrace, 'lasttime': exception_log.lasttime, 'firsttime': exception_log.firsttime, 'count': str(exception_log.count)}) raise tornado.gen.Return(ret)
[ "def", "getComponentExceptionSummary", "(", "self", ",", "tmaster", ",", "component_name", ",", "instances", "=", "[", "]", ",", "callback", "=", "None", ")", ":", "if", "not", "tmaster", "or", "not", "tmaster", ".", "host", "or", "not", "tmaster", ".", "stats_port", ":", "return", "exception_request", "=", "tmaster_pb2", ".", "ExceptionLogRequest", "(", ")", "exception_request", ".", "component_name", "=", "component_name", "if", "len", "(", "instances", ")", ">", "0", ":", "exception_request", ".", "instances", ".", "extend", "(", "instances", ")", "request_str", "=", "exception_request", ".", "SerializeToString", "(", ")", "port", "=", "str", "(", "tmaster", ".", "stats_port", ")", "host", "=", "tmaster", ".", "host", "url", "=", "\"http://{0}:{1}/exceptionsummary\"", ".", "format", "(", "host", ",", "port", ")", "Log", ".", "debug", "(", "\"Creating request object.\"", ")", "request", "=", "tornado", ".", "httpclient", ".", "HTTPRequest", "(", "url", ",", "body", "=", "request_str", ",", "method", "=", "'POST'", ",", "request_timeout", "=", "5", ")", "Log", ".", "debug", "(", "'Making HTTP call to fetch exceptionsummary url: %s'", ",", "url", ")", "try", ":", "client", "=", "tornado", ".", "httpclient", ".", "AsyncHTTPClient", "(", ")", "result", "=", "yield", "client", ".", "fetch", "(", "request", ")", "Log", ".", "debug", "(", "\"HTTP call complete.\"", ")", "except", "tornado", ".", "httpclient", ".", "HTTPError", "as", "e", ":", "raise", "Exception", "(", "str", "(", "e", ")", ")", "# Check the response code - error if it is in 400s or 500s", "responseCode", "=", "result", ".", "code", "if", "responseCode", ">=", "400", ":", "message", "=", "\"Error in getting exceptions from Tmaster, code: \"", "+", "responseCode", "Log", ".", "error", "(", "message", ")", "raise", "tornado", ".", "gen", ".", "Return", "(", "{", "\"message\"", ":", "message", "}", ")", "# Parse the response from tmaster.", "exception_response", "=", "tmaster_pb2", ".", "ExceptionLogResponse", "(", ")", "exception_response", ".", "ParseFromString", "(", "result", ".", "body", ")", "if", "exception_response", ".", "status", ".", "status", "==", "common_pb2", ".", "NOTOK", ":", "if", "exception_response", ".", "status", ".", "HasField", "(", "\"message\"", ")", ":", "raise", "tornado", ".", "gen", ".", "Return", "(", "{", "\"message\"", ":", "exception_response", ".", "status", ".", "message", "}", ")", "# Send response", "ret", "=", "[", "]", "for", "exception_log", "in", "exception_response", ".", "exceptions", ":", "ret", ".", "append", "(", "{", "'class_name'", ":", "exception_log", ".", "stacktrace", ",", "'lasttime'", ":", "exception_log", ".", "lasttime", ",", "'firsttime'", ":", "exception_log", ".", "firsttime", ",", "'count'", ":", "str", "(", "exception_log", ".", "count", ")", "}", ")", "raise", "tornado", ".", "gen", ".", "Return", "(", "ret", ")" ]
Get the summary of exceptions for component_name and list of instances. Empty instance list will fetch all exceptions.
[ "Get", "the", "summary", "of", "exceptions", "for", "component_name", "and", "list", "of", "instances", ".", "Empty", "instance", "list", "will", "fetch", "all", "exceptions", "." ]
python
valid
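A hedged usage sketch for the coroutine above: `tracker` stands for whatever object exposes getComponentExceptionSummary, and `tmaster`, the component name and the instance id are placeholders rather than values taken from the record.

import tornado.gen
import tornado.ioloop

@tornado.gen.coroutine
def show_exception_summary(tracker, tmaster):
    # An empty `instances` list would fetch exception summaries for every
    # instance of the component instead of just the one named here.
    summary = yield tracker.getComponentExceptionSummary(
        tmaster, "word_spout", instances=["container_1_word_spout_1"])
    for entry in summary:
        print(entry["class_name"], entry["count"])

# With a real tracker/tmaster pair in scope:
# tornado.ioloop.IOLoop.current().run_sync(
#     lambda: show_exception_summary(tracker, tmaster))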
jic-dtool/dtool-http
dtool_http/publish.py
https://github.com/jic-dtool/dtool-http/blob/7572221b07d5294aa9ead5097a4f16478837e742/dtool_http/publish.py#L9-L31
def publish(dataset_uri): """Return access URL to HTTP enabled (published) dataset. Exits with error code 1 if the dataset_uri is not a dataset. Exits with error code 2 if the dataset cannot be HTTP enabled. """ try: dataset = dtoolcore.DataSet.from_uri(dataset_uri) except dtoolcore.DtoolCoreTypeError: print("Not a dataset: {}".format(dataset_uri)) sys.exit(1) try: access_uri = dataset._storage_broker.http_enable() except AttributeError: print( "Datasets of type '{}' cannot be published using HTTP".format( dataset._storage_broker.key) ) sys.exit(2) return access_uri
[ "def", "publish", "(", "dataset_uri", ")", ":", "try", ":", "dataset", "=", "dtoolcore", ".", "DataSet", ".", "from_uri", "(", "dataset_uri", ")", "except", "dtoolcore", ".", "DtoolCoreTypeError", ":", "print", "(", "\"Not a dataset: {}\"", ".", "format", "(", "dataset_uri", ")", ")", "sys", ".", "exit", "(", "1", ")", "try", ":", "access_uri", "=", "dataset", ".", "_storage_broker", ".", "http_enable", "(", ")", "except", "AttributeError", ":", "print", "(", "\"Datasets of type '{}' cannot be published using HTTP\"", ".", "format", "(", "dataset", ".", "_storage_broker", ".", "key", ")", ")", "sys", ".", "exit", "(", "2", ")", "return", "access_uri" ]
Return access URL to HTTP enabled (published) dataset. Exits with error code 1 if the dataset_uri is not a dataset. Exits with error code 2 if the dataset cannot be HTTP enabled.
[ "Return", "access", "URL", "to", "HTTP", "enabled", "(", "published", ")", "dataset", "." ]
python
train
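A minimal usage sketch for publish(); the dataset URI below is a placeholder, and note that the function exits the process (rather than raising) when the URI is not a dataset or cannot be HTTP enabled.

from dtool_http.publish import publish

# Placeholder URI; point this at a dataset stored in a backend that supports
# http_enable(), e.g. an S3-backed dtool dataset.
dataset_uri = "s3://my-bucket/af6727bf-29c7-43dd-b42f-placeholder"
access_uri = publish(dataset_uri)
print("Dataset readable at:", access_uri)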
pybel/pybel
src/pybel/utils.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/utils.py#L236-L244
def hash_evidence(text: str, type: str, reference: str) -> str: """Create a hash for an evidence and its citation. :param text: The evidence text :param type: The corresponding citation type :param reference: The citation reference """ s = u'{type}:{reference}:{text}'.format(type=type, reference=reference, text=text) return hashlib.sha512(s.encode('utf8')).hexdigest()
[ "def", "hash_evidence", "(", "text", ":", "str", ",", "type", ":", "str", ",", "reference", ":", "str", ")", "->", "str", ":", "s", "=", "u'{type}:{reference}:{text}'", ".", "format", "(", "type", "=", "type", ",", "reference", "=", "reference", ",", "text", "=", "text", ")", "return", "hashlib", ".", "sha512", "(", "s", ".", "encode", "(", "'utf8'", ")", ")", ".", "hexdigest", "(", ")" ]
Create a hash for an evidence and its citation. :param text: The evidence text :param type: The corresponding citation type :param reference: The citation reference
[ "Create", "a", "hash", "for", "an", "evidence", "and", "its", "citation", "." ]
python
train
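A small usage sketch for hash_evidence(); the citation text, type and reference are invented, and the import path is inferred from the record's path field.

from pybel.utils import hash_evidence

# The same (text, type, reference) triple always maps to the same SHA-512
# hex digest, so the value can serve as a stable key for the evidence.
key = hash_evidence(
    text="Decreased expression of X was observed in Y.",
    type="PubMed",
    reference="12345678",
)
print(len(key))  # 128 hex characters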
bcbio/bcbio-nextgen
bcbio/provenance/system.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/system.py#L128-L144
def _sge_info(queue): """Returns machine information for an sge job scheduler. """ qhost_out = subprocess.check_output(["qhost", "-q", "-xml"]).decode() qstat_queue = ["-q", queue] if queue and "," not in queue else [] qstat_out = subprocess.check_output(["qstat", "-f", "-xml"] + qstat_queue).decode() slot_info = _sge_get_slots(qstat_out) mem_info = _sge_get_mem(qhost_out, queue) machine_keys = slot_info.keys() #num_cpus_vec = [slot_info[x]["slots_total"] for x in machine_keys] #mem_vec = [mem_info[x]["mem_total"] for x in machine_keys] mem_per_slot = [mem_info[x]["mem_total"] / float(slot_info[x]["slots_total"]) for x in machine_keys] min_ratio_index = mem_per_slot.index(median_left(mem_per_slot)) mem_info[machine_keys[min_ratio_index]]["mem_total"] return [{"cores": slot_info[machine_keys[min_ratio_index]]["slots_total"], "memory": mem_info[machine_keys[min_ratio_index]]["mem_total"], "name": "sge_machine"}]
[ "def", "_sge_info", "(", "queue", ")", ":", "qhost_out", "=", "subprocess", ".", "check_output", "(", "[", "\"qhost\"", ",", "\"-q\"", ",", "\"-xml\"", "]", ")", ".", "decode", "(", ")", "qstat_queue", "=", "[", "\"-q\"", ",", "queue", "]", "if", "queue", "and", "\",\"", "not", "in", "queue", "else", "[", "]", "qstat_out", "=", "subprocess", ".", "check_output", "(", "[", "\"qstat\"", ",", "\"-f\"", ",", "\"-xml\"", "]", "+", "qstat_queue", ")", ".", "decode", "(", ")", "slot_info", "=", "_sge_get_slots", "(", "qstat_out", ")", "mem_info", "=", "_sge_get_mem", "(", "qhost_out", ",", "queue", ")", "machine_keys", "=", "slot_info", ".", "keys", "(", ")", "#num_cpus_vec = [slot_info[x][\"slots_total\"] for x in machine_keys]", "#mem_vec = [mem_info[x][\"mem_total\"] for x in machine_keys]", "mem_per_slot", "=", "[", "mem_info", "[", "x", "]", "[", "\"mem_total\"", "]", "/", "float", "(", "slot_info", "[", "x", "]", "[", "\"slots_total\"", "]", ")", "for", "x", "in", "machine_keys", "]", "min_ratio_index", "=", "mem_per_slot", ".", "index", "(", "median_left", "(", "mem_per_slot", ")", ")", "mem_info", "[", "machine_keys", "[", "min_ratio_index", "]", "]", "[", "\"mem_total\"", "]", "return", "[", "{", "\"cores\"", ":", "slot_info", "[", "machine_keys", "[", "min_ratio_index", "]", "]", "[", "\"slots_total\"", "]", ",", "\"memory\"", ":", "mem_info", "[", "machine_keys", "[", "min_ratio_index", "]", "]", "[", "\"mem_total\"", "]", ",", "\"name\"", ":", "\"sge_machine\"", "}", "]" ]
Returns machine information for an sge job scheduler.
[ "Returns", "machine", "information", "for", "an", "sge", "job", "scheduler", "." ]
python
train
pycontribs/pyrax
pyrax/clouddns.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/clouddns.py#L423-L433
def list_next_page(self): """ When paging through results, this will return the next page, using the same limit. If there are no more results, a NoMoreResults exception will be raised. """ uri = self._paging.get("domain", {}).get("next_uri") if uri is None: raise exc.NoMoreResults("There are no more pages of domains to " "list.") return self._list(uri)
[ "def", "list_next_page", "(", "self", ")", ":", "uri", "=", "self", ".", "_paging", ".", "get", "(", "\"domain\"", ",", "{", "}", ")", ".", "get", "(", "\"next_uri\"", ")", "if", "uri", "is", "None", ":", "raise", "exc", ".", "NoMoreResults", "(", "\"There are no more pages of domains to \"", "\"list.\"", ")", "return", "self", ".", "_list", "(", "uri", ")" ]
When paging through results, this will return the next page, using the same limit. If there are no more results, a NoMoreResults exception will be raised.
[ "When", "paging", "through", "results", "this", "will", "return", "the", "next", "page", "using", "the", "same", "limit", ".", "If", "there", "are", "no", "more", "results", "a", "NoMoreResults", "exception", "will", "be", "raised", "." ]
python
train
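A hedged paging sketch built around list_next_page(). The pyrax.cloud_dns entry point, the credential call and the list(limit=...) signature are the usual pyrax conventions but are assumptions here; only list_next_page() and the NoMoreResults exception come from the record.

import pyrax
from pyrax import exceptions as exc

# Assumes credentials were already configured, e.g. via
# pyrax.set_credential_file("~/.pyrax_credentials").
dns = pyrax.cloud_dns
domains = list(dns.list(limit=10))            # first page
while True:
    try:
        domains.extend(dns.list_next_page())  # next page, same limit
    except exc.NoMoreResults:
        break                                 # paging exhausted
print(len(domains))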
gwastro/pycbc
pycbc/waveform/waveform.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/waveform/waveform.py#L1063-L1071
def get_waveform_filter_length_in_time(approximant, template=None, **kwargs): """For filter templates, return the length in time of the template. """ kwargs = props(template, **kwargs) if approximant in _filter_time_lengths: return _filter_time_lengths[approximant](**kwargs) else: return None
[ "def", "get_waveform_filter_length_in_time", "(", "approximant", ",", "template", "=", "None", ",", "*", "*", "kwargs", ")", ":", "kwargs", "=", "props", "(", "template", ",", "*", "*", "kwargs", ")", "if", "approximant", "in", "_filter_time_lengths", ":", "return", "_filter_time_lengths", "[", "approximant", "]", "(", "*", "*", "kwargs", ")", "else", ":", "return", "None" ]
For filter templates, return the length in time of the template.
[ "For", "filter", "templates", "return", "the", "length", "in", "time", "of", "the", "template", "." ]
python
train
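A usage sketch, assuming the approximant name and the mass/f_lower parameters below are acceptable to the installed pycbc; the function returns None when no length estimate is registered for the approximant.

from pycbc.waveform import get_waveform_filter_length_in_time

# Approximate duration, in seconds, of a 10+10 solar-mass template from 20 Hz.
duration = get_waveform_filter_length_in_time(
    "SEOBNRv4", mass1=10.0, mass2=10.0, f_lower=20.0)
print(duration)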
richardchien/nonebot
nonebot/natural_language.py
https://github.com/richardchien/nonebot/blob/13ed9e4e87d9824b61592520aabda6d2737c8848/nonebot/natural_language.py#L30-L58
def on_natural_language(keywords: Union[Optional[Iterable], Callable] = None, *, permission: int = perm.EVERYBODY, only_to_me: bool = True, only_short_message: bool = True, allow_empty_message: bool = False) -> Callable: """ Decorator to register a function as a natural language processor. :param keywords: keywords to respond to, if None, respond to all messages :param permission: permission required by the processor :param only_to_me: only handle messages to me :param only_short_message: only handle short messages :param allow_empty_message: handle empty messages """ def deco(func: Callable) -> Callable: nl_processor = NLProcessor(func=func, keywords=keywords, permission=permission, only_to_me=only_to_me, only_short_message=only_short_message, allow_empty_message=allow_empty_message) _nl_processors.add(nl_processor) return func if isinstance(keywords, Callable): # here "keywords" is the function to be decorated return on_natural_language()(keywords) else: return deco
[ "def", "on_natural_language", "(", "keywords", ":", "Union", "[", "Optional", "[", "Iterable", "]", ",", "Callable", "]", "=", "None", ",", "*", ",", "permission", ":", "int", "=", "perm", ".", "EVERYBODY", ",", "only_to_me", ":", "bool", "=", "True", ",", "only_short_message", ":", "bool", "=", "True", ",", "allow_empty_message", ":", "bool", "=", "False", ")", "->", "Callable", ":", "def", "deco", "(", "func", ":", "Callable", ")", "->", "Callable", ":", "nl_processor", "=", "NLProcessor", "(", "func", "=", "func", ",", "keywords", "=", "keywords", ",", "permission", "=", "permission", ",", "only_to_me", "=", "only_to_me", ",", "only_short_message", "=", "only_short_message", ",", "allow_empty_message", "=", "allow_empty_message", ")", "_nl_processors", ".", "add", "(", "nl_processor", ")", "return", "func", "if", "isinstance", "(", "keywords", ",", "Callable", ")", ":", "# here \"keywords\" is the function to be decorated", "return", "on_natural_language", "(", ")", "(", "keywords", ")", "else", ":", "return", "deco" ]
Decorator to register a function as a natural language processor. :param keywords: keywords to respond to, if None, respond to all messages :param permission: permission required by the processor :param only_to_me: only handle messages to me :param only_short_message: only handle short messages :param allow_empty_message: handle empty messages
[ "Decorator", "to", "register", "a", "function", "as", "a", "natural", "language", "processor", "." ]
python
train
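A usage sketch for the decorator; NLPSession and IntentCommand belong to the same framework but are not shown in this record, so treat those names (and the keyword set) as assumptions.

from nonebot import on_natural_language, NLPSession, IntentCommand

@on_natural_language(keywords={'weather'}, only_to_me=False)
async def _(session: NLPSession):
    # Hand the message over to the 'weather' command with 90% confidence.
    return IntentCommand(90.0, 'weather')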
tamasgal/km3pipe
km3pipe/hardware.py
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L121-L127
def _extract_comments(self): """Retrieve all comments from the file""" self._det_file.seek(0, 0) for line in self._det_file.readlines(): line = line.strip() if line.startswith('#'): self.add_comment(line[1:])
[ "def", "_extract_comments", "(", "self", ")", ":", "self", ".", "_det_file", ".", "seek", "(", "0", ",", "0", ")", "for", "line", "in", "self", ".", "_det_file", ".", "readlines", "(", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "line", ".", "startswith", "(", "'#'", ")", ":", "self", ".", "add_comment", "(", "line", "[", "1", ":", "]", ")" ]
Retrieve all comments from the file
[ "Retrieve", "all", "comments", "from", "the", "file" ]
python
train
eandersson/amqpstorm
amqpstorm/channel.py
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L454-L481
def _close_channel(self, frame_in): """Close Channel. :param specification.Channel.Close frame_in: Channel Close frame. :return: """ if frame_in.reply_code != 200: reply_text = try_utf8_decode(frame_in.reply_text) message = ( 'Channel %d was closed by remote server: %s' % ( self._channel_id, reply_text ) ) exception = AMQPChannelError(message, reply_code=frame_in.reply_code) self.exceptions.append(exception) self.set_state(self.CLOSED) if self._connection.is_open: try: self._connection.write_frame( self.channel_id, specification.Channel.CloseOk() ) except AMQPConnectionError: pass self.close()
[ "def", "_close_channel", "(", "self", ",", "frame_in", ")", ":", "if", "frame_in", ".", "reply_code", "!=", "200", ":", "reply_text", "=", "try_utf8_decode", "(", "frame_in", ".", "reply_text", ")", "message", "=", "(", "'Channel %d was closed by remote server: %s'", "%", "(", "self", ".", "_channel_id", ",", "reply_text", ")", ")", "exception", "=", "AMQPChannelError", "(", "message", ",", "reply_code", "=", "frame_in", ".", "reply_code", ")", "self", ".", "exceptions", ".", "append", "(", "exception", ")", "self", ".", "set_state", "(", "self", ".", "CLOSED", ")", "if", "self", ".", "_connection", ".", "is_open", ":", "try", ":", "self", ".", "_connection", ".", "write_frame", "(", "self", ".", "channel_id", ",", "specification", ".", "Channel", ".", "CloseOk", "(", ")", ")", "except", "AMQPConnectionError", ":", "pass", "self", ".", "close", "(", ")" ]
Close Channel. :param specification.Channel.Close frame_in: Channel Close frame. :return:
[ "Close", "Channel", "." ]
python
train
BreakingBytes/simkit
simkit/core/simulations.py
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/simulations.py#L519-L535
def pause(self, progress_hook=None): """ Pause the simulation. How is this different from stopping it? Maintain info sufficient to restart simulation. Sets ``is_paused`` to True. Will this state allow analysis? changing parameters? What can you do with a paused simulation? Should be capable of saving paused simulation for loading/resuming later, that is the main usage. EG: someone else need computer, or power goes out, so on battery backup quickly pause simulation, and save. Is save automatic? Should there be a parameter for auto save changed? """ # default progress hook if progress_hook is None: progress_hook = sim_progress_hook progress_hook('simulation paused') self.cmd_queue.put('pause') self._ispaused = True
[ "def", "pause", "(", "self", ",", "progress_hook", "=", "None", ")", ":", "# default progress hook", "if", "progress_hook", "is", "None", ":", "progress_hook", "=", "sim_progress_hook", "progress_hook", "(", "'simulation paused'", ")", "self", ".", "cmd_queue", ".", "put", "(", "'pause'", ")", "self", ".", "_ispaused", "=", "True" ]
Pause the simulation. How is this different from stopping it? Maintain info sufficient to restart simulation. Sets ``is_paused`` to True. Will this state allow analysis? changing parameters? What can you do with a paused simulation? Should be capable of saving paused simulation for loading/resuming later, that is the main usage. EG: someone else need computer, or power goes out, so on battery backup quickly pause simulation, and save. Is save automatic? Should there be a parameter for auto save changed?
[ "Pause", "the", "simulation", ".", "How", "is", "this", "different", "from", "stopping", "it?", "Maintain", "info", "sufficient", "to", "restart", "simulation", ".", "Sets", "is_paused", "to", "True", ".", "Will", "this", "state", "allow", "analysis?", "changing", "parameters?", "What", "can", "you", "do", "with", "a", "paused", "simulation?", "Should", "be", "capable", "of", "saving", "paused", "simulation", "for", "loading", "/", "resuming", "later", "that", "is", "the", "main", "usage", ".", "EG", ":", "someone", "else", "need", "computer", "or", "power", "goes", "out", "so", "on", "battery", "backup", "quickly", "pause", "simulation", "and", "save", ".", "Is", "save", "automatic?", "Should", "there", "be", "a", "parameter", "for", "auto", "save", "changed?" ]
python
train
deepmipt/DeepPavlov
deeppavlov/core/layers/tf_csoftmax_attention.py
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/layers/tf_csoftmax_attention.py#L75-L90
def csoftmax(tensor, inv_cumulative_att): """ It is a implementation of the constrained softmax (csoftmax). Based on the paper: https://andre-martins.github.io/docs/emnlp2017_final.pdf "Learning What's Easy: Fully Differentiable Neural Easy-First Taggers" Args: tensor: A tensorflow tensor is score. This tensor have dimensionality [None, n_tokens] inv_cumulative_att: A inverse cumulative attention tensor with dimensionality [None, n_tokens] Returns: cs: Tensor at the output with dimensionality [None, n_tokens] """ shape_ten = tensor.shape shape_cum = inv_cumulative_att.shape merge_tensor = [tensor, inv_cumulative_att] cs, _ = tf.map_fn(csoftmax_for_slice, merge_tensor, dtype=[tf.float32, tf.float32]) # [bs, L] return cs
[ "def", "csoftmax", "(", "tensor", ",", "inv_cumulative_att", ")", ":", "shape_ten", "=", "tensor", ".", "shape", "shape_cum", "=", "inv_cumulative_att", ".", "shape", "merge_tensor", "=", "[", "tensor", ",", "inv_cumulative_att", "]", "cs", ",", "_", "=", "tf", ".", "map_fn", "(", "csoftmax_for_slice", ",", "merge_tensor", ",", "dtype", "=", "[", "tf", ".", "float32", ",", "tf", ".", "float32", "]", ")", "# [bs, L]", "return", "cs" ]
It is an implementation of the constrained softmax (csoftmax).
Based on the paper:
https://andre-martins.github.io/docs/emnlp2017_final.pdf "Learning What's Easy: Fully Differentiable Neural Easy-First Taggers"
Args:
tensor: A tensorflow tensor of scores. This tensor has dimensionality [None, n_tokens]
inv_cumulative_att: An inverse cumulative attention tensor with dimensionality [None, n_tokens]

Returns:
cs: Tensor at the output with dimensionality [None, n_tokens]
[ "It", "is", "a", "implementation", "of", "the", "constrained", "softmax", "(", "csoftmax", ")", ".", "Based", "on", "the", "paper", ":", "https", ":", "//", "andre", "-", "martins", ".", "github", ".", "io", "/", "docs", "/", "emnlp2017_final", ".", "pdf", "Learning", "What", "s", "Easy", ":", "Fully", "Differentiable", "Neural", "Easy", "-", "First", "Taggers", "Args", ":", "tensor", ":", "A", "tensorflow", "tensor", "is", "score", ".", "This", "tensor", "have", "dimensionality", "[", "None", "n_tokens", "]", "inv_cumulative_att", ":", "A", "inverse", "cumulative", "attention", "tensor", "with", "dimensionality", "[", "None", "n_tokens", "]", "Returns", ":", "cs", ":", "Tensor", "at", "the", "output", "with", "dimensionality", "[", "None", "n_tokens", "]" ]
python
test
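A hedged sketch of calling csoftmax() on toy tensors under the TensorFlow 1.x API the module is written against; shapes and values are invented for illustration.

import numpy as np
import tensorflow as tf  # 1.x API, as used by the module above
from deeppavlov.core.layers.tf_csoftmax_attention import csoftmax

batch, n_tokens = 2, 5
scores = tf.constant(np.random.rand(batch, n_tokens), dtype=tf.float32)
# Start with the full attention budget (1.0) still available for every token.
inv_cum_att = tf.ones([batch, n_tokens], dtype=tf.float32)

cs = csoftmax(scores, inv_cum_att)  # shape [batch, n_tokens]

with tf.Session() as sess:
    print(sess.run(cs))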
log2timeline/plaso
plaso/output/timesketch_out.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/output/timesketch_out.py#L42-L55
def Close(self): """Closes the connection to TimeSketch Elasticsearch database. Sends the remaining events for indexing and removes the processing status on the Timesketch search index object. """ super(TimesketchOutputModule, self).Close() with self._timesketch.app_context(): search_index = timesketch_sketch.SearchIndex.query.filter_by( index_name=self._index_name).first() search_index.status.remove(search_index.status[0]) timesketch_db_session.add(search_index) timesketch_db_session.commit()
[ "def", "Close", "(", "self", ")", ":", "super", "(", "TimesketchOutputModule", ",", "self", ")", ".", "Close", "(", ")", "with", "self", ".", "_timesketch", ".", "app_context", "(", ")", ":", "search_index", "=", "timesketch_sketch", ".", "SearchIndex", ".", "query", ".", "filter_by", "(", "index_name", "=", "self", ".", "_index_name", ")", ".", "first", "(", ")", "search_index", ".", "status", ".", "remove", "(", "search_index", ".", "status", "[", "0", "]", ")", "timesketch_db_session", ".", "add", "(", "search_index", ")", "timesketch_db_session", ".", "commit", "(", ")" ]
Closes the connection to TimeSketch Elasticsearch database. Sends the remaining events for indexing and removes the processing status on the Timesketch search index object.
[ "Closes", "the", "connection", "to", "TimeSketch", "Elasticsearch", "database", "." ]
python
train
sods/ods
pods/assesser.py
https://github.com/sods/ods/blob/3995c659f25a0a640f6009ed7fcc2559ce659b1d/pods/assesser.py#L145-L150
def latex(self): """Gives a latex representation of the assessment.""" output = self.latex_preamble output += self._repr_latex_() output += self.latex_post return output
[ "def", "latex", "(", "self", ")", ":", "output", "=", "self", ".", "latex_preamble", "output", "+=", "self", ".", "_repr_latex_", "(", ")", "output", "+=", "self", ".", "latex_post", "return", "output" ]
Gives a latex representation of the assessment.
[ "Gives", "a", "latex", "representation", "of", "the", "assessment", "." ]
python
train
jazzband/django-analytical
analytical/templatetags/uservoice.py
https://github.com/jazzband/django-analytical/blob/5487fd677bd47bc63fc2cf39597a0adc5d6c9ab3/analytical/templatetags/uservoice.py#L36-L47
def uservoice(parser, token): """ UserVoice tracking template tag. Renders Javascript code to track page visits. You must supply your UserVoice Widget Key in the ``USERVOICE_WIDGET_KEY`` setting or the ``uservoice_widget_key`` template context variable. """ bits = token.split_contents() if len(bits) > 1: raise TemplateSyntaxError("'%s' takes no arguments" % bits[0]) return UserVoiceNode()
[ "def", "uservoice", "(", "parser", ",", "token", ")", ":", "bits", "=", "token", ".", "split_contents", "(", ")", "if", "len", "(", "bits", ")", ">", "1", ":", "raise", "TemplateSyntaxError", "(", "\"'%s' takes no arguments\"", "%", "bits", "[", "0", "]", ")", "return", "UserVoiceNode", "(", ")" ]
UserVoice tracking template tag. Renders Javascript code to track page visits. You must supply your UserVoice Widget Key in the ``USERVOICE_WIDGET_KEY`` setting or the ``uservoice_widget_key`` template context variable.
[ "UserVoice", "tracking", "template", "tag", "." ]
python
valid
clinicedc/edc-notification
edc_notification/decorators.py
https://github.com/clinicedc/edc-notification/blob/79e43a44261e37566c63a8780d80b0d8ece89cc9/edc_notification/decorators.py#L9-L23
def register(**kwargs): """Registers a notification_cls. """ def _wrapper(notification_cls): if not issubclass(notification_cls, (Notification,)): raise RegisterNotificationError( f"Wrapped class must be a 'Notification' class. " f"Got '{notification_cls.__name__}'" ) site_notifications.register(notification_cls=notification_cls) return notification_cls return _wrapper
[ "def", "register", "(", "*", "*", "kwargs", ")", ":", "def", "_wrapper", "(", "notification_cls", ")", ":", "if", "not", "issubclass", "(", "notification_cls", ",", "(", "Notification", ",", ")", ")", ":", "raise", "RegisterNotificationError", "(", "f\"Wrapped class must be a 'Notification' class. \"", "f\"Got '{notification_cls.__name__}'\"", ")", "site_notifications", ".", "register", "(", "notification_cls", "=", "notification_cls", ")", "return", "notification_cls", "return", "_wrapper" ]
Registers a notification_cls.
[ "Registers", "a", "notification_cls", "." ]
python
train
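A hedged sketch of registering a notification class; the Notification import path and the name/display_name attributes follow the usual edc-notification pattern but are assumptions, only the register() decorator itself is shown above.

from edc_notification.decorators import register
from edc_notification.notification import Notification  # import path assumed

@register()
class G3EventNotification(Notification):
    # Attribute names are illustrative, not taken from the record above.
    name = "g3_event"
    display_name = "Grade 3 event"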
twisted/vertex
vertex/ptcp.py
https://github.com/twisted/vertex/blob/feb591aa1b9a3b2b8fdcf53e4962dad2a0bc38ca/vertex/ptcp.py#L986-L993
def waitForAllConnectionsToClose(self): """ Wait for all currently-open connections to enter the 'CLOSED' state. Currently this is only usable from test fixtures. """ if not self._connections: return self._stop() return self._allConnectionsClosed.deferred().addBoth(self._stop)
[ "def", "waitForAllConnectionsToClose", "(", "self", ")", ":", "if", "not", "self", ".", "_connections", ":", "return", "self", ".", "_stop", "(", ")", "return", "self", ".", "_allConnectionsClosed", ".", "deferred", "(", ")", ".", "addBoth", "(", "self", ".", "_stop", ")" ]
Wait for all currently-open connections to enter the 'CLOSED' state. Currently this is only usable from test fixtures.
[ "Wait", "for", "all", "currently", "-", "open", "connections", "to", "enter", "the", "CLOSED", "state", ".", "Currently", "this", "is", "only", "usable", "from", "test", "fixtures", "." ]
python
train
openstack/networking-cisco
networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py#L58-L62
def store_dummy_router_net(self, net_id, subnet_id, rtr_id): """Storing the router attributes. """ self.dummy_net_id = net_id self.dummy_subnet_id = subnet_id self.dummy_router_id = rtr_id
[ "def", "store_dummy_router_net", "(", "self", ",", "net_id", ",", "subnet_id", ",", "rtr_id", ")", ":", "self", ".", "dummy_net_id", "=", "net_id", "self", ".", "dummy_subnet_id", "=", "subnet_id", "self", ".", "dummy_router_id", "=", "rtr_id" ]
Storing the router attributes.
[ "Storing", "the", "router", "attributes", "." ]
python
train
klis87/django-cloudinary-storage
cloudinary_storage/management/commands/deleteorphanedmedia.py
https://github.com/klis87/django-cloudinary-storage/blob/b8cabd2ebbf67b9cfbbf4defee1a750fea5950a9/cloudinary_storage/management/commands/deleteorphanedmedia.py#L30-L36
def model_file_fields(self, model): """ Generator yielding all instances of FileField and its subclasses of a model. """ for field in model._meta.fields: if isinstance(field, models.FileField): yield field
[ "def", "model_file_fields", "(", "self", ",", "model", ")", ":", "for", "field", "in", "model", ".", "_meta", ".", "fields", ":", "if", "isinstance", "(", "field", ",", "models", ".", "FileField", ")", ":", "yield", "field" ]
Generator yielding all instances of FileField and its subclasses of a model.
[ "Generator", "yielding", "all", "instances", "of", "FileField", "and", "its", "subclasses", "of", "a", "model", "." ]
python
train
raymontag/kppy
kppy/database.py
https://github.com/raymontag/kppy/blob/a43f1fff7d49da1da4b3d8628a1b3ebbaf47f43a/kppy/database.py#L654-L662
def _move_group_helper(self, group): """A helper to move the chidren of a group.""" for i in group.children: self.groups.remove(i) i.level = group.level + 1 self.groups.insert(self.groups.index(group) + 1, i) if i.children: self._move_group_helper(i)
[ "def", "_move_group_helper", "(", "self", ",", "group", ")", ":", "for", "i", "in", "group", ".", "children", ":", "self", ".", "groups", ".", "remove", "(", "i", ")", "i", ".", "level", "=", "group", ".", "level", "+", "1", "self", ".", "groups", ".", "insert", "(", "self", ".", "groups", ".", "index", "(", "group", ")", "+", "1", ",", "i", ")", "if", "i", ".", "children", ":", "self", ".", "_move_group_helper", "(", "i", ")" ]
A helper to move the children of a group.
[ "A", "helper", "to", "move", "the", "children", "of", "a", "group", "." ]
python
train
mkoura/dump2polarion
dump2polarion/results/ostriztools.py
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/results/ostriztools.py#L97-L120
def _append_record(test_data, results, test_path): """Adds data of single testcase results to results database.""" statuses = test_data.get("statuses") jenkins_data = test_data.get("jenkins") or {} data = [ ("title", test_data.get("test_name") or _get_testname(test_path)), ("verdict", statuses.get("overall")), ("source", test_data.get("source")), ("job_name", jenkins_data.get("job_name")), ("run", jenkins_data.get("build_number")), ("params", _filter_parameters(test_data.get("params"))), ( "time", _calculate_duration(test_data.get("start_time"), test_data.get("finish_time")) or 0, ), ] test_id = test_data.get("polarion") if test_id: if isinstance(test_id, list): test_id = test_id[0] data.append(("test_id", test_id)) results.append(OrderedDict(data))
[ "def", "_append_record", "(", "test_data", ",", "results", ",", "test_path", ")", ":", "statuses", "=", "test_data", ".", "get", "(", "\"statuses\"", ")", "jenkins_data", "=", "test_data", ".", "get", "(", "\"jenkins\"", ")", "or", "{", "}", "data", "=", "[", "(", "\"title\"", ",", "test_data", ".", "get", "(", "\"test_name\"", ")", "or", "_get_testname", "(", "test_path", ")", ")", ",", "(", "\"verdict\"", ",", "statuses", ".", "get", "(", "\"overall\"", ")", ")", ",", "(", "\"source\"", ",", "test_data", ".", "get", "(", "\"source\"", ")", ")", ",", "(", "\"job_name\"", ",", "jenkins_data", ".", "get", "(", "\"job_name\"", ")", ")", ",", "(", "\"run\"", ",", "jenkins_data", ".", "get", "(", "\"build_number\"", ")", ")", ",", "(", "\"params\"", ",", "_filter_parameters", "(", "test_data", ".", "get", "(", "\"params\"", ")", ")", ")", ",", "(", "\"time\"", ",", "_calculate_duration", "(", "test_data", ".", "get", "(", "\"start_time\"", ")", ",", "test_data", ".", "get", "(", "\"finish_time\"", ")", ")", "or", "0", ",", ")", ",", "]", "test_id", "=", "test_data", ".", "get", "(", "\"polarion\"", ")", "if", "test_id", ":", "if", "isinstance", "(", "test_id", ",", "list", ")", ":", "test_id", "=", "test_id", "[", "0", "]", "data", ".", "append", "(", "(", "\"test_id\"", ",", "test_id", ")", ")", "results", ".", "append", "(", "OrderedDict", "(", "data", ")", ")" ]
Adds data of single testcase results to results database.
[ "Adds", "data", "of", "single", "testcase", "results", "to", "results", "database", "." ]
python
train
pytest-dev/pluggy
pluggy/manager.py
https://github.com/pytest-dev/pluggy/blob/4de9e440eeadd9f0eb8c5232b349ef64e20e33fb/pluggy/manager.py#L158-L179
def add_hookspecs(self, module_or_class): """ add new hook specifications defined in the given module_or_class. Functions are recognized if they have been decorated accordingly. """ names = [] for name in dir(module_or_class): spec_opts = self.parse_hookspec_opts(module_or_class, name) if spec_opts is not None: hc = getattr(self.hook, name, None) if hc is None: hc = _HookCaller(name, self._hookexec, module_or_class, spec_opts) setattr(self.hook, name, hc) else: # plugins registered this hook without knowing the spec hc.set_specification(module_or_class, spec_opts) for hookfunction in hc.get_hookimpls(): self._verify_hook(hc, hookfunction) names.append(name) if not names: raise ValueError( "did not find any %r hooks in %r" % (self.project_name, module_or_class) )
[ "def", "add_hookspecs", "(", "self", ",", "module_or_class", ")", ":", "names", "=", "[", "]", "for", "name", "in", "dir", "(", "module_or_class", ")", ":", "spec_opts", "=", "self", ".", "parse_hookspec_opts", "(", "module_or_class", ",", "name", ")", "if", "spec_opts", "is", "not", "None", ":", "hc", "=", "getattr", "(", "self", ".", "hook", ",", "name", ",", "None", ")", "if", "hc", "is", "None", ":", "hc", "=", "_HookCaller", "(", "name", ",", "self", ".", "_hookexec", ",", "module_or_class", ",", "spec_opts", ")", "setattr", "(", "self", ".", "hook", ",", "name", ",", "hc", ")", "else", ":", "# plugins registered this hook without knowing the spec", "hc", ".", "set_specification", "(", "module_or_class", ",", "spec_opts", ")", "for", "hookfunction", "in", "hc", ".", "get_hookimpls", "(", ")", ":", "self", ".", "_verify_hook", "(", "hc", ",", "hookfunction", ")", "names", ".", "append", "(", "name", ")", "if", "not", "names", ":", "raise", "ValueError", "(", "\"did not find any %r hooks in %r\"", "%", "(", "self", ".", "project_name", ",", "module_or_class", ")", ")" ]
add new hook specifications defined in the given module_or_class. Functions are recognized if they have been decorated accordingly.
[ "add", "new", "hook", "specifications", "defined", "in", "the", "given", "module_or_class", ".", "Functions", "are", "recognized", "if", "they", "have", "been", "decorated", "accordingly", "." ]
python
train
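A short end-to-end sketch showing where add_hookspecs() fits in the usual pluggy flow: declare a spec, register an implementation, then call the hook.

import pluggy

hookspec = pluggy.HookspecMarker("myproject")
hookimpl = pluggy.HookimplMarker("myproject")

class MySpec:
    @hookspec
    def myhook(self, arg):
        """Specification: implementations receive `arg` and return a value."""

class MyPlugin:
    @hookimpl
    def myhook(self, arg):
        return arg + 1

pm = pluggy.PluginManager("myproject")
pm.add_hookspecs(MySpec)       # registers the specifications defined above
pm.register(MyPlugin())
print(pm.hook.myhook(arg=41))  # -> [42]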
dnanexus/dx-toolkit
src/python/dxpy/bindings/search.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/search.py#L528-L622
def find_global_executables(method, name=None, name_mode='exact', category=None, all_versions=None, published=None, billed_to=None, created_by=None, developer=None, created_after=None, created_before=None, modified_after=None, modified_before=None, describe=False, limit=None, return_handler=False, first_page_size=100, **kwargs): """ :param method: Name of the API method used to find the global executable (app or a global workflow). :type name: function :param name: Name of the app or a global workflow (also see *name_mode*) :type name: string :param name_mode: Method by which to interpret the *name* field ("exact": exact match, "glob": use "*" and "?" as wildcards, "regexp": interpret as a regular expression) :type name_mode: string :param category: If specified, only returns executables that are in the specified category :type category: string :param all_versions: Whether to return all versions of each app/global workflow or just the version tagged "default" :type all_versions: boolean :param published: If specified, only returns results that have the specified publish status (True for published apps/global workflows, False for unpublished ones) :type published: boolean :param billed_to: Entity ID (user or organization) that pays for the storage costs of the app/global workflow :type billed_to: string :param created_by: If specified, only returns versions that were created by the specified user (of the form "user-USERNAME") :type created_by: string :param developer: If specified, only returns apps or global workflows for which the specified user (of the form "user-USERNAME") is a developer :type developer: string :param created_after: Timestamp after which each result was last created (see note accompanying :meth:`find_data_objects()` for interpretation) :type created_after: int or string :param created_before: Timestamp before which each result was last created (see note accompanying :meth:`find_data_objects()` for interpretation) :type created_before: int or string :param modified_after: Timestamp after which each result was last modified (see note accompanying :meth:`find_data_objects()` for interpretation) :type modified_after: int or string :param modified_before: Timestamp before which each result was last modified (see note accompanying :meth:`find_data_objects()` for interpretation) :type modified_before: int or string :param describe: Controls whether to also return the output of calling describe() on each executable. Supply False to omit describe output, True to obtain the default describe output, or a dict to be supplied as the describe call input (which may be used to customize the set of fields that is returned) :type describe: bool or dict :param limit: The maximum number of results to be returned (if not specified, the number of results is unlimited) :type limit: int :param first_page_size: The number of results that the initial API call will return. Subsequent calls will raise this by multiplying by 2 up to a maximum of 1000. :type first_page_size: int :param return_handler: If True, yields results as dxpy object handlers (otherwise, yields each result as a dict with keys "id" and "project") :type return_handler: boolean :rtype: generator Returns a generator that yields all global executables (either apps or global workflows) that match the query. It transparently handles paging through the result set if necessary. For all parameters that are omitted, the search is not restricted by the corresponding field. 
""" query = {} if name is not None: if name_mode == 'exact': query['name'] = name elif name_mode == 'glob': query['name'] = {'glob': name} elif name_mode == 'regexp': query['name'] = {'regexp': name} else: raise DXError('find_global_executables: Unexpected value found for argument name_mode') if category is not None: query["category"] = category if all_versions is not None: query["allVersions"] = all_versions if published is not None: query["published"] = published if billed_to is not None: query["billTo"] = billed_to if created_by is not None: query["createdBy"] = created_by if developer is not None: query["developer"] = developer if modified_after is not None or modified_before is not None: query["modified"] = {} if modified_after is not None: query["modified"]["after"] = dxpy.utils.normalize_time_input(modified_after) if modified_before is not None: query["modified"]["before"] = dxpy.utils.normalize_time_input(modified_before) if created_after is not None or created_before is not None: query["created"] = {} if created_after is not None: query["created"]["after"] = dxpy.utils.normalize_time_input(created_after) if created_before is not None: query["created"]["before"] = dxpy.utils.normalize_time_input(created_before) if describe is not None and describe is not False: query["describe"] = describe if limit is not None: query["limit"] = limit return _find(method, query, limit, return_handler, first_page_size, **kwargs)
[ "def", "find_global_executables", "(", "method", ",", "name", "=", "None", ",", "name_mode", "=", "'exact'", ",", "category", "=", "None", ",", "all_versions", "=", "None", ",", "published", "=", "None", ",", "billed_to", "=", "None", ",", "created_by", "=", "None", ",", "developer", "=", "None", ",", "created_after", "=", "None", ",", "created_before", "=", "None", ",", "modified_after", "=", "None", ",", "modified_before", "=", "None", ",", "describe", "=", "False", ",", "limit", "=", "None", ",", "return_handler", "=", "False", ",", "first_page_size", "=", "100", ",", "*", "*", "kwargs", ")", ":", "query", "=", "{", "}", "if", "name", "is", "not", "None", ":", "if", "name_mode", "==", "'exact'", ":", "query", "[", "'name'", "]", "=", "name", "elif", "name_mode", "==", "'glob'", ":", "query", "[", "'name'", "]", "=", "{", "'glob'", ":", "name", "}", "elif", "name_mode", "==", "'regexp'", ":", "query", "[", "'name'", "]", "=", "{", "'regexp'", ":", "name", "}", "else", ":", "raise", "DXError", "(", "'find_global_executables: Unexpected value found for argument name_mode'", ")", "if", "category", "is", "not", "None", ":", "query", "[", "\"category\"", "]", "=", "category", "if", "all_versions", "is", "not", "None", ":", "query", "[", "\"allVersions\"", "]", "=", "all_versions", "if", "published", "is", "not", "None", ":", "query", "[", "\"published\"", "]", "=", "published", "if", "billed_to", "is", "not", "None", ":", "query", "[", "\"billTo\"", "]", "=", "billed_to", "if", "created_by", "is", "not", "None", ":", "query", "[", "\"createdBy\"", "]", "=", "created_by", "if", "developer", "is", "not", "None", ":", "query", "[", "\"developer\"", "]", "=", "developer", "if", "modified_after", "is", "not", "None", "or", "modified_before", "is", "not", "None", ":", "query", "[", "\"modified\"", "]", "=", "{", "}", "if", "modified_after", "is", "not", "None", ":", "query", "[", "\"modified\"", "]", "[", "\"after\"", "]", "=", "dxpy", ".", "utils", ".", "normalize_time_input", "(", "modified_after", ")", "if", "modified_before", "is", "not", "None", ":", "query", "[", "\"modified\"", "]", "[", "\"before\"", "]", "=", "dxpy", ".", "utils", ".", "normalize_time_input", "(", "modified_before", ")", "if", "created_after", "is", "not", "None", "or", "created_before", "is", "not", "None", ":", "query", "[", "\"created\"", "]", "=", "{", "}", "if", "created_after", "is", "not", "None", ":", "query", "[", "\"created\"", "]", "[", "\"after\"", "]", "=", "dxpy", ".", "utils", ".", "normalize_time_input", "(", "created_after", ")", "if", "created_before", "is", "not", "None", ":", "query", "[", "\"created\"", "]", "[", "\"before\"", "]", "=", "dxpy", ".", "utils", ".", "normalize_time_input", "(", "created_before", ")", "if", "describe", "is", "not", "None", "and", "describe", "is", "not", "False", ":", "query", "[", "\"describe\"", "]", "=", "describe", "if", "limit", "is", "not", "None", ":", "query", "[", "\"limit\"", "]", "=", "limit", "return", "_find", "(", "method", ",", "query", ",", "limit", ",", "return_handler", ",", "first_page_size", ",", "*", "*", "kwargs", ")" ]
:param method: Name of the API method used to find the global executable (app or a global workflow). :type name: function :param name: Name of the app or a global workflow (also see *name_mode*) :type name: string :param name_mode: Method by which to interpret the *name* field ("exact": exact match, "glob": use "*" and "?" as wildcards, "regexp": interpret as a regular expression) :type name_mode: string :param category: If specified, only returns executables that are in the specified category :type category: string :param all_versions: Whether to return all versions of each app/global workflow or just the version tagged "default" :type all_versions: boolean :param published: If specified, only returns results that have the specified publish status (True for published apps/global workflows, False for unpublished ones) :type published: boolean :param billed_to: Entity ID (user or organization) that pays for the storage costs of the app/global workflow :type billed_to: string :param created_by: If specified, only returns versions that were created by the specified user (of the form "user-USERNAME") :type created_by: string :param developer: If specified, only returns apps or global workflows for which the specified user (of the form "user-USERNAME") is a developer :type developer: string :param created_after: Timestamp after which each result was last created (see note accompanying :meth:`find_data_objects()` for interpretation) :type created_after: int or string :param created_before: Timestamp before which each result was last created (see note accompanying :meth:`find_data_objects()` for interpretation) :type created_before: int or string :param modified_after: Timestamp after which each result was last modified (see note accompanying :meth:`find_data_objects()` for interpretation) :type modified_after: int or string :param modified_before: Timestamp before which each result was last modified (see note accompanying :meth:`find_data_objects()` for interpretation) :type modified_before: int or string :param describe: Controls whether to also return the output of calling describe() on each executable. Supply False to omit describe output, True to obtain the default describe output, or a dict to be supplied as the describe call input (which may be used to customize the set of fields that is returned) :type describe: bool or dict :param limit: The maximum number of results to be returned (if not specified, the number of results is unlimited) :type limit: int :param first_page_size: The number of results that the initial API call will return. Subsequent calls will raise this by multiplying by 2 up to a maximum of 1000. :type first_page_size: int :param return_handler: If True, yields results as dxpy object handlers (otherwise, yields each result as a dict with keys "id" and "project") :type return_handler: boolean :rtype: generator Returns a generator that yields all global executables (either apps or global workflows) that match the query. It transparently handles paging through the result set if necessary. For all parameters that are omitted, the search is not restricted by the corresponding field.
[ ":", "param", "method", ":", "Name", "of", "the", "API", "method", "used", "to", "find", "the", "global", "executable", "(", "app", "or", "a", "global", "workflow", ")", ".", ":", "type", "name", ":", "function", ":", "param", "name", ":", "Name", "of", "the", "app", "or", "a", "global", "workflow", "(", "also", "see", "*", "name_mode", "*", ")", ":", "type", "name", ":", "string", ":", "param", "name_mode", ":", "Method", "by", "which", "to", "interpret", "the", "*", "name", "*", "field", "(", "exact", ":", "exact", "match", "glob", ":", "use", "*", "and", "?", "as", "wildcards", "regexp", ":", "interpret", "as", "a", "regular", "expression", ")", ":", "type", "name_mode", ":", "string", ":", "param", "category", ":", "If", "specified", "only", "returns", "executables", "that", "are", "in", "the", "specified", "category", ":", "type", "category", ":", "string", ":", "param", "all_versions", ":", "Whether", "to", "return", "all", "versions", "of", "each", "app", "/", "global", "workflow", "or", "just", "the", "version", "tagged", "default", ":", "type", "all_versions", ":", "boolean", ":", "param", "published", ":", "If", "specified", "only", "returns", "results", "that", "have", "the", "specified", "publish", "status", "(", "True", "for", "published", "apps", "/", "global", "workflows", "False", "for", "unpublished", "ones", ")", ":", "type", "published", ":", "boolean", ":", "param", "billed_to", ":", "Entity", "ID", "(", "user", "or", "organization", ")", "that", "pays", "for", "the", "storage", "costs", "of", "the", "app", "/", "global", "workflow", ":", "type", "billed_to", ":", "string", ":", "param", "created_by", ":", "If", "specified", "only", "returns", "versions", "that", "were", "created", "by", "the", "specified", "user", "(", "of", "the", "form", "user", "-", "USERNAME", ")", ":", "type", "created_by", ":", "string", ":", "param", "developer", ":", "If", "specified", "only", "returns", "apps", "or", "global", "workflows", "for", "which", "the", "specified", "user", "(", "of", "the", "form", "user", "-", "USERNAME", ")", "is", "a", "developer", ":", "type", "developer", ":", "string", ":", "param", "created_after", ":", "Timestamp", "after", "which", "each", "result", "was", "last", "created", "(", "see", "note", "accompanying", ":", "meth", ":", "find_data_objects", "()", "for", "interpretation", ")", ":", "type", "created_after", ":", "int", "or", "string", ":", "param", "created_before", ":", "Timestamp", "before", "which", "each", "result", "was", "last", "created", "(", "see", "note", "accompanying", ":", "meth", ":", "find_data_objects", "()", "for", "interpretation", ")", ":", "type", "created_before", ":", "int", "or", "string", ":", "param", "modified_after", ":", "Timestamp", "after", "which", "each", "result", "was", "last", "modified", "(", "see", "note", "accompanying", ":", "meth", ":", "find_data_objects", "()", "for", "interpretation", ")", ":", "type", "modified_after", ":", "int", "or", "string", ":", "param", "modified_before", ":", "Timestamp", "before", "which", "each", "result", "was", "last", "modified", "(", "see", "note", "accompanying", ":", "meth", ":", "find_data_objects", "()", "for", "interpretation", ")", ":", "type", "modified_before", ":", "int", "or", "string", ":", "param", "describe", ":", "Controls", "whether", "to", "also", "return", "the", "output", "of", "calling", "describe", "()", "on", "each", "executable", ".", "Supply", "False", "to", "omit", "describe", "output", "True", "to", "obtain", "the", "default", "describe", "output", 
"or", "a", "dict", "to", "be", "supplied", "as", "the", "describe", "call", "input", "(", "which", "may", "be", "used", "to", "customize", "the", "set", "of", "fields", "that", "is", "returned", ")", ":", "type", "describe", ":", "bool", "or", "dict", ":", "param", "limit", ":", "The", "maximum", "number", "of", "results", "to", "be", "returned", "(", "if", "not", "specified", "the", "number", "of", "results", "is", "unlimited", ")", ":", "type", "limit", ":", "int", ":", "param", "first_page_size", ":", "The", "number", "of", "results", "that", "the", "initial", "API", "call", "will", "return", ".", "Subsequent", "calls", "will", "raise", "this", "by", "multiplying", "by", "2", "up", "to", "a", "maximum", "of", "1000", ".", ":", "type", "first_page_size", ":", "int", ":", "param", "return_handler", ":", "If", "True", "yields", "results", "as", "dxpy", "object", "handlers", "(", "otherwise", "yields", "each", "result", "as", "a", "dict", "with", "keys", "id", "and", "project", ")", ":", "type", "return_handler", ":", "boolean", ":", "rtype", ":", "generator" ]
python
train
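A hedged sketch of calling the helper directly; it is normally reached through wrappers such as dxpy.find_apps(), and the example assumes dxpy.api.system_find_apps as the method, an authenticated API session, and that each result dict carries "id" plus "describe" when describe output is requested.

import dxpy
from dxpy.bindings.search import find_global_executables

for result in find_global_executables(dxpy.api.system_find_apps,
                                      name="bwa*", name_mode="glob",
                                      describe=True, limit=5):
    # Print the app ID and, from the describe output, its name.
    print(result["id"], result["describe"]["name"])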
tensorflow/tensor2tensor
tensor2tensor/layers/common_layers.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L2688-L2721
def padded_cross_entropy_factored(factored_logits, labels, label_smoothing, weights_fn=weights_nonzero, reduce_sum=True): """Memory-efficient computation of smoothing cross-entropy. Avoids realizing the entire logits matrix at once. Args: factored_logits: a `FactoredTensor` representing a Tensor with shape `[batch, timesteps, vocab_size]`. labels: an integer `Tensor` with shape `[batch, timesteps]`. label_smoothing: a floating point `Scalar`. weights_fn: A function from labels to weights. reduce_sum: a Boolean, whether to sum at the end or not. Returns: loss_numerator: a `Scalar`. Sum of losses. loss_denominator: a `Scalar. The number of non-padding target tokens. """ a = factored_logits.a b = factored_logits.b confidence = 1.0 - label_smoothing with tf.name_scope("padded_cross_entropy_factored", values=[a, b, labels]): labels_flat = tf.reshape(labels, [-1]) a_flat = tf.reshape(a, [-1, shape_list(b)[1]]) xent = smoothing_cross_entropy_factored(a_flat, b, labels_flat, tf.convert_to_tensor(confidence)) xent = tf.reshape(xent, shape_list(labels)) weights = weights_fn(labels) if not reduce_sum: return xent * weights, weights return tf.reduce_sum(xent * weights), tf.reduce_sum(weights)
[ "def", "padded_cross_entropy_factored", "(", "factored_logits", ",", "labels", ",", "label_smoothing", ",", "weights_fn", "=", "weights_nonzero", ",", "reduce_sum", "=", "True", ")", ":", "a", "=", "factored_logits", ".", "a", "b", "=", "factored_logits", ".", "b", "confidence", "=", "1.0", "-", "label_smoothing", "with", "tf", ".", "name_scope", "(", "\"padded_cross_entropy_factored\"", ",", "values", "=", "[", "a", ",", "b", ",", "labels", "]", ")", ":", "labels_flat", "=", "tf", ".", "reshape", "(", "labels", ",", "[", "-", "1", "]", ")", "a_flat", "=", "tf", ".", "reshape", "(", "a", ",", "[", "-", "1", ",", "shape_list", "(", "b", ")", "[", "1", "]", "]", ")", "xent", "=", "smoothing_cross_entropy_factored", "(", "a_flat", ",", "b", ",", "labels_flat", ",", "tf", ".", "convert_to_tensor", "(", "confidence", ")", ")", "xent", "=", "tf", ".", "reshape", "(", "xent", ",", "shape_list", "(", "labels", ")", ")", "weights", "=", "weights_fn", "(", "labels", ")", "if", "not", "reduce_sum", ":", "return", "xent", "*", "weights", ",", "weights", "return", "tf", ".", "reduce_sum", "(", "xent", "*", "weights", ")", ",", "tf", ".", "reduce_sum", "(", "weights", ")" ]
Memory-efficient computation of smoothing cross-entropy. Avoids realizing the entire logits matrix at once. Args: factored_logits: a `FactoredTensor` representing a Tensor with shape `[batch, timesteps, vocab_size]`. labels: an integer `Tensor` with shape `[batch, timesteps]`. label_smoothing: a floating point `Scalar`. weights_fn: A function from labels to weights. reduce_sum: a Boolean, whether to sum at the end or not. Returns: loss_numerator: a `Scalar`. Sum of losses. loss_denominator: a `Scalar. The number of non-padding target tokens.
[ "Memory", "-", "efficient", "computation", "of", "smoothing", "cross", "-", "entropy", "." ]
python
train
msmbuilder/msmbuilder
msmbuilder/msm/validation/bootstrapmsm.py
https://github.com/msmbuilder/msmbuilder/blob/556a93a170782f47be53f4a1e9d740fb1c8272b3/msmbuilder/msm/validation/bootstrapmsm.py#L199-L214
def _mapped_populations(mdl1, mdl2): """ Method to get the populations for states in mdl 1 from populations inferred in mdl 2. Resorts to 0 if population is not present. """ return_vect = np.zeros(mdl1.n_states_) for i in range(mdl1.n_states_): try: #there has to be a better way to do this mdl1_unmapped = mdl1.inverse_transform([i])[0][0] mdl2_mapped = mdl2.mapping_[mdl1_unmapped] return_vect[i] = mdl2.populations_[mdl2_mapped] except: pass return return_vect
[ "def", "_mapped_populations", "(", "mdl1", ",", "mdl2", ")", ":", "return_vect", "=", "np", ".", "zeros", "(", "mdl1", ".", "n_states_", ")", "for", "i", "in", "range", "(", "mdl1", ".", "n_states_", ")", ":", "try", ":", "#there has to be a better way to do this", "mdl1_unmapped", "=", "mdl1", ".", "inverse_transform", "(", "[", "i", "]", ")", "[", "0", "]", "[", "0", "]", "mdl2_mapped", "=", "mdl2", ".", "mapping_", "[", "mdl1_unmapped", "]", "return_vect", "[", "i", "]", "=", "mdl2", ".", "populations_", "[", "mdl2_mapped", "]", "except", ":", "pass", "return", "return_vect" ]
Method to get the populations for states in mdl 1 from populations inferred in mdl 2. Resorts to 0 if population is not present.
[ "Method", "to", "get", "the", "populations", "for", "states", "in", "mdl", "1", "from", "populations", "inferred", "in", "mdl", "2", ".", "Resorts", "to", "0", "if", "population", "is", "not", "present", "." ]
python
train
duniter/duniter-python-api
duniterpy/key/signing_key.py
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/key/signing_key.py#L311-L363
def from_ewif_hex(cls: Type[SigningKeyType], ewif_hex: str, password: str) -> SigningKeyType: """ Return SigningKey instance from Duniter EWIF in hexadecimal format :param ewif_hex: EWIF string in hexadecimal format :param password: Password of the encrypted seed """ ewif_bytes = Base58Encoder.decode(ewif_hex) if len(ewif_bytes) != 39: raise Exception("Error: the size of EWIF is invalid") # extract data fi = ewif_bytes[0:1] checksum_from_ewif = ewif_bytes[-2:] ewif_no_checksum = ewif_bytes[0:-2] salt = ewif_bytes[1:5] encryptedhalf1 = ewif_bytes[5:21] encryptedhalf2 = ewif_bytes[21:37] # check format flag if fi != b"\x02": raise Exception("Error: bad format version, not EWIF") # checksum control checksum = libnacl.crypto_hash_sha256(libnacl.crypto_hash_sha256(ewif_no_checksum))[0:2] if checksum_from_ewif != checksum: raise Exception("Error: bad checksum of the EWIF") # SCRYPT password_bytes = password.encode("utf-8") scrypt_seed = scrypt(password_bytes, salt, 16384, 8, 8, 64) derivedhalf1 = scrypt_seed[0:32] derivedhalf2 = scrypt_seed[32:64] # AES aes = pyaes.AESModeOfOperationECB(derivedhalf2) decryptedhalf1 = aes.decrypt(encryptedhalf1) decryptedhalf2 = aes.decrypt(encryptedhalf2) # XOR seed1 = xor_bytes(decryptedhalf1, derivedhalf1[0:16]) seed2 = xor_bytes(decryptedhalf2, derivedhalf1[16:32]) seed = bytes(seed1 + seed2) # Password Control signer = SigningKey(seed) salt_from_seed = libnacl.crypto_hash_sha256( libnacl.crypto_hash_sha256( Base58Encoder.decode(signer.pubkey)))[0:4] if salt_from_seed != salt: raise Exception("Error: bad Password of EWIF address") return cls(seed)
[ "def", "from_ewif_hex", "(", "cls", ":", "Type", "[", "SigningKeyType", "]", ",", "ewif_hex", ":", "str", ",", "password", ":", "str", ")", "->", "SigningKeyType", ":", "ewif_bytes", "=", "Base58Encoder", ".", "decode", "(", "ewif_hex", ")", "if", "len", "(", "ewif_bytes", ")", "!=", "39", ":", "raise", "Exception", "(", "\"Error: the size of EWIF is invalid\"", ")", "# extract data", "fi", "=", "ewif_bytes", "[", "0", ":", "1", "]", "checksum_from_ewif", "=", "ewif_bytes", "[", "-", "2", ":", "]", "ewif_no_checksum", "=", "ewif_bytes", "[", "0", ":", "-", "2", "]", "salt", "=", "ewif_bytes", "[", "1", ":", "5", "]", "encryptedhalf1", "=", "ewif_bytes", "[", "5", ":", "21", "]", "encryptedhalf2", "=", "ewif_bytes", "[", "21", ":", "37", "]", "# check format flag", "if", "fi", "!=", "b\"\\x02\"", ":", "raise", "Exception", "(", "\"Error: bad format version, not EWIF\"", ")", "# checksum control", "checksum", "=", "libnacl", ".", "crypto_hash_sha256", "(", "libnacl", ".", "crypto_hash_sha256", "(", "ewif_no_checksum", ")", ")", "[", "0", ":", "2", "]", "if", "checksum_from_ewif", "!=", "checksum", ":", "raise", "Exception", "(", "\"Error: bad checksum of the EWIF\"", ")", "# SCRYPT", "password_bytes", "=", "password", ".", "encode", "(", "\"utf-8\"", ")", "scrypt_seed", "=", "scrypt", "(", "password_bytes", ",", "salt", ",", "16384", ",", "8", ",", "8", ",", "64", ")", "derivedhalf1", "=", "scrypt_seed", "[", "0", ":", "32", "]", "derivedhalf2", "=", "scrypt_seed", "[", "32", ":", "64", "]", "# AES", "aes", "=", "pyaes", ".", "AESModeOfOperationECB", "(", "derivedhalf2", ")", "decryptedhalf1", "=", "aes", ".", "decrypt", "(", "encryptedhalf1", ")", "decryptedhalf2", "=", "aes", ".", "decrypt", "(", "encryptedhalf2", ")", "# XOR", "seed1", "=", "xor_bytes", "(", "decryptedhalf1", ",", "derivedhalf1", "[", "0", ":", "16", "]", ")", "seed2", "=", "xor_bytes", "(", "decryptedhalf2", ",", "derivedhalf1", "[", "16", ":", "32", "]", ")", "seed", "=", "bytes", "(", "seed1", "+", "seed2", ")", "# Password Control", "signer", "=", "SigningKey", "(", "seed", ")", "salt_from_seed", "=", "libnacl", ".", "crypto_hash_sha256", "(", "libnacl", ".", "crypto_hash_sha256", "(", "Base58Encoder", ".", "decode", "(", "signer", ".", "pubkey", ")", ")", ")", "[", "0", ":", "4", "]", "if", "salt_from_seed", "!=", "salt", ":", "raise", "Exception", "(", "\"Error: bad Password of EWIF address\"", ")", "return", "cls", "(", "seed", ")" ]
Return SigningKey instance from Duniter EWIF in hexadecimal format :param ewif_hex: EWIF string in hexadecimal format :param password: Password of the encrypted seed
[ "Return", "SigningKey", "instance", "from", "Duniter", "EWIF", "in", "hexadecimal", "format" ]
python
train
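A minimal usage sketch for the record above, assuming it comes from duniterpy and that `SigningKey` is importable from `duniterpy.key`; the EWIF string and password are placeholders, so running this as-is would raise rather than return a key.

from duniterpy.key import SigningKey

# Placeholder credentials for illustration only; a real Base58 EWIF string
# and the password used to encrypt it are required for this to succeed.
ewif = "3BRmWMHNQryBygZsLDvv..."   # not a valid EWIF
password = "my secret password"

# from_ewif_hex() decodes the payload, checks the format flag and checksum,
# derives the AES key with scrypt, and verifies the resulting public key
# against the embedded salt before returning a SigningKey.
signing_key = SigningKey.from_ewif_hex(ewif, password)
print(signing_key.pubkey)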
h2non/pook
pook/request.py
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/request.py#L141-L151
def copy(self): """ Copies the current Request object instance for side-effects purposes. Returns: pook.Request: copy of the current Request instance. """ req = type(self)() req.__dict__ = self.__dict__.copy() req._headers = self.headers.copy() return req
[ "def", "copy", "(", "self", ")", ":", "req", "=", "type", "(", "self", ")", "(", ")", "req", ".", "__dict__", "=", "self", ".", "__dict__", ".", "copy", "(", ")", "req", ".", "_headers", "=", "self", ".", "headers", ".", "copy", "(", ")", "return", "req" ]
Copies the current Request object instance for side-effects purposes. Returns: pook.Request: copy of the current Request instance.
[ "Copies", "the", "current", "Request", "object", "instance", "for", "side", "-", "effects", "purposes", "." ]
python
test
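A small sketch of the copy behaviour shown above, using only what the record itself demonstrates (a no-argument `Request()` constructor and a `headers` mapping); import path assumed from the record's `pook/request.py`.

from pook.request import Request

req = Request()          # default Request, as in the record's type(self)()
clone = req.copy()

# copy() clones the instance __dict__ and takes a separate copy of the
# headers mapping, so the two objects no longer share header state.
print(clone is req)                   # False
print(clone.headers is req.headers)   # False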
317070/python-twitch-stream
twitchstream/outputvideo.py
https://github.com/317070/python-twitch-stream/blob/83b4c2a27ee368fc3316b59ab1d25fcf0b0bcda6/twitchstream/outputvideo.py#L209-L237
def send_audio(self, left_channel, right_channel): """Add the audio samples to the stream. The left and the right channel should have the same shape. Raises an OSError when the stream is closed. :param left_channel: array containing the audio signal. :type left_channel: numpy array with shape (k, ) containing values between -1.0 and 1.0. k can be any integer :param right_channel: array containing the audio signal. :type right_channel: numpy array with shape (k, ) containing values between -1.0 and 1.0. k can be any integer """ if self.audio_pipe is None: if not os.path.exists('/tmp/audiopipe'): os.mkfifo('/tmp/audiopipe') self.audio_pipe = os.open('/tmp/audiopipe', os.O_WRONLY) assert len(left_channel.shape) == 1 assert left_channel.shape == right_channel.shape frame = np.column_stack((left_channel, right_channel)).flatten() frame = np.clip(32767*frame, -32767, 32767).astype('int16') try: os.write(self.audio_pipe, frame.tostring()) except OSError: # The pipe has been closed. Reraise and handle it further # downstream raise
[ "def", "send_audio", "(", "self", ",", "left_channel", ",", "right_channel", ")", ":", "if", "self", ".", "audio_pipe", "is", "None", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "'/tmp/audiopipe'", ")", ":", "os", ".", "mkfifo", "(", "'/tmp/audiopipe'", ")", "self", ".", "audio_pipe", "=", "os", ".", "open", "(", "'/tmp/audiopipe'", ",", "os", ".", "O_WRONLY", ")", "assert", "len", "(", "left_channel", ".", "shape", ")", "==", "1", "assert", "left_channel", ".", "shape", "==", "right_channel", ".", "shape", "frame", "=", "np", ".", "column_stack", "(", "(", "left_channel", ",", "right_channel", ")", ")", ".", "flatten", "(", ")", "frame", "=", "np", ".", "clip", "(", "32767", "*", "frame", ",", "-", "32767", ",", "32767", ")", ".", "astype", "(", "'int16'", ")", "try", ":", "os", ".", "write", "(", "self", ".", "audio_pipe", ",", "frame", ".", "tostring", "(", ")", ")", "except", "OSError", ":", "# The pipe has been closed. Reraise and handle it further", "# downstream", "raise" ]
Add the audio samples to the stream. The left and the right channel should have the same shape. Raises an OSError when the stream is closed. :param left_channel: array containing the audio signal. :type left_channel: numpy array with shape (k, ) containing values between -1.0 and 1.0. k can be any integer :param right_channel: array containing the audio signal. :type right_channel: numpy array with shape (k, ) containing values between -1.0 and 1.0. k can be any integer
[ "Add", "the", "audio", "samples", "to", "the", "stream", ".", "The", "left", "and", "the", "right", "channel", "should", "have", "the", "same", "shape", ".", "Raises", "an", "OSError", "when", "the", "stream", "is", "closed", "." ]
python
train
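A hedged sketch of feeding one second of stereo audio to the method above; `stream` is assumed to be an already-opened output stream object from this library that exposes send_audio(), and the tone parameters are arbitrary.

import numpy as np

rate = 44100                                  # samples per second assumed by the stream
t = np.arange(rate) / rate
tone = 0.2 * np.sin(2 * np.pi * 440.0 * t)    # quiet 440 Hz test tone

# Both channels must be 1-D arrays of equal length with values in [-1.0, 1.0].
left = tone
right = tone

stream.send_audio(left, right)                # `stream` assumed to exist already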
asciimoo/drawille
drawille.py
https://github.com/asciimoo/drawille/blob/ab58bba76cad68674ce50df7382c235ed21ab5ae/drawille.py#L242-L255
def frame(self, min_x=None, min_y=None, max_x=None, max_y=None): """String representation of the current :class:`Canvas` object pixels. :param min_x: (optional) minimum x coordinate of the canvas :param min_y: (optional) minimum y coordinate of the canvas :param max_x: (optional) maximum x coordinate of the canvas :param max_y: (optional) maximum y coordinate of the canvas """ ret = self.line_ending.join(self.rows(min_x, min_y, max_x, max_y)) if IS_PY3: return ret return ret.encode('utf-8')
[ "def", "frame", "(", "self", ",", "min_x", "=", "None", ",", "min_y", "=", "None", ",", "max_x", "=", "None", ",", "max_y", "=", "None", ")", ":", "ret", "=", "self", ".", "line_ending", ".", "join", "(", "self", ".", "rows", "(", "min_x", ",", "min_y", ",", "max_x", ",", "max_y", ")", ")", "if", "IS_PY3", ":", "return", "ret", "return", "ret", ".", "encode", "(", "'utf-8'", ")" ]
String representation of the current :class:`Canvas` object pixels. :param min_x: (optional) minimum x coordinate of the canvas :param min_y: (optional) minimum y coordinate of the canvas :param max_x: (optional) maximum x coordinate of the canvas :param max_y: (optional) maximum y coordinate of the canvas
[ "String", "representation", "of", "the", "current", ":", "class", ":", "Canvas", "object", "pixels", "." ]
python
train
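A quick sketch of the frame() call above using drawille's public Canvas API.

from drawille import Canvas

c = Canvas()
# Draw a short diagonal line pixel by pixel, then render it as braille text.
for i in range(10):
    c.set(i, i)

print(c.frame())             # the whole canvas
print(c.frame(0, 0, 6, 6))   # only the min/max window described in the docstring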
7sDream/zhihu-py3
zhihu/question.py
https://github.com/7sDream/zhihu-py3/blob/bcb4aa8325f8b54d3b44bd0bdc959edd9761fcfc/zhihu/question.py#L375-L390
def refresh(self): """刷新 Question object 的属性. 例如回答数增加了, 先调用 ``refresh()`` 再访问 answer_num 属性, 可获得更新后的答案数量. :return: None """ super().refresh() self._html = None self._title = None self._details = None self._answer_num = None self._follower_num = None self._topics = None self._last_edit_time = None self._logs = None
[ "def", "refresh", "(", "self", ")", ":", "super", "(", ")", ".", "refresh", "(", ")", "self", ".", "_html", "=", "None", "self", ".", "_title", "=", "None", "self", ".", "_details", "=", "None", "self", ".", "_answer_num", "=", "None", "self", ".", "_follower_num", "=", "None", "self", ".", "_topics", "=", "None", "self", ".", "_last_edit_time", "=", "None", "self", ".", "_logs", "=", "None" ]
Refresh the attributes of the Question object. For example, if the number of answers has increased, call ``refresh()`` first and then access the answer_num attribute to get the updated answer count.

:return: None
[ "刷新", "Question", "object", "的属性", ".", "例如回答数增加了", "先调用", "refresh", "()", "再访问", "answer_num", "属性", "可获得更新后的答案数量", ".", ":", "return", ":", "None" ]
python
train
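A hedged usage sketch for the record above; zhihu-py3 access is assumed to go through ZhihuClient with an existing cookies file, and the question URL is a placeholder.

from zhihu import ZhihuClient

client = ZhihuClient('cookies.json')      # assumed pre-saved login cookies
question = client.question('https://www.zhihu.com/question/24825703')   # placeholder URL

before = question.answer_num
question.refresh()                        # drop every cached property
after = question.answer_num               # re-fetched on the next access
print(before, after)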
leonidessaguisagjr/unicodeutil
unicodeutil/hangulutil.py
https://github.com/leonidessaguisagjr/unicodeutil/blob/c25c882cf9cb38c123df49fad365be67e5818928/unicodeutil/hangulutil.py#L215-L230
def _get_hangul_syllable_name(hangul_syllable): """ Function for taking a Unicode scalar value representing a Hangul syllable and converting it to its syllable name as defined by the Unicode naming rule NR1. See the Unicode Standard, ch. 04, section 4.8, Names, for more information. :param hangul_syllable: Unicode scalar value representing the Hangul syllable to convert :return: String representing its syllable name as transformed according to naming rule NR1. """ if not _is_hangul_syllable(hangul_syllable): raise ValueError("Value passed in does not represent a Hangul syllable!") jamo = decompose_hangul_syllable(hangul_syllable, fully_decompose=True) result = '' for j in jamo: if j is not None: result += _get_jamo_short_name(j) return result
[ "def", "_get_hangul_syllable_name", "(", "hangul_syllable", ")", ":", "if", "not", "_is_hangul_syllable", "(", "hangul_syllable", ")", ":", "raise", "ValueError", "(", "\"Value passed in does not represent a Hangul syllable!\"", ")", "jamo", "=", "decompose_hangul_syllable", "(", "hangul_syllable", ",", "fully_decompose", "=", "True", ")", "result", "=", "''", "for", "j", "in", "jamo", ":", "if", "j", "is", "not", "None", ":", "result", "+=", "_get_jamo_short_name", "(", "j", ")", "return", "result" ]
Function for taking a Unicode scalar value representing a Hangul syllable and converting it to its syllable name as defined by the Unicode naming rule NR1. See the Unicode Standard, ch. 04, section 4.8, Names, for more information. :param hangul_syllable: Unicode scalar value representing the Hangul syllable to convert :return: String representing its syllable name as transformed according to naming rule NR1.
[ "Function", "for", "taking", "a", "Unicode", "scalar", "value", "representing", "a", "Hangul", "syllable", "and", "converting", "it", "to", "its", "syllable", "name", "as", "defined", "by", "the", "Unicode", "naming", "rule", "NR1", ".", "See", "the", "Unicode", "Standard", "ch", ".", "04", "section", "4", ".", "8", "Names", "for", "more", "information", "." ]
python
train
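An illustrative call to the helper above; it is a private function, so importing it directly is done here only to sketch its behaviour.

from unicodeutil.hangulutil import _get_hangul_syllable_name

# U+AC01 decomposes into the jamo G + A + G, so naming rule NR1 yields the
# concatenated short names "GAG" (as in the character name HANGUL SYLLABLE GAG).
print(_get_hangul_syllable_name(0xAC01))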
log2timeline/plaso
plaso/parsers/mediator.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/mediator.py#L628-L639
def SetStorageWriter(self, storage_writer): """Sets the storage writer. Args: storage_writer (StorageWriter): storage writer. """ self._storage_writer = storage_writer # Reset the last event data information. Each storage file should # contain event data for their events. self._last_event_data_hash = None self._last_event_data_identifier = None
[ "def", "SetStorageWriter", "(", "self", ",", "storage_writer", ")", ":", "self", ".", "_storage_writer", "=", "storage_writer", "# Reset the last event data information. Each storage file should", "# contain event data for their events.", "self", ".", "_last_event_data_hash", "=", "None", "self", ".", "_last_event_data_identifier", "=", "None" ]
Sets the storage writer. Args: storage_writer (StorageWriter): storage writer.
[ "Sets", "the", "storage", "writer", "." ]
python
train
google/grr
grr/server/grr_response_server/export.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/export.py#L1772-L1794
def ConvertValues(default_metadata, values, token=None, options=None): """Converts a set of RDFValues into a set of export-friendly RDFValues. Args: default_metadata: export.ExportedMetadata instance with basic information about where the values come from. This metadata will be passed to exporters. values: Values to convert. They should be of the same type. token: Security token. options: rdfvalue.ExportOptions instance that will be passed to ExportConverters. Returns: Converted values. Converted values may be of different types (unlike the source values which are all of the same type). This is due to the fact that multiple ExportConverters may be applied to the same value thus generating multiple converted values of different types. Raises: NoConverterFound: in case no suitable converters were found for the values. """ batch_data = [(default_metadata, obj) for obj in values] return ConvertValuesWithMetadata(batch_data, token=token, options=options)
[ "def", "ConvertValues", "(", "default_metadata", ",", "values", ",", "token", "=", "None", ",", "options", "=", "None", ")", ":", "batch_data", "=", "[", "(", "default_metadata", ",", "obj", ")", "for", "obj", "in", "values", "]", "return", "ConvertValuesWithMetadata", "(", "batch_data", ",", "token", "=", "token", ",", "options", "=", "options", ")" ]
Converts a set of RDFValues into a set of export-friendly RDFValues. Args: default_metadata: export.ExportedMetadata instance with basic information about where the values come from. This metadata will be passed to exporters. values: Values to convert. They should be of the same type. token: Security token. options: rdfvalue.ExportOptions instance that will be passed to ExportConverters. Returns: Converted values. Converted values may be of different types (unlike the source values which are all of the same type). This is due to the fact that multiple ExportConverters may be applied to the same value thus generating multiple converted values of different types. Raises: NoConverterFound: in case no suitable converters were found for the values.
[ "Converts", "a", "set", "of", "RDFValues", "into", "a", "set", "of", "export", "-", "friendly", "RDFValues", "." ]
python
train
AltSchool/dynamic-rest
dynamic_rest/links.py
https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/links.py#L8-L48
def merge_link_object(serializer, data, instance): """Add a 'links' attribute to the data that maps field names to URLs. NOTE: This is the format that Ember Data supports, but alternative implementations are possible to support other formats. """ link_object = {} if not getattr(instance, 'pk', None): # If instance doesn't have a `pk` field, we'll assume it doesn't # have a canonical resource URL to hang a link off of. # This generally only affectes Ephemeral Objects. return data link_fields = serializer.get_link_fields() for name, field in six.iteritems(link_fields): # For included fields, omit link if there's no data. if name in data and not data[name]: continue link = getattr(field, 'link', None) if link is None: base_url = '' if settings.ENABLE_HOST_RELATIVE_LINKS: # if the resource isn't registered, this will default back to # using resource-relative urls for links. base_url = DynamicRouter.get_canonical_path( serializer.get_resource_key(), instance.pk ) or '' link = '%s%s/' % (base_url, name) # Default to DREST-generated relation endpoints. elif callable(link): link = link(name, field, data, instance) link_object[name] = link if link_object: data['links'] = link_object return data
[ "def", "merge_link_object", "(", "serializer", ",", "data", ",", "instance", ")", ":", "link_object", "=", "{", "}", "if", "not", "getattr", "(", "instance", ",", "'pk'", ",", "None", ")", ":", "# If instance doesn't have a `pk` field, we'll assume it doesn't", "# have a canonical resource URL to hang a link off of.", "# This generally only affectes Ephemeral Objects.", "return", "data", "link_fields", "=", "serializer", ".", "get_link_fields", "(", ")", "for", "name", ",", "field", "in", "six", ".", "iteritems", "(", "link_fields", ")", ":", "# For included fields, omit link if there's no data.", "if", "name", "in", "data", "and", "not", "data", "[", "name", "]", ":", "continue", "link", "=", "getattr", "(", "field", ",", "'link'", ",", "None", ")", "if", "link", "is", "None", ":", "base_url", "=", "''", "if", "settings", ".", "ENABLE_HOST_RELATIVE_LINKS", ":", "# if the resource isn't registered, this will default back to", "# using resource-relative urls for links.", "base_url", "=", "DynamicRouter", ".", "get_canonical_path", "(", "serializer", ".", "get_resource_key", "(", ")", ",", "instance", ".", "pk", ")", "or", "''", "link", "=", "'%s%s/'", "%", "(", "base_url", ",", "name", ")", "# Default to DREST-generated relation endpoints.", "elif", "callable", "(", "link", ")", ":", "link", "=", "link", "(", "name", ",", "field", ",", "data", ",", "instance", ")", "link_object", "[", "name", "]", "=", "link", "if", "link_object", ":", "data", "[", "'links'", "]", "=", "link_object", "return", "data" ]
Add a 'links' attribute to the data that maps field names to URLs. NOTE: This is the format that Ember Data supports, but alternative implementations are possible to support other formats.
[ "Add", "a", "links", "attribute", "to", "the", "data", "that", "maps", "field", "names", "to", "URLs", "." ]
python
train
pyviz/geoviews
geoviews/util.py
https://github.com/pyviz/geoviews/blob/cc70ac2d5a96307769bc6192eaef8576c3d24b30/geoviews/util.py#L256-L299
def path_to_geom_dicts(path, skip_invalid=True): """ Converts a Path element into a list of geometry dictionaries, preserving all value dimensions. """ interface = path.interface.datatype if interface == 'geodataframe': return [row.to_dict() for _, row in path.data.iterrows()] elif interface == 'geom_dictionary': return path.data geoms = [] invalid = False xdim, ydim = path.kdims for i, path in enumerate(path.split(datatype='columns')): array = np.column_stack([path.pop(xdim.name), path.pop(ydim.name)]) splits = np.where(np.isnan(array[:, :2].astype('float')).sum(axis=1))[0] arrays = np.split(array, splits+1) if len(splits) else [array] subpaths = [] for j, arr in enumerate(arrays): if j != (len(arrays)-1): arr = arr[:-1] # Drop nan if len(arr) == 0: continue elif len(arr) == 1: if skip_invalid: continue g = Point(arr[0]) invalid = True else: g = LineString(arr) subpaths.append(g) if invalid: geoms += [dict(path, geometry=sp) for sp in subpaths] continue elif len(subpaths) == 1: geom = subpaths[0] elif subpaths: geom = MultiLineString(subpaths) path['geometry'] = geom geoms.append(path) return geoms
[ "def", "path_to_geom_dicts", "(", "path", ",", "skip_invalid", "=", "True", ")", ":", "interface", "=", "path", ".", "interface", ".", "datatype", "if", "interface", "==", "'geodataframe'", ":", "return", "[", "row", ".", "to_dict", "(", ")", "for", "_", ",", "row", "in", "path", ".", "data", ".", "iterrows", "(", ")", "]", "elif", "interface", "==", "'geom_dictionary'", ":", "return", "path", ".", "data", "geoms", "=", "[", "]", "invalid", "=", "False", "xdim", ",", "ydim", "=", "path", ".", "kdims", "for", "i", ",", "path", "in", "enumerate", "(", "path", ".", "split", "(", "datatype", "=", "'columns'", ")", ")", ":", "array", "=", "np", ".", "column_stack", "(", "[", "path", ".", "pop", "(", "xdim", ".", "name", ")", ",", "path", ".", "pop", "(", "ydim", ".", "name", ")", "]", ")", "splits", "=", "np", ".", "where", "(", "np", ".", "isnan", "(", "array", "[", ":", ",", ":", "2", "]", ".", "astype", "(", "'float'", ")", ")", ".", "sum", "(", "axis", "=", "1", ")", ")", "[", "0", "]", "arrays", "=", "np", ".", "split", "(", "array", ",", "splits", "+", "1", ")", "if", "len", "(", "splits", ")", "else", "[", "array", "]", "subpaths", "=", "[", "]", "for", "j", ",", "arr", "in", "enumerate", "(", "arrays", ")", ":", "if", "j", "!=", "(", "len", "(", "arrays", ")", "-", "1", ")", ":", "arr", "=", "arr", "[", ":", "-", "1", "]", "# Drop nan", "if", "len", "(", "arr", ")", "==", "0", ":", "continue", "elif", "len", "(", "arr", ")", "==", "1", ":", "if", "skip_invalid", ":", "continue", "g", "=", "Point", "(", "arr", "[", "0", "]", ")", "invalid", "=", "True", "else", ":", "g", "=", "LineString", "(", "arr", ")", "subpaths", ".", "append", "(", "g", ")", "if", "invalid", ":", "geoms", "+=", "[", "dict", "(", "path", ",", "geometry", "=", "sp", ")", "for", "sp", "in", "subpaths", "]", "continue", "elif", "len", "(", "subpaths", ")", "==", "1", ":", "geom", "=", "subpaths", "[", "0", "]", "elif", "subpaths", ":", "geom", "=", "MultiLineString", "(", "subpaths", ")", "path", "[", "'geometry'", "]", "=", "geom", "geoms", ".", "append", "(", "path", ")", "return", "geoms" ]
Converts a Path element into a list of geometry dictionaries, preserving all value dimensions.
[ "Converts", "a", "Path", "element", "into", "a", "list", "of", "geometry", "dictionaries", "preserving", "all", "value", "dimensions", "." ]
python
train
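A hedged sketch of the conversion above, assuming holoviews, geoviews and shapely are installed; the coordinates are arbitrary and the NaN row marks the split between two sub-paths.

import numpy as np
import holoviews as hv
from geoviews.util import path_to_geom_dicts

# Two sub-paths separated by a NaN row inside a single Path element.
xs = [0, 1, 2, np.nan, 5, 6]
ys = [0, 1, 0, np.nan, 5, 4]
path = hv.Path([np.column_stack([xs, ys])])

geoms = path_to_geom_dicts(path)
# Each dict carries a shapely geometry; here the two runs become a MultiLineString.
print(geoms[0]['geometry'])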
softlayer/softlayer-python
SoftLayer/managers/storage_utils.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/storage_utils.py#L409-L422
def find_snapshot_schedule_id(volume, snapshot_schedule_keyname): """Find the snapshot schedule ID for the given volume and keyname :param volume: The volume for which the snapshot ID is desired :param snapshot_schedule_keyname: The keyname of the snapshot schedule :return: Returns an int value indicating the volume's snapshot schedule ID """ for schedule in volume['schedules']: if 'type' in schedule and 'keyname' in schedule['type']: if schedule['type']['keyname'] == snapshot_schedule_keyname: return schedule['id'] raise ValueError("The given snapshot schedule ID was not found for " "the given storage volume")
[ "def", "find_snapshot_schedule_id", "(", "volume", ",", "snapshot_schedule_keyname", ")", ":", "for", "schedule", "in", "volume", "[", "'schedules'", "]", ":", "if", "'type'", "in", "schedule", "and", "'keyname'", "in", "schedule", "[", "'type'", "]", ":", "if", "schedule", "[", "'type'", "]", "[", "'keyname'", "]", "==", "snapshot_schedule_keyname", ":", "return", "schedule", "[", "'id'", "]", "raise", "ValueError", "(", "\"The given snapshot schedule ID was not found for \"", "\"the given storage volume\"", ")" ]
Find the snapshot schedule ID for the given volume and keyname :param volume: The volume for which the snapshot ID is desired :param snapshot_schedule_keyname: The keyname of the snapshot schedule :return: Returns an int value indicating the volume's snapshot schedule ID
[ "Find", "the", "snapshot", "schedule", "ID", "for", "the", "given", "volume", "and", "keyname" ]
python
train
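A plain-dict sketch of the lookup above; the volume only needs the fields the function reads, and the schedule keynames are illustrative values.

from SoftLayer.managers.storage_utils import find_snapshot_schedule_id

# Minimal stand-in for a storage volume as returned by the SoftLayer API.
volume = {
    'schedules': [
        {'id': 1111, 'type': {'keyname': 'SNAPSHOT_HOURLY'}},
        {'id': 2222, 'type': {'keyname': 'SNAPSHOT_WEEKLY'}},
    ]
}

print(find_snapshot_schedule_id(volume, 'SNAPSHOT_WEEKLY'))   # 2222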
abilian/abilian-core
abilian/services/repository/service.py
https://github.com/abilian/abilian-core/blob/0a71275bf108c3d51e13ca9e093c0249235351e3/abilian/services/repository/service.py#L453-L461
def _add_to(self, uuid, dest, other): """Add `item` to `dest` set, ensuring `item` is not present in `other` set.""" _assert_uuid(uuid) try: other.remove(uuid) except KeyError: pass dest.add(uuid)
[ "def", "_add_to", "(", "self", ",", "uuid", ",", "dest", ",", "other", ")", ":", "_assert_uuid", "(", "uuid", ")", "try", ":", "other", ".", "remove", "(", "uuid", ")", "except", "KeyError", ":", "pass", "dest", ".", "add", "(", "uuid", ")" ]
Add `item` to `dest` set, ensuring `item` is not present in `other` set.
[ "Add", "item", "to", "dest", "set", "ensuring", "item", "is", "not", "present", "in", "other", "set", "." ]
python
train
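The helper above just moves a UUID between two bookkeeping sets; a standalone sketch of that pattern with plain sets, independent of the repository service itself.

import uuid

# Stand-ins for the service's internal bookkeeping sets.
to_add, to_delete = set(), set()

item = uuid.uuid4()
to_delete.add(item)

# Same move as _add_to(): make sure the id leaves the other set before it
# lands in the destination set, so an id is never in both at once.
to_delete.discard(item)
to_add.add(item)

print(item in to_add, item in to_delete)   # True False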
dicaso/leopard
leopard/__init__.py
https://github.com/dicaso/leopard/blob/ee9f45251aaacd1e453b135b419f4f0b50fb036e/leopard/__init__.py#L289-L316
def sectionFromFunction(function,*args,**kwargs): """ This staticmethod executes the function that is passed with the provided args and kwargs. The first line of the function docstring is used as the section title, the comments within the function body are parsed and added as the section text. The function should return an ordered dict of figures and tables, that are then attached to the section. Args: function (function): The function that generates the section content. Returns: Section >>> # Section title of example function ... def exampleFunction(a,b=None): ... 'Mock figures and tables included' ... figures = (('fig1',Mock()),('fig2',Mock())) ... tables = (('tab1',Mock()),('tab2',Mock())) ... return figures, tables >>> Section.sectionFromFunction(exampleFunction,Mock(),b=Mock()) <Section @ Section title of example function> """ figures, tables = function(*args,**kwargs) title = inspect.getcomments(function)[1:].strip() text = inspect.getdoc(function) code = inspect.getsource(function) return Section(title=title,text=text,figures=figures,tables=tables,code=code)
[ "def", "sectionFromFunction", "(", "function", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "figures", ",", "tables", "=", "function", "(", "*", "args", ",", "*", "*", "kwargs", ")", "title", "=", "inspect", ".", "getcomments", "(", "function", ")", "[", "1", ":", "]", ".", "strip", "(", ")", "text", "=", "inspect", ".", "getdoc", "(", "function", ")", "code", "=", "inspect", ".", "getsource", "(", "function", ")", "return", "Section", "(", "title", "=", "title", ",", "text", "=", "text", ",", "figures", "=", "figures", ",", "tables", "=", "tables", ",", "code", "=", "code", ")" ]
This staticmethod executes the function that is passed with the provided args and kwargs. The first line of the function docstring is used as the section title, the comments within the function body are parsed and added as the section text. The function should return an ordered dict of figures and tables, that are then attached to the section. Args: function (function): The function that generates the section content. Returns: Section >>> # Section title of example function ... def exampleFunction(a,b=None): ... 'Mock figures and tables included' ... figures = (('fig1',Mock()),('fig2',Mock())) ... tables = (('tab1',Mock()),('tab2',Mock())) ... return figures, tables >>> Section.sectionFromFunction(exampleFunction,Mock(),b=Mock()) <Section @ Section title of example function>
[ "This", "staticmethod", "executes", "the", "function", "that", "is", "passed", "with", "the", "provided", "args", "and", "kwargs", ".", "The", "first", "line", "of", "the", "function", "docstring", "is", "used", "as", "the", "section", "title", "the", "comments", "within", "the", "function", "body", "are", "parsed", "and", "added", "as", "the", "section", "text", ".", "The", "function", "should", "return", "an", "ordered", "dict", "of", "figures", "and", "tables", "that", "are", "then", "attached", "to", "the", "section", "." ]
python
train
DataDog/integrations-core
win32_event_log/datadog_checks/win32_event_log/win32_event_log.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/win32_event_log/datadog_checks/win32_event_log/win32_event_log.py#L192-L229
def _msg_text(self): """ Generate the event's body to send to Datadog. Consider `event_format` parameter: * Only use the specified list of event properties. * If unspecified, default to the EventLog's `Message` or `InsertionStrings`. """ msg_text = u"" if self._format: msg_text_fields = ["%%%\n```"] for event_property in self._format: property_value = self.event.get(event_property) if property_value is None: self.log.warning(u"Unrecognized `%s` event property.", event_property) continue msg_text_fields.append( u"{property_name}: {property_value}".format( property_name=event_property, property_value=property_value ) ) msg_text_fields.append("```\n%%%") msg_text = u"\n".join(msg_text_fields) else: # Override when verbosity if self.event.get('Message'): msg_text = u"{message}\n".format(message=self.event['Message']) elif self.event.get('InsertionStrings'): msg_text = u"\n".join([i_str for i_str in self.event['InsertionStrings'] if i_str.strip()]) if self.notify_list: msg_text += u"\n{notify_list}".format(notify_list=' '.join([" @" + n for n in self.notify_list])) return msg_text
[ "def", "_msg_text", "(", "self", ")", ":", "msg_text", "=", "u\"\"", "if", "self", ".", "_format", ":", "msg_text_fields", "=", "[", "\"%%%\\n```\"", "]", "for", "event_property", "in", "self", ".", "_format", ":", "property_value", "=", "self", ".", "event", ".", "get", "(", "event_property", ")", "if", "property_value", "is", "None", ":", "self", ".", "log", ".", "warning", "(", "u\"Unrecognized `%s` event property.\"", ",", "event_property", ")", "continue", "msg_text_fields", ".", "append", "(", "u\"{property_name}: {property_value}\"", ".", "format", "(", "property_name", "=", "event_property", ",", "property_value", "=", "property_value", ")", ")", "msg_text_fields", ".", "append", "(", "\"```\\n%%%\"", ")", "msg_text", "=", "u\"\\n\"", ".", "join", "(", "msg_text_fields", ")", "else", ":", "# Override when verbosity", "if", "self", ".", "event", ".", "get", "(", "'Message'", ")", ":", "msg_text", "=", "u\"{message}\\n\"", ".", "format", "(", "message", "=", "self", ".", "event", "[", "'Message'", "]", ")", "elif", "self", ".", "event", ".", "get", "(", "'InsertionStrings'", ")", ":", "msg_text", "=", "u\"\\n\"", ".", "join", "(", "[", "i_str", "for", "i_str", "in", "self", ".", "event", "[", "'InsertionStrings'", "]", "if", "i_str", ".", "strip", "(", ")", "]", ")", "if", "self", ".", "notify_list", ":", "msg_text", "+=", "u\"\\n{notify_list}\"", ".", "format", "(", "notify_list", "=", "' '", ".", "join", "(", "[", "\" @\"", "+", "n", "for", "n", "in", "self", ".", "notify_list", "]", ")", ")", "return", "msg_text" ]
Generate the event's body to send to Datadog. Consider `event_format` parameter: * Only use the specified list of event properties. * If unspecified, default to the EventLog's `Message` or `InsertionStrings`.
[ "Generate", "the", "event", "s", "body", "to", "send", "to", "Datadog", "." ]
python
train
dgovil/PySignal
PySignal.py
https://github.com/dgovil/PySignal/blob/72f4ced949f81e5438bd8f15247ef7890e8cc5ff/PySignal.py#L167-L187
def block(self, signals=None, isBlocked=True): """ Sets the block on any provided signals, or to all signals :param signals: defaults to all signals. Accepts either a single string or a list of strings :param isBlocked: the state to set the signal to """ if signals: try: if isinstance(signals, basestring): signals = [signals] except NameError: if isinstance(signals, str): signals = [signals] signals = signals or self.keys() for signal in signals: if signal not in self: raise RuntimeError("Could not find signal matching %s" % signal) self[signal].block(isBlocked)
[ "def", "block", "(", "self", ",", "signals", "=", "None", ",", "isBlocked", "=", "True", ")", ":", "if", "signals", ":", "try", ":", "if", "isinstance", "(", "signals", ",", "basestring", ")", ":", "signals", "=", "[", "signals", "]", "except", "NameError", ":", "if", "isinstance", "(", "signals", ",", "str", ")", ":", "signals", "=", "[", "signals", "]", "signals", "=", "signals", "or", "self", ".", "keys", "(", ")", "for", "signal", "in", "signals", ":", "if", "signal", "not", "in", "self", ":", "raise", "RuntimeError", "(", "\"Could not find signal matching %s\"", "%", "signal", ")", "self", "[", "signal", "]", ".", "block", "(", "isBlocked", ")" ]
Sets the block on any provided signals, or to all signals :param signals: defaults to all signals. Accepts either a single string or a list of strings :param isBlocked: the state to set the signal to
[ "Sets", "the", "block", "on", "any", "provided", "signals", "or", "to", "all", "signals" ]
python
train
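A hedged sketch of blocking a signal via the container method above; the `SignalFactory` container and its `register()`/`connect()`/`emit()` calls are assumed from PySignal's public API.

from PySignal import SignalFactory

def on_update():
    print("update received")

signals = SignalFactory()
signals.register("updated")            # assumed factory registration API
signals["updated"].connect(on_update)

signals.block("updated", True)         # the method from the record
signals["updated"].emit()              # silenced while blocked

signals.block("updated", False)
signals["updated"].emit()              # prints "update received"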
pycontribs/pyrax
pyrax/identity/rax_identity.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/identity/rax_identity.py#L90-L118
def auth_with_token(self, token, tenant_id=None, tenant_name=None): """ If a valid token is already known, this call will use it to generate the service catalog. """ # Implementation note: # Rackspace auth uses one tenant ID for the object_store services and # another for everything else. The one that the user would know is the # 'everything else' ID, so we need to extract the object_store tenant # ID from the initial response, and call the superclass # auth_with_token() method a second time with that tenant ID to get the # object_store endpoints. We can then add these to the initial # endpoints returned by the primary tenant ID, and then continue with # the auth process. main_resp, main_body = self._call_token_auth(token, tenant_id, tenant_name) # Get the swift tenant ID roles = main_body["access"]["user"]["roles"] ostore = [role for role in roles if role["name"] == "object-store:default"] if ostore: ostore_tenant_id = ostore[0]["tenantId"] ostore_resp, ostore_body = self._call_token_auth(token, ostore_tenant_id, None) ostore_cat = ostore_body["access"]["serviceCatalog"] main_cat = main_body["access"]["serviceCatalog"] main_cat.extend(ostore_cat) self._parse_response(main_body) self.authenticated = True
[ "def", "auth_with_token", "(", "self", ",", "token", ",", "tenant_id", "=", "None", ",", "tenant_name", "=", "None", ")", ":", "# Implementation note:", "# Rackspace auth uses one tenant ID for the object_store services and", "# another for everything else. The one that the user would know is the", "# 'everything else' ID, so we need to extract the object_store tenant", "# ID from the initial response, and call the superclass", "# auth_with_token() method a second time with that tenant ID to get the", "# object_store endpoints. We can then add these to the initial", "# endpoints returned by the primary tenant ID, and then continue with", "# the auth process.", "main_resp", ",", "main_body", "=", "self", ".", "_call_token_auth", "(", "token", ",", "tenant_id", ",", "tenant_name", ")", "# Get the swift tenant ID", "roles", "=", "main_body", "[", "\"access\"", "]", "[", "\"user\"", "]", "[", "\"roles\"", "]", "ostore", "=", "[", "role", "for", "role", "in", "roles", "if", "role", "[", "\"name\"", "]", "==", "\"object-store:default\"", "]", "if", "ostore", ":", "ostore_tenant_id", "=", "ostore", "[", "0", "]", "[", "\"tenantId\"", "]", "ostore_resp", ",", "ostore_body", "=", "self", ".", "_call_token_auth", "(", "token", ",", "ostore_tenant_id", ",", "None", ")", "ostore_cat", "=", "ostore_body", "[", "\"access\"", "]", "[", "\"serviceCatalog\"", "]", "main_cat", "=", "main_body", "[", "\"access\"", "]", "[", "\"serviceCatalog\"", "]", "main_cat", ".", "extend", "(", "ostore_cat", ")", "self", ".", "_parse_response", "(", "main_body", ")", "self", ".", "authenticated", "=", "True" ]
If a valid token is already known, this call will use it to generate the service catalog.
[ "If", "a", "valid", "token", "is", "already", "known", "this", "call", "will", "use", "it", "to", "generate", "the", "service", "catalog", "." ]
python
train
PSPC-SPAC-buyandsell/von_anchor
von_anchor/anchor/issuer.py
https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/issuer.py#L111-L126
async def open(self) -> 'Issuer': """ Explicit entry. Perform ancestor opening operations, then synchronize revocation registry to tails tree content. :return: current object """ LOGGER.debug('Issuer.open >>>') await super().open() for path_rr_id in Tails.links(self.dir_tails, self.did): await self._sync_revoc_for_issue(basename(path_rr_id)) LOGGER.debug('Issuer.open <<<') return self
[ "async", "def", "open", "(", "self", ")", "->", "'Issuer'", ":", "LOGGER", ".", "debug", "(", "'Issuer.open >>>'", ")", "await", "super", "(", ")", ".", "open", "(", ")", "for", "path_rr_id", "in", "Tails", ".", "links", "(", "self", ".", "dir_tails", ",", "self", ".", "did", ")", ":", "await", "self", ".", "_sync_revoc_for_issue", "(", "basename", "(", "path_rr_id", ")", ")", "LOGGER", ".", "debug", "(", "'Issuer.open <<<'", ")", "return", "self" ]
Explicit entry. Perform ancestor opening operations, then synchronize revocation registry to tails tree content. :return: current object
[ "Explicit", "entry", ".", "Perform", "ancestor", "opening", "operations", "then", "synchronize", "revocation", "registry", "to", "tails", "tree", "content", "." ]
python
train
EventRegistry/event-registry-python
eventregistry/ReturnInfo.py
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/ReturnInfo.py#L473-L497
def getConf(self): """ return configuration in a json object that stores properties set by each *InfoFlags class """ conf = { "articleInfo": self.articleInfo._getFlags().copy(), "eventInfo": self.eventInfo._getFlags().copy(), "sourceInfo": self.sourceInfo._getFlags().copy(), "categoryInfo": self.categoryInfo._getFlags().copy(), "conceptInfo": self.conceptInfo._getFlags().copy(), "locationInfo": self.locationInfo._getFlags().copy(), "storyInfo": self.storyInfo._getFlags().copy(), "conceptClassInfo": self.articleInfo._getFlags().copy(), "conceptFolderInfo": self.articleInfo._getFlags().copy() } conf["articleInfo"].update(self.articleInfo._getVals()) conf["eventInfo"].update(self.eventInfo._getVals()) conf["sourceInfo"].update(self.sourceInfo._getVals()) conf["categoryInfo"].update(self.categoryInfo._getVals()) conf["conceptInfo"].update(self.conceptInfo._getVals()) conf["locationInfo"].update(self.locationInfo._getVals()) conf["storyInfo"].update(self.storyInfo._getVals()) conf["conceptClassInfo"].update(self.conceptClassInfo._getVals()) conf["conceptFolderInfo"].update(self.conceptFolderInfo._getVals()) return conf
[ "def", "getConf", "(", "self", ")", ":", "conf", "=", "{", "\"articleInfo\"", ":", "self", ".", "articleInfo", ".", "_getFlags", "(", ")", ".", "copy", "(", ")", ",", "\"eventInfo\"", ":", "self", ".", "eventInfo", ".", "_getFlags", "(", ")", ".", "copy", "(", ")", ",", "\"sourceInfo\"", ":", "self", ".", "sourceInfo", ".", "_getFlags", "(", ")", ".", "copy", "(", ")", ",", "\"categoryInfo\"", ":", "self", ".", "categoryInfo", ".", "_getFlags", "(", ")", ".", "copy", "(", ")", ",", "\"conceptInfo\"", ":", "self", ".", "conceptInfo", ".", "_getFlags", "(", ")", ".", "copy", "(", ")", ",", "\"locationInfo\"", ":", "self", ".", "locationInfo", ".", "_getFlags", "(", ")", ".", "copy", "(", ")", ",", "\"storyInfo\"", ":", "self", ".", "storyInfo", ".", "_getFlags", "(", ")", ".", "copy", "(", ")", ",", "\"conceptClassInfo\"", ":", "self", ".", "articleInfo", ".", "_getFlags", "(", ")", ".", "copy", "(", ")", ",", "\"conceptFolderInfo\"", ":", "self", ".", "articleInfo", ".", "_getFlags", "(", ")", ".", "copy", "(", ")", "}", "conf", "[", "\"articleInfo\"", "]", ".", "update", "(", "self", ".", "articleInfo", ".", "_getVals", "(", ")", ")", "conf", "[", "\"eventInfo\"", "]", ".", "update", "(", "self", ".", "eventInfo", ".", "_getVals", "(", ")", ")", "conf", "[", "\"sourceInfo\"", "]", ".", "update", "(", "self", ".", "sourceInfo", ".", "_getVals", "(", ")", ")", "conf", "[", "\"categoryInfo\"", "]", ".", "update", "(", "self", ".", "categoryInfo", ".", "_getVals", "(", ")", ")", "conf", "[", "\"conceptInfo\"", "]", ".", "update", "(", "self", ".", "conceptInfo", ".", "_getVals", "(", ")", ")", "conf", "[", "\"locationInfo\"", "]", ".", "update", "(", "self", ".", "locationInfo", ".", "_getVals", "(", ")", ")", "conf", "[", "\"storyInfo\"", "]", ".", "update", "(", "self", ".", "storyInfo", ".", "_getVals", "(", ")", ")", "conf", "[", "\"conceptClassInfo\"", "]", ".", "update", "(", "self", ".", "conceptClassInfo", ".", "_getVals", "(", ")", ")", "conf", "[", "\"conceptFolderInfo\"", "]", ".", "update", "(", "self", ".", "conceptFolderInfo", ".", "_getVals", "(", ")", ")", "return", "conf" ]
return configuration in a json object that stores properties set by each *InfoFlags class
[ "return", "configuration", "in", "a", "json", "object", "that", "stores", "properties", "set", "by", "each", "*", "InfoFlags", "class" ]
python
train
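A short sketch of reading back the configuration above, assuming a default-constructed ReturnInfo from the eventregistry package.

from eventregistry import ReturnInfo

ret_info = ReturnInfo()        # all *InfoFlags groups left at their defaults
conf = ret_info.getConf()

# One sub-dict per *InfoFlags group, each merging that group's flags and values.
for group, flags in sorted(conf.items()):
    print(group, sorted(flags)[:3], "...")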
gem/oq-engine
openquake/commonlib/logictree.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commonlib/logictree.py#L1012-L1020
def parse_filters(self, branchset_node, uncertainty_type, filters): """ See superclass' method for description and signature specification. Converts "applyToSources" filter value by just splitting it to a list. """ if 'applyToSources' in filters: filters['applyToSources'] = filters['applyToSources'].split() return filters
[ "def", "parse_filters", "(", "self", ",", "branchset_node", ",", "uncertainty_type", ",", "filters", ")", ":", "if", "'applyToSources'", "in", "filters", ":", "filters", "[", "'applyToSources'", "]", "=", "filters", "[", "'applyToSources'", "]", ".", "split", "(", ")", "return", "filters" ]
See superclass' method for description and signature specification. Converts "applyToSources" filter value by just splitting it to a list.
[ "See", "superclass", "method", "for", "description", "and", "signature", "specification", "." ]
python
train
neherlab/treetime
treetime/treeregression.py
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/treeregression.py#L133-L144
def CovInv(self): """ Inverse of the covariance matrix Returns ------- H : (np.array) inverse of the covariance matrix. """ self.recurse(full_matrix=True) return self.tree.root.cinv
[ "def", "CovInv", "(", "self", ")", ":", "self", ".", "recurse", "(", "full_matrix", "=", "True", ")", "return", "self", ".", "tree", ".", "root", ".", "cinv" ]
Inverse of the covariance matrix Returns ------- H : (np.array) inverse of the covariance matrix.
[ "Inverse", "of", "the", "covariance", "matrix" ]
python
test
aws/aws-iot-device-sdk-python
AWSIoTPythonSDK/core/protocol/paho/client.py
https://github.com/aws/aws-iot-device-sdk-python/blob/f0aa2ce34b21dd2e44f4fb7e1d058656aaf2fc62/AWSIoTPythonSDK/core/protocol/paho/client.py#L1305-L1313
def socket(self): """Return the socket or ssl object for this client.""" if self._ssl: if self._useSecuredWebsocket: return self._ssl.getSSLSocket() else: return self._ssl else: return self._sock
[ "def", "socket", "(", "self", ")", ":", "if", "self", ".", "_ssl", ":", "if", "self", ".", "_useSecuredWebsocket", ":", "return", "self", ".", "_ssl", ".", "getSSLSocket", "(", ")", "else", ":", "return", "self", ".", "_ssl", "else", ":", "return", "self", ".", "_sock" ]
Return the socket or ssl object for this client.
[ "Return", "the", "socket", "or", "ssl", "object", "for", "this", "client", "." ]
python
train
wonambi-python/wonambi
wonambi/attr/annotations.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/attr/annotations.py#L353-L643
def import_staging(self, filename, source, rater_name, rec_start, staging_start=None, epoch_length=None, poor=['Artefact'], as_qual=False): """Import staging from an external staging text file. Parameters ---------- filename : str Staging file name. source : str Name of program where staging was made. One of 'domino', 'alice', 'compumedics', 'sandman', 'remlogic' rater_name : str Rater name for imported staging. rec_start : datetime Date and time (year, month, day, hour, minute, second) of recording start. Year is ignored (New Year's Eve celebratory recordings unsupported.) staging_start : datetime (default: None) Date and time of staging start. For use when not provided in staging file. epoch_length : int duration in s of a scoring epoch poor : list of str epochs with stage names in this list will be marked as Poor quality as_qual : bool if True, the staging only be used to mark quality, as per poor """ if as_qual and rater_name not in self.raters: self.parent.statusBar.showMessage('Rater not found.') return clue = None # used in some instances to pick out epochs from other evts idx_clue = None if source in ['remlogic', 'sandman']: encoding = 'ISO-8859-1' else: encoding = 'utf-8' with open(filename, 'r', encoding=encoding) as f: lines = f.readlines() if source == 'domino': for i, line in enumerate(lines): if line[0].isdigit(): idx_first_line = i break if lines[idx_first_line].index(';') > 15: idx_time = (11, 19) idx_stage = slice(25, 26) stage_key = PHYSIP_STAGE_KEY else: idx_time = (0, 8) idx_stage = slice(14, 16) stage_key = DOMINO_STAGE_KEY stage_start = datetime.strptime( lines[idx_first_line][idx_time[0]:idx_time[1]], '%H:%M:%S') stage_day = int(lines[1][12:14]) stage_month = int(lines[1][15:17]) stage_start_for_delta = stage_start.replace(year=1999, month=stage_month, day=stage_day) rec_start_for_delta = rec_start.replace(year=1999) first_second = int((stage_start_for_delta - rec_start_for_delta).total_seconds()) if epoch_length is None: epoch_length = int(lines[5][6:8]) elif source == 'remlogic': clue = 'SLEEP-' # signifies an epoch (as opposed to an event) idx_clue = slice(-18, -6) idx_head = lines.index( next(l for l in lines if 'Time [hh:mm:ss]' in l)) first_line = next(l for l in lines[idx_head:] if clue in l) idx_first_line = lines.index(first_line) stage_start_date = _try_parse_datetime( lines[3][16:lines[3].index('\n')], ('%Y/%m/%d', '%d/%m/%Y')) stage_start_time = None try: stage_start_time = datetime.strptime( first_line[:19], '%Y-%m-%dT%H:%M:%S') except ValueError: cells = first_line.split('\t') for cell in cells: try: stage_start_time = datetime.strptime(cell[-8:], '%I:%M:%S') if cell[1] == 'U': stage_start_time = stage_start_time + timedelta( hours=12) except ValueError: continue if stage_start_time == None: raise ValueError('No valid start time found.') stage_start = datetime.combine(stage_start_date.date(), stage_start_time.time()) first_second = int((stage_start - rec_start).total_seconds()) stage_key = {k[-2:]: v for k, v in REMLOGIC_STAGE_KEY.items()} idx_stage = slice(-6, -4) if epoch_length is None: epoch_length = int(first_line[-3:-1]) elif source == 'alice': stage_start = datetime.strptime(lines[1][2:13], '%I:%M:%S %p') dt = rec_start # best guess in absence of date if lines[1][11:13] == 'pm' and rec_start.hour < 12: dt = rec_start - timedelta(days=1) elif lines[1][11:13] == 'am' and rec_start.hour > 12: dt = rec_start + timedelta stage_start = stage_start.replace(year=dt.year, month=dt.month, day=dt.day) first_second = int((stage_start - 
rec_start).total_seconds()) idx_first_line = 1 lines[-1] += '_' # to fill newline position stage_key = ALICE_STAGE_KEY idx_stage = slice(-3, -1) if epoch_length is None: epoch_length = 30 elif source == 'sandman': stage_start = datetime.strptime(lines[4][12:33], '%d/%m/%Y %I:%M:%S %p') first_second = int((stage_start - rec_start).total_seconds()) idx_first_line = 14 stage_key = SANDMAN_STAGE_KEY idx_stage = slice(-14, -12) if epoch_length is None: epoch_length = 30 elif source == 'compumedics': if staging_start is None: first_second = 0 else: first_second = int(( staging_start - rec_start).total_seconds()) idx_first_line = 0 stage_key = COMPUMEDICS_STAGE_KEY idx_stage = slice(0, 1) if epoch_length is None: epoch_length = 30 elif source == 'deltamed': if staging_start is None: first_second = 0 else: first_second = int(( staging_start - rec_start).total_seconds()) idx_first_line = 0 stage_key = DELTAMED_STAGE_KEY idx_stage = slice(-2, -1) if epoch_length is None: epoch_length = int(lines[0][:lines[0].index('\t')]) elif source == 'prana': stage_start = datetime.strptime(lines[5][:11], '%d %H:%M:%S') # best guess in absence of date dt = rec_start if stage_start.hour > 12 and rec_start.hour < 12: dt = rec_start - timedelta(days=1) elif stage_start.hour < 12 and rec_start.hour > 12: dt = rec_start + timedelta(days=1) stage_start = stage_start.replace(year=dt.year, month=dt.month, day=dt.day) first_second = int((stage_start - rec_start).total_seconds()) idx_first_line = 5 stage_key = PRANA_STAGE_KEY spacer = next(i for i, j in enumerate(lines[5][30:]) \ if j.strip()) idx_stage = slice(30 + spacer, 30 + spacer + 1) if epoch_length is None: idx_epoch_length = None for i,j in enumerate(lines[3]): if j.isdigit(): idx_epoch_length = i, i + lines[3][i:].index(' ') epoch_length = int(lines[3][slice(*idx_epoch_length)]) break if idx_epoch_length is None: epoch_length = 30 else: raise ValueError('Unknown source program for staging file') offset = first_second % epoch_length lg.info('Time offset: ' + str(offset) + ' sec') if rater_name not in self.raters: self.add_rater(rater_name) self.get_rater(rater_name) stages = self.rater.find('stages') if as_qual: for i, one_line in enumerate(lines[idx_first_line:]): if one_line[idx_stage] in poor: epoch_beg = first_second + (i * epoch_length) try: self.set_stage_for_epoch(epoch_beg, 'Poor', attr='quality', save=False) except KeyError: return 1 else: # list is necessary so that it does not remove in place for s in list(stages): stages.remove(s) for i in arange(offset, first_second - epoch_length, epoch_length): epoch = SubElement(stages, 'epoch') start_time = SubElement(epoch, 'epoch_start') epoch_beg = i start_time.text = str(epoch_beg) end_time = SubElement(epoch, 'epoch_end') end_time.text = str(epoch_beg + epoch_length) epoch_stage = SubElement(epoch, 'stage') epoch_stage.text = 'Unknown' quality = SubElement(epoch, 'quality') quality.text = 'Good' idx_epoch = 0 for i, one_line in enumerate(lines[idx_first_line:]): if clue is not None: if clue not in one_line[idx_clue]: continue epoch = SubElement(stages, 'epoch') start_time = SubElement(epoch, 'epoch_start') epoch_beg = first_second + (idx_epoch * epoch_length) start_time.text = str(epoch_beg) end_time = SubElement(epoch, 'epoch_end') end_time.text = str(epoch_beg + epoch_length) epoch_stage = SubElement(epoch, 'stage') try: key = one_line[idx_stage] one_stage = stage_key[key] except KeyError: one_stage = 'Unknown' lg.info('Stage not recognized: ' + key) epoch_stage.text = one_stage quality = SubElement(epoch, 
'quality') if one_stage in poor: quality.text = 'Poor' else: quality.text = 'Good' idx_epoch += 1 self.save()
[ "def", "import_staging", "(", "self", ",", "filename", ",", "source", ",", "rater_name", ",", "rec_start", ",", "staging_start", "=", "None", ",", "epoch_length", "=", "None", ",", "poor", "=", "[", "'Artefact'", "]", ",", "as_qual", "=", "False", ")", ":", "if", "as_qual", "and", "rater_name", "not", "in", "self", ".", "raters", ":", "self", ".", "parent", ".", "statusBar", ".", "showMessage", "(", "'Rater not found.'", ")", "return", "clue", "=", "None", "# used in some instances to pick out epochs from other evts", "idx_clue", "=", "None", "if", "source", "in", "[", "'remlogic'", ",", "'sandman'", "]", ":", "encoding", "=", "'ISO-8859-1'", "else", ":", "encoding", "=", "'utf-8'", "with", "open", "(", "filename", ",", "'r'", ",", "encoding", "=", "encoding", ")", "as", "f", ":", "lines", "=", "f", ".", "readlines", "(", ")", "if", "source", "==", "'domino'", ":", "for", "i", ",", "line", "in", "enumerate", "(", "lines", ")", ":", "if", "line", "[", "0", "]", ".", "isdigit", "(", ")", ":", "idx_first_line", "=", "i", "break", "if", "lines", "[", "idx_first_line", "]", ".", "index", "(", "';'", ")", ">", "15", ":", "idx_time", "=", "(", "11", ",", "19", ")", "idx_stage", "=", "slice", "(", "25", ",", "26", ")", "stage_key", "=", "PHYSIP_STAGE_KEY", "else", ":", "idx_time", "=", "(", "0", ",", "8", ")", "idx_stage", "=", "slice", "(", "14", ",", "16", ")", "stage_key", "=", "DOMINO_STAGE_KEY", "stage_start", "=", "datetime", ".", "strptime", "(", "lines", "[", "idx_first_line", "]", "[", "idx_time", "[", "0", "]", ":", "idx_time", "[", "1", "]", "]", ",", "'%H:%M:%S'", ")", "stage_day", "=", "int", "(", "lines", "[", "1", "]", "[", "12", ":", "14", "]", ")", "stage_month", "=", "int", "(", "lines", "[", "1", "]", "[", "15", ":", "17", "]", ")", "stage_start_for_delta", "=", "stage_start", ".", "replace", "(", "year", "=", "1999", ",", "month", "=", "stage_month", ",", "day", "=", "stage_day", ")", "rec_start_for_delta", "=", "rec_start", ".", "replace", "(", "year", "=", "1999", ")", "first_second", "=", "int", "(", "(", "stage_start_for_delta", "-", "rec_start_for_delta", ")", ".", "total_seconds", "(", ")", ")", "if", "epoch_length", "is", "None", ":", "epoch_length", "=", "int", "(", "lines", "[", "5", "]", "[", "6", ":", "8", "]", ")", "elif", "source", "==", "'remlogic'", ":", "clue", "=", "'SLEEP-'", "# signifies an epoch (as opposed to an event)", "idx_clue", "=", "slice", "(", "-", "18", ",", "-", "6", ")", "idx_head", "=", "lines", ".", "index", "(", "next", "(", "l", "for", "l", "in", "lines", "if", "'Time [hh:mm:ss]'", "in", "l", ")", ")", "first_line", "=", "next", "(", "l", "for", "l", "in", "lines", "[", "idx_head", ":", "]", "if", "clue", "in", "l", ")", "idx_first_line", "=", "lines", ".", "index", "(", "first_line", ")", "stage_start_date", "=", "_try_parse_datetime", "(", "lines", "[", "3", "]", "[", "16", ":", "lines", "[", "3", "]", ".", "index", "(", "'\\n'", ")", "]", ",", "(", "'%Y/%m/%d'", ",", "'%d/%m/%Y'", ")", ")", "stage_start_time", "=", "None", "try", ":", "stage_start_time", "=", "datetime", ".", "strptime", "(", "first_line", "[", ":", "19", "]", ",", "'%Y-%m-%dT%H:%M:%S'", ")", "except", "ValueError", ":", "cells", "=", "first_line", ".", "split", "(", "'\\t'", ")", "for", "cell", "in", "cells", ":", "try", ":", "stage_start_time", "=", "datetime", ".", "strptime", "(", "cell", "[", "-", "8", ":", "]", ",", "'%I:%M:%S'", ")", "if", "cell", "[", "1", "]", "==", "'U'", ":", "stage_start_time", "=", "stage_start_time", "+", "timedelta", "(", "hours", "=", 
"12", ")", "except", "ValueError", ":", "continue", "if", "stage_start_time", "==", "None", ":", "raise", "ValueError", "(", "'No valid start time found.'", ")", "stage_start", "=", "datetime", ".", "combine", "(", "stage_start_date", ".", "date", "(", ")", ",", "stage_start_time", ".", "time", "(", ")", ")", "first_second", "=", "int", "(", "(", "stage_start", "-", "rec_start", ")", ".", "total_seconds", "(", ")", ")", "stage_key", "=", "{", "k", "[", "-", "2", ":", "]", ":", "v", "for", "k", ",", "v", "in", "REMLOGIC_STAGE_KEY", ".", "items", "(", ")", "}", "idx_stage", "=", "slice", "(", "-", "6", ",", "-", "4", ")", "if", "epoch_length", "is", "None", ":", "epoch_length", "=", "int", "(", "first_line", "[", "-", "3", ":", "-", "1", "]", ")", "elif", "source", "==", "'alice'", ":", "stage_start", "=", "datetime", ".", "strptime", "(", "lines", "[", "1", "]", "[", "2", ":", "13", "]", ",", "'%I:%M:%S %p'", ")", "dt", "=", "rec_start", "# best guess in absence of date", "if", "lines", "[", "1", "]", "[", "11", ":", "13", "]", "==", "'pm'", "and", "rec_start", ".", "hour", "<", "12", ":", "dt", "=", "rec_start", "-", "timedelta", "(", "days", "=", "1", ")", "elif", "lines", "[", "1", "]", "[", "11", ":", "13", "]", "==", "'am'", "and", "rec_start", ".", "hour", ">", "12", ":", "dt", "=", "rec_start", "+", "timedelta", "stage_start", "=", "stage_start", ".", "replace", "(", "year", "=", "dt", ".", "year", ",", "month", "=", "dt", ".", "month", ",", "day", "=", "dt", ".", "day", ")", "first_second", "=", "int", "(", "(", "stage_start", "-", "rec_start", ")", ".", "total_seconds", "(", ")", ")", "idx_first_line", "=", "1", "lines", "[", "-", "1", "]", "+=", "'_'", "# to fill newline position", "stage_key", "=", "ALICE_STAGE_KEY", "idx_stage", "=", "slice", "(", "-", "3", ",", "-", "1", ")", "if", "epoch_length", "is", "None", ":", "epoch_length", "=", "30", "elif", "source", "==", "'sandman'", ":", "stage_start", "=", "datetime", ".", "strptime", "(", "lines", "[", "4", "]", "[", "12", ":", "33", "]", ",", "'%d/%m/%Y %I:%M:%S %p'", ")", "first_second", "=", "int", "(", "(", "stage_start", "-", "rec_start", ")", ".", "total_seconds", "(", ")", ")", "idx_first_line", "=", "14", "stage_key", "=", "SANDMAN_STAGE_KEY", "idx_stage", "=", "slice", "(", "-", "14", ",", "-", "12", ")", "if", "epoch_length", "is", "None", ":", "epoch_length", "=", "30", "elif", "source", "==", "'compumedics'", ":", "if", "staging_start", "is", "None", ":", "first_second", "=", "0", "else", ":", "first_second", "=", "int", "(", "(", "staging_start", "-", "rec_start", ")", ".", "total_seconds", "(", ")", ")", "idx_first_line", "=", "0", "stage_key", "=", "COMPUMEDICS_STAGE_KEY", "idx_stage", "=", "slice", "(", "0", ",", "1", ")", "if", "epoch_length", "is", "None", ":", "epoch_length", "=", "30", "elif", "source", "==", "'deltamed'", ":", "if", "staging_start", "is", "None", ":", "first_second", "=", "0", "else", ":", "first_second", "=", "int", "(", "(", "staging_start", "-", "rec_start", ")", ".", "total_seconds", "(", ")", ")", "idx_first_line", "=", "0", "stage_key", "=", "DELTAMED_STAGE_KEY", "idx_stage", "=", "slice", "(", "-", "2", ",", "-", "1", ")", "if", "epoch_length", "is", "None", ":", "epoch_length", "=", "int", "(", "lines", "[", "0", "]", "[", ":", "lines", "[", "0", "]", ".", "index", "(", "'\\t'", ")", "]", ")", "elif", "source", "==", "'prana'", ":", "stage_start", "=", "datetime", ".", "strptime", "(", "lines", "[", "5", "]", "[", ":", "11", "]", ",", "'%d %H:%M:%S'", ")", "# best guess in absence of 
date", "dt", "=", "rec_start", "if", "stage_start", ".", "hour", ">", "12", "and", "rec_start", ".", "hour", "<", "12", ":", "dt", "=", "rec_start", "-", "timedelta", "(", "days", "=", "1", ")", "elif", "stage_start", ".", "hour", "<", "12", "and", "rec_start", ".", "hour", ">", "12", ":", "dt", "=", "rec_start", "+", "timedelta", "(", "days", "=", "1", ")", "stage_start", "=", "stage_start", ".", "replace", "(", "year", "=", "dt", ".", "year", ",", "month", "=", "dt", ".", "month", ",", "day", "=", "dt", ".", "day", ")", "first_second", "=", "int", "(", "(", "stage_start", "-", "rec_start", ")", ".", "total_seconds", "(", ")", ")", "idx_first_line", "=", "5", "stage_key", "=", "PRANA_STAGE_KEY", "spacer", "=", "next", "(", "i", "for", "i", ",", "j", "in", "enumerate", "(", "lines", "[", "5", "]", "[", "30", ":", "]", ")", "if", "j", ".", "strip", "(", ")", ")", "idx_stage", "=", "slice", "(", "30", "+", "spacer", ",", "30", "+", "spacer", "+", "1", ")", "if", "epoch_length", "is", "None", ":", "idx_epoch_length", "=", "None", "for", "i", ",", "j", "in", "enumerate", "(", "lines", "[", "3", "]", ")", ":", "if", "j", ".", "isdigit", "(", ")", ":", "idx_epoch_length", "=", "i", ",", "i", "+", "lines", "[", "3", "]", "[", "i", ":", "]", ".", "index", "(", "' '", ")", "epoch_length", "=", "int", "(", "lines", "[", "3", "]", "[", "slice", "(", "*", "idx_epoch_length", ")", "]", ")", "break", "if", "idx_epoch_length", "is", "None", ":", "epoch_length", "=", "30", "else", ":", "raise", "ValueError", "(", "'Unknown source program for staging file'", ")", "offset", "=", "first_second", "%", "epoch_length", "lg", ".", "info", "(", "'Time offset: '", "+", "str", "(", "offset", ")", "+", "' sec'", ")", "if", "rater_name", "not", "in", "self", ".", "raters", ":", "self", ".", "add_rater", "(", "rater_name", ")", "self", ".", "get_rater", "(", "rater_name", ")", "stages", "=", "self", ".", "rater", ".", "find", "(", "'stages'", ")", "if", "as_qual", ":", "for", "i", ",", "one_line", "in", "enumerate", "(", "lines", "[", "idx_first_line", ":", "]", ")", ":", "if", "one_line", "[", "idx_stage", "]", "in", "poor", ":", "epoch_beg", "=", "first_second", "+", "(", "i", "*", "epoch_length", ")", "try", ":", "self", ".", "set_stage_for_epoch", "(", "epoch_beg", ",", "'Poor'", ",", "attr", "=", "'quality'", ",", "save", "=", "False", ")", "except", "KeyError", ":", "return", "1", "else", ":", "# list is necessary so that it does not remove in place", "for", "s", "in", "list", "(", "stages", ")", ":", "stages", ".", "remove", "(", "s", ")", "for", "i", "in", "arange", "(", "offset", ",", "first_second", "-", "epoch_length", ",", "epoch_length", ")", ":", "epoch", "=", "SubElement", "(", "stages", ",", "'epoch'", ")", "start_time", "=", "SubElement", "(", "epoch", ",", "'epoch_start'", ")", "epoch_beg", "=", "i", "start_time", ".", "text", "=", "str", "(", "epoch_beg", ")", "end_time", "=", "SubElement", "(", "epoch", ",", "'epoch_end'", ")", "end_time", ".", "text", "=", "str", "(", "epoch_beg", "+", "epoch_length", ")", "epoch_stage", "=", "SubElement", "(", "epoch", ",", "'stage'", ")", "epoch_stage", ".", "text", "=", "'Unknown'", "quality", "=", "SubElement", "(", "epoch", ",", "'quality'", ")", "quality", ".", "text", "=", "'Good'", "idx_epoch", "=", "0", "for", "i", ",", "one_line", "in", "enumerate", "(", "lines", "[", "idx_first_line", ":", "]", ")", ":", "if", "clue", "is", "not", "None", ":", "if", "clue", "not", "in", "one_line", "[", "idx_clue", "]", ":", "continue", "epoch", "=", "SubElement", "(", 
"stages", ",", "'epoch'", ")", "start_time", "=", "SubElement", "(", "epoch", ",", "'epoch_start'", ")", "epoch_beg", "=", "first_second", "+", "(", "idx_epoch", "*", "epoch_length", ")", "start_time", ".", "text", "=", "str", "(", "epoch_beg", ")", "end_time", "=", "SubElement", "(", "epoch", ",", "'epoch_end'", ")", "end_time", ".", "text", "=", "str", "(", "epoch_beg", "+", "epoch_length", ")", "epoch_stage", "=", "SubElement", "(", "epoch", ",", "'stage'", ")", "try", ":", "key", "=", "one_line", "[", "idx_stage", "]", "one_stage", "=", "stage_key", "[", "key", "]", "except", "KeyError", ":", "one_stage", "=", "'Unknown'", "lg", ".", "info", "(", "'Stage not recognized: '", "+", "key", ")", "epoch_stage", ".", "text", "=", "one_stage", "quality", "=", "SubElement", "(", "epoch", ",", "'quality'", ")", "if", "one_stage", "in", "poor", ":", "quality", ".", "text", "=", "'Poor'", "else", ":", "quality", ".", "text", "=", "'Good'", "idx_epoch", "+=", "1", "self", ".", "save", "(", ")" ]
Import staging from an external staging text file.

Parameters
----------
filename : str
    Staging file name.
source : str
    Name of program where staging was made. One of 'domino', 'alice',
    'compumedics', 'sandman', 'remlogic'
rater_name : str
    Rater name for imported staging.
rec_start : datetime
    Date and time (year, month, day, hour, minute, second) of recording
    start. Year is ignored (New Year's Eve celebratory recordings
    unsupported.)
staging_start : datetime (default: None)
    Date and time of staging start. For use when not provided in
    staging file.
epoch_length : int
    Duration in s of a scoring epoch.
poor : list of str
    Epochs with stage names in this list will be marked as Poor quality.
as_qual : bool
    If True, the staging will only be used to mark quality, as per poor.
[ "Import", "staging", "from", "an", "external", "staging", "text", "file", "." ]
python
train
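The epoch bookkeeping in the staging-import code above is hard to follow from the flattened token list, so here is a standalone sketch of just that arithmetic. The timestamps and epoch length below are made-up illustrative values, not taken from any recording, and this is not the library call itself.

from datetime import datetime

# Hypothetical recording and staging start times, 30 s scoring epochs.
rec_start = datetime(2017, 3, 10, 22, 15, 0)
stage_start = datetime(2017, 3, 10, 22, 16, 10)
epoch_length = 30

# Seconds between recording start and the first staged epoch,
# and the sub-epoch offset that the importer logs.
first_second = int((stage_start - rec_start).total_seconds())
offset = first_second % epoch_length
print(first_second, offset)  # 70 10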
pgjones/quart
quart/ctx.py
https://github.com/pgjones/quart/blob/7cb2d3bd98e8746025764f2b933abc12041fa175/quart/ctx.py#L183-L200
def after_this_request(func: Callable) -> Callable:
    """Schedule the func to be called after the current request.

    This is useful in situations whereby you want an after request
    function for a specific route or circumstance only, for example,

    .. code-block:: python

        def index():
            @after_this_request
            def set_cookie(response):
                response.set_cookie('special', 'value')
                return response
            ...

    """
    _request_ctx_stack.top._after_request_functions.append(func)
    return func
[ "def", "after_this_request", "(", "func", ":", "Callable", ")", "->", "Callable", ":", "_request_ctx_stack", ".", "top", ".", "_after_request_functions", ".", "append", "(", "func", ")", "return", "func" ]
Schedule the func to be called after the current request.

This is useful in situations whereby you want an after request
function for a specific route or circumstance only, for example,

.. code-block:: python

    def index():
        @after_this_request
        def set_cookie(response):
            response.set_cookie('special', 'value')
            return response
        ...
[ "Schedule", "the", "func", "to", "be", "called", "after", "the", "current", "request", "." ]
python
train
jart/fabulous
fabulous/gotham.py
https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/gotham.py#L108-L121
def main():
    """I provide a command-line interface for this module
    """
    print()
    print("-~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~-")
    print(lorem_gotham_title().center(50))
    print("-~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~-")
    print()
    poem = lorem_gotham()
    for n in range(16):
        if n in (4, 8, 12):
            print()
        print(next(poem))
    print()
[ "def", "main", "(", ")", ":", "print", "(", ")", "print", "(", "\"-~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~-\"", ")", "print", "(", "lorem_gotham_title", "(", ")", ".", "center", "(", "50", ")", ")", "print", "(", "\"-~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~-\"", ")", "print", "(", ")", "poem", "=", "lorem_gotham", "(", ")", "for", "n", "in", "range", "(", "16", ")", ":", "if", "n", "in", "(", "4", ",", "8", ",", "12", ")", ":", "print", "(", ")", "print", "(", "next", "(", "poem", ")", ")", "print", "(", ")" ]
I provide a command-line interface for this module
[ "I", "provide", "a", "command", "-", "line", "interface", "for", "this", "module" ]
python
train
eddiejessup/spatious
spatious/vector.py
https://github.com/eddiejessup/spatious/blob/b7ae91bec029e85a45a7f303ee184076433723cd/spatious/vector.py#L79-L98
def vector_unit_nullrand(v, rng=None):
    """Return unit vectors.
    Any null vectors are mapped to a uniformly picked unit vector.

    Parameters
    ----------
    v: array, shape (a1, a2, ..., d)
        Cartesian vectors, with last axis indexing the dimension.

    Returns
    -------
    v_new: array, shape of v
    """
    if v.size == 0:
        return v
    mag = vector_mag(v)
    v_new = v.copy()
    v_new[mag == 0.0] = sphere_pick(v.shape[-1], (mag == 0.0).sum(), rng)
    v_new[mag > 0.0] /= mag[mag > 0.0][..., np.newaxis]
    return v_new
[ "def", "vector_unit_nullrand", "(", "v", ",", "rng", "=", "None", ")", ":", "if", "v", ".", "size", "==", "0", ":", "return", "v", "mag", "=", "vector_mag", "(", "v", ")", "v_new", "=", "v", ".", "copy", "(", ")", "v_new", "[", "mag", "==", "0.0", "]", "=", "sphere_pick", "(", "v", ".", "shape", "[", "-", "1", "]", ",", "(", "mag", "==", "0.0", ")", ".", "sum", "(", ")", ",", "rng", ")", "v_new", "[", "mag", ">", "0.0", "]", "/=", "mag", "[", "mag", ">", "0.0", "]", "[", "...", ",", "np", ".", "newaxis", "]", "return", "v_new" ]
Return unit vectors.
Any null vectors are mapped to a uniformly picked unit vector.

Parameters
----------
v: array, shape (a1, a2, ..., d)
    Cartesian vectors, with last axis indexing the dimension.

Returns
-------
v_new: array, shape of v
[ "Return", "unit", "vectors", ".", "Any", "null", "vectors", "are", "mapped", "to", "a", "uniformly", "picked", "unit", "vector", "." ]
python
train
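A rough standalone sketch of the same idea using only NumPy: vector_mag and sphere_pick from the spatious module are replaced here by numpy.linalg.norm and a normalized Gaussian draw (one common way to pick a uniform random direction), so this illustrates the behaviour rather than the library API.

import numpy as np

rng = np.random.default_rng(0)
v = np.array([[3.0, 4.0], [0.0, 0.0]])  # second row is a null vector

mag = np.linalg.norm(v, axis=-1)
v_new = v.copy()
zero = mag == 0.0
# Replace null rows with uniformly random unit vectors.
rand = rng.normal(size=(int(zero.sum()), v.shape[-1]))
v_new[zero] = rand / np.linalg.norm(rand, axis=-1, keepdims=True)
# Normalise the remaining rows.
v_new[~zero] /= mag[~zero][..., np.newaxis]
print(v_new)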
ranaroussi/qtpylib
qtpylib/algo.py
https://github.com/ranaroussi/qtpylib/blob/0dbbc465fafd9cb9b0f4d10e1e07fae4e15032dd/qtpylib/algo.py#L538-L625
def order(self, signal, symbol, quantity=0, **kwargs):
    """ Send an order for the selected instrument

    :Parameters:

        direction : string
            Order Type (BUY/SELL, EXIT/FLATTEN)
        symbol : string
            instrument symbol
        quantity : int
            Order quantity

    :Optional:

        limit_price : float
            In case of a LIMIT order, this is the LIMIT PRICE
        expiry : int
            Cancel this order if not filled after *n* seconds
            (default 60 seconds)
        order_type : string
            Type of order: Market (default),
            LIMIT (default when limit_price is passed),
            MODIFY (requires passing an orderId)
        orderId : int
            If modifying an order, the order id of the modified order
        target : float
            Target (exit) price
        initial_stop : float
            Price to set hard stop
        stop_limit: bool
            Flag to indicate if the stop should be STOP or STOP LIMIT.
            Default is ``False`` (STOP)
        trail_stop_at : float
            Price at which to start trailing the stop
        trail_stop_type : string
            Type of trailing stop offset (amount, percent).
            Default is ``percent``
        trail_stop_by : float
            Offset of trailing stop distance from current price
        fillorkill: bool
            Fill entire quantity or none at all
        iceberg: bool
            Is this an iceberg (hidden) order
        tif: str
            Time in force (DAY, GTC, IOC, GTD). default is ``DAY``
    """
    self.log_algo.debug('ORDER: %s %4d %s %s', signal, quantity, symbol, kwargs)

    if signal.upper() == "EXIT" or signal.upper() == "FLATTEN":
        position = self.get_positions(symbol)
        if position['position'] == 0:
            return

        kwargs['symbol'] = symbol
        kwargs['quantity'] = abs(position['position'])
        kwargs['direction'] = "BUY" if position['position'] < 0 else "SELL"

        # print("EXIT", kwargs)

        try:
            self.record({symbol + '_POSITION': 0})
        except Exception as e:
            pass

        if not self.backtest:
            self._create_order(**kwargs)

    else:
        if quantity == 0:
            return

        kwargs['symbol'] = symbol
        kwargs['quantity'] = abs(quantity)
        kwargs['direction'] = signal.upper()

        # print(signal.upper(), kwargs)

        # record
        try:
            quantity = abs(quantity)
            if kwargs['direction'] != "BUY":
                quantity = -quantity
            self.record({symbol + '_POSITION': quantity})
        except Exception as e:
            pass

        if not self.backtest:
            self._create_order(**kwargs)
[ "def", "order", "(", "self", ",", "signal", ",", "symbol", ",", "quantity", "=", "0", ",", "*", "*", "kwargs", ")", ":", "self", ".", "log_algo", ".", "debug", "(", "'ORDER: %s %4d %s %s'", ",", "signal", ",", "quantity", ",", "symbol", ",", "kwargs", ")", "if", "signal", ".", "upper", "(", ")", "==", "\"EXIT\"", "or", "signal", ".", "upper", "(", ")", "==", "\"FLATTEN\"", ":", "position", "=", "self", ".", "get_positions", "(", "symbol", ")", "if", "position", "[", "'position'", "]", "==", "0", ":", "return", "kwargs", "[", "'symbol'", "]", "=", "symbol", "kwargs", "[", "'quantity'", "]", "=", "abs", "(", "position", "[", "'position'", "]", ")", "kwargs", "[", "'direction'", "]", "=", "\"BUY\"", "if", "position", "[", "'position'", "]", "<", "0", "else", "\"SELL\"", "# print(\"EXIT\", kwargs)", "try", ":", "self", ".", "record", "(", "{", "symbol", "+", "'_POSITION'", ":", "0", "}", ")", "except", "Exception", "as", "e", ":", "pass", "if", "not", "self", ".", "backtest", ":", "self", ".", "_create_order", "(", "*", "*", "kwargs", ")", "else", ":", "if", "quantity", "==", "0", ":", "return", "kwargs", "[", "'symbol'", "]", "=", "symbol", "kwargs", "[", "'quantity'", "]", "=", "abs", "(", "quantity", ")", "kwargs", "[", "'direction'", "]", "=", "signal", ".", "upper", "(", ")", "# print(signal.upper(), kwargs)", "# record", "try", ":", "quantity", "=", "abs", "(", "quantity", ")", "if", "kwargs", "[", "'direction'", "]", "!=", "\"BUY\"", ":", "quantity", "=", "-", "quantity", "self", ".", "record", "(", "{", "symbol", "+", "'_POSITION'", ":", "quantity", "}", ")", "except", "Exception", "as", "e", ":", "pass", "if", "not", "self", ".", "backtest", ":", "self", ".", "_create_order", "(", "*", "*", "kwargs", ")" ]
Send an order for the selected instrument

:Parameters:

    direction : string
        Order Type (BUY/SELL, EXIT/FLATTEN)
    symbol : string
        instrument symbol
    quantity : int
        Order quantity

:Optional:

    limit_price : float
        In case of a LIMIT order, this is the LIMIT PRICE
    expiry : int
        Cancel this order if not filled after *n* seconds (default 60 seconds)
    order_type : string
        Type of order: Market (default), LIMIT (default when limit_price is passed),
        MODIFY (requires passing an orderId)
    orderId : int
        If modifying an order, the order id of the modified order
    target : float
        Target (exit) price
    initial_stop : float
        Price to set hard stop
    stop_limit: bool
        Flag to indicate if the stop should be STOP or STOP LIMIT.
        Default is ``False`` (STOP)
    trail_stop_at : float
        Price at which to start trailing the stop
    trail_stop_type : string
        Type of trailing stop offset (amount, percent). Default is ``percent``
    trail_stop_by : float
        Offset of trailing stop distance from current price
    fillorkill: bool
        Fill entire quantity or none at all
    iceberg: bool
        Is this an iceberg (hidden) order
    tif: str
        Time in force (DAY, GTC, IOC, GTD). default is ``DAY``
[ "Send", "an", "order", "for", "the", "selected", "instrument" ]
python
train
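One detail worth pulling out of the method above is the sign convention used when recording positions: buys are stored positive, everything else negative. A minimal standalone sketch of just that rule, written here for illustration and not part of the qtpylib API:

def signed_position(direction, quantity):
    """Positive for BUY, negative otherwise -- mirrors what order() records."""
    quantity = abs(quantity)
    return quantity if direction.upper() == "BUY" else -quantity

print(signed_position("BUY", 2), signed_position("SELL", 2))  # 2 -2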
kwikteam/phy
phy/cluster/views/correlogram.py
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/views/correlogram.py#L56-L67
def set_bin_window(self, bin_size=None, window_size=None):
    """Set the bin and window sizes."""
    bin_size = bin_size or self.bin_size
    window_size = window_size or self.window_size
    assert 1e-6 < bin_size < 1e3
    assert 1e-6 < window_size < 1e3
    assert bin_size < window_size
    self.bin_size = bin_size
    self.window_size = window_size
    # Set the status message.
    b, w = self.bin_size * 1000, self.window_size * 1000
    self.set_status('Bin: {:.1f} ms. Window: {:.1f} ms.'.format(b, w))
[ "def", "set_bin_window", "(", "self", ",", "bin_size", "=", "None", ",", "window_size", "=", "None", ")", ":", "bin_size", "=", "bin_size", "or", "self", ".", "bin_size", "window_size", "=", "window_size", "or", "self", ".", "window_size", "assert", "1e-6", "<", "bin_size", "<", "1e3", "assert", "1e-6", "<", "window_size", "<", "1e3", "assert", "bin_size", "<", "window_size", "self", ".", "bin_size", "=", "bin_size", "self", ".", "window_size", "=", "window_size", "# Set the status message.", "b", ",", "w", "=", "self", ".", "bin_size", "*", "1000", ",", "self", ".", "window_size", "*", "1000", "self", ".", "set_status", "(", "'Bin: {:.1f} ms. Window: {:.1f} ms.'", ".", "format", "(", "b", ",", "w", ")", ")" ]
Set the bin and window sizes.
[ "Set", "the", "bin", "and", "window", "sizes", "." ]
python
train
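The bin/window validation above is easy to restate outside the view class; a minimal standalone sketch assuming the same bounds (values in seconds, status message omitted):

def check_bin_window(bin_size, window_size):
    # Both sizes must be positive, sane, and the bin strictly smaller than the window.
    assert 1e-6 < bin_size < 1e3
    assert 1e-6 < window_size < 1e3
    assert bin_size < window_size

check_bin_window(bin_size=1e-3, window_size=50e-3)  # 1 ms bins, 50 ms window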
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L14370-L14384
def unload(filename):
    """
    Unload a SPICE kernel.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/unload_c.html

    :param filename: The name of a kernel to unload.
    :type filename: str
    """
    if isinstance(filename, list):
        for f in filename:
            libspice.unload_c(stypes.stringToCharP(f))
        return
    filename = stypes.stringToCharP(filename)
    libspice.unload_c(filename)
[ "def", "unload", "(", "filename", ")", ":", "if", "isinstance", "(", "filename", ",", "list", ")", ":", "for", "f", "in", "filename", ":", "libspice", ".", "unload_c", "(", "stypes", ".", "stringToCharP", "(", "f", ")", ")", "return", "filename", "=", "stypes", ".", "stringToCharP", "(", "filename", ")", "libspice", ".", "unload_c", "(", "filename", ")" ]
Unload a SPICE kernel.

http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/unload_c.html

:param filename: The name of a kernel to unload.
:type filename: str
[ "Unload", "a", "SPICE", "kernel", "." ]
python
train
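A minimal usage sketch, assuming SpiceyPy is installed and a kernel file exists locally; the file name below is only an example, and furnsh is the documented counterpart for loading:

import spiceypy as spice

spice.furnsh('naif0012.tls')   # load a leapseconds kernel (hypothetical local path)
spice.unload('naif0012.tls')   # unload it again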
mlavin/argyle
argyle/postgres.py
https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/postgres.py#L66-L70
def change_db_user_password(username, password):
    """Change a db user's password."""

    sql = "ALTER USER %s WITH PASSWORD '%s'" % (username, password)
    excute_query(sql, use_sudo=True)
[ "def", "change_db_user_password", "(", "username", ",", "password", ")", ":", "sql", "=", "\"ALTER USER %s WITH PASSWORD '%s'\"", "%", "(", "username", ",", "password", ")", "excute_query", "(", "sql", ",", "use_sudo", "=", "True", ")" ]
Change a db user's password.
[ "Change", "a", "db", "user", "s", "password", "." ]
python
train
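For reference, this is the statement the task interpolates before handing it to the query helper; the credentials below are placeholders, and no database connection or fabric environment is involved:

username, password = 'report_user', 's3cret'  # hypothetical credentials
sql = "ALTER USER %s WITH PASSWORD '%s'" % (username, password)
print(sql)  # ALTER USER report_user WITH PASSWORD 's3cret'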
pybel/pybel
src/pybel/parser/modifiers/fusion.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/parser/modifiers/fusion.py#L89-L106
def get_legacy_fusion_langauge(identifier: ParserElement, reference: str) -> ParserElement:
    """Build a legacy fusion parser."""
    break_start = (ppc.integer | '?').setParseAction(_fusion_break_handler_wrapper(reference, start=True))
    break_end = (ppc.integer | '?').setParseAction(_fusion_break_handler_wrapper(reference, start=False))

    res = (
        identifier(PARTNER_5P) + WCW + fusion_tags + nest(
            identifier(PARTNER_3P) +
            Optional(WCW + Group(break_start)(RANGE_5P) + WCW + Group(break_end)(RANGE_3P))
        )
    )

    res.setParseAction(_fusion_legacy_handler)
    return res
[ "def", "get_legacy_fusion_langauge", "(", "identifier", ":", "ParserElement", ",", "reference", ":", "str", ")", "->", "ParserElement", ":", "break_start", "=", "(", "ppc", ".", "integer", "|", "'?'", ")", ".", "setParseAction", "(", "_fusion_break_handler_wrapper", "(", "reference", ",", "start", "=", "True", ")", ")", "break_end", "=", "(", "ppc", ".", "integer", "|", "'?'", ")", ".", "setParseAction", "(", "_fusion_break_handler_wrapper", "(", "reference", ",", "start", "=", "False", ")", ")", "res", "=", "(", "identifier", "(", "PARTNER_5P", ")", "+", "WCW", "+", "fusion_tags", "+", "nest", "(", "identifier", "(", "PARTNER_3P", ")", "+", "Optional", "(", "WCW", "+", "Group", "(", "break_start", ")", "(", "RANGE_5P", ")", "+", "WCW", "+", "Group", "(", "break_end", ")", "(", "RANGE_3P", ")", ")", ")", ")", "res", ".", "setParseAction", "(", "_fusion_legacy_handler", ")", "return", "res" ]
Build a legacy fusion parser.
[ "Build", "a", "legacy", "fusion", "parser", "." ]
python
train
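The breakpoint fragment (ppc.integer | '?') is the only piece of the grammar above that parses cleanly in isolation; a small pyparsing sketch of just that fragment, separate from the full pybel grammar and its parse actions:

from pyparsing import pyparsing_common as ppc

# An integer breakpoint, or '?' for an unknown breakpoint.
break_point = ppc.integer | '?'
print(break_point.parseString('312')[0])  # 312
print(break_point.parseString('?')[0])    # ?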