Dataset schema (column name, value type, observed range):

repo              stringlengths   7-54
path              stringlengths   4-192
url               stringlengths   87-284
code              stringlengths   78-104k
code_tokens       list
docstring         stringlengths   1-46.9k
docstring_tokens  list
language          stringclasses   1 value
partition         stringclasses   3 values
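Each record below carries these nine fields. A minimal sketch of reading such records from disk, assuming a CodeSearchNet-style JSONL export (one JSON object per line); the file name is a placeholder:

import json

# Hypothetical shard name -- substitute the actual export file.
with open("python_train_0.jsonl") as fh:
    for line in fh:
        row = json.loads(line)
        print(row["repo"], row["partition"])       # e.g. "joke2k/faker", "train"
        print(row["docstring"].splitlines()[0])    # first line of the docstring
        break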
joke2k/faker
faker/providers/address/ko_KR/__init__.py
https://github.com/joke2k/faker/blob/965824b61132e52d92d1a6ce470396dbbe01c96c/faker/providers/address/ko_KR/__init__.py#L274-L279
def land_address(self):
    """
    :example 세종특별자치시 어진동 507
    """
    pattern = self.random_element(self.land_address_formats)
    return self.generator.parse(pattern)
[ "def", "land_address", "(", "self", ")", ":", "pattern", "=", "self", ".", "random_element", "(", "self", ".", "land_address_formats", ")", "return", "self", ".", "generator", ".", "parse", "(", "pattern", ")" ]
:example ์„ธ์ข…ํŠน๋ณ„์ž์น˜์‹œ ์–ด์ง„๋™ 507
[ ":", "example", "์„ธ์ข…ํŠน๋ณ„์ž์น˜์‹œ", "์–ด์ง„๋™", "507" ]
python
train
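For context, the provider method above is normally reached through a locale-aware Faker instance. A short usage sketch, assuming the faker package is installed (output is random; the address shown is only illustrative):

from faker import Faker

fake = Faker("ko_KR")        # the Korean locale loads the ko_KR address provider
print(fake.land_address())   # e.g. "세종특별자치시 어진동 507"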
Nekroze/partpy
partpy/sourcestring.py
https://github.com/Nekroze/partpy/blob/dbb7d2fb285464fc43d85bc31f5af46192d301f6/partpy/sourcestring.py#L38-L42
def set_string(self, string):
    """Set the working string and its length then reset positions."""
    self.string = string
    self.length = len(string)
    self.reset_position()
[ "def", "set_string", "(", "self", ",", "string", ")", ":", "self", ".", "string", "=", "string", "self", ".", "length", "=", "len", "(", "string", ")", "self", ".", "reset_position", "(", ")" ]
Set the working string and its length then reset positions.
[ "Set", "the", "working", "string", "and", "its", "length", "then", "reset", "positions", "." ]
python
train
saltstack/salt
salt/states/host.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/host.py#L70-L165
def present(name, ip, clean=False):  # pylint: disable=C0103
    '''
    Ensures that the named host is present with the given ip

    name
        The host to assign an ip to

    ip
        The ip addr(s) to apply to the host. Can be a single IP or a list
        of IP addresses.

    clean : False
        Remove any entries which don't match those configured in the ``ip``
        option.

        .. versionadded:: 2018.3.4
    '''
    ret = {'name': name,
           'changes': {},
           'result': None if __opts__['test'] else True,
           'comment': ''}

    if not isinstance(ip, list):
        ip = [ip]

    all_hosts = __salt__['hosts.list_hosts']()
    comments = []
    to_add = set()
    to_remove = set()

    # First check for IPs not currently in the hosts file
    to_add.update([(addr, name) for addr in ip if addr not in all_hosts])

    # Now sweep through the hosts file and look for entries matching either the
    # IP address(es) or hostname.
    for addr, aliases in six.iteritems(all_hosts):
        if addr not in ip:
            if name in aliases:
                # Found match for hostname, but the corresponding IP is not in
                # our list, so we need to remove it.
                if clean:
                    to_remove.add((addr, name))
                else:
                    ret.setdefault('warnings', []).append(
                        'Host {0} present for IP address {1}. To get rid of '
                        'this warning, either run this state with \'clean\' '
                        'set to True to remove {0} from {1}, or add {1} to '
                        'the \'ip\' argument.'.format(name, addr)
                    )
        else:
            if name in aliases:
                # No changes needed for this IP address and hostname
                comments.append(
                    'Host {0} ({1}) already present'.format(name, addr)
                )
            else:
                # IP address listed in hosts file, but hostname is not present.
                # We will need to add it.
                if salt.utils.validate.net.ip_addr(addr):
                    to_add.add((addr, name))
                else:
                    ret['result'] = False
                    comments.append(
                        'Invalid IP Address for {0} ({1})'.format(name, addr)
                    )

    for addr, name in to_add:
        if __opts__['test']:
            comments.append(
                'Host {0} ({1}) would be added'.format(name, addr)
            )
        else:
            if __salt__['hosts.add_host'](addr, name):
                comments.append('Added host {0} ({1})'.format(name, addr))
            else:
                ret['result'] = False
                comments.append('Failed to add host {0} ({1})'.format(name, addr))
                continue
        ret['changes'].setdefault('added', {}).setdefault(addr, []).append(name)

    for addr, name in to_remove:
        if __opts__['test']:
            comments.append(
                'Host {0} ({1}) would be removed'.format(name, addr)
            )
        else:
            if __salt__['hosts.rm_host'](addr, name):
                comments.append('Removed host {0} ({1})'.format(name, addr))
            else:
                ret['result'] = False
                comments.append('Failed to remove host {0} ({1})'.format(name, addr))
                continue
        ret['changes'].setdefault('removed', {}).setdefault(addr, []).append(name)

    ret['comment'] = '\n'.join(comments)
    return ret
[ "def", "present", "(", "name", ",", "ip", ",", "clean", "=", "False", ")", ":", "# pylint: disable=C0103", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "None", "if", "__opts__", "[", "'test'", "]", "else", "True", ",", "'comment'", ":", "''", "}", "if", "not", "isinstance", "(", "ip", ",", "list", ")", ":", "ip", "=", "[", "ip", "]", "all_hosts", "=", "__salt__", "[", "'hosts.list_hosts'", "]", "(", ")", "comments", "=", "[", "]", "to_add", "=", "set", "(", ")", "to_remove", "=", "set", "(", ")", "# First check for IPs not currently in the hosts file", "to_add", ".", "update", "(", "[", "(", "addr", ",", "name", ")", "for", "addr", "in", "ip", "if", "addr", "not", "in", "all_hosts", "]", ")", "# Now sweep through the hosts file and look for entries matching either the", "# IP address(es) or hostname.", "for", "addr", ",", "aliases", "in", "six", ".", "iteritems", "(", "all_hosts", ")", ":", "if", "addr", "not", "in", "ip", ":", "if", "name", "in", "aliases", ":", "# Found match for hostname, but the corresponding IP is not in", "# our list, so we need to remove it.", "if", "clean", ":", "to_remove", ".", "add", "(", "(", "addr", ",", "name", ")", ")", "else", ":", "ret", ".", "setdefault", "(", "'warnings'", ",", "[", "]", ")", ".", "append", "(", "'Host {0} present for IP address {1}. To get rid of '", "'this warning, either run this state with \\'clean\\' '", "'set to True to remove {0} from {1}, or add {1} to '", "'the \\'ip\\' argument.'", ".", "format", "(", "name", ",", "addr", ")", ")", "else", ":", "if", "name", "in", "aliases", ":", "# No changes needed for this IP address and hostname", "comments", ".", "append", "(", "'Host {0} ({1}) already present'", ".", "format", "(", "name", ",", "addr", ")", ")", "else", ":", "# IP address listed in hosts file, but hostname is not present.", "# We will need to add it.", "if", "salt", ".", "utils", ".", "validate", ".", "net", ".", "ip_addr", "(", "addr", ")", ":", "to_add", ".", "add", "(", "(", "addr", ",", "name", ")", ")", "else", ":", "ret", "[", "'result'", "]", "=", "False", "comments", ".", "append", "(", "'Invalid IP Address for {0} ({1})'", ".", "format", "(", "name", ",", "addr", ")", ")", "for", "addr", ",", "name", "in", "to_add", ":", "if", "__opts__", "[", "'test'", "]", ":", "comments", ".", "append", "(", "'Host {0} ({1}) would be added'", ".", "format", "(", "name", ",", "addr", ")", ")", "else", ":", "if", "__salt__", "[", "'hosts.add_host'", "]", "(", "addr", ",", "name", ")", ":", "comments", ".", "append", "(", "'Added host {0} ({1})'", ".", "format", "(", "name", ",", "addr", ")", ")", "else", ":", "ret", "[", "'result'", "]", "=", "False", "comments", ".", "append", "(", "'Failed to add host {0} ({1})'", ".", "format", "(", "name", ",", "addr", ")", ")", "continue", "ret", "[", "'changes'", "]", ".", "setdefault", "(", "'added'", ",", "{", "}", ")", ".", "setdefault", "(", "addr", ",", "[", "]", ")", ".", "append", "(", "name", ")", "for", "addr", ",", "name", "in", "to_remove", ":", "if", "__opts__", "[", "'test'", "]", ":", "comments", ".", "append", "(", "'Host {0} ({1}) would be removed'", ".", "format", "(", "name", ",", "addr", ")", ")", "else", ":", "if", "__salt__", "[", "'hosts.rm_host'", "]", "(", "addr", ",", "name", ")", ":", "comments", ".", "append", "(", "'Removed host {0} ({1})'", ".", "format", "(", "name", ",", "addr", ")", ")", "else", ":", "ret", "[", "'result'", "]", "=", "False", "comments", ".", "append", "(", "'Failed to remove host {0} 
({1})'", ".", "format", "(", "name", ",", "addr", ")", ")", "continue", "ret", "[", "'changes'", "]", ".", "setdefault", "(", "'removed'", ",", "{", "}", ")", ".", "setdefault", "(", "addr", ",", "[", "]", ")", ".", "append", "(", "name", ")", "ret", "[", "'comment'", "]", "=", "'\\n'", ".", "join", "(", "comments", ")", "return", "ret" ]
Ensures that the named host is present with the given ip

name
    The host to assign an ip to

ip
    The ip addr(s) to apply to the host. Can be a single IP or a list of IP
    addresses.

clean : False
    Remove any entries which don't match those configured in the ``ip``
    option.

    .. versionadded:: 2018.3.4
[ "Ensures", "that", "the", "named", "host", "is", "present", "with", "the", "given", "ip" ]
python
train
saltstack/salt
salt/modules/lxd.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/lxd.py#L1836-L1911
def container_execute(name, cmd, remote_addr=None,
                      cert=None, key=None, verify_cert=True):
    '''
    Execute a command list on a container.

    name :
        Name of the container

    cmd :
        Command to be executed (as a list)

        Example :
            '["ls", "-l"]'

    remote_addr :
        A URL to a remote server; you also have to give cert and key if
        you provide remote_addr and it's a TCP address!

        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock

    cert :
        PEM Formatted SSL Certificate.

        Examples:
            ~/.config/lxc/client.crt

    key :
        PEM Formatted SSL Key.

        Examples:
            ~/.config/lxc/client.key

    verify_cert : True
        Whether to verify the cert. This is True by default, but in most
        cases you want to set it off, as LXD normally uses self-signed
        certificates.

    CLI Example:

    .. code-block:: bash

        salt '*' lxd.container_execute <container name> '["ls", "-l"]'
    '''
    container = container_get(
        name, remote_addr, cert, key, verify_cert, _raw=True
    )
    try:
        result = container.execute(cmd)
        saltresult = {}
        if not hasattr(result, 'exit_code'):
            saltresult = dict(
                exit_code=0,
                stdout=result[0],
                stderr=result[1],
            )
        else:
            saltresult = dict(
                exit_code=result.exit_code,
                stdout=result.stdout,
                stderr=result.stderr,
            )
    except pylxd.exceptions.NotFound as e:
        # TODO: Using exit_code 0 here is not always right;
        # in most cases the command worked OK though.
        # See: https://github.com/lxc/pylxd/issues/280
        saltresult = dict(exit_code=0, stdout="", stderr=six.text_type(e))

    if int(saltresult['exit_code']) > 0:
        saltresult['result'] = False
    else:
        saltresult['result'] = True

    return saltresult
[ "def", "container_execute", "(", "name", ",", "cmd", ",", "remote_addr", "=", "None", ",", "cert", "=", "None", ",", "key", "=", "None", ",", "verify_cert", "=", "True", ")", ":", "container", "=", "container_get", "(", "name", ",", "remote_addr", ",", "cert", ",", "key", ",", "verify_cert", ",", "_raw", "=", "True", ")", "try", ":", "result", "=", "container", ".", "execute", "(", "cmd", ")", "saltresult", "=", "{", "}", "if", "not", "hasattr", "(", "result", ",", "'exit_code'", ")", ":", "saltresult", "=", "dict", "(", "exit_code", "=", "0", ",", "stdout", "=", "result", "[", "0", "]", ",", "stderr", "=", "result", "[", "1", "]", ",", ")", "else", ":", "saltresult", "=", "dict", "(", "exit_code", "=", "result", ".", "exit_code", ",", "stdout", "=", "result", ".", "stdout", ",", "stderr", "=", "result", ".", "stderr", ",", ")", "except", "pylxd", ".", "exceptions", ".", "NotFound", "as", "e", ":", "# TODO: Using exit_code 0 here is not always right,", "# in the most cases the command worked ok though.", "# See: https://github.com/lxc/pylxd/issues/280", "saltresult", "=", "dict", "(", "exit_code", "=", "0", ",", "stdout", "=", "\"\"", ",", "stderr", "=", "six", ".", "text_type", "(", "e", ")", ")", "if", "int", "(", "saltresult", "[", "'exit_code'", "]", ")", ">", "0", ":", "saltresult", "[", "'result'", "]", "=", "False", "else", ":", "saltresult", "[", "'result'", "]", "=", "True", "return", "saltresult" ]
Execute a command list on a container.

name :
    Name of the container

cmd :
    Command to be executed (as a list)

    Example :
        '["ls", "-l"]'

remote_addr :
    A URL to a remote server; you also have to give cert and key if
    you provide remote_addr and it's a TCP address!

    Examples:
        https://myserver.lan:8443
        /var/lib/mysocket.sock

cert :
    PEM Formatted SSL Certificate.

    Examples:
        ~/.config/lxc/client.crt

key :
    PEM Formatted SSL Key.

    Examples:
        ~/.config/lxc/client.key

verify_cert : True
    Whether to verify the cert. This is True by default, but in most cases
    you want to set it off, as LXD normally uses self-signed certificates.

CLI Example:

.. code-block:: bash

    salt '*' lxd.container_execute <container name> '["ls", "-l"]'
[ "Execute", "a", "command", "list", "on", "a", "container", "." ]
python
train
saltstack/salt
salt/modules/lxc.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/lxc.py#L2312-L2340
def _ensure_running(name, no_start=False, path=None):
    '''
    If the container is not currently running, start it. This function
    returns the state that the container was in before changing

    path
        path to the container parent directory
        default: /var/lib/lxc (system)

        .. versionadded:: 2015.8.0
    '''
    _ensure_exists(name, path=path)
    pre = state(name, path=path)
    if pre == 'running':
        # This will be a no-op but running the function will give us a pretty
        # return dict.
        return start(name, path=path)
    elif pre == 'stopped':
        if no_start:
            raise CommandExecutionError(
                'Container \'{0}\' is not running'.format(name)
            )
        return start(name, path=path)
    elif pre == 'frozen':
        if no_start:
            raise CommandExecutionError(
                'Container \'{0}\' is not running'.format(name)
            )
        return unfreeze(name, path=path)
[ "def", "_ensure_running", "(", "name", ",", "no_start", "=", "False", ",", "path", "=", "None", ")", ":", "_ensure_exists", "(", "name", ",", "path", "=", "path", ")", "pre", "=", "state", "(", "name", ",", "path", "=", "path", ")", "if", "pre", "==", "'running'", ":", "# This will be a no-op but running the function will give us a pretty", "# return dict.", "return", "start", "(", "name", ",", "path", "=", "path", ")", "elif", "pre", "==", "'stopped'", ":", "if", "no_start", ":", "raise", "CommandExecutionError", "(", "'Container \\'{0}\\' is not running'", ".", "format", "(", "name", ")", ")", "return", "start", "(", "name", ",", "path", "=", "path", ")", "elif", "pre", "==", "'frozen'", ":", "if", "no_start", ":", "raise", "CommandExecutionError", "(", "'Container \\'{0}\\' is not running'", ".", "format", "(", "name", ")", ")", "return", "unfreeze", "(", "name", ",", "path", "=", "path", ")" ]
If the container is not currently running, start it. This function returns
the state that the container was in before changing

path
    path to the container parent directory
    default: /var/lib/lxc (system)

    .. versionadded:: 2015.8.0
[ "If", "the", "container", "is", "not", "currently", "running", "start", "it", ".", "This", "function", "returns", "the", "state", "that", "the", "container", "was", "in", "before", "changing" ]
python
train
oanda/v20-python
src/v20/pricing.py
https://github.com/oanda/v20-python/blob/f28192f4a31bce038cf6dfa302f5878bec192fe5/src/v20/pricing.py#L273-L298
def from_dict(data, ctx):
    """
    Instantiate a new HomeConversions from a dict (generally from loading a
    JSON response). The data used to instantiate the HomeConversions is a
    shallow copy of the dict passed in, with any complex child types
    instantiated appropriately.
    """
    data = data.copy()

    if data.get('accountGain') is not None:
        data['accountGain'] = ctx.convert_decimal_number(
            data.get('accountGain')
        )

    if data.get('accountLoss') is not None:
        data['accountLoss'] = ctx.convert_decimal_number(
            data.get('accountLoss')
        )

    if data.get('positionValue') is not None:
        data['positionValue'] = ctx.convert_decimal_number(
            data.get('positionValue')
        )

    return HomeConversions(**data)
[ "def", "from_dict", "(", "data", ",", "ctx", ")", ":", "data", "=", "data", ".", "copy", "(", ")", "if", "data", ".", "get", "(", "'accountGain'", ")", "is", "not", "None", ":", "data", "[", "'accountGain'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'accountGain'", ")", ")", "if", "data", ".", "get", "(", "'accountLoss'", ")", "is", "not", "None", ":", "data", "[", "'accountLoss'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'accountLoss'", ")", ")", "if", "data", ".", "get", "(", "'positionValue'", ")", "is", "not", "None", ":", "data", "[", "'positionValue'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'positionValue'", ")", ")", "return", "HomeConversions", "(", "*", "*", "data", ")" ]
Instantiate a new HomeConversions from a dict (generally from loading a JSON response). The data used to instantiate the HomeConversions is a shallow copy of the dict passed in, with any complex child types instantiated appropriately.
[ "Instantiate", "a", "new", "HomeConversions", "from", "a", "dict", "(", "generally", "from", "loading", "a", "JSON", "response", ")", ".", "The", "data", "used", "to", "instantiate", "the", "HomeConversions", "is", "a", "shallow", "copy", "of", "the", "dict", "passed", "in", "with", "any", "complex", "child", "types", "instantiated", "appropriately", "." ]
python
train
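The from_dict above follows a common deserialization pattern: shallow-copy the input, convert selected string fields in place, then splat the dict into the constructor. A standalone sketch of the same idea, where Decimal stands in for ctx.convert_decimal_number and no v20 types are used:

from decimal import Decimal

def convert_money_fields(data):
    data = data.copy()  # shallow copy so the caller's dict is not mutated
    for key in ("accountGain", "accountLoss", "positionValue"):
        if data.get(key) is not None:
            data[key] = Decimal(data[key])  # stand-in converter
    return data

print(convert_money_fields({"accountGain": "1.25", "positionValue": None}))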
hydpy-dev/hydpy
hydpy/core/filetools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/filetools.py#L765-L779
def save_files(self, selections) -> None:
    """Save the |Selection| objects contained in the given |Selections|
    instance to separate network files."""
    try:
        currentpath = self.currentpath
        selections = selectiontools.Selections(selections)
        for selection in selections:
            if selection.name == 'complete':
                continue
            path = os.path.join(currentpath, selection.name+'.py')
            selection.save_networkfile(filepath=path)
    except BaseException:
        objecttools.augment_excmessage(
            'While trying to save selections `%s` into network files'
            % selections)
[ "def", "save_files", "(", "self", ",", "selections", ")", "->", "None", ":", "try", ":", "currentpath", "=", "self", ".", "currentpath", "selections", "=", "selectiontools", ".", "Selections", "(", "selections", ")", "for", "selection", "in", "selections", ":", "if", "selection", ".", "name", "==", "'complete'", ":", "continue", "path", "=", "os", ".", "path", ".", "join", "(", "currentpath", ",", "selection", ".", "name", "+", "'.py'", ")", "selection", ".", "save_networkfile", "(", "filepath", "=", "path", ")", "except", "BaseException", ":", "objecttools", ".", "augment_excmessage", "(", "'While trying to save selections `%s` into network files'", "%", "selections", ")" ]
Save the |Selection| objects contained in the given |Selections| instance to separate network files.
[ "Save", "the", "|Selection|", "objects", "contained", "in", "the", "given", "|Selections|", "instance", "to", "separate", "network", "files", "." ]
python
train
CivicSpleen/ambry
ambry/metadata/proptree.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/metadata/proptree.py#L390-L399
def scalar_term(self, st):
    """Return a _ScalarTermS or _ScalarTermU from a string, to perform text
    and HTML substitutions"""
    if isinstance(st, binary_type):
        return _ScalarTermS(st, self._jinja_sub)
    elif isinstance(st, text_type):
        return _ScalarTermU(st, self._jinja_sub)
    elif st is None:
        return _ScalarTermU(u(''), self._jinja_sub)
    else:
        return st
[ "def", "scalar_term", "(", "self", ",", "st", ")", ":", "if", "isinstance", "(", "st", ",", "binary_type", ")", ":", "return", "_ScalarTermS", "(", "st", ",", "self", ".", "_jinja_sub", ")", "elif", "isinstance", "(", "st", ",", "text_type", ")", ":", "return", "_ScalarTermU", "(", "st", ",", "self", ".", "_jinja_sub", ")", "elif", "st", "is", "None", ":", "return", "_ScalarTermU", "(", "u", "(", "''", ")", ",", "self", ".", "_jinja_sub", ")", "else", ":", "return", "st" ]
Return a _ScalarTermS or _ScalarTermU from a string, to perform text and HTML substitutions
[ "Return", "a", "_ScalarTermS", "or", "_ScalarTermU", "from", "a", "string", "to", "perform", "text", "and", "HTML", "substitutions" ]
python
train
GeospatialPython/pyshp
shapefile.py
https://github.com/GeospatialPython/pyshp/blob/71231ddc5aa54f155d4f0563c56006fffbfc84e7/shapefile.py#L702-L711
def __getFileObj(self, f):
    """Checks to see if the requested shapefile file object is
    available. If not a ShapefileException is raised."""
    if not f:
        raise ShapefileException("Shapefile Reader requires a shapefile or file-like object.")
    if self.shp and self.shpLength is None:
        self.load()
    if self.dbf and len(self.fields) == 0:
        self.load()
    return f
[ "def", "__getFileObj", "(", "self", ",", "f", ")", ":", "if", "not", "f", ":", "raise", "ShapefileException", "(", "\"Shapefile Reader requires a shapefile or file-like object.\"", ")", "if", "self", ".", "shp", "and", "self", ".", "shpLength", "is", "None", ":", "self", ".", "load", "(", ")", "if", "self", ".", "dbf", "and", "len", "(", "self", ".", "fields", ")", "==", "0", ":", "self", ".", "load", "(", ")", "return", "f" ]
Checks to see if the requested shapefile file object is available. If not a ShapefileException is raised.
[ "Checks", "to", "see", "if", "the", "requested", "shapefile", "file", "object", "is", "available", ".", "If", "not", "a", "ShapefileException", "is", "raised", "." ]
python
train
wq/django-natural-keys
natural_keys/models.py
https://github.com/wq/django-natural-keys/blob/f6bd6baf848e709ae9920b259a3ad1a6be8af615/natural_keys/models.py#L194-L203
def natural_key(self):
    """
    Return the natural key for this object.

    (This is a generic implementation of the standard Django function)
    """
    # Recursively extract properties from related objects if needed
    vals = [reduce(getattr, name.split('__'), self)
            for name in self.get_natural_key_fields()]
    return vals
[ "def", "natural_key", "(", "self", ")", ":", "# Recursively extract properties from related objects if needed", "vals", "=", "[", "reduce", "(", "getattr", ",", "name", ".", "split", "(", "'__'", ")", ",", "self", ")", "for", "name", "in", "self", ".", "get_natural_key_fields", "(", ")", "]", "return", "vals" ]
Return the natural key for this object. (This is a generic implementation of the standard Django function)
[ "Return", "the", "natural", "key", "for", "this", "object", "." ]
python
train
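The list comprehension above leans on folding getattr over a double-underscore path. A minimal illustration with stand-in classes (not part of django-natural-keys):

from functools import reduce

class Author:
    name = "Ada"

class Book:
    author = Author()

# 'author__name' splits to ['author', 'name']; reduce walks Book().author.name
print(reduce(getattr, "author__name".split("__"), Book()))  # -> Ada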
HiPERCAM/hcam_widgets
hcam_widgets/hcam.py
https://github.com/HiPERCAM/hcam_widgets/blob/7219f0d96dd3a8ebe3139c7f542a72c02d02fce8/hcam_widgets/hcam.py#L1314-L1327
def checkUpdate(self, *args):
    """
    Updates values after first checking instrument parameters are OK.
    This is not integrated within update to prevent infinite recursion
    since update gets called from ipars.
    """
    g = get_root(self).globals
    if not self.check():
        g.clog.warn('Current observing parameters are not valid.')
        return False

    if not g.ipars.check():
        g.clog.warn('Current instrument parameters are not valid.')
        return False
[ "def", "checkUpdate", "(", "self", ",", "*", "args", ")", ":", "g", "=", "get_root", "(", "self", ")", ".", "globals", "if", "not", "self", ".", "check", "(", ")", ":", "g", ".", "clog", ".", "warn", "(", "'Current observing parameters are not valid.'", ")", "return", "False", "if", "not", "g", ".", "ipars", ".", "check", "(", ")", ":", "g", ".", "clog", ".", "warn", "(", "'Current instrument parameters are not valid.'", ")", "return", "False" ]
Updates values after first checking instrument parameters are OK. This is not integrated within update to prevent infinite recursion since update gets called from ipars.
[ "Updates", "values", "after", "first", "checking", "instrument", "parameters", "are", "OK", ".", "This", "is", "not", "integrated", "within", "update", "to", "prevent", "ifinite", "recursion", "since", "update", "gets", "called", "from", "ipars", "." ]
python
train
pytroll/satpy
satpy/multiscene.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/multiscene.py#L92-L96
def _create_cached_iter(self):
    """Iterate over the provided scenes, caching them for later."""
    for scn in self._scene_gen:
        self._scene_cache.append(scn)
        yield scn
[ "def", "_create_cached_iter", "(", "self", ")", ":", "for", "scn", "in", "self", ".", "_scene_gen", ":", "self", ".", "_scene_cache", ".", "append", "(", "scn", ")", "yield", "scn" ]
Iterate over the provided scenes, caching them for later.
[ "Iterate", "over", "the", "provided", "scenes", "caching", "them", "for", "later", "." ]
python
train
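The generator above implements a cache-as-you-iterate pattern: the first pass drains the underlying generator while recording each item, so later passes can replay from the cache. A self-contained sketch with illustrative names:

def cached_iter(gen, cache):
    for item in gen:
        cache.append(item)   # remember the item before handing it out
        yield item

cache = []
print(list(cached_iter(iter(range(3)), cache)))  # [0, 1, 2]
print(cache)                                     # [0, 1, 2] -- replayable copy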
assamite/creamas
creamas/mp.py
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/mp.py#L446-L481
def get_agents(self, addr=True, agent_cls=None, as_coro=False):
    """Get agents from the slave environments.

    :param bool addr:
        If ``True``, returns only addresses of the agents, otherwise
        returns a :class:`Proxy` object for each agent.

    :param agent_cls:
        If specified, returns only agents that are members of that
        particular class.

    :param bool as_coro:
        If ``True``, returns a coroutine, otherwise runs the method in
        an event loop.

    :returns:
        A coroutine or list of :class:`Proxy` objects or addresses as
        specified by the input parameters.

    Slave environment managers are excluded from the returned list by
    default. Essentially, this method calls each slave environment
    manager's :meth:`creamas.mp.EnvManager.get_agents` asynchronously.

    .. note::

        Calling each slave environment's manager might be costly in some
        situations. Therefore, it is advisable to store the returned agent
        list if the agent sets in the slave environments are not bound to
        change.
    """
    async def slave_task(mgr_addr, addr=True, agent_cls=None):
        r_manager = await self.env.connect(mgr_addr, timeout=TIMEOUT)
        return await r_manager.get_agents(addr=addr, agent_cls=agent_cls)

    tasks = create_tasks(slave_task, self.addrs, addr, agent_cls)
    return run_or_coro(tasks, as_coro)
[ "def", "get_agents", "(", "self", ",", "addr", "=", "True", ",", "agent_cls", "=", "None", ",", "as_coro", "=", "False", ")", ":", "async", "def", "slave_task", "(", "mgr_addr", ",", "addr", "=", "True", ",", "agent_cls", "=", "None", ")", ":", "r_manager", "=", "await", "self", ".", "env", ".", "connect", "(", "mgr_addr", ",", "timeout", "=", "TIMEOUT", ")", "return", "await", "r_manager", ".", "get_agents", "(", "addr", "=", "addr", ",", "agent_cls", "=", "agent_cls", ")", "tasks", "=", "create_tasks", "(", "slave_task", ",", "self", ".", "addrs", ",", "addr", ",", "agent_cls", ")", "return", "run_or_coro", "(", "tasks", ",", "as_coro", ")" ]
Get agents from the slave environments.

:param bool addr:
    If ``True``, returns only addresses of the agents, otherwise returns
    a :class:`Proxy` object for each agent.

:param agent_cls:
    If specified, returns only agents that are members of that particular
    class.

:param bool as_coro:
    If ``True``, returns a coroutine, otherwise runs the method in an
    event loop.

:returns:
    A coroutine or list of :class:`Proxy` objects or addresses as
    specified by the input parameters.

Slave environment managers are excluded from the returned list by default.
Essentially, this method calls each slave environment manager's
:meth:`creamas.mp.EnvManager.get_agents` asynchronously.

.. note::

    Calling each slave environment's manager might be costly in some
    situations. Therefore, it is advisable to store the returned agent list
    if the agent sets in the slave environments are not bound to change.
[ "Get", "agents", "from", "the", "slave", "environments", "." ]
python
train
rgmining/ria
ria/bipartite.py
https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L363-L389
def add_review(self, reviewer, product, review, date=None):
    """Add a new review from a given reviewer to a given product.

    Args:
      reviewer: an instance of Reviewer.
      product: an instance of Product.
      review: a float value.
      date: date the review issued.

    Returns:
      the added new review object.

    Raises:
      TypeError: when given reviewer and product aren't instance of
        specified reviewer and product class when this graph is constructed.
    """
    if not isinstance(reviewer, self._reviewer_cls):
        raise TypeError(
            "Type of given reviewer isn't acceptable:", reviewer,
            ", expected:", self._reviewer_cls)
    elif not isinstance(product, self._product_cls):
        raise TypeError(
            "Type of given product isn't acceptable:", product,
            ", expected:", self._product_cls)
    r = self._review_cls(review, date=date)
    self.graph.add_edge(reviewer, product, review=r)
    return r
[ "def", "add_review", "(", "self", ",", "reviewer", ",", "product", ",", "review", ",", "date", "=", "None", ")", ":", "if", "not", "isinstance", "(", "reviewer", ",", "self", ".", "_reviewer_cls", ")", ":", "raise", "TypeError", "(", "\"Type of given reviewer isn't acceptable:\"", ",", "reviewer", ",", "\", expected:\"", ",", "self", ".", "_reviewer_cls", ")", "elif", "not", "isinstance", "(", "product", ",", "self", ".", "_product_cls", ")", ":", "raise", "TypeError", "(", "\"Type of given product isn't acceptable:\"", ",", "product", ",", "\", expected:\"", ",", "self", ".", "_product_cls", ")", "r", "=", "self", ".", "_review_cls", "(", "review", ",", "date", "=", "date", ")", "self", ".", "graph", ".", "add_edge", "(", "reviewer", ",", "product", ",", "review", "=", "r", ")", "return", "r" ]
Add a new review from a given reviewer to a given product.

Args:
  reviewer: an instance of Reviewer.
  product: an instance of Product.
  review: a float value.
  date: date the review issued.

Returns:
  the added new review object.

Raises:
  TypeError: when given reviewer and product aren't instance of specified
    reviewer and product class when this graph is constructed.
[ "Add", "a", "new", "review", "from", "a", "given", "reviewer", "to", "a", "given", "product", "." ]
python
train
xeroc/python-graphenelib
graphenestorage/masterpassword.py
https://github.com/xeroc/python-graphenelib/blob/8bb5396bc79998ee424cf3813af478304173f3a6/graphenestorage/masterpassword.py#L116-L131
def _new_masterpassword(self, password):
    """ Generate a new random masterkey, encrypt it with the password and
    store it in the store.

    :param str password: Password to use for en-/de-cryption
    """
    # make sure to not overwrite an existing key
    if self.config_key in self.config and self.config[self.config_key]:
        raise Exception("Storage already has a masterpassword!")

    self.decrypted_master = hexlify(os.urandom(32)).decode("ascii")

    # Encrypt and save master
    self.password = password
    self._save_encrypted_masterpassword()
    return self.masterkey
[ "def", "_new_masterpassword", "(", "self", ",", "password", ")", ":", "# make sure to not overwrite an existing key", "if", "self", ".", "config_key", "in", "self", ".", "config", "and", "self", ".", "config", "[", "self", ".", "config_key", "]", ":", "raise", "Exception", "(", "\"Storage already has a masterpassword!\"", ")", "self", ".", "decrypted_master", "=", "hexlify", "(", "os", ".", "urandom", "(", "32", ")", ")", ".", "decode", "(", "\"ascii\"", ")", "# Encrypt and save master", "self", ".", "password", "=", "password", "self", ".", "_save_encrypted_masterpassword", "(", ")", "return", "self", ".", "masterkey" ]
Generate a new random masterkey, encrypt it with the password and store it
in the store.

:param str password: Password to use for en-/de-cryption
[ "Generate", "a", "new", "random", "masterkey", "encrypt", "it", "with", "the", "password", "and", "store", "it", "in", "the", "store", "." ]
python
valid
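The key-generation line above is standard library only and easy to verify in isolation: 32 random bytes hex-encoded to a 64-character ASCII string.

import os
from binascii import hexlify

masterkey = hexlify(os.urandom(32)).decode("ascii")
print(len(masterkey))  # 64 hex characters encode 32 bytes of entropy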
studionow/pybrightcove
pybrightcove/video.py
https://github.com/studionow/pybrightcove/blob/19c946b689a80156e070fe9bc35589c4b768e614/pybrightcove/video.py#L345-L410
def to_xml(self):  # pylint: disable=R0912
    """
    Converts object into an XML string.
    """
    xml = ''
    for asset in self.assets:
        xml += '<asset filename="%s" ' % \
            os.path.basename(asset['filename'])
        xml += ' refid="%(refid)s"' % asset
        xml += ' size="%(size)s"' % asset
        xml += ' hash-code="%s"' % asset['hash-code']
        xml += ' type="%(type)s"' % asset
        if asset.get('encoding-rate', None):
            xml += ' encoding-rate="%s"' % asset['encoding-rate']
        if asset.get('frame-width', None):
            xml += ' frame-width="%s"' % asset['frame-width']
        if asset.get('frame-height', None):
            xml += ' frame-height="%s"' % asset['frame-height']
        if asset.get('display-name', None):
            xml += ' display-name="%s"' % asset['display-name']
        if asset.get('encode-to', None):
            xml += ' encode-to="%s"' % asset['encode-to']
        if asset.get('encode-multiple', None):
            xml += ' encode-multiple="%s"' % asset['encode-multiple']
        if asset.get('h264-preserve-as-rendition', None):
            xml += ' h264-preserve-as-rendition="%s"' % \
                asset['h264-preserve-as-rendition']
        if asset.get('h264-no-processing', None):
            xml += ' h264-no-processing="%s"' % asset['h264-no-processing']
        xml += ' />\n'
    xml += '<title name="%(name)s" refid="%(referenceId)s" active="TRUE" '
    if self.start_date:
        xml += 'start-date="%(start_date)s" '
    if self.end_date:
        xml += 'end-date="%(end_date)s" '
    for asset in self.assets:
        if asset.get('encoding-rate', None) == None:
            choice = enums.AssetTypeEnum
            if asset.get('type', None) == choice.VIDEO_FULL:
                xml += 'video-full-refid="%s" ' % asset.get('refid')
            if asset.get('type', None) == choice.THUMBNAIL:
                xml += 'thumbnail-refid="%s" ' % asset.get('refid')
            if asset.get('type', None) == choice.VIDEO_STILL:
                xml += 'video-still-refid="%s" ' % asset.get('refid')
            if asset.get('type', None) == choice.FLV_BUMPER:
                xml += 'flash-prebumper-refid="%s" ' % asset.get('refid')
    xml += '>\n'
    if self.short_description:
        xml += '<short-description><![CDATA[%(shortDescription)s]]>'
        xml += '</short-description>\n'
    if self.long_description:
        xml += '<long-description><![CDATA[%(longDescription)s]]>'
        xml += '</long-description>\n'
    for tag in self.tags:
        xml += '<tag><![CDATA[%s]]></tag>\n' % tag
    for asset in self.assets:
        if asset.get('encoding-rate', None):
            xml += '<rendition-refid>%s</rendition-refid>\n' % \
                asset['refid']
    for meta in self.metadata:
        xml += '<custom-%s-value name="%s">%s</custom-%s-value>' % \
            (meta['type'], meta['key'], meta['value'], meta['type'])
    xml += '</title>'
    xml = xml % self._to_dict()
    return xml
[ "def", "to_xml", "(", "self", ")", ":", "# pylint: disable=R0912", "xml", "=", "''", "for", "asset", "in", "self", ".", "assets", ":", "xml", "+=", "'<asset filename=\"%s\" '", "%", "os", ".", "path", ".", "basename", "(", "asset", "[", "'filename'", "]", ")", "xml", "+=", "' refid=\"%(refid)s\"'", "%", "asset", "xml", "+=", "' size=\"%(size)s\"'", "%", "asset", "xml", "+=", "' hash-code=\"%s\"'", "%", "asset", "[", "'hash-code'", "]", "xml", "+=", "' type=\"%(type)s\"'", "%", "asset", "if", "asset", ".", "get", "(", "'encoding-rate'", ",", "None", ")", ":", "xml", "+=", "' encoding-rate=\"%s\"'", "%", "asset", "[", "'encoding-rate'", "]", "if", "asset", ".", "get", "(", "'frame-width'", ",", "None", ")", ":", "xml", "+=", "' frame-width=\"%s\"'", "%", "asset", "[", "'frame-width'", "]", "if", "asset", ".", "get", "(", "'frame-height'", ",", "None", ")", ":", "xml", "+=", "' frame-height=\"%s\"'", "%", "asset", "[", "'frame-height'", "]", "if", "asset", ".", "get", "(", "'display-name'", ",", "None", ")", ":", "xml", "+=", "' display-name=\"%s\"'", "%", "asset", "[", "'display-name'", "]", "if", "asset", ".", "get", "(", "'encode-to'", ",", "None", ")", ":", "xml", "+=", "' encode-to=\"%s\"'", "%", "asset", "[", "'encode-to'", "]", "if", "asset", ".", "get", "(", "'encode-multiple'", ",", "None", ")", ":", "xml", "+=", "' encode-multiple=\"%s\"'", "%", "asset", "[", "'encode-multiple'", "]", "if", "asset", ".", "get", "(", "'h264-preserve-as-rendition'", ",", "None", ")", ":", "xml", "+=", "' h264-preserve-as-rendition=\"%s\"'", "%", "asset", "[", "'h264-preserve-as-rendition'", "]", "if", "asset", ".", "get", "(", "'h264-no-processing'", ",", "None", ")", ":", "xml", "+=", "' h264-no-processing=\"%s\"'", "%", "asset", "[", "'h264-no-processing'", "]", "xml", "+=", "' />\\n'", "xml", "+=", "'<title name=\"%(name)s\" refid=\"%(referenceId)s\" active=\"TRUE\" '", "if", "self", ".", "start_date", ":", "xml", "+=", "'start-date=\"%(start_date)s\" '", "if", "self", ".", "end_date", ":", "xml", "+=", "'end-date=\"%(end_date)s\" '", "for", "asset", "in", "self", ".", "assets", ":", "if", "asset", ".", "get", "(", "'encoding-rate'", ",", "None", ")", "==", "None", ":", "choice", "=", "enums", ".", "AssetTypeEnum", "if", "asset", ".", "get", "(", "'type'", ",", "None", ")", "==", "choice", ".", "VIDEO_FULL", ":", "xml", "+=", "'video-full-refid=\"%s\" '", "%", "asset", ".", "get", "(", "'refid'", ")", "if", "asset", ".", "get", "(", "'type'", ",", "None", ")", "==", "choice", ".", "THUMBNAIL", ":", "xml", "+=", "'thumbnail-refid=\"%s\" '", "%", "asset", ".", "get", "(", "'refid'", ")", "if", "asset", ".", "get", "(", "'type'", ",", "None", ")", "==", "choice", ".", "VIDEO_STILL", ":", "xml", "+=", "'video-still-refid=\"%s\" '", "%", "asset", ".", "get", "(", "'refid'", ")", "if", "asset", ".", "get", "(", "'type'", ",", "None", ")", "==", "choice", ".", "FLV_BUMPER", ":", "xml", "+=", "'flash-prebumper-refid=\"%s\" '", "%", "asset", ".", "get", "(", "'refid'", ")", "xml", "+=", "'>\\n'", "if", "self", ".", "short_description", ":", "xml", "+=", "'<short-description><![CDATA[%(shortDescription)s]]>'", "xml", "+=", "'</short-description>\\n'", "if", "self", ".", "long_description", ":", "xml", "+=", "'<long-description><![CDATA[%(longDescription)s]]>'", "xml", "+=", "'</long-description>\\n'", "for", "tag", "in", "self", ".", "tags", ":", "xml", "+=", "'<tag><![CDATA[%s]]></tag>\\n'", "%", "tag", "for", "asset", "in", "self", ".", "assets", ":", "if", "asset", ".", "get", "(", "'encoding-rate'", 
",", "None", ")", ":", "xml", "+=", "'<rendition-refid>%s</rendition-refid>\\n'", "%", "asset", "[", "'refid'", "]", "for", "meta", "in", "self", ".", "metadata", ":", "xml", "+=", "'<custom-%s-value name=\"%s\">%s</custom-%s-value>'", "%", "(", "meta", "[", "'type'", "]", ",", "meta", "[", "'key'", "]", ",", "meta", "[", "'value'", "]", ",", "meta", "[", "'type'", "]", ")", "xml", "+=", "'</title>'", "xml", "=", "xml", "%", "self", ".", "_to_dict", "(", ")", "return", "xml" ]
Converts object into an XML string.
[ "Converts", "object", "into", "an", "XML", "string", "." ]
python
train
Murali-group/halp
halp/utilities/priority_queue.py
https://github.com/Murali-group/halp/blob/6eb27466ba84e2281e18f93b62aae5efb21ef8b3/halp/utilities/priority_queue.py#L41-L52
def add_element(self, priority, element, count=None):
    """Adds an element with a specific priority.

    :param priority: priority of the element.
    :param element: element to add.
    """
    if count is None:
        count = next(self.counter)
    entry = [priority, count, element]
    self.element_finder[element] = entry
    heapq.heappush(self.pq, entry)
[ "def", "add_element", "(", "self", ",", "priority", ",", "element", ",", "count", "=", "None", ")", ":", "if", "count", "is", "None", ":", "count", "=", "next", "(", "self", ".", "counter", ")", "entry", "=", "[", "priority", ",", "count", ",", "element", "]", "self", ".", "element_finder", "[", "element", "]", "=", "entry", "heapq", ".", "heappush", "(", "self", ".", "pq", ",", "entry", ")" ]
Adds an element with a specific priority.

:param priority: priority of the element.
:param element: element to add.
[ "Adds", "an", "element", "with", "a", "specific", "priority", "." ]
python
train
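The [priority, count, element] entry above follows the tie-breaking recipe from the Python heapq documentation: a monotonically increasing counter decides between equal priorities, so comparison never falls through to the element itself (which may not be orderable). A standalone sketch:

import heapq
import itertools

pq, counter = [], itertools.count()
for priority, element in [(2, "b"), (1, "a"), (1, "c")]:
    heapq.heappush(pq, [priority, next(counter), element])

while pq:
    priority, _, element = heapq.heappop(pq)
    print(priority, element)  # 1 a, then 1 c (insertion order), then 2 b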
ungarj/mapchete
mapchete/tile.py
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/tile.py#L42-L60
def tile(self, zoom, row, col):
    """
    Return ``BufferedTile`` object of this ``BufferedTilePyramid``.

    Parameters
    ----------
    zoom : integer
        zoom level
    row : integer
        tile matrix row
    col : integer
        tile matrix column

    Returns
    -------
    buffered tile : ``BufferedTile``
    """
    tile = self.tile_pyramid.tile(zoom, row, col)
    return BufferedTile(tile, pixelbuffer=self.pixelbuffer)
[ "def", "tile", "(", "self", ",", "zoom", ",", "row", ",", "col", ")", ":", "tile", "=", "self", ".", "tile_pyramid", ".", "tile", "(", "zoom", ",", "row", ",", "col", ")", "return", "BufferedTile", "(", "tile", ",", "pixelbuffer", "=", "self", ".", "pixelbuffer", ")" ]
Return ``BufferedTile`` object of this ``BufferedTilePyramid``.

Parameters
----------
zoom : integer
    zoom level
row : integer
    tile matrix row
col : integer
    tile matrix column

Returns
-------
buffered tile : ``BufferedTile``
[ "Return", "BufferedTile", "object", "of", "this", "BufferedTilePyramid", "." ]
python
valid
threeML/astromodels
astromodels/core/parameter.py
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/core/parameter.py#L962-L1011
def set_uninformative_prior(self, prior_class):
    """
    Sets the prior for the parameter to a uniform prior between the current
    minimum and maximum, or a log-uniform prior between the current minimum
    and maximum.

    NOTE: if the current minimum and maximum are not defined, the default
    bounds for the prior class will be used.

    :param prior_class: the class to be used as prior (either
        Log_uniform_prior or Uniform_prior, or a class which provides
        lower_bound and upper_bound properties)
    :return: (none)
    """
    prior_instance = prior_class()

    if self.min_value is None:
        raise ParameterMustHaveBounds("Parameter %s does not have a defined minimum. Set one first, then re-run "
                                      "set_uninformative_prior" % self.path)
    else:
        try:
            prior_instance.lower_bound = self.min_value
        except SettingOutOfBounds:
            raise SettingOutOfBounds("Cannot use minimum of %s for prior %s" %
                                     (self.min_value, prior_instance.name))

    if self.max_value is None:
        raise ParameterMustHaveBounds("Parameter %s does not have a defined maximum. Set one first, then re-run "
                                      "set_uninformative_prior" % self.path)
    else:  # pragma: no cover
        try:
            prior_instance.upper_bound = self.max_value
        except SettingOutOfBounds:
            raise SettingOutOfBounds("Cannot use maximum of %s for prior %s" %
                                     (self.max_value, prior_instance.name))

    assert np.isfinite(prior_instance.upper_bound.value), \
        "The parameter %s must have a finite maximum" % self.name
    assert np.isfinite(prior_instance.lower_bound.value), \
        "The parameter %s must have a finite minimum" % self.name

    self._set_prior(prior_instance)
[ "def", "set_uninformative_prior", "(", "self", ",", "prior_class", ")", ":", "prior_instance", "=", "prior_class", "(", ")", "if", "self", ".", "min_value", "is", "None", ":", "raise", "ParameterMustHaveBounds", "(", "\"Parameter %s does not have a defined minimum. Set one first, then re-run \"", "\"set_uninformative_prior\"", "%", "self", ".", "path", ")", "else", ":", "try", ":", "prior_instance", ".", "lower_bound", "=", "self", ".", "min_value", "except", "SettingOutOfBounds", ":", "raise", "SettingOutOfBounds", "(", "\"Cannot use minimum of %s for prior %s\"", "%", "(", "self", ".", "min_value", ",", "prior_instance", ".", "name", ")", ")", "if", "self", ".", "max_value", "is", "None", ":", "raise", "ParameterMustHaveBounds", "(", "\"Parameter %s does not have a defined maximum. Set one first, then re-run \"", "\"set_uninformative_prior\"", "%", "self", ".", "path", ")", "else", ":", "# pragma: no cover", "try", ":", "prior_instance", ".", "upper_bound", "=", "self", ".", "max_value", "except", "SettingOutOfBounds", ":", "raise", "SettingOutOfBounds", "(", "\"Cannot use maximum of %s for prior %s\"", "%", "(", "self", ".", "max_value", ",", "prior_instance", ".", "name", ")", ")", "assert", "np", ".", "isfinite", "(", "prior_instance", ".", "upper_bound", ".", "value", ")", ",", "\"The parameter %s must have a finite maximum\"", "%", "self", ".", "name", "assert", "np", ".", "isfinite", "(", "prior_instance", ".", "lower_bound", ".", "value", ")", ",", "\"The parameter %s must have a finite minimum\"", "%", "self", ".", "name", "self", ".", "_set_prior", "(", "prior_instance", ")" ]
Sets the prior for the parameter to a uniform prior between the current
minimum and maximum, or a log-uniform prior between the current minimum and
maximum.

NOTE: if the current minimum and maximum are not defined, the default bounds
for the prior class will be used.

:param prior_class: the class to be used as prior (either Log_uniform_prior
    or Uniform_prior, or a class which provides lower_bound and upper_bound
    properties)
:return: (none)
[ "Sets", "the", "prior", "for", "the", "parameter", "to", "a", "uniform", "prior", "between", "the", "current", "minimum", "and", "maximum", "or", "a", "log", "-", "uniform", "prior", "between", "the", "current", "minimum", "and", "maximum", "." ]
python
train
Erotemic/utool
utool/util_str.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_str.py#L2650-L2682
def to_camel_case(underscore_case, mixed=False):
    r"""
    Args:
        underscore_case (?):

    Returns:
        str: camel_case_str

    CommandLine:
        python -m utool.util_str --exec-to_camel_case

    References:
        https://en.wikipedia.org/wiki/CamelCase

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_str import *  # NOQA
        >>> underscore_case = 'underscore_funcname'
        >>> camel_case_str = to_camel_case(underscore_case)
        >>> result = ('camel_case_str = %s' % (str(camel_case_str),))
        >>> print(result)
        camel_case_str = UnderscoreFuncname
    """
    thresh = 0 if mixed else -1
    words = underscore_case.split('_')
    words2 = [
        word[0].upper() + word[1:]
        if count > thresh else
        word
        for count, word in enumerate(words)
    ]
    camel_case_str = ''.join(words2)
    return camel_case_str
[ "def", "to_camel_case", "(", "underscore_case", ",", "mixed", "=", "False", ")", ":", "thresh", "=", "0", "if", "mixed", "else", "-", "1", "words", "=", "underscore_case", ".", "split", "(", "'_'", ")", "words2", "=", "[", "word", "[", "0", "]", ".", "upper", "(", ")", "+", "word", "[", "1", ":", "]", "if", "count", ">", "thresh", "else", "word", "for", "count", ",", "word", "in", "enumerate", "(", "words", ")", "]", "camel_case_str", "=", "''", ".", "join", "(", "words2", ")", "return", "camel_case_str" ]
r""" Args: underscore_case (?): Returns: str: camel_case_str CommandLine: python -m utool.util_str --exec-to_camel_case References: https://en.wikipedia.org/wiki/CamelCase Example: >>> # ENABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> underscore_case = 'underscore_funcname' >>> camel_case_str = to_camel_case(underscore_case) >>> result = ('camel_case_str = %s' % (str(camel_case_str),)) >>> print(result) camel_case_str = UnderscoreFuncname
[ "r", "Args", ":", "underscore_case", "(", "?", ")", ":" ]
python
train
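The mixed flag above shifts the capitalization threshold so the first word keeps its case, giving mixedCase rather than UpperCamelCase; this behavior is inferred from the code shown, and the snippet assumes utool is installed:

from utool.util_str import to_camel_case

print(to_camel_case("underscore_funcname"))              # UnderscoreFuncname
print(to_camel_case("underscore_funcname", mixed=True))  # underscoreFuncname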
viswa-swami/python-foscam
libpyfoscam/foscam.py
https://github.com/viswa-swami/python-foscam/blob/d76f2f7016959b7b758751637fad103c9032e488/libpyfoscam/foscam.py#L172-L182
def set_port_info(self, webport, mediaport, httpsport, onvifport,
                  callback=None):
    '''
    Set http port and media port of camera.
    '''
    params = {'webPort': webport,
              'mediaPort': mediaport,
              'httpsPort': httpsport,
              'onvifPort': onvifport,
              }
    return self.execute_command('setPortInfo', params, callback=callback)
[ "def", "set_port_info", "(", "self", ",", "webport", ",", "mediaport", ",", "httpsport", ",", "onvifport", ",", "callback", "=", "None", ")", ":", "params", "=", "{", "'webPort'", ":", "webport", ",", "'mediaPort'", ":", "mediaport", ",", "'httpsPort'", ":", "httpsport", ",", "'onvifPort'", ":", "onvifport", ",", "}", "return", "self", ".", "execute_command", "(", "'setPortInfo'", ",", "params", ",", "callback", "=", "callback", ")" ]
Set http port and media port of camera.
[ "Set", "http", "port", "and", "media", "port", "of", "camera", "." ]
python
train
CalebBell/fluids
fluids/geometry.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/geometry.py#L156-L221
def V_horiz_conical(D, L, a, h, headonly=False):
    r'''Calculates volume of a tank with conical ends, according to [1]_.

    .. math::
        V_f = A_fL + \frac{2aR^2}{3}K, \;\;0 \le h < R\\

    .. math::
        V_f = A_fL + \frac{2aR^2}{3}\pi/2,\;\; h = R\\

    .. math::
        V_f = A_fL + \frac{2aR^2}{3}(\pi-K), \;\; R< h \le 2R

    .. math::
        K = \cos^{-1} M + M^3\cosh^{-1} \frac{1}{M} - 2M\sqrt{1 - M^2}

    .. math::
        M = \left|\frac{R-h}{R}\right|

    .. math::
        Af = R^2\cos^{-1}\frac{R-h}{R} - (R-h)\sqrt{2Rh - h^2}

    Parameters
    ----------
    D : float
        Diameter of the main cylindrical section, [m]
    L : float
        Length of the main cylindrical section, [m]
    a : float
        Distance the cone head extends on one side, [m]
    h : float
        Height, as measured up to where the fluid ends, [m]
    headonly : bool, optional
        Function returns only the volume of a single head side if True

    Returns
    -------
    V : float
        Volume [m^3]

    Examples
    --------
    Matching example from [1]_, with inputs in inches and volume in gallons.

    >>> V_horiz_conical(D=108., L=156., a=42., h=36)/231
    2041.1923581273443

    References
    ----------
    .. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22,
       2015. http://www.webcalc.com.br/blog/Tank_Volume.PDF
    '''
    R = D/2.
    Af = R*R*acos((R-h)/R) - (R-h)*(2*R*h - h*h)**0.5
    M = abs((R-h)/R)
    if h == R:
        Vf = a*R*R/3.*pi
    else:
        K = acos(M) + M*M*M*acosh(1./M) - 2.*M*(1.-M*M)**0.5
        if 0. <= h < R:
            Vf = 2.*a*R*R/3*K
        elif R < h <= 2*R:
            Vf = 2.*a*R*R/3*(pi - K)
    if headonly:
        Vf = 0.5*Vf
    else:
        Vf += Af*L
    return Vf
[ "def", "V_horiz_conical", "(", "D", ",", "L", ",", "a", ",", "h", ",", "headonly", "=", "False", ")", ":", "R", "=", "D", "/", "2.", "Af", "=", "R", "*", "R", "*", "acos", "(", "(", "R", "-", "h", ")", "/", "R", ")", "-", "(", "R", "-", "h", ")", "*", "(", "2", "*", "R", "*", "h", "-", "h", "*", "h", ")", "**", "0.5", "M", "=", "abs", "(", "(", "R", "-", "h", ")", "/", "R", ")", "if", "h", "==", "R", ":", "Vf", "=", "a", "*", "R", "*", "R", "/", "3.", "*", "pi", "else", ":", "K", "=", "acos", "(", "M", ")", "+", "M", "*", "M", "*", "M", "*", "acosh", "(", "1.", "/", "M", ")", "-", "2.", "*", "M", "*", "(", "1.", "-", "M", "*", "M", ")", "**", "0.5", "if", "0.", "<=", "h", "<", "R", ":", "Vf", "=", "2.", "*", "a", "*", "R", "*", "R", "/", "3", "*", "K", "elif", "R", "<", "h", "<=", "2", "*", "R", ":", "Vf", "=", "2.", "*", "a", "*", "R", "*", "R", "/", "3", "*", "(", "pi", "-", "K", ")", "if", "headonly", ":", "Vf", "=", "0.5", "*", "Vf", "else", ":", "Vf", "+=", "Af", "*", "L", "return", "Vf" ]
r'''Calculates volume of a tank with conical ends, according to [1]_.

.. math::
    V_f = A_fL + \frac{2aR^2}{3}K, \;\;0 \le h < R\\

.. math::
    V_f = A_fL + \frac{2aR^2}{3}\pi/2,\;\; h = R\\

.. math::
    V_f = A_fL + \frac{2aR^2}{3}(\pi-K), \;\; R< h \le 2R

.. math::
    K = \cos^{-1} M + M^3\cosh^{-1} \frac{1}{M} - 2M\sqrt{1 - M^2}

.. math::
    M = \left|\frac{R-h}{R}\right|

.. math::
    Af = R^2\cos^{-1}\frac{R-h}{R} - (R-h)\sqrt{2Rh - h^2}

Parameters
----------
D : float
    Diameter of the main cylindrical section, [m]
L : float
    Length of the main cylindrical section, [m]
a : float
    Distance the cone head extends on one side, [m]
h : float
    Height, as measured up to where the fluid ends, [m]
headonly : bool, optional
    Function returns only the volume of a single head side if True

Returns
-------
V : float
    Volume [m^3]

Examples
--------
Matching example from [1]_, with inputs in inches and volume in gallons.

>>> V_horiz_conical(D=108., L=156., a=42., h=36)/231
2041.1923581273443

References
----------
.. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22,
   2015. http://www.webcalc.com.br/blog/Tank_Volume.PDF
[ "r", "Calculates", "volume", "of", "a", "tank", "with", "conical", "ends", "according", "to", "[", "1", "]", "_", "." ]
python
train
google/tangent
tangent/tf_extensions.py
https://github.com/google/tangent/blob/6533e83af09de7345d1b438512679992f080dcc9/tangent/tf_extensions.py#L116-L138
def unreduce_tensor(tensor, shape, axis, keepdims):
    """Reverse summing over a dimension.

    See utils.py.

    Args:
      tensor: The tensor that was reduced.
      shape: A list, the original shape of the tensor before reduction.
      axis: The axis or axes that were summed.
      keepdims: Whether these axes were kept as singleton axes.

    Returns:
      A tensor with axes broadcast to match the shape of the original tensor.
    """
    if not keepdims:
        if axis is None:
            axis = range(len(shape))
        elif isinstance(axis, int):
            axis = axis,
        for ax in sorted(axis):
            tensor = tf.expand_dims(tensor, ax)
    tile_shape = np.array(shape) / np.array(shape_as_list(tensor))
    return tf.tile(tensor, tile_shape)
[ "def", "unreduce_tensor", "(", "tensor", ",", "shape", ",", "axis", ",", "keepdims", ")", ":", "if", "not", "keepdims", ":", "if", "axis", "is", "None", ":", "axis", "=", "range", "(", "len", "(", "shape", ")", ")", "elif", "isinstance", "(", "axis", ",", "int", ")", ":", "axis", "=", "axis", ",", "for", "ax", "in", "sorted", "(", "axis", ")", ":", "tensor", "=", "tf", ".", "expand_dims", "(", "tensor", ",", "ax", ")", "tile_shape", "=", "np", ".", "array", "(", "shape", ")", "/", "np", ".", "array", "(", "shape_as_list", "(", "tensor", ")", ")", "return", "tf", ".", "tile", "(", "tensor", ",", "tile_shape", ")" ]
Reverse summing over a dimension.

See utils.py.

Args:
  tensor: The tensor that was reduced.
  shape: A list, the original shape of the tensor before reduction.
  axis: The axis or axes that were summed.
  keepdims: Whether these axes were kept as singleton axes.

Returns:
  A tensor with axes broadcast to match the shape of the original tensor.
[ "Reverse", "summing", "over", "a", "dimension", "." ]
python
train
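The unreduce step above has a direct NumPy analogue (illustrative only, not the tangent API): re-insert the summed axis as a singleton, then tile back out to the original shape.

import numpy as np

x = np.arange(6).reshape(2, 3)
s = x.sum(axis=1)             # shape (2,) after reducing axis 1
s = np.expand_dims(s, 1)      # shape (2, 1) -- restore the singleton axis
print(np.tile(s, (1, 3)))     # shape (2, 3) again, values broadcast per row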
awslabs/serverless-application-model
examples/apps/greengrass-hello-world/greengrasssdk/IoTDataPlane.py
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/examples/apps/greengrass-hello-world/greengrasssdk/IoTDataPlane.py#L28-L45
def get_thing_shadow(self, **kwargs):
    r"""
    Call shadow lambda to obtain current shadow state.

    :Keyword Arguments:
        * *thingName* (``string``) --
          [REQUIRED]
          The name of the thing.

    :returns: (``dict``) --
        The output from the GetThingShadow operation
        * *payload* (``bytes``) -- The state information, in JSON format.
    """
    thing_name = self._get_required_parameter('thingName', **kwargs)
    payload = b''
    return self._shadow_op('get', thing_name, payload)
[ "def", "get_thing_shadow", "(", "self", ",", "*", "*", "kwargs", ")", ":", "thing_name", "=", "self", ".", "_get_required_parameter", "(", "'thingName'", ",", "*", "*", "kwargs", ")", "payload", "=", "b''", "return", "self", ".", "_shadow_op", "(", "'get'", ",", "thing_name", ",", "payload", ")" ]
r""" Call shadow lambda to obtain current shadow state. :Keyword Arguments: * *thingName* (``string``) -- [REQUIRED] The name of the thing. :returns: (``dict``) -- The output from the GetThingShadow operation * *payload* (``bytes``) -- The state information, in JSON format.
[ "r", "Call", "shadow", "lambda", "to", "obtain", "current", "shadow", "state", "." ]
python
train
adamzap/landslide
landslide/parser.py
https://github.com/adamzap/landslide/blob/59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832/landslide/parser.py#L49-L86
def parse(self, text):
    """Parses and renders a text as HTML regarding current format.
    """
    if self.format == 'markdown':
        try:
            import markdown
        except ImportError:
            raise RuntimeError(u"Looks like markdown is not installed")

        if text.startswith(u'\ufeff'):  # check for unicode BOM
            text = text[1:]

        return markdown.markdown(text, extensions=self.md_extensions)
    elif self.format == 'restructuredtext':
        try:
            from landslide.rst import html_body
        except ImportError:
            raise RuntimeError(u"Looks like docutils are not installed")

        html = html_body(text, input_encoding=self.encoding)

        # RST generates pretty much markup to be removed in our case
        for (pattern, replacement, mode) in self.RST_REPLACEMENTS:
            html = re.sub(re.compile(pattern, mode), replacement, html, 0)

        return html.strip()
    elif self.format == 'textile':
        try:
            import textile
        except ImportError:
            raise RuntimeError(u"Looks like textile is not installed")

        text = text.replace('\n---\n', '\n<hr />\n')

        return textile.textile(text, encoding=self.encoding)
    else:
        raise NotImplementedError(u"Unsupported format %s, cannot parse"
                                  % self.format)
[ "def", "parse", "(", "self", ",", "text", ")", ":", "if", "self", ".", "format", "==", "'markdown'", ":", "try", ":", "import", "markdown", "except", "ImportError", ":", "raise", "RuntimeError", "(", "u\"Looks like markdown is not installed\"", ")", "if", "text", ".", "startswith", "(", "u'\\ufeff'", ")", ":", "# check for unicode BOM", "text", "=", "text", "[", "1", ":", "]", "return", "markdown", ".", "markdown", "(", "text", ",", "extensions", "=", "self", ".", "md_extensions", ")", "elif", "self", ".", "format", "==", "'restructuredtext'", ":", "try", ":", "from", "landslide", ".", "rst", "import", "html_body", "except", "ImportError", ":", "raise", "RuntimeError", "(", "u\"Looks like docutils are not installed\"", ")", "html", "=", "html_body", "(", "text", ",", "input_encoding", "=", "self", ".", "encoding", ")", "# RST generates pretty much markup to be removed in our case", "for", "(", "pattern", ",", "replacement", ",", "mode", ")", "in", "self", ".", "RST_REPLACEMENTS", ":", "html", "=", "re", ".", "sub", "(", "re", ".", "compile", "(", "pattern", ",", "mode", ")", ",", "replacement", ",", "html", ",", "0", ")", "return", "html", ".", "strip", "(", ")", "elif", "self", ".", "format", "==", "'textile'", ":", "try", ":", "import", "textile", "except", "ImportError", ":", "raise", "RuntimeError", "(", "u\"Looks like textile is not installed\"", ")", "text", "=", "text", ".", "replace", "(", "'\\n---\\n'", ",", "'\\n<hr />\\n'", ")", "return", "textile", ".", "textile", "(", "text", ",", "encoding", "=", "self", ".", "encoding", ")", "else", ":", "raise", "NotImplementedError", "(", "u\"Unsupported format %s, cannot parse\"", "%", "self", ".", "format", ")" ]
Parses and renders a text as HTML regarding current format.
[ "Parses", "and", "renders", "a", "text", "as", "HTML", "regarding", "current", "format", "." ]
python
train
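A hedged usage sketch for the parser above; the constructor argument is an assumption (landslide picks the format from a file extension), not something the record confirms:
from landslide.parser import Parser

parser = Parser('md')  # hypothetical: the extension selects self.format
html = parser.parse(u'# Hello\n\nSome *markdown* text')
print(html)  # e.g. '<h1>Hello</h1>\n<p>Some <em>markdown</em> text</p>'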
square/connect-python-sdk
squareconnect/models/order_fulfillment_recipient.py
https://github.com/square/connect-python-sdk/blob/adc1d09e817986cdc607391580f71d6b48ed4066/squareconnect/models/order_fulfillment_recipient.py#L149-L163
def phone_number(self, phone_number):
    """
    Sets the phone_number of this OrderFulfillmentRecipient.
    The phone number of the fulfillment recipient. If provided, overrides the value from customer profile indicated by customer_id.

    :param phone_number: The phone_number of this OrderFulfillmentRecipient.
    :type: str
    """

    if phone_number is None:
        raise ValueError("Invalid value for `phone_number`, must not be `None`")
    if len(phone_number) > 16:
        raise ValueError("Invalid value for `phone_number`, length must be less than or equal to `16`")

    self._phone_number = phone_number
[ "def", "phone_number", "(", "self", ",", "phone_number", ")", ":", "if", "phone_number", "is", "None", ":", "raise", "ValueError", "(", "\"Invalid value for `phone_number`, must not be `None`\"", ")", "if", "len", "(", "phone_number", ")", ">", "16", ":", "raise", "ValueError", "(", "\"Invalid value for `phone_number`, length must be less than `16`\"", ")", "self", ".", "_phone_number", "=", "phone_number" ]
Sets the phone_number of this OrderFulfillmentRecipient. The phone number of the fulfillment recipient. If provided, overrides the value from customer profile indicated by customer_id. :param phone_number: The phone_number of this OrderFulfillmentRecipient. :type: str
[ "Sets", "the", "phone_number", "of", "this", "OrderFulfillmentRecipient", ".", "The", "phone", "number", "of", "the", "fulfillment", "recipient", ".", "If", "provided", "overrides", "the", "value", "from", "customer", "profile", "indicated", "by", "customer_id", "." ]
python
train
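The record above is the usual swagger-codegen validated-setter pattern; a minimal self-contained sketch of the same idea (hypothetical class, not the SDK):
class Recipient(object):
    """Minimal stand-in showing the validated-property pattern."""

    def __init__(self):
        self._phone_number = None

    @property
    def phone_number(self):
        return self._phone_number

    @phone_number.setter
    def phone_number(self, phone_number):
        if phone_number is None:
            raise ValueError("Invalid value for `phone_number`, must not be `None`")
        if len(phone_number) > 16:
            raise ValueError("Invalid value for `phone_number`, length must be less than or equal to `16`")
        self._phone_number = phone_number

r = Recipient()
r.phone_number = '+15551234567'   # ok
# r.phone_number = 'x' * 17       # would raise ValueError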
PyCQA/pylint
pylint/checkers/base.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/base.py#L1327-L1342
def visit_call(self, node): """visit a Call node -> check if this is not a blacklisted builtin call and check for * or ** use """ self._check_misplaced_format_function(node) if isinstance(node.func, astroid.Name): name = node.func.name # ignore the name if it's not a builtin (i.e. not defined in the # locals nor globals scope) if not (name in node.frame() or name in node.root()): if name == "exec": self.add_message("exec-used", node=node) elif name == "reversed": self._check_reversed(node) elif name == "eval": self.add_message("eval-used", node=node)
[ "def", "visit_call", "(", "self", ",", "node", ")", ":", "self", ".", "_check_misplaced_format_function", "(", "node", ")", "if", "isinstance", "(", "node", ".", "func", ",", "astroid", ".", "Name", ")", ":", "name", "=", "node", ".", "func", ".", "name", "# ignore the name if it's not a builtin (i.e. not defined in the", "# locals nor globals scope)", "if", "not", "(", "name", "in", "node", ".", "frame", "(", ")", "or", "name", "in", "node", ".", "root", "(", ")", ")", ":", "if", "name", "==", "\"exec\"", ":", "self", ".", "add_message", "(", "\"exec-used\"", ",", "node", "=", "node", ")", "elif", "name", "==", "\"reversed\"", ":", "self", ".", "_check_reversed", "(", "node", ")", "elif", "name", "==", "\"eval\"", ":", "self", ".", "add_message", "(", "\"eval-used\"", ",", "node", "=", "node", ")" ]
visit a Call node -> check if this is not a blacklisted builtin call and check for * or ** use
[ "visit", "a", "Call", "node", "-", ">", "check", "if", "this", "is", "not", "a", "blacklisted", "builtin", "call", "and", "check", "for", "*", "or", "**", "use" ]
python
test
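For illustration (not part of the dataset), a module the checker above would flag:
result = eval("1 + 1")   # pylint reports eval-used (W0123)
exec("x = 2")            # pylint reports exec-used (W0122)

# If this module (or the enclosing frame) defined its own `eval`, the
# `name in node.frame() or name in node.root()` guard in visit_call
# would treat it as a non-builtin and suppress the message.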
memsql/memsql-python
memsql/common/query_builder.py
https://github.com/memsql/memsql-python/blob/aac223a1b937d5b348b42af3c601a6c685ca633a/memsql/common/query_builder.py#L1-L14
def simple_expression(joiner=', ', **fields):
    """ Build a simple expression ready to be added onto another query.

        >>> simple_expression(joiner=' AND ', name='bob', role='admin')
        "`name`=%(_QB_name)s AND `role`=%(_QB_role)s", { '_QB_name': 'bob', '_QB_role': 'admin' }
    """
    expression, params = [], {}
    for field_name, value in sorted(fields.items(), key=lambda kv: kv[0]):
        key = '_QB_%s' % field_name
        expression.append('`%s`=%%(%s)s' % (field_name, key))
        params[key] = value
    return joiner.join(expression), params
[ "def", "simple_expression", "(", "joiner", "=", "', '", ",", "*", "*", "fields", ")", ":", "expression", ",", "params", "=", "[", "]", ",", "{", "}", "for", "field_name", ",", "value", "in", "sorted", "(", "fields", ".", "items", "(", ")", ",", "key", "=", "lambda", "kv", ":", "kv", "[", "0", "]", ")", ":", "key", "=", "'_QB_%s'", "%", "field_name", "expression", ".", "append", "(", "'`%s`=%%(%s)s'", "%", "(", "field_name", ",", "key", ")", ")", "params", "[", "key", "]", "=", "value", "return", "joiner", ".", "join", "(", "expression", ")", ",", "params" ]
Build a simple expression ready to be added onto another query.

>>> simple_expression(joiner=' AND ', name='bob', role='admin')
"`name`=%(_QB_name)s AND `role`=%(_QB_role)s", { '_QB_name': 'bob', '_QB_role': 'admin' }
[ "Build", "a", "simple", "expression", "ready", "to", "be", "added", "onto", "another", "query", "." ]
python
test
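The doctest above can be reproduced directly; a runnable sketch (assumes the memsql-python package is installed):
from memsql.common.query_builder import simple_expression

sql, params = simple_expression(joiner=' AND ', name='bob', role='admin')
print(sql)     # `name`=%(_QB_name)s AND `role`=%(_QB_role)s
print(params)  # {'_QB_name': 'bob', '_QB_role': 'admin'}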
CxAalto/gtfspy
gtfspy/import_validator.py
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/import_validator.py#L267-L281
def _compute_number_of_frequency_generated_stop_times(self, gtfs_source_path):
    """
    Compute the number of stop_times rows that the frequencies table
    expands to.

    Parameters
    ----------
    gtfs_source_path:
        same as for "_frequency_generated_trips_rows"

    Returns
    -------
    int
    """
    df_freq = self._frequency_generated_trips_rows(gtfs_source_path, return_df_freq=True)
    df_stop_times = source_csv_to_pandas(gtfs_source_path, "stop_times")
    df_stop_freq = pd.merge(df_freq, df_stop_times, how='outer', on='trip_id')
    return int(df_stop_freq['n_trips'].fillna(1).sum(axis=0))
[ "def", "_compute_number_of_frequency_generated_stop_times", "(", "self", ",", "gtfs_source_path", ")", ":", "df_freq", "=", "self", ".", "_frequency_generated_trips_rows", "(", "gtfs_source_path", ",", "return_df_freq", "=", "True", ")", "df_stop_times", "=", "source_csv_to_pandas", "(", "gtfs_source_path", ",", "\"stop_times\"", ")", "df_stop_freq", "=", "pd", ".", "merge", "(", "df_freq", ",", "df_stop_times", ",", "how", "=", "'outer'", ",", "on", "=", "'trip_id'", ")", "return", "int", "(", "df_stop_freq", "[", "'n_trips'", "]", ".", "fillna", "(", "1", ")", ".", "sum", "(", "axis", "=", "0", ")", ")" ]
Compute the number of stop_times rows that the frequencies table
expands to.

Parameters
----------
gtfs_source_path:
    same as for "_frequency_generated_trips_rows"

Returns
-------
int
[ "Parameters", "----------", "Same", "as", "for", "_frequency_generated_trips_rows", "but", "for", "stop", "times", "table", "gtfs_source_path", ":", "table_name", ":" ]
python
valid
rigetti/pyquil
pyquil/api/_qpu.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/api/_qpu.py#L128-L173
def run(self, run_priority: Optional[int] = None): """ Run a pyquil program on the QPU. This formats the classified data from the QPU server by stacking measured bits into an array of shape (trials, classical_addresses). The mapping of qubit to classical address is backed out from MEASURE instructions in the program, so only do measurements where there is a 1-to-1 mapping between qubits and classical addresses. :param run_priority: The priority with which to insert jobs into the QPU queue. Lower integers correspond to higher priority. If not specified, the QPU object's default priority is used. :return: The QPU object itself. """ # This prevents a common error where users expect QVM.run() # and QPU.run() to be interchangeable. QPU.run() needs the # supplied executable to have been compiled, QVM.run() does not. if isinstance(self._executable, Program): raise TypeError("It looks like you have provided a Program where an Executable" " is expected. Please use QuantumComputer.compile() to compile" " your program.") super().run() request = QPURequest(program=self._executable.program, patch_values=self._build_patch_values(), id=str(uuid.uuid4())) job_priority = run_priority if run_priority is not None else self.priority job_id = self.client.call('execute_qpu_request', request=request, user=self.user, priority=job_priority) results = self._get_buffers(job_id) ro_sources = self._executable.ro_sources if results: bitstrings = _extract_bitstrings(ro_sources, results) elif not ro_sources: warnings.warn("You are running a QPU program with no MEASURE instructions. " "The result of this program will always be an empty array. Are " "you sure you didn't mean to measure some of your qubits?") bitstrings = np.zeros((0, 0), dtype=np.int64) else: bitstrings = None self._bitstrings = bitstrings self._last_results = results return self
[ "def", "run", "(", "self", ",", "run_priority", ":", "Optional", "[", "int", "]", "=", "None", ")", ":", "# This prevents a common error where users expect QVM.run()", "# and QPU.run() to be interchangeable. QPU.run() needs the", "# supplied executable to have been compiled, QVM.run() does not.", "if", "isinstance", "(", "self", ".", "_executable", ",", "Program", ")", ":", "raise", "TypeError", "(", "\"It looks like you have provided a Program where an Executable\"", "\" is expected. Please use QuantumComputer.compile() to compile\"", "\" your program.\"", ")", "super", "(", ")", ".", "run", "(", ")", "request", "=", "QPURequest", "(", "program", "=", "self", ".", "_executable", ".", "program", ",", "patch_values", "=", "self", ".", "_build_patch_values", "(", ")", ",", "id", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", ")", "job_priority", "=", "run_priority", "if", "run_priority", "is", "not", "None", "else", "self", ".", "priority", "job_id", "=", "self", ".", "client", ".", "call", "(", "'execute_qpu_request'", ",", "request", "=", "request", ",", "user", "=", "self", ".", "user", ",", "priority", "=", "job_priority", ")", "results", "=", "self", ".", "_get_buffers", "(", "job_id", ")", "ro_sources", "=", "self", ".", "_executable", ".", "ro_sources", "if", "results", ":", "bitstrings", "=", "_extract_bitstrings", "(", "ro_sources", ",", "results", ")", "elif", "not", "ro_sources", ":", "warnings", ".", "warn", "(", "\"You are running a QPU program with no MEASURE instructions. \"", "\"The result of this program will always be an empty array. Are \"", "\"you sure you didn't mean to measure some of your qubits?\"", ")", "bitstrings", "=", "np", ".", "zeros", "(", "(", "0", ",", "0", ")", ",", "dtype", "=", "np", ".", "int64", ")", "else", ":", "bitstrings", "=", "None", "self", ".", "_bitstrings", "=", "bitstrings", "self", ".", "_last_results", "=", "results", "return", "self" ]
Run a pyquil program on the QPU. This formats the classified data from the QPU server by stacking measured bits into an array of shape (trials, classical_addresses). The mapping of qubit to classical address is backed out from MEASURE instructions in the program, so only do measurements where there is a 1-to-1 mapping between qubits and classical addresses. :param run_priority: The priority with which to insert jobs into the QPU queue. Lower integers correspond to higher priority. If not specified, the QPU object's default priority is used. :return: The QPU object itself.
[ "Run", "a", "pyquil", "program", "on", "the", "QPU", "." ]
python
train
chrisspen/burlap
burlap/project.py
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/project.py#L164-L181
def update_settings(self, d, role, path='roles/{role}/settings.yaml'):
    """
    Writes a key/value pair to a settings file.
    """
    try:
        import ruamel.yaml
        load_func = ruamel.yaml.round_trip_load
        dump_func = ruamel.yaml.round_trip_dump
    except ImportError:
        print('Warning: ruamel.yaml not available, reverting to yaml package, possible loss of formatting may occur.')
        import yaml
        load_func = yaml.load
        dump_func = yaml.dump
    settings_fn = path.format(role=role)
    data = load_func(open(settings_fn))
    data.update(d)
    settings_str = dump_func(data)
    open(settings_fn, 'w').write(settings_str)
[ "def", "update_settings", "(", "self", ",", "d", ",", "role", ",", "path", "=", "'roles/{role}/settings.yaml'", ")", ":", "try", ":", "import", "ruamel", ".", "yaml", "load_func", "=", "ruamel", ".", "yaml", ".", "round_trip_load", "dump_func", "=", "ruamel", ".", "yaml", ".", "round_trip_dump", "except", "ImportError", ":", "print", "(", "'Warning: ruamel.yaml not available, reverting to yaml package, possible lost of formatting may occur.'", ")", "import", "yaml", "load_func", "=", "yaml", ".", "load", "dump_func", "=", "yaml", ".", "dump", "settings_fn", "=", "path", ".", "format", "(", "role", "=", "role", ")", "data", "=", "load_func", "(", "open", "(", "settings_fn", ")", ")", "data", ".", "update", "(", "d", ")", "settings_str", "=", "dump_func", "(", "data", ")", "open", "(", "settings_fn", ",", "'w'", ")", ".", "write", "(", "settings_str", ")" ]
Writes a key/value pair to a settings file.
[ "Writes", "a", "key", "/", "value", "pair", "to", "a", "settings", "file", "." ]
python
valid
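The method above relies on ruamel.yaml's round-trip mode to preserve comments and ordering; a standalone sketch of that pattern (the YAML content is hypothetical):
import ruamel.yaml

# Round-trip load keeps comments, key order and formatting intact,
# which plain yaml.load/yaml.dump would discard.
text = "db_host: localhost  # primary database\ndb_port: 5432\n"
data = ruamel.yaml.round_trip_load(text)
data.update({'db_port': 5433})
print(ruamel.yaml.round_trip_dump(data))
# db_host: localhost  # primary database
# db_port: 5433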
estnltk/estnltk
estnltk/clausesegmenter.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/clausesegmenter.py#L93-L106
def rename_annotations(self, sentence): """Function that renames and restructures clause information.""" annotations = [] for token in sentence: data = {CLAUSE_IDX: token[CLAUSE_IDX]} if CLAUSE_ANNOT in token: if 'KINDEL_PIIR' in token[CLAUSE_ANNOT]: data[CLAUSE_ANNOTATION] = CLAUSE_BOUNDARY elif 'KIILU_ALGUS' in token[CLAUSE_ANNOT]: data[CLAUSE_ANNOTATION] = EMBEDDED_CLAUSE_START elif 'KIILU_LOPP' in token[CLAUSE_ANNOT]: data[CLAUSE_ANNOTATION] = EMBEDDED_CLAUSE_END annotations.append(data) return annotations
[ "def", "rename_annotations", "(", "self", ",", "sentence", ")", ":", "annotations", "=", "[", "]", "for", "token", "in", "sentence", ":", "data", "=", "{", "CLAUSE_IDX", ":", "token", "[", "CLAUSE_IDX", "]", "}", "if", "CLAUSE_ANNOT", "in", "token", ":", "if", "'KINDEL_PIIR'", "in", "token", "[", "CLAUSE_ANNOT", "]", ":", "data", "[", "CLAUSE_ANNOTATION", "]", "=", "CLAUSE_BOUNDARY", "elif", "'KIILU_ALGUS'", "in", "token", "[", "CLAUSE_ANNOT", "]", ":", "data", "[", "CLAUSE_ANNOTATION", "]", "=", "EMBEDDED_CLAUSE_START", "elif", "'KIILU_LOPP'", "in", "token", "[", "CLAUSE_ANNOT", "]", ":", "data", "[", "CLAUSE_ANNOTATION", "]", "=", "EMBEDDED_CLAUSE_END", "annotations", ".", "append", "(", "data", ")", "return", "annotations" ]
Function that renames and restructures clause information.
[ "Function", "that", "renames", "and", "restructures", "clause", "information", "." ]
python
train
edx/edx-organizations
organizations/data.py
https://github.com/edx/edx-organizations/blob/51000d5d359d880a6eb3a79345f60744f1982c00/organizations/data.py#L75-L85
def _activate_organization_course_relationship(relationship): # pylint: disable=invalid-name """ Activates an inactive organization-course relationship """ # If the relationship doesn't exist or the organization isn't active we'll want to raise an error relationship = internal.OrganizationCourse.objects.get( id=relationship.id, active=False, organization__active=True ) _activate_record(relationship)
[ "def", "_activate_organization_course_relationship", "(", "relationship", ")", ":", "# pylint: disable=invalid-name", "# If the relationship doesn't exist or the organization isn't active we'll want to raise an error", "relationship", "=", "internal", ".", "OrganizationCourse", ".", "objects", ".", "get", "(", "id", "=", "relationship", ".", "id", ",", "active", "=", "False", ",", "organization__active", "=", "True", ")", "_activate_record", "(", "relationship", ")" ]
Activates an inactive organization-course relationship
[ "Activates", "an", "inactive", "organization", "-", "course", "relationship" ]
python
valid
iskandr/fancyimpute
fancyimpute/dictionary_helpers.py
https://github.com/iskandr/fancyimpute/blob/9f0837d387c7303d5c8c925a9989ca77a1a96e3e/fancyimpute/dictionary_helpers.py#L191-L200
def uncurry_nested_dictionary(curried_dict): """ Transform dictionary from (key_a -> key_b -> float) to (key_a, key_b) -> float """ result = {} for a, a_dict in curried_dict.items(): for b, value in a_dict.items(): result[(a, b)] = value return result
[ "def", "uncurry_nested_dictionary", "(", "curried_dict", ")", ":", "result", "=", "{", "}", "for", "a", ",", "a_dict", "in", "curried_dict", ".", "items", "(", ")", ":", "for", "b", ",", "value", "in", "a_dict", ".", "items", "(", ")", ":", "result", "[", "(", "a", ",", "b", ")", "]", "=", "value", "return", "result" ]
Transform dictionary from (key_a -> key_b -> float) to (key_a, key_b) -> float
[ "Transform", "dictionary", "from", "(", "key_a", "-", ">", "key_b", "-", ">", "float", ")", "to", "(", "key_a", "key_b", ")", "-", ">", "float" ]
python
train
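A usage sketch; the function is pure, so it can be exercised standalone:
from fancyimpute.dictionary_helpers import uncurry_nested_dictionary

nested = {'a': {'x': 1.0, 'y': 2.0}, 'b': {'x': 3.0}}
flat = uncurry_nested_dictionary(nested)
print(flat)  # {('a', 'x'): 1.0, ('a', 'y'): 2.0, ('b', 'x'): 3.0}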
pdkit/pdkit
pdkit/utils.py
https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/utils.py#L633-L660
def smoothing_window(data, window=[1, 1, 1]): """ This is a smoothing functionality so we can fix misclassifications. It will run a sliding window of form [border, smoothing, border] on the signal and if the border elements are the same it will change the smooth elements to match the border. An example would be for a window of [2, 1, 2] we have the following elements [1, 1, 0, 1, 1], this will transform it into [1, 1, 1, 1, 1]. So if the border elements match it will transform the middle (smoothing) into the same as the border. :param data array: One-dimensional array. :param window array: Used to define the [border, smoothing, border] regions. :return data array: The smoothed version of the original data. """ for i in range(len(data) - sum(window)): start_window_from = i start_window_to = i+window[0] end_window_from = start_window_to + window[1] end_window_to = end_window_from + window[2] if np.all(data[start_window_from: start_window_to] == data[end_window_from: end_window_to]): data[start_window_from: end_window_to] = data[start_window_from] return data
[ "def", "smoothing_window", "(", "data", ",", "window", "=", "[", "1", ",", "1", ",", "1", "]", ")", ":", "for", "i", "in", "range", "(", "len", "(", "data", ")", "-", "sum", "(", "window", ")", ")", ":", "start_window_from", "=", "i", "start_window_to", "=", "i", "+", "window", "[", "0", "]", "end_window_from", "=", "start_window_to", "+", "window", "[", "1", "]", "end_window_to", "=", "end_window_from", "+", "window", "[", "2", "]", "if", "np", ".", "all", "(", "data", "[", "start_window_from", ":", "start_window_to", "]", "==", "data", "[", "end_window_from", ":", "end_window_to", "]", ")", ":", "data", "[", "start_window_from", ":", "end_window_to", "]", "=", "data", "[", "start_window_from", "]", "return", "data" ]
This is a smoothing functionality so we can fix misclassifications. It will run a sliding window of form [border, smoothing, border] on the signal and if the border elements are the same it will change the smooth elements to match the border. An example would be for a window of [2, 1, 2] we have the following elements [1, 1, 0, 1, 1], this will transform it into [1, 1, 1, 1, 1]. So if the border elements match it will transform the middle (smoothing) into the same as the border. :param data array: One-dimensional array. :param window array: Used to define the [border, smoothing, border] regions. :return data array: The smoothed version of the original data.
[ "This", "is", "a", "smoothing", "functionality", "so", "we", "can", "fix", "misclassifications", ".", "It", "will", "run", "a", "sliding", "window", "of", "form", "[", "border", "smoothing", "border", "]", "on", "the", "signal", "and", "if", "the", "border", "elements", "are", "the", "same", "it", "will", "change", "the", "smooth", "elements", "to", "match", "the", "border", ".", "An", "example", "would", "be", "for", "a", "window", "of", "[", "2", "1", "2", "]", "we", "have", "the", "following", "elements", "[", "1", "1", "0", "1", "1", "]", "this", "will", "transform", "it", "into", "[", "1", "1", "1", "1", "1", "]", ".", "So", "if", "the", "border", "elements", "match", "it", "will", "transform", "the", "middle", "(", "smoothing", ")", "into", "the", "same", "as", "the", "border", ".", ":", "param", "data", "array", ":", "One", "-", "dimensional", "array", ".", ":", "param", "window", "array", ":", "Used", "to", "define", "the", "[", "border", "smoothing", "border", "]", "regions", ".", ":", "return", "data", "array", ":", "The", "smoothed", "version", "of", "the", "original", "data", "." ]
python
train
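A usage sketch mirroring the docstring example (needs numpy, since the function uses np.all):
import numpy as np
from pdkit.utils import smoothing_window

signal = np.array([1, 1, 0, 1, 1, 0, 0, 0])
smoothed = smoothing_window(signal.copy(), window=[2, 1, 2])
print(smoothed)  # [1 1 1 1 1 0 0 0]: the lone 0 between matching [1, 1] borders is flipped to 1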
quantopian/trading_calendars
trading_calendars/trading_calendar.py
https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/trading_calendar.py#L465-L492
def next_session_label(self, session_label): """ Given a session label, returns the label of the next session. Parameters ---------- session_label: pd.Timestamp A session whose next session is desired. Returns ------- pd.Timestamp The next session label (midnight UTC). Notes ----- Raises ValueError if the given session is the last session in this calendar. """ idx = self.schedule.index.get_loc(session_label) try: return self.schedule.index[idx + 1] except IndexError: if idx == len(self.schedule.index) - 1: raise ValueError("There is no next session as this is the end" " of the exchange calendar.") else: raise
[ "def", "next_session_label", "(", "self", ",", "session_label", ")", ":", "idx", "=", "self", ".", "schedule", ".", "index", ".", "get_loc", "(", "session_label", ")", "try", ":", "return", "self", ".", "schedule", ".", "index", "[", "idx", "+", "1", "]", "except", "IndexError", ":", "if", "idx", "==", "len", "(", "self", ".", "schedule", ".", "index", ")", "-", "1", ":", "raise", "ValueError", "(", "\"There is no next session as this is the end\"", "\" of the exchange calendar.\"", ")", "else", ":", "raise" ]
Given a session label, returns the label of the next session. Parameters ---------- session_label: pd.Timestamp A session whose next session is desired. Returns ------- pd.Timestamp The next session label (midnight UTC). Notes ----- Raises ValueError if the given session is the last session in this calendar.
[ "Given", "a", "session", "label", "returns", "the", "label", "of", "the", "next", "session", "." ]
python
train
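A hedged usage sketch; it assumes the trading_calendars package with the 'XNYS' calendar, and the exact labels depend on the shipped calendar data:
import pandas as pd
import trading_calendars

cal = trading_calendars.get_calendar('XNYS')
session = pd.Timestamp('2019-01-02', tz='UTC')  # a regular NYSE session
print(cal.next_session_label(session))          # 2019-01-03 00:00:00+00:00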
lambdamusic/Ontospy
ontospy/extras/hacks/click_example.py
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/hacks/click_example.py#L37-L56
def hello():
    """http://click.pocoo.org/5/
    http://click.pocoo.org/5/api/
    """
    click.clear()
    click.secho('Hello World!', fg='green')
    click.secho('Some more text', bg='blue', fg='white')
    click.secho('ATTENTION', blink=True, bold=True)
    click.echo('Continue? [yn] ', nl=False)
    c = click.getchar()
    click.echo()
    if c == 'y':
        click.echo('We will go on')
    elif c == 'n':
        click.echo('Abort!')
    else:
        click.echo('Invalid input :(')
    click.echo_via_pager('\n'.join('Line %d' % idx for idx in range(200)))
[ "def", "hello", "(", ")", ":", "click", ".", "clear", "(", ")", "click", ".", "secho", "(", "'Hello World!'", ",", "fg", "=", "'green'", ")", "click", ".", "secho", "(", "'Some more text'", ",", "bg", "=", "'blue'", ",", "fg", "=", "'white'", ")", "click", ".", "secho", "(", "'ATTENTION'", ",", "blink", "=", "True", ",", "bold", "=", "True", ")", "click", ".", "echo", "(", "'Continue? [yn] '", ",", "nl", "=", "False", ")", "c", "=", "click", ".", "getchar", "(", ")", "click", ".", "echo", "(", ")", "if", "c", "==", "'y'", ":", "click", ".", "echo", "(", "'We will go on'", ")", "elif", "c", "==", "'n'", ":", "click", ".", "echo", "(", "'Abort!'", ")", "else", ":", "click", ".", "echo", "(", "'Invalid input :('", ")", "click", ".", "echo_via_pager", "(", "'\\n'", ".", "join", "(", "'Line %d'", "%", "idx", "for", "idx", "in", "range", "(", "200", ")", ")", ")" ]
http://click.pocoo.org/5/ http://click.pocoo.org/5/api/
[ "http", ":", "//", "click", ".", "pocoo", ".", "org", "/", "5", "/", "http", ":", "//", "click", ".", "pocoo", ".", "org", "/", "5", "/", "api", "/" ]
python
train
Microsoft/botbuilder-python
libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_context.py
https://github.com/Microsoft/botbuilder-python/blob/274663dd91c811bae6ac4488915ba5880771b0a7/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_context.py#L202-L214
async def replace_dialog(self, dialog_id: str, options: object = None) -> DialogTurnResult: """ Ends the active dialog and starts a new dialog in its place. This is particularly useful for creating loops or redirecting to another dialog. :param dialog_id: ID of the dialog to search for. :param options: (Optional) additional argument(s) to pass to the new dialog. :return: """ # End the current dialog and giving the reason. await self.end_active_dialog(DialogReason.ReplaceCalled) # Start replacement dialog return await self.begin_dialog(dialog_id, options)
[ "async", "def", "replace_dialog", "(", "self", ",", "dialog_id", ":", "str", ",", "options", ":", "object", "=", "None", ")", "->", "DialogTurnResult", ":", "# End the current dialog and giving the reason.", "await", "self", ".", "end_active_dialog", "(", "DialogReason", ".", "ReplaceCalled", ")", "# Start replacement dialog", "return", "await", "self", ".", "begin_dialog", "(", "dialog_id", ",", "options", ")" ]
Ends the active dialog and starts a new dialog in its place. This is particularly useful for creating loops or redirecting to another dialog. :param dialog_id: ID of the dialog to search for. :param options: (Optional) additional argument(s) to pass to the new dialog. :return:
[ "Ends", "the", "active", "dialog", "and", "starts", "a", "new", "dialog", "in", "its", "place", ".", "This", "is", "particularly", "useful", "for", "creating", "loops", "or", "redirecting", "to", "another", "dialog", ".", ":", "param", "dialog_id", ":", "ID", "of", "the", "dialog", "to", "search", "for", ".", ":", "param", "options", ":", "(", "Optional", ")", "additional", "argument", "(", "s", ")", "to", "pass", "to", "the", "new", "dialog", ".", ":", "return", ":" ]
python
test
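A hedged sketch of how this is typically called from inside a waterfall step; the dialog ID and options are hypothetical:
from botbuilder.dialogs import WaterfallStepContext, DialogTurnResult

async def review_step(step: WaterfallStepContext) -> DialogTurnResult:
    # Restart the same flow with fresh options instead of stacking dialogs:
    # end_active_dialog() fires first, then begin_dialog() starts the replacement.
    return await step.replace_dialog("reviewLoop", options={"round": 2})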
binux/pyspider
pyspider/libs/utils.py
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/libs/utils.py#L316-L330
def decode_unicode_obj(obj): """ Decode unicoded dict/list/tuple encoded by `unicode_obj` """ if isinstance(obj, dict): r = {} for k, v in iteritems(obj): r[decode_unicode_string(k)] = decode_unicode_obj(v) return r elif isinstance(obj, six.string_types): return decode_unicode_string(obj) elif isinstance(obj, (list, tuple)): return [decode_unicode_obj(x) for x in obj] else: return obj
[ "def", "decode_unicode_obj", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "dict", ")", ":", "r", "=", "{", "}", "for", "k", ",", "v", "in", "iteritems", "(", "obj", ")", ":", "r", "[", "decode_unicode_string", "(", "k", ")", "]", "=", "decode_unicode_obj", "(", "v", ")", "return", "r", "elif", "isinstance", "(", "obj", ",", "six", ".", "string_types", ")", ":", "return", "decode_unicode_string", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "[", "decode_unicode_obj", "(", "x", ")", "for", "x", "in", "obj", "]", "else", ":", "return", "obj" ]
Decode unicoded dict/list/tuple encoded by `unicode_obj`
[ "Decode", "unicoded", "dict", "/", "list", "/", "tuple", "encoded", "by", "unicode_obj" ]
python
train
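A usage sketch (assumes pyspider is installed); note that the recursion turns tuples into lists:
from pyspider.libs.utils import decode_unicode_obj

obj = {u'key': [u'value', (u'nested', 1)]}
print(decode_unicode_obj(obj))
# {'key': ['value', ['nested', 1]]}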
AguaClara/aguaclara
aguaclara/research/environmental_processes_analysis.py
https://github.com/AguaClara/aguaclara/blob/8dd4e734768b166a7fc2b60388a24df2f93783fc/aguaclara/research/environmental_processes_analysis.py#L58-L75
def alpha1_carbonate(pH): """Calculate the fraction of total carbonates in bicarbonate form (HCO3-) :param pH: pH of the system :type pH: float :return: Fraction of carbonates in bicarbonate form (HCO3-) :rtype: float :Examples: >>> from aguaclara.research.environmental_processes_analysis import alpha1_carbonate >>> round(alpha1_carbonate(10), 7) <Quantity(0.639969, 'dimensionless')> """ alpha1_carbonate = 1/((invpH(pH)/K1_carbonate) + 1 + (K2_carbonate/invpH(pH))) return alpha1_carbonate
[ "def", "alpha1_carbonate", "(", "pH", ")", ":", "alpha1_carbonate", "=", "1", "/", "(", "(", "invpH", "(", "pH", ")", "/", "K1_carbonate", ")", "+", "1", "+", "(", "K2_carbonate", "/", "invpH", "(", "pH", ")", ")", ")", "return", "alpha1_carbonate" ]
Calculate the fraction of total carbonates in bicarbonate form (HCO3-) :param pH: pH of the system :type pH: float :return: Fraction of carbonates in bicarbonate form (HCO3-) :rtype: float :Examples: >>> from aguaclara.research.environmental_processes_analysis import alpha1_carbonate >>> round(alpha1_carbonate(10), 7) <Quantity(0.639969, 'dimensionless')>
[ "Calculate", "the", "fraction", "of", "total", "carbonates", "in", "bicarbonate", "form", "(", "HCO3", "-", ")" ]
python
train
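The underlying speciation formula can be checked standalone; a sketch with plain floats, where K1 = 10^-6.3 and K2 = 10^-10.3 are assumed textbook constants (the package supplies its own constants and pint units, hence its slightly different value):
def alpha1(pH, K1=10**-6.3, K2=10**-10.3):
    # fraction of total carbonates present as HCO3-:
    # alpha1 = 1 / ([H+]/K1 + 1 + K2/[H+])
    H = 10**-pH
    return 1.0 / (H / K1 + 1.0 + K2 / H)

print(round(alpha1(10), 3))  # ~0.666 with these assumed constants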
brutasse/graphite-api
graphite_api/functions.py
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L1789-L1828
def areaBetween(requestContext, *seriesLists): """ Draws the vertical area in between the two series in seriesList. Useful for visualizing a range such as the minimum and maximum latency for a service. areaBetween expects **exactly one argument** that results in exactly two series (see example below). The order of the lower and higher values series does not matter. The visualization only works when used in conjunction with ``areaMode=stacked``. Most likely use case is to provide a band within which another metric should move. In such case applying an ``alpha()``, as in the second example, gives best visual results. Example:: &target=areaBetween(service.latency.{min,max})&areaMode=stacked &target=alpha(areaBetween(service.latency.{min,max}),0.3)&areaMode=stacked If for instance, you need to build a seriesList, you should use the ``group`` function, like so:: &target=areaBetween(group(minSeries(a.*.min),maxSeries(a.*.max))) """ if len(seriesLists) == 1: [seriesLists] = seriesLists assert len(seriesLists) == 2, ("areaBetween series argument must " "reference *exactly* 2 series") lower, upper = seriesLists if len(lower) == 1: [lower] = lower if len(upper) == 1: [upper] = upper lower.options['stacked'] = True lower.options['invisible'] = True upper.options['stacked'] = True lower.name = upper.name = "areaBetween(%s)" % upper.pathExpression return [lower, upper]
[ "def", "areaBetween", "(", "requestContext", ",", "*", "seriesLists", ")", ":", "if", "len", "(", "seriesLists", ")", "==", "1", ":", "[", "seriesLists", "]", "=", "seriesLists", "assert", "len", "(", "seriesLists", ")", "==", "2", ",", "(", "\"areaBetween series argument must \"", "\"reference *exactly* 2 series\"", ")", "lower", ",", "upper", "=", "seriesLists", "if", "len", "(", "lower", ")", "==", "1", ":", "[", "lower", "]", "=", "lower", "if", "len", "(", "upper", ")", "==", "1", ":", "[", "upper", "]", "=", "upper", "lower", ".", "options", "[", "'stacked'", "]", "=", "True", "lower", ".", "options", "[", "'invisible'", "]", "=", "True", "upper", ".", "options", "[", "'stacked'", "]", "=", "True", "lower", ".", "name", "=", "upper", ".", "name", "=", "\"areaBetween(%s)\"", "%", "upper", ".", "pathExpression", "return", "[", "lower", ",", "upper", "]" ]
Draws the vertical area in between the two series in seriesList. Useful for visualizing a range such as the minimum and maximum latency for a service. areaBetween expects **exactly one argument** that results in exactly two series (see example below). The order of the lower and higher values series does not matter. The visualization only works when used in conjunction with ``areaMode=stacked``. Most likely use case is to provide a band within which another metric should move. In such case applying an ``alpha()``, as in the second example, gives best visual results. Example:: &target=areaBetween(service.latency.{min,max})&areaMode=stacked &target=alpha(areaBetween(service.latency.{min,max}),0.3)&areaMode=stacked If for instance, you need to build a seriesList, you should use the ``group`` function, like so:: &target=areaBetween(group(minSeries(a.*.min),maxSeries(a.*.max)))
[ "Draws", "the", "vertical", "area", "in", "between", "the", "two", "series", "in", "seriesList", ".", "Useful", "for", "visualizing", "a", "range", "such", "as", "the", "minimum", "and", "maximum", "latency", "for", "a", "service", "." ]
python
train
gijzelaerr/python-snap7
snap7/client.py
https://github.com/gijzelaerr/python-snap7/blob/a6db134c7a3a2ef187b9eca04669221d6fc634c3/snap7/client.py#L388-L397
def get_connected(self): """ Returns the connection status :returns: a boolean that indicates if connected. """ connected = c_int32() result = self.library.Cli_GetConnected(self.pointer, byref(connected)) check_error(result, context="client") return bool(connected)
[ "def", "get_connected", "(", "self", ")", ":", "connected", "=", "c_int32", "(", ")", "result", "=", "self", ".", "library", ".", "Cli_GetConnected", "(", "self", ".", "pointer", ",", "byref", "(", "connected", ")", ")", "check_error", "(", "result", ",", "context", "=", "\"client\"", ")", "return", "bool", "(", "connected", ")" ]
Returns the connection status :returns: a boolean that indicates if connected.
[ "Returns", "the", "connection", "status" ]
python
train
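A hedged usage sketch; it assumes a reachable S7 PLC at 192.168.0.1, rack 0, slot 1:
import snap7

client = snap7.client.Client()
client.connect('192.168.0.1', 0, 1)   # ip, rack, slot
print(client.get_connected())         # True if the TCP/ISO connection is up
client.disconnect()
print(client.get_connected())         # False after disconnecting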
myint/rstcheck
rstcheck.py
https://github.com/myint/rstcheck/blob/2f975906b75f3b88d501ef3b13d213815cf7079a/rstcheck.py#L447-L471
def ignore_sphinx(): """Register Sphinx directives and roles to ignore.""" (directives, roles) = _get_directives_and_roles_from_sphinx() directives += [ 'centered', 'include', 'deprecated', 'index', 'no-code-block', 'literalinclude', 'hlist', 'seealso', 'toctree', 'todo', 'versionadded', 'versionchanged'] ext_autosummary = [ 'autosummary', 'currentmodule', ] ignore_directives_and_roles(directives + ext_autosummary, roles + ['ctype'])
[ "def", "ignore_sphinx", "(", ")", ":", "(", "directives", ",", "roles", ")", "=", "_get_directives_and_roles_from_sphinx", "(", ")", "directives", "+=", "[", "'centered'", ",", "'include'", ",", "'deprecated'", ",", "'index'", ",", "'no-code-block'", ",", "'literalinclude'", ",", "'hlist'", ",", "'seealso'", ",", "'toctree'", ",", "'todo'", ",", "'versionadded'", ",", "'versionchanged'", "]", "ext_autosummary", "=", "[", "'autosummary'", ",", "'currentmodule'", ",", "]", "ignore_directives_and_roles", "(", "directives", "+", "ext_autosummary", ",", "roles", "+", "[", "'ctype'", "]", ")" ]
Register Sphinx directives and roles to ignore.
[ "Register", "Sphinx", "directives", "and", "roles", "to", "ignore", "." ]
python
train
jbeluch/xbmcswift2
xbmcswift2/xbmcmixin.py
https://github.com/jbeluch/xbmcswift2/blob/0e7a3642499554edc8265fdf1ba6c5ee567daa78/xbmcswift2/xbmcmixin.py#L420-L434
def end_of_directory(self, succeeded=True, update_listing=False, cache_to_disc=True): '''Wrapper for xbmcplugin.endOfDirectory. Records state in self._end_of_directory. Typically it is not necessary to call this method directly, as calling :meth:`~xbmcswift2.Plugin.finish` will call this method. ''' self._update_listing = update_listing if not self._end_of_directory: self._end_of_directory = True # Finalize the directory items return xbmcplugin.endOfDirectory(self.handle, succeeded, update_listing, cache_to_disc) assert False, 'Already called endOfDirectory.'
[ "def", "end_of_directory", "(", "self", ",", "succeeded", "=", "True", ",", "update_listing", "=", "False", ",", "cache_to_disc", "=", "True", ")", ":", "self", ".", "_update_listing", "=", "update_listing", "if", "not", "self", ".", "_end_of_directory", ":", "self", ".", "_end_of_directory", "=", "True", "# Finalize the directory items", "return", "xbmcplugin", ".", "endOfDirectory", "(", "self", ".", "handle", ",", "succeeded", ",", "update_listing", ",", "cache_to_disc", ")", "assert", "False", ",", "'Already called endOfDirectory.'" ]
Wrapper for xbmcplugin.endOfDirectory. Records state in self._end_of_directory. Typically it is not necessary to call this method directly, as calling :meth:`~xbmcswift2.Plugin.finish` will call this method.
[ "Wrapper", "for", "xbmcplugin", ".", "endOfDirectory", ".", "Records", "state", "in", "self", ".", "_end_of_directory", "." ]
python
train
limix/limix-core
limix_core/optimize/optimize_bfgs.py
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/optimize/optimize_bfgs.py#L66-L142
def opt_hyper(gpr,Ifilter=None,bounds=None,opts={},*args,**kw_args): """ optimize params Input: gpr: GP regression class params0: dictionary filled with starting hyperparameters opts: options for optimizer """ if 'gradcheck' in opts: gradcheck = opts['gradcheck'] else: gradcheck = False if 'max_iter_opt' in opts: max_iter = opts['max_iter_opt'] else: max_iter = 5000 if 'pgtol' in opts: pgtol = opts['pgtol'] else: pgtol = 1e-10 params0 = gpr.getParams() def f(x): x_ = X0 x_[Ifilter_x] = x gpr.setParams(param_list_to_dict(x_,param_struct,skeys)) lml = gpr.LML() if SP.isnan(lml): lml=1E6 lml_grad = gpr.LML_grad() lml_grad = param_dict_to_list(lml_grad,skeys) if (~SP.isfinite(lml_grad)).any(): idx = (~SP.isfinite(lml_grad)) lml_grad[idx] = 1E6 return lml, lml_grad[Ifilter_x] skeys = SP.sort(list(params0.keys())) param_struct = dict([(name,params0[name].shape) for name in skeys]) # mask params that should not be optimized X0 = param_dict_to_list(params0,skeys) if Ifilter is not None: Ifilter_x = SP.array(param_dict_to_list(Ifilter,skeys),dtype=bool) else: Ifilter_x = SP.ones(len(X0),dtype='bool') # add bounds if necessary if bounds is not None: _b = [] for key in skeys: if key in list(bounds.keys()): _b.extend(bounds[key]) else: _b.extend([[-SP.inf,+SP.inf]]*params0[key].size) bounds = SP.array(_b) bounds = bounds[Ifilter_x] LG.info('Starting optimization ...') t = time.time() x = X0.copy()[Ifilter_x] RV = optimize(f,x,maxfun=int(max_iter),pgtol=pgtol,bounds=bounds,**kw_args) #RVopt = optimize(f,x,messages=True,maxfun=int(max_iter),pgtol=pgtol,bounds=bounds) #LG.info('%s'%OPT.tnc.RCSTRINGS[RVopt[2]]) #LG.info('Optimization is converged at iteration %d'%RVopt[1]) #LG.info('Total time: %.2fs'%(time.time()-t)) info = RV[2] conv = info['warnflag']==0 if gradcheck: err = OPT.check_grad(f,df,xopt) LG.info("check_grad (post): %.2f"%err) return conv,info
[ "def", "opt_hyper", "(", "gpr", ",", "Ifilter", "=", "None", ",", "bounds", "=", "None", ",", "opts", "=", "{", "}", ",", "*", "args", ",", "*", "*", "kw_args", ")", ":", "if", "'gradcheck'", "in", "opts", ":", "gradcheck", "=", "opts", "[", "'gradcheck'", "]", "else", ":", "gradcheck", "=", "False", "if", "'max_iter_opt'", "in", "opts", ":", "max_iter", "=", "opts", "[", "'max_iter_opt'", "]", "else", ":", "max_iter", "=", "5000", "if", "'pgtol'", "in", "opts", ":", "pgtol", "=", "opts", "[", "'pgtol'", "]", "else", ":", "pgtol", "=", "1e-10", "params0", "=", "gpr", ".", "getParams", "(", ")", "def", "f", "(", "x", ")", ":", "x_", "=", "X0", "x_", "[", "Ifilter_x", "]", "=", "x", "gpr", ".", "setParams", "(", "param_list_to_dict", "(", "x_", ",", "param_struct", ",", "skeys", ")", ")", "lml", "=", "gpr", ".", "LML", "(", ")", "if", "SP", ".", "isnan", "(", "lml", ")", ":", "lml", "=", "1E6", "lml_grad", "=", "gpr", ".", "LML_grad", "(", ")", "lml_grad", "=", "param_dict_to_list", "(", "lml_grad", ",", "skeys", ")", "if", "(", "~", "SP", ".", "isfinite", "(", "lml_grad", ")", ")", ".", "any", "(", ")", ":", "idx", "=", "(", "~", "SP", ".", "isfinite", "(", "lml_grad", ")", ")", "lml_grad", "[", "idx", "]", "=", "1E6", "return", "lml", ",", "lml_grad", "[", "Ifilter_x", "]", "skeys", "=", "SP", ".", "sort", "(", "list", "(", "params0", ".", "keys", "(", ")", ")", ")", "param_struct", "=", "dict", "(", "[", "(", "name", ",", "params0", "[", "name", "]", ".", "shape", ")", "for", "name", "in", "skeys", "]", ")", "# mask params that should not be optimized", "X0", "=", "param_dict_to_list", "(", "params0", ",", "skeys", ")", "if", "Ifilter", "is", "not", "None", ":", "Ifilter_x", "=", "SP", ".", "array", "(", "param_dict_to_list", "(", "Ifilter", ",", "skeys", ")", ",", "dtype", "=", "bool", ")", "else", ":", "Ifilter_x", "=", "SP", ".", "ones", "(", "len", "(", "X0", ")", ",", "dtype", "=", "'bool'", ")", "# add bounds if necessary", "if", "bounds", "is", "not", "None", ":", "_b", "=", "[", "]", "for", "key", "in", "skeys", ":", "if", "key", "in", "list", "(", "bounds", ".", "keys", "(", ")", ")", ":", "_b", ".", "extend", "(", "bounds", "[", "key", "]", ")", "else", ":", "_b", ".", "extend", "(", "[", "[", "-", "SP", ".", "inf", ",", "+", "SP", ".", "inf", "]", "]", "*", "params0", "[", "key", "]", ".", "size", ")", "bounds", "=", "SP", ".", "array", "(", "_b", ")", "bounds", "=", "bounds", "[", "Ifilter_x", "]", "LG", ".", "info", "(", "'Starting optimization ...'", ")", "t", "=", "time", ".", "time", "(", ")", "x", "=", "X0", ".", "copy", "(", ")", "[", "Ifilter_x", "]", "RV", "=", "optimize", "(", "f", ",", "x", ",", "maxfun", "=", "int", "(", "max_iter", ")", ",", "pgtol", "=", "pgtol", ",", "bounds", "=", "bounds", ",", "*", "*", "kw_args", ")", "#RVopt = optimize(f,x,messages=True,maxfun=int(max_iter),pgtol=pgtol,bounds=bounds)", "#LG.info('%s'%OPT.tnc.RCSTRINGS[RVopt[2]])", "#LG.info('Optimization is converged at iteration %d'%RVopt[1])", "#LG.info('Total time: %.2fs'%(time.time()-t))", "info", "=", "RV", "[", "2", "]", "conv", "=", "info", "[", "'warnflag'", "]", "==", "0", "if", "gradcheck", ":", "err", "=", "OPT", ".", "check_grad", "(", "f", ",", "df", ",", "xopt", ")", "LG", ".", "info", "(", "\"check_grad (post): %.2f\"", "%", "err", ")", "return", "conv", ",", "info" ]
optimize params Input: gpr: GP regression class params0: dictionary filled with starting hyperparameters opts: options for optimizer
[ "optimize", "params" ]
python
train
ZELLMECHANIK-DRESDEN/dclab
dclab/rtdc_dataset/export.py
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/export.py#L265-L317
def tsv(self, path, features, filtered=True, override=False): """Export the data of the current instance to a .tsv file Parameters ---------- path: str Path to a .tsv file. The ending .tsv is added automatically. features: list of str The features in the resulting .tsv file. These are strings that are defined in `dclab.definitions.scalar_feature_names`, e.g. "area_cvx", "deform", "frame", "fl1_max", "aspect". filtered: bool If set to `True`, only the filtered data (index in ds._filter) are used. override: bool If set to `True`, an existing file ``path`` will be overridden. If set to `False`, raises `OSError` if ``path`` exists. """ features = [c.lower() for c in features] path = pathlib.Path(path) ds = self.rtdc_ds # Make sure that path ends with .tsv if path.suffix != ".tsv": path = path.with_name(path.name + ".tsv") # Check if file already exist if not override and path.exists(): raise OSError("File already exists: {}\n".format( str(path).encode("ascii", "ignore")) + "Please use the `override=True` option.") # Check that features are in dfn.scalar_feature_names for c in features: if c not in dfn.scalar_feature_names: raise ValueError("Unknown feature name {}".format(c)) # Open file with path.open("w") as fd: # write header header1 = "\t".join([c for c in features]) fd.write("# "+header1+"\n") header2 = "\t".join([dfn.feature_name2label[c] for c in features]) fd.write("# "+header2+"\n") with path.open("ab") as fd: # write data if filtered: data = [ds[c][ds._filter] for c in features] else: data = [ds[c] for c in features] np.savetxt(fd, np.array(data).transpose(), fmt=str("%.10e"), delimiter="\t")
[ "def", "tsv", "(", "self", ",", "path", ",", "features", ",", "filtered", "=", "True", ",", "override", "=", "False", ")", ":", "features", "=", "[", "c", ".", "lower", "(", ")", "for", "c", "in", "features", "]", "path", "=", "pathlib", ".", "Path", "(", "path", ")", "ds", "=", "self", ".", "rtdc_ds", "# Make sure that path ends with .tsv", "if", "path", ".", "suffix", "!=", "\".tsv\"", ":", "path", "=", "path", ".", "with_name", "(", "path", ".", "name", "+", "\".tsv\"", ")", "# Check if file already exist", "if", "not", "override", "and", "path", ".", "exists", "(", ")", ":", "raise", "OSError", "(", "\"File already exists: {}\\n\"", ".", "format", "(", "str", "(", "path", ")", ".", "encode", "(", "\"ascii\"", ",", "\"ignore\"", ")", ")", "+", "\"Please use the `override=True` option.\"", ")", "# Check that features are in dfn.scalar_feature_names", "for", "c", "in", "features", ":", "if", "c", "not", "in", "dfn", ".", "scalar_feature_names", ":", "raise", "ValueError", "(", "\"Unknown feature name {}\"", ".", "format", "(", "c", ")", ")", "# Open file", "with", "path", ".", "open", "(", "\"w\"", ")", "as", "fd", ":", "# write header", "header1", "=", "\"\\t\"", ".", "join", "(", "[", "c", "for", "c", "in", "features", "]", ")", "fd", ".", "write", "(", "\"# \"", "+", "header1", "+", "\"\\n\"", ")", "header2", "=", "\"\\t\"", ".", "join", "(", "[", "dfn", ".", "feature_name2label", "[", "c", "]", "for", "c", "in", "features", "]", ")", "fd", ".", "write", "(", "\"# \"", "+", "header2", "+", "\"\\n\"", ")", "with", "path", ".", "open", "(", "\"ab\"", ")", "as", "fd", ":", "# write data", "if", "filtered", ":", "data", "=", "[", "ds", "[", "c", "]", "[", "ds", ".", "_filter", "]", "for", "c", "in", "features", "]", "else", ":", "data", "=", "[", "ds", "[", "c", "]", "for", "c", "in", "features", "]", "np", ".", "savetxt", "(", "fd", ",", "np", ".", "array", "(", "data", ")", ".", "transpose", "(", ")", ",", "fmt", "=", "str", "(", "\"%.10e\"", ")", ",", "delimiter", "=", "\"\\t\"", ")" ]
Export the data of the current instance to a .tsv file Parameters ---------- path: str Path to a .tsv file. The ending .tsv is added automatically. features: list of str The features in the resulting .tsv file. These are strings that are defined in `dclab.definitions.scalar_feature_names`, e.g. "area_cvx", "deform", "frame", "fl1_max", "aspect". filtered: bool If set to `True`, only the filtered data (index in ds._filter) are used. override: bool If set to `True`, an existing file ``path`` will be overridden. If set to `False`, raises `OSError` if ``path`` exists.
[ "Export", "the", "data", "of", "the", "current", "instance", "to", "a", ".", "tsv", "file" ]
python
train
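A hedged usage sketch; the input file name is hypothetical, and "area_um"/"deform" are standard dclab scalar features:
import dclab

ds = dclab.new_dataset("measurement.rtdc")  # hypothetical input file
# filtered=True (the default) exports only events that pass the dataset filter
ds.export.tsv("scalars.tsv", ["area_um", "deform"], override=True)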
python-openxml/python-docx
docx/text/font.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/text/font.py#L397-L404
def _get_bool_prop(self, name): """ Return the value of boolean child of `w:rPr` having *name*. """ rPr = self._element.rPr if rPr is None: return None return rPr._get_bool_val(name)
[ "def", "_get_bool_prop", "(", "self", ",", "name", ")", ":", "rPr", "=", "self", ".", "_element", ".", "rPr", "if", "rPr", "is", "None", ":", "return", "None", "return", "rPr", ".", "_get_bool_val", "(", "name", ")" ]
Return the value of boolean child of `w:rPr` having *name*.
[ "Return", "the", "value", "of", "boolean", "child", "of", "w", ":", "rPr", "having", "*", "name", "*", "." ]
python
train
UCL-INGI/INGInious
inginious/frontend/pages/api/authentication.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/pages/api/authentication.py#L18-L25
def API_GET(self): # pylint: disable=arguments-differ """ Returns {"authenticated": false} or {"authenticated": true, "username": "your_username"} (always 200 OK) """ if self.user_manager.session_logged_in(): return 200, {"authenticated": True, "username": self.user_manager.session_username()} else: return 200, {"authenticated": False}
[ "def", "API_GET", "(", "self", ")", ":", "# pylint: disable=arguments-differ", "if", "self", ".", "user_manager", ".", "session_logged_in", "(", ")", ":", "return", "200", ",", "{", "\"authenticated\"", ":", "True", ",", "\"username\"", ":", "self", ".", "user_manager", ".", "session_username", "(", ")", "}", "else", ":", "return", "200", ",", "{", "\"authenticated\"", ":", "False", "}" ]
Returns {"authenticated": false} or {"authenticated": true, "username": "your_username"} (always 200 OK)
[ "Returns", "{", "authenticated", ":", "false", "}", "or", "{", "authenticated", ":", "true", "username", ":", "your_username", "}", "(", "always", "200", "OK", ")" ]
python
train
foxx/python-helpful
helpful.py
https://github.com/foxx/python-helpful/blob/e31ad9bdf45051d8b9a0d1808d214e2293c3bbed/helpful.py#L196-L209
def iter_ensure_instance(iterable, types): """ Iterate over object and check each item type >>> iter_ensure_instance([1,2,3], [str]) Traceback (most recent call last): TypeError: >>> iter_ensure_instance([1,2,3], int) >>> iter_ensure_instance(1, int) Traceback (most recent call last): TypeError: """ ensure_instance(iterable, Iterable) [ ensure_instance(item, types) for item in iterable ]
[ "def", "iter_ensure_instance", "(", "iterable", ",", "types", ")", ":", "ensure_instance", "(", "iterable", ",", "Iterable", ")", "[", "ensure_instance", "(", "item", ",", "types", ")", "for", "item", "in", "iterable", "]" ]
Iterate over object and check each item type >>> iter_ensure_instance([1,2,3], [str]) Traceback (most recent call last): TypeError: >>> iter_ensure_instance([1,2,3], int) >>> iter_ensure_instance(1, int) Traceback (most recent call last): TypeError:
[ "Iterate", "over", "object", "and", "check", "each", "item", "type" ]
python
train
proycon/pynlpl
pynlpl/formats/folia.py
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L5514-L5520
def annotation(self, type, set=None): """Will return a **single** annotation (even if there are multiple). Raises a ``NoSuchAnnotation`` exception if none was found""" l = self.count(type,set,True,default_ignore_annotations) if len(l) >= 1: return l[0] else: raise NoSuchAnnotation()
[ "def", "annotation", "(", "self", ",", "type", ",", "set", "=", "None", ")", ":", "l", "=", "self", ".", "count", "(", "type", ",", "set", ",", "True", ",", "default_ignore_annotations", ")", "if", "len", "(", "l", ")", ">=", "1", ":", "return", "l", "[", "0", "]", "else", ":", "raise", "NoSuchAnnotation", "(", ")" ]
Will return a **single** annotation (even if there are multiple). Raises a ``NoSuchAnnotation`` exception if none was found
[ "Will", "return", "a", "**", "single", "**", "annotation", "(", "even", "if", "there", "are", "multiple", ")", ".", "Raises", "a", "NoSuchAnnotation", "exception", "if", "none", "was", "found" ]
python
train
bitesofcode/projexui
projexui/widgets/xtreewidget/xtreewidgetitem.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtreewidget/xtreewidgetitem.py#L105-L123
def adjustHeight(self, column):
    """
    Adjusts the height for this item based on the column and its text.

    :param      column | <int>
    """
    tree = self.treeWidget()
    if not tree:
        return

    w = tree.width()
    if tree.verticalScrollBar().isVisible():
        w -= tree.verticalScrollBar().width()

    doc = QtGui.QTextDocument()
    doc.setTextWidth(w)
    doc.setHtml(self.text(0))
    height = doc.documentLayout().documentSize().height()

    self.setFixedHeight(height+2)
[ "def", "adjustHeight", "(", "self", ",", "column", ")", ":", "tree", "=", "self", ".", "treeWidget", "(", ")", "if", "not", "tree", ":", "return", "w", "=", "tree", ".", "width", "(", ")", "if", "tree", ".", "verticalScrollBar", "(", ")", ".", "isVisible", "(", ")", ":", "w", "-=", "tree", ".", "verticalScrollBar", "(", ")", ".", "width", "(", ")", "doc", "=", "QtGui", ".", "QTextDocument", "(", ")", "doc", ".", "setTextWidth", "(", "w", ")", "doc", ".", "setHtml", "(", "self", ".", "text", "(", "0", ")", ")", "height", "=", "doc", ".", "documentLayout", "(", ")", ".", "documentSize", "(", ")", ".", "height", "(", ")", "self", ".", "setFixedHeight", "(", "height", "+", "2", ")" ]
Adjusts the height for this item based on the column and its text.

:param      column | <int>
[ "Adjusts", "the", "height", "for", "this", "item", "based", "on", "the", "columna", "and", "its", "text", ".", ":", "param", "column", "|", "<int", ">" ]
python
train
fermiPy/fermipy
fermipy/jobs/target_analysis.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/target_analysis.py#L107-L172
def run_analysis(self, argv):
    """Run this analysis"""
    args = self._parser.parse_args(argv)

    if not HAVE_ST:
        raise RuntimeError(
            "Trying to run fermipy analysis, but don't have ST")

    if is_null(args.skydirs):
        skydir_dict = None
    else:
        skydir_dict = load_yaml(args.skydirs)

    gta = GTAnalysis(args.config, logging={'verbosity': 3},
                     fileio={'workdir_regex': '\.xml$|\.npy$'})
    #gta.setup(overwrite=False)
    gta.load_roi(args.roi_baseline)
    gta.print_roi()
    basedir = os.path.dirname(args.config)
    # This should be a no-op, b/c it was done in the baseline analysis

    for profile in args.profiles:
        if skydir_dict is None:
            skydir_keys = [None]
        else:
            skydir_keys = sorted(skydir_dict.keys())

        for skydir_key in skydir_keys:
            if skydir_key is None:
                pkey, psrc_name, pdict = build_profile_dict(basedir, profile)
            else:
                skydir_val = skydir_dict[skydir_key]
                pkey, psrc_name, pdict = build_profile_dict(basedir, profile)
                pdict['ra'] = skydir_val['ra']
                pdict['dec'] = skydir_val['dec']
                pkey += "_%06i" % skydir_key

            outfile = "sed_%s.fits" % pkey

            # Add the source and get the list of correlated sources
            correl_dict, test_src_name = add_source_get_correlated(gta, psrc_name,
                                                                   pdict, correl_thresh=0.25,
                                                                   non_null_src=args.non_null_src)

            # Write the list of correlated sources
            correl_yaml = os.path.join(basedir, "correl_%s.yaml" % pkey)
            write_yaml(correl_dict, correl_yaml)

            gta.free_sources(False)
            for src_name in correl_dict.keys():
                gta.free_source(src_name, pars='norm')

            # build the SED
            if args.non_null_src:
                gta.update_source(test_src_name, reoptimize=True)
                gta.write_roi("base_%s" % pkey, make_plots=False)
            gta.sed(test_src_name, prefix=pkey, outfile=outfile, make_plots=args.make_plots)

            # remove the source
            gta.delete_source(test_src_name)
            # put the ROI back to how it was
            gta.load_xml(args.roi_baseline)

    return gta
[ "def", "run_analysis", "(", "self", ",", "argv", ")", ":", "args", "=", "self", ".", "_parser", ".", "parse_args", "(", "argv", ")", "if", "not", "HAVE_ST", ":", "raise", "RuntimeError", "(", "\"Trying to run fermipy analysis, but don't have ST\"", ")", "if", "is_null", "(", "args", ".", "skydirs", ")", ":", "skydir_dict", "=", "None", "else", ":", "skydir_dict", "=", "load_yaml", "(", "args", ".", "skydirs", ")", "gta", "=", "GTAnalysis", "(", "args", ".", "config", ",", "logging", "=", "{", "'verbosity'", ":", "3", "}", ",", "fileio", "=", "{", "'workdir_regex'", ":", "'\\.xml$|\\.npy$'", "}", ")", "#gta.setup(overwrite=False)", "gta", ".", "load_roi", "(", "args", ".", "roi_baseline", ")", "gta", ".", "print_roi", "(", ")", "basedir", "=", "os", ".", "path", ".", "dirname", "(", "args", ".", "config", ")", "# This should be a no-op, b/c it was done in the baseline analysis", "for", "profile", "in", "args", ".", "profiles", ":", "if", "skydir_dict", "is", "None", ":", "skydir_keys", "=", "[", "None", "]", "else", ":", "skydir_keys", "=", "sorted", "(", "skydir_dict", ".", "keys", "(", ")", ")", "for", "skydir_key", "in", "skydir_keys", ":", "if", "skydir_key", "is", "None", ":", "pkey", ",", "psrc_name", ",", "pdict", "=", "build_profile_dict", "(", "basedir", ",", "profile", ")", "else", ":", "skydir_val", "=", "skydir_dict", "[", "skydir_key", "]", "pkey", ",", "psrc_name", ",", "pdict", "=", "build_profile_dict", "(", "basedir", ",", "profile", ")", "pdict", "[", "'ra'", "]", "=", "skydir_val", "[", "'ra'", "]", "pdict", "[", "'dec'", "]", "=", "skydir_val", "[", "'dec'", "]", "pkey", "+=", "\"_%06i\"", "%", "skydir_key", "outfile", "=", "\"sed_%s.fits\"", "%", "pkey", "# Add the source and get the list of correlated soruces", "correl_dict", ",", "test_src_name", "=", "add_source_get_correlated", "(", "gta", ",", "psrc_name", ",", "pdict", ",", "correl_thresh", "=", "0.25", ",", "non_null_src", "=", "args", ".", "non_null_src", ")", "# Write the list of correlated sources", "correl_yaml", "=", "os", ".", "path", ".", "join", "(", "basedir", ",", "\"correl_%s.yaml\"", "%", "pkey", ")", "write_yaml", "(", "correl_dict", ",", "correl_yaml", ")", "gta", ".", "free_sources", "(", "False", ")", "for", "src_name", "in", "correl_dict", ".", "keys", "(", ")", ":", "gta", ".", "free_source", "(", "src_name", ",", "pars", "=", "'norm'", ")", "# build the SED", "if", "args", ".", "non_null_src", ":", "gta", ".", "update_source", "(", "test_src_name", ",", "reoptimize", "=", "True", ")", "gta", ".", "write_roi", "(", "\"base_%s\"", "%", "pkey", ",", "make_plots", "=", "False", ")", "gta", ".", "sed", "(", "test_src_name", ",", "prefix", "=", "pkey", ",", "outfile", "=", "outfile", ",", "make_plots", "=", "args", ".", "make_plots", ")", "# remove the source", "gta", ".", "delete_source", "(", "test_src_name", ")", "# put the ROI back to how it was", "gta", ".", "load_xml", "(", "args", ".", "roi_baseline", ")", "return", "gta" ]
Run this analysis
[ "Run", "this", "analysis" ]
python
train
Deathnerd/pyterp
pyterp/__init__.py
https://github.com/Deathnerd/pyterp/blob/baf2957263685f03873f368226f5752da4e51f08/pyterp/__init__.py#L179-L186
def _output_current_byte(self): """ Prints out the ASCII value of the current byte """ if self.tape[self.pointer] is None: print "{}".format(chr(0)), else: print "{}".format(chr(int(self.tape[self.pointer]))),
[ "def", "_output_current_byte", "(", "self", ")", ":", "if", "self", ".", "tape", "[", "self", ".", "pointer", "]", "is", "None", ":", "print", "\"{}\"", ".", "format", "(", "chr", "(", "0", ")", ")", ",", "else", ":", "print", "\"{}\"", ".", "format", "(", "chr", "(", "int", "(", "self", ".", "tape", "[", "self", ".", "pointer", "]", ")", ")", ")", "," ]
Prints out the ASCII value of the current byte
[ "Prints", "out", "the", "ASCII", "value", "of", "the", "current", "byte" ]
python
train
openstack/python-scciclient
scciclient/irmc/viom/elcm.py
https://github.com/openstack/python-scciclient/blob/4585ce2f76853b9773fb190ca0cfff0aa04a7cf8/scciclient/irmc/viom/elcm.py#L919-L960
def get_json(self): """Create JSON data for iSCSI target. :returns: JSON data for iSCSI target as follows: { "DHCPUsage":{ }, "Name":{ }, "IPv4Address":{ }, "PortNumber":{ }, "BootLUN":{ }, "AuthenticationMethod":{ }, "ChapUserName":{ }, "ChapSecret":{ }, "MutualChapSecret":{ } } """ json = { 'DHCPUsage': self.dhcp_usage, 'AuthenticationMethod': self.auth_method, } if not self.dhcp_usage: json['Name'] = self.iqn json['IPv4Address'] = self.ip json['PortNumber'] = self.port json['BootLUN'] = self.lun if self.chap_user: json['ChapUserName'] = self.chap_user if self.chap_secret: json['ChapSecret'] = self.chap_secret if self.mutual_chap_secret: json['MutualChapSecret'] = self.mutual_chap_secret return json
[ "def", "get_json", "(", "self", ")", ":", "json", "=", "{", "'DHCPUsage'", ":", "self", ".", "dhcp_usage", ",", "'AuthenticationMethod'", ":", "self", ".", "auth_method", ",", "}", "if", "not", "self", ".", "dhcp_usage", ":", "json", "[", "'Name'", "]", "=", "self", ".", "iqn", "json", "[", "'IPv4Address'", "]", "=", "self", ".", "ip", "json", "[", "'PortNumber'", "]", "=", "self", ".", "port", "json", "[", "'BootLUN'", "]", "=", "self", ".", "lun", "if", "self", ".", "chap_user", ":", "json", "[", "'ChapUserName'", "]", "=", "self", ".", "chap_user", "if", "self", ".", "chap_secret", ":", "json", "[", "'ChapSecret'", "]", "=", "self", ".", "chap_secret", "if", "self", ".", "mutual_chap_secret", ":", "json", "[", "'MutualChapSecret'", "]", "=", "self", ".", "mutual_chap_secret", "return", "json" ]
Create JSON data for iSCSI target. :returns: JSON data for iSCSI target as follows: { "DHCPUsage":{ }, "Name":{ }, "IPv4Address":{ }, "PortNumber":{ }, "BootLUN":{ }, "AuthenticationMethod":{ }, "ChapUserName":{ }, "ChapSecret":{ }, "MutualChapSecret":{ } }
[ "Create", "JSON", "data", "for", "iSCSI", "target", "." ]
python
train
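A standalone sketch of the assembly rules used by get_json() above; the helper name and default values here are illustrative, not part of scciclient. Static-IP fields are emitted only when DHCP is off, and CHAP fields only when they are set.

def build_iscsi_target_json(dhcp_usage, auth_method, iqn=None, ip=None,
                            port=3260, lun=0, chap_user=None,
                            chap_secret=None, mutual_chap_secret=None):
    # Same assembly rules as get_json() above: static-IP fields are only
    # emitted when DHCP is off, CHAP fields only when they are set.
    json = {'DHCPUsage': dhcp_usage, 'AuthenticationMethod': auth_method}
    if not dhcp_usage:
        json.update({'Name': iqn, 'IPv4Address': ip,
                     'PortNumber': port, 'BootLUN': lun})
    if chap_user:
        json['ChapUserName'] = chap_user
    if chap_secret:
        json['ChapSecret'] = chap_secret
    if mutual_chap_secret:
        json['MutualChapSecret'] = mutual_chap_secret
    return json

print(build_iscsi_target_json(False, 'None',
                              iqn='iqn.2017-05.com.example:boot',
                              ip='192.0.2.10'))
# {'DHCPUsage': False, 'AuthenticationMethod': 'None',
#  'Name': 'iqn.2017-05.com.example:boot', 'IPv4Address': '192.0.2.10',
#  'PortNumber': 3260, 'BootLUN': 0}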
fake-name/ChromeController
ChromeController/Generator/Generated.py
https://github.com/fake-name/ChromeController/blob/914dd136184e8f1165c7aa6ef30418aaf10c61f0/ChromeController/Generator/Generated.py#L3440-L3459
def DOM_setOuterHTML(self, nodeId, outerHTML): """ Function path: DOM.setOuterHTML Domain: DOM Method name: setOuterHTML Parameters: Required arguments: 'nodeId' (type: NodeId) -> Id of the node to set markup for. 'outerHTML' (type: string) -> Outer HTML markup to set. No return value. Description: Sets node HTML markup, returns new node id. """ assert isinstance(outerHTML, (str,) ), "Argument 'outerHTML' must be of type '['str']'. Received type: '%s'" % type( outerHTML) subdom_funcs = self.synchronous_command('DOM.setOuterHTML', nodeId=nodeId, outerHTML=outerHTML) return subdom_funcs
[ "def", "DOM_setOuterHTML", "(", "self", ",", "nodeId", ",", "outerHTML", ")", ":", "assert", "isinstance", "(", "outerHTML", ",", "(", "str", ",", ")", ")", ",", "\"Argument 'outerHTML' must be of type '['str']'. Received type: '%s'\"", "%", "type", "(", "outerHTML", ")", "subdom_funcs", "=", "self", ".", "synchronous_command", "(", "'DOM.setOuterHTML'", ",", "nodeId", "=", "nodeId", ",", "outerHTML", "=", "outerHTML", ")", "return", "subdom_funcs" ]
Function path: DOM.setOuterHTML Domain: DOM Method name: setOuterHTML Parameters: Required arguments: 'nodeId' (type: NodeId) -> Id of the node to set markup for. 'outerHTML' (type: string) -> Outer HTML markup to set. No return value. Description: Sets node HTML markup, returns new node id.
[ "Function", "path", ":", "DOM", ".", "setOuterHTML", "Domain", ":", "DOM", "Method", "name", ":", "setOuterHTML", "Parameters", ":", "Required", "arguments", ":", "nodeId", "(", "type", ":", "NodeId", ")", "-", ">", "Id", "of", "the", "node", "to", "set", "markup", "for", ".", "outerHTML", "(", "type", ":", "string", ")", "-", ">", "Outer", "HTML", "markup", "to", "set", ".", "No", "return", "value", ".", "Description", ":", "Sets", "node", "HTML", "markup", "returns", "new", "node", "id", "." ]
python
train
Arubacloud/pyArubaCloud
ArubaCloud/ReverseDns/ReverseDns.py
https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/ReverseDns/ReverseDns.py#L23-L34
def set(self, address, host_name):
    """
    Assign one or more PTR records to a single IP address
    :type address: str
    :type host_name: list[str]
    :param address: (str) The IP address to configure
    :param host_name: (list[str]) The list of strings representing PTR records
    :return: (bool) True in case of success, False in case of failure
    """
    request = self._call(SetEnqueueSetReverseDns.SetEnqueueSetReverseDns,
                         IP=address, Hosts=host_name)
    response = request.commit()
    return response['Success']
[ "def", "set", "(", "self", ",", "address", ",", "host_name", ")", ":", "request", "=", "self", ".", "_call", "(", "SetEnqueueSetReverseDns", ".", "SetEnqueueSetReverseDns", ",", "IP", "=", "address", ",", "Hosts", "=", "host_name", ")", "response", "=", "request", ".", "commit", "(", ")", "return", "response", "[", "'Success'", "]" ]
Assign one or more PTR records to a single IP address :type address: str :type host_name: list[str] :param address: (str) The IP address to configure :param host_name: (list[str]) The list of strings representing PTR records :return: (bool) True in case of success, False in case of failure
[ "Assign", "one", "or", "more", "PTR", "record", "to", "a", "single", "IP", "Address", ":", "type", "address", ":", "str", ":", "type", "host_name", ":", "list", "[", "str", "]", ":", "param", "address", ":", "(", "str", ")", "The", "IP", "address", "to", "configure", ":", "param", "host_name", ":", "(", "list", "[", "str", "]", ")", "The", "list", "of", "strings", "representing", "PTR", "records", ":", "return", ":", "(", "bool", ")", "True", "in", "case", "of", "success", "False", "in", "case", "of", "failure" ]
python
train
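A runnable sketch of how set() is driven; the stub classes below replace the real Aruba Cloud request plumbing so no credentials are needed, and they are hypothetical, not part of pyArubaCloud.

class _FakeRequest:
    def commit(self):
        return {'Success': True}

class _FakeReverseDns:
    # Mirrors the signature of set() above; _call is stubbed so the
    # example runs without real Aruba Cloud credentials.
    def _call(self, method, IP, Hosts):
        print('would enqueue PTR update:', IP, '->', Hosts)
        return _FakeRequest()

    def set(self, address, host_name):
        request = self._call('SetEnqueueSetReverseDns', IP=address,
                             Hosts=host_name)
        return request.commit()['Success']

print(_FakeReverseDns().set('192.0.2.7', ['mail.example.com']))  # True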
fermiPy/fermipy
fermipy/diffuse/name_policy.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/name_policy.py#L200-L209
def galprop_ringkey(self, **kwargs):
    """ return the sourcekey for galprop input maps: specifies the component and ring
    """
    kwargs_copy = self.base_dict.copy()
    kwargs_copy.update(**kwargs)
    self._replace_none(kwargs_copy)
    try:
        return NameFactory.galprop_ringkey_format.format(**kwargs_copy)
    except KeyError:
        return None
[ "def", "galprop_ringkey", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs_copy", "=", "self", ".", "base_dict", ".", "copy", "(", ")", "kwargs_copy", ".", "update", "(", "*", "*", "kwargs", ")", "self", ".", "_replace_none", "(", "kwargs_copy", ")", "try", ":", "return", "NameFactory", ".", "galprop_ringkey_format", ".", "format", "(", "*", "*", "kwargs_copy", ")", "except", "KeyError", ":", "return", "None" ]
return the sourcekey for galprop input maps: specifies the component and ring
[ "return", "the", "sourcekey", "for", "galprop", "input", "maps", ":", "specifies", "the", "component", "and", "ring" ]
python
train
devassistant/devassistant
devassistant/dapi/dapicli.py
https://github.com/devassistant/devassistant/blob/2dbfeaa666a64127263664d18969c55d19ecc83e/devassistant/dapi/dapicli.py#L102-L111
def _unpaginated(what): '''Returns a dictionary with all <what>, unpaginated''' page = data(what) results = page['results'] count = page['count'] while page['next']: page = data(page['next']) results += page['results'] count += page['count'] return {'results': results, 'count': count}
[ "def", "_unpaginated", "(", "what", ")", ":", "page", "=", "data", "(", "what", ")", "results", "=", "page", "[", "'results'", "]", "count", "=", "page", "[", "'count'", "]", "while", "page", "[", "'next'", "]", ":", "page", "=", "data", "(", "page", "[", "'next'", "]", ")", "results", "+=", "page", "[", "'results'", "]", "count", "+=", "page", "[", "'count'", "]", "return", "{", "'results'", ":", "results", ",", "'count'", ":", "count", "}" ]
Returns a dictionary with all <what>, unpaginated
[ "Returns", "a", "dictionary", "with", "all", "<what", ">", "unpaginated" ]
python
train
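A runnable sketch of the follow-the-next-link pattern that _unpaginated() implements, with data() stubbed by two fake pages; the page shape assumed here matches the dict keys the function reads.

_PAGES = {
    'daps': {'results': [1, 2], 'count': 2, 'next': 'daps?page=2'},
    'daps?page=2': {'results': [3], 'count': 1, 'next': None},
}

def data(what):
    return _PAGES[what]

def unpaginated(what):
    page = data(what)
    results = list(page['results'])   # copy to avoid mutating the cache
    count = page['count']
    while page['next']:
        page = data(page['next'])
        results += page['results']
        count += page['count']
    return {'results': results, 'count': count}

print(unpaginated('daps'))  # {'results': [1, 2, 3], 'count': 3}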
lesscpy/lesscpy
lesscpy/lessc/parser.py
https://github.com/lesscpy/lesscpy/blob/51e392fb4a3cd4ccfb6175e0e42ce7d2f6b78126/lesscpy/lessc/parser.py#L594-L601
def p_ident_parts(self, p): """ ident_parts : ident_part | selector | filter_group """ if not isinstance(p[1], list): p[1] = [p[1]] p[0] = p[1]
[ "def", "p_ident_parts", "(", "self", ",", "p", ")", ":", "if", "not", "isinstance", "(", "p", "[", "1", "]", ",", "list", ")", ":", "p", "[", "1", "]", "=", "[", "p", "[", "1", "]", "]", "p", "[", "0", "]", "=", "p", "[", "1", "]" ]
ident_parts : ident_part | selector | filter_group
[ "ident_parts", ":", "ident_part", "|", "selector", "|", "filter_group" ]
python
valid
saltstack/salt
salt/modules/git.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/git.py#L1722-L1785
def describe(cwd, rev='HEAD', user=None, password=None, ignore_retcode=False, output_encoding=None): ''' Returns the `git-describe(1)`_ string (or the SHA1 hash if there are no tags) for the given revision. cwd The path to the git checkout rev : HEAD The revision to describe user User under which to run the git command. By default, the command is run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 ignore_retcode : False If ``True``, do not log an error to the minion log if the git command returns a nonzero exit status. .. versionadded:: 2015.8.0 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 .. _`git-describe(1)`: http://git-scm.com/docs/git-describe CLI Examples: .. code-block:: bash salt myminion git.describe /path/to/repo salt myminion git.describe /path/to/repo develop ''' cwd = _expand_path(cwd, user) command = ['git', 'describe'] if _LooseVersion(version(versioninfo=False)) >= _LooseVersion('1.5.6'): command.append('--always') command.append(rev) return _git_run(command, cwd=cwd, user=user, password=password, ignore_retcode=ignore_retcode, output_encoding=output_encoding)['stdout']
[ "def", "describe", "(", "cwd", ",", "rev", "=", "'HEAD'", ",", "user", "=", "None", ",", "password", "=", "None", ",", "ignore_retcode", "=", "False", ",", "output_encoding", "=", "None", ")", ":", "cwd", "=", "_expand_path", "(", "cwd", ",", "user", ")", "command", "=", "[", "'git'", ",", "'describe'", "]", "if", "_LooseVersion", "(", "version", "(", "versioninfo", "=", "False", ")", ")", ">=", "_LooseVersion", "(", "'1.5.6'", ")", ":", "command", ".", "append", "(", "'--always'", ")", "command", ".", "append", "(", "rev", ")", "return", "_git_run", "(", "command", ",", "cwd", "=", "cwd", ",", "user", "=", "user", ",", "password", "=", "password", ",", "ignore_retcode", "=", "ignore_retcode", ",", "output_encoding", "=", "output_encoding", ")", "[", "'stdout'", "]" ]
Returns the `git-describe(1)`_ string (or the SHA1 hash if there are no tags) for the given revision. cwd The path to the git checkout rev : HEAD The revision to describe user User under which to run the git command. By default, the command is run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 ignore_retcode : False If ``True``, do not log an error to the minion log if the git command returns a nonzero exit status. .. versionadded:: 2015.8.0 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 .. _`git-describe(1)`: http://git-scm.com/docs/git-describe CLI Examples: .. code-block:: bash salt myminion git.describe /path/to/repo salt myminion git.describe /path/to/repo develop
[ "Returns", "the", "git", "-", "describe", "(", "1", ")", "_", "string", "(", "or", "the", "SHA1", "hash", "if", "there", "are", "no", "tags", ")", "for", "the", "given", "revision", "." ]
python
train
SoCo/SoCo
soco/discovery.py
https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/discovery.py#L188-L213
def any_soco():
    """Return any visible soco device, for when it doesn't matter which.

    Try to obtain an existing instance, or use `discover` if necessary.
    Note that this assumes that the existing instance has not left
    the network.

    Returns:
        SoCo: A `SoCo` instance (or subclass, if `config.SOCO_CLASS` is
        set), or `None` if no instances are found.
    """

    cls = config.SOCO_CLASS
    # pylint: disable=no-member, protected-access
    try:
        # Try to get the first pre-existing soco instance we know about,
        # as long as it is visible (i.e. not a bridge etc). Otherwise,
        # perform discovery (again, excluding invisibles) and return one of
        # those
        device = next(d for d in cls._instances[cls._class_group].values()
                      if d.is_visible)
    except (KeyError, StopIteration):
        devices = discover()
        return None if devices is None else devices.pop()

    return device
[ "def", "any_soco", "(", ")", ":", "cls", "=", "config", ".", "SOCO_CLASS", "# pylint: disable=no-member, protected-access", "try", ":", "# Try to get the first pre-existing soco instance we know about,", "# as long as it is visible (i.e. not a bridge etc). Otherwise,", "# perform discovery (again, excluding invisibles) and return one of", "# those", "device", "=", "next", "(", "d", "for", "d", "in", "cls", ".", "_instances", "[", "cls", ".", "_class_group", "]", ".", "values", "(", ")", "if", "d", ".", "is_visible", ")", "except", "(", "KeyError", ",", "StopIteration", ")", ":", "devices", "=", "discover", "(", ")", "return", "None", "if", "devices", "is", "None", "else", "devices", ".", "pop", "(", ")", "return", "device" ]
Return any visible soco device, for when it doesn't matter which. Try to obtain an existing instance, or use `discover` if necessary. Note that this assumes that the existing instance has not left the network. Returns: SoCo: A `SoCo` instance (or subclass, if `config.SOCO_CLASS` is set), or `None` if no instances are found.
[ "Return", "any", "visible", "soco", "device", "for", "when", "it", "doesn", "t", "matter", "which", "." ]
python
train
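A minimal usage sketch, assuming a Sonos device may or may not be on the local network, so the None return is handled rather than assumed away.

from soco.discovery import any_soco

# Discovery needs a Sonos speaker on the local network; guard against
# None instead of assuming one exists.
device = any_soco()
if device is None:
    print('no visible Sonos device found')
else:
    print('using', device.player_name)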
klen/muffin-peewee
muffin_peewee/mpeewee.py
https://github.com/klen/muffin-peewee/blob/8e893e3ea1dfc82fbcfc6efe784308c8d4e2852e/muffin_peewee/mpeewee.py#L128-L144
async def async_connect(self):
    """Asynchronously wait for a connection from the pool."""
    if self._waiters is None:
        raise Exception('Error, database not properly initialized before async connection')

    if self._waiters or self.max_connections and (len(self._in_use) >= self.max_connections):
        waiter = asyncio.Future(loop=self._loop)
        self._waiters.append(waiter)

        try:
            logger.debug('Wait for connection.')
            await waiter
        finally:
            self._waiters.remove(waiter)

    self.connect()
    return self._state.conn
[ "async", "def", "async_connect", "(", "self", ")", ":", "if", "self", ".", "_waiters", "is", "None", ":", "raise", "Exception", "(", "'Error, database not properly initialized before async connection'", ")", "if", "self", ".", "_waiters", "or", "self", ".", "max_connections", "and", "(", "len", "(", "self", ".", "_in_use", ")", ">=", "self", ".", "max_connections", ")", ":", "waiter", "=", "asyncio", ".", "Future", "(", "loop", "=", "self", ".", "_loop", ")", "self", ".", "_waiters", ".", "append", "(", "waiter", ")", "try", ":", "logger", ".", "debug", "(", "'Wait for connection.'", ")", "await", "waiter", "finally", ":", "self", ".", "_waiters", ".", "remove", "(", "waiter", ")", "self", ".", "connect", "(", ")", "return", "self", ".", "_state", ".", "conn" ]
Asynchronously wait for a connection from the pool.
[ "Asyncronously", "wait", "for", "a", "connection", "from", "the", "pool", "." ]
python
valid
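A runnable toy of the waiter pattern async_connect() relies on: callers park on an asyncio.Future when the pool is exhausted and are woken on release. TinyPool is a sketch for illustration, not muffin-peewee's actual pool.

import asyncio

class TinyPool:
    def __init__(self, max_connections=1):
        self.max_connections = max_connections
        self.in_use = 0
        self.waiters = []

    async def acquire(self):
        while self.in_use >= self.max_connections:
            waiter = asyncio.get_running_loop().create_future()
            self.waiters.append(waiter)
            try:
                await waiter              # parked until release()
            finally:
                self.waiters.remove(waiter)
        self.in_use += 1
        return object()                   # stands in for a DB connection

    def release(self):
        self.in_use -= 1
        if self.waiters:
            self.waiters[0].set_result(True)

async def main():
    pool = TinyPool()
    conn1 = await pool.acquire()
    second = asyncio.ensure_future(pool.acquire())
    await asyncio.sleep(0)                # let the second caller park
    pool.release()                        # wakes the parked waiter
    await second
    print('second caller got a connection')

asyncio.run(main())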
elastic/apm-agent-python
setup.py
https://github.com/elastic/apm-agent-python/blob/2975663d7bd22282dc39336b2c37b37c12c7a774/setup.py#L88-L99
def get_version(): """ Get version without importing from elasticapm. This avoids any side effects from importing while installing and/or building the module :return: a string, indicating the version """ version_file = open(os.path.join("elasticapm", "version.py"), encoding="utf-8") for line in version_file: if line.startswith("__version__"): version_tuple = ast.literal_eval(line.split(" = ")[1]) return ".".join(map(str, version_tuple)) return "unknown"
[ "def", "get_version", "(", ")", ":", "version_file", "=", "open", "(", "os", ".", "path", ".", "join", "(", "\"elasticapm\"", ",", "\"version.py\"", ")", ",", "encoding", "=", "\"utf-8\"", ")", "for", "line", "in", "version_file", ":", "if", "line", ".", "startswith", "(", "\"__version__\"", ")", ":", "version_tuple", "=", "ast", ".", "literal_eval", "(", "line", ".", "split", "(", "\" = \"", ")", "[", "1", "]", ")", "return", "\".\"", ".", "join", "(", "map", "(", "str", ",", "version_tuple", ")", ")", "return", "\"unknown\"" ]
Get version without importing from elasticapm. This avoids any side effects from importing while installing and/or building the module :return: a string, indicating the version
[ "Get", "version", "without", "importing", "from", "elasticapm", ".", "This", "avoids", "any", "side", "effects", "from", "importing", "while", "installing", "and", "/", "or", "building", "the", "module", ":", "return", ":", "a", "string", "indicating", "the", "version" ]
python
train
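A runnable illustration of the parse-don't-import trick used by get_version(), with the version file faked in memory.

import ast
import io

fake_version_py = io.StringIO('__version__ = (4, 2, 1)\n')
for line in fake_version_py:
    if line.startswith('__version__'):
        # Evaluate only the literal on the right-hand side; nothing from
        # the package itself is imported or executed.
        version_tuple = ast.literal_eval(line.split(' = ')[1])
        print('.'.join(map(str, version_tuple)))   # 4.2.1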
roclark/sportsreference
sportsreference/mlb/schedule.py
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/mlb/schedule.py#L172-L179
def datetime(self): """ Returns a datetime object of the month, day, year, and time the game was played. """ date_string = '%s %s' % (self._date, self._year) date_string = re.sub(r' \(\d+\)', '', date_string) return datetime.strptime(date_string, '%A, %b %d %Y')
[ "def", "datetime", "(", "self", ")", ":", "date_string", "=", "'%s %s'", "%", "(", "self", ".", "_date", ",", "self", ".", "_year", ")", "date_string", "=", "re", ".", "sub", "(", "r' \\(\\d+\\)'", ",", "''", ",", "date_string", ")", "return", "datetime", ".", "strptime", "(", "date_string", ",", "'%A, %b %d %Y'", ")" ]
Returns a datetime object of the month, day, year, and time the game was played.
[ "Returns", "a", "datetime", "object", "of", "the", "month", "day", "year", "and", "time", "the", "game", "was", "played", "." ]
python
train
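A worked example of the parsing done by this property: MLB schedule pages append a game number such as ' (2)' for doubleheaders, which must be stripped before strptime can parse the date.

import re
from datetime import datetime

date_string = 'Wednesday, Jul 4 (2) 2018'     # game 2 of a doubleheader
date_string = re.sub(r' \(\d+\)', '', date_string)
print(datetime.strptime(date_string, '%A, %b %d %Y'))  # 2018-07-04 00:00:00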
chaoss/grimoirelab-sortinghat
sortinghat/api.py
https://github.com/chaoss/grimoirelab-sortinghat/blob/391cd37a75fea26311dc6908bc1c953c540a8e04/sortinghat/api.py#L884-L939
def search_unique_identities_slice(db, term, offset, limit): """Look for unique identities using slicing. This function returns those unique identities which match with the given `term`. The term will be compared with name, email, username and source values of each identity. When an empty term is given, all unique identities will be returned. The results are limited by `offset` (starting on 0) and `limit`. Along with the list of unique identities, this function returns the total number of unique identities that match the given `term`. :param db: database manager :param term: term to match with unique identities data :param offset: return results starting on this position :param limit: maximum number of unique identities to return :raises InvalidValueError: raised when either the given value of `offset` or `limit` is lower than zero """ uidentities = [] pattern = '%' + term + '%' if term else None if offset < 0: raise InvalidValueError('offset must be greater than 0 - %s given' % str(offset)) if limit < 0: raise InvalidValueError('limit must be greater than 0 - %s given' % str(limit)) with db.connect() as session: query = session.query(UniqueIdentity).\ join(Identity).\ filter(UniqueIdentity.uuid == Identity.uuid) if pattern: query = query.filter(Identity.name.like(pattern) | Identity.email.like(pattern) | Identity.username.like(pattern) | Identity.source.like(pattern)) query = query.group_by(UniqueIdentity).\ order_by(UniqueIdentity.uuid) # Get the total number of unique identities for that search nuids = query.count() start = offset end = offset + limit uidentities = query.slice(start, end).all() # Detach objects from the session session.expunge_all() return uidentities, nuids
[ "def", "search_unique_identities_slice", "(", "db", ",", "term", ",", "offset", ",", "limit", ")", ":", "uidentities", "=", "[", "]", "pattern", "=", "'%'", "+", "term", "+", "'%'", "if", "term", "else", "None", "if", "offset", "<", "0", ":", "raise", "InvalidValueError", "(", "'offset must be greater than 0 - %s given'", "%", "str", "(", "offset", ")", ")", "if", "limit", "<", "0", ":", "raise", "InvalidValueError", "(", "'limit must be greater than 0 - %s given'", "%", "str", "(", "limit", ")", ")", "with", "db", ".", "connect", "(", ")", "as", "session", ":", "query", "=", "session", ".", "query", "(", "UniqueIdentity", ")", ".", "join", "(", "Identity", ")", ".", "filter", "(", "UniqueIdentity", ".", "uuid", "==", "Identity", ".", "uuid", ")", "if", "pattern", ":", "query", "=", "query", ".", "filter", "(", "Identity", ".", "name", ".", "like", "(", "pattern", ")", "|", "Identity", ".", "email", ".", "like", "(", "pattern", ")", "|", "Identity", ".", "username", ".", "like", "(", "pattern", ")", "|", "Identity", ".", "source", ".", "like", "(", "pattern", ")", ")", "query", "=", "query", ".", "group_by", "(", "UniqueIdentity", ")", ".", "order_by", "(", "UniqueIdentity", ".", "uuid", ")", "# Get the total number of unique identities for that search", "nuids", "=", "query", ".", "count", "(", ")", "start", "=", "offset", "end", "=", "offset", "+", "limit", "uidentities", "=", "query", ".", "slice", "(", "start", ",", "end", ")", ".", "all", "(", ")", "# Detach objects from the session", "session", ".", "expunge_all", "(", ")", "return", "uidentities", ",", "nuids" ]
Look for unique identities using slicing. This function returns those unique identities which match with the given `term`. The term will be compared with name, email, username and source values of each identity. When an empty term is given, all unique identities will be returned. The results are limited by `offset` (starting on 0) and `limit`. Along with the list of unique identities, this function returns the total number of unique identities that match the given `term`. :param db: database manager :param term: term to match with unique identities data :param offset: return results starting on this position :param limit: maximum number of unique identities to return :raises InvalidValueError: raised when either the given value of `offset` or `limit` is lower than zero
[ "Look", "for", "unique", "identities", "using", "slicing", "." ]
python
train
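A runnable sketch of the offset/limit windowing used above; the real function slices a SQLAlchemy query, but a sorted list behaves the same way.

def search_slice(items, term, offset, limit):
    if offset < 0 or limit < 0:
        raise ValueError('offset and limit must be greater than 0')
    matches = sorted(i for i in items if term in i)
    return matches[offset:offset + limit], len(matches)

identities = ['jsmith', 'jdoe', 'jsmith-bot', 'asmith']
page, total = search_slice(identities, 'smith', offset=0, limit=2)
print(page, 'of', total)   # ['asmith', 'jsmith'] of 3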
mattjj/pyslds
pyslds/states.py
https://github.com/mattjj/pyslds/blob/c505c2bd05a5549d450b518f02493b68ed12e590/pyslds/states.py#L366-L407
def _set_gaussian_expected_stats(self, smoothed_mus, smoothed_sigmas, E_xtp1_xtT): """ Both meanfield and VBEM require expected statistics of the continuous latent states, x. This is a helper function to take E[x_t], E[x_t x_t^T] and E[x_{t+1}, x_t^T] and compute the expected sufficient statistics for the initial distribution, dynamics distribution, and Gaussian observation distribution. """ assert not np.isnan(E_xtp1_xtT).any() assert not np.isnan(smoothed_mus).any() assert not np.isnan(smoothed_sigmas).any() assert smoothed_mus.shape == (self.T, self.D_latent) assert smoothed_sigmas.shape == (self.T, self.D_latent, self.D_latent) assert E_xtp1_xtT.shape == (self.T-1, self.D_latent, self.D_latent) # This is like LDSStates._set_expected_states but doesn't sum over time T, D_obs = self.T, self.D_emission E_x_xT = smoothed_sigmas + smoothed_mus[:, :, None] * smoothed_mus[:, None, :] E_x_uT = smoothed_mus[:, :, None] * self.inputs[:, None, :] E_u_uT = self.inputs[:, :, None] * self.inputs[:, None, :] E_xu_xuT = np.concatenate(( np.concatenate((E_x_xT, E_x_uT), axis=2), np.concatenate((np.transpose(E_x_uT, (0, 2, 1)), E_u_uT), axis=2)), axis=1) E_xut_xutT = E_xu_xuT[:-1] E_xtp1_xtp1T = E_x_xT[1:] E_xtp1_utT = (smoothed_mus[1:, :, None] * self.inputs[:-1, None, :]) E_xtp1_xutT = np.concatenate((E_xtp1_xtT, E_xtp1_utT), axis=-1) # Initial state stats self.E_init_stats = (self.smoothed_mus[0], E_x_xT[0], 1.) # Dynamics stats self.E_dynamics_stats = (E_xtp1_xtp1T, E_xtp1_xutT, E_xut_xutT, np.ones(self.T-1)) # Emission stats -- special case diagonal noise E_yyT = self.data**2 if self.diagonal_noise else self.data[:, :, None] * self.data[:, None, :] E_yxT = self.data[:, :, None] * self.smoothed_mus[:, None, :] E_yuT = self.data[:, :, None] * self.inputs[:, None, :] E_yxuT = np.concatenate((E_yxT, E_yuT), axis=-1) E_n = np.ones((T, D_obs)) if self.diagonal_noise else np.ones(T) self.E_emission_stats = (E_yyT, E_yxuT, E_xu_xuT, E_n)
[ "def", "_set_gaussian_expected_stats", "(", "self", ",", "smoothed_mus", ",", "smoothed_sigmas", ",", "E_xtp1_xtT", ")", ":", "assert", "not", "np", ".", "isnan", "(", "E_xtp1_xtT", ")", ".", "any", "(", ")", "assert", "not", "np", ".", "isnan", "(", "smoothed_mus", ")", ".", "any", "(", ")", "assert", "not", "np", ".", "isnan", "(", "smoothed_sigmas", ")", ".", "any", "(", ")", "assert", "smoothed_mus", ".", "shape", "==", "(", "self", ".", "T", ",", "self", ".", "D_latent", ")", "assert", "smoothed_sigmas", ".", "shape", "==", "(", "self", ".", "T", ",", "self", ".", "D_latent", ",", "self", ".", "D_latent", ")", "assert", "E_xtp1_xtT", ".", "shape", "==", "(", "self", ".", "T", "-", "1", ",", "self", ".", "D_latent", ",", "self", ".", "D_latent", ")", "# This is like LDSStates._set_expected_states but doesn't sum over time", "T", ",", "D_obs", "=", "self", ".", "T", ",", "self", ".", "D_emission", "E_x_xT", "=", "smoothed_sigmas", "+", "smoothed_mus", "[", ":", ",", ":", ",", "None", "]", "*", "smoothed_mus", "[", ":", ",", "None", ",", ":", "]", "E_x_uT", "=", "smoothed_mus", "[", ":", ",", ":", ",", "None", "]", "*", "self", ".", "inputs", "[", ":", ",", "None", ",", ":", "]", "E_u_uT", "=", "self", ".", "inputs", "[", ":", ",", ":", ",", "None", "]", "*", "self", ".", "inputs", "[", ":", ",", "None", ",", ":", "]", "E_xu_xuT", "=", "np", ".", "concatenate", "(", "(", "np", ".", "concatenate", "(", "(", "E_x_xT", ",", "E_x_uT", ")", ",", "axis", "=", "2", ")", ",", "np", ".", "concatenate", "(", "(", "np", ".", "transpose", "(", "E_x_uT", ",", "(", "0", ",", "2", ",", "1", ")", ")", ",", "E_u_uT", ")", ",", "axis", "=", "2", ")", ")", ",", "axis", "=", "1", ")", "E_xut_xutT", "=", "E_xu_xuT", "[", ":", "-", "1", "]", "E_xtp1_xtp1T", "=", "E_x_xT", "[", "1", ":", "]", "E_xtp1_utT", "=", "(", "smoothed_mus", "[", "1", ":", ",", ":", ",", "None", "]", "*", "self", ".", "inputs", "[", ":", "-", "1", ",", "None", ",", ":", "]", ")", "E_xtp1_xutT", "=", "np", ".", "concatenate", "(", "(", "E_xtp1_xtT", ",", "E_xtp1_utT", ")", ",", "axis", "=", "-", "1", ")", "# Initial state stats", "self", ".", "E_init_stats", "=", "(", "self", ".", "smoothed_mus", "[", "0", "]", ",", "E_x_xT", "[", "0", "]", ",", "1.", ")", "# Dynamics stats", "self", ".", "E_dynamics_stats", "=", "(", "E_xtp1_xtp1T", ",", "E_xtp1_xutT", ",", "E_xut_xutT", ",", "np", ".", "ones", "(", "self", ".", "T", "-", "1", ")", ")", "# Emission stats -- special case diagonal noise", "E_yyT", "=", "self", ".", "data", "**", "2", "if", "self", ".", "diagonal_noise", "else", "self", ".", "data", "[", ":", ",", ":", ",", "None", "]", "*", "self", ".", "data", "[", ":", ",", "None", ",", ":", "]", "E_yxT", "=", "self", ".", "data", "[", ":", ",", ":", ",", "None", "]", "*", "self", ".", "smoothed_mus", "[", ":", ",", "None", ",", ":", "]", "E_yuT", "=", "self", ".", "data", "[", ":", ",", ":", ",", "None", "]", "*", "self", ".", "inputs", "[", ":", ",", "None", ",", ":", "]", "E_yxuT", "=", "np", ".", "concatenate", "(", "(", "E_yxT", ",", "E_yuT", ")", ",", "axis", "=", "-", "1", ")", "E_n", "=", "np", ".", "ones", "(", "(", "T", ",", "D_obs", ")", ")", "if", "self", ".", "diagonal_noise", "else", "np", ".", "ones", "(", "T", ")", "self", ".", "E_emission_stats", "=", "(", "E_yyT", ",", "E_yxuT", ",", "E_xu_xuT", ",", "E_n", ")" ]
Both meanfield and VBEM require expected statistics of the continuous latent states, x. This is a helper function to take E[x_t], E[x_t x_t^T] and E[x_{t+1}, x_t^T] and compute the expected sufficient statistics for the initial distribution, dynamics distribution, and Gaussian observation distribution.
[ "Both", "meanfield", "and", "VBEM", "require", "expected", "statistics", "of", "the", "continuous", "latent", "states", "x", ".", "This", "is", "a", "helper", "function", "to", "take", "E", "[", "x_t", "]", "E", "[", "x_t", "x_t^T", "]", "and", "E", "[", "x_", "{", "t", "+", "1", "}", "x_t^T", "]", "and", "compute", "the", "expected", "sufficient", "statistics", "for", "the", "initial", "distribution", "dynamics", "distribution", "and", "Gaussian", "observation", "distribution", "." ]
python
train
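A small numpy check of the broadcasting identity this helper relies on, E[x_t x_t^T] = Sigma_t + mu_t mu_t^T stacked over time.

import numpy as np

T, D = 4, 2
mus = np.random.randn(T, D)
sigmas = np.tile(np.eye(D), (T, 1, 1))

# Broadcasting over the leading time axis builds all T outer products.
E_x_xT = sigmas + mus[:, :, None] * mus[:, None, :]
assert E_x_xT.shape == (T, D, D)
assert np.allclose(E_x_xT[0], np.eye(D) + np.outer(mus[0], mus[0]))
print('per-timestep second moments OK')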
inveniosoftware-contrib/record-recommender
record_recommender/profiles.py
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/profiles.py#L129-L139
def count_records(self, record_counter, file): """Count the number of viewed records.""" counter = record_counter events_counter = 0 for record in file.get_records(): recid = record[2] counter[recid] = counter.get(recid, 0) + 1 events_counter += 1 self.stat['user_record_events'] = events_counter return counter
[ "def", "count_records", "(", "self", ",", "record_counter", ",", "file", ")", ":", "counter", "=", "record_counter", "events_counter", "=", "0", "for", "record", "in", "file", ".", "get_records", "(", ")", ":", "recid", "=", "record", "[", "2", "]", "counter", "[", "recid", "]", "=", "counter", ".", "get", "(", "recid", ",", "0", ")", "+", "1", "events_counter", "+=", "1", "self", ".", "stat", "[", "'user_record_events'", "]", "=", "events_counter", "return", "counter" ]
Count the number of viewed records.
[ "Count", "the", "number", "of", "viewed", "records", "." ]
python
train
priestc/giotto
giotto/contrib/static/programs.py
https://github.com/priestc/giotto/blob/d4c26380caefa7745bb27135e315de830f7254d3/giotto/contrib/static/programs.py#L24-L41
def StaticServe(base_path='/views/static/'): """ Meta program for serving any file based on the path """ def get_file(path=RAW_INVOCATION_ARGS): fullpath = get_config('project_path') + os.path.join(base_path, path) try: mime, encoding = mimetypes.guess_type(fullpath) return open(fullpath, 'rb'), mime or 'application/octet-stream' except IOError: raise DataNotFound("File does not exist") class StaticServe(Program): controllers = ['http-get'] model = [get_file] view = FileView() return StaticServe()
[ "def", "StaticServe", "(", "base_path", "=", "'/views/static/'", ")", ":", "def", "get_file", "(", "path", "=", "RAW_INVOCATION_ARGS", ")", ":", "fullpath", "=", "get_config", "(", "'project_path'", ")", "+", "os", ".", "path", ".", "join", "(", "base_path", ",", "path", ")", "try", ":", "mime", ",", "encoding", "=", "mimetypes", ".", "guess_type", "(", "fullpath", ")", "return", "open", "(", "fullpath", ",", "'rb'", ")", ",", "mime", "or", "'application/octet-stream'", "except", "IOError", ":", "raise", "DataNotFound", "(", "\"File does not exist\"", ")", "class", "StaticServe", "(", "Program", ")", ":", "controllers", "=", "[", "'http-get'", "]", "model", "=", "[", "get_file", "]", "view", "=", "FileView", "(", ")", "return", "StaticServe", "(", ")" ]
Meta program for serving any file based on the path
[ "Meta", "program", "for", "serving", "any", "file", "based", "on", "the", "path" ]
python
train
greenape/mktheapidocs
mktheapidocs/mkapi.py
https://github.com/greenape/mktheapidocs/blob/a45e8b43ddd80ed360fe1e98d4f73dc11c4e7bf7/mktheapidocs/mkapi.py#L360-L381
def summary(doc): """ Generate markdown for summary section. Parameters ---------- doc : dict Output from numpydoc Returns ------- list of str Markdown strings """ lines = [] if "Summary" in doc and len(doc["Summary"]) > 0: lines.append(fix_footnotes(" ".join(doc["Summary"]))) lines.append("\n") if "Extended Summary" in doc and len(doc["Extended Summary"]) > 0: lines.append(fix_footnotes(" ".join(doc["Extended Summary"]))) lines.append("\n") return lines
[ "def", "summary", "(", "doc", ")", ":", "lines", "=", "[", "]", "if", "\"Summary\"", "in", "doc", "and", "len", "(", "doc", "[", "\"Summary\"", "]", ")", ">", "0", ":", "lines", ".", "append", "(", "fix_footnotes", "(", "\" \"", ".", "join", "(", "doc", "[", "\"Summary\"", "]", ")", ")", ")", "lines", ".", "append", "(", "\"\\n\"", ")", "if", "\"Extended Summary\"", "in", "doc", "and", "len", "(", "doc", "[", "\"Extended Summary\"", "]", ")", ">", "0", ":", "lines", ".", "append", "(", "fix_footnotes", "(", "\" \"", ".", "join", "(", "doc", "[", "\"Extended Summary\"", "]", ")", ")", ")", "lines", ".", "append", "(", "\"\\n\"", ")", "return", "lines" ]
Generate markdown for summary section. Parameters ---------- doc : dict Output from numpydoc Returns ------- list of str Markdown strings
[ "Generate", "markdown", "for", "summary", "section", "." ]
python
train
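A runnable example feeding summary() the dict shape numpydoc produces; fix_footnotes is stubbed as the identity here since its real definition lives elsewhere in the module.

def fix_footnotes(text):
    return text  # stand-in; the real helper rewrites footnote references

doc = {'Summary': ['Fit the model.'],
       'Extended Summary': ['Runs one EM pass', 'per call.']}

lines = []
if doc.get('Summary'):
    lines += [fix_footnotes(' '.join(doc['Summary'])), '\n']
if doc.get('Extended Summary'):
    lines += [fix_footnotes(' '.join(doc['Extended Summary'])), '\n']
print(lines)  # ['Fit the model.', '\n', 'Runs one EM pass per call.', '\n']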
justquick/django-activity-stream
actstream/managers.py
https://github.com/justquick/django-activity-stream/blob/a1e06f2e6429cc5fc321e7801440dd7c5b9d5a35/actstream/managers.py#L145-L155
def is_following(self, user, instance, flag=''): """ Check if a user is following an instance. """ if not user or user.is_anonymous: return False queryset = self.for_object(instance) if flag: queryset = queryset.filter(flag=flag) return queryset.filter(user=user).exists()
[ "def", "is_following", "(", "self", ",", "user", ",", "instance", ",", "flag", "=", "''", ")", ":", "if", "not", "user", "or", "user", ".", "is_anonymous", ":", "return", "False", "queryset", "=", "self", ".", "for_object", "(", "instance", ")", "if", "flag", ":", "queryset", "=", "queryset", ".", "filter", "(", "flag", "=", "flag", ")", "return", "queryset", ".", "filter", "(", "user", "=", "user", ")", ".", "exists", "(", ")" ]
Check if a user is following an instance.
[ "Check", "if", "a", "user", "is", "following", "an", "instance", "." ]
python
train
Chilipp/model-organization
model_organization/__init__.py
https://github.com/Chilipp/model-organization/blob/694d1219c7ed7e1b2b17153afa11bdc21169bca2/model_organization/__init__.py#L159-L204
def start(self, **kwargs): """ Start the commands of this organizer Parameters ---------- ``**kwargs`` Any keyword from the :attr:`commands` or :attr:`parser_commands` attribute Returns ------- argparse.Namespace The namespace with the commands as given in ``**kwargs`` and the return values of the corresponding method""" ts = {} ret = {} info_parts = {'info', 'get-value', 'get_value'} for cmd in self.commands: parser_cmd = self.parser_commands.get(cmd, cmd) if parser_cmd in kwargs or cmd in kwargs: kws = kwargs.get(cmd, kwargs.get(parser_cmd)) if isinstance(kws, Namespace): kws = vars(kws) func = getattr(self, cmd or 'main') ret[cmd] = func(**kws) if cmd not in info_parts: ts[cmd] = str(dt.datetime.now()) exp = self._experiment project_parts = {'setup'} projectname = self._projectname if (projectname is not None and project_parts.intersection(ts) and projectname in self.config.projects): self.config.projects[projectname]['timestamps'].update( {key: ts[key] for key in project_parts.intersection(ts)}) elif not ts: # don't make modifications for info self.no_modification = True if exp is not None and exp in self.config.experiments: projectname = self.projectname try: ts.update(self.config.projects[projectname]['timestamps']) except KeyError: pass if not self.is_archived(exp): self.config.experiments[exp]['timestamps'].update(ts) return Namespace(**ret)
[ "def", "start", "(", "self", ",", "*", "*", "kwargs", ")", ":", "ts", "=", "{", "}", "ret", "=", "{", "}", "info_parts", "=", "{", "'info'", ",", "'get-value'", ",", "'get_value'", "}", "for", "cmd", "in", "self", ".", "commands", ":", "parser_cmd", "=", "self", ".", "parser_commands", ".", "get", "(", "cmd", ",", "cmd", ")", "if", "parser_cmd", "in", "kwargs", "or", "cmd", "in", "kwargs", ":", "kws", "=", "kwargs", ".", "get", "(", "cmd", ",", "kwargs", ".", "get", "(", "parser_cmd", ")", ")", "if", "isinstance", "(", "kws", ",", "Namespace", ")", ":", "kws", "=", "vars", "(", "kws", ")", "func", "=", "getattr", "(", "self", ",", "cmd", "or", "'main'", ")", "ret", "[", "cmd", "]", "=", "func", "(", "*", "*", "kws", ")", "if", "cmd", "not", "in", "info_parts", ":", "ts", "[", "cmd", "]", "=", "str", "(", "dt", ".", "datetime", ".", "now", "(", ")", ")", "exp", "=", "self", ".", "_experiment", "project_parts", "=", "{", "'setup'", "}", "projectname", "=", "self", ".", "_projectname", "if", "(", "projectname", "is", "not", "None", "and", "project_parts", ".", "intersection", "(", "ts", ")", "and", "projectname", "in", "self", ".", "config", ".", "projects", ")", ":", "self", ".", "config", ".", "projects", "[", "projectname", "]", "[", "'timestamps'", "]", ".", "update", "(", "{", "key", ":", "ts", "[", "key", "]", "for", "key", "in", "project_parts", ".", "intersection", "(", "ts", ")", "}", ")", "elif", "not", "ts", ":", "# don't make modifications for info", "self", ".", "no_modification", "=", "True", "if", "exp", "is", "not", "None", "and", "exp", "in", "self", ".", "config", ".", "experiments", ":", "projectname", "=", "self", ".", "projectname", "try", ":", "ts", ".", "update", "(", "self", ".", "config", ".", "projects", "[", "projectname", "]", "[", "'timestamps'", "]", ")", "except", "KeyError", ":", "pass", "if", "not", "self", ".", "is_archived", "(", "exp", ")", ":", "self", ".", "config", ".", "experiments", "[", "exp", "]", "[", "'timestamps'", "]", ".", "update", "(", "ts", ")", "return", "Namespace", "(", "*", "*", "ret", ")" ]
Start the commands of this organizer Parameters ---------- ``**kwargs`` Any keyword from the :attr:`commands` or :attr:`parser_commands` attribute Returns ------- argparse.Namespace The namespace with the commands as given in ``**kwargs`` and the return values of the corresponding method
[ "Start", "the", "commands", "of", "this", "organizer" ]
python
train
timkpaine/pyEX
pyEX/stocks.py
https://github.com/timkpaine/pyEX/blob/91cf751dafdb208a0c8b5377945e5808b99f94ba/pyEX/stocks.py#L256-L272
def bookDF(symbol, token='', version=''): '''Book data https://iextrading.com/developer/docs/#book realtime during Investors Exchange market hours Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: DataFrame: result ''' x = book(symbol, token, version) df = _bookToDF(x) return df
[ "def", "bookDF", "(", "symbol", ",", "token", "=", "''", ",", "version", "=", "''", ")", ":", "x", "=", "book", "(", "symbol", ",", "token", ",", "version", ")", "df", "=", "_bookToDF", "(", "x", ")", "return", "df" ]
Book data https://iextrading.com/developer/docs/#book realtime during Investors Exchange market hours Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: DataFrame: result
[ "Book", "data" ]
python
valid
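A minimal usage sketch, assuming network access to the IEX API; the import path matches the file shown above.

from pyEX.stocks import bookDF

df = bookDF('AAPL')   # quote, bids, asks and trades as one DataFrame
print(df.head())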
acsone/git-aggregator
git_aggregator/repo.py
https://github.com/acsone/git-aggregator/blob/8631b0e64f9e8ce1857b21adeddb890ebd8469a6/git_aggregator/repo.py#L359-L365
def show_closed_prs(self): """Log only closed PRs.""" all_prs = self.collect_prs_info() for pr_info in all_prs.get('closed', []): logger.info( '{url} in state {state} ({merged})'.format(**pr_info) )
[ "def", "show_closed_prs", "(", "self", ")", ":", "all_prs", "=", "self", ".", "collect_prs_info", "(", ")", "for", "pr_info", "in", "all_prs", ".", "get", "(", "'closed'", ",", "[", "]", ")", ":", "logger", ".", "info", "(", "'{url} in state {state} ({merged})'", ".", "format", "(", "*", "*", "pr_info", ")", ")" ]
Log only closed PRs.
[ "Log", "only", "closed", "PRs", "." ]
python
train
Aula13/poloniex
poloniex/poloniex.py
https://github.com/Aula13/poloniex/blob/a5bfc91e766e220bf77f5e3a1b131f095913e714/poloniex/poloniex.py#L250-L266
def buy(self, currencyPair, rate, amount, fillOrKill=None, immediateOrCancel=None, postOnly=None): """Places a limit buy order in a given market. Required POST parameters are "currencyPair", "rate", and "amount". If successful, the method will return the order number. You may optionally set "fillOrKill", "immediateOrCancel", "postOnly" to 1. A fill-or-kill order will either fill in its entirety or be completely aborted. An immediate-or-cancel order can be partially or completely filled, but any portion of the order that cannot be filled immediately will be canceled rather than left on the order book. A post-only order will only be placed if no portion of it fills immediately; this guarantees you will never pay the taker fee on any part of the order that fills.""" return self._private('buy', currencyPair=currencyPair, rate=rate, amount=amount, fillOrKill=fillOrKill, immediateOrCancel=immediateOrCancel, postOnly=postOnly)
[ "def", "buy", "(", "self", ",", "currencyPair", ",", "rate", ",", "amount", ",", "fillOrKill", "=", "None", ",", "immediateOrCancel", "=", "None", ",", "postOnly", "=", "None", ")", ":", "return", "self", ".", "_private", "(", "'buy'", ",", "currencyPair", "=", "currencyPair", ",", "rate", "=", "rate", ",", "amount", "=", "amount", ",", "fillOrKill", "=", "fillOrKill", ",", "immediateOrCancel", "=", "immediateOrCancel", ",", "postOnly", "=", "postOnly", ")" ]
Places a limit buy order in a given market. Required POST parameters are "currencyPair", "rate", and "amount". If successful, the method will return the order number. You may optionally set "fillOrKill", "immediateOrCancel", "postOnly" to 1. A fill-or-kill order will either fill in its entirety or be completely aborted. An immediate-or-cancel order can be partially or completely filled, but any portion of the order that cannot be filled immediately will be canceled rather than left on the order book. A post-only order will only be placed if no portion of it fills immediately; this guarantees you will never pay the taker fee on any part of the order that fills.
[ "Places", "a", "limit", "buy", "order", "in", "a", "given", "market", ".", "Required", "POST", "parameters", "are", "currencyPair", "rate", "and", "amount", ".", "If", "successful", "the", "method", "will", "return", "the", "order", "number", ".", "You", "may", "optionally", "set", "fillOrKill", "immediateOrCancel", "postOnly", "to", "1", ".", "A", "fill", "-", "or", "-", "kill", "order", "will", "either", "fill", "in", "its", "entirety", "or", "be", "completely", "aborted", ".", "An", "immediate", "-", "or", "-", "cancel", "order", "can", "be", "partially", "or", "completely", "filled", "but", "any", "portion", "of", "the", "order", "that", "cannot", "be", "filled", "immediately", "will", "be", "canceled", "rather", "than", "left", "on", "the", "order", "book", ".", "A", "post", "-", "only", "order", "will", "only", "be", "placed", "if", "no", "portion", "of", "it", "fills", "immediately", ";", "this", "guarantees", "you", "will", "never", "pay", "the", "taker", "fee", "on", "any", "part", "of", "the", "order", "that", "fills", "." ]
python
train
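A usage sketch, assuming the Poloniex client is constructed with an API key and secret; the credential values below are placeholders, and real ones are required for trading calls.

from poloniex import Poloniex

# Placeholder credentials; a real key/secret pair is required to trade.
polo = Poloniex('API_KEY', 'API_SECRET')

# Buy 1 ETH at 0.02 BTC on the BTC_ETH market, aborting entirely if the
# order cannot fill at once (fill-or-kill).
order = polo.buy('BTC_ETH', rate='0.02000000', amount='1.0', fillOrKill=1)
print(order['orderNumber'])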
ministryofjustice/money-to-prisoners-common
mtp_common/forms/__init__.py
https://github.com/ministryofjustice/money-to-prisoners-common/blob/33c43a2912cb990d9148da7c8718f480f07d90a1/mtp_common/forms/__init__.py#L6-L27
def replace_default_error_messages(): """ Replace Django's generic error messages with MTP-specific versions NB: avoid trailing full stops visually, they are added for screen readers in templates """ forms.Field.default_error_messages['required'] = _('This field is required') forms.CharField.default_error_messages['min_length'] = _('Youโ€™ve entered too few characters') forms.CharField.default_error_messages['max_length'] = _('Youโ€™ve entered too many characters') forms.IntegerField.default_error_messages['invalid'] = _('Enter a whole number') forms.FloatField.default_error_messages['invalid'] = _('Enter a number') forms.DecimalField.default_error_messages['invalid'] = _('Enter a number') forms.DateField.default_error_messages['invalid'] = _('Enter a valid date') forms.TimeField.default_error_messages['invalid'] = _('Enter a valid time') forms.DateTimeField.default_error_messages['invalid'] = _('Enter a valid date and time') forms.FileField.default_error_messages.update({ 'invalid': _('No file was submitted'), 'missing': _('No file was submitted'), 'empty': _('The submitted file is empty'), }) forms.SplitDateTimeField.default_error_messages['invalid_date'] = _('Enter a valid date') forms.SplitDateTimeField.default_error_messages['invalid_time'] = _('Enter a valid time') validators.EmailValidator.message = _('Enter a valid email address')
[ "def", "replace_default_error_messages", "(", ")", ":", "forms", ".", "Field", ".", "default_error_messages", "[", "'required'", "]", "=", "_", "(", "'This field is required'", ")", "forms", ".", "CharField", ".", "default_error_messages", "[", "'min_length'", "]", "=", "_", "(", "'Youโ€™ve entered too few characters')", "", "forms", ".", "CharField", ".", "default_error_messages", "[", "'max_length'", "]", "=", "_", "(", "'Youโ€™ve entered too many characters')", "", "forms", ".", "IntegerField", ".", "default_error_messages", "[", "'invalid'", "]", "=", "_", "(", "'Enter a whole number'", ")", "forms", ".", "FloatField", ".", "default_error_messages", "[", "'invalid'", "]", "=", "_", "(", "'Enter a number'", ")", "forms", ".", "DecimalField", ".", "default_error_messages", "[", "'invalid'", "]", "=", "_", "(", "'Enter a number'", ")", "forms", ".", "DateField", ".", "default_error_messages", "[", "'invalid'", "]", "=", "_", "(", "'Enter a valid date'", ")", "forms", ".", "TimeField", ".", "default_error_messages", "[", "'invalid'", "]", "=", "_", "(", "'Enter a valid time'", ")", "forms", ".", "DateTimeField", ".", "default_error_messages", "[", "'invalid'", "]", "=", "_", "(", "'Enter a valid date and time'", ")", "forms", ".", "FileField", ".", "default_error_messages", ".", "update", "(", "{", "'invalid'", ":", "_", "(", "'No file was submitted'", ")", ",", "'missing'", ":", "_", "(", "'No file was submitted'", ")", ",", "'empty'", ":", "_", "(", "'The submitted file is empty'", ")", ",", "}", ")", "forms", ".", "SplitDateTimeField", ".", "default_error_messages", "[", "'invalid_date'", "]", "=", "_", "(", "'Enter a valid date'", ")", "forms", ".", "SplitDateTimeField", ".", "default_error_messages", "[", "'invalid_time'", "]", "=", "_", "(", "'Enter a valid time'", ")", "validators", ".", "EmailValidator", ".", "message", "=", "_", "(", "'Enter a valid email address'", ")" ]
Replace Django's generic error messages with MTP-specific versions NB: avoid trailing full stops visually, they are added for screen readers in templates
[ "Replace", "Django", "s", "generic", "error", "messages", "with", "MTP", "-", "specific", "versions", "NB", ":", "avoid", "trailing", "full", "stops", "visually", "they", "are", "added", "for", "screen", "readers", "in", "templates" ]
python
train
devopshq/artifactory
artifactory.py
https://github.com/devopshq/artifactory/blob/b9ec08cd72527d7d43159fe45c3a98a0b0838534/artifactory.py#L1280-L1288
def move(self, dst):
    """
    Move artifact from this path to destination.
    """
    if self.drive != dst.drive:
        raise NotImplementedError(
            "Moving between instances is not implemented yet")

    self._accessor.move(self, dst)
[ "def", "move", "(", "self", ",", "dst", ")", ":", "if", "self", ".", "drive", "!=", "dst", ".", "drive", ":", "raise", "NotImplementedError", "(", "\"Moving between instances is not implemented yet\"", ")", "self", ".", "_accessor", ".", "move", "(", "self", ",", "dst", ")" ]
Move artifact from this path to destination.
[ "Move", "artifact", "from", "this", "path", "to", "destinaiton", "." ]
python
train
jreese/aiosqlite
aiosqlite/core.py
https://github.com/jreese/aiosqlite/blob/3f548b568b8db9a57022b6e2c9627f5cdefb983f/aiosqlite/core.py#L64-L69
async def fetchmany(self, size: int = None) -> Iterable[sqlite3.Row]: """Fetch up to `cursor.arraysize` number of rows.""" args = () # type: Tuple[int, ...] if size is not None: args = (size,) return await self._execute(self._cursor.fetchmany, *args)
[ "async", "def", "fetchmany", "(", "self", ",", "size", ":", "int", "=", "None", ")", "->", "Iterable", "[", "sqlite3", ".", "Row", "]", ":", "args", "=", "(", ")", "# type: Tuple[int, ...]", "if", "size", "is", "not", "None", ":", "args", "=", "(", "size", ",", ")", "return", "await", "self", ".", "_execute", "(", "self", ".", "_cursor", ".", "fetchmany", ",", "*", "args", ")" ]
Fetch up to `cursor.arraysize` number of rows.
[ "Fetch", "up", "to", "cursor", ".", "arraysize", "number", "of", "rows", "." ]
python
train
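A runnable example streaming rows in batches from an in-memory database with fetchmany().

import asyncio
import aiosqlite

async def main():
    async with aiosqlite.connect(':memory:') as db:
        await db.execute('CREATE TABLE t (n INTEGER)')
        await db.executemany('INSERT INTO t VALUES (?)',
                             [(i,) for i in range(5)])
        cursor = await db.execute('SELECT n FROM t')
        while True:
            batch = await cursor.fetchmany(2)
            if not batch:          # empty list signals exhaustion
                break
            print([row[0] for row in batch])   # [0, 1] then [2, 3] then [4]

asyncio.run(main())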
markovmodel/PyEMMA
pyemma/coordinates/transform/nystroem_tica.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/transform/nystroem_tica.py#L604-L643
def approximate_eig(self, epsilon=1e-6):
    """ Compute low-rank approximation of the eigenvalue decomposition of target matrix.

    If spd is True, the decomposition will be conducted while ensuring that the spectrum of `A_k^{-1}` is positive.

    Parameters
    ----------
    epsilon : float, optional, default 1e-6
        Cutoff for eigenvalue norms. If negative eigenvalues occur, with norms larger than epsilon, the largest
        negative eigenvalue norm will be used instead of epsilon, i.e. a band including all negative eigenvalues
        will be cut off.

    Returns
    -------
    s : ndarray((m,), dtype=float)
        approximated eigenvalues. Number of eigenvalues returned is at most the number of columns used in the
        Nystroem approximation, but may be smaller depending on epsilon.

    V : ndarray((n,m), dtype=float)
        approximated eigenvectors in columns. Number of eigenvectors returned is at most the number of columns
        used in the Nystroem approximation, but may be smaller depending on epsilon.

    """
    L = self.approximate_cholesky(epsilon=epsilon)
    LL = np.dot(L.T, L)
    s, V = np.linalg.eigh(LL)
    # sort
    s, V = sort_by_norm(s, V)
    # back-transform eigenvectors
    Linv = np.linalg.pinv(L.T)
    V = np.dot(Linv, V)
    # normalize eigenvectors
    ncol = V.shape[1]
    for i in range(ncol):
        if not np.allclose(V[:, i], 0):
            V[:, i] /= np.sqrt(np.dot(V[:, i], V[:, i]))

    return s, V
[ "def", "approximate_eig", "(", "self", ",", "epsilon", "=", "1e-6", ")", ":", "L", "=", "self", ".", "approximate_cholesky", "(", "epsilon", "=", "epsilon", ")", "LL", "=", "np", ".", "dot", "(", "L", ".", "T", ",", "L", ")", "s", ",", "V", "=", "np", ".", "linalg", ".", "eigh", "(", "LL", ")", "# sort", "s", ",", "V", "=", "sort_by_norm", "(", "s", ",", "V", ")", "# back-transform eigenvectors", "Linv", "=", "np", ".", "linalg", ".", "pinv", "(", "L", ".", "T", ")", "V", "=", "np", ".", "dot", "(", "Linv", ",", "V", ")", "# normalize eigenvectors", "ncol", "=", "V", ".", "shape", "[", "1", "]", "for", "i", "in", "range", "(", "ncol", ")", ":", "if", "not", "np", ".", "allclose", "(", "V", "[", ":", ",", "i", "]", ",", "0", ")", ":", "V", "[", ":", ",", "i", "]", "/=", "np", ".", "sqrt", "(", "np", ".", "dot", "(", "V", "[", ":", ",", "i", "]", ",", "V", "[", ":", ",", "i", "]", ")", ")", "return", "s", ",", "V" ]
Compute low-rank approximation of the eigenvalue decomposition of target matrix. If spd is True, the decomposition will be conducted while ensuring that the spectrum of `A_k^{-1}` is positive. Parameters ---------- epsilon : float, optional, default 1e-6 Cutoff for eigenvalue norms. If negative eigenvalues occur, with norms larger than epsilon, the largest negative eigenvalue norm will be used instead of epsilon, i.e. a band including all negative eigenvalues will be cut off. Returns ------- s : ndarray((m,), dtype=float) approximated eigenvalues. Number of eigenvalues returned is at most the number of columns used in the Nystroem approximation, but may be smaller depending on epsilon. V : ndarray((n,m), dtype=float) approximated eigenvectors in columns. Number of eigenvectors returned is at most the number of columns used in the Nystroem approximation, but may be smaller depending on epsilon.
[ "Compute", "low", "-", "rank", "approximation", "of", "the", "eigenvalue", "decomposition", "of", "target", "matrix", "." ]
python
train
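A numpy check of the low-rank identity behind approximate_eig(): the nonzero eigenpairs of A = L L^T can be recovered from the small m x m matrix L^T L and back-transformed through pinv(L.T).

import numpy as np

n, m = 50, 5
rng = np.random.RandomState(0)
L = rng.randn(n, m)
A = L.dot(L.T)                       # rank-m SPD matrix

s, W = np.linalg.eigh(L.T.dot(L))    # solve the small m x m problem
V = np.linalg.pinv(L.T).dot(W)       # back-transform to n-dim vectors
V /= np.linalg.norm(V, axis=0)       # normalize columns

for i in range(m):
    assert np.allclose(A.dot(V[:, i]), s[i] * V[:, i])
print('recovered', m, 'eigenpairs of a', n, 'x', n, 'matrix')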
greenbone/ospd
ospd/misc.py
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/misc.py#L365-L375
def inet_pton(address_family, ip_string): """ A platform independent version of inet_pton """ global __inet_pton if __inet_pton is None: if hasattr(socket, 'inet_pton'): __inet_pton = socket.inet_pton else: from ospd import win_socket __inet_pton = win_socket.inet_pton return __inet_pton(address_family, ip_string)
[ "def", "inet_pton", "(", "address_family", ",", "ip_string", ")", ":", "global", "__inet_pton", "if", "__inet_pton", "is", "None", ":", "if", "hasattr", "(", "socket", ",", "'inet_pton'", ")", ":", "__inet_pton", "=", "socket", ".", "inet_pton", "else", ":", "from", "ospd", "import", "win_socket", "__inet_pton", "=", "win_socket", ".", "inet_pton", "return", "__inet_pton", "(", "address_family", ",", "ip_string", ")" ]
A platform independent version of inet_pton
[ "A", "platform", "independent", "version", "of", "inet_pton" ]
python
train
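A minimal usage example; on Windows builds of Python that lack socket.inet_pton, the ospd fallback is used transparently behind the same call. The import path matches the file shown above.

import socket
from ospd.misc import inet_pton

packed = inet_pton(socket.AF_INET, '192.0.2.1')
print(packed)   # b'\xc0\x00\x02\x01'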
csparpa/pyowm
pyowm/alertapi30/trigger.py
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/alertapi30/trigger.py#L93-L107
def get_alerts_on(self, weather_param): """ Returns all the `Alert` objects of this `Trigger` that refer to the specified weather parameter (eg. 'temp', 'pressure', etc.). The allowed weather params are the ones enumerated by class `pyowm.alertapi30.enums.WeatherParametersEnum` :param weather_param: str, values in `pyowm.alertapi30.enums.WeatherParametersEnum` :return: list of `Alert` instances """ result = [] for alert in self.alerts: for met_condition in alert.met_conditions: if met_condition['condition'].weather_param == weather_param: result.append(alert) break return result
[ "def", "get_alerts_on", "(", "self", ",", "weather_param", ")", ":", "result", "=", "[", "]", "for", "alert", "in", "self", ".", "alerts", ":", "for", "met_condition", "in", "alert", ".", "met_conditions", ":", "if", "met_condition", "[", "'condition'", "]", ".", "weather_param", "==", "weather_param", ":", "result", ".", "append", "(", "alert", ")", "break", "return", "result" ]
Returns all the `Alert` objects of this `Trigger` that refer to the specified weather parameter (eg. 'temp', 'pressure', etc.). The allowed weather params are the ones enumerated by class `pyowm.alertapi30.enums.WeatherParametersEnum` :param weather_param: str, values in `pyowm.alertapi30.enums.WeatherParametersEnum` :return: list of `Alert` instances
[ "Returns", "all", "the", "Alert", "objects", "of", "this", "Trigger", "that", "refer", "to", "the", "specified", "weather", "parameter", "(", "eg", ".", "temp", "pressure", "etc", ".", ")", ".", "The", "allowed", "weather", "params", "are", "the", "ones", "enumerated", "by", "class", "pyowm", ".", "alertapi30", ".", "enums", ".", "WeatherParametersEnum", ":", "param", "weather_param", ":", "str", "values", "in", "pyowm", ".", "alertapi30", ".", "enums", ".", "WeatherParametersEnum", ":", "return", ":", "list", "of", "Alert", "instances" ]
python
train
fabioz/PyDev.Debugger
_pydev_bundle/pydev_versioncheck.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/_pydev_bundle/pydev_versioncheck.py#L3-L15
def versionok_for_gui(): ''' Return True if running Python is suitable for GUI Event Integration and deeper IPython integration ''' # We require Python 2.6+ ... if sys.hexversion < 0x02060000: return False # Or Python 3.2+ if sys.hexversion >= 0x03000000 and sys.hexversion < 0x03020000: return False # Not supported under Jython nor IronPython if sys.platform.startswith("java") or sys.platform.startswith('cli'): return False return True
[ "def", "versionok_for_gui", "(", ")", ":", "# We require Python 2.6+ ...", "if", "sys", ".", "hexversion", "<", "0x02060000", ":", "return", "False", "# Or Python 3.2+", "if", "sys", ".", "hexversion", ">=", "0x03000000", "and", "sys", ".", "hexversion", "<", "0x03020000", ":", "return", "False", "# Not supported under Jython nor IronPython", "if", "sys", ".", "platform", ".", "startswith", "(", "\"java\"", ")", "or", "sys", ".", "platform", ".", "startswith", "(", "'cli'", ")", ":", "return", "False", "return", "True" ]
Return True if running Python is suitable for GUI Event Integration and deeper IPython integration
[ "Return", "True", "if", "running", "Python", "is", "suitable", "for", "GUI", "Event", "Integration", "and", "deeper", "IPython", "integration" ]
python
train
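The hex thresholds above pack the (major, minor, micro, releaselevel, serial) bytes of the interpreter version into one integer; a quick check of how 0x03020000 maps to Python 3.2:

import sys

major, minor = 3, 2
assert (major << 24) | (minor << 16) == 0x03020000
# True on CPython 2.6+ and 3.2+; Jython and IronPython are excluded.
print(versionok_for_gui())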
mardix/Mocha
mocha/extras/md.py
https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/extras/md.py#L90-L99
def toc(text): """ Return a table of contents list :param text: :return: """ extensions = ['markdown.extensions.toc'] mkd = markdown.Markdown(extensions=extensions) html = mkd.convert(text) return mkd.toc
[ "def", "toc", "(", "text", ")", ":", "extensions", "=", "[", "'markdown.extensions.toc'", "]", "mkd", "=", "markdown", ".", "Markdown", "(", "extensions", "=", "extensions", ")", "html", "=", "mkd", ".", "convert", "(", "text", ")", "return", "mkd", ".", "toc" ]
Return a table of contents list :param text: :return:
[ "Return", "a", "table", "of", "context", "list", ":", "param", "text", ":", ":", "return", ":" ]
python
train
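A usage sketch: the convert() call looks discarded, but it is what populates mkd.toc via the toc extension.

text = "# Intro\n\n## Setup\n\nBody text."
print(toc(text))  # nested <ul> of anchor links for "Intro" and "Setup"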
vtkiorg/vtki
vtki/renderer.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/renderer.py#L433-L439
def remove_bounding_box(self): """ Removes bounding box """ if hasattr(self, '_box_object'): actor = self.bounding_box_actor self.bounding_box_actor = None del self._box_object self.remove_actor(actor, reset_camera=False)
[ "def", "remove_bounding_box", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'_box_object'", ")", ":", "actor", "=", "self", ".", "bounding_box_actor", "self", ".", "bounding_box_actor", "=", "None", "del", "self", ".", "_box_object", "self", ".", "remove_actor", "(", "actor", ",", "reset_camera", "=", "False", ")" ]
Removes bounding box
[ "Removes", "bounding", "box" ]
python
train
nikcub/floyd
floyd/util/dateformat.py
https://github.com/nikcub/floyd/blob/5772d0047efb11c9ce5f7d234a9da4576ce24edc/floyd/util/dateformat.py#L111-L120
def f(self): """ Time, in 12-hour hours and minutes, with minutes left off if they're zero. Examples: '1', '1:30', '2:05', '2' Proprietary extension. """ if self.data.minute == 0: return self.g() return u'%s:%s' % (self.g(), self.i())
[ "def", "f", "(", "self", ")", ":", "if", "self", ".", "data", ".", "minute", "==", "0", ":", "return", "self", ".", "g", "(", ")", "return", "u'%s:%s'", "%", "(", "self", ".", "g", "(", ")", ",", "self", ".", "i", "(", ")", ")" ]
Time, in 12-hour hours and minutes, with minutes left off if they're zero. Examples: '1', '1:30', '2:05', '2' Proprietary extension.
[ "Time", "in", "12", "-", "hour", "hours", "and", "minutes", "with", "minutes", "left", "off", "if", "they", "re", "zero", ".", "Examples", ":", "1", "1", ":", "30", "2", ":", "05", "2", "Proprietary", "extension", "." ]
python
train
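A standalone equivalent for context; treating `g` as the 12-hour hour without leading zeros and `i` as zero-padded minutes follows the Django dateformat conventions this class mirrors, which is an assumption about the unshown helpers.

import datetime

def format_f(t):
    hour12 = t.hour % 12 or 12             # g(): 12-hour clock, no leading zero
    if t.minute == 0:
        return str(hour12)
    return '%d:%02d' % (hour12, t.minute)  # i(): minutes, zero-padded

assert format_f(datetime.time(13, 0)) == '1'
assert format_f(datetime.time(14, 5)) == '2:05'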
ewiger/mlab
src/mlab/awmstools.py
https://github.com/ewiger/mlab/blob/72a98adf6499f548848ad44c604f74d68f07fe4f/src/mlab/awmstools.py#L1864-L1874
def argmin(iterable, key=None, both=False): """See `argmax`. """ if key is not None: it = imap(key, iterable) else: it = iter(iterable) score, argmin = reduce(min, izip(it, count())) if both: return argmin, score return argmin
[ "def", "argmin", "(", "iterable", ",", "key", "=", "None", ",", "both", "=", "False", ")", ":", "if", "key", "is", "not", "None", ":", "it", "=", "imap", "(", "key", ",", "iterable", ")", "else", ":", "it", "=", "iter", "(", "iterable", ")", "score", ",", "argmin", "=", "reduce", "(", "min", ",", "izip", "(", "it", ",", "count", "(", ")", ")", ")", "if", "both", ":", "return", "argmin", ",", "score", "return", "argmin" ]
See `argmax`.
[ "See", "argmax", "." ]
python
train
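The imap/izip imports pin this helper to Python 2; on Python 3 the same behaviour (index of the first minimum, optional key) is a short standalone function:

def argmin3(iterable, key=None):
    keyfn = (lambda p: p[1]) if key is None else (lambda p: key(p[1]))
    return min(enumerate(iterable), key=keyfn)[0]

assert argmin3([3, 1, 2]) == 1
assert argmin3(['aaa', 'b', 'cc'], key=len) == 1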
Scoppio/RagnarokEngine3
RagnarokEngine3/RE3.py
https://github.com/Scoppio/RagnarokEngine3/blob/4395d419ccd64fe9327c41f200b72ee0176ad896/RagnarokEngine3/RE3.py#L3166-L3177
def find_obj_by_tag(self, tag): """Search through all the objects in the world and return the first instance whose tag matches the specified string.""" for obj in self.__up_objects: if obj.tag == tag: return obj for obj in self.__draw_objects: if obj.tag == tag: return obj return None
[ "def", "find_obj_by_tag", "(", "self", ",", "tag", ")", ":", "for", "obj", "in", "self", ".", "__up_objects", ":", "if", "obj", ".", "tag", "==", "tag", ":", "return", "obj", "for", "obj", "in", "self", ".", "__draw_objects", ":", "if", "obj", ".", "tag", "==", "tag", ":", "return", "obj", "return", "None" ]
Search through all the objects in the world and return the first instance whose tag matches the specified string.
[ "Search", "through", "all", "the", "objects", "in", "the", "world", "and", "return", "the", "first", "instance", "whose", "tag", "matches", "the", "specified", "string", "." ]
python
train
SMTG-UCL/sumo
sumo/cli/dosplot.py
https://github.com/SMTG-UCL/sumo/blob/47aec6bbfa033a624435a65bd4edabd18bfb437f/sumo/cli/dosplot.py#L40-L184
def dosplot(filename=None, prefix=None, directory=None, elements=None, lm_orbitals=None, atoms=None, subplot=False, shift=True, total_only=False, plot_total=True, legend_on=True, legend_frame_on=False, legend_cutoff=3., gaussian=None, height=6., width=8., xmin=-6., xmax=6., num_columns=2, colours=None, yscale=1, xlabel='Energy (eV)', ylabel='Arb. units', style=None, no_base_style=False, image_format='pdf', dpi=400, plt=None, fonts=None): """A script to plot the density of states from a vasprun.xml file. Args: filename (:obj:`str`, optional): Path to a vasprun.xml file (can be gzipped). prefix (:obj:`str`, optional): Prefix for file names. directory (:obj:`str`, optional): The directory in which to save files. elements (:obj:`dict`, optional): The elements and orbitals to extract from the projected density of states. Should be provided as a :obj:`dict` with the keys as the element names and corresponding values as a :obj:`tuple` of orbitals. For example, the following would extract the Bi s, px, py and d orbitals:: {'Bi': ('s', 'px', 'py', 'd')} If an element is included with an empty :obj:`tuple`, all orbitals for that species will be extracted. If ``elements`` is not set or set to ``None``, all elements for all species will be extracted. lm_orbitals (:obj:`dict`, optional): The orbitals to decompose into their lm contributions (e.g. p -> px, py, pz). Should be provided as a :obj:`dict`, with the elements names as keys and a :obj:`tuple` of orbitals as the corresponding values. For example, the following would be used to decompose the oxygen p and d orbitals:: {'O': ('p', 'd')} atoms (:obj:`dict`, optional): Which atomic sites to use when calculating the projected density of states. Should be provided as a :obj:`dict`, with the element names as keys and a :obj:`tuple` of :obj:`int` specifying the atomic indices as the corresponding values. The elemental projected density of states will be summed only over the atom indices specified. If an element is included with an empty :obj:`tuple`, then all sites for that element will be included. The indices are 0 based for each element specified in the POSCAR. For example, the following will calculate the density of states for the first 4 Sn atoms and all O atoms in the structure:: {'Sn': (1, 2, 3, 4), 'O': (, )} If ``atoms`` is not set or set to ``None`` then all atomic sites for all elements will be considered. subplot (:obj:`bool`, optional): Plot the density of states for each element on separate subplots. Defaults to ``False``. shift (:obj:`bool`, optional): Shift the energies such that the valence band maximum (or Fermi level for metals) is at 0 eV. Defaults to ``True``. total_only (:obj:`bool`, optional): Only extract the total density of states. Defaults to ``False``. plot_total (:obj:`bool`, optional): Plot the total density of states. Defaults to ``True``. legend_on (:obj:`bool`, optional): Plot the graph legend. Defaults to ``True``. legend_frame_on (:obj:`bool`, optional): Plot a frame around the graph legend. Defaults to ``False``. legend_cutoff (:obj:`float`, optional): The cut-off (in % of the maximum density of states within the plotting range) for an elemental orbital to be labelled in the legend. This prevents the legend from containing labels for orbitals that have very little contribution in the plotting range. gaussian (:obj:`float`, optional): Broaden the density of states using convolution with a gaussian function. This parameter controls the sigma or standard deviation of the gaussian distribution. 
height (:obj:`float`, optional): The height of the plot. width (:obj:`float`, optional): The width of the plot. xmin (:obj:`float`, optional): The minimum energy on the x-axis. xmax (:obj:`float`, optional): The maximum energy on the x-axis. num_columns (:obj:`int`, optional): The number of columns in the legend. colours (:obj:`dict`, optional): Use custom colours for specific element and orbital combinations. Specified as a :obj:`dict` of :obj:`dict` of the colours. For example:: { 'Sn': {'s': 'r', 'p': 'b'}, 'O': {'s': '#000000'} } The colour can be a hex code, series of rgb values, or any other format supported by matplotlib. xlabel (:obj:`str`, optional): Label/units for x-axis (i.e. energy) ylabel (:obj:`str`, optional): Label/units for y-axis (i.e. DOS) yscale (:obj:`float`, optional): Scaling factor for the y-axis. style (:obj:`list` or :obj:`str`, optional): (List of) matplotlib style specifications, to be composed on top of Sumo base style. no_base_style (:obj:`bool`, optional): Prevent use of sumo base style. This can make alternative styles behave more predictably. image_format (:obj:`str`, optional): The image file format. Can be any format supported by matplotlib, including: png, jpg, pdf, and svg. Defaults to pdf. dpi (:obj:`int`, optional): The dots-per-inch (pixel density) for the image. plt (:obj:`matplotlib.pyplot`, optional): A :obj:`matplotlib.pyplot` object to use for plotting. fonts (:obj:`list`, optional): Fonts to use in the plot. Can be a single font, specified as a :obj:`str`, or several fonts, specified as a :obj:`list` of :obj:`str`. Returns: A matplotlib pyplot object. """ if not filename: if os.path.exists('vasprun.xml'): filename = 'vasprun.xml' elif os.path.exists('vasprun.xml.gz'): filename = 'vasprun.xml.gz' else: logging.error('ERROR: No vasprun.xml found!') sys.exit() dos, pdos = load_dos(filename, elements, lm_orbitals, atoms, gaussian, total_only) save_files = False if plt else True # don't save if pyplot object provided plotter = SDOSPlotter(dos, pdos) plt = plotter.get_plot(subplot=subplot, width=width, height=height, xmin=xmin, xmax=xmax, yscale=yscale, colours=colours, plot_total=plot_total, legend_on=legend_on, num_columns=num_columns, legend_frame_on=legend_frame_on, xlabel=xlabel, ylabel=ylabel, legend_cutoff=legend_cutoff, dpi=dpi, plt=plt, fonts=fonts, style=style, no_base_style=no_base_style) if save_files: basename = 'dos.{}'.format(image_format) filename = '{}_{}'.format(prefix, basename) if prefix else basename if directory: filename = os.path.join(directory, filename) plt.savefig(filename, format=image_format, dpi=dpi, bbox_inches='tight') write_files(dos, pdos, prefix=prefix, directory=directory) else: return plt
[ "def", "dosplot", "(", "filename", "=", "None", ",", "prefix", "=", "None", ",", "directory", "=", "None", ",", "elements", "=", "None", ",", "lm_orbitals", "=", "None", ",", "atoms", "=", "None", ",", "subplot", "=", "False", ",", "shift", "=", "True", ",", "total_only", "=", "False", ",", "plot_total", "=", "True", ",", "legend_on", "=", "True", ",", "legend_frame_on", "=", "False", ",", "legend_cutoff", "=", "3.", ",", "gaussian", "=", "None", ",", "height", "=", "6.", ",", "width", "=", "8.", ",", "xmin", "=", "-", "6.", ",", "xmax", "=", "6.", ",", "num_columns", "=", "2", ",", "colours", "=", "None", ",", "yscale", "=", "1", ",", "xlabel", "=", "'Energy (eV)'", ",", "ylabel", "=", "'Arb. units'", ",", "style", "=", "None", ",", "no_base_style", "=", "False", ",", "image_format", "=", "'pdf'", ",", "dpi", "=", "400", ",", "plt", "=", "None", ",", "fonts", "=", "None", ")", ":", "if", "not", "filename", ":", "if", "os", ".", "path", ".", "exists", "(", "'vasprun.xml'", ")", ":", "filename", "=", "'vasprun.xml'", "elif", "os", ".", "path", ".", "exists", "(", "'vasprun.xml.gz'", ")", ":", "filename", "=", "'vasprun.xml.gz'", "else", ":", "logging", ".", "error", "(", "'ERROR: No vasprun.xml found!'", ")", "sys", ".", "exit", "(", ")", "dos", ",", "pdos", "=", "load_dos", "(", "filename", ",", "elements", ",", "lm_orbitals", ",", "atoms", ",", "gaussian", ",", "total_only", ")", "save_files", "=", "False", "if", "plt", "else", "True", "# don't save if pyplot object provided", "plotter", "=", "SDOSPlotter", "(", "dos", ",", "pdos", ")", "plt", "=", "plotter", ".", "get_plot", "(", "subplot", "=", "subplot", ",", "width", "=", "width", ",", "height", "=", "height", ",", "xmin", "=", "xmin", ",", "xmax", "=", "xmax", ",", "yscale", "=", "yscale", ",", "colours", "=", "colours", ",", "plot_total", "=", "plot_total", ",", "legend_on", "=", "legend_on", ",", "num_columns", "=", "num_columns", ",", "legend_frame_on", "=", "legend_frame_on", ",", "xlabel", "=", "xlabel", ",", "ylabel", "=", "ylabel", ",", "legend_cutoff", "=", "legend_cutoff", ",", "dpi", "=", "dpi", ",", "plt", "=", "plt", ",", "fonts", "=", "fonts", ",", "style", "=", "style", ",", "no_base_style", "=", "no_base_style", ")", "if", "save_files", ":", "basename", "=", "'dos.{}'", ".", "format", "(", "image_format", ")", "filename", "=", "'{}_{}'", ".", "format", "(", "prefix", ",", "basename", ")", "if", "prefix", "else", "basename", "if", "directory", ":", "filename", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "filename", ")", "plt", ".", "savefig", "(", "filename", ",", "format", "=", "image_format", ",", "dpi", "=", "dpi", ",", "bbox_inches", "=", "'tight'", ")", "write_files", "(", "dos", ",", "pdos", ",", "prefix", "=", "prefix", ",", "directory", "=", "directory", ")", "else", ":", "return", "plt" ]
A script to plot the density of states from a vasprun.xml file. Args: filename (:obj:`str`, optional): Path to a vasprun.xml file (can be gzipped). prefix (:obj:`str`, optional): Prefix for file names. directory (:obj:`str`, optional): The directory in which to save files. elements (:obj:`dict`, optional): The elements and orbitals to extract from the projected density of states. Should be provided as a :obj:`dict` with the keys as the element names and corresponding values as a :obj:`tuple` of orbitals. For example, the following would extract the Bi s, px, py and d orbitals:: {'Bi': ('s', 'px', 'py', 'd')} If an element is included with an empty :obj:`tuple`, all orbitals for that species will be extracted. If ``elements`` is not set or set to ``None``, all elements for all species will be extracted. lm_orbitals (:obj:`dict`, optional): The orbitals to decompose into their lm contributions (e.g. p -> px, py, pz). Should be provided as a :obj:`dict`, with the elements names as keys and a :obj:`tuple` of orbitals as the corresponding values. For example, the following would be used to decompose the oxygen p and d orbitals:: {'O': ('p', 'd')} atoms (:obj:`dict`, optional): Which atomic sites to use when calculating the projected density of states. Should be provided as a :obj:`dict`, with the element names as keys and a :obj:`tuple` of :obj:`int` specifying the atomic indices as the corresponding values. The elemental projected density of states will be summed only over the atom indices specified. If an element is included with an empty :obj:`tuple`, then all sites for that element will be included. The indices are 0 based for each element specified in the POSCAR. For example, the following will calculate the density of states for the first 4 Sn atoms and all O atoms in the structure:: {'Sn': (1, 2, 3, 4), 'O': (, )} If ``atoms`` is not set or set to ``None`` then all atomic sites for all elements will be considered. subplot (:obj:`bool`, optional): Plot the density of states for each element on separate subplots. Defaults to ``False``. shift (:obj:`bool`, optional): Shift the energies such that the valence band maximum (or Fermi level for metals) is at 0 eV. Defaults to ``True``. total_only (:obj:`bool`, optional): Only extract the total density of states. Defaults to ``False``. plot_total (:obj:`bool`, optional): Plot the total density of states. Defaults to ``True``. legend_on (:obj:`bool`, optional): Plot the graph legend. Defaults to ``True``. legend_frame_on (:obj:`bool`, optional): Plot a frame around the graph legend. Defaults to ``False``. legend_cutoff (:obj:`float`, optional): The cut-off (in % of the maximum density of states within the plotting range) for an elemental orbital to be labelled in the legend. This prevents the legend from containing labels for orbitals that have very little contribution in the plotting range. gaussian (:obj:`float`, optional): Broaden the density of states using convolution with a gaussian function. This parameter controls the sigma or standard deviation of the gaussian distribution. height (:obj:`float`, optional): The height of the plot. width (:obj:`float`, optional): The width of the plot. xmin (:obj:`float`, optional): The minimum energy on the x-axis. xmax (:obj:`float`, optional): The maximum energy on the x-axis. num_columns (:obj:`int`, optional): The number of columns in the legend. colours (:obj:`dict`, optional): Use custom colours for specific element and orbital combinations. Specified as a :obj:`dict` of :obj:`dict` of the colours. 
For example:: { 'Sn': {'s': 'r', 'p': 'b'}, 'O': {'s': '#000000'} } The colour can be a hex code, series of rgb values, or any other format supported by matplotlib. xlabel (:obj:`str`, optional): Label/units for x-axis (i.e. energy) ylabel (:obj:`str`, optional): Label/units for y-axis (i.e. DOS) yscale (:obj:`float`, optional): Scaling factor for the y-axis. style (:obj:`list` or :obj:`str`, optional): (List of) matplotlib style specifications, to be composed on top of Sumo base style. no_base_style (:obj:`bool`, optional): Prevent use of sumo base style. This can make alternative styles behave more predictably. image_format (:obj:`str`, optional): The image file format. Can be any format supported by matplotlib, including: png, jpg, pdf, and svg. Defaults to pdf. dpi (:obj:`int`, optional): The dots-per-inch (pixel density) for the image. plt (:obj:`matplotlib.pyplot`, optional): A :obj:`matplotlib.pyplot` object to use for plotting. fonts (:obj:`list`, optional): Fonts to use in the plot. Can be a single font, specified as a :obj:`str`, or several fonts, specified as a :obj:`list` of :obj:`str`. Returns: A matplotlib pyplot object.
[ "A", "script", "to", "plot", "the", "density", "of", "states", "from", "a", "vasprun", ".", "xml", "file", "." ]
python
train
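A usage sketch (the element symbols, broadening and prefix are illustrative): run in a directory containing vasprun.xml; without a plt argument the function saves the image and data files instead of returning the plot.

dosplot(elements={'Bi': ('s', 'p'), 'O': ('p',)},
        gaussian=0.1, xmin=-8., xmax=4.,
        image_format='png', prefix='bi2o3')
# -> writes bi2o3_dos.png plus the projected DOS data files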
saltstack/salt
salt/proxy/bluecoat_sslv.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/bluecoat_sslv.py#L170-L198
def logon(): ''' Logs into the bluecoat_sslv device and returns the session cookies. ''' session = requests.session() payload = {"jsonrpc": "2.0", "id": "ID0", "method": "login", "params": [DETAILS['username'], DETAILS['password'], DETAILS['auth'], True] } logon_response = session.post(DETAILS['url'], data=json.dumps(payload), verify=False) if logon_response.status_code != 200: log.error("Error logging into proxy. HTTP Error code: %s", logon_response.status_code) raise salt.exceptions.CommandExecutionError( "Did not receive a valid response from host.") try: cookies = {'sslng_csrf_token': logon_response.cookies['sslng_csrf_token'], 'sslng_session_id': logon_response.cookies['sslng_session_id']} csrf_token = logon_response.cookies['sslng_csrf_token'] except KeyError: log.error("Unable to authenticate to the bluecoat_sslv proxy.") raise salt.exceptions.CommandExecutionError( "Did not receive a valid response from host.") return session, cookies, csrf_token
[ "def", "logon", "(", ")", ":", "session", "=", "requests", ".", "session", "(", ")", "payload", "=", "{", "\"jsonrpc\"", ":", "\"2.0\"", ",", "\"id\"", ":", "\"ID0\"", ",", "\"method\"", ":", "\"login\"", ",", "\"params\"", ":", "[", "DETAILS", "[", "'username'", "]", ",", "DETAILS", "[", "'password'", "]", ",", "DETAILS", "[", "'auth'", "]", ",", "True", "]", "}", "logon_response", "=", "session", ".", "post", "(", "DETAILS", "[", "'url'", "]", ",", "data", "=", "json", ".", "dumps", "(", "payload", ")", ",", "verify", "=", "False", ")", "if", "logon_response", ".", "status_code", "!=", "200", ":", "log", ".", "error", "(", "\"Error logging into proxy. HTTP Error code: %s\"", ",", "logon_response", ".", "status_code", ")", "raise", "salt", ".", "exceptions", ".", "CommandExecutionError", "(", "\"Did not receive a valid response from host.\"", ")", "try", ":", "cookies", "=", "{", "'sslng_csrf_token'", ":", "logon_response", ".", "cookies", "[", "'sslng_csrf_token'", "]", ",", "'sslng_session_id'", ":", "logon_response", ".", "cookies", "[", "'sslng_session_id'", "]", "}", "csrf_token", "=", "logon_response", ".", "cookies", "[", "'sslng_csrf_token'", "]", "except", "KeyError", ":", "log", ".", "error", "(", "\"Unable to authentication to the bluecoat_sslv proxy.\"", ")", "raise", "salt", ".", "exceptions", ".", "CommandExecutionError", "(", "\"Did not receive a valid response from host.\"", ")", "return", "session", ",", "cookies", ",", "csrf_token" ]
Logs into the bluecoat_sslv device and returns the session cookies.
[ "Logs", "into", "the", "bluecoat_sslv", "device", "and", "returns", "the", "session", "cookies", "." ]
python
train
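A sketch of reusing the returned session state for a follow-up JSON-RPC call; the method name "getPlatform" and the CSRF header name are assumptions for illustration, not documented endpoints.

import json

session, cookies, csrf_token = logon()
payload = {"jsonrpc": "2.0", "id": "ID1",
           "method": "getPlatform",  # hypothetical method name
           "params": []}
response = session.post(DETAILS['url'], data=json.dumps(payload),
                        cookies=cookies,
                        headers={'X-CSRF-Token': csrf_token},  # assumed header
                        verify=False)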
PyCQA/astroid
astroid/scoped_nodes.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/scoped_nodes.py#L1917-L1927
def implicit_locals(self): """Get implicitly defined class definition locals. :returns: the name and Const pair for each local :rtype: tuple(tuple(str, node_classes.Const), ...) """ locals_ = (("__module__", self.special_attributes.attr___module__),) if sys.version_info >= (3, 3): # __qualname__ is defined in PEP3155 locals_ += (("__qualname__", self.special_attributes.attr___qualname__),) return locals_
[ "def", "implicit_locals", "(", "self", ")", ":", "locals_", "=", "(", "(", "\"__module__\"", ",", "self", ".", "special_attributes", ".", "attr___module__", ")", ",", ")", "if", "sys", ".", "version_info", ">=", "(", "3", ",", "3", ")", ":", "# __qualname__ is defined in PEP3155", "locals_", "+=", "(", "(", "\"__qualname__\"", ",", "self", ".", "special_attributes", ".", "attr___qualname__", ")", ",", ")", "return", "locals_" ]
Get implicitly defined class definition locals. :returns: the name and Const pair for each local :rtype: tuple(tuple(str, node_classes.Const), ...)
[ "Get", "implicitly", "defined", "class", "definition", "locals", "." ]
python
train
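What the two implicit locals correspond to at runtime (PEP 3155 introduced __qualname__ in Python 3.3):

class Outer:
    class Inner:
        pass

assert Outer.Inner.__module__ == __name__
assert Outer.Inner.__qualname__ == 'Outer.Inner'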
fishtown-analytics/dbt
core/dbt/task/runnable.py
https://github.com/fishtown-analytics/dbt/blob/aa4f771df28b307af0cf9fe2fc24432f10a8236b/core/dbt/task/runnable.py#L347-L366
def decode_sql(self, sql): """Base64 decode a string. This should only be used for sql in calls. :param str sql: The base64 encoded form of the original utf-8 string :return str: The decoded utf-8 string """ # JSON is defined as using "unicode", we'll go a step further and # mandate utf-8 (though for the base64 part, it doesn't really matter!) base64_sql_bytes = to_unicode(sql).encode('utf-8') # in python3.x you can pass `validate=True` to b64decode to get this # behavior. if not re.match(b'^[A-Za-z0-9+/]*={0,2}$', base64_sql_bytes): self.raise_invalid_base64(sql) try: sql_bytes = base64.b64decode(base64_sql_bytes) except ValueError: self.raise_invalid_base64(sql) return sql_bytes.decode('utf-8')
[ "def", "decode_sql", "(", "self", ",", "sql", ")", ":", "# JSON is defined as using \"unicode\", we'll go a step further and", "# mandate utf-8 (though for the base64 part, it doesn't really matter!)", "base64_sql_bytes", "=", "to_unicode", "(", "sql", ")", ".", "encode", "(", "'utf-8'", ")", "# in python3.x you can pass `validate=True` to b64decode to get this", "# behavior.", "if", "not", "re", ".", "match", "(", "b'^[A-Za-z0-9+/]*={0,2}$'", ",", "base64_sql_bytes", ")", ":", "self", ".", "raise_invalid_base64", "(", "sql", ")", "try", ":", "sql_bytes", "=", "base64", ".", "b64decode", "(", "base64_sql_bytes", ")", "except", "ValueError", ":", "self", ".", "raise_invalid_base64", "(", "sql", ")", "return", "sql_bytes", ".", "decode", "(", "'utf-8'", ")" ]
Base64 decode a string. This should only be used for sql in calls. :param str sql: The base64 encoded form of the original utf-8 string :return str: The decoded utf-8 string
[ "Base64", "decode", "a", "string", ".", "This", "should", "only", "be", "used", "for", "sql", "in", "calls", "." ]
python
train
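Why the manual regex: base64.b64decode silently skips invalid characters unless validate=True (Python 3 only), so the pattern reproduces that strictness on Python 2 as well.

import base64
import re

good = base64.b64encode(b'select 1')                    # b'c2VsZWN0IDE='
assert re.match(b'^[A-Za-z0-9+/]*={0,2}$', good)
assert base64.b64decode(good, validate=True) == b'select 1'
assert not re.match(b'^[A-Za-z0-9+/]*={0,2}$', b'not base64!')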
Erotemic/utool
utool/util_list.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L2459-L2470
def depth(sequence, func=max, _depth=0): """ Find the nesting depth of a nested sequence """ if isinstance(sequence, dict): sequence = list(sequence.values()) depth_list = [depth(item, func=func, _depth=_depth + 1) for item in sequence if (isinstance(item, dict) or util_type.is_listlike(item))] if len(depth_list) > 0: return func(depth_list) else: return _depth
[ "def", "depth", "(", "sequence", ",", "func", "=", "max", ",", "_depth", "=", "0", ")", ":", "if", "isinstance", "(", "sequence", ",", "dict", ")", ":", "sequence", "=", "list", "(", "sequence", ".", "values", "(", ")", ")", "depth_list", "=", "[", "depth", "(", "item", ",", "func", "=", "func", ",", "_depth", "=", "_depth", "+", "1", ")", "for", "item", "in", "sequence", "if", "(", "isinstance", "(", "item", ",", "dict", ")", "or", "util_type", ".", "is_listlike", "(", "item", ")", ")", "]", "if", "len", "(", "depth_list", ")", ">", "0", ":", "return", "func", "(", "depth_list", ")", "else", ":", "return", "_depth" ]
Find the nesting depth of a nested sequence
[ "Find", "the", "nesting", "depth", "of", "a", "nested", "sequence" ]
python
train
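A behaviour sketch (note that a flat sequence counts as depth 0, since only nested list-likes increment _depth):

assert depth([1, 2, 3]) == 0
assert depth([1, [2, [3]]]) == 2
assert depth({'a': [1], 'b': [[2]]}, func=min) == 1  # min over branch depths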
tgalal/python-axolotl
axolotl/sessionbuilder.py
https://github.com/tgalal/python-axolotl/blob/0c681af4b756f556e23a9bf961abfbc6f82800cc/axolotl/sessionbuilder.py#L100-L138
def processV3(self, sessionRecord, message): """ :param sessionRecord: :param message: :type message: PreKeyWhisperMessage :return: """ if sessionRecord.hasSessionState(message.getMessageVersion(), message.getBaseKey().serialize()): logger.warn("We've already setup a session for this V3 message, letting bundled message fall through...") return None ourSignedPreKey = self.signedPreKeyStore.loadSignedPreKey(message.getSignedPreKeyId()).getKeyPair() parameters = BobAxolotlParameters.newBuilder() parameters.setTheirBaseKey(message.getBaseKey())\ .setTheirIdentityKey(message.getIdentityKey())\ .setOurIdentityKey(self.identityKeyStore.getIdentityKeyPair())\ .setOurSignedPreKey(ourSignedPreKey)\ .setOurRatchetKey(ourSignedPreKey) if message.getPreKeyId() is not None: parameters.setOurOneTimePreKey(self.preKeyStore.loadPreKey(message.getPreKeyId()).getKeyPair()) else: parameters.setOurOneTimePreKey(None) if not sessionRecord.isFresh(): sessionRecord.archiveCurrentState() RatchetingSession.initializeSessionAsBob(sessionRecord.getSessionState(), message.getMessageVersion(), parameters.create()) sessionRecord.getSessionState().setLocalRegistrationId(self.identityKeyStore.getLocalRegistrationId()) sessionRecord.getSessionState().setRemoteRegistrationId(message.getRegistrationId()) sessionRecord.getSessionState().setAliceBaseKey(message.getBaseKey().serialize()) if message.getPreKeyId() is not None and message.getPreKeyId() != Medium.MAX_VALUE: return message.getPreKeyId() else: return None
[ "def", "processV3", "(", "self", ",", "sessionRecord", ",", "message", ")", ":", "if", "sessionRecord", ".", "hasSessionState", "(", "message", ".", "getMessageVersion", "(", ")", ",", "message", ".", "getBaseKey", "(", ")", ".", "serialize", "(", ")", ")", ":", "logger", ".", "warn", "(", "\"We've already setup a session for this V3 message, letting bundled message fall through...\"", ")", "return", "None", "ourSignedPreKey", "=", "self", ".", "signedPreKeyStore", ".", "loadSignedPreKey", "(", "message", ".", "getSignedPreKeyId", "(", ")", ")", ".", "getKeyPair", "(", ")", "parameters", "=", "BobAxolotlParameters", ".", "newBuilder", "(", ")", "parameters", ".", "setTheirBaseKey", "(", "message", ".", "getBaseKey", "(", ")", ")", ".", "setTheirIdentityKey", "(", "message", ".", "getIdentityKey", "(", ")", ")", ".", "setOurIdentityKey", "(", "self", ".", "identityKeyStore", ".", "getIdentityKeyPair", "(", ")", ")", ".", "setOurSignedPreKey", "(", "ourSignedPreKey", ")", ".", "setOurRatchetKey", "(", "ourSignedPreKey", ")", "if", "message", ".", "getPreKeyId", "(", ")", "is", "not", "None", ":", "parameters", ".", "setOurOneTimePreKey", "(", "self", ".", "preKeyStore", ".", "loadPreKey", "(", "message", ".", "getPreKeyId", "(", ")", ")", ".", "getKeyPair", "(", ")", ")", "else", ":", "parameters", ".", "setOurOneTimePreKey", "(", "None", ")", "if", "not", "sessionRecord", ".", "isFresh", "(", ")", ":", "sessionRecord", ".", "archiveCurrentState", "(", ")", "RatchetingSession", ".", "initializeSessionAsBob", "(", "sessionRecord", ".", "getSessionState", "(", ")", ",", "message", ".", "getMessageVersion", "(", ")", ",", "parameters", ".", "create", "(", ")", ")", "sessionRecord", ".", "getSessionState", "(", ")", ".", "setLocalRegistrationId", "(", "self", ".", "identityKeyStore", ".", "getLocalRegistrationId", "(", ")", ")", "sessionRecord", ".", "getSessionState", "(", ")", ".", "setRemoteRegistrationId", "(", "message", ".", "getRegistrationId", "(", ")", ")", "sessionRecord", ".", "getSessionState", "(", ")", ".", "setAliceBaseKey", "(", "message", ".", "getBaseKey", "(", ")", ".", "serialize", "(", ")", ")", "if", "message", ".", "getPreKeyId", "(", ")", "is", "not", "None", "and", "message", ".", "getPreKeyId", "(", ")", "!=", "Medium", ".", "MAX_VALUE", ":", "return", "message", ".", "getPreKeyId", "(", ")", "else", ":", "return", "None" ]
:param sessionRecord: :param message: :type message: PreKeyWhisperMessage :return:
[ ":", "param", "sessionRecord", ":", ":", "param", "message", ":", ":", "type", "message", ":", "PreKeyWhisperMessage", ":", "return", ":" ]
python
train
Grunny/zap-cli
zapcli/zap_helper.py
https://github.com/Grunny/zap-cli/blob/d58d4850ecfc5467badfac5e5bcc841d064bd419/zapcli/zap_helper.py#L277-L291
def enable_scanners(self, scanners): """ Enable the provided scanners by group and/or IDs. """ scanner_ids = [] for scanner in scanners: if scanner in self.scanner_groups: self.enable_scanners_by_group(scanner) elif scanner.isdigit(): scanner_ids.append(scanner) else: raise ZAPError('Invalid scanner "{0}" provided. Must be a valid group or numeric ID.'.format(scanner)) if scanner_ids: self.enable_scanners_by_ids(scanner_ids)
[ "def", "enable_scanners", "(", "self", ",", "scanners", ")", ":", "scanner_ids", "=", "[", "]", "for", "scanner", "in", "scanners", ":", "if", "scanner", "in", "self", ".", "scanner_groups", ":", "self", ".", "enable_scanners_by_group", "(", "scanner", ")", "elif", "scanner", ".", "isdigit", "(", ")", ":", "scanner_ids", ".", "append", "(", "scanner", ")", "else", ":", "raise", "ZAPError", "(", "'Invalid scanner \"{0}\" provided. Must be a valid group or numeric ID.'", ".", "format", "(", "scanner", ")", ")", "if", "scanner_ids", ":", "self", ".", "enable_scanners_by_ids", "(", "scanner_ids", ")" ]
Enable the provided scanners by group and/or IDs.
[ "Enable", "the", "provided", "scanners", "by", "group", "and", "/", "or", "IDs", "." ]
python
train
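A usage sketch; the group name "xss" is illustrative, valid names come from the helper's scanner_groups mapping, and anything that is neither a known group nor numeric raises ZAPError.

zap_helper.enable_scanners(['xss', '40012', '90019'])  # mix groups and IDs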
dahlia/sqlalchemy-imageattach
sqlalchemy_imageattach/entity.py
https://github.com/dahlia/sqlalchemy-imageattach/blob/b4bafa73f3bb576ecf67ed7b40b702704a0fbdc8/sqlalchemy_imageattach/entity.py#L1034-L1041
def image_sets(self): """(:class:`typing.Iterable`\ [:class:`ImageSubset`]) The set of attached image sets. """ images = self._original_images() for image in images: yield ImageSubset(self, **image.identity_map)
[ "def", "image_sets", "(", "self", ")", ":", "images", "=", "self", ".", "_original_images", "(", ")", "for", "image", "in", "images", ":", "yield", "ImageSubset", "(", "self", ",", "*", "*", "image", ".", "identity_map", ")" ]
(:class:`typing.Iterable`\ [:class:`ImageSubset`]) The set of attached image sets.
[ "(", ":", "class", ":", "typing", ".", "Iterable", "\\", "[", ":", "class", ":", "ImageSubset", "]", ")", "The", "set", "of", "attached", "image", "sets", "." ]
python
train
marrow/mongo
marrow/mongo/core/trait/published.py
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/core/trait/published.py#L21-L37
def only_published(cls, at=None): """Produce a query fragment suitable for selecting documents that are public now (no arguments), at a specific time (datetime argument), or relative to now (timedelta). """ if isinstance(at, timedelta): at = utcnow() + at else: at = at or utcnow() pub, ret = cls.published, cls.retracted publication = (-pub) | (pub == None) | (pub <= at) retraction = (-ret) | (ret == None) | (ret > at) return publication & retraction
[ "def", "only_published", "(", "cls", ",", "at", "=", "None", ")", ":", "if", "isinstance", "(", "at", ",", "timedelta", ")", ":", "at", "=", "utcnow", "(", ")", "+", "at", "else", ":", "at", "=", "at", "or", "utcnow", "(", ")", "pub", ",", "ret", "=", "cls", ".", "published", ",", "cls", ".", "retracted", "publication", "=", "(", "-", "pub", ")", "|", "(", "pub", "==", "None", ")", "|", "(", "pub", "<=", "at", ")", "retraction", "=", "(", "-", "ret", ")", "|", "(", "ret", "==", "None", ")", "|", "(", "ret", ">", "at", ")", "return", "publication", "&", "retraction" ]
Produce a query fragment suitable for selecting documents that are public now (no arguments), at a specific time (datetime argument), or relative to now (timedelta).
[ "Produce", "a", "query", "fragment", "suitable", "for", "selecting", "documents", "public", ".", "Now", "(", "no", "arguments", ")", "at", "a", "specific", "time", "(", "datetime", "argument", ")", "or", "relative", "to", "now", "(", "timedelta", ")", "." ]
python
train
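A usage sketch, assuming `Article` is a hypothetical Document subclass mixing in this trait: with no argument the fragment selects what is public right now; a timedelta shifts the reference time forward.

from datetime import timedelta

now_query = Article.only_published()
next_week = Article.only_published(timedelta(days=7))  # includes scheduled docs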
ctuning/ck
ck/kernel.py
https://github.com/ctuning/ck/blob/7e009814e975f8742790d3106340088a46223714/ck/kernel.py#L4351-L4377
def get_current_date_time(i): """ Input: {} Output: { return - return code = 0 array - array with date and time iso_datetime - date and time in ISO format } """ import datetime a={} now1=datetime.datetime.now() now=now1.timetuple() a['date_year']=now[0] a['date_month']=now[1] a['date_day']=now[2] a['time_hour']=now[3] a['time_minute']=now[4] a['time_second']=now[5] return {'return':0, 'array':a, 'iso_datetime':now1.isoformat()}
[ "def", "get_current_date_time", "(", "i", ")", ":", "import", "datetime", "a", "=", "{", "}", "now1", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "now", "=", "now1", ".", "timetuple", "(", ")", "a", "[", "'date_year'", "]", "=", "now", "[", "0", "]", "a", "[", "'date_month'", "]", "=", "now", "[", "1", "]", "a", "[", "'date_day'", "]", "=", "now", "[", "2", "]", "a", "[", "'time_hour'", "]", "=", "now", "[", "3", "]", "a", "[", "'time_minute'", "]", "=", "now", "[", "4", "]", "a", "[", "'time_second'", "]", "=", "now", "[", "5", "]", "return", "{", "'return'", ":", "0", ",", "'array'", ":", "a", ",", "'iso_datetime'", ":", "now1", ".", "isoformat", "(", ")", "}" ]
Input: {} Output: { return - return code = 0 array - array with date and time iso_datetime - date and time in ISO format }
[ "Input", ":", "{}" ]
python
train
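A usage sketch of the return contract (return code 0, a broken-out time array, and an ISO timestamp):

r = get_current_date_time({})
assert r['return'] == 0
a = r['array']
print(a['date_year'], a['date_month'], a['date_day'])
print(r['iso_datetime'])  # e.g. '2019-05-14T09:30:12.345678'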
teepark/greenhouse
greenhouse/pool.py
https://github.com/teepark/greenhouse/blob/8fd1be4f5443ba090346b5ec82fdbeb0a060d956/greenhouse/pool.py#L216-L252
def get(self): """retrieve a result from the pool if nothing is already completed when this method is called, it will block until something comes back if the pool's function exited via exception, that will come back as a result here as well, but will be re-raised in :meth:`get`. .. note:: if there is nothing in the pool's output queue when this method is called, it will block until something is ready :returns: a return value from one of the function's invocations if it exited normally :raises: :class:`PoolClosed` if the pool was closed before a result could be produced for this call :raises: any exception that was raised inside the worker function """ if self.closed: raise PoolClosed() while self._getcount not in self._cache: counter, result = self.outq.get() self._cache[counter] = result result, succeeded = self._cache.pop(self._getcount) self._getcount += 1 if not succeeded: klass, exc, tb = result raise klass, exc, tb return result
[ "def", "get", "(", "self", ")", ":", "if", "self", ".", "closed", ":", "raise", "PoolClosed", "(", ")", "while", "self", ".", "_getcount", "not", "in", "self", ".", "_cache", ":", "counter", ",", "result", "=", "self", ".", "outq", ".", "get", "(", ")", "self", ".", "_cache", "[", "counter", "]", "=", "result", "result", ",", "succeeded", "=", "self", ".", "_cache", ".", "pop", "(", "self", ".", "_getcount", ")", "self", ".", "_getcount", "+=", "1", "if", "not", "succeeded", ":", "klass", ",", "exc", ",", "tb", "=", "result", "raise", "klass", ",", "exc", ",", "tb", "return", "result" ]
retrieve a result from the pool if nothing is already completed when this method is called, it will block until something comes back if the pool's function exited via exception, that will come back as a result here as well, but will be re-raised in :meth:`get`. .. note:: if there is nothing in the pool's output queue when this method is called, it will block until something is ready :returns: a return value from one of the function's invocations if it exited normally :raises: :class:`PoolClosed` if the pool was closed before a result could be produced for this call :raises: any exception that was raised inside the worker function
[ "retrieve", "a", "result", "from", "the", "pool" ]
python
train
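The _cache/_getcount pair is a reorder buffer: workers may finish out of order, but callers receive results in submission order. A standalone illustration of the same bookkeeping:

cache, getcount = {}, 0
for counter, result in [(2, 'c'), (0, 'a'), (1, 'b')]:  # out-of-order arrivals
    cache[counter] = result
    while getcount in cache:
        print(cache.pop(getcount))  # prints a, b, c - submission order
        getcount += 1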
chrisspen/webarticle2text
webarticle2text/webarticle2text.py
https://github.com/chrisspen/webarticle2text/blob/3c88e948e31aedf1eccfea2106e5848d224771eb/webarticle2text/webarticle2text.py#L337-L355
def tidyHTML(dirtyHTML): """ Runs an arbitrary HTML string through Tidy. """ try: from tidylib import tidy_document except ImportError as e: raise ImportError(("%s\nYou need to install pytidylib.\n" + "e.g. sudo pip install pytidylib") % e) options = { 'output-xhtml':1, #add_xml_decl=1,#option in tidy but not pytidylib 'indent':1, 'tidy-mark':1, #'char-encoding':'utf8', 'char-encoding':'raw', } html, errors = tidy_document(dirtyHTML, options=options) return html
[ "def", "tidyHTML", "(", "dirtyHTML", ")", ":", "try", ":", "from", "tidylib", "import", "tidy_document", "except", "ImportError", "as", "e", ":", "raise", "ImportError", "(", "(", "\"%s\\nYou need to install pytidylib.\\n\"", "+", "\"e.g. sudo pip install pytidylib\"", ")", "%", "e", ")", "options", "=", "{", "'output-xhtml'", ":", "1", ",", "#add_xml_decl=1,#option in tidy but not pytidylib", "'indent'", ":", "1", ",", "'tidy-mark'", ":", "1", ",", "#'char-encoding':'utf8',", "'char-encoding'", ":", "'raw'", ",", "}", "html", ",", "errors", "=", "tidy_document", "(", "dirtyHTML", ",", "options", "=", "options", ")", "return", "html" ]
Runs an arbitrary HTML string through Tidy.
[ "Runs", "an", "arbitrary", "HTML", "string", "through", "Tidy", "." ]
python
train
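A usage sketch: pytidylib's tidy_document returns (html, errors); here malformed markup comes back as well-formed XHTML.

dirty = "<p>unclosed paragraph<b>bold"
print(tidyHTML(dirty))  # tags closed, XHTML output with indentation applied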
StackStorm/pybind
pybind/nos/v6_0_2f/rbridge_id/router/ospf/max_metric/router_lsa/on_startup/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/rbridge_id/router/ospf/max_metric/router_lsa/on_startup/__init__.py#L231-L252
def _set_summary_lsa_onstartup(self, v, load=False): """ Setter method for summary_lsa_onstartup, mapped from YANG variable /rbridge_id/router/ospf/max_metric/router_lsa/on_startup/summary_lsa_onstartup (container) If this variable is read-only (config: false) in the source YANG file, then _set_summary_lsa_onstartup is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_summary_lsa_onstartup() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=summary_lsa_onstartup.summary_lsa_onstartup, is_container='container', presence=True, yang_name="summary-lsa-onstartup", rest_name="summary-lsa", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Replace Metric in Summary LSA with max metric value', u'alt-name': u'summary-lsa'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """summary_lsa_onstartup must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=summary_lsa_onstartup.summary_lsa_onstartup, is_container='container', presence=True, yang_name="summary-lsa-onstartup", rest_name="summary-lsa", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Replace Metric in Summary LSA with max metric value', u'alt-name': u'summary-lsa'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)""", }) self.__summary_lsa_onstartup = t if hasattr(self, '_set'): self._set()
[ "def", "_set_summary_lsa_onstartup", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "summary_lsa_onstartup", ".", "summary_lsa_onstartup", ",", "is_container", "=", "'container'", ",", "presence", "=", "True", ",", "yang_name", "=", "\"summary-lsa-onstartup\"", ",", "rest_name", "=", "\"summary-lsa\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Replace Metric in Summary LSA with max metric value'", ",", "u'alt-name'", ":", "u'summary-lsa'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-ospf'", ",", "defining_module", "=", "'brocade-ospf'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"summary_lsa_onstartup must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=summary_lsa_onstartup.summary_lsa_onstartup, is_container='container', presence=True, yang_name=\"summary-lsa-onstartup\", rest_name=\"summary-lsa\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Replace Metric in Summary LSA with max metric value', u'alt-name': u'summary-lsa'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__summary_lsa_onstartup", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for summary_lsa_onstartup, mapped from YANG variable /rbridge_id/router/ospf/max_metric/router_lsa/on_startup/summary_lsa_onstartup (container) If this variable is read-only (config: false) in the source YANG file, then _set_summary_lsa_onstartup is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_summary_lsa_onstartup() directly.
[ "Setter", "method", "for", "summary_lsa_onstartup", "mapped", "from", "YANG", "variable", "/", "rbridge_id", "/", "router", "/", "ospf", "/", "max_metric", "/", "router_lsa", "/", "on_startup", "/", "summary_lsa_onstartup", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_summary_lsa_onstartup", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_summary_lsa_onstartup", "()", "directly", "." ]
python
train
PyCQA/pylint
pylint/message/message.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/message/message.py#L45-L53
def format(self, template): """Format the message according to the given template. The template format is that of the str.format method; cf. http://docs.python.org/2/library/string.html#formatstrings """ # For some reason, _asdict on derived namedtuples does not work with # Python 3.4. Needs some investigation. return template.format(**dict(zip(self._fields, self)))
[ "def", "format", "(", "self", ",", "template", ")", ":", "# For some reason, _asdict on derived namedtuples does not work with", "# Python 3.4. Needs some investigation.", "return", "template", ".", "format", "(", "*", "*", "dict", "(", "zip", "(", "self", ".", "_fields", ",", "self", ")", ")", ")" ]
Format the message according to the given template. The template format is that of the str.format method; cf. http://docs.python.org/2/library/string.html#formatstrings
[ "Format", "the", "message", "according", "to", "the", "given", "template", "." ]
python
test
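The zip(self._fields, self) idiom builds the field-to-value dict that _asdict() would normally provide, and it works on derived namedtuples too:

from collections import namedtuple

Point = namedtuple('Point', 'x y')
p = Point(1, 2)
assert dict(zip(p._fields, p)) == {'x': 1, 'y': 2}
print('{x},{y}'.format(**dict(zip(p._fields, p))))  # -> 1,2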