Dataset schema (column name, dtype, observed value range):

repo              string (length 7 to 55)
path              string (length 4 to 223)
url               string (length 87 to 315)
code              string (length 75 to 104k)
code_tokens       list
docstring         string (length 1 to 46.9k)
docstring_tokens  list
language          string (1 class)
partition         string (3 classes)
avg_line_len      float64 (7.91 to 980)
ouroboroscoding/format-oc-python
FormatOC/__init__.py
https://github.com/ouroboroscoding/format-oc-python/blob/c160b46fe4ff2c92333c776991c712de23991225/FormatOC/__init__.py#L1200-L1378
def minmax(self, minimum=None, maximum=None): """Min/Max Sets or gets the minimum and/or maximum values for the Node. For getting, returns {"minimum":mixed,"maximum":mixed} Arguments: minimum {mixed} -- The minimum value maximum {mixed} -- The maximum value Raises: TypeError, ValueError Returns: None | dict """ # If neither min or max is set, this is a getter if minimum is None and maximum is None: return {"minimum": self._minimum, "maximum": self._maximum}; # If the minimum is set if minimum != None: # If the current type is a date, datetime, ip, or time if self._type in ['base64', 'date', 'datetime', 'ip', 'time']: # Make sure the value is valid for the type if not isinstance(minimum, basestring) \ or not _typeToRegex[self._type].match(minimum): raise ValueError('__minimum__') # Else if the type is an int (unsigned, timestamp), or a string in # which the min/max are lengths elif self._type in ['int', 'string', 'timestamp', 'uint']: # If the value is not a valid int or long if not isinstance(minimum, (int, long)): # If it's a valid representation of an integer if isinstance(minimum, basestring) \ and _typeToRegex['int'].match(minimum): # Convert it minimum = int(minimum, 0) # Else, raise an error else: raise ValueError('__minimum__') # If the type is meant to be unsigned if self._type in ['base64', 'string', 'timestamp', 'uint']: # And it's below zero if minimum < 0: raise ValueError('__minimum__') # Else if the type is decimal elif self._type == 'decimal': # Store it if it's valid, else throw a ValueError try: minimum = Decimal(minimum) except ValueError: raise ValueError('__minimum__') # Else if the type is float elif self._type == 'float': # Store it if it's valid, else throw a ValueError try: minimum = float(minimum) except ValueError: raise ValueError('__minimum__') # Else if the type is price elif self._type == 'price': # If it's not a valid representation of a price if not isinstance(minimum, basestring) or not _typeToRegex['price'].match(minimum): raise ValueError('__minimum__') # Store it as a Decimal minimum = Decimal(minimum) # Else we can't have a minimum else: raise TypeError('can not set __minimum__ for ' + self._type) # Store the minimum self._minimum = minimum # If the maximum is set if maximum != None: # If the current type is a date, datetime, ip, or time if self._type in ['date', 'datetime', 'ip', 'time']: # Make sure the value is valid for the type if not isinstance(maximum, basestring) \ or not _typeToRegex[self._type].match(maximum): raise ValueError('__maximum__') # Else if the type is an int (unsigned, timestamp), or a string in # which the min/max are lengths elif self._type in ['int', 'string', 'timestamp', 'uint']: # If the value is not a valid int or long if not isinstance(maximum, (int, long)): # If it's a valid representation of an integer if isinstance(maximum, basestring) \ and _typeToRegex['int'].match(maximum): # Convert it maximum = int(maximum, 0) # Else, raise an error else: raise ValueError('__minimum__') # If the type is meant to be unsigned if self._type in ['string', 'timestamp', 'uint']: # And it's below zero if maximum < 0: raise ValueError('__maximum__') # Else if the type is decimal elif self._type == 'decimal': # Store it if it's valid, else throw a ValueError try: maximum = Decimal(maximum) except ValueError: raise ValueError('__maximum__') # Else if the type is float elif self._type == 'float': # Store it if it's valid, else throw a ValueError try: minimum = float(minimum) except ValueError: raise ValueError('__maximum__') # Else if the type 
is price elif self._type == 'price': # If it's not a valid representation of a price if not isinstance(maximum, basestring) or not _typeToRegex['price'].match(maximum): raise ValueError('__maximum__') # Store it as a Decimal maximum = Decimal(maximum) # Else we can't have a maximum else: raise TypeError('can not set __maximum__ for ' + self._type) # If we also have a minimum if self._minimum is not None: # If the type is an IP if self._type == 'ip': # If the min is above the max, we have a problem if self.__compare_ips(self._minimum, maximum) == 1: raise ValueError('__maximum__') # Else any other data type else: # If the min is above the max, we have a problem if self._minimum > maximum: raise ValueError('__maximum__') # Store the maximum self._maximum = maximum
[ "def", "minmax", "(", "self", ",", "minimum", "=", "None", ",", "maximum", "=", "None", ")", ":", "# If neither min or max is set, this is a getter", "if", "minimum", "is", "None", "and", "maximum", "is", "None", ":", "return", "{", "\"minimum\"", ":", "self", ".", "_minimum", ",", "\"maximum\"", ":", "self", ".", "_maximum", "}", "# If the minimum is set", "if", "minimum", "!=", "None", ":", "# If the current type is a date, datetime, ip, or time", "if", "self", ".", "_type", "in", "[", "'base64'", ",", "'date'", ",", "'datetime'", ",", "'ip'", ",", "'time'", "]", ":", "# Make sure the value is valid for the type", "if", "not", "isinstance", "(", "minimum", ",", "basestring", ")", "or", "not", "_typeToRegex", "[", "self", ".", "_type", "]", ".", "match", "(", "minimum", ")", ":", "raise", "ValueError", "(", "'__minimum__'", ")", "# Else if the type is an int (unsigned, timestamp), or a string in", "# \twhich the min/max are lengths", "elif", "self", ".", "_type", "in", "[", "'int'", ",", "'string'", ",", "'timestamp'", ",", "'uint'", "]", ":", "# If the value is not a valid int or long", "if", "not", "isinstance", "(", "minimum", ",", "(", "int", ",", "long", ")", ")", ":", "# If it's a valid representation of an integer", "if", "isinstance", "(", "minimum", ",", "basestring", ")", "and", "_typeToRegex", "[", "'int'", "]", ".", "match", "(", "minimum", ")", ":", "# Convert it", "minimum", "=", "int", "(", "minimum", ",", "0", ")", "# Else, raise an error", "else", ":", "raise", "ValueError", "(", "'__minimum__'", ")", "# If the type is meant to be unsigned", "if", "self", ".", "_type", "in", "[", "'base64'", ",", "'string'", ",", "'timestamp'", ",", "'uint'", "]", ":", "# And it's below zero", "if", "minimum", "<", "0", ":", "raise", "ValueError", "(", "'__minimum__'", ")", "# Else if the type is decimal", "elif", "self", ".", "_type", "==", "'decimal'", ":", "# Store it if it's valid, else throw a ValueError", "try", ":", "minimum", "=", "Decimal", "(", "minimum", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'__minimum__'", ")", "# Else if the type is float", "elif", "self", ".", "_type", "==", "'float'", ":", "# Store it if it's valid, else throw a ValueError", "try", ":", "minimum", "=", "float", "(", "minimum", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'__minimum__'", ")", "# Else if the type is price", "elif", "self", ".", "_type", "==", "'price'", ":", "# If it's not a valid representation of a price", "if", "not", "isinstance", "(", "minimum", ",", "basestring", ")", "or", "not", "_typeToRegex", "[", "'price'", "]", ".", "match", "(", "minimum", ")", ":", "raise", "ValueError", "(", "'__minimum__'", ")", "# Store it as a Decimal", "minimum", "=", "Decimal", "(", "minimum", ")", "# Else we can't have a minimum", "else", ":", "raise", "TypeError", "(", "'can not set __minimum__ for '", "+", "self", ".", "_type", ")", "# Store the minimum", "self", ".", "_minimum", "=", "minimum", "# If the maximum is set", "if", "maximum", "!=", "None", ":", "# If the current type is a date, datetime, ip, or time", "if", "self", ".", "_type", "in", "[", "'date'", ",", "'datetime'", ",", "'ip'", ",", "'time'", "]", ":", "# Make sure the value is valid for the type", "if", "not", "isinstance", "(", "maximum", ",", "basestring", ")", "or", "not", "_typeToRegex", "[", "self", ".", "_type", "]", ".", "match", "(", "maximum", ")", ":", "raise", "ValueError", "(", "'__maximum__'", ")", "# Else if the type is an int (unsigned, timestamp), or a string in", "# \twhich 
the min/max are lengths", "elif", "self", ".", "_type", "in", "[", "'int'", ",", "'string'", ",", "'timestamp'", ",", "'uint'", "]", ":", "# If the value is not a valid int or long", "if", "not", "isinstance", "(", "maximum", ",", "(", "int", ",", "long", ")", ")", ":", "# If it's a valid representation of an integer", "if", "isinstance", "(", "maximum", ",", "basestring", ")", "and", "_typeToRegex", "[", "'int'", "]", ".", "match", "(", "maximum", ")", ":", "# Convert it", "maximum", "=", "int", "(", "maximum", ",", "0", ")", "# Else, raise an error", "else", ":", "raise", "ValueError", "(", "'__minimum__'", ")", "# If the type is meant to be unsigned", "if", "self", ".", "_type", "in", "[", "'string'", ",", "'timestamp'", ",", "'uint'", "]", ":", "# And it's below zero", "if", "maximum", "<", "0", ":", "raise", "ValueError", "(", "'__maximum__'", ")", "# Else if the type is decimal", "elif", "self", ".", "_type", "==", "'decimal'", ":", "# Store it if it's valid, else throw a ValueError", "try", ":", "maximum", "=", "Decimal", "(", "maximum", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'__maximum__'", ")", "# Else if the type is float", "elif", "self", ".", "_type", "==", "'float'", ":", "# Store it if it's valid, else throw a ValueError", "try", ":", "minimum", "=", "float", "(", "minimum", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'__maximum__'", ")", "# Else if the type is price", "elif", "self", ".", "_type", "==", "'price'", ":", "# If it's not a valid representation of a price", "if", "not", "isinstance", "(", "maximum", ",", "basestring", ")", "or", "not", "_typeToRegex", "[", "'price'", "]", ".", "match", "(", "maximum", ")", ":", "raise", "ValueError", "(", "'__maximum__'", ")", "# Store it as a Decimal", "maximum", "=", "Decimal", "(", "maximum", ")", "# Else we can't have a maximum", "else", ":", "raise", "TypeError", "(", "'can not set __maximum__ for '", "+", "self", ".", "_type", ")", "# If we also have a minimum", "if", "self", ".", "_minimum", "is", "not", "None", ":", "# If the type is an IP", "if", "self", ".", "_type", "==", "'ip'", ":", "# If the min is above the max, we have a problem", "if", "self", ".", "__compare_ips", "(", "self", ".", "_minimum", ",", "maximum", ")", "==", "1", ":", "raise", "ValueError", "(", "'__maximum__'", ")", "# Else any other data type", "else", ":", "# If the min is above the max, we have a problem", "if", "self", ".", "_minimum", ">", "maximum", ":", "raise", "ValueError", "(", "'__maximum__'", ")", "# Store the maximum", "self", ".", "_maximum", "=", "maximum" ]
Min/Max Sets or gets the minimum and/or maximum values for the Node. For getting, returns {"minimum":mixed,"maximum":mixed} Arguments: minimum {mixed} -- The minimum value maximum {mixed} -- The maximum value Raises: TypeError, ValueError Returns: None | dict
[ "Min", "/", "Max" ]
python
train
26.368715
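A minimal standalone sketch of the combined getter/setter pattern that minmax() implements (this is not the FormatOC API itself; the class name and the collapsed validation below are illustrative only): call it with no arguments to read both bounds, or pass one or both values to validate and store them.

class MinMaxSketch(object):
    """Illustrative only: same call convention as Node.minmax(), with the
    type-specific validation reduced to a single ordering check."""

    def __init__(self):
        self._minimum = None
        self._maximum = None

    def minmax(self, minimum=None, maximum=None):
        # No arguments: act as a getter
        if minimum is None and maximum is None:
            return {"minimum": self._minimum, "maximum": self._maximum}
        # Otherwise act as a setter, rejecting an inverted range
        if minimum is not None:
            self._minimum = minimum
        if maximum is not None:
            if self._minimum is not None and self._minimum > maximum:
                raise ValueError('__maximum__')
            self._maximum = maximum

node = MinMaxSketch()
node.minmax(1, 10)          # setter
print(node.minmax())        # {'minimum': 1, 'maximum': 10}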
devopshq/artifactory
artifactory.py
https://github.com/devopshq/artifactory/blob/b9ec08cd72527d7d43159fe45c3a98a0b0838534/artifactory.py#L1308-L1327
def set_properties(self, properties, recursive=True): """ Adds new or modifies existing properties listed in properties properties - is a dict which contains the property names and values to set. Property values can be a list or tuple to set multiple values for a key. recursive - on folders property attachment is recursive by default. It is possible to force recursive behavior. """ if not properties: return # If URL > 13KB, nginx default raise error '414 Request-URI Too Large' MAX_SIZE = 50 if len(properties) > MAX_SIZE: for chunk in chunks(properties, MAX_SIZE): self._accessor.set_properties(self, chunk, recursive) else: self._accessor.set_properties(self, properties, recursive)
[ "def", "set_properties", "(", "self", ",", "properties", ",", "recursive", "=", "True", ")", ":", "if", "not", "properties", ":", "return", "# If URL > 13KB, nginx default raise error '414 Request-URI Too Large'", "MAX_SIZE", "=", "50", "if", "len", "(", "properties", ")", ">", "MAX_SIZE", ":", "for", "chunk", "in", "chunks", "(", "properties", ",", "MAX_SIZE", ")", ":", "self", ".", "_accessor", ".", "set_properties", "(", "self", ",", "chunk", ",", "recursive", ")", "else", ":", "self", ".", "_accessor", ".", "set_properties", "(", "self", ",", "properties", ",", "recursive", ")" ]
Adds new or modifies existing properties listed in properties properties - is a dict which contains the property names and values to set. Property values can be a list or tuple to set multiple values for a key. recursive - on folders property attachment is recursive by default. It is possible to force recursive behavior.
[ "Adds", "new", "or", "modifies", "existing", "properties", "listed", "in", "properties" ]
python
train
43.35
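The snippet above relies on a chunks() helper that is not shown; a plausible stand-in (hypothetical, not necessarily the library's own implementation) that splits a property dict into groups of at most MAX_SIZE keys could look like this:

from itertools import islice

def chunks(data, size):
    # Yield successive dicts containing at most `size` items of `data`.
    it = iter(data.items())
    while True:
        block = dict(islice(it, size))
        if not block:
            return
        yield block

properties = {'prop%d' % i: str(i) for i in range(120)}
for chunk in chunks(properties, 50):
    print(len(chunk))   # 50, 50, 20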
tobgu/pyrsistent
pyrsistent/_pset.py
https://github.com/tobgu/pyrsistent/blob/c84dab0daaa44973cbe83830d14888827b307632/pyrsistent/_pset.py#L90-L101
def remove(self, element): """ Return a new PSet with element removed. Raises KeyError if element is not present. >>> s1 = s(1, 2) >>> s1.remove(2) pset([1]) """ if element in self._map: return self.evolver().remove(element).persistent() raise KeyError("Element '%s' not present in PSet" % element)
[ "def", "remove", "(", "self", ",", "element", ")", ":", "if", "element", "in", "self", ".", "_map", ":", "return", "self", ".", "evolver", "(", ")", ".", "remove", "(", "element", ")", ".", "persistent", "(", ")", "raise", "KeyError", "(", "\"Element '%s' not present in PSet\"", "%", "element", ")" ]
Return a new PSet with element removed. Raises KeyError if element is not present. >>> s1 = s(1, 2) >>> s1.remove(2) pset([1])
[ "Return", "a", "new", "PSet", "with", "element", "removed", ".", "Raises", "KeyError", "if", "element", "is", "not", "present", "." ]
python
train
30.416667
markovmodel/msmtools
msmtools/analysis/sparse/expectations.py
https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/analysis/sparse/expectations.py#L77-L110
def expected_counts_stationary(T, n, mu=None): r"""Expected transition counts for Markov chain in equilibrium. Since mu is stationary for T we have .. math:: E(C^{(n)})=n diag(mu)*T. Parameters ---------- T : (M, M) sparse matrix Transition matrix. n : int Number of steps for chain. mu : (M,) ndarray (optional) Stationary distribution for T. If mu is not specified it will be computed via diagonalization of T. Returns ------- EC : (M, M) sparse matrix Expected value for transition counts after N steps. """ if (n <= 0): EC = coo_matrix(T.shape, dtype=float) return EC else: if mu is None: mu = stationary_distribution(T) D_mu = diags(mu, 0) EC = n * D_mu.dot(T) return EC
[ "def", "expected_counts_stationary", "(", "T", ",", "n", ",", "mu", "=", "None", ")", ":", "if", "(", "n", "<=", "0", ")", ":", "EC", "=", "coo_matrix", "(", "T", ".", "shape", ",", "dtype", "=", "float", ")", "return", "EC", "else", ":", "if", "mu", "is", "None", ":", "mu", "=", "stationary_distribution", "(", "T", ")", "D_mu", "=", "diags", "(", "mu", ",", "0", ")", "EC", "=", "n", "*", "D_mu", ".", "dot", "(", "T", ")", "return", "EC" ]
r"""Expected transition counts for Markov chain in equilibrium. Since mu is stationary for T we have .. math:: E(C^{(n)})=n diag(mu)*T. Parameters ---------- T : (M, M) sparse matrix Transition matrix. n : int Number of steps for chain. mu : (M,) ndarray (optional) Stationary distribution for T. If mu is not specified it will be computed via diagonalization of T. Returns ------- EC : (M, M) sparse matrix Expected value for transition counts after N steps.
[ "r", "Expected", "transition", "counts", "for", "Markov", "chain", "in", "equilibrium", "." ]
python
train
23.882353
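A worked instance of the formula E(C^(n)) = n diag(mu) T with a toy two-state chain (the transition matrix and its stationary distribution below are made up for illustration):

import numpy as np
from scipy.sparse import csr_matrix, diags

T = csr_matrix(np.array([[0.9, 0.1],
                         [0.2, 0.8]]))
mu = np.array([2.0 / 3.0, 1.0 / 3.0])   # stationary: mu @ T == mu
n = 100

EC = n * diags(mu, 0).dot(T)            # E(C^(n)) = n * diag(mu) * T
print(EC.toarray())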
tnkteja/myhelp
virtualEnvironment/lib/python2.7/site-packages/coverage/results.py
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/results.py#L118-L127
def arcs_missing(self): """Returns a sorted list of the arcs in the code not executed.""" possible = self.arc_possibilities() executed = self.arcs_executed() missing = [ p for p in possible if p not in executed and p[0] not in self.no_branch ] return sorted(missing)
[ "def", "arcs_missing", "(", "self", ")", ":", "possible", "=", "self", ".", "arc_possibilities", "(", ")", "executed", "=", "self", ".", "arcs_executed", "(", ")", "missing", "=", "[", "p", "for", "p", "in", "possible", "if", "p", "not", "in", "executed", "and", "p", "[", "0", "]", "not", "in", "self", ".", "no_branch", "]", "return", "sorted", "(", "missing", ")" ]
Returns a sorted list of the arcs in the code not executed.
[ "Returns", "a", "sorted", "list", "of", "the", "arcs", "in", "the", "code", "not", "executed", "." ]
python
test
35.7
spacetelescope/drizzlepac
drizzlepac/wcs_functions.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/wcs_functions.py#L355-L365
def make_perfect_cd(wcs): """ Create a perfect (square, orthogonal, undistorted) CD matrix from the input WCS. """ def_scale = (wcs.pscale) / 3600. def_orientat = np.deg2rad(wcs.orientat) perfect_cd = def_scale * np.array( [[-np.cos(def_orientat),np.sin(def_orientat)], [np.sin(def_orientat),np.cos(def_orientat)]] ) return perfect_cd
[ "def", "make_perfect_cd", "(", "wcs", ")", ":", "def_scale", "=", "(", "wcs", ".", "pscale", ")", "/", "3600.", "def_orientat", "=", "np", ".", "deg2rad", "(", "wcs", ".", "orientat", ")", "perfect_cd", "=", "def_scale", "*", "np", ".", "array", "(", "[", "[", "-", "np", ".", "cos", "(", "def_orientat", ")", ",", "np", ".", "sin", "(", "def_orientat", ")", "]", ",", "[", "np", ".", "sin", "(", "def_orientat", ")", ",", "np", ".", "cos", "(", "def_orientat", ")", "]", "]", ")", "return", "perfect_cd" ]
Create a perfect (square, orthogonal, undistorted) CD matrix from the input WCS.
[ "Create", "a", "perfect", "(", "square", "orthogonal", "undistorted", ")", "CD", "matrix", "from", "the", "input", "WCS", "." ]
python
train
34.272727
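The same construction with plain numbers instead of a WCS object (the pixel scale and orientation values are invented for the example):

import numpy as np

pscale = 0.05                    # arcsec per pixel (hypothetical)
orientat = 30.0                  # position angle in degrees (hypothetical)

def_scale = pscale / 3600.       # convert to degrees per pixel
def_orientat = np.deg2rad(orientat)
perfect_cd = def_scale * np.array(
    [[-np.cos(def_orientat), np.sin(def_orientat)],
     [np.sin(def_orientat), np.cos(def_orientat)]])
print(perfect_cd)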
saltstack/salt
salt/modules/junos.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/junos.py#L1217-L1341
def load(path=None, **kwargs): ''' Loads the configuration from the file provided onto the device. path (required) Path where the configuration/template file is present. If the file has a ``.conf`` extension, the content is treated as text format. If the file has a ``.xml`` extension, the content is treated as XML format. If the file has a ``.set`` extension, the content is treated as Junos OS ``set`` commands. overwrite : False Set to ``True`` if you want this file is to completely replace the configuration file. replace : False Specify whether the configuration file uses ``replace:`` statements. If ``True``, only those statements under the ``replace`` tag will be changed. format Determines the format of the contents update : False Compare a complete loaded configuration against the candidate configuration. For each hierarchy level or configuration object that is different in the two configurations, the version in the loaded configuration replaces the version in the candidate configuration. When the configuration is later committed, only system processes that are affected by the changed configuration elements parse the new configuration. This action is supported from PyEZ 2.1. template_vars Variables to be passed into the template processing engine in addition to those present in pillar, the minion configuration, grains, etc. You may reference these variables in your template like so: .. code-block:: jinja {{ template_vars["var_name"] }} CLI Examples: .. code-block:: bash salt 'device_name' junos.load 'salt://production/network/routers/config.set' salt 'device_name' junos.load 'salt://templates/replace_config.conf' replace=True salt 'device_name' junos.load 'salt://my_new_configuration.conf' overwrite=True salt 'device_name' junos.load 'salt://syslog_template.conf' template_vars='{"syslog_host": "10.180.222.7"}' ''' conn = __proxy__['junos.conn']() ret = {} ret['out'] = True if path is None: ret['message'] = \ 'Please provide the salt path where the configuration is present' ret['out'] = False return ret op = {} if '__pub_arg' in kwargs: if kwargs['__pub_arg']: if isinstance(kwargs['__pub_arg'][-1], dict): op.update(kwargs['__pub_arg'][-1]) else: op.update(kwargs) template_vars = {} if "template_vars" in op: template_vars = op["template_vars"] template_cached_path = salt.utils.files.mkstemp() __salt__['cp.get_template']( path, template_cached_path, template_vars=template_vars) if not os.path.isfile(template_cached_path): ret['message'] = 'Invalid file path.' ret['out'] = False return ret if os.path.getsize(template_cached_path) == 0: ret['message'] = 'Template failed to render' ret['out'] = False return ret op['path'] = template_cached_path if 'format' not in op: if path.endswith('set'): template_format = 'set' elif path.endswith('xml'): template_format = 'xml' else: template_format = 'text' op['format'] = template_format if 'replace' in op and op['replace']: op['merge'] = False del op['replace'] elif 'overwrite' in op and op['overwrite']: op['overwrite'] = True elif 'overwrite' in op and not op['overwrite']: op['merge'] = True del op['overwrite'] try: conn.cu.load(**op) ret['message'] = "Successfully loaded the configuration." except Exception as exception: ret['message'] = 'Could not load configuration due to : "{0}"'.format( exception) ret['format'] = op['format'] ret['out'] = False return ret finally: salt.utils.files.safe_rm(template_cached_path) return ret
[ "def", "load", "(", "path", "=", "None", ",", "*", "*", "kwargs", ")", ":", "conn", "=", "__proxy__", "[", "'junos.conn'", "]", "(", ")", "ret", "=", "{", "}", "ret", "[", "'out'", "]", "=", "True", "if", "path", "is", "None", ":", "ret", "[", "'message'", "]", "=", "'Please provide the salt path where the configuration is present'", "ret", "[", "'out'", "]", "=", "False", "return", "ret", "op", "=", "{", "}", "if", "'__pub_arg'", "in", "kwargs", ":", "if", "kwargs", "[", "'__pub_arg'", "]", ":", "if", "isinstance", "(", "kwargs", "[", "'__pub_arg'", "]", "[", "-", "1", "]", ",", "dict", ")", ":", "op", ".", "update", "(", "kwargs", "[", "'__pub_arg'", "]", "[", "-", "1", "]", ")", "else", ":", "op", ".", "update", "(", "kwargs", ")", "template_vars", "=", "{", "}", "if", "\"template_vars\"", "in", "op", ":", "template_vars", "=", "op", "[", "\"template_vars\"", "]", "template_cached_path", "=", "salt", ".", "utils", ".", "files", ".", "mkstemp", "(", ")", "__salt__", "[", "'cp.get_template'", "]", "(", "path", ",", "template_cached_path", ",", "template_vars", "=", "template_vars", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "template_cached_path", ")", ":", "ret", "[", "'message'", "]", "=", "'Invalid file path.'", "ret", "[", "'out'", "]", "=", "False", "return", "ret", "if", "os", ".", "path", ".", "getsize", "(", "template_cached_path", ")", "==", "0", ":", "ret", "[", "'message'", "]", "=", "'Template failed to render'", "ret", "[", "'out'", "]", "=", "False", "return", "ret", "op", "[", "'path'", "]", "=", "template_cached_path", "if", "'format'", "not", "in", "op", ":", "if", "path", ".", "endswith", "(", "'set'", ")", ":", "template_format", "=", "'set'", "elif", "path", ".", "endswith", "(", "'xml'", ")", ":", "template_format", "=", "'xml'", "else", ":", "template_format", "=", "'text'", "op", "[", "'format'", "]", "=", "template_format", "if", "'replace'", "in", "op", "and", "op", "[", "'replace'", "]", ":", "op", "[", "'merge'", "]", "=", "False", "del", "op", "[", "'replace'", "]", "elif", "'overwrite'", "in", "op", "and", "op", "[", "'overwrite'", "]", ":", "op", "[", "'overwrite'", "]", "=", "True", "elif", "'overwrite'", "in", "op", "and", "not", "op", "[", "'overwrite'", "]", ":", "op", "[", "'merge'", "]", "=", "True", "del", "op", "[", "'overwrite'", "]", "try", ":", "conn", ".", "cu", ".", "load", "(", "*", "*", "op", ")", "ret", "[", "'message'", "]", "=", "\"Successfully loaded the configuration.\"", "except", "Exception", "as", "exception", ":", "ret", "[", "'message'", "]", "=", "'Could not load configuration due to : \"{0}\"'", ".", "format", "(", "exception", ")", "ret", "[", "'format'", "]", "=", "op", "[", "'format'", "]", "ret", "[", "'out'", "]", "=", "False", "return", "ret", "finally", ":", "salt", ".", "utils", ".", "files", ".", "safe_rm", "(", "template_cached_path", ")", "return", "ret" ]
Loads the configuration from the file provided onto the device. path (required) Path where the configuration/template file is present. If the file has a ``.conf`` extension, the content is treated as text format. If the file has a ``.xml`` extension, the content is treated as XML format. If the file has a ``.set`` extension, the content is treated as Junos OS ``set`` commands. overwrite : False Set to ``True`` if you want this file is to completely replace the configuration file. replace : False Specify whether the configuration file uses ``replace:`` statements. If ``True``, only those statements under the ``replace`` tag will be changed. format Determines the format of the contents update : False Compare a complete loaded configuration against the candidate configuration. For each hierarchy level or configuration object that is different in the two configurations, the version in the loaded configuration replaces the version in the candidate configuration. When the configuration is later committed, only system processes that are affected by the changed configuration elements parse the new configuration. This action is supported from PyEZ 2.1. template_vars Variables to be passed into the template processing engine in addition to those present in pillar, the minion configuration, grains, etc. You may reference these variables in your template like so: .. code-block:: jinja {{ template_vars["var_name"] }} CLI Examples: .. code-block:: bash salt 'device_name' junos.load 'salt://production/network/routers/config.set' salt 'device_name' junos.load 'salt://templates/replace_config.conf' replace=True salt 'device_name' junos.load 'salt://my_new_configuration.conf' overwrite=True salt 'device_name' junos.load 'salt://syslog_template.conf' template_vars='{"syslog_host": "10.180.222.7"}'
[ "Loads", "the", "configuration", "from", "the", "file", "provided", "onto", "the", "device", "." ]
python
train
32.048
tensorpack/tensorpack
examples/OpticalFlow/flownet_models.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/OpticalFlow/flownet_models.py#L324-L375
def graph_structure(self, x, standalone=True): """ Architecture of FlowNetSimple in Figure 2 of FlowNet 1.0. Args: x: 2CHW if standalone==True, else NCHW where C=12 is a concatenation of 5 tensors of [3, 3, 3, 2, 1] channels. standalone: If True, this model is used to predict flow from two inputs. If False, this model is used as part of the FlowNet2. """ if standalone: x = tf.concat(tf.split(x, 2, axis=0), axis=1) with argscope([tf.layers.conv2d], activation=lambda x: tf.nn.leaky_relu(x, 0.1), padding='valid', strides=2, kernel_size=3, data_format='channels_first'), \ argscope([tf.layers.conv2d_transpose], padding='same', activation=tf.identity, data_format='channels_first', strides=2, kernel_size=4): x = tf.layers.conv2d(pad(x, 3), 64, kernel_size=7, name='conv1') conv2 = tf.layers.conv2d(pad(x, 2), 128, kernel_size=5, name='conv2') x = tf.layers.conv2d(pad(conv2, 2), 256, kernel_size=5, name='conv3') conv3 = tf.layers.conv2d(pad(x, 1), 256, name='conv3_1', strides=1) x = tf.layers.conv2d(pad(conv3, 1), 512, name='conv4') conv4 = tf.layers.conv2d(pad(x, 1), 512, name='conv4_1', strides=1) x = tf.layers.conv2d(pad(conv4, 1), 512, name='conv5') conv5 = tf.layers.conv2d(pad(x, 1), 512, name='conv5_1', strides=1) x = tf.layers.conv2d(pad(conv5, 1), 1024, name='conv6') conv6 = tf.layers.conv2d(pad(x, 1), 1024, name='conv6_1', strides=1) flow6 = tf.layers.conv2d(pad(conv6, 1), 2, name='predict_flow6', strides=1, activation=tf.identity) flow6_up = tf.layers.conv2d_transpose(flow6, 2, name='upsampled_flow6_to_5', use_bias=False) x = tf.layers.conv2d_transpose(conv6, 512, name='deconv5', activation=lambda x: tf.nn.leaky_relu(x, 0.1)) concat5 = tf.concat([conv5, x, flow6_up], axis=1, name='concat5') flow5 = tf.layers.conv2d(pad(concat5, 1), 2, name='predict_flow5', strides=1, activation=tf.identity) flow5_up = tf.layers.conv2d_transpose(flow5, 2, name='upsampled_flow5_to_4', use_bias=False) x = tf.layers.conv2d_transpose(concat5, 256, name='deconv4', activation=lambda x: tf.nn.leaky_relu(x, 0.1)) concat4 = tf.concat([conv4, x, flow5_up], axis=1, name='concat4') flow4 = tf.layers.conv2d(pad(concat4, 1), 2, name='predict_flow4', strides=1, activation=tf.identity) flow4_up = tf.layers.conv2d_transpose(flow4, 2, name='upsampled_flow4_to_3', use_bias=False) x = tf.layers.conv2d_transpose(concat4, 128, name='deconv3', activation=lambda x: tf.nn.leaky_relu(x, 0.1)) concat3 = tf.concat([conv3, x, flow4_up], axis=1, name='concat3') flow3 = tf.layers.conv2d(pad(concat3, 1), 2, name='predict_flow3', strides=1, activation=tf.identity) flow3_up = tf.layers.conv2d_transpose(flow3, 2, name='upsampled_flow3_to_2', use_bias=False) x = tf.layers.conv2d_transpose(concat3, 64, name='deconv2', activation=lambda x: tf.nn.leaky_relu(x, 0.1)) concat2 = tf.concat([conv2, x, flow3_up], axis=1, name='concat2') flow2 = tf.layers.conv2d(pad(concat2, 1), 2, name='predict_flow2', strides=1, activation=tf.identity) return tf.identity(flow2, name='flow2')
[ "def", "graph_structure", "(", "self", ",", "x", ",", "standalone", "=", "True", ")", ":", "if", "standalone", ":", "x", "=", "tf", ".", "concat", "(", "tf", ".", "split", "(", "x", ",", "2", ",", "axis", "=", "0", ")", ",", "axis", "=", "1", ")", "with", "argscope", "(", "[", "tf", ".", "layers", ".", "conv2d", "]", ",", "activation", "=", "lambda", "x", ":", "tf", ".", "nn", ".", "leaky_relu", "(", "x", ",", "0.1", ")", ",", "padding", "=", "'valid'", ",", "strides", "=", "2", ",", "kernel_size", "=", "3", ",", "data_format", "=", "'channels_first'", ")", ",", "argscope", "(", "[", "tf", ".", "layers", ".", "conv2d_transpose", "]", ",", "padding", "=", "'same'", ",", "activation", "=", "tf", ".", "identity", ",", "data_format", "=", "'channels_first'", ",", "strides", "=", "2", ",", "kernel_size", "=", "4", ")", ":", "x", "=", "tf", ".", "layers", ".", "conv2d", "(", "pad", "(", "x", ",", "3", ")", ",", "64", ",", "kernel_size", "=", "7", ",", "name", "=", "'conv1'", ")", "conv2", "=", "tf", ".", "layers", ".", "conv2d", "(", "pad", "(", "x", ",", "2", ")", ",", "128", ",", "kernel_size", "=", "5", ",", "name", "=", "'conv2'", ")", "x", "=", "tf", ".", "layers", ".", "conv2d", "(", "pad", "(", "conv2", ",", "2", ")", ",", "256", ",", "kernel_size", "=", "5", ",", "name", "=", "'conv3'", ")", "conv3", "=", "tf", ".", "layers", ".", "conv2d", "(", "pad", "(", "x", ",", "1", ")", ",", "256", ",", "name", "=", "'conv3_1'", ",", "strides", "=", "1", ")", "x", "=", "tf", ".", "layers", ".", "conv2d", "(", "pad", "(", "conv3", ",", "1", ")", ",", "512", ",", "name", "=", "'conv4'", ")", "conv4", "=", "tf", ".", "layers", ".", "conv2d", "(", "pad", "(", "x", ",", "1", ")", ",", "512", ",", "name", "=", "'conv4_1'", ",", "strides", "=", "1", ")", "x", "=", "tf", ".", "layers", ".", "conv2d", "(", "pad", "(", "conv4", ",", "1", ")", ",", "512", ",", "name", "=", "'conv5'", ")", "conv5", "=", "tf", ".", "layers", ".", "conv2d", "(", "pad", "(", "x", ",", "1", ")", ",", "512", ",", "name", "=", "'conv5_1'", ",", "strides", "=", "1", ")", "x", "=", "tf", ".", "layers", ".", "conv2d", "(", "pad", "(", "conv5", ",", "1", ")", ",", "1024", ",", "name", "=", "'conv6'", ")", "conv6", "=", "tf", ".", "layers", ".", "conv2d", "(", "pad", "(", "x", ",", "1", ")", ",", "1024", ",", "name", "=", "'conv6_1'", ",", "strides", "=", "1", ")", "flow6", "=", "tf", ".", "layers", ".", "conv2d", "(", "pad", "(", "conv6", ",", "1", ")", ",", "2", ",", "name", "=", "'predict_flow6'", ",", "strides", "=", "1", ",", "activation", "=", "tf", ".", "identity", ")", "flow6_up", "=", "tf", ".", "layers", ".", "conv2d_transpose", "(", "flow6", ",", "2", ",", "name", "=", "'upsampled_flow6_to_5'", ",", "use_bias", "=", "False", ")", "x", "=", "tf", ".", "layers", ".", "conv2d_transpose", "(", "conv6", ",", "512", ",", "name", "=", "'deconv5'", ",", "activation", "=", "lambda", "x", ":", "tf", ".", "nn", ".", "leaky_relu", "(", "x", ",", "0.1", ")", ")", "concat5", "=", "tf", ".", "concat", "(", "[", "conv5", ",", "x", ",", "flow6_up", "]", ",", "axis", "=", "1", ",", "name", "=", "'concat5'", ")", "flow5", "=", "tf", ".", "layers", ".", "conv2d", "(", "pad", "(", "concat5", ",", "1", ")", ",", "2", ",", "name", "=", "'predict_flow5'", ",", "strides", "=", "1", ",", "activation", "=", "tf", ".", "identity", ")", "flow5_up", "=", "tf", ".", "layers", ".", "conv2d_transpose", "(", "flow5", ",", "2", ",", "name", "=", "'upsampled_flow5_to_4'", ",", "use_bias", "=", "False", ")", "x", "=", "tf", ".", "layers", ".", 
"conv2d_transpose", "(", "concat5", ",", "256", ",", "name", "=", "'deconv4'", ",", "activation", "=", "lambda", "x", ":", "tf", ".", "nn", ".", "leaky_relu", "(", "x", ",", "0.1", ")", ")", "concat4", "=", "tf", ".", "concat", "(", "[", "conv4", ",", "x", ",", "flow5_up", "]", ",", "axis", "=", "1", ",", "name", "=", "'concat4'", ")", "flow4", "=", "tf", ".", "layers", ".", "conv2d", "(", "pad", "(", "concat4", ",", "1", ")", ",", "2", ",", "name", "=", "'predict_flow4'", ",", "strides", "=", "1", ",", "activation", "=", "tf", ".", "identity", ")", "flow4_up", "=", "tf", ".", "layers", ".", "conv2d_transpose", "(", "flow4", ",", "2", ",", "name", "=", "'upsampled_flow4_to_3'", ",", "use_bias", "=", "False", ")", "x", "=", "tf", ".", "layers", ".", "conv2d_transpose", "(", "concat4", ",", "128", ",", "name", "=", "'deconv3'", ",", "activation", "=", "lambda", "x", ":", "tf", ".", "nn", ".", "leaky_relu", "(", "x", ",", "0.1", ")", ")", "concat3", "=", "tf", ".", "concat", "(", "[", "conv3", ",", "x", ",", "flow4_up", "]", ",", "axis", "=", "1", ",", "name", "=", "'concat3'", ")", "flow3", "=", "tf", ".", "layers", ".", "conv2d", "(", "pad", "(", "concat3", ",", "1", ")", ",", "2", ",", "name", "=", "'predict_flow3'", ",", "strides", "=", "1", ",", "activation", "=", "tf", ".", "identity", ")", "flow3_up", "=", "tf", ".", "layers", ".", "conv2d_transpose", "(", "flow3", ",", "2", ",", "name", "=", "'upsampled_flow3_to_2'", ",", "use_bias", "=", "False", ")", "x", "=", "tf", ".", "layers", ".", "conv2d_transpose", "(", "concat3", ",", "64", ",", "name", "=", "'deconv2'", ",", "activation", "=", "lambda", "x", ":", "tf", ".", "nn", ".", "leaky_relu", "(", "x", ",", "0.1", ")", ")", "concat2", "=", "tf", ".", "concat", "(", "[", "conv2", ",", "x", ",", "flow3_up", "]", ",", "axis", "=", "1", ",", "name", "=", "'concat2'", ")", "flow2", "=", "tf", ".", "layers", ".", "conv2d", "(", "pad", "(", "concat2", ",", "1", ")", ",", "2", ",", "name", "=", "'predict_flow2'", ",", "strides", "=", "1", ",", "activation", "=", "tf", ".", "identity", ")", "return", "tf", ".", "identity", "(", "flow2", ",", "name", "=", "'flow2'", ")" ]
Architecture of FlowNetSimple in Figure 2 of FlowNet 1.0. Args: x: 2CHW if standalone==True, else NCHW where C=12 is a concatenation of 5 tensors of [3, 3, 3, 2, 1] channels. standalone: If True, this model is used to predict flow from two inputs. If False, this model is used as part of the FlowNet2.
[ "Architecture", "of", "FlowNetSimple", "in", "Figure", "2", "of", "FlowNet", "1", ".", "0", "." ]
python
train
66.403846
JosuaKrause/quick_server
quick_server/quick_server.py
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L2716-L2748
def handle_request(self): """Handles an HTTP request.The actual HTTP request is handled using a different thread. """ timeout = self.socket.gettimeout() if timeout is None: timeout = self.timeout elif self.timeout is not None: timeout = min(timeout, self.timeout) ctime = get_time() done_req = False shutdown_latency = self.shutdown_latency if timeout is not None: shutdown_latency = min(shutdown_latency, timeout) \ if shutdown_latency is not None else timeout while not (self.done or done_req) and (timeout is None or timeout == 0 or (get_time() - ctime) < timeout): try: fd_sets = select.select([self], [], [], shutdown_latency) except (OSError, select.error) as e: if e.args[0] != errno.EINTR: raise # treat EINTR as shutdown_latency timeout fd_sets = [[], [], []] for _fd in fd_sets[0]: done_req = True self._handle_request_noblock() if timeout == 0: break if not (self.done or done_req): # don't handle timeouts if we should shut down the server instead self.handle_timeout()
[ "def", "handle_request", "(", "self", ")", ":", "timeout", "=", "self", ".", "socket", ".", "gettimeout", "(", ")", "if", "timeout", "is", "None", ":", "timeout", "=", "self", ".", "timeout", "elif", "self", ".", "timeout", "is", "not", "None", ":", "timeout", "=", "min", "(", "timeout", ",", "self", ".", "timeout", ")", "ctime", "=", "get_time", "(", ")", "done_req", "=", "False", "shutdown_latency", "=", "self", ".", "shutdown_latency", "if", "timeout", "is", "not", "None", ":", "shutdown_latency", "=", "min", "(", "shutdown_latency", ",", "timeout", ")", "if", "shutdown_latency", "is", "not", "None", "else", "timeout", "while", "not", "(", "self", ".", "done", "or", "done_req", ")", "and", "(", "timeout", "is", "None", "or", "timeout", "==", "0", "or", "(", "get_time", "(", ")", "-", "ctime", ")", "<", "timeout", ")", ":", "try", ":", "fd_sets", "=", "select", ".", "select", "(", "[", "self", "]", ",", "[", "]", ",", "[", "]", ",", "shutdown_latency", ")", "except", "(", "OSError", ",", "select", ".", "error", ")", "as", "e", ":", "if", "e", ".", "args", "[", "0", "]", "!=", "errno", ".", "EINTR", ":", "raise", "# treat EINTR as shutdown_latency timeout", "fd_sets", "=", "[", "[", "]", ",", "[", "]", ",", "[", "]", "]", "for", "_fd", "in", "fd_sets", "[", "0", "]", ":", "done_req", "=", "True", "self", ".", "_handle_request_noblock", "(", ")", "if", "timeout", "==", "0", ":", "break", "if", "not", "(", "self", ".", "done", "or", "done_req", ")", ":", "# don't handle timeouts if we should shut down the server instead", "self", ".", "handle_timeout", "(", ")" ]
Handles an HTTP request. The actual HTTP request is handled using a different thread.
[ "Handles", "an", "HTTP", "request", ".", "The", "actual", "HTTP", "request", "is", "handled", "using", "a", "different", "thread", "." ]
python
train
42.363636
scanny/python-pptx
pptx/text/text.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/text/text.py#L135-L141
def paragraphs(self): """ Immutable sequence of |_Paragraph| instances corresponding to the paragraphs in this text frame. A text frame always contains at least one paragraph. """ return tuple([_Paragraph(p, self) for p in self._txBody.p_lst])
[ "def", "paragraphs", "(", "self", ")", ":", "return", "tuple", "(", "[", "_Paragraph", "(", "p", ",", "self", ")", "for", "p", "in", "self", ".", "_txBody", ".", "p_lst", "]", ")" ]
Immutable sequence of |_Paragraph| instances corresponding to the paragraphs in this text frame. A text frame always contains at least one paragraph.
[ "Immutable", "sequence", "of", "|_Paragraph|", "instances", "corresponding", "to", "the", "paragraphs", "in", "this", "text", "frame", ".", "A", "text", "frame", "always", "contains", "at", "least", "one", "paragraph", "." ]
python
train
40.714286
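Typical read-side usage of the property (the file name below is hypothetical; any .pptx with text shapes would do):

from pptx import Presentation

prs = Presentation('deck.pptx')          # hypothetical input file
for shape in prs.slides[0].shapes:
    if shape.has_text_frame:
        for paragraph in shape.text_frame.paragraphs:
            print(paragraph.text)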
ibm-watson-iot/iot-python
tmp/src/things/things.py
https://github.com/ibm-watson-iot/iot-python/blob/195f05adce3fba4ec997017e41e02ebd85c0c4cc/tmp/src/things/things.py#L221-L246
def addDraftThingType(self, thingTypeId, name = None, description = None, schemaId = None, metadata = None): """ Creates a thing type. It accepts thingTypeId (string), name (string), description (string), schemaId(string) and metadata(dict) as parameter In case of failure it throws APIException """ draftThingTypesUrl = ApiClient.draftThingTypesUrl % (self.host) payload = {'id' : thingTypeId, 'name' : name, 'description' : description, 'schemaId' : schemaId, 'metadata': metadata} r = requests.post(draftThingTypesUrl, auth=self.credentials, data=json.dumps(payload), headers = {'content-type': 'application/json'}, verify=self.verify) status = r.status_code if status == 201: self.logger.debug("The draft thing Type is created") return r.json() elif status == 400: raise ibmiotf.APIException(400, "Invalid request (No body, invalid JSON, unexpected key, bad value)", r.json()) elif status == 401: raise ibmiotf.APIException(401, "The authentication token is empty or invalid", None) elif status == 403: raise ibmiotf.APIException(403, "The authentication method is invalid or the api key used does not exist", None) elif status == 409: raise ibmiotf.APIException(409, "The draft thing type already exists", r.json()) elif status == 500: raise ibmiotf.APIException(500, "Unexpected error", None) else: raise ibmiotf.APIException(None, "Unexpected error", None)
[ "def", "addDraftThingType", "(", "self", ",", "thingTypeId", ",", "name", "=", "None", ",", "description", "=", "None", ",", "schemaId", "=", "None", ",", "metadata", "=", "None", ")", ":", "draftThingTypesUrl", "=", "ApiClient", ".", "draftThingTypesUrl", "%", "(", "self", ".", "host", ")", "payload", "=", "{", "'id'", ":", "thingTypeId", ",", "'name'", ":", "name", ",", "'description'", ":", "description", ",", "'schemaId'", ":", "schemaId", ",", "'metadata'", ":", "metadata", "}", "r", "=", "requests", ".", "post", "(", "draftThingTypesUrl", ",", "auth", "=", "self", ".", "credentials", ",", "data", "=", "json", ".", "dumps", "(", "payload", ")", ",", "headers", "=", "{", "'content-type'", ":", "'application/json'", "}", ",", "verify", "=", "self", ".", "verify", ")", "status", "=", "r", ".", "status_code", "if", "status", "==", "201", ":", "self", ".", "logger", ".", "debug", "(", "\"The draft thing Type is created\"", ")", "return", "r", ".", "json", "(", ")", "elif", "status", "==", "400", ":", "raise", "ibmiotf", ".", "APIException", "(", "400", ",", "\"Invalid request (No body, invalid JSON, unexpected key, bad value)\"", ",", "r", ".", "json", "(", ")", ")", "elif", "status", "==", "401", ":", "raise", "ibmiotf", ".", "APIException", "(", "401", ",", "\"The authentication token is empty or invalid\"", ",", "None", ")", "elif", "status", "==", "403", ":", "raise", "ibmiotf", ".", "APIException", "(", "403", ",", "\"The authentication method is invalid or the api key used does not exist\"", ",", "None", ")", "elif", "status", "==", "409", ":", "raise", "ibmiotf", ".", "APIException", "(", "409", ",", "\"The draft thing type already exists\"", ",", "r", ".", "json", "(", ")", ")", "elif", "status", "==", "500", ":", "raise", "ibmiotf", ".", "APIException", "(", "500", ",", "\"Unexpected error\"", ",", "None", ")", "else", ":", "raise", "ibmiotf", ".", "APIException", "(", "None", ",", "\"Unexpected error\"", ",", "None", ")" ]
Creates a thing type. It accepts thingTypeId (string), name (string), description (string), schemaId (string) and metadata (dict) as parameters. In case of failure it throws APIException
[ "Creates", "a", "thing", "type", ".", "It", "accepts", "thingTypeId", "(", "string", ")", "name", "(", "string", ")", "description", "(", "string", ")", "schemaId", "(", "string", ")", "and", "metadata", "(", "dict", ")", "as", "parameter", "In", "case", "of", "failure", "it", "throws", "APIException" ]
python
test
60.115385
tcalmant/ipopo
pelix/ldapfilter.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ldapfilter.py#L834-L929
def _parse_ldap(ldap_filter): # type: (str) -> Optional[LDAPFilter] """ Parses the given LDAP filter string :param ldap_filter: An LDAP filter string :return: An LDAPFilter object, None if the filter was empty :raise ValueError: The LDAP filter string is invalid """ if ldap_filter is None: # Nothing to do return None assert is_string(ldap_filter) # Remove surrounding spaces ldap_filter = ldap_filter.strip() if not ldap_filter: # Empty string return None escaped = False filter_len = len(ldap_filter) root = None stack = [] subfilter_stack = [] idx = 0 while idx < filter_len: if not escaped: if ldap_filter[idx] == "(": # Opening filter : get the operator idx = _skip_spaces(ldap_filter, idx + 1) if idx == -1: raise ValueError( "Missing filter operator: {0}".format(ldap_filter) ) operator = _compute_operation(ldap_filter, idx) if operator is not None: # New sub-filter stack.append(LDAPFilter(operator)) else: # Sub-filter content subfilter_stack.append(idx) elif ldap_filter[idx] == ")": # Ending filter : store it in its parent if subfilter_stack: # criterion finished start_idx = subfilter_stack.pop() criterion = _parse_ldap_criteria( ldap_filter, start_idx, idx ) if stack: top = stack.pop() top.append(criterion) stack.append(top) else: # No parent : filter contains only one criterion # Make a parent to stay homogeneous root = LDAPFilter(AND) root.append(criterion) elif stack: # Sub filter finished ended_filter = stack.pop() if stack: top = stack.pop() top.append(ended_filter) stack.append(top) else: # End of the parse root = ended_filter else: raise ValueError( "Too many end of parenthesis:{0}: {1}".format( idx, ldap_filter[idx:] ) ) elif ldap_filter[idx] == "\\": # Next character must be ignored escaped = True else: # Escaped character ignored escaped = False # Don't forget to increment... idx += 1 # No root : invalid content if root is None: raise ValueError("Invalid filter string: {0}".format(ldap_filter)) # Return the root of the filter return root.normalize()
[ "def", "_parse_ldap", "(", "ldap_filter", ")", ":", "# type: (str) -> Optional[LDAPFilter]", "if", "ldap_filter", "is", "None", ":", "# Nothing to do", "return", "None", "assert", "is_string", "(", "ldap_filter", ")", "# Remove surrounding spaces", "ldap_filter", "=", "ldap_filter", ".", "strip", "(", ")", "if", "not", "ldap_filter", ":", "# Empty string", "return", "None", "escaped", "=", "False", "filter_len", "=", "len", "(", "ldap_filter", ")", "root", "=", "None", "stack", "=", "[", "]", "subfilter_stack", "=", "[", "]", "idx", "=", "0", "while", "idx", "<", "filter_len", ":", "if", "not", "escaped", ":", "if", "ldap_filter", "[", "idx", "]", "==", "\"(\"", ":", "# Opening filter : get the operator", "idx", "=", "_skip_spaces", "(", "ldap_filter", ",", "idx", "+", "1", ")", "if", "idx", "==", "-", "1", ":", "raise", "ValueError", "(", "\"Missing filter operator: {0}\"", ".", "format", "(", "ldap_filter", ")", ")", "operator", "=", "_compute_operation", "(", "ldap_filter", ",", "idx", ")", "if", "operator", "is", "not", "None", ":", "# New sub-filter", "stack", ".", "append", "(", "LDAPFilter", "(", "operator", ")", ")", "else", ":", "# Sub-filter content", "subfilter_stack", ".", "append", "(", "idx", ")", "elif", "ldap_filter", "[", "idx", "]", "==", "\")\"", ":", "# Ending filter : store it in its parent", "if", "subfilter_stack", ":", "# criterion finished", "start_idx", "=", "subfilter_stack", ".", "pop", "(", ")", "criterion", "=", "_parse_ldap_criteria", "(", "ldap_filter", ",", "start_idx", ",", "idx", ")", "if", "stack", ":", "top", "=", "stack", ".", "pop", "(", ")", "top", ".", "append", "(", "criterion", ")", "stack", ".", "append", "(", "top", ")", "else", ":", "# No parent : filter contains only one criterion", "# Make a parent to stay homogeneous", "root", "=", "LDAPFilter", "(", "AND", ")", "root", ".", "append", "(", "criterion", ")", "elif", "stack", ":", "# Sub filter finished", "ended_filter", "=", "stack", ".", "pop", "(", ")", "if", "stack", ":", "top", "=", "stack", ".", "pop", "(", ")", "top", ".", "append", "(", "ended_filter", ")", "stack", ".", "append", "(", "top", ")", "else", ":", "# End of the parse", "root", "=", "ended_filter", "else", ":", "raise", "ValueError", "(", "\"Too many end of parenthesis:{0}: {1}\"", ".", "format", "(", "idx", ",", "ldap_filter", "[", "idx", ":", "]", ")", ")", "elif", "ldap_filter", "[", "idx", "]", "==", "\"\\\\\"", ":", "# Next character must be ignored", "escaped", "=", "True", "else", ":", "# Escaped character ignored", "escaped", "=", "False", "# Don't forget to increment...", "idx", "+=", "1", "# No root : invalid content", "if", "root", "is", "None", ":", "raise", "ValueError", "(", "\"Invalid filter string: {0}\"", ".", "format", "(", "ldap_filter", ")", ")", "# Return the root of the filter", "return", "root", ".", "normalize", "(", ")" ]
Parses the given LDAP filter string :param ldap_filter: An LDAP filter string :return: An LDAPFilter object, None if the filter was empty :raise ValueError: The LDAP filter string is invalid
[ "Parses", "the", "given", "LDAP", "filter", "string" ]
python
train
32.25
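A much-reduced illustration of the same scanning idea: walk the string once, honour backslash escapes, and track parenthesis depth. This is only the balance check, not the full operator and criteria handling of _parse_ldap:

def ldap_parens_balanced(ldap_filter):
    # Simplified: report whether parentheses balance, skipping escaped chars.
    depth = 0
    escaped = False
    for char in ldap_filter:
        if escaped:
            escaped = False
        elif char == '\\':
            escaped = True
        elif char == '(':
            depth += 1
        elif char == ')':
            depth -= 1
            if depth < 0:
                return False   # too many closing parentheses
    return depth == 0

print(ldap_parens_balanced('(&(objectClass=person)(cn=admin))'))   # True
print(ldap_parens_balanced('(cn=admin))'))                         # False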
Azure/azure-sdk-for-python
azure-servicemanagement-legacy/azure/servicemanagement/sqldatabasemanagementservice.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicemanagement-legacy/azure/servicemanagement/sqldatabasemanagementservice.py#L262-L273
def list_firewall_rules(self, server_name): ''' Retrieves the set of firewall rules for an Azure SQL Database Server. server_name: Name of the server. ''' _validate_not_none('server_name', server_name) response = self._perform_get(self._get_firewall_rules_path(server_name), None) return _MinidomXmlToObject.parse_service_resources_response( response, FirewallRule)
[ "def", "list_firewall_rules", "(", "self", ",", "server_name", ")", ":", "_validate_not_none", "(", "'server_name'", ",", "server_name", ")", "response", "=", "self", ".", "_perform_get", "(", "self", ".", "_get_firewall_rules_path", "(", "server_name", ")", ",", "None", ")", "return", "_MinidomXmlToObject", ".", "parse_service_resources_response", "(", "response", ",", "FirewallRule", ")" ]
Retrieves the set of firewall rules for an Azure SQL Database Server. server_name: Name of the server.
[ "Retrieves", "the", "set", "of", "firewall", "rules", "for", "an", "Azure", "SQL", "Database", "Server", "." ]
python
test
39.333333
PmagPy/PmagPy
dialogs/pmag_gui_menu2.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_gui_menu2.py#L177-L181
def orient_import2(self, event): """ initialize window to import an AzDip format file into the working directory """ pmag_menu_dialogs.ImportAzDipFile(self.parent, self.parent.WD)
[ "def", "orient_import2", "(", "self", ",", "event", ")", ":", "pmag_menu_dialogs", ".", "ImportAzDipFile", "(", "self", ".", "parent", ",", "self", ".", "parent", ".", "WD", ")" ]
initialize window to import an AzDip format file into the working directory
[ "initialize", "window", "to", "import", "an", "AzDip", "format", "file", "into", "the", "working", "directory" ]
python
train
41.4
zeroSteiner/smoke-zephyr
smoke_zephyr/utilities.py
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/utilities.py#L577-L591
def parse_case_snake_to_camel(snake, upper_first=True): """ Convert a string from snake_case to CamelCase. :param str snake: The snake_case string to convert. :param bool upper_first: Whether or not to capitalize the first character of the string. :return: The CamelCase version of string. :rtype: str """ snake = snake.split('_') first_part = snake[0] if upper_first: first_part = first_part.title() return first_part + ''.join(word.title() for word in snake[1:])
[ "def", "parse_case_snake_to_camel", "(", "snake", ",", "upper_first", "=", "True", ")", ":", "snake", "=", "snake", ".", "split", "(", "'_'", ")", "first_part", "=", "snake", "[", "0", "]", "if", "upper_first", ":", "first_part", "=", "first_part", ".", "title", "(", ")", "return", "first_part", "+", "''", ".", "join", "(", "word", ".", "title", "(", ")", "for", "word", "in", "snake", "[", "1", ":", "]", ")" ]
Convert a string from snake_case to CamelCase. :param str snake: The snake_case string to convert. :param bool upper_first: Whether or not to capitalize the first character of the string. :return: The CamelCase version of string. :rtype: str
[ "Convert", "a", "string", "from", "snake_case", "to", "CamelCase", "." ]
python
train
31.066667
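Usage is straightforward; reproducing the function body makes the example self-contained without installing smoke-zephyr:

def parse_case_snake_to_camel(snake, upper_first=True):
    # Same logic as the snippet above.
    parts = snake.split('_')
    first_part = parts[0].title() if upper_first else parts[0]
    return first_part + ''.join(word.title() for word in parts[1:])

print(parse_case_snake_to_camel('smoke_zephyr'))                     # SmokeZephyr
print(parse_case_snake_to_camel('smoke_zephyr', upper_first=False))  # smokeZephyr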
polyledger/lattice
lattice/optimize.py
https://github.com/polyledger/lattice/blob/d68d27c93b1634ee29f5c1a1dbcd67397481323b/lattice/optimize.py#L141-L158
def solve_minimize( self, func, weights, constraints, lower_bound=0.0, upper_bound=1.0, func_deriv=False ): """ Returns the solution to a minimization problem. """ bounds = ((lower_bound, upper_bound), ) * len(self.SUPPORTED_COINS) return minimize( fun=func, x0=weights, jac=func_deriv, bounds=bounds, constraints=constraints, method='SLSQP', options={'disp': False} )
[ "def", "solve_minimize", "(", "self", ",", "func", ",", "weights", ",", "constraints", ",", "lower_bound", "=", "0.0", ",", "upper_bound", "=", "1.0", ",", "func_deriv", "=", "False", ")", ":", "bounds", "=", "(", "(", "lower_bound", ",", "upper_bound", ")", ",", ")", "*", "len", "(", "self", ".", "SUPPORTED_COINS", ")", "return", "minimize", "(", "fun", "=", "func", ",", "x0", "=", "weights", ",", "jac", "=", "func_deriv", ",", "bounds", "=", "bounds", ",", "constraints", "=", "constraints", ",", "method", "=", "'SLSQP'", ",", "options", "=", "{", "'disp'", ":", "False", "}", ")" ]
Returns the solution to a minimization problem.
[ "Returns", "the", "solution", "to", "a", "minimization", "problem", "." ]
python
train
26.888889
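A self-contained SLSQP call in the same shape as the wrapper above: a few weights bounded to [0, 1], constrained to sum to one, minimizing a toy quadratic objective (the objective and the three-asset setup are invented for the example):

import numpy as np
from scipy.optimize import minimize

def func(weights):
    return np.sum(weights ** 2)          # toy "risk" to minimize

weights = np.array([1.0, 0.0, 0.0])      # initial guess
bounds = ((0.0, 1.0),) * 3               # one (lower, upper) pair per asset
constraints = ({'type': 'eq', 'fun': lambda w: np.sum(w) - 1.0},)

result = minimize(fun=func, x0=weights, bounds=bounds,
                  constraints=constraints, method='SLSQP',
                  options={'disp': False})
print(result.x)                          # roughly [1/3, 1/3, 1/3]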
zencoder/zencoder-py
zencoder/core.py
https://github.com/zencoder/zencoder-py/blob/9d762e33e2bb2edadb0e5da0bb80a61e27636426/zencoder/core.py#L329-L350
def create(self, input=None, live_stream=False, outputs=None, options=None): """ Creates a transcoding job. Here are some examples:: job.create('s3://zencodertesting/test.mov') job.create(live_stream=True) job.create(input='http://example.com/input.mov', outputs=({'label': 'test output'},)) https://app.zencoder.com/docs/api/jobs/create """ data = {"input": input, "test": self.test} if outputs: data['outputs'] = outputs if options: data.update(options) if live_stream: data['live_stream'] = live_stream return self.post(self.base_url, body=json.dumps(data))
[ "def", "create", "(", "self", ",", "input", "=", "None", ",", "live_stream", "=", "False", ",", "outputs", "=", "None", ",", "options", "=", "None", ")", ":", "data", "=", "{", "\"input\"", ":", "input", ",", "\"test\"", ":", "self", ".", "test", "}", "if", "outputs", ":", "data", "[", "'outputs'", "]", "=", "outputs", "if", "options", ":", "data", ".", "update", "(", "options", ")", "if", "live_stream", ":", "data", "[", "'live_stream'", "]", "=", "live_stream", "return", "self", ".", "post", "(", "self", ".", "base_url", ",", "body", "=", "json", ".", "dumps", "(", "data", ")", ")" ]
Creates a transcoding job. Here are some examples:: job.create('s3://zencodertesting/test.mov') job.create(live_stream=True) job.create(input='http://example.com/input.mov', outputs=({'label': 'test output'},)) https://app.zencoder.com/docs/api/jobs/create
[ "Creates", "a", "transcoding", "job", ".", "Here", "are", "some", "examples", "::" ]
python
train
32
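End-to-end usage along the lines of the docstring examples (the API key and output bucket URL here are placeholders, and the call requires network access to the Zencoder API):

from zencoder import Zencoder

zen = Zencoder('abcd1234')               # placeholder API key
response = zen.job.create(
    input='s3://zencodertesting/test.mov',
    outputs=({'label': 'web', 'url': 's3://example-bucket/output.mp4'},))
print(response.code)                     # e.g. 201 on success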
CalebBell/thermo
thermo/chemical.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/chemical.py#L1247-L1261
def rdkitmol_Hs(self): r'''RDKit object of the chemical, with hydrogen. If RDKit is not available, holds None. For examples of what can be done with RDKit, see `their website <http://www.rdkit.org/docs/GettingStartedInPython.html>`_. ''' if self.__rdkitmol_Hs: return self.__rdkitmol_Hs else: try: self.__rdkitmol_Hs = Chem.AddHs(self.rdkitmol) return self.__rdkitmol_Hs except: return None
[ "def", "rdkitmol_Hs", "(", "self", ")", ":", "if", "self", ".", "__rdkitmol_Hs", ":", "return", "self", ".", "__rdkitmol_Hs", "else", ":", "try", ":", "self", ".", "__rdkitmol_Hs", "=", "Chem", ".", "AddHs", "(", "self", ".", "rdkitmol", ")", "return", "self", ".", "__rdkitmol_Hs", "except", ":", "return", "None" ]
r'''RDKit object of the chemical, with hydrogen. If RDKit is not available, holds None. For examples of what can be done with RDKit, see `their website <http://www.rdkit.org/docs/GettingStartedInPython.html>`_.
[ "r", "RDKit", "object", "of", "the", "chemical", "with", "hydrogen", ".", "If", "RDKit", "is", "not", "available", "holds", "None", "." ]
python
valid
34.466667
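A short usage sketch of the underlying RDKit calls (RDKit must be installed); the SMILES string is an illustrative input, not something taken from the library.

from rdkit import Chem

def mol_with_hs(smiles):
    mol = Chem.MolFromSmiles(smiles)   # None when the SMILES cannot be parsed
    if mol is None:
        return None
    try:
        return Chem.AddHs(mol)         # the same call rdkitmol_Hs caches
    except Exception:
        return None

print(mol_with_hs("CCO").GetNumAtoms())   # ethanol: 9 atoms once hydrogens are explicit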
unt-libraries/codalib
codalib/bagatom.py
https://github.com/unt-libraries/codalib/blob/458d117bb48938c1a0e26d9161cb5f730461b4c7/codalib/bagatom.py#L272-L395
def makeObjectFeed( paginator, objectToXMLFunction, feedId, title, webRoot, idAttr="id", nameAttr="name", dateAttr=None, request=None, page=1, count=20, author=APP_AUTHOR): """ Take a list of some kind of object, a conversion function, an id and a title Return XML representing an ATOM feed """ listSize = paginator.count if listSize: object_list = paginator.page(page).object_list else: object_list = [] count = int(count) originalId = feedId idParts = feedId.split("?", 1) if len(idParts) == 2: feedId = idParts[0] if request: GETStruct = request.GET else: GETStruct = False feedTag = etree.Element(ATOM + "feed", nsmap=ATOM_NSMAP) # The id tag is very similar to the 'self' link idTag = etree.SubElement(feedTag, ATOM + "id") idTag.text = "%s/%s" % (webRoot, feedId) # The title is passed in from the calling function titleTag = etree.SubElement(feedTag, ATOM + "title") titleTag.text = title # The author is passed in from the calling function and required to be valid ATOM if author: authorTag = etree.SubElement(feedTag, ATOM + "author") nameTag = etree.SubElement(authorTag, ATOM + "name") urlTag = etree.SubElement(authorTag, ATOM + "uri") nameTag.text = author.get('name', 'UNT') urlTag.text = author.get('uri', 'http://library.unt.edu/') # The updated tag is a updatedTag = etree.SubElement(feedTag, ATOM + "updated") updatedTag.text = xsDateTime_format(localize_datetime(datetime.now())) # We will always show the link to the current 'self' page linkTag = etree.SubElement(feedTag, ATOM + "link") linkTag.set("rel", "self") if not request or not request.META['QUERY_STRING']: linkTag.set("href", "%s/%s" % (webRoot, feedId)) else: linkTag.set( "href", "%s/%s?%s" % ( webRoot, feedId, urllib.urlencode(request.GET, doseq=True) ) ) # We always have a last page endLink = etree.SubElement(feedTag, ATOM + "link") endLink.set("rel", "last") if GETStruct: endLinkGS = GETStruct.copy() else: endLinkGS = {} endLinkGS.update({"page": paginator.num_pages}) endLink.set( "href", "%s/%s?%s" % ( webRoot, feedId, urllib.urlencode(endLinkGS, doseq=True) ) ) # We always have a first page startLink = etree.SubElement(feedTag, ATOM + "link") startLink.set("rel", "first") if GETStruct: startLinkGS = GETStruct.copy() else: startLinkGS = {} startLinkGS.update({"page": paginator.page_range[0]}) startLink.set( "href", "%s/%s?%s" % ( webRoot, feedId, urllib.urlencode(startLinkGS, doseq=True) ) ) # Potentially there is a previous page, list it's details if paginator.page(page).has_previous(): prevLink = etree.SubElement(feedTag, ATOM + "link") prevLink.set("rel", "previous") if GETStruct: prevLinkGS = GETStruct.copy() else: prevLinkGS = {} prevLinkGS.update( {"page": paginator.page(page).previous_page_number()} ) prevLinkText = "%s/%s?%s" % ( webRoot, feedId, urllib.urlencode(prevLinkGS, doseq=True) ) prevLink.set("href", prevLinkText) # Potentially there is a next page, fill in it's details if paginator.page(page).has_next(): nextLink = etree.SubElement(feedTag, ATOM + "link") nextLink.set("rel", "next") if GETStruct: nextLinkGS = GETStruct.copy() else: nextLinkGS = {} nextLinkGS.update({"page": paginator.page(page).next_page_number()}) nextLinkText = "%s/%s?%s" % ( webRoot, feedId, urllib.urlencode(nextLinkGS, doseq=True) ) nextLink.set("href", nextLinkText) for o in object_list: objectXML = objectToXMLFunction(o) if dateAttr: dateStamp = getattr(o, dateAttr) else: dateStamp = None althref = feedId.strip('/').split('/')[-1] althref = '%s/%s/%s/' % ( webRoot, althref, getattr(o, idAttr) ) objectEntry = wrapAtom( xml=objectXML, 
id='%s/%s%s/' % (webRoot, originalId, getattr(o, idAttr)), title=getattr(o, nameAttr), updated=dateStamp, alt=althref ) feedTag.append(objectEntry) return feedTag
[ "def", "makeObjectFeed", "(", "paginator", ",", "objectToXMLFunction", ",", "feedId", ",", "title", ",", "webRoot", ",", "idAttr", "=", "\"id\"", ",", "nameAttr", "=", "\"name\"", ",", "dateAttr", "=", "None", ",", "request", "=", "None", ",", "page", "=", "1", ",", "count", "=", "20", ",", "author", "=", "APP_AUTHOR", ")", ":", "listSize", "=", "paginator", ".", "count", "if", "listSize", ":", "object_list", "=", "paginator", ".", "page", "(", "page", ")", ".", "object_list", "else", ":", "object_list", "=", "[", "]", "count", "=", "int", "(", "count", ")", "originalId", "=", "feedId", "idParts", "=", "feedId", ".", "split", "(", "\"?\"", ",", "1", ")", "if", "len", "(", "idParts", ")", "==", "2", ":", "feedId", "=", "idParts", "[", "0", "]", "if", "request", ":", "GETStruct", "=", "request", ".", "GET", "else", ":", "GETStruct", "=", "False", "feedTag", "=", "etree", ".", "Element", "(", "ATOM", "+", "\"feed\"", ",", "nsmap", "=", "ATOM_NSMAP", ")", "# The id tag is very similar to the 'self' link", "idTag", "=", "etree", ".", "SubElement", "(", "feedTag", ",", "ATOM", "+", "\"id\"", ")", "idTag", ".", "text", "=", "\"%s/%s\"", "%", "(", "webRoot", ",", "feedId", ")", "# The title is passed in from the calling function", "titleTag", "=", "etree", ".", "SubElement", "(", "feedTag", ",", "ATOM", "+", "\"title\"", ")", "titleTag", ".", "text", "=", "title", "# The author is passed in from the calling function and required to be valid ATOM", "if", "author", ":", "authorTag", "=", "etree", ".", "SubElement", "(", "feedTag", ",", "ATOM", "+", "\"author\"", ")", "nameTag", "=", "etree", ".", "SubElement", "(", "authorTag", ",", "ATOM", "+", "\"name\"", ")", "urlTag", "=", "etree", ".", "SubElement", "(", "authorTag", ",", "ATOM", "+", "\"uri\"", ")", "nameTag", ".", "text", "=", "author", ".", "get", "(", "'name'", ",", "'UNT'", ")", "urlTag", ".", "text", "=", "author", ".", "get", "(", "'uri'", ",", "'http://library.unt.edu/'", ")", "# The updated tag is a", "updatedTag", "=", "etree", ".", "SubElement", "(", "feedTag", ",", "ATOM", "+", "\"updated\"", ")", "updatedTag", ".", "text", "=", "xsDateTime_format", "(", "localize_datetime", "(", "datetime", ".", "now", "(", ")", ")", ")", "# We will always show the link to the current 'self' page", "linkTag", "=", "etree", ".", "SubElement", "(", "feedTag", ",", "ATOM", "+", "\"link\"", ")", "linkTag", ".", "set", "(", "\"rel\"", ",", "\"self\"", ")", "if", "not", "request", "or", "not", "request", ".", "META", "[", "'QUERY_STRING'", "]", ":", "linkTag", ".", "set", "(", "\"href\"", ",", "\"%s/%s\"", "%", "(", "webRoot", ",", "feedId", ")", ")", "else", ":", "linkTag", ".", "set", "(", "\"href\"", ",", "\"%s/%s?%s\"", "%", "(", "webRoot", ",", "feedId", ",", "urllib", ".", "urlencode", "(", "request", ".", "GET", ",", "doseq", "=", "True", ")", ")", ")", "# We always have a last page", "endLink", "=", "etree", ".", "SubElement", "(", "feedTag", ",", "ATOM", "+", "\"link\"", ")", "endLink", ".", "set", "(", "\"rel\"", ",", "\"last\"", ")", "if", "GETStruct", ":", "endLinkGS", "=", "GETStruct", ".", "copy", "(", ")", "else", ":", "endLinkGS", "=", "{", "}", "endLinkGS", ".", "update", "(", "{", "\"page\"", ":", "paginator", ".", "num_pages", "}", ")", "endLink", ".", "set", "(", "\"href\"", ",", "\"%s/%s?%s\"", "%", "(", "webRoot", ",", "feedId", ",", "urllib", ".", "urlencode", "(", "endLinkGS", ",", "doseq", "=", "True", ")", ")", ")", "# We always have a first page", "startLink", "=", "etree", ".", "SubElement", "(", "feedTag", ",", "ATOM", "+", 
"\"link\"", ")", "startLink", ".", "set", "(", "\"rel\"", ",", "\"first\"", ")", "if", "GETStruct", ":", "startLinkGS", "=", "GETStruct", ".", "copy", "(", ")", "else", ":", "startLinkGS", "=", "{", "}", "startLinkGS", ".", "update", "(", "{", "\"page\"", ":", "paginator", ".", "page_range", "[", "0", "]", "}", ")", "startLink", ".", "set", "(", "\"href\"", ",", "\"%s/%s?%s\"", "%", "(", "webRoot", ",", "feedId", ",", "urllib", ".", "urlencode", "(", "startLinkGS", ",", "doseq", "=", "True", ")", ")", ")", "# Potentially there is a previous page, list it's details", "if", "paginator", ".", "page", "(", "page", ")", ".", "has_previous", "(", ")", ":", "prevLink", "=", "etree", ".", "SubElement", "(", "feedTag", ",", "ATOM", "+", "\"link\"", ")", "prevLink", ".", "set", "(", "\"rel\"", ",", "\"previous\"", ")", "if", "GETStruct", ":", "prevLinkGS", "=", "GETStruct", ".", "copy", "(", ")", "else", ":", "prevLinkGS", "=", "{", "}", "prevLinkGS", ".", "update", "(", "{", "\"page\"", ":", "paginator", ".", "page", "(", "page", ")", ".", "previous_page_number", "(", ")", "}", ")", "prevLinkText", "=", "\"%s/%s?%s\"", "%", "(", "webRoot", ",", "feedId", ",", "urllib", ".", "urlencode", "(", "prevLinkGS", ",", "doseq", "=", "True", ")", ")", "prevLink", ".", "set", "(", "\"href\"", ",", "prevLinkText", ")", "# Potentially there is a next page, fill in it's details", "if", "paginator", ".", "page", "(", "page", ")", ".", "has_next", "(", ")", ":", "nextLink", "=", "etree", ".", "SubElement", "(", "feedTag", ",", "ATOM", "+", "\"link\"", ")", "nextLink", ".", "set", "(", "\"rel\"", ",", "\"next\"", ")", "if", "GETStruct", ":", "nextLinkGS", "=", "GETStruct", ".", "copy", "(", ")", "else", ":", "nextLinkGS", "=", "{", "}", "nextLinkGS", ".", "update", "(", "{", "\"page\"", ":", "paginator", ".", "page", "(", "page", ")", ".", "next_page_number", "(", ")", "}", ")", "nextLinkText", "=", "\"%s/%s?%s\"", "%", "(", "webRoot", ",", "feedId", ",", "urllib", ".", "urlencode", "(", "nextLinkGS", ",", "doseq", "=", "True", ")", ")", "nextLink", ".", "set", "(", "\"href\"", ",", "nextLinkText", ")", "for", "o", "in", "object_list", ":", "objectXML", "=", "objectToXMLFunction", "(", "o", ")", "if", "dateAttr", ":", "dateStamp", "=", "getattr", "(", "o", ",", "dateAttr", ")", "else", ":", "dateStamp", "=", "None", "althref", "=", "feedId", ".", "strip", "(", "'/'", ")", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "althref", "=", "'%s/%s/%s/'", "%", "(", "webRoot", ",", "althref", ",", "getattr", "(", "o", ",", "idAttr", ")", ")", "objectEntry", "=", "wrapAtom", "(", "xml", "=", "objectXML", ",", "id", "=", "'%s/%s%s/'", "%", "(", "webRoot", ",", "originalId", ",", "getattr", "(", "o", ",", "idAttr", ")", ")", ",", "title", "=", "getattr", "(", "o", ",", "nameAttr", ")", ",", "updated", "=", "dateStamp", ",", "alt", "=", "althref", ")", "feedTag", ".", "append", "(", "objectEntry", ")", "return", "feedTag" ]
Take a list of some kind of object, a conversion function, an id and a title Return XML representing an ATOM feed
[ "Take", "a", "list", "of", "some", "kind", "of", "object", "a", "conversion", "function", "an", "id", "and", "a", "title", "Return", "XML", "representing", "an", "ATOM", "feed" ]
python
train
35.798387
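A much-reduced sketch of the lxml.etree calls that makeObjectFeed strings together (namespaced elements plus a self link with an encoded query); the namespace constant, URL and title are illustrative, and urllib.parse.urlencode stands in for the Python 2 urllib.urlencode used above.

from urllib.parse import urlencode

from lxml import etree

ATOM_NS = "http://www.w3.org/2005/Atom"
ATOM = "{%s}" % ATOM_NS

feed = etree.Element(ATOM + "feed", nsmap={None: ATOM_NS})
etree.SubElement(feed, ATOM + "id").text = "http://example.org/feed"
etree.SubElement(feed, ATOM + "title").text = "Example feed"
link = etree.SubElement(feed, ATOM + "link")
link.set("rel", "self")
link.set("href", "http://example.org/feed?%s" % urlencode({"page": 1}, doseq=True))
print(etree.tostring(feed, pretty_print=True).decode())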
molmod/molmod
molmod/graphs.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L349-L426
def canonical_order(self): """The vertices in a canonical or normalized order. This routine will return a list of vertices in an order that does not depend on the initial order, but only depends on the connectivity and the return values of the function self.get_vertex_string. Only the vertices that are involved in edges will be included. The result can be given as first argument to self.get_subgraph, with reduce=True as second argument. This will return a complete canonical graph. The routine is designed not to use symmetry relations that are obtained with the GraphSearch routine. We also tried to create an ordering that feels like natural, i.e. starting in the center and pushing vertices with few equivalents to the front. If necessary, the nature of the vertices and their bonds to atoms closer to the center will also play a role, but only as a last resort. """ # A) find an appropriate starting vertex. # Here we take a central vertex that has a minimal number of symmetrical # equivalents, 'the highest atom number', and the highest fingerprint. # Note that the symmetrical equivalents are computed from the vertex # fingerprints, i.e. without the GraphSearch. starting_vertex = max( ( -len(self.equivalent_vertices[vertex]), self.get_vertex_string(vertex), self.vertex_fingerprints[vertex].tobytes(), vertex ) for vertex in self.central_vertices )[-1] # B) sort all vertices based on # 1) distance from central vertex # 2) number of equivalent vertices # 3) vertex string, (higher atom numbers come first) # 4) fingerprint # 5) vertex index # The last field is only included to collect the result of the sort. # The fingerprint on itself would be sufficient, but the three first are # there to have a naturally appealing result. l = [ [ -distance, -len(self.equivalent_vertices[vertex]), self.get_vertex_string(vertex), self.vertex_fingerprints[vertex].tobytes(), vertex ] for vertex, distance in self.iter_breadth_first(starting_vertex) if len(self.neighbors[vertex]) > 0 ] l.sort(reverse=True) # C) The order of some vertices is still not completely set. e.g. # consider the case of allene. The four hydrogen atoms are equivalent, # but one can have two different orders: make geminiles consecutive or # don't. It is more trikcy than one would think at first sight. In the # case of allene, geminility could easily solve the problem. Consider a # big flat rotationally symmetric molecule (order 2). The first five # shells are order 4 and one would just give a random order to four # segemnts in the first shell. Only when one reaches the outer part that # has order two, it turns out that the arbitrary choices in the inner # shell play a role. So it does not help to look at relations with # vertices at inner or current shells only. One has to consider the # whole picture. (unit testing reveals troubles like these) # I need some sleep now. The code below checks for potential fuzz and # will raise an error if the ordering is not fully determined yet. One # day, I'll need this code more than I do now, and I'll fix things up. # I know how to do this, but I don't care enough right now. # -- Toon for i in range(1, len(l)): if l[i][:-1] == l[i-1][:-1]: raise NotImplementedError # D) Return only the vertex indexes. return [record[-1] for record in l]
[ "def", "canonical_order", "(", "self", ")", ":", "# A) find an appropriate starting vertex.", "# Here we take a central vertex that has a minimal number of symmetrical", "# equivalents, 'the highest atom number', and the highest fingerprint.", "# Note that the symmetrical equivalents are computed from the vertex", "# fingerprints, i.e. without the GraphSearch.", "starting_vertex", "=", "max", "(", "(", "-", "len", "(", "self", ".", "equivalent_vertices", "[", "vertex", "]", ")", ",", "self", ".", "get_vertex_string", "(", "vertex", ")", ",", "self", ".", "vertex_fingerprints", "[", "vertex", "]", ".", "tobytes", "(", ")", ",", "vertex", ")", "for", "vertex", "in", "self", ".", "central_vertices", ")", "[", "-", "1", "]", "# B) sort all vertices based on", "# 1) distance from central vertex", "# 2) number of equivalent vertices", "# 3) vertex string, (higher atom numbers come first)", "# 4) fingerprint", "# 5) vertex index", "# The last field is only included to collect the result of the sort.", "# The fingerprint on itself would be sufficient, but the three first are", "# there to have a naturally appealing result.", "l", "=", "[", "[", "-", "distance", ",", "-", "len", "(", "self", ".", "equivalent_vertices", "[", "vertex", "]", ")", ",", "self", ".", "get_vertex_string", "(", "vertex", ")", ",", "self", ".", "vertex_fingerprints", "[", "vertex", "]", ".", "tobytes", "(", ")", ",", "vertex", "]", "for", "vertex", ",", "distance", "in", "self", ".", "iter_breadth_first", "(", "starting_vertex", ")", "if", "len", "(", "self", ".", "neighbors", "[", "vertex", "]", ")", ">", "0", "]", "l", ".", "sort", "(", "reverse", "=", "True", ")", "# C) The order of some vertices is still not completely set. e.g.", "# consider the case of allene. The four hydrogen atoms are equivalent,", "# but one can have two different orders: make geminiles consecutive or", "# don't. It is more trikcy than one would think at first sight. In the", "# case of allene, geminility could easily solve the problem. Consider a", "# big flat rotationally symmetric molecule (order 2). The first five", "# shells are order 4 and one would just give a random order to four", "# segemnts in the first shell. Only when one reaches the outer part that", "# has order two, it turns out that the arbitrary choices in the inner", "# shell play a role. So it does not help to look at relations with", "# vertices at inner or current shells only. One has to consider the", "# whole picture. (unit testing reveals troubles like these)", "# I need some sleep now. The code below checks for potential fuzz and", "# will raise an error if the ordering is not fully determined yet. One", "# day, I'll need this code more than I do now, and I'll fix things up.", "# I know how to do this, but I don't care enough right now.", "# -- Toon", "for", "i", "in", "range", "(", "1", ",", "len", "(", "l", ")", ")", ":", "if", "l", "[", "i", "]", "[", ":", "-", "1", "]", "==", "l", "[", "i", "-", "1", "]", "[", ":", "-", "1", "]", ":", "raise", "NotImplementedError", "# D) Return only the vertex indexes.", "return", "[", "record", "[", "-", "1", "]", "for", "record", "in", "l", "]" ]
The vertices in a canonical or normalized order. This routine will return a list of vertices in an order that does not depend on the initial order, but only depends on the connectivity and the return values of the function self.get_vertex_string. Only the vertices that are involved in edges will be included. The result can be given as first argument to self.get_subgraph, with reduce=True as second argument. This will return a complete canonical graph. The routine is designed not to use symmetry relations that are obtained with the GraphSearch routine. We also tried to create an ordering that feels like natural, i.e. starting in the center and pushing vertices with few equivalents to the front. If necessary, the nature of the vertices and their bonds to atoms closer to the center will also play a role, but only as a last resort.
[ "The", "vertices", "in", "a", "canonical", "or", "normalized", "order", "." ]
python
train
50.410256
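A toy illustration of the tie check at the end of canonical_order: records are sorted in reverse on every field, and adjacent records that agree on everything but the trailing vertex index signal an under-determined ordering. All values below are made up.

records = [
    (-1, -2, "C", b"\x01", 5),
    (-1, -2, "C", b"\x01", 3),   # identical key except for the vertex index
    (-2, -1, "H", b"\x02", 7),
]
records.sort(reverse=True)
for a, b in zip(records, records[1:]):
    if a[:-1] == b[:-1]:
        print("ordering of vertices %d and %d is not determined" % (a[-1], b[-1]))
print([rec[-1] for rec in records])   # [5, 3, 7]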
epfl-lts2/pygsp
pygsp/graphs/graph.py
https://github.com/epfl-lts2/pygsp/blob/8ce5bde39206129287375af24fdbcd7edddca8c5/pygsp/graphs/graph.py#L883-L911
def _get_upper_bound(self): r"""Return an upper bound on the eigenvalues of the Laplacian.""" if self.lap_type == 'normalized': return 2 # Equal iff the graph is bipartite. elif self.lap_type == 'combinatorial': bounds = [] # Equal for full graphs. bounds += [self.n_vertices * np.max(self.W)] # Gershgorin circle theorem. Equal for regular bipartite graphs. # Special case of the below bound. bounds += [2 * np.max(self.dw)] # Anderson, Morley, Eigenvalues of the Laplacian of a graph. # Equal for regular bipartite graphs. if self.n_edges > 0: sources, targets, _ = self.get_edge_list() bounds += [np.max(self.dw[sources] + self.dw[targets])] # Merris, A note on Laplacian graph eigenvalues. if not self.is_directed(): W = self.W else: W = utils.symmetrize(self.W, method='average') m = W.dot(self.dw) / self.dw # Mean degree of adjacent vertices. bounds += [np.max(self.dw + m)] # Good review: On upper bounds for Laplacian graph eigenvalues. return min(bounds) else: raise ValueError('Unknown Laplacian type ' '{}'.format(self.lap_type))
[ "def", "_get_upper_bound", "(", "self", ")", ":", "if", "self", ".", "lap_type", "==", "'normalized'", ":", "return", "2", "# Equal iff the graph is bipartite.", "elif", "self", ".", "lap_type", "==", "'combinatorial'", ":", "bounds", "=", "[", "]", "# Equal for full graphs.", "bounds", "+=", "[", "self", ".", "n_vertices", "*", "np", ".", "max", "(", "self", ".", "W", ")", "]", "# Gershgorin circle theorem. Equal for regular bipartite graphs.", "# Special case of the below bound.", "bounds", "+=", "[", "2", "*", "np", ".", "max", "(", "self", ".", "dw", ")", "]", "# Anderson, Morley, Eigenvalues of the Laplacian of a graph.", "# Equal for regular bipartite graphs.", "if", "self", ".", "n_edges", ">", "0", ":", "sources", ",", "targets", ",", "_", "=", "self", ".", "get_edge_list", "(", ")", "bounds", "+=", "[", "np", ".", "max", "(", "self", ".", "dw", "[", "sources", "]", "+", "self", ".", "dw", "[", "targets", "]", ")", "]", "# Merris, A note on Laplacian graph eigenvalues.", "if", "not", "self", ".", "is_directed", "(", ")", ":", "W", "=", "self", ".", "W", "else", ":", "W", "=", "utils", ".", "symmetrize", "(", "self", ".", "W", ",", "method", "=", "'average'", ")", "m", "=", "W", ".", "dot", "(", "self", ".", "dw", ")", "/", "self", ".", "dw", "# Mean degree of adjacent vertices.", "bounds", "+=", "[", "np", ".", "max", "(", "self", ".", "dw", "+", "m", ")", "]", "# Good review: On upper bounds for Laplacian graph eigenvalues.", "return", "min", "(", "bounds", ")", "else", ":", "raise", "ValueError", "(", "'Unknown Laplacian type '", "'{}'", ".", "format", "(", "self", ".", "lap_type", ")", ")" ]
r"""Return an upper bound on the eigenvalues of the Laplacian.
[ "r", "Return", "an", "upper", "bound", "on", "the", "eigenvalues", "of", "the", "Laplacian", "." ]
python
train
46.793103
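A small NumPy check of two of the combinatorial bounds listed above, Gershgorin (twice the largest weighted degree) and Anderson-Morley (largest d_u + d_v over the edges), on a toy undirected weighted graph.

import numpy as np

W = np.array([[0., 1., 0.],
              [1., 0., 2.],
              [0., 2., 0.]])          # weighted path graph on three vertices
d = W.sum(axis=1)                     # weighted degrees
gershgorin = 2 * d.max()
i, j = np.nonzero(np.triu(W))         # one entry per undirected edge
anderson_morley = (d[i] + d[j]).max()
print(min(gershgorin, anderson_morley))   # 5.0 here, an upper bound on lambda_max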
horazont/aioxmpp
aioxmpp/callbacks.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/callbacks.py#L472-L503
def connect(self, f, mode=None): """ Connect an object `f` to the signal. The type the object needs to have depends on `mode`, but usually it needs to be a callable. :meth:`connect` returns an opaque token which can be used with :meth:`disconnect` to disconnect the object from the signal. The default value for `mode` is :attr:`STRONG`. Any decorator can be used as argument for `mode` and it is applied to `f`. The result is stored internally and is what will be called when the signal is being emitted. If the result of `mode` returns a false value during emission, the connection is removed. .. note:: The return values required by the callable returned by `mode` and the one required by a callable passed to `f` using the predefined modes are complementary! A callable `f` needs to return true to be removed from the connections, while a callable returned by the `mode` decorator needs to return false. Existing modes are listed below. """ mode = mode or self.STRONG self.logger.debug("connecting %r with mode %r", f, mode) return self._connect(mode(f))
[ "def", "connect", "(", "self", ",", "f", ",", "mode", "=", "None", ")", ":", "mode", "=", "mode", "or", "self", ".", "STRONG", "self", ".", "logger", ".", "debug", "(", "\"connecting %r with mode %r\"", ",", "f", ",", "mode", ")", "return", "self", ".", "_connect", "(", "mode", "(", "f", ")", ")" ]
Connect an object `f` to the signal. The type the object needs to have depends on `mode`, but usually it needs to be a callable. :meth:`connect` returns an opaque token which can be used with :meth:`disconnect` to disconnect the object from the signal. The default value for `mode` is :attr:`STRONG`. Any decorator can be used as argument for `mode` and it is applied to `f`. The result is stored internally and is what will be called when the signal is being emitted. If the result of `mode` returns a false value during emission, the connection is removed. .. note:: The return values required by the callable returned by `mode` and the one required by a callable passed to `f` using the predefined modes are complementary! A callable `f` needs to return true to be removed from the connections, while a callable returned by the `mode` decorator needs to return false. Existing modes are listed below.
[ "Connect", "an", "object", "f", "to", "the", "signal", ".", "The", "type", "the", "object", "needs", "to", "have", "depends", "on", "mode", "but", "usually", "it", "needs", "to", "be", "a", "callable", "." ]
python
train
38.65625
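A generic stand-alone sketch of the mode-as-decorator idea (not aioxmpp's actual classes): the wrapper produced by the mode returns a false value to have the connection dropped, while the wrapped callback returns true to request its own removal.

def STRONG(f):
    def wrapper(*args, **kwargs):
        remove = f(*args, **kwargs)
        return not remove             # falsey result -> connection is removed
    return wrapper

class Signal:
    def __init__(self):
        self._connections = []

    def connect(self, f, mode=None):
        mode = mode or STRONG
        token = mode(f)
        self._connections.append(token)
        return token

    def emit(self, *args, **kwargs):
        self._connections = [c for c in self._connections if c(*args, **kwargs)]

sig = Signal()
sig.connect(lambda: False)    # keeps listening
sig.connect(lambda: True)     # asks to be removed after the first emission
sig.emit()
print(len(sig._connections))  # 1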
erdewit/ib_insync
ib_insync/ib.py
https://github.com/erdewit/ib_insync/blob/d0646a482590f5cb7bfddbd1f0870f8c4bc1df80/ib_insync/ib.py#L343-L360
def accountSummary(self, account: str = '') -> List[AccountValue]: """ List of account values for the given account, or of all accounts if account is left blank. This method is blocking on first run, non-blocking after that. Args: account: If specified, filter for this account name. """ if not self.wrapper.acctSummary: # loaded on demand since it takes ca. 250 ms self.reqAccountSummary() if account: return [v for v in self.wrapper.acctSummary.values() if v.account == account] else: return list(self.wrapper.acctSummary.values())
[ "def", "accountSummary", "(", "self", ",", "account", ":", "str", "=", "''", ")", "->", "List", "[", "AccountValue", "]", ":", "if", "not", "self", ".", "wrapper", ".", "acctSummary", ":", "# loaded on demand since it takes ca. 250 ms", "self", ".", "reqAccountSummary", "(", ")", "if", "account", ":", "return", "[", "v", "for", "v", "in", "self", ".", "wrapper", ".", "acctSummary", ".", "values", "(", ")", "if", "v", ".", "account", "==", "account", "]", "else", ":", "return", "list", "(", "self", ".", "wrapper", ".", "acctSummary", ".", "values", "(", ")", ")" ]
List of account values for the given account, or of all accounts if account is left blank. This method is blocking on first run, non-blocking after that. Args: account: If specified, filter for this account name.
[ "List", "of", "account", "values", "for", "the", "given", "account", "or", "of", "all", "accounts", "if", "account", "is", "left", "blank", "." ]
python
train
37.222222
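A pure-Python sketch of the filter step, with a simplified stand-in for ib_insync's AccountValue (the real objects carry more fields, and the values here are invented).

from collections import namedtuple

AccountValue = namedtuple("AccountValue", "account tag value")

acct_summary = {
    1: AccountValue("DU111111", "NetLiquidation", "100000"),
    2: AccountValue("DU222222", "NetLiquidation", "250000"),
}

def account_summary(account=""):
    if account:
        return [v for v in acct_summary.values() if v.account == account]
    return list(acct_summary.values())

print(account_summary("DU111111"))   # only the values for that account
print(len(account_summary()))        # 2: all accounts when the filter is blank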
gmr/tredis
tredis/lists.py
https://github.com/gmr/tredis/blob/2e91c6a58a35460be0525c51ac6a98fde3b506ad/tredis/lists.py#L120-L150
def lpush(self, key, *values): """ Insert all the specified values at the head of the list stored at key. :param key: The list's key :type key: :class:`str`, :class:`bytes` :param values: One or more positional arguments to insert at the beginning of the list. Each value is inserted at the beginning of the list individually (see discussion below). :returns: the length of the list after push operations :rtype: int :raises: :exc:`~tredis.exceptions.TRedisException` If `key` does not exist, it is created as empty list before performing the push operations. When key holds a value that is not a list, an error is returned. It is possible to push multiple elements using a single command call just specifying multiple arguments at the end of the command. Elements are inserted one after the other to the head of the list, from the leftmost element to the rightmost element. So for instance ``client.lpush('mylist', 'a', 'b', 'c')`` will result into a list containing ``c`` as first element, ``b`` as second element and ``a`` as third element. .. note:: **Time complexity**: ``O(1)`` """ return self._execute([b'LPUSH', key] + list(values))
[ "def", "lpush", "(", "self", ",", "key", ",", "*", "values", ")", ":", "return", "self", ".", "_execute", "(", "[", "b'LPUSH'", ",", "key", "]", "+", "list", "(", "values", ")", ")" ]
Insert all the specified values at the head of the list stored at key. :param key: The list's key :type key: :class:`str`, :class:`bytes` :param values: One or more positional arguments to insert at the beginning of the list. Each value is inserted at the beginning of the list individually (see discussion below). :returns: the length of the list after push operations :rtype: int :raises: :exc:`~tredis.exceptions.TRedisException` If `key` does not exist, it is created as empty list before performing the push operations. When key holds a value that is not a list, an error is returned. It is possible to push multiple elements using a single command call just specifying multiple arguments at the end of the command. Elements are inserted one after the other to the head of the list, from the leftmost element to the rightmost element. So for instance ``client.lpush('mylist', 'a', 'b', 'c')`` will result into a list containing ``c`` as first element, ``b`` as second element and ``a`` as third element. .. note:: **Time complexity**: ``O(1)``
[ "Insert", "all", "the", "specified", "values", "at", "the", "head", "of", "the", "list", "stored", "at", "key", "." ]
python
train
42.580645
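A pure-Python illustration of the head-insert ordering described in the docstring (no Redis involved): pushing 'a', 'b', 'c' leaves 'c' at the head.

def lpush(store, key, *values):
    lst = store.setdefault(key, [])
    for value in values:
        lst.insert(0, value)     # each value goes to the head, one after the other
    return len(lst)

store = {}
print(lpush(store, "mylist", "a", "b", "c"))   # 3, the length after the pushes
print(store["mylist"])                         # ['c', 'b', 'a']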
RI-imaging/qpformat
qpformat/file_formats/series_hdf5_qpimage.py
https://github.com/RI-imaging/qpformat/blob/364e29d7d9e8b9f1d7a4a25c753d1baf9d73d5eb/qpformat/file_formats/series_hdf5_qpimage.py#L63-L78
def verify(path): """Verify that `path` has the qpimage series file format""" valid = False try: h5 = h5py.File(path, mode="r") qpi0 = h5["qpi_0"] except (OSError, KeyError): pass else: if ("qpimage version" in qpi0.attrs and "phase" in qpi0 and "amplitude" in qpi0 and "bg_data" in qpi0["phase"] and "bg_data" in qpi0["amplitude"]): valid = True return valid
[ "def", "verify", "(", "path", ")", ":", "valid", "=", "False", "try", ":", "h5", "=", "h5py", ".", "File", "(", "path", ",", "mode", "=", "\"r\"", ")", "qpi0", "=", "h5", "[", "\"qpi_0\"", "]", "except", "(", "OSError", ",", "KeyError", ")", ":", "pass", "else", ":", "if", "(", "\"qpimage version\"", "in", "qpi0", ".", "attrs", "and", "\"phase\"", "in", "qpi0", "and", "\"amplitude\"", "in", "qpi0", "and", "\"bg_data\"", "in", "qpi0", "[", "\"phase\"", "]", "and", "\"bg_data\"", "in", "qpi0", "[", "\"amplitude\"", "]", ")", ":", "valid", "=", "True", "return", "valid" ]
Verify that `path` has the qpimage series file format
[ "Verify", "that", "path", "has", "the", "qpimage", "series", "file", "format" ]
python
train
32.75
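A sketch (requires h5py and NumPy) that writes a file containing exactly the groups and attribute verify() inspects and then re-checks the same conditions; the real qpimage layout holds more than this, and the version string and dataset below are placeholders.

import h5py
import numpy as np

with h5py.File("example_qpi.h5", "w") as h5:
    qpi0 = h5.create_group("qpi_0")
    qpi0.attrs["qpimage version"] = "0.0.0"        # placeholder value
    for name in ("phase", "amplitude"):
        grp = qpi0.create_group(name)
        grp.create_dataset("raw", data=np.zeros((4, 4)))
        grp.create_group("bg_data")

with h5py.File("example_qpi.h5", "r") as h5:
    qpi0 = h5["qpi_0"]
    print("qpimage version" in qpi0.attrs
          and "phase" in qpi0 and "amplitude" in qpi0
          and "bg_data" in qpi0["phase"]
          and "bg_data" in qpi0["amplitude"])      # True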
d0c-s4vage/pfp
pfp/fields.py
https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/fields.py#L1070-L1107
def _pfp__build(self, stream=None, save_offset=False): """Build the field and write the result into the stream :stream: An IO stream that can be written to :returns: None """ if stream is not None and save_offset: self._pfp__offset = stream.tell() if self.bitsize is None: data = struct.pack( "{}{}".format(self.endian, self.format), self._pfp__value ) if stream is not None: stream.write(data) return len(data) else: return data else: data = struct.pack( "{}{}".format(BIG_ENDIAN, self.format), self._pfp__value ) num_bytes = int(math.ceil(self.bitsize / 8.0)) bit_data = data[-num_bytes:] raw_bits = bitwrap.bytes_to_bits(bit_data) bits = raw_bits[-self.bitsize:] if stream is not None: self.bitfield_rw.write_bits(stream, bits, self.bitfield_padded, self.bitfield_left_right, self.endian) return len(bits) // 8 else: # TODO this can't be right.... return bits
[ "def", "_pfp__build", "(", "self", ",", "stream", "=", "None", ",", "save_offset", "=", "False", ")", ":", "if", "stream", "is", "not", "None", "and", "save_offset", ":", "self", ".", "_pfp__offset", "=", "stream", ".", "tell", "(", ")", "if", "self", ".", "bitsize", "is", "None", ":", "data", "=", "struct", ".", "pack", "(", "\"{}{}\"", ".", "format", "(", "self", ".", "endian", ",", "self", ".", "format", ")", ",", "self", ".", "_pfp__value", ")", "if", "stream", "is", "not", "None", ":", "stream", ".", "write", "(", "data", ")", "return", "len", "(", "data", ")", "else", ":", "return", "data", "else", ":", "data", "=", "struct", ".", "pack", "(", "\"{}{}\"", ".", "format", "(", "BIG_ENDIAN", ",", "self", ".", "format", ")", ",", "self", ".", "_pfp__value", ")", "num_bytes", "=", "int", "(", "math", ".", "ceil", "(", "self", ".", "bitsize", "/", "8.0", ")", ")", "bit_data", "=", "data", "[", "-", "num_bytes", ":", "]", "raw_bits", "=", "bitwrap", ".", "bytes_to_bits", "(", "bit_data", ")", "bits", "=", "raw_bits", "[", "-", "self", ".", "bitsize", ":", "]", "if", "stream", "is", "not", "None", ":", "self", ".", "bitfield_rw", ".", "write_bits", "(", "stream", ",", "bits", ",", "self", ".", "bitfield_padded", ",", "self", ".", "bitfield_left_right", ",", "self", ".", "endian", ")", "return", "len", "(", "bits", ")", "//", "8", "else", ":", "# TODO this can't be right....", "return", "bits" ]
Build the field and write the result into the stream :stream: An IO stream that can be written to :returns: None
[ "Build", "the", "field", "and", "write", "the", "result", "into", "the", "stream" ]
python
train
32.078947
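A stand-alone sketch of the bitfield branch: pack the value big-endian, keep only the bytes needed to hold bitsize bits, and slice off the low bits; the string-based helper below replaces bitwrap purely for illustration.

import math
import struct

def low_bits(value, fmt="I", bitsize=5):
    data = struct.pack(">" + fmt, value)            # big-endian, as in the bitfield case
    num_bytes = int(math.ceil(bitsize / 8.0))
    bit_data = data[-num_bytes:]                    # only the bytes that can hold the bits
    bits = "".join("{:08b}".format(b) for b in bit_data)
    return bits[-bitsize:]                          # the low `bitsize` bits

print(low_bits(0b10110, bitsize=5))   # '10110'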
apache/airflow
airflow/utils/dag_processing.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dag_processing.py#L931-L944
def clear_nonexistent_import_errors(self, session): """ Clears import errors for files that no longer exist. :param session: session for ORM operations :type session: sqlalchemy.orm.session.Session """ query = session.query(errors.ImportError) if self._file_paths: query = query.filter( ~errors.ImportError.filename.in_(self._file_paths) ) query.delete(synchronize_session='fetch') session.commit()
[ "def", "clear_nonexistent_import_errors", "(", "self", ",", "session", ")", ":", "query", "=", "session", ".", "query", "(", "errors", ".", "ImportError", ")", "if", "self", ".", "_file_paths", ":", "query", "=", "query", ".", "filter", "(", "~", "errors", ".", "ImportError", ".", "filename", ".", "in_", "(", "self", ".", "_file_paths", ")", ")", "query", ".", "delete", "(", "synchronize_session", "=", "'fetch'", ")", "session", ".", "commit", "(", ")" ]
Clears import errors for files that no longer exist. :param session: session for ORM operations :type session: sqlalchemy.orm.session.Session
[ "Clears", "import", "errors", "for", "files", "that", "no", "longer", "exist", "." ]
python
test
35.571429
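A self-contained sketch of the same delete-what-is-no-longer-tracked pattern against an in-memory SQLite database (requires SQLAlchemy 1.4 or newer); the model below is a stand-in, not Airflow's actual errors.ImportError model.

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class TrackedImportError(Base):
    __tablename__ = "import_error"
    id = Column(Integer, primary_key=True)
    filename = Column(String)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([TrackedImportError(filename=f) for f in ("a.py", "b.py", "gone.py")])
session.commit()

file_paths = ["a.py", "b.py"]
query = session.query(TrackedImportError)
if file_paths:
    query = query.filter(~TrackedImportError.filename.in_(file_paths))
query.delete(synchronize_session="fetch")
session.commit()
print([e.filename for e in session.query(TrackedImportError)])   # ['a.py', 'b.py']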
ihgazni2/elist
elist/elist.py
https://github.com/ihgazni2/elist/blob/8c07b5029bda34ead60ce10335ceb145f209263c/elist/elist.py#L4210-L4228
def regex_in(pl,regex): ''' regex = re.compile("^[a-z]+$") pl = ['b1c3d','xab15cxx','1x','y2'] regex_in(pl,regex) regex = re.compile("^[0-9a-z]+$") pl = ['b1c3d','xab15cxx','1x','y2'] regex_in(pl,regex) ''' def cond_func(ele,regex): m = regex.search(ele) if(m == None): return(False) else: return(True) cond = some(pl,cond_func,regex)['cond'] return(cond)
[ "def", "regex_in", "(", "pl", ",", "regex", ")", ":", "def", "cond_func", "(", "ele", ",", "regex", ")", ":", "m", "=", "regex", ".", "search", "(", "ele", ")", "if", "(", "m", "==", "None", ")", ":", "return", "(", "False", ")", "else", ":", "return", "(", "True", ")", "cond", "=", "some", "(", "pl", ",", "cond_func", ",", "regex", ")", "[", "'cond'", "]", "return", "(", "cond", ")" ]
regex = re.compile("^[a-z]+$") pl = ['b1c3d','xab15cxx','1x','y2'] regex_in(pl,regex) regex = re.compile("^[0-9a-z]+$") pl = ['b1c3d','xab15cxx','1x','y2'] regex_in(pl,regex)
[ "regex", "=", "re", ".", "compile", "(", "^", "[", "a", "-", "z", "]", "+", "$", ")", "pl", "=", "[", "b1c3d", "xab15cxx", "1x", "y2", "]", "regex_in", "(", "pl", "regex", ")", "regex", "=", "re", ".", "compile", "(", "^", "[", "0", "-", "9a", "-", "z", "]", "+", "$", ")", "pl", "=", "[", "b1c3d", "xab15cxx", "1x", "y2", "]", "regex_in", "(", "pl", "regex", ")" ]
python
valid
24.789474
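The same membership test phrased with the built-in any(); the sample lists reproduce the two cases from the docstring.

import re

def regex_in(pl, regex):
    return any(regex.search(ele) is not None for ele in pl)

pl = ['b1c3d', 'xab15cxx', '1x', 'y2']
print(regex_in(pl, re.compile("^[a-z]+$")))       # False: every entry contains a digit
print(regex_in(pl, re.compile("^[0-9a-z]+$")))    # True: 'b1c3d' matches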
Clinical-Genomics/scout
scout/server/blueprints/dashboard/controllers.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/server/blueprints/dashboard/controllers.py#L6-L163
def get_dashboard_info(adapter, institute_id=None, slice_query=None): """Returns cases with phenotype If phenotypes are provided search for only those Args: adapter(adapter.MongoAdapter) institute_id(str): an institute _id slice_query(str): query to filter cases to obtain statistics for. Returns: data(dict): Dictionary with relevant information """ LOG.debug("General query with institute_id {}.".format(institute_id)) # if institute_id == 'None' or None, all cases and general stats will be returned if institute_id == 'None': institute_id = None # If a slice_query is present then numbers in "General statistics" and "Case statistics" will # reflect the data available for the query general_sliced_info = get_general_case_info(adapter, institute_id=institute_id, slice_query=slice_query) total_sliced_cases = general_sliced_info['total_cases'] data = {'total_cases': total_sliced_cases} if total_sliced_cases == 0: return data data['pedigree'] = [] for ped_info in general_sliced_info['pedigree'].values(): ped_info['percent'] = ped_info['count'] / total_sliced_cases data['pedigree'].append(ped_info) data['cases'] = get_case_groups(adapter, total_sliced_cases, institute_id=institute_id, slice_query=slice_query) data['analysis_types'] = get_analysis_types(adapter, total_sliced_cases, institute_id=institute_id, slice_query=slice_query) overview = [ { 'title': 'Phenotype terms', 'count': general_sliced_info['phenotype_cases'], 'percent': general_sliced_info['phenotype_cases'] / total_sliced_cases, }, { 'title': 'Causative variants', 'count': general_sliced_info['causative_cases'], 'percent': general_sliced_info['causative_cases'] / total_sliced_cases, }, { 'title': 'Pinned variants', 'count': general_sliced_info['pinned_cases'], 'percent': general_sliced_info['pinned_cases'] / total_sliced_cases, }, { 'title': 'Cohort tag', 'count': general_sliced_info['cohort_cases'], 'percent': general_sliced_info['cohort_cases'] / total_sliced_cases, } ] # Data from "Variant statistics tab" is not filtered by slice_query and numbers will # reflect verified variants in all available cases for an institute general_info = get_general_case_info(adapter, institute_id=institute_id) total_cases = general_info['total_cases'] sliced_case_ids = general_sliced_info['case_ids'] verified_query = { 'verb' : 'validate', } if institute_id: # filter by institute if users wishes so verified_query['institute'] = institute_id # Case level information sliced_validation_cases = set() sliced_validated_cases = set() # Variant level information validated_tp = set() validated_fp = set() var_valid_orders = 0 # use this counter to count 'True Positive', 'False positive' and 'Not validated' vars validate_events = adapter.event_collection.find(verified_query) for validate_event in list(validate_events): case_id = validate_event.get('case') var_obj = adapter.variant(case_id=case_id, document_id=validate_event['variant_id']) if var_obj: # Don't take into account variants which have been removed from db var_valid_orders += 1 if case_id in sliced_case_ids: sliced_validation_cases.add(case_id) # add to the set. 
Can't add same id twice since it'a a set validation = var_obj.get('validation') if validation and validation in ['True positive', 'False positive']: if case_id in sliced_case_ids: sliced_validated_cases.add(case_id) if validation == 'True positive': validated_tp.add(var_obj['_id']) elif validation == 'False positive': validated_fp.add(var_obj['_id']) n_validation_cases = len(sliced_validation_cases) n_validated_cases = len(sliced_validated_cases) # append overview.append( { 'title': 'Validation ordered', 'count': n_validation_cases, 'percent': n_validation_cases / total_sliced_cases, }) overview.append( { 'title': 'Validated cases (TP + FP)', 'count': n_validated_cases, 'percent': n_validated_cases / total_sliced_cases, }) data['overview'] = overview variants = [] nr_validated = len(validated_tp) + len(validated_fp) variants.append( { 'title': 'Validation ordered', 'count': var_valid_orders, 'percent': 1 } ) # taking into account that var_valid_orders might be 0: percent_validated_tp = 0 percent_validated_fp = 0 if var_valid_orders: percent_validated_tp = len(validated_tp) / var_valid_orders percent_validated_fp = len(validated_fp) / var_valid_orders variants.append( { 'title': 'Validated True Positive', 'count': len(validated_tp), 'percent': percent_validated_tp, } ) variants.append( { 'title': 'Validated False Positive', 'count': len(validated_fp), 'percent': percent_validated_fp, } ) data['variants'] = variants return data
[ "def", "get_dashboard_info", "(", "adapter", ",", "institute_id", "=", "None", ",", "slice_query", "=", "None", ")", ":", "LOG", ".", "debug", "(", "\"General query with institute_id {}.\"", ".", "format", "(", "institute_id", ")", ")", "# if institute_id == 'None' or None, all cases and general stats will be returned", "if", "institute_id", "==", "'None'", ":", "institute_id", "=", "None", "# If a slice_query is present then numbers in \"General statistics\" and \"Case statistics\" will", "# reflect the data available for the query", "general_sliced_info", "=", "get_general_case_info", "(", "adapter", ",", "institute_id", "=", "institute_id", ",", "slice_query", "=", "slice_query", ")", "total_sliced_cases", "=", "general_sliced_info", "[", "'total_cases'", "]", "data", "=", "{", "'total_cases'", ":", "total_sliced_cases", "}", "if", "total_sliced_cases", "==", "0", ":", "return", "data", "data", "[", "'pedigree'", "]", "=", "[", "]", "for", "ped_info", "in", "general_sliced_info", "[", "'pedigree'", "]", ".", "values", "(", ")", ":", "ped_info", "[", "'percent'", "]", "=", "ped_info", "[", "'count'", "]", "/", "total_sliced_cases", "data", "[", "'pedigree'", "]", ".", "append", "(", "ped_info", ")", "data", "[", "'cases'", "]", "=", "get_case_groups", "(", "adapter", ",", "total_sliced_cases", ",", "institute_id", "=", "institute_id", ",", "slice_query", "=", "slice_query", ")", "data", "[", "'analysis_types'", "]", "=", "get_analysis_types", "(", "adapter", ",", "total_sliced_cases", ",", "institute_id", "=", "institute_id", ",", "slice_query", "=", "slice_query", ")", "overview", "=", "[", "{", "'title'", ":", "'Phenotype terms'", ",", "'count'", ":", "general_sliced_info", "[", "'phenotype_cases'", "]", ",", "'percent'", ":", "general_sliced_info", "[", "'phenotype_cases'", "]", "/", "total_sliced_cases", ",", "}", ",", "{", "'title'", ":", "'Causative variants'", ",", "'count'", ":", "general_sliced_info", "[", "'causative_cases'", "]", ",", "'percent'", ":", "general_sliced_info", "[", "'causative_cases'", "]", "/", "total_sliced_cases", ",", "}", ",", "{", "'title'", ":", "'Pinned variants'", ",", "'count'", ":", "general_sliced_info", "[", "'pinned_cases'", "]", ",", "'percent'", ":", "general_sliced_info", "[", "'pinned_cases'", "]", "/", "total_sliced_cases", ",", "}", ",", "{", "'title'", ":", "'Cohort tag'", ",", "'count'", ":", "general_sliced_info", "[", "'cohort_cases'", "]", ",", "'percent'", ":", "general_sliced_info", "[", "'cohort_cases'", "]", "/", "total_sliced_cases", ",", "}", "]", "# Data from \"Variant statistics tab\" is not filtered by slice_query and numbers will", "# reflect verified variants in all available cases for an institute", "general_info", "=", "get_general_case_info", "(", "adapter", ",", "institute_id", "=", "institute_id", ")", "total_cases", "=", "general_info", "[", "'total_cases'", "]", "sliced_case_ids", "=", "general_sliced_info", "[", "'case_ids'", "]", "verified_query", "=", "{", "'verb'", ":", "'validate'", ",", "}", "if", "institute_id", ":", "# filter by institute if users wishes so", "verified_query", "[", "'institute'", "]", "=", "institute_id", "# Case level information", "sliced_validation_cases", "=", "set", "(", ")", "sliced_validated_cases", "=", "set", "(", ")", "# Variant level information", "validated_tp", "=", "set", "(", ")", "validated_fp", "=", "set", "(", ")", "var_valid_orders", "=", "0", "# use this counter to count 'True Positive', 'False positive' and 'Not validated' vars", "validate_events", "=", "adapter", ".", 
"event_collection", ".", "find", "(", "verified_query", ")", "for", "validate_event", "in", "list", "(", "validate_events", ")", ":", "case_id", "=", "validate_event", ".", "get", "(", "'case'", ")", "var_obj", "=", "adapter", ".", "variant", "(", "case_id", "=", "case_id", ",", "document_id", "=", "validate_event", "[", "'variant_id'", "]", ")", "if", "var_obj", ":", "# Don't take into account variants which have been removed from db", "var_valid_orders", "+=", "1", "if", "case_id", "in", "sliced_case_ids", ":", "sliced_validation_cases", ".", "add", "(", "case_id", ")", "# add to the set. Can't add same id twice since it'a a set", "validation", "=", "var_obj", ".", "get", "(", "'validation'", ")", "if", "validation", "and", "validation", "in", "[", "'True positive'", ",", "'False positive'", "]", ":", "if", "case_id", "in", "sliced_case_ids", ":", "sliced_validated_cases", ".", "add", "(", "case_id", ")", "if", "validation", "==", "'True positive'", ":", "validated_tp", ".", "add", "(", "var_obj", "[", "'_id'", "]", ")", "elif", "validation", "==", "'False positive'", ":", "validated_fp", ".", "add", "(", "var_obj", "[", "'_id'", "]", ")", "n_validation_cases", "=", "len", "(", "sliced_validation_cases", ")", "n_validated_cases", "=", "len", "(", "sliced_validated_cases", ")", "# append", "overview", ".", "append", "(", "{", "'title'", ":", "'Validation ordered'", ",", "'count'", ":", "n_validation_cases", ",", "'percent'", ":", "n_validation_cases", "/", "total_sliced_cases", ",", "}", ")", "overview", ".", "append", "(", "{", "'title'", ":", "'Validated cases (TP + FP)'", ",", "'count'", ":", "n_validated_cases", ",", "'percent'", ":", "n_validated_cases", "/", "total_sliced_cases", ",", "}", ")", "data", "[", "'overview'", "]", "=", "overview", "variants", "=", "[", "]", "nr_validated", "=", "len", "(", "validated_tp", ")", "+", "len", "(", "validated_fp", ")", "variants", ".", "append", "(", "{", "'title'", ":", "'Validation ordered'", ",", "'count'", ":", "var_valid_orders", ",", "'percent'", ":", "1", "}", ")", "# taking into account that var_valid_orders might be 0:", "percent_validated_tp", "=", "0", "percent_validated_fp", "=", "0", "if", "var_valid_orders", ":", "percent_validated_tp", "=", "len", "(", "validated_tp", ")", "/", "var_valid_orders", "percent_validated_fp", "=", "len", "(", "validated_fp", ")", "/", "var_valid_orders", "variants", ".", "append", "(", "{", "'title'", ":", "'Validated True Positive'", ",", "'count'", ":", "len", "(", "validated_tp", ")", ",", "'percent'", ":", "percent_validated_tp", ",", "}", ")", "variants", ".", "append", "(", "{", "'title'", ":", "'Validated False Positive'", ",", "'count'", ":", "len", "(", "validated_fp", ")", ",", "'percent'", ":", "percent_validated_fp", ",", "}", ")", "data", "[", "'variants'", "]", "=", "variants", "return", "data" ]
Returns cases with phenotype If phenotypes are provided search for only those Args: adapter(adapter.MongoAdapter) institute_id(str): an institute _id slice_query(str): query to filter cases to obtain statistics for. Returns: data(dict): Dictionary with relevant information
[ "Returns", "cases", "with", "phenotype" ]
python
test
34.71519
casacore/python-casacore
casacore/functionals/functional.py
https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/functionals/functional.py#L122-L141
def fdf(self, x): """Calculate the value of the functional for the specified arguments, and the derivatives with respect to the parameters (taking any specified mask into account). :param x: the value(s) to evaluate at """ x = self._flatten(x) n = 1 if hasattr(x, "__len__"): n = len(x) if self._dtype == 0: retval = _functional._fdf(self, x) else: retval = _functional._fdfc(self, x) if len(retval) == n: return numpy.array(retval) return numpy.array(retval).reshape(self.npar() + 1, n // self.ndim()).transpose()
[ "def", "fdf", "(", "self", ",", "x", ")", ":", "x", "=", "self", ".", "_flatten", "(", "x", ")", "n", "=", "1", "if", "hasattr", "(", "x", ",", "\"__len__\"", ")", ":", "n", "=", "len", "(", "x", ")", "if", "self", ".", "_dtype", "==", "0", ":", "retval", "=", "_functional", ".", "_fdf", "(", "self", ",", "x", ")", "else", ":", "retval", "=", "_functional", ".", "_fdfc", "(", "self", ",", "x", ")", "if", "len", "(", "retval", ")", "==", "n", ":", "return", "numpy", ".", "array", "(", "retval", ")", "return", "numpy", ".", "array", "(", "retval", ")", ".", "reshape", "(", "self", ".", "npar", "(", ")", "+", "1", ",", "n", "//", "self", ".", "ndim", "(", ")", ")", ".", "transpose", "(", ")" ]
Calculate the value of the functional for the specified arguments, and the derivatives with respect to the parameters (taking any specified mask into account). :param x: the value(s) to evaluate at
[ "Calculate", "the", "value", "of", "the", "functional", "for", "the", "specified", "arguments", "and", "the", "derivatives", "with", "respect", "to", "the", "parameters", "(", "taking", "any", "specified", "mask", "into", "account", ")", "." ]
python
train
34.1
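A toy NumPy illustration of the final reshape: with one parameter and three evaluation points, the flat result (function values followed by derivatives) becomes one row per point after reshape and transpose. The numbers are made up.

import numpy as np

flat = np.array([1.0, 2.0, 3.0,    # f(x) at the three points
                 0.1, 0.2, 0.3])   # df/dparam at the three points
npar, n = 1, 3
print(flat.reshape(npar + 1, n).transpose())
# [[1.  0.1]
#  [2.  0.2]
#  [3.  0.3]]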
bokeh/bokeh
bokeh/colors/color.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/colors/color.py#L81-L94
def darken(self, amount): ''' Darken (reduce the luminance) of this color. Args: amount (float) : Amount to reduce the luminance by (clamped above zero) Returns: Color ''' hsl = self.to_hsl() hsl.l = self.clamp(hsl.l - amount) return self.from_hsl(hsl)
[ "def", "darken", "(", "self", ",", "amount", ")", ":", "hsl", "=", "self", ".", "to_hsl", "(", ")", "hsl", ".", "l", "=", "self", ".", "clamp", "(", "hsl", ".", "l", "-", "amount", ")", "return", "self", ".", "from_hsl", "(", "hsl", ")" ]
Darken (reduce the luminance) of this color. Args: amount (float) : Amount to reduce the luminance by (clamped above zero) Returns: Color
[ "Darken", "(", "reduce", "the", "luminance", ")", "of", "this", "color", "." ]
python
train
24.142857
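A minimal stand-alone version of the same idea using the standard library's colorsys module (note that colorsys orders the tuple as H, L, S): lower the luminance and clamp it at zero. The RGB triple is illustrative.

import colorsys

def darken(rgb, amount):
    h, l, s = colorsys.rgb_to_hls(*rgb)
    l = max(l - amount, 0.0)          # clamp the luminance above zero
    return colorsys.hls_to_rgb(h, l, s)

print(darken((0.8, 0.2, 0.2), 0.1))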
jmcarp/sqlalchemy-postgres-copy
postgres_copy/__init__.py
https://github.com/jmcarp/sqlalchemy-postgres-copy/blob/01ef522e8e46a6961e227069d465b0cb93e42383/postgres_copy/__init__.py#L10-L41
def copy_to(source, dest, engine_or_conn, **flags): """Export a query or select to a file. For flags, see the PostgreSQL documentation at http://www.postgresql.org/docs/9.5/static/sql-copy.html. Examples: :: select = MyTable.select() with open('/path/to/file.tsv', 'w') as fp: copy_to(select, fp, conn) query = session.query(MyModel) with open('/path/to/file/csv', 'w') as fp: copy_to(query, fp, engine, format='csv', null='.') :param source: SQLAlchemy query or select :param dest: Destination file pointer, in write mode :param engine_or_conn: SQLAlchemy engine, connection, or raw_connection :param **flags: Options passed through to COPY If an existing connection is passed to `engine_or_conn`, it is the caller's responsibility to commit and close. """ dialect = postgresql.dialect() statement = getattr(source, 'statement', source) compiled = statement.compile(dialect=dialect) conn, autoclose = raw_connection_from(engine_or_conn) cursor = conn.cursor() query = cursor.mogrify(compiled.string, compiled.params).decode() formatted_flags = '({})'.format(format_flags(flags)) if flags else '' copy = 'COPY ({}) TO STDOUT {}'.format(query, formatted_flags) cursor.copy_expert(copy, dest) if autoclose: conn.close()
[ "def", "copy_to", "(", "source", ",", "dest", ",", "engine_or_conn", ",", "*", "*", "flags", ")", ":", "dialect", "=", "postgresql", ".", "dialect", "(", ")", "statement", "=", "getattr", "(", "source", ",", "'statement'", ",", "source", ")", "compiled", "=", "statement", ".", "compile", "(", "dialect", "=", "dialect", ")", "conn", ",", "autoclose", "=", "raw_connection_from", "(", "engine_or_conn", ")", "cursor", "=", "conn", ".", "cursor", "(", ")", "query", "=", "cursor", ".", "mogrify", "(", "compiled", ".", "string", ",", "compiled", ".", "params", ")", ".", "decode", "(", ")", "formatted_flags", "=", "'({})'", ".", "format", "(", "format_flags", "(", "flags", ")", ")", "if", "flags", "else", "''", "copy", "=", "'COPY ({}) TO STDOUT {}'", ".", "format", "(", "query", ",", "formatted_flags", ")", "cursor", ".", "copy_expert", "(", "copy", ",", "dest", ")", "if", "autoclose", ":", "conn", ".", "close", "(", ")" ]
Export a query or select to a file. For flags, see the PostgreSQL documentation at http://www.postgresql.org/docs/9.5/static/sql-copy.html. Examples: :: select = MyTable.select() with open('/path/to/file.tsv', 'w') as fp: copy_to(select, fp, conn) query = session.query(MyModel) with open('/path/to/file/csv', 'w') as fp: copy_to(query, fp, engine, format='csv', null='.') :param source: SQLAlchemy query or select :param dest: Destination file pointer, in write mode :param engine_or_conn: SQLAlchemy engine, connection, or raw_connection :param **flags: Options passed through to COPY If an existing connection is passed to `engine_or_conn`, it is the caller's responsibility to commit and close.
[ "Export", "a", "query", "or", "select", "to", "a", "file", ".", "For", "flags", "see", "the", "PostgreSQL", "documentation", "at", "http", ":", "//", "www", ".", "postgresql", ".", "org", "/", "docs", "/", "9", ".", "5", "/", "static", "/", "sql", "-", "copy", ".", "html", "." ]
python
train
41.6875
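A sketch of the query-compilation half of copy_to (requires SQLAlchemy 1.4 or newer, no database connection needed); the COPY options string is written out by hand here, whereas copy_to assembles it from the keyword flags.

from sqlalchemy import Column, Integer, MetaData, String, Table, select
from sqlalchemy.dialects import postgresql

metadata = MetaData()
my_table = Table(
    "my_table", metadata,
    Column("id", Integer, primary_key=True),
    Column("name", String),
)
statement = select(my_table)
compiled = statement.compile(dialect=postgresql.dialect())
print("COPY ({}) TO STDOUT (FORMAT csv)".format(compiled.string))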
samjabrahams/anchorhub
anchorhub/util/hasattrs.py
https://github.com/samjabrahams/anchorhub/blob/5ade359b08297d4003a5f477389c01de9e634b54/anchorhub/util/hasattrs.py#L8-L21
def hasattrs(object, *names): """ Takes in an object and a variable length amount of named attributes, and checks to see if the object has each property. If any of the attributes are missing, this returns false. :param object: an object that may or may not contain the listed attributes :param names: a variable amount of attribute names to check for :return: True if the object contains each named attribute, false otherwise """ for name in names: if not hasattr(object, name): return False return True
[ "def", "hasattrs", "(", "object", ",", "*", "names", ")", ":", "for", "name", "in", "names", ":", "if", "not", "hasattr", "(", "object", ",", "name", ")", ":", "return", "False", "return", "True" ]
Takes in an object and a variable length amount of named attributes, and checks to see if the object has each property. If any of the attributes are missing, this returns false. :param object: an object that may or may not contain the listed attributes :param names: a variable amount of attribute names to check for :return: True if the object contains each named attribute, false otherwise
[ "Takes", "in", "an", "object", "and", "a", "variable", "length", "amount", "of", "named", "attributes", "and", "checks", "to", "see", "if", "the", "object", "has", "each", "property", ".", "If", "any", "of", "the", "attributes", "are", "missing", "this", "returns", "false", "." ]
python
train
39.357143
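The same check expressed with the built-in all(), followed by a tiny usage example.

def hasattrs(obj, *names):
    return all(hasattr(obj, name) for name in names)

class Point:
    x = 0
    y = 0

print(hasattrs(Point, "x", "y"))   # True
print(hasattrs(Point, "x", "z"))   # False: 'z' is missing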
pysathq/pysat
pysat/solvers.py
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/pysat/solvers.py#L2520-L2527
def get_core(self): """ Get an unsatisfiable core if the formula was previously unsatisfied. """ if self.maplesat and self.status == False: return pysolvers.maplesat_core(self.maplesat)
[ "def", "get_core", "(", "self", ")", ":", "if", "self", ".", "maplesat", "and", "self", ".", "status", "==", "False", ":", "return", "pysolvers", ".", "maplesat_core", "(", "self", ".", "maplesat", ")" ]
Get an unsatisfiable core if the formula was previously unsatisfied.
[ "Get", "an", "unsatisfiable", "core", "if", "the", "formula", "was", "previously", "unsatisfied", "." ]
python
train
29.875
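A small usage sketch with PySAT, shown here with the Glucose3 backend (other backends, including the Maplesat wrapper above, expose the same solve/get_core interface): solve under assumptions and, when the result is unsatisfiable, request a core drawn from those assumptions.

from pysat.solvers import Glucose3

with Glucose3() as solver:
    solver.add_clause([-1, 2])    # 1 implies 2
    solver.add_clause([-2, 3])    # 2 implies 3
    print(solver.solve(assumptions=[1, -3]))   # False: the assumptions clash
    print(solver.get_core())      # an unsatisfiable subset of the assumptions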
nerandell/cauldron
cauldron/sql.py
https://github.com/nerandell/cauldron/blob/d363bac763781bb2da18debfa0fdd4be28288b92/cauldron/sql.py#L226-L242
def insert(cls, cur, table: str, values: dict): """ Creates an insert statement with only chosen fields Args: table: a string indicating the name of the table values: a dict of fields and values to be inserted Returns: A 'Record' object with table columns as properties """ keys = cls._COMMA.join(values.keys()) value_place_holder = cls._PLACEHOLDER * len(values) query = cls._insert_string.format(table, keys, value_place_holder[:-1]) yield from cur.execute(query, tuple(values.values())) return (yield from cur.fetchone())
[ "def", "insert", "(", "cls", ",", "cur", ",", "table", ":", "str", ",", "values", ":", "dict", ")", ":", "keys", "=", "cls", ".", "_COMMA", ".", "join", "(", "values", ".", "keys", "(", ")", ")", "value_place_holder", "=", "cls", ".", "_PLACEHOLDER", "*", "len", "(", "values", ")", "query", "=", "cls", ".", "_insert_string", ".", "format", "(", "table", ",", "keys", ",", "value_place_holder", "[", ":", "-", "1", "]", ")", "yield", "from", "cur", ".", "execute", "(", "query", ",", "tuple", "(", "values", ".", "values", "(", ")", ")", ")", "return", "(", "yield", "from", "cur", ".", "fetchone", "(", ")", ")" ]
Creates an insert statement with only chosen fields Args: table: a string indicating the name of the table values: a dict of fields and values to be inserted Returns: A 'Record' object with table columns as properties
[ "Creates", "an", "insert", "statement", "with", "only", "chosen", "fields" ]
python
valid
36.882353
richardkiss/pycoin
pycoin/solve/ConstraintSolver.py
https://github.com/richardkiss/pycoin/blob/1e8d0d9fe20ce0347b97847bb529cd1bd84c7442/pycoin/solve/ConstraintSolver.py#L56-L70
def constraint_matches(self, c, m): """ Return dict noting the substitution values (or False for no match) """ if isinstance(m, tuple): d = {} if isinstance(c, Operator) and c._op_name == m[0]: for c1, m1 in zip(c._args, m[1:]): r = self.constraint_matches(c1, m1) if r is False: return r d.update(r) return d return False return m.match(c)
[ "def", "constraint_matches", "(", "self", ",", "c", ",", "m", ")", ":", "if", "isinstance", "(", "m", ",", "tuple", ")", ":", "d", "=", "{", "}", "if", "isinstance", "(", "c", ",", "Operator", ")", "and", "c", ".", "_op_name", "==", "m", "[", "0", "]", ":", "for", "c1", ",", "m1", "in", "zip", "(", "c", ".", "_args", ",", "m", "[", "1", ":", "]", ")", ":", "r", "=", "self", ".", "constraint_matches", "(", "c1", ",", "m1", ")", "if", "r", "is", "False", ":", "return", "r", "d", ".", "update", "(", "r", ")", "return", "d", "return", "False", "return", "m", ".", "match", "(", "c", ")" ]
Return dict noting the substitution values (or False for no match)
[ "Return", "dict", "noting", "the", "substitution", "values", "(", "or", "False", "for", "no", "match", ")" ]
python
train
34.533333
opendatateam/udata
udata/harvest/actions.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/actions.py#L45-L49
def paginate_sources(owner=None, page=1, page_size=DEFAULT_PAGE_SIZE):
    '''Paginate harvest sources'''
    sources = _sources_queryset(owner=owner)
    page = max(page or 1, 1)
    return sources.paginate(page, page_size)
[ "def", "paginate_sources", "(", "owner", "=", "None", ",", "page", "=", "1", ",", "page_size", "=", "DEFAULT_PAGE_SIZE", ")", ":", "sources", "=", "_sources_queryset", "(", "owner", "=", "owner", ")", "page", "=", "max", "(", "page", "or", "1", ",", "1", ")", "return", "sources", ".", "paginate", "(", "page", ",", "page_size", ")" ]
Paginate harvest sources
[ "Paginate", "harvest", "sources" ]
python
train
44
christophertbrown/bioscripts
ctbBio/subset_sam.py
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/subset_sam.py#L14-L37
def sort_sam(sam, sort): """ sort sam file """ tempdir = '%s/' % (os.path.abspath(sam).rsplit('/', 1)[0]) if sort is True: mapping = '%s.sorted.sam' % (sam.rsplit('.', 1)[0]) if sam != '-': if os.path.exists(mapping) is False: os.system("\ sort -k1 --buffer-size=%sG -T %s -o %s %s\ " % (sbuffer, tempdir, mapping, sam)) else: mapping = 'stdin-sam.sorted.sam' p = Popen("sort -k1 --buffer-size=%sG -T %s -o %s" \ % (sbuffer, tempdir, mapping), stdin = sys.stdin, shell = True) p.communicate() mapping = open(mapping) else: if sam == '-': mapping = sys.stdin else: mapping = open(sam) return mapping
[ "def", "sort_sam", "(", "sam", ",", "sort", ")", ":", "tempdir", "=", "'%s/'", "%", "(", "os", ".", "path", ".", "abspath", "(", "sam", ")", ".", "rsplit", "(", "'/'", ",", "1", ")", "[", "0", "]", ")", "if", "sort", "is", "True", ":", "mapping", "=", "'%s.sorted.sam'", "%", "(", "sam", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "0", "]", ")", "if", "sam", "!=", "'-'", ":", "if", "os", ".", "path", ".", "exists", "(", "mapping", ")", "is", "False", ":", "os", ".", "system", "(", "\"\\\n sort -k1 --buffer-size=%sG -T %s -o %s %s\\\n \"", "%", "(", "sbuffer", ",", "tempdir", ",", "mapping", ",", "sam", ")", ")", "else", ":", "mapping", "=", "'stdin-sam.sorted.sam'", "p", "=", "Popen", "(", "\"sort -k1 --buffer-size=%sG -T %s -o %s\"", "%", "(", "sbuffer", ",", "tempdir", ",", "mapping", ")", ",", "stdin", "=", "sys", ".", "stdin", ",", "shell", "=", "True", ")", "p", ".", "communicate", "(", ")", "mapping", "=", "open", "(", "mapping", ")", "else", ":", "if", "sam", "==", "'-'", ":", "mapping", "=", "sys", ".", "stdin", "else", ":", "mapping", "=", "open", "(", "sam", ")", "return", "mapping" ]
sort sam file
[ "sort", "sam", "file" ]
python
train
33.375
ggravlingen/pytradfri
pytradfri/gateway.py
https://github.com/ggravlingen/pytradfri/blob/63750fa8fb27158c013d24865cdaa7fb82b3ab53/pytradfri/gateway.py#L93-L104
def get_gateway_info(self): """ Return the gateway info. Returns a Command. """ def process_result(result): return GatewayInfo(result) return Command('get', [ROOT_GATEWAY, ATTR_GATEWAY_INFO], process_result=process_result)
[ "def", "get_gateway_info", "(", "self", ")", ":", "def", "process_result", "(", "result", ")", ":", "return", "GatewayInfo", "(", "result", ")", "return", "Command", "(", "'get'", ",", "[", "ROOT_GATEWAY", ",", "ATTR_GATEWAY_INFO", "]", ",", "process_result", "=", "process_result", ")" ]
Return the gateway info. Returns a Command.
[ "Return", "the", "gateway", "info", "." ]
python
train
26.583333
numenta/htmresearch
htmresearch/frameworks/layers/l2_l4_inference.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/l2_l4_inference.py#L950-L962
def _locateConvergencePoint(stats, minOverlap, maxOverlap): """ Walk backwards through stats until you locate the first point that diverges from target overlap values. We need this to handle cases where it might get to target values, diverge, and then get back again. We want the last convergence point. """ for i, v in enumerate(stats[::-1]): if not (v >= minOverlap and v <= maxOverlap): return len(stats) - i + 1 # Never differs - converged in one iteration return 1
[ "def", "_locateConvergencePoint", "(", "stats", ",", "minOverlap", ",", "maxOverlap", ")", ":", "for", "i", ",", "v", "in", "enumerate", "(", "stats", "[", ":", ":", "-", "1", "]", ")", ":", "if", "not", "(", "v", ">=", "minOverlap", "and", "v", "<=", "maxOverlap", ")", ":", "return", "len", "(", "stats", ")", "-", "i", "+", "1", "# Never differs - converged in one iteration", "return", "1" ]
Walk backwards through stats until you locate the first point that diverges from target overlap values. We need this to handle cases where it might get to target values, diverge, and then get back again. We want the last convergence point.
[ "Walk", "backwards", "through", "stats", "until", "you", "locate", "the", "first", "point", "that", "diverges", "from", "target", "overlap", "values", ".", "We", "need", "this", "to", "handle", "cases", "where", "it", "might", "get", "to", "target", "values", "diverge", "and", "then", "get", "back", "again", ".", "We", "want", "the", "last", "convergence", "point", "." ]
python
train
39.230769
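The helper above scans the stats list from the end and reports where the values last left the [minOverlap, maxOverlap] band. A hedged re-typing of the same loop with a made-up input shows the walk-backwards idea; the sample values are invented for illustration.

# Illustrative re-typing of the walk-backwards idea; the sample stats
# values are made up for the example.
def locate_convergence_point(stats, min_overlap, max_overlap):
    for i, v in enumerate(stats[::-1]):
        if not (min_overlap <= v <= max_overlap):
            # i steps from the end is the first value outside the band
            return len(stats) - i + 1
    return 1  # never diverged: converged on the first iteration

stats = [10, 25, 38, 40, 39, 40, 40]
# 38 at index 2 is the last value outside the 39..40 band,
# so the helper's arithmetic reports 4.
print(locate_convergence_point(stats, 39, 40))  # -> 4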
synw/dataswim
dataswim/db/select.py
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/db/select.py#L16-L33
def getall(self, table):
    """
    Get all rows values for a table
    """
    try:
        self._check_db()
    except Exception as e:
        self.err(e, "Can not connect to database")
        return
    if table not in self.db.tables:
        self.warning("The table " + table + " does not exist")
        return
    try:
        res = self.db[table].all()
        df = pd.DataFrame(list(res))
        return df
    except Exception as e:
        self.err(e, "Error retrieving data in table")
[ "def", "getall", "(", "self", ",", "table", ")", ":", "try", ":", "self", ".", "_check_db", "(", ")", "except", "Exception", "as", "e", ":", "self", ".", "err", "(", "e", ",", "\"Can not connect to database\"", ")", "return", "if", "table", "not", "in", "self", ".", "db", ".", "tables", ":", "self", ".", "warning", "(", "\"The table \"", "+", "table", "+", "\" does not exists\"", ")", "return", "try", ":", "res", "=", "self", ".", "db", "[", "table", "]", ".", "all", "(", ")", "df", "=", "pd", ".", "DataFrame", "(", "list", "(", "res", ")", ")", "return", "df", "except", "Exception", "as", "e", ":", "self", ".", "err", "(", "e", ",", "\"Error retrieving data in table\"", ")" ]
Get all rows values for a table
[ "Get", "all", "rows", "values", "for", "a", "table" ]
python
train
30.5
CZ-NIC/yangson
yangson/instance.py
https://github.com/CZ-NIC/yangson/blob/a4b9464041fa8b28f6020a420ababf18fddf5d4a/yangson/instance.py#L1043-L1064
def _key_values(self, sn: "SequenceNode") -> Union[EntryKeys, EntryValue]: """Parse leaf-list value or list keys.""" try: keys = self.up_to("/") except EndOfInput: keys = self.remaining() if not keys: raise UnexpectedInput(self, "entry value or keys") if isinstance(sn, LeafListNode): return EntryValue(unquote(keys)) ks = keys.split(",") try: if len(ks) != len(sn.keys): raise UnexpectedInput(self, f"exactly {len(sn.keys)} keys") except AttributeError: raise BadSchemaNodeType(sn.qual_name, "list") sel = {} for j in range(len(ks)): knod = sn.get_data_child(*sn.keys[j]) val = unquote(ks[j]) sel[(knod.name, None if knod.ns == sn.ns else knod.ns)] = val return EntryKeys(sel)
[ "def", "_key_values", "(", "self", ",", "sn", ":", "\"SequenceNode\"", ")", "->", "Union", "[", "EntryKeys", ",", "EntryValue", "]", ":", "try", ":", "keys", "=", "self", ".", "up_to", "(", "\"/\"", ")", "except", "EndOfInput", ":", "keys", "=", "self", ".", "remaining", "(", ")", "if", "not", "keys", ":", "raise", "UnexpectedInput", "(", "self", ",", "\"entry value or keys\"", ")", "if", "isinstance", "(", "sn", ",", "LeafListNode", ")", ":", "return", "EntryValue", "(", "unquote", "(", "keys", ")", ")", "ks", "=", "keys", ".", "split", "(", "\",\"", ")", "try", ":", "if", "len", "(", "ks", ")", "!=", "len", "(", "sn", ".", "keys", ")", ":", "raise", "UnexpectedInput", "(", "self", ",", "f\"exactly {len(sn.keys)} keys\"", ")", "except", "AttributeError", ":", "raise", "BadSchemaNodeType", "(", "sn", ".", "qual_name", ",", "\"list\"", ")", "sel", "=", "{", "}", "for", "j", "in", "range", "(", "len", "(", "ks", ")", ")", ":", "knod", "=", "sn", ".", "get_data_child", "(", "*", "sn", ".", "keys", "[", "j", "]", ")", "val", "=", "unquote", "(", "ks", "[", "j", "]", ")", "sel", "[", "(", "knod", ".", "name", ",", "None", "if", "knod", ".", "ns", "==", "sn", ".", "ns", "else", "knod", ".", "ns", ")", "]", "=", "val", "return", "EntryKeys", "(", "sel", ")" ]
Parse leaf-list value or list keys.
[ "Parse", "leaf", "-", "list", "value", "or", "list", "keys", "." ]
python
train
39.454545
AguaClara/aguaclara
aguaclara/play.py
https://github.com/AguaClara/aguaclara/blob/8dd4e734768b166a7fc2b60388a24df2f93783fc/aguaclara/play.py#L43-L51
def set_sig_figs(n=4):
    """Set the number of significant figures used to print Pint, Pandas, and
    NumPy quantities.

    Args:
        n (int): Number of significant figures to display.
    """
    u.default_format = '.' + str(n) + 'g'
    pd.options.display.float_format = ('{:,.' + str(n) + '}').format
[ "def", "set_sig_figs", "(", "n", "=", "4", ")", ":", "u", ".", "default_format", "=", "'.'", "+", "str", "(", "n", ")", "+", "'g'", "pd", ".", "options", ".", "display", ".", "float_format", "=", "(", "'{:,.'", "+", "str", "(", "n", ")", "+", "'}'", ")", ".", "format" ]
Set the number of significant figures used to print Pint, Pandas, and NumPy quantities. Args: n (int): Number of significant figures to display.
[ "Set", "the", "number", "of", "significant", "figures", "used", "to", "print", "Pint", "Pandas", "and", "NumPy", "quantities", "." ]
python
train
33.555556
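The function above just derives Pint's default format and pandas' display format from a single significant-figure count. A small sketch of the same idea against a locally created unit registry follows; the registry and sample numbers are local to the example, not aguaclara's own objects.

# Sketch of the underlying idea: drive Pint and pandas display precision
# from one significant-figure count. Registry and sample data are local
# stand-ins.
import pandas as pd
import pint

ureg = pint.UnitRegistry()

def set_sig_figs(n=4):
    ureg.default_format = '.' + str(n) + 'g'
    pd.options.display.float_format = ('{:,.' + str(n) + '}').format

set_sig_figs(3)
print(1.23456789 * ureg.meter)            # -> 1.23 meter
print(pd.Series([1234.56789, 0.000123456]))  # floats shown with 3 sig figs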
Qiskit/qiskit-terra
qiskit/tools/qi/qi.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/tools/qi/qi.py#L174-L207
def vectorize(density_matrix, method='col'): """Flatten an operator to a vector in a specified basis. Args: density_matrix (ndarray): a density matrix. method (str): the method of vectorization. Allowed values are - 'col' (default) flattens to column-major vector. - 'row' flattens to row-major vector. - 'pauli'flattens in the n-qubit Pauli basis. - 'pauli-weights': flattens in the n-qubit Pauli basis ordered by weight. Returns: ndarray: the resulting vector. Raises: Exception: if input state is not a n-qubit state """ density_matrix = np.array(density_matrix) if method == 'col': return density_matrix.flatten(order='F') elif method == 'row': return density_matrix.flatten(order='C') elif method in ['pauli', 'pauli_weights']: num = int(np.log2(len(density_matrix))) # number of qubits if len(density_matrix) != 2**num: raise Exception('Input state must be n-qubit state') if method == 'pauli_weights': pgroup = pauli_group(num, case='weight') else: pgroup = pauli_group(num, case='tensor') vals = [np.trace(np.dot(p.to_matrix(), density_matrix)) for p in pgroup] return np.array(vals) return None
[ "def", "vectorize", "(", "density_matrix", ",", "method", "=", "'col'", ")", ":", "density_matrix", "=", "np", ".", "array", "(", "density_matrix", ")", "if", "method", "==", "'col'", ":", "return", "density_matrix", ".", "flatten", "(", "order", "=", "'F'", ")", "elif", "method", "==", "'row'", ":", "return", "density_matrix", ".", "flatten", "(", "order", "=", "'C'", ")", "elif", "method", "in", "[", "'pauli'", ",", "'pauli_weights'", "]", ":", "num", "=", "int", "(", "np", ".", "log2", "(", "len", "(", "density_matrix", ")", ")", ")", "# number of qubits", "if", "len", "(", "density_matrix", ")", "!=", "2", "**", "num", ":", "raise", "Exception", "(", "'Input state must be n-qubit state'", ")", "if", "method", "==", "'pauli_weights'", ":", "pgroup", "=", "pauli_group", "(", "num", ",", "case", "=", "'weight'", ")", "else", ":", "pgroup", "=", "pauli_group", "(", "num", ",", "case", "=", "'tensor'", ")", "vals", "=", "[", "np", ".", "trace", "(", "np", ".", "dot", "(", "p", ".", "to_matrix", "(", ")", ",", "density_matrix", ")", ")", "for", "p", "in", "pgroup", "]", "return", "np", ".", "array", "(", "vals", ")", "return", "None" ]
Flatten an operator to a vector in a specified basis. Args: density_matrix (ndarray): a density matrix. method (str): the method of vectorization. Allowed values are - 'col' (default) flattens to column-major vector. - 'row' flattens to row-major vector. - 'pauli' flattens in the n-qubit Pauli basis. - 'pauli-weights': flattens in the n-qubit Pauli basis ordered by weight. Returns: ndarray: the resulting vector. Raises: Exception: if input state is not an n-qubit state
[ "Flatten", "an", "operator", "to", "a", "vector", "in", "a", "specified", "basis", "." ]
python
test
38.941176
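For the 'col' and 'row' branches of the record above, the flattening is simply NumPy's Fortran-order versus C-order ravel. The sketch below shows the difference on a made-up 2x2 matrix; the Pauli branches are left aside since they need pauli_group.

# Column-major ('F') vs row-major ('C') flattening, as used by the
# 'col' and 'row' branches above; the 2x2 matrix is a made-up example.
import numpy as np

rho = np.array([[1, 2],
                [3, 4]])

print(rho.flatten(order='F'))  # column-major -> [1 3 2 4]
print(rho.flatten(order='C'))  # row-major    -> [1 2 3 4]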
sdispater/pendulum
pendulum/date.py
https://github.com/sdispater/pendulum/blob/94d28b0d3cb524ae02361bd1ed7ea03e2e655e4e/pendulum/date.py#L216-L242
def add(self, years=0, months=0, weeks=0, days=0): """ Add duration to the instance. :param years: The number of years :type years: int :param months: The number of months :type months: int :param weeks: The number of weeks :type weeks: int :param days: The number of days :type days: int :rtype: Date """ dt = add_duration( date(self.year, self.month, self.day), years=years, months=months, weeks=weeks, days=days, ) return self.__class__(dt.year, dt.month, dt.day)
[ "def", "add", "(", "self", ",", "years", "=", "0", ",", "months", "=", "0", ",", "weeks", "=", "0", ",", "days", "=", "0", ")", ":", "dt", "=", "add_duration", "(", "date", "(", "self", ".", "year", ",", "self", ".", "month", ",", "self", ".", "day", ")", ",", "years", "=", "years", ",", "months", "=", "months", ",", "weeks", "=", "weeks", ",", "days", "=", "days", ",", ")", "return", "self", ".", "__class__", "(", "dt", ".", "year", ",", "dt", ".", "month", ",", "dt", ".", "day", ")" ]
Add duration to the instance. :param years: The number of years :type years: int :param months: The number of months :type months: int :param weeks: The number of weeks :type weeks: int :param days: The number of days :type days: int :rtype: Date
[ "Add", "duration", "to", "the", "instance", "." ]
python
train
23.185185
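A brief usage sketch for the Date.add method above. The pendulum.date entry point is an assumption based on common pendulum usage; the record itself only shows the method.

# Usage sketch for Date.add; pendulum.date() as the constructor is an
# assumption, not shown in the record.
import pendulum

d = pendulum.date(2021, 1, 31)
print(d.add(months=1))          # month-end handling -> 2021-02-28
print(d.add(years=1, days=2))   # -> 2022-02-02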
google/grr
grr/server/grr_response_server/aff4.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/aff4.py#L1094-L1148
def MultiDelete(self, urns, token=None): """Drop all the information about given objects. DANGEROUS! This recursively deletes all objects contained within the specified URN. Args: urns: Urns of objects to remove. token: The Security Token to use for opening this item. Raises: ValueError: If one of the urns is too short. This is a safety check to ensure the root is not removed. """ urns = [rdfvalue.RDFURN(urn) for urn in urns] if token is None: token = data_store.default_token for urn in urns: if urn.Path() == "/": raise ValueError("Can't delete root URN. Please enter a valid URN") deletion_pool = DeletionPool(token=token) deletion_pool.MultiMarkForDeletion(urns) marked_root_urns = deletion_pool.root_urns_for_deletion marked_urns = deletion_pool.urns_for_deletion logging.debug(u"Found %d objects to remove when removing %s", len(marked_urns), urns) logging.debug(u"Removing %d root objects when removing %s: %s", len(marked_root_urns), urns, marked_root_urns) pool = data_store.DB.GetMutationPool() for root in marked_root_urns: # Only the index of the parent object should be updated. Everything # below the target object (along with indexes) is going to be # deleted. self._DeleteChildFromIndex(root, mutation_pool=pool) for urn_to_delete in marked_urns: try: self.intermediate_cache.ExpireObject(urn_to_delete.Path()) except KeyError: pass pool.DeleteSubjects(marked_urns) pool.Flush() # Ensure this is removed from the cache as well. self.Flush() logging.debug("Removed %d objects", len(marked_urns))
[ "def", "MultiDelete", "(", "self", ",", "urns", ",", "token", "=", "None", ")", ":", "urns", "=", "[", "rdfvalue", ".", "RDFURN", "(", "urn", ")", "for", "urn", "in", "urns", "]", "if", "token", "is", "None", ":", "token", "=", "data_store", ".", "default_token", "for", "urn", "in", "urns", ":", "if", "urn", ".", "Path", "(", ")", "==", "\"/\"", ":", "raise", "ValueError", "(", "\"Can't delete root URN. Please enter a valid URN\"", ")", "deletion_pool", "=", "DeletionPool", "(", "token", "=", "token", ")", "deletion_pool", ".", "MultiMarkForDeletion", "(", "urns", ")", "marked_root_urns", "=", "deletion_pool", ".", "root_urns_for_deletion", "marked_urns", "=", "deletion_pool", ".", "urns_for_deletion", "logging", ".", "debug", "(", "u\"Found %d objects to remove when removing %s\"", ",", "len", "(", "marked_urns", ")", ",", "urns", ")", "logging", ".", "debug", "(", "u\"Removing %d root objects when removing %s: %s\"", ",", "len", "(", "marked_root_urns", ")", ",", "urns", ",", "marked_root_urns", ")", "pool", "=", "data_store", ".", "DB", ".", "GetMutationPool", "(", ")", "for", "root", "in", "marked_root_urns", ":", "# Only the index of the parent object should be updated. Everything", "# below the target object (along with indexes) is going to be", "# deleted.", "self", ".", "_DeleteChildFromIndex", "(", "root", ",", "mutation_pool", "=", "pool", ")", "for", "urn_to_delete", "in", "marked_urns", ":", "try", ":", "self", ".", "intermediate_cache", ".", "ExpireObject", "(", "urn_to_delete", ".", "Path", "(", ")", ")", "except", "KeyError", ":", "pass", "pool", ".", "DeleteSubjects", "(", "marked_urns", ")", "pool", ".", "Flush", "(", ")", "# Ensure this is removed from the cache as well.", "self", ".", "Flush", "(", ")", "logging", ".", "debug", "(", "\"Removed %d objects\"", ",", "len", "(", "marked_urns", ")", ")" ]
Drop all the information about given objects. DANGEROUS! This recursively deletes all objects contained within the specified URN. Args: urns: Urns of objects to remove. token: The Security Token to use for opening this item. Raises: ValueError: If one of the urns is too short. This is a safety check to ensure the root is not removed.
[ "Drop", "all", "the", "information", "about", "given", "objects", "." ]
python
train
30.872727
RudolfCardinal/pythonlib
cardinal_pythonlib/rnc_db.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_db.py#L2334-L2343
def fetch_fieldnames(self, sql: str, *args) -> List[str]: """Executes SQL; returns just the output fieldnames.""" self.ensure_db_open() cursor = self.db.cursor() self.db_exec_with_cursor(cursor, sql, *args) try: return [i[0] for i in cursor.description] except: # nopep8 log.exception("fetch_fieldnames: SQL was: " + sql) raise
[ "def", "fetch_fieldnames", "(", "self", ",", "sql", ":", "str", ",", "*", "args", ")", "->", "List", "[", "str", "]", ":", "self", ".", "ensure_db_open", "(", ")", "cursor", "=", "self", ".", "db", ".", "cursor", "(", ")", "self", ".", "db_exec_with_cursor", "(", "cursor", ",", "sql", ",", "*", "args", ")", "try", ":", "return", "[", "i", "[", "0", "]", "for", "i", "in", "cursor", ".", "description", "]", "except", ":", "# nopep8", "log", ".", "exception", "(", "\"fetch_fieldnames: SQL was: \"", "+", "sql", ")", "raise" ]
Executes SQL; returns just the output fieldnames.
[ "Executes", "SQL", ";", "returns", "just", "the", "output", "fieldnames", "." ]
python
train
40.3
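The record above relies on DB-API cursor.description to recover the output column names after executing a query. A self-contained sqlite3 sketch of the same trick follows; the in-memory database and sample query are stand-ins for the example.

# cursor.description[i][0] is the column name in any DB-API 2.0 driver;
# sqlite3 and the sample query are stand-ins for the example.
import sqlite3

conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.execute("SELECT 1 AS id, 'ada' AS name")
fieldnames = [col[0] for col in cur.description]
print(fieldnames)  # -> ['id', 'name']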
foutaise/texttable
texttable.py
https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L361-L367
def header(self, array):
    """Specify the header of the table
    """
    self._check_row_size(array)
    self._header = list(map(obj2unicode, array))
    return self
[ "def", "header", "(", "self", ",", "array", ")", ":", "self", ".", "_check_row_size", "(", "array", ")", "self", ".", "_header", "=", "list", "(", "map", "(", "obj2unicode", ",", "array", ")", ")", "return", "self" ]
Specify the header of the table
[ "Specify", "the", "header", "of", "the", "table" ]
python
train
26.142857
bwohlberg/sporco
docs/source/docntbk.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/docs/source/docntbk.py#L219-L235
def rst_to_notebook(infile, outfile): """Convert an rst file to a notebook file.""" # Read infile into a string with open(infile, 'r') as fin: rststr = fin.read() # Convert string from rst to markdown mdfmt = 'markdown_github+tex_math_dollars+fenced_code_attributes' mdstr = pypandoc.convert_text(rststr, mdfmt, format='rst', extra_args=['--atx-headers']) # In links, replace .py extensions with .ipynb mdstr = re.sub(r'\(([^\)]+).py\)', r'(\1.ipynb)', mdstr) # Enclose the markdown within triple quotes and convert from # python to notebook mdstr = '"""' + mdstr + '"""' nb = py2jn.py_string_to_notebook(mdstr) py2jn.tools.write_notebook(nb, outfile, nbver=4)
[ "def", "rst_to_notebook", "(", "infile", ",", "outfile", ")", ":", "# Read infile into a string", "with", "open", "(", "infile", ",", "'r'", ")", "as", "fin", ":", "rststr", "=", "fin", ".", "read", "(", ")", "# Convert string from rst to markdown", "mdfmt", "=", "'markdown_github+tex_math_dollars+fenced_code_attributes'", "mdstr", "=", "pypandoc", ".", "convert_text", "(", "rststr", ",", "mdfmt", ",", "format", "=", "'rst'", ",", "extra_args", "=", "[", "'--atx-headers'", "]", ")", "# In links, replace .py extensions with .ipynb", "mdstr", "=", "re", ".", "sub", "(", "r'\\(([^\\)]+).py\\)'", ",", "r'(\\1.ipynb)'", ",", "mdstr", ")", "# Enclose the markdown within triple quotes and convert from", "# python to notebook", "mdstr", "=", "'\"\"\"'", "+", "mdstr", "+", "'\"\"\"'", "nb", "=", "py2jn", ".", "py_string_to_notebook", "(", "mdstr", ")", "py2jn", ".", "tools", ".", "write_notebook", "(", "nb", ",", "outfile", ",", "nbver", "=", "4", ")" ]
Convert an rst file to a notebook file.
[ "Convert", "an", "rst", "file", "to", "a", "notebook", "file", "." ]
python
train
43.470588
faucamp/python-gsmmodem
gsmmodem/modem.py
https://github.com/faucamp/python-gsmmodem/blob/834c68b1387ca2c91e2210faa8f75526b39723b5/gsmmodem/modem.py#L76-L85
def status(self): """ Status of this SMS. Can be ENROUTE, DELIVERED or FAILED The actual status report object may be accessed via the 'report' attribute if status is 'DELIVERED' or 'FAILED' """ if self.report == None: return SentSms.ENROUTE else: return SentSms.DELIVERED if self.report.deliveryStatus == StatusReport.DELIVERED else SentSms.FAILED
[ "def", "status", "(", "self", ")", ":", "if", "self", ".", "report", "==", "None", ":", "return", "SentSms", ".", "ENROUTE", "else", ":", "return", "SentSms", ".", "DELIVERED", "if", "self", ".", "report", ".", "deliveryStatus", "==", "StatusReport", ".", "DELIVERED", "else", "SentSms", ".", "FAILED" ]
Status of this SMS. Can be ENROUTE, DELIVERED or FAILED. The actual status report object may be accessed via the 'report' attribute if status is 'DELIVERED' or 'FAILED'
[ "Status", "of", "this", "SMS", ".", "Can", "be", "ENROUTE", "DELIVERED", "or", "FAILED", "The", "actual", "status", "report", "object", "may", "be", "accessed", "via", "the", "report", "attribute", "if", "status", "is", "DELIVERED", "or", "FAILED" ]
python
train
41.9
alexcouper/captainhook
captainhook/checkers/utils.py
https://github.com/alexcouper/captainhook/blob/5593ee8756dfa06959adb4320b4f6308277bb9d3/captainhook/checkers/utils.py#L121-L130
def configuration(self, plugin): """ Get plugin configuration. Return a tuple of (on|off|default, args) """ conf = self.config.get(plugin, "default;").split(';') if len(conf) == 1: conf.append('') return tuple(conf)
[ "def", "configuration", "(", "self", ",", "plugin", ")", ":", "conf", "=", "self", ".", "config", ".", "get", "(", "plugin", ",", "\"default;\"", ")", ".", "split", "(", "';'", ")", "if", "len", "(", "conf", ")", "==", "1", ":", "conf", ".", "append", "(", "''", ")", "return", "tuple", "(", "conf", ")" ]
Get plugin configuration. Return a tuple of (on|off|default, args)
[ "Get", "plugin", "configuration", "." ]
python
train
27.5
quantopian/empyrical
empyrical/stats.py
https://github.com/quantopian/empyrical/blob/badbdca75f5b293f28b5e947974894de041d6868/empyrical/stats.py#L937-L960
def _to_pandas(ob): """Convert an array-like to a pandas object. Parameters ---------- ob : array-like The object to convert. Returns ------- pandas_structure : pd.Series or pd.DataFrame The correct structure based on the dimensionality of the data. """ if isinstance(ob, (pd.Series, pd.DataFrame)): return ob if ob.ndim == 1: return pd.Series(ob) elif ob.ndim == 2: return pd.DataFrame(ob) else: raise ValueError( 'cannot convert array of dim > 2 to a pandas structure', )
[ "def", "_to_pandas", "(", "ob", ")", ":", "if", "isinstance", "(", "ob", ",", "(", "pd", ".", "Series", ",", "pd", ".", "DataFrame", ")", ")", ":", "return", "ob", "if", "ob", ".", "ndim", "==", "1", ":", "return", "pd", ".", "Series", "(", "ob", ")", "elif", "ob", ".", "ndim", "==", "2", ":", "return", "pd", ".", "DataFrame", "(", "ob", ")", "else", ":", "raise", "ValueError", "(", "'cannot convert array of dim > 2 to a pandas structure'", ",", ")" ]
Convert an array-like to a pandas object. Parameters ---------- ob : array-like The object to convert. Returns ------- pandas_structure : pd.Series or pd.DataFrame The correct structure based on the dimensionality of the data.
[ "Convert", "an", "array", "-", "like", "to", "a", "pandas", "object", "." ]
python
train
23.708333
housecanary/hc-api-python
housecanary/utilities.py
https://github.com/housecanary/hc-api-python/blob/2bb9e2208b34e8617575de45934357ee33b8531c/housecanary/utilities.py#L37-L73
def get_rate_limits(response): """Returns a list of rate limit information from a given response's headers.""" periods = response.headers['X-RateLimit-Period'] if not periods: return [] rate_limits = [] periods = periods.split(',') limits = response.headers['X-RateLimit-Limit'].split(',') remaining = response.headers['X-RateLimit-Remaining'].split(',') reset = response.headers['X-RateLimit-Reset'].split(',') for idx, period in enumerate(periods): rate_limit = {} limit_period = get_readable_time_string(period) rate_limit["period"] = limit_period rate_limit["period_seconds"] = period rate_limit["request_limit"] = limits[idx] rate_limit["requests_remaining"] = remaining[idx] reset_datetime = get_datetime_from_timestamp(reset[idx]) rate_limit["reset"] = reset_datetime right_now = datetime.now() if (reset_datetime is not None) and (right_now < reset_datetime): # add 1 second because of rounding seconds_remaining = (reset_datetime - right_now).seconds + 1 else: seconds_remaining = 0 rate_limit["reset_in_seconds"] = seconds_remaining rate_limit["time_to_reset"] = get_readable_time_string(seconds_remaining) rate_limits.append(rate_limit) return rate_limits
[ "def", "get_rate_limits", "(", "response", ")", ":", "periods", "=", "response", ".", "headers", "[", "'X-RateLimit-Period'", "]", "if", "not", "periods", ":", "return", "[", "]", "rate_limits", "=", "[", "]", "periods", "=", "periods", ".", "split", "(", "','", ")", "limits", "=", "response", ".", "headers", "[", "'X-RateLimit-Limit'", "]", ".", "split", "(", "','", ")", "remaining", "=", "response", ".", "headers", "[", "'X-RateLimit-Remaining'", "]", ".", "split", "(", "','", ")", "reset", "=", "response", ".", "headers", "[", "'X-RateLimit-Reset'", "]", ".", "split", "(", "','", ")", "for", "idx", ",", "period", "in", "enumerate", "(", "periods", ")", ":", "rate_limit", "=", "{", "}", "limit_period", "=", "get_readable_time_string", "(", "period", ")", "rate_limit", "[", "\"period\"", "]", "=", "limit_period", "rate_limit", "[", "\"period_seconds\"", "]", "=", "period", "rate_limit", "[", "\"request_limit\"", "]", "=", "limits", "[", "idx", "]", "rate_limit", "[", "\"requests_remaining\"", "]", "=", "remaining", "[", "idx", "]", "reset_datetime", "=", "get_datetime_from_timestamp", "(", "reset", "[", "idx", "]", ")", "rate_limit", "[", "\"reset\"", "]", "=", "reset_datetime", "right_now", "=", "datetime", ".", "now", "(", ")", "if", "(", "reset_datetime", "is", "not", "None", ")", "and", "(", "right_now", "<", "reset_datetime", ")", ":", "# add 1 second because of rounding", "seconds_remaining", "=", "(", "reset_datetime", "-", "right_now", ")", ".", "seconds", "+", "1", "else", ":", "seconds_remaining", "=", "0", "rate_limit", "[", "\"reset_in_seconds\"", "]", "=", "seconds_remaining", "rate_limit", "[", "\"time_to_reset\"", "]", "=", "get_readable_time_string", "(", "seconds_remaining", ")", "rate_limits", ".", "append", "(", "rate_limit", ")", "return", "rate_limits" ]
Returns a list of rate limit information from a given response's headers.
[ "Returns", "a", "list", "of", "rate", "limit", "information", "from", "a", "given", "response", "s", "headers", "." ]
python
train
36.054054
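The record above splits several comma-separated X-RateLimit-* headers in parallel and zips them back into per-period dicts. A reduced sketch of that parsing step follows; the header values are invented and a plain dict stands in for response.headers.

# Reduced sketch of the header-parsing step; values are invented and a
# plain dict stands in for response.headers.
headers = {
    'X-RateLimit-Period': '60,3600',
    'X-RateLimit-Limit': '100,1000',
    'X-RateLimit-Remaining': '99,995',
}

periods = headers['X-RateLimit-Period'].split(',')
limits = headers['X-RateLimit-Limit'].split(',')
remaining = headers['X-RateLimit-Remaining'].split(',')

rate_limits = [
    {'period_seconds': p, 'request_limit': l, 'requests_remaining': r}
    for p, l, r in zip(periods, limits, remaining)
]
print(rate_limits[0])
# -> {'period_seconds': '60', 'request_limit': '100', 'requests_remaining': '99'}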
evetrivia/thanatos
thanatos/database/universe.py
https://github.com/evetrivia/thanatos/blob/664c12a8ccf4d27ab0e06e0969bbb6381f74789c/thanatos/database/universe.py#L25-L37
def get_all_not_wh_regions(db_connection):
    """ Gets a list of all regions that are not WH regions.

    :return: A list of all regions not including wormhole regions.
        Results have regionID and regionName.
    :rtype: list
    """

    if not hasattr(get_all_not_wh_regions, '_results'):
        sql = 'CALL get_all_not_wh_regions();'
        results = execute_sql(sql, db_connection)
        get_all_not_wh_regions._results = results

    return get_all_not_wh_regions._results
[ "def", "get_all_not_wh_regions", "(", "db_connection", ")", ":", "if", "not", "hasattr", "(", "get_all_not_wh_regions", ",", "'_results'", ")", ":", "sql", "=", "'CALL get_all_not_wh_regions();'", "results", "=", "execute_sql", "(", "sql", ",", "db_connection", ")", "get_all_not_wh_regions", ".", "_results", "=", "results", "return", "get_all_not_wh_regions", ".", "_results" ]
Gets a list of all regions that are not WH regions. :return: A list of all regions not including wormhole regions. Results have regionID and regionName. :rtype: list
[ "Gets", "a", "list", "of", "all", "regions", "that", "are", "not", "WH", "regions", "." ]
python
train
36.076923
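The function above caches its query result as an attribute on the function object itself, so the stored procedure runs only once per process. A tiny sketch of that caching pattern follows, with a stand-in for the expensive database call.

# The same cache-on-the-function-object pattern, with a stand-in for
# the expensive database call.
def get_regions(db_connection):
    if not hasattr(get_regions, '_results'):
        print('querying...')                             # runs only once
        get_regions._results = ['Derelik', 'The Forge']  # stand-in result
    return get_regions._results

get_regions(None)   # prints 'querying...'
get_regions(None)   # served from the cached attribute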
lablup/backend.ai-client-py
src/ai/backend/client/cli/admin/images.py
https://github.com/lablup/backend.ai-client-py/blob/a063d774fea6f4350b89498c40d3c837ec3029a7/src/ai/backend/client/cli/admin/images.py#L40-L51
def rescan_images(registry): '''Update the kernel image metadata from all configured docker registries.''' with Session() as session: try: result = session.Image.rescanImages(registry) except Exception as e: print_error(e) sys.exit(1) if result['ok']: print("kernel image metadata updated") else: print("rescanning failed: {0}".format(result['msg']))
[ "def", "rescan_images", "(", "registry", ")", ":", "with", "Session", "(", ")", "as", "session", ":", "try", ":", "result", "=", "session", ".", "Image", ".", "rescanImages", "(", "registry", ")", "except", "Exception", "as", "e", ":", "print_error", "(", "e", ")", "sys", ".", "exit", "(", "1", ")", "if", "result", "[", "'ok'", "]", ":", "print", "(", "\"kernel image metadata updated\"", ")", "else", ":", "print", "(", "\"rescanning failed: {0}\"", ".", "format", "(", "result", "[", "'msg'", "]", ")", ")" ]
Update the kernel image metadata from all configured docker registries.
[ "Update", "the", "kernel", "image", "metadata", "from", "all", "configured", "docker", "registries", "." ]
python
train
36.583333
ivanyu/idx2numpy
idx2numpy/converters.py
https://github.com/ivanyu/idx2numpy/blob/9b88698314973226212181d1747dfad6c6974e51/idx2numpy/converters.py#L130-L139
def convert_to_file(file, ndarr): """ Writes the contents of the numpy.ndarray ndarr to file in IDX format. file is a file-like object (with write() method) or a file name. """ if isinstance(file, six_string_types): with open(file, 'wb') as fp: _internal_write(fp, ndarr) else: _internal_write(file, ndarr)
[ "def", "convert_to_file", "(", "file", ",", "ndarr", ")", ":", "if", "isinstance", "(", "file", ",", "six_string_types", ")", ":", "with", "open", "(", "file", ",", "'wb'", ")", "as", "fp", ":", "_internal_write", "(", "fp", ",", "ndarr", ")", "else", ":", "_internal_write", "(", "file", ",", "ndarr", ")" ]
Writes the contents of the numpy.ndarray ndarr to file in IDX format. file is a file-like object (with write() method) or a file name.
[ "Writes", "the", "contents", "of", "the", "numpy", ".", "ndarray", "ndarr", "to", "file", "in", "IDX", "format", ".", "file", "is", "a", "file", "-", "like", "object", "(", "with", "write", "()", "method", ")", "or", "a", "file", "name", "." ]
python
train
34.9
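A short round-trip sketch for the writer above. Using idx2numpy.convert_from_file as the matching reader is an assumption about the library's public API, though it is the usual counterpart.

# Round-trip sketch; idx2numpy.convert_from_file as the matching reader
# is assumed from the library's usual API.
import numpy as np
import idx2numpy

arr = np.arange(12, dtype=np.uint8).reshape(3, 4)
idx2numpy.convert_to_file('example.idx', arr)      # file-name form
back = idx2numpy.convert_from_file('example.idx')
print(np.array_equal(arr, back))  # -> True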
plivo/plivohelper-python
plivohelper.py
https://github.com/plivo/plivohelper-python/blob/a2f706d69e2138fbb973f792041341f662072d26/plivohelper.py#L307-L312
def conference_undeaf(self, call_params):
    """REST Conference Undeaf helper
    """
    path = '/' + self.api_version + '/ConferenceUndeaf/'
    method = 'POST'
    return self.request(path, method, call_params)
[ "def", "conference_undeaf", "(", "self", ",", "call_params", ")", ":", "path", "=", "'/'", "+", "self", ".", "api_version", "+", "'/ConferenceUndeaf/'", "method", "=", "'POST'", "return", "self", ".", "request", "(", "path", ",", "method", ",", "call_params", ")" ]
REST Conference Undeaf helper
[ "REST", "Conference", "Undeaf", "helper" ]
python
valid
38.166667
phaethon/kamene
kamene/contrib/gsm_um.py
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/contrib/gsm_um.py#L2677-L2686
def requestPdpContextActivation(AccessPointName_presence=0): """REQUEST PDP CONTEXT ACTIVATION Section 9.5.4""" a = TpPd(pd=0x8) b = MessageType(mesType=0x44) # 01000100 c = PacketDataProtocolAddress() packet = a / b / c if AccessPointName_presence is 1: d = AccessPointName(ieiAPN=0x28) packet = packet / d return packet
[ "def", "requestPdpContextActivation", "(", "AccessPointName_presence", "=", "0", ")", ":", "a", "=", "TpPd", "(", "pd", "=", "0x8", ")", "b", "=", "MessageType", "(", "mesType", "=", "0x44", ")", "# 01000100", "c", "=", "PacketDataProtocolAddress", "(", ")", "packet", "=", "a", "/", "b", "/", "c", "if", "AccessPointName_presence", "is", "1", ":", "d", "=", "AccessPointName", "(", "ieiAPN", "=", "0x28", ")", "packet", "=", "packet", "/", "d", "return", "packet" ]
REQUEST PDP CONTEXT ACTIVATION Section 9.5.4
[ "REQUEST", "PDP", "CONTEXT", "ACTIVATION", "Section", "9", ".", "5", ".", "4" ]
python
train
35.7
wangwenpei/fantasy
fantasy/__init__.py
https://github.com/wangwenpei/fantasy/blob/0fe92059bd868f14da84235beb05b217b1d46e4a/fantasy/__init__.py#L80-L105
def smart_account(app): """尝试使用内置方式构建账户""" if os.environ['FANTASY_ACTIVE_ACCOUNT'] == 'no': return from flask_security import SQLAlchemyUserDatastore, Security account_module_name, account_class_name = os.environ[ 'FANTASY_ACCOUNT_MODEL'].rsplit('.', 1) account_module = importlib.import_module(account_module_name) account_class = getattr(account_module, account_class_name) role_module_name, role_class_name = os.environ[ 'FANTASY_ROLE_MODEL'].rsplit('.', 1) role_module = importlib.import_module(role_module_name) role_class = getattr(role_module, role_class_name) r = True if os.environ[ 'FANTASY_ACCOUNT_SECURITY_MODE'] != 'no' else False Security(app, SQLAlchemyUserDatastore( app.db, account_class, role_class), register_blueprint=r) pass
[ "def", "smart_account", "(", "app", ")", ":", "if", "os", ".", "environ", "[", "'FANTASY_ACTIVE_ACCOUNT'", "]", "==", "'no'", ":", "return", "from", "flask_security", "import", "SQLAlchemyUserDatastore", ",", "Security", "account_module_name", ",", "account_class_name", "=", "os", ".", "environ", "[", "'FANTASY_ACCOUNT_MODEL'", "]", ".", "rsplit", "(", "'.'", ",", "1", ")", "account_module", "=", "importlib", ".", "import_module", "(", "account_module_name", ")", "account_class", "=", "getattr", "(", "account_module", ",", "account_class_name", ")", "role_module_name", ",", "role_class_name", "=", "os", ".", "environ", "[", "'FANTASY_ROLE_MODEL'", "]", ".", "rsplit", "(", "'.'", ",", "1", ")", "role_module", "=", "importlib", ".", "import_module", "(", "role_module_name", ")", "role_class", "=", "getattr", "(", "role_module", ",", "role_class_name", ")", "r", "=", "True", "if", "os", ".", "environ", "[", "'FANTASY_ACCOUNT_SECURITY_MODE'", "]", "!=", "'no'", "else", "False", "Security", "(", "app", ",", "SQLAlchemyUserDatastore", "(", "app", ".", "db", ",", "account_class", ",", "role_class", ")", ",", "register_blueprint", "=", "r", ")", "pass" ]
Try to build the account using the built-in approach
[ "尝试使用内置方式构建账户" ]
python
test
33.115385
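The record above resolves model classes from dotted paths held in environment variables. A minimal sketch of that importlib-plus-getattr lookup follows; the standard-library path is a made-up example standing in for a Flask model path.

# Minimal sketch of resolving a class from a 'package.module.ClassName'
# string, as the record does for FANTASY_ACCOUNT_MODEL; the example path
# points at the standard library instead of a Flask model.
import importlib

def resolve(dotted_path):
    module_name, class_name = dotted_path.rsplit('.', 1)
    module = importlib.import_module(module_name)
    return getattr(module, class_name)

OrderedDict = resolve('collections.OrderedDict')
print(OrderedDict([('a', 1)]))  # an OrderedDict with one entry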
wummel/linkchecker
linkcheck/logger/text.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/logger/text.py#L109-L136
def log_url (self, url_data): """Write url checking info.""" self.writeln() if self.has_part('url'): self.write_url(url_data) if url_data.name and self.has_part('name'): self.write_name(url_data) if url_data.parent_url and self.has_part('parenturl'): self.write_parent(url_data) if url_data.base_ref and self.has_part('base'): self.write_base(url_data) if url_data.url and self.has_part('realurl'): self.write_real(url_data) if url_data.checktime and self.has_part('checktime'): self.write_checktime(url_data) if url_data.dltime >= 0 and self.has_part('dltime'): self.write_dltime(url_data) if url_data.size >= 0 and self.has_part('dlsize'): self.write_size(url_data) if url_data.info and self.has_part('info'): self.write_info(url_data) if url_data.modified and self.has_part('modified'): self.write_modified(url_data) if url_data.warnings and self.has_part('warning'): self.write_warning(url_data) if self.has_part('result'): self.write_result(url_data) self.flush()
[ "def", "log_url", "(", "self", ",", "url_data", ")", ":", "self", ".", "writeln", "(", ")", "if", "self", ".", "has_part", "(", "'url'", ")", ":", "self", ".", "write_url", "(", "url_data", ")", "if", "url_data", ".", "name", "and", "self", ".", "has_part", "(", "'name'", ")", ":", "self", ".", "write_name", "(", "url_data", ")", "if", "url_data", ".", "parent_url", "and", "self", ".", "has_part", "(", "'parenturl'", ")", ":", "self", ".", "write_parent", "(", "url_data", ")", "if", "url_data", ".", "base_ref", "and", "self", ".", "has_part", "(", "'base'", ")", ":", "self", ".", "write_base", "(", "url_data", ")", "if", "url_data", ".", "url", "and", "self", ".", "has_part", "(", "'realurl'", ")", ":", "self", ".", "write_real", "(", "url_data", ")", "if", "url_data", ".", "checktime", "and", "self", ".", "has_part", "(", "'checktime'", ")", ":", "self", ".", "write_checktime", "(", "url_data", ")", "if", "url_data", ".", "dltime", ">=", "0", "and", "self", ".", "has_part", "(", "'dltime'", ")", ":", "self", ".", "write_dltime", "(", "url_data", ")", "if", "url_data", ".", "size", ">=", "0", "and", "self", ".", "has_part", "(", "'dlsize'", ")", ":", "self", ".", "write_size", "(", "url_data", ")", "if", "url_data", ".", "info", "and", "self", ".", "has_part", "(", "'info'", ")", ":", "self", ".", "write_info", "(", "url_data", ")", "if", "url_data", ".", "modified", "and", "self", ".", "has_part", "(", "'modified'", ")", ":", "self", ".", "write_modified", "(", "url_data", ")", "if", "url_data", ".", "warnings", "and", "self", ".", "has_part", "(", "'warning'", ")", ":", "self", ".", "write_warning", "(", "url_data", ")", "if", "self", ".", "has_part", "(", "'result'", ")", ":", "self", ".", "write_result", "(", "url_data", ")", "self", ".", "flush", "(", ")" ]
Write url checking info.
[ "Write", "url", "checking", "info", "." ]
python
train
43.035714
python-odin/odinweb
odinweb/decorators.py
https://github.com/python-odin/odinweb/blob/198424133584acc18cb41c8d18d91f803abc810f/odinweb/decorators.py#L503-L514
def create(callback=None, path=None, method=Method.POST, resource=None, tags=None, summary="Create a new resource", middleware=None): # type: (Callable, Path, Methods, Resource, Tags, str, List[Any]) -> Operation """ Decorator to configure an operation that creates a resource. """ def inner(c): op = ResourceOperation(c, path or NoPath, method, resource, tags, summary, middleware) op.responses.add(Response(HTTPStatus.CREATED, "{name} has been created")) op.responses.add(Response(HTTPStatus.BAD_REQUEST, "Validation failed.", Error)) return op return inner(callback) if callback else inner
[ "def", "create", "(", "callback", "=", "None", ",", "path", "=", "None", ",", "method", "=", "Method", ".", "POST", ",", "resource", "=", "None", ",", "tags", "=", "None", ",", "summary", "=", "\"Create a new resource\"", ",", "middleware", "=", "None", ")", ":", "# type: (Callable, Path, Methods, Resource, Tags, str, List[Any]) -> Operation", "def", "inner", "(", "c", ")", ":", "op", "=", "ResourceOperation", "(", "c", ",", "path", "or", "NoPath", ",", "method", ",", "resource", ",", "tags", ",", "summary", ",", "middleware", ")", "op", ".", "responses", ".", "add", "(", "Response", "(", "HTTPStatus", ".", "CREATED", ",", "\"{name} has been created\"", ")", ")", "op", ".", "responses", ".", "add", "(", "Response", "(", "HTTPStatus", ".", "BAD_REQUEST", ",", "\"Validation failed.\"", ",", "Error", ")", ")", "return", "op", "return", "inner", "(", "callback", ")", "if", "callback", "else", "inner" ]
Decorator to configure an operation that creates a resource.
[ "Decorator", "to", "configure", "an", "operation", "that", "creates", "a", "resource", "." ]
python
train
54
icometrix/dicom2nifti
dicom2nifti/convert_philips.py
https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_philips.py#L393-L419
def _create_affine_multiframe(multiframe_dicom): """ Function to create the affine matrix for a siemens mosaic dataset This will work for siemens dti and 4D if in mosaic format """ first_frame = multiframe_dicom[Tag(0x5200, 0x9230)][0] last_frame = multiframe_dicom[Tag(0x5200, 0x9230)][-1] # Create affine matrix (http://nipy.sourceforge.net/nibabel/dicom/dicom_orientation.html#dicom-slice-affine) image_orient1 = numpy.array(first_frame.PlaneOrientationSequence[0].ImageOrientationPatient)[0:3].astype(float) image_orient2 = numpy.array(first_frame.PlaneOrientationSequence[0].ImageOrientationPatient)[3:6].astype(float) normal = numpy.cross(image_orient1, image_orient2) delta_r = float(first_frame[0x2005, 0x140f][0].PixelSpacing[0]) delta_c = float(first_frame[0x2005, 0x140f][0].PixelSpacing[1]) image_pos = numpy.array(first_frame.PlanePositionSequence[0].ImagePositionPatient).astype(float) last_image_pos = numpy.array(last_frame.PlanePositionSequence[0].ImagePositionPatient).astype(float) number_of_stack_slices = int(common.get_ss_value(multiframe_dicom[Tag(0x2001, 0x105f)][0][Tag(0x2001, 0x102d)])) delta_s = abs(numpy.linalg.norm(last_image_pos - image_pos)) / (number_of_stack_slices - 1) return numpy.array( [[-image_orient1[0] * delta_c, -image_orient2[0] * delta_r, -delta_s * normal[0], -image_pos[0]], [-image_orient1[1] * delta_c, -image_orient2[1] * delta_r, -delta_s * normal[1], -image_pos[1]], [image_orient1[2] * delta_c, image_orient2[2] * delta_r, delta_s * normal[2], image_pos[2]], [0, 0, 0, 1]])
[ "def", "_create_affine_multiframe", "(", "multiframe_dicom", ")", ":", "first_frame", "=", "multiframe_dicom", "[", "Tag", "(", "0x5200", ",", "0x9230", ")", "]", "[", "0", "]", "last_frame", "=", "multiframe_dicom", "[", "Tag", "(", "0x5200", ",", "0x9230", ")", "]", "[", "-", "1", "]", "# Create affine matrix (http://nipy.sourceforge.net/nibabel/dicom/dicom_orientation.html#dicom-slice-affine)", "image_orient1", "=", "numpy", ".", "array", "(", "first_frame", ".", "PlaneOrientationSequence", "[", "0", "]", ".", "ImageOrientationPatient", ")", "[", "0", ":", "3", "]", ".", "astype", "(", "float", ")", "image_orient2", "=", "numpy", ".", "array", "(", "first_frame", ".", "PlaneOrientationSequence", "[", "0", "]", ".", "ImageOrientationPatient", ")", "[", "3", ":", "6", "]", ".", "astype", "(", "float", ")", "normal", "=", "numpy", ".", "cross", "(", "image_orient1", ",", "image_orient2", ")", "delta_r", "=", "float", "(", "first_frame", "[", "0x2005", ",", "0x140f", "]", "[", "0", "]", ".", "PixelSpacing", "[", "0", "]", ")", "delta_c", "=", "float", "(", "first_frame", "[", "0x2005", ",", "0x140f", "]", "[", "0", "]", ".", "PixelSpacing", "[", "1", "]", ")", "image_pos", "=", "numpy", ".", "array", "(", "first_frame", ".", "PlanePositionSequence", "[", "0", "]", ".", "ImagePositionPatient", ")", ".", "astype", "(", "float", ")", "last_image_pos", "=", "numpy", ".", "array", "(", "last_frame", ".", "PlanePositionSequence", "[", "0", "]", ".", "ImagePositionPatient", ")", ".", "astype", "(", "float", ")", "number_of_stack_slices", "=", "int", "(", "common", ".", "get_ss_value", "(", "multiframe_dicom", "[", "Tag", "(", "0x2001", ",", "0x105f", ")", "]", "[", "0", "]", "[", "Tag", "(", "0x2001", ",", "0x102d", ")", "]", ")", ")", "delta_s", "=", "abs", "(", "numpy", ".", "linalg", ".", "norm", "(", "last_image_pos", "-", "image_pos", ")", ")", "/", "(", "number_of_stack_slices", "-", "1", ")", "return", "numpy", ".", "array", "(", "[", "[", "-", "image_orient1", "[", "0", "]", "*", "delta_c", ",", "-", "image_orient2", "[", "0", "]", "*", "delta_r", ",", "-", "delta_s", "*", "normal", "[", "0", "]", ",", "-", "image_pos", "[", "0", "]", "]", ",", "[", "-", "image_orient1", "[", "1", "]", "*", "delta_c", ",", "-", "image_orient2", "[", "1", "]", "*", "delta_r", ",", "-", "delta_s", "*", "normal", "[", "1", "]", ",", "-", "image_pos", "[", "1", "]", "]", ",", "[", "image_orient1", "[", "2", "]", "*", "delta_c", ",", "image_orient2", "[", "2", "]", "*", "delta_r", ",", "delta_s", "*", "normal", "[", "2", "]", ",", "image_pos", "[", "2", "]", "]", ",", "[", "0", ",", "0", ",", "0", ",", "1", "]", "]", ")" ]
Function to create the affine matrix for a siemens mosaic dataset. This will work for siemens dti and 4D if in mosaic format
[ "Function", "to", "create", "the", "affine", "matrix", "for", "a", "siemens", "mosaic", "dataset", "This", "will", "work", "for", "siemens", "dti", "and", "4D", "if", "in", "mosaic", "format" ]
python
train
59.62963
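The affine built above follows the standard DICOM slice-affine recipe: two in-plane direction cosines scaled by pixel spacing, a slice direction from their cross product scaled by the slice step, and the first slice position as the translation. The sketch below shows that recipe with made-up orientation, spacing, and position values; it omits the sign flips the record applies to the first two rows.

# Generic DICOM-style slice affine from made-up orientation, spacing and
# position values (axial slices, 0.5 mm in-plane, 2 mm between slices).
import numpy as np

row_cosine = np.array([1.0, 0.0, 0.0])   # ImageOrientationPatient[0:3]
col_cosine = np.array([0.0, 1.0, 0.0])   # ImageOrientationPatient[3:6]
normal = np.cross(row_cosine, col_cosine)
delta_r, delta_c, delta_s = 0.5, 0.5, 2.0
first_pos = np.array([-100.0, -120.0, 50.0])  # ImagePositionPatient of slice 0

affine = np.array([
    [row_cosine[0] * delta_c, col_cosine[0] * delta_r, normal[0] * delta_s, first_pos[0]],
    [row_cosine[1] * delta_c, col_cosine[1] * delta_r, normal[1] * delta_s, first_pos[1]],
    [row_cosine[2] * delta_c, col_cosine[2] * delta_r, normal[2] * delta_s, first_pos[2]],
    [0.0, 0.0, 0.0, 1.0],
])
print(affine)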
eng-tools/bwplot
bwplot/colors.py
https://github.com/eng-tools/bwplot/blob/448bc422ffa301988f40d459230f9a4f21e2f1c6/bwplot/colors.py#L83-L157
def spectra(i, **kwargs): """ Define colours by number. Can be plotted either in order of gray scale or in the 'best' order for having a strong gray contrast for only three or four lines :param i: the index to access a colour """ ordered = kwargs.get('ordered', False) options = kwargs.get('options', 'best') gray = kwargs.get('gray', False) CD = {} CD['dark blue'] = (1.0, 0.0, 0.55) # 0 CD['dark green'] = (0.15, 0.35, 0.0) # 1 CD['dark red'] = (0.73, 0.0, 0.0) # 2 CD['dark purple'] = (0.8, 0.0, 0.8) # 3 CD['light green'] = (0.49, 0.64, 0.0) # 4 CD['orange'] = (1.0, 0.5, 0.0) # 5 CD['light blue'] = (0.5, 0.85, 1.0) # 6 CD['pink'] = (1.0, 0.8, 0.8) # 7 CD['brown'] = (0.5, 0.3, 0.0) # 8 CD['red'] = (0.9, 0.0, 0.0) # 9 CD['greenish blue'] = (0.12, .8, .8) # 10 CD['bluey purple'] = (0.8, 0.85, 1.0) # 12 CD['yellow'] = (1.0, 1.0, 0.0) # 6 CD['dark gray'] = (0.25, 0.25, 0.25) # CD['mid gray'] = (0.5, 0.5, 0.5) # CD['light gray'] = (0.75, 0.75, 0.75) # CD['black5'] = (0.05, 0.05, 0.05) # CD['black'] = (0.0, 0.0, 0.0) # CD['white'] = (1.0, 1.0, 1.0) # if isinstance(i, int): i = i elif isinstance(i, float): i = int(i) elif isinstance(i, str): dat = CD[i] return dat DtoL = ['dark blue', 'dark green', 'dark red', 'brown', 'light green', 'orange', 'light blue', 'pink', 'dark purple', 'red', 'greenish blue', 'bluey purple', 'yellow', 'dark gray', 'mid gray', 'light gray'] Best = ['dark blue', 'orange', 'light blue', 'dark purple', 'dark green', 'bluey purple', 'dark red', 'light green', 'pink', 'brown', 'red', 'yellow', 'greenish blue', 'dark gray', 'mid gray', 'light gray'] Dots = ['dark blue', 'yellow', 'light blue', 'dark purple', 'dark green', 'orange', 'bluey purple', 'dark red', 'light green', 'pink', 'brown', 'red', 'greenish blue', 'dark gray', 'mid gray', 'light gray'] # ll = [0, 5, 2, 4, 1, 6, 3, 7, 8, 11, 9, 12, 10, 13, 14, 15] # change 11 w 5 ind = i % len(Best) dat = CD[Best[ind]] col = Best[ind] if ordered: # if ordered is true then the colours are accessed from darkest to lightest ind = i % len(DtoL) dat = CD[DtoL[ind]] col = DtoL[ind] if options == "dots": ind = i % len(Dots) dat = CD[Dots[ind]] col = Dots[ind] if options == "ordered": ind = i % len(DtoL) dat = CD[DtoL[ind]] col = DtoL[ind] gray_value = 0.299 * dat[0] + 0.587 * dat[1] + 0.114 * dat[2] # calculate the gray scale value if gray: return gray_value, gray_value, gray_value return dat
[ "def", "spectra", "(", "i", ",", "*", "*", "kwargs", ")", ":", "ordered", "=", "kwargs", ".", "get", "(", "'ordered'", ",", "False", ")", "options", "=", "kwargs", ".", "get", "(", "'options'", ",", "'best'", ")", "gray", "=", "kwargs", ".", "get", "(", "'gray'", ",", "False", ")", "CD", "=", "{", "}", "CD", "[", "'dark blue'", "]", "=", "(", "1.0", ",", "0.0", ",", "0.55", ")", "# 0", "CD", "[", "'dark green'", "]", "=", "(", "0.15", ",", "0.35", ",", "0.0", ")", "# 1", "CD", "[", "'dark red'", "]", "=", "(", "0.73", ",", "0.0", ",", "0.0", ")", "# 2", "CD", "[", "'dark purple'", "]", "=", "(", "0.8", ",", "0.0", ",", "0.8", ")", "# 3", "CD", "[", "'light green'", "]", "=", "(", "0.49", ",", "0.64", ",", "0.0", ")", "# 4", "CD", "[", "'orange'", "]", "=", "(", "1.0", ",", "0.5", ",", "0.0", ")", "# 5", "CD", "[", "'light blue'", "]", "=", "(", "0.5", ",", "0.85", ",", "1.0", ")", "# 6", "CD", "[", "'pink'", "]", "=", "(", "1.0", ",", "0.8", ",", "0.8", ")", "# 7", "CD", "[", "'brown'", "]", "=", "(", "0.5", ",", "0.3", ",", "0.0", ")", "# 8", "CD", "[", "'red'", "]", "=", "(", "0.9", ",", "0.0", ",", "0.0", ")", "# 9", "CD", "[", "'greenish blue'", "]", "=", "(", "0.12", ",", ".8", ",", ".8", ")", "# 10", "CD", "[", "'bluey purple'", "]", "=", "(", "0.8", ",", "0.85", ",", "1.0", ")", "# 12", "CD", "[", "'yellow'", "]", "=", "(", "1.0", ",", "1.0", ",", "0.0", ")", "# 6", "CD", "[", "'dark gray'", "]", "=", "(", "0.25", ",", "0.25", ",", "0.25", ")", "#", "CD", "[", "'mid gray'", "]", "=", "(", "0.5", ",", "0.5", ",", "0.5", ")", "#", "CD", "[", "'light gray'", "]", "=", "(", "0.75", ",", "0.75", ",", "0.75", ")", "#", "CD", "[", "'black5'", "]", "=", "(", "0.05", ",", "0.05", ",", "0.05", ")", "#", "CD", "[", "'black'", "]", "=", "(", "0.0", ",", "0.0", ",", "0.0", ")", "#", "CD", "[", "'white'", "]", "=", "(", "1.0", ",", "1.0", ",", "1.0", ")", "#", "if", "isinstance", "(", "i", ",", "int", ")", ":", "i", "=", "i", "elif", "isinstance", "(", "i", ",", "float", ")", ":", "i", "=", "int", "(", "i", ")", "elif", "isinstance", "(", "i", ",", "str", ")", ":", "dat", "=", "CD", "[", "i", "]", "return", "dat", "DtoL", "=", "[", "'dark blue'", ",", "'dark green'", ",", "'dark red'", ",", "'brown'", ",", "'light green'", ",", "'orange'", ",", "'light blue'", ",", "'pink'", ",", "'dark purple'", ",", "'red'", ",", "'greenish blue'", ",", "'bluey purple'", ",", "'yellow'", ",", "'dark gray'", ",", "'mid gray'", ",", "'light gray'", "]", "Best", "=", "[", "'dark blue'", ",", "'orange'", ",", "'light blue'", ",", "'dark purple'", ",", "'dark green'", ",", "'bluey purple'", ",", "'dark red'", ",", "'light green'", ",", "'pink'", ",", "'brown'", ",", "'red'", ",", "'yellow'", ",", "'greenish blue'", ",", "'dark gray'", ",", "'mid gray'", ",", "'light gray'", "]", "Dots", "=", "[", "'dark blue'", ",", "'yellow'", ",", "'light blue'", ",", "'dark purple'", ",", "'dark green'", ",", "'orange'", ",", "'bluey purple'", ",", "'dark red'", ",", "'light green'", ",", "'pink'", ",", "'brown'", ",", "'red'", ",", "'greenish blue'", ",", "'dark gray'", ",", "'mid gray'", ",", "'light gray'", "]", "# ll = [0, 5, 2, 4, 1, 6, 3, 7, 8, 11, 9, 12, 10, 13, 14, 15] # change 11 w 5", "ind", "=", "i", "%", "len", "(", "Best", ")", "dat", "=", "CD", "[", "Best", "[", "ind", "]", "]", "col", "=", "Best", "[", "ind", "]", "if", "ordered", ":", "# if ordered is true then the colours are accessed from darkest to lightest", "ind", "=", "i", "%", "len", "(", "DtoL", ")", "dat", "=", "CD", "[", "DtoL", "[", "ind", "]", "]", 
"col", "=", "DtoL", "[", "ind", "]", "if", "options", "==", "\"dots\"", ":", "ind", "=", "i", "%", "len", "(", "Dots", ")", "dat", "=", "CD", "[", "Dots", "[", "ind", "]", "]", "col", "=", "Dots", "[", "ind", "]", "if", "options", "==", "\"ordered\"", ":", "ind", "=", "i", "%", "len", "(", "DtoL", ")", "dat", "=", "CD", "[", "DtoL", "[", "ind", "]", "]", "col", "=", "DtoL", "[", "ind", "]", "gray_value", "=", "0.299", "*", "dat", "[", "0", "]", "+", "0.587", "*", "dat", "[", "1", "]", "+", "0.114", "*", "dat", "[", "2", "]", "# calculate the gray scale value", "if", "gray", ":", "return", "gray_value", ",", "gray_value", ",", "gray_value", "return", "dat" ]
Define colours by number. Can be plotted either in order of gray scale or in the 'best' order for having a strong gray contrast for only three or four lines :param i: the index to access a colour
[ "Define", "colours", "by", "number", ".", "Can", "be", "plotted", "either", "in", "order", "of", "gray", "scale", "or", "in", "the", "best", "order", "for", "having", "a", "strong", "gray", "contrast", "for", "only", "three", "or", "four", "lines", ":", "param", "i", ":", "the", "index", "to", "access", "a", "colour" ]
python
train
36.546667
inveniosoftware/invenio-deposit
invenio_deposit/api.py
https://github.com/inveniosoftware/invenio-deposit/blob/f243ea1d01ab0a3bc92ade3262d1abdd2bc32447/invenio_deposit/api.py#L476-L483
def clear(self, *args, **kwargs):
    """Clear only drafts.

    Status required: ``'draft'``.

    Meta information inside `_deposit` is preserved.
    """
    super(Deposit, self).clear(*args, **kwargs)
[ "def", "clear", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "super", "(", "Deposit", ",", "self", ")", ".", "clear", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Clear only drafts. Status required: ``'draft'``. Meta information inside `_deposit` is preserved.
[ "Clear", "only", "drafts", "." ]
python
valid
27.25
rtluckie/seria
seria/utils.py
https://github.com/rtluckie/seria/blob/8ae4f71237e69085d8f974a024720f45b34ab963/seria/utils.py#L3-L21
def str_to_num(i, exact_match=True): """ Attempts to convert a str to either an int or float """ # TODO: Cleanup -- this is really ugly if not isinstance(i, str): return i try: if not exact_match: return int(i) elif str(int(i)) == i: return int(i) elif str(float(i)) == i: return float(i) else: pass except ValueError: pass return i
[ "def", "str_to_num", "(", "i", ",", "exact_match", "=", "True", ")", ":", "# TODO: Cleanup -- this is really ugly", "if", "not", "isinstance", "(", "i", ",", "str", ")", ":", "return", "i", "try", ":", "if", "not", "exact_match", ":", "return", "int", "(", "i", ")", "elif", "str", "(", "int", "(", "i", ")", ")", "==", "i", ":", "return", "int", "(", "i", ")", "elif", "str", "(", "float", "(", "i", ")", ")", "==", "i", ":", "return", "float", "(", "i", ")", "else", ":", "pass", "except", "ValueError", ":", "pass", "return", "i" ]
Attempts to convert a str to either an int or float
[ "Attempts", "to", "convert", "a", "str", "to", "either", "an", "int", "or", "float" ]
python
train
23.263158
ssato/python-anyconfig
src/anyconfig/backend/yaml/pyyaml.py
https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/backend/yaml/pyyaml.py#L58-L104
def _customized_loader(container, loader=Loader, mapping_tag=_MAPPING_TAG): """ Create or update loader with making given callble 'container' to make mapping objects such as dict and OrderedDict, used to construct python object from yaml mapping node internally. :param container: Set container used internally """ def construct_mapping(loader, node, deep=False): """Construct python object from yaml mapping node, based on :meth:`yaml.BaseConstructor.construct_mapping` in PyYAML (MIT). """ loader.flatten_mapping(node) if not isinstance(node, yaml.MappingNode): msg = "expected a mapping node, but found %s" % node.id raise yaml.constructor.ConstructorError(None, None, msg, node.start_mark) mapping = container() for key_node, value_node in node.value: key = loader.construct_object(key_node, deep=deep) try: hash(key) except TypeError as exc: eargs = ("while constructing a mapping", node.start_mark, "found unacceptable key (%s)" % exc, key_node.start_mark) raise yaml.constructor.ConstructorError(*eargs) value = loader.construct_object(value_node, deep=deep) mapping[key] = value return mapping tag = "tag:yaml.org,2002:python/unicode" def construct_ustr(loader, node): """Unicode string constructor""" return loader.construct_scalar(node) try: loader.add_constructor(tag, construct_ustr) except NameError: pass if type(container) != dict: loader.add_constructor(mapping_tag, construct_mapping) return loader
[ "def", "_customized_loader", "(", "container", ",", "loader", "=", "Loader", ",", "mapping_tag", "=", "_MAPPING_TAG", ")", ":", "def", "construct_mapping", "(", "loader", ",", "node", ",", "deep", "=", "False", ")", ":", "\"\"\"Construct python object from yaml mapping node, based on\n :meth:`yaml.BaseConstructor.construct_mapping` in PyYAML (MIT).\n \"\"\"", "loader", ".", "flatten_mapping", "(", "node", ")", "if", "not", "isinstance", "(", "node", ",", "yaml", ".", "MappingNode", ")", ":", "msg", "=", "\"expected a mapping node, but found %s\"", "%", "node", ".", "id", "raise", "yaml", ".", "constructor", ".", "ConstructorError", "(", "None", ",", "None", ",", "msg", ",", "node", ".", "start_mark", ")", "mapping", "=", "container", "(", ")", "for", "key_node", ",", "value_node", "in", "node", ".", "value", ":", "key", "=", "loader", ".", "construct_object", "(", "key_node", ",", "deep", "=", "deep", ")", "try", ":", "hash", "(", "key", ")", "except", "TypeError", "as", "exc", ":", "eargs", "=", "(", "\"while constructing a mapping\"", ",", "node", ".", "start_mark", ",", "\"found unacceptable key (%s)\"", "%", "exc", ",", "key_node", ".", "start_mark", ")", "raise", "yaml", ".", "constructor", ".", "ConstructorError", "(", "*", "eargs", ")", "value", "=", "loader", ".", "construct_object", "(", "value_node", ",", "deep", "=", "deep", ")", "mapping", "[", "key", "]", "=", "value", "return", "mapping", "tag", "=", "\"tag:yaml.org,2002:python/unicode\"", "def", "construct_ustr", "(", "loader", ",", "node", ")", ":", "\"\"\"Unicode string constructor\"\"\"", "return", "loader", ".", "construct_scalar", "(", "node", ")", "try", ":", "loader", ".", "add_constructor", "(", "tag", ",", "construct_ustr", ")", "except", "NameError", ":", "pass", "if", "type", "(", "container", ")", "!=", "dict", ":", "loader", ".", "add_constructor", "(", "mapping_tag", ",", "construct_mapping", ")", "return", "loader" ]
Create or update loader with making given callble 'container' to make mapping objects such as dict and OrderedDict, used to construct python object from yaml mapping node internally. :param container: Set container used internally
[ "Create", "or", "update", "loader", "with", "making", "given", "callble", "container", "to", "make", "mapping", "objects", "such", "as", "dict", "and", "OrderedDict", "used", "to", "construct", "python", "object", "from", "yaml", "mapping", "node", "internally", "." ]
python
train
38.085106
codelv/enaml-native
src/enamlnative/android/android_bottom_sheet_dialog.py
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_bottom_sheet_dialog.py#L34-L42
def create_widget(self): """ Create the underlying widget. A dialog is not a subclass of view, hence we don't set name as widget or children will try to use it as their parent. """ d = self.declaration self.dialog = BottomSheetDialog(self.get_context(), d.style)
[ "def", "create_widget", "(", "self", ")", ":", "d", "=", "self", ".", "declaration", "self", ".", "dialog", "=", "BottomSheetDialog", "(", "self", ".", "get_context", "(", ")", ",", "d", ".", "style", ")" ]
Create the underlying widget. A dialog is not a subclass of view, hence we don't set name as widget or children will try to use it as their parent.
[ "Create", "the", "underlying", "widget", "." ]
python
train
33.777778
phoebe-project/phoebe2
phoebe/backend/mesh.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/backend/mesh.py#L33-L85
def euler_trans_matrix(etheta, elongan, eincl): """ Get the transformation matrix R to translate/rotate a mesh according to euler angles. The matrix is R(long,incl,theta) = Rz(pi).Rz(long).Rx(incl).Rz(theta) Rz(long).Rx(-incl).Rz(theta).Rz(pi) where Rx(u) = 1, 0, 0 0, cos(u), -sin(u) 0, sin(u), cos(u) Ry(u) = cos(u), 0, sin(u) 0, 1, 0 -sin(u), 0, cos(u) Rz(u) = cos(u), -sin(u), 0 sin(u), cos(u), 0 0, 0, 1 Rz(pi) = reflection across z-axis Note: R(0,0,0) = -1, 0, 0 0, -1, 0 0, 0, 1 :parameter float etheta: euler theta angle :parameter float elongan: euler long of asc node angle :parameter float eincl: euler inclination angle :return: matrix with size 3x3 """ s1 = sin(eincl); c1 = cos(eincl); s2 = sin(elongan); c2 = cos(elongan); s3 = sin(etheta); c3 = cos(etheta); c1s3 = c1*s3; c1c3 = c1*c3; return np.array([ [-c2*c3+s2*c1s3, c2*s3+s2*c1c3, -s2*s1], [-s2*c3-c2*c1s3, s2*s3-c2*c1c3, c2*s1], [s1*s3, s1*c3, c1] ])
[ "def", "euler_trans_matrix", "(", "etheta", ",", "elongan", ",", "eincl", ")", ":", "s1", "=", "sin", "(", "eincl", ")", "c1", "=", "cos", "(", "eincl", ")", "s2", "=", "sin", "(", "elongan", ")", "c2", "=", "cos", "(", "elongan", ")", "s3", "=", "sin", "(", "etheta", ")", "c3", "=", "cos", "(", "etheta", ")", "c1s3", "=", "c1", "*", "s3", "c1c3", "=", "c1", "*", "c3", "return", "np", ".", "array", "(", "[", "[", "-", "c2", "*", "c3", "+", "s2", "*", "c1s3", ",", "c2", "*", "s3", "+", "s2", "*", "c1c3", ",", "-", "s2", "*", "s1", "]", ",", "[", "-", "s2", "*", "c3", "-", "c2", "*", "c1s3", ",", "s2", "*", "s3", "-", "c2", "*", "c1c3", ",", "c2", "*", "s1", "]", ",", "[", "s1", "*", "s3", ",", "s1", "*", "c3", ",", "c1", "]", "]", ")" ]
Get the transformation matrix R to translate/rotate a mesh according to euler angles. The matrix is R(long,incl,theta) = Rz(pi).Rz(long).Rx(incl).Rz(theta) Rz(long).Rx(-incl).Rz(theta).Rz(pi) where Rx(u) = 1, 0, 0 0, cos(u), -sin(u) 0, sin(u), cos(u) Ry(u) = cos(u), 0, sin(u) 0, 1, 0 -sin(u), 0, cos(u) Rz(u) = cos(u), -sin(u), 0 sin(u), cos(u), 0 0, 0, 1 Rz(pi) = reflection across z-axis Note: R(0,0,0) = -1, 0, 0 0, -1, 0 0, 0, 1 :parameter float etheta: euler theta angle :parameter float elongan: euler long of asc node angle :parameter float eincl: euler inclination angle :return: matrix with size 3x3
[ "Get", "the", "transformation", "matrix", "R", "to", "translate", "/", "rotate", "a", "mesh", "according", "to", "euler", "angles", "." ]
python
train
24.09434
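A minimal numpy sketch of the same closed-form rotation, useful for checking the R(0,0,0) identity quoted in the docstring above; the standalone function is a hypothetical restatement for illustration, not the phoebe implementation itself.

import numpy as np
from numpy import sin, cos

def euler_rotation(etheta, elongan, eincl):
    # Same closed form as the docstring above: Rz(pi).Rz(long).Rx(incl).Rz(theta).
    s1, c1 = sin(eincl), cos(eincl)
    s2, c2 = sin(elongan), cos(elongan)
    s3, c3 = sin(etheta), cos(etheta)
    return np.array([[-c2*c3 + s2*c1*s3,  c2*s3 + s2*c1*c3, -s2*s1],
                     [-s2*c3 - c2*c1*s3,  s2*s3 - c2*c1*c3,  c2*s1],
                     [s1*s3,              s1*c3,             c1]])

# Sanity check from the docstring: R(0,0,0) is a reflection across the z-axis.
assert np.allclose(euler_rotation(0.0, 0.0, 0.0), np.diag([-1.0, -1.0, 1.0]))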
MKLab-ITI/reveal-user-annotation
reveal_user_annotation/twitter/clean_twitter_list.py
https://github.com/MKLab-ITI/reveal-user-annotation/blob/ed019c031857b091e5601f53ba3f01a499a0e3ef/reveal_user_annotation/twitter/clean_twitter_list.py#L82-L114
def user_twitter_list_bag_of_words(twitter_list_corpus, sent_tokenize, _treebank_word_tokenize, tagger, lemmatizer, lemmatize, stopset, first_cap_re, all_cap_re, digits_punctuation_whitespace_re, pos_set): """ Extract a bag-of-words for a corpus of Twitter lists pertaining to a Twitter user. Inputs: - twitter_list_corpus: A python list of Twitter lists in json format. - lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet". Output: - bag_of_words: A bag-of-words in python dictionary format. - lemma_to_keywordbag_total: Aggregated python dictionary that maps stems/lemmas to original topic keywords. """ # Extract a bag-of-words from a list of Twitter lists. # May result in empty sets list_of_keyword_sets, list_of_lemma_to_keywordbags = clean_list_of_twitter_list(twitter_list_corpus, sent_tokenize, _treebank_word_tokenize, tagger, lemmatizer, lemmatize, stopset, first_cap_re, all_cap_re, digits_punctuation_whitespace_re, pos_set) # Reduce keyword sets. bag_of_words = reduce_list_of_bags_of_words(list_of_keyword_sets) # Reduce lemma to keywordbag maps. lemma_to_keywordbag_total = defaultdict(lambda: defaultdict(int)) for lemma_to_keywordbag in list_of_lemma_to_keywordbags: for lemma, keywordbag in lemma_to_keywordbag.items(): for keyword, multiplicity in keywordbag.items(): lemma_to_keywordbag_total[lemma][keyword] += multiplicity return bag_of_words, lemma_to_keywordbag_total
[ "def", "user_twitter_list_bag_of_words", "(", "twitter_list_corpus", ",", "sent_tokenize", ",", "_treebank_word_tokenize", ",", "tagger", ",", "lemmatizer", ",", "lemmatize", ",", "stopset", ",", "first_cap_re", ",", "all_cap_re", ",", "digits_punctuation_whitespace_re", ",", "pos_set", ")", ":", "# Extract a bag-of-words from a list of Twitter lists.", "# May result in empty sets", "list_of_keyword_sets", ",", "list_of_lemma_to_keywordbags", "=", "clean_list_of_twitter_list", "(", "twitter_list_corpus", ",", "sent_tokenize", ",", "_treebank_word_tokenize", ",", "tagger", ",", "lemmatizer", ",", "lemmatize", ",", "stopset", ",", "first_cap_re", ",", "all_cap_re", ",", "digits_punctuation_whitespace_re", ",", "pos_set", ")", "# Reduce keyword sets.", "bag_of_words", "=", "reduce_list_of_bags_of_words", "(", "list_of_keyword_sets", ")", "# Reduce lemma to keywordbag maps.", "lemma_to_keywordbag_total", "=", "defaultdict", "(", "lambda", ":", "defaultdict", "(", "int", ")", ")", "for", "lemma_to_keywordbag", "in", "list_of_lemma_to_keywordbags", ":", "for", "lemma", ",", "keywordbag", "in", "lemma_to_keywordbag", ".", "items", "(", ")", ":", "for", "keyword", ",", "multiplicity", "in", "keywordbag", ".", "items", "(", ")", ":", "lemma_to_keywordbag_total", "[", "lemma", "]", "[", "keyword", "]", "+=", "multiplicity", "return", "bag_of_words", ",", "lemma_to_keywordbag_total" ]
Extract a bag-of-words for a corpus of Twitter lists pertaining to a Twitter user. Inputs: - twitter_list_corpus: A python list of Twitter lists in json format. - lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet". Output: - bag_of_words: A bag-of-words in python dictionary format. - lemma_to_keywordbag_total: Aggregated python dictionary that maps stems/lemmas to original topic keywords.
[ "Extract", "a", "bag", "-", "of", "-", "words", "for", "a", "corpus", "of", "Twitter", "lists", "pertaining", "to", "a", "Twitter", "user", "." ]
python
train
60.454545
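The nested-defaultdict aggregation used above is worth seeing in isolation; the toy keyword bags below are invented for illustration.

from collections import defaultdict

# Fold several {lemma: {keyword: count}} maps into one aggregated counter,
# exactly as lemma_to_keywordbag_total is built in the function above.
bags = [{"run": {"running": 2}}, {"run": {"runs": 1, "running": 1}}]
total = defaultdict(lambda: defaultdict(int))
for bag in bags:
    for lemma, keywordbag in bag.items():
        for keyword, count in keywordbag.items():
            total[lemma][keyword] += count
print({lemma: dict(counts) for lemma, counts in total.items()})
# {'run': {'running': 3, 'runs': 1}}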
nugget/python-anthemav
anthemav/protocol.py
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/protocol.py#L208-L216
def _populate_inputs(self, total): """Request the names for all active, configured inputs on the device. Once we learn how many inputs are configured, this function is called which will ask for the name of each active input. """ total = total + 1 for input_number in range(1, total): self.query('ISN'+str(input_number).zfill(2))
[ "def", "_populate_inputs", "(", "self", ",", "total", ")", ":", "total", "=", "total", "+", "1", "for", "input_number", "in", "range", "(", "1", ",", "total", ")", ":", "self", ".", "query", "(", "'ISN'", "+", "str", "(", "input_number", ")", ".", "zfill", "(", "2", ")", ")" ]
Request the names for all active, configured inputs on the device. Once we learn how many inputs are configured, this function is called which will ask for the name of each active input.
[ "Request", "the", "names", "for", "all", "active", "configured", "inputs", "on", "the", "device", "." ]
python
train
42.333333
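A tiny sketch of the query strings the loop above emits; the input count of three is made up, only the 'ISN' prefix and the zero-padding come from the code.

# With three configured inputs the method asks the device for each name in turn.
total = 3 + 1
queries = ['ISN' + str(n).zfill(2) for n in range(1, total)]
print(queries)  # ['ISN01', 'ISN02', 'ISN03']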
saltstack/salt
salt/modules/keystoneng.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/keystoneng.py#L726-L739
def service_create(auth=None, **kwargs): ''' Create a service CLI Example: .. code-block:: bash salt '*' keystoneng.service_create name=glance type=image salt '*' keystoneng.service_create name=glance type=image description="Image" ''' cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(keep_name=True, **kwargs) return cloud.create_service(**kwargs)
[ "def", "service_create", "(", "auth", "=", "None", ",", "*", "*", "kwargs", ")", ":", "cloud", "=", "get_operator_cloud", "(", "auth", ")", "kwargs", "=", "_clean_kwargs", "(", "keep_name", "=", "True", ",", "*", "*", "kwargs", ")", "return", "cloud", ".", "create_service", "(", "*", "*", "kwargs", ")" ]
Create a service CLI Example: .. code-block:: bash salt '*' keystoneng.service_create name=glance type=image salt '*' keystoneng.service_create name=glance type=image description="Image"
[ "Create", "a", "service" ]
python
train
28.071429
Tristramg/mumoro
virtualenv.py
https://github.com/Tristramg/mumoro/blob/e37d6ddb72fd23fb485c80fd8a5cda520ca08187/virtualenv.py#L622-L652
def path_locations(home_dir): """Return the path locations for the environment (where libraries are, where scripts go, etc)""" # XXX: We'd use distutils.sysconfig.get_python_inc/lib but its # prefix arg is broken: http://bugs.python.org/issue3386 if sys.platform == 'win32': # Windows has lots of problems with executables with spaces in # the name; this function will remove them (using the ~1 # format): mkdir(home_dir) if ' ' in home_dir: try: import win32api except ImportError: print 'Error: the path "%s" has a space in it' % home_dir print 'To handle these kinds of paths, the win32api module must be installed:' print ' http://sourceforge.net/projects/pywin32/' sys.exit(3) home_dir = win32api.GetShortPathName(home_dir) lib_dir = join(home_dir, 'Lib') inc_dir = join(home_dir, 'Include') bin_dir = join(home_dir, 'Scripts') elif is_jython: lib_dir = join(home_dir, 'Lib') inc_dir = join(home_dir, 'Include') bin_dir = join(home_dir, 'bin') else: lib_dir = join(home_dir, 'lib', py_version) inc_dir = join(home_dir, 'include', py_version) bin_dir = join(home_dir, 'bin') return home_dir, lib_dir, inc_dir, bin_dir
[ "def", "path_locations", "(", "home_dir", ")", ":", "# XXX: We'd use distutils.sysconfig.get_python_inc/lib but its", "# prefix arg is broken: http://bugs.python.org/issue3386", "if", "sys", ".", "platform", "==", "'win32'", ":", "# Windows has lots of problems with executables with spaces in", "# the name; this function will remove them (using the ~1", "# format):", "mkdir", "(", "home_dir", ")", "if", "' '", "in", "home_dir", ":", "try", ":", "import", "win32api", "except", "ImportError", ":", "print", "'Error: the path \"%s\" has a space in it'", "%", "home_dir", "print", "'To handle these kinds of paths, the win32api module must be installed:'", "print", "' http://sourceforge.net/projects/pywin32/'", "sys", ".", "exit", "(", "3", ")", "home_dir", "=", "win32api", ".", "GetShortPathName", "(", "home_dir", ")", "lib_dir", "=", "join", "(", "home_dir", ",", "'Lib'", ")", "inc_dir", "=", "join", "(", "home_dir", ",", "'Include'", ")", "bin_dir", "=", "join", "(", "home_dir", ",", "'Scripts'", ")", "elif", "is_jython", ":", "lib_dir", "=", "join", "(", "home_dir", ",", "'Lib'", ")", "inc_dir", "=", "join", "(", "home_dir", ",", "'Include'", ")", "bin_dir", "=", "join", "(", "home_dir", ",", "'bin'", ")", "else", ":", "lib_dir", "=", "join", "(", "home_dir", ",", "'lib'", ",", "py_version", ")", "inc_dir", "=", "join", "(", "home_dir", ",", "'include'", ",", "py_version", ")", "bin_dir", "=", "join", "(", "home_dir", ",", "'bin'", ")", "return", "home_dir", ",", "lib_dir", ",", "inc_dir", ",", "bin_dir" ]
Return the path locations for the environment (where libraries are, where scripts go, etc)
[ "Return", "the", "path", "locations", "for", "the", "environment", "(", "where", "libraries", "are", "where", "scripts", "go", "etc", ")" ]
python
train
43.612903
laginha/django-key-auth
src/keyauth/models.py
https://github.com/laginha/django-key-auth/blob/7fc719cf9e9b5b725ea45b9e9717f647e4dc687f/src/keyauth/models.py#L74-L80
def extend_expiration_date(self, days=KEY_EXPIRATION_DELTA): """ Extend expiration date a number of given years """ delta = timedelta_days(days) self.expiration_date = self.expiration_date + delta self.save()
[ "def", "extend_expiration_date", "(", "self", ",", "days", "=", "KEY_EXPIRATION_DELTA", ")", ":", "delta", "=", "timedelta_days", "(", "days", ")", "self", ".", "expiration_date", "=", "self", ".", "expiration_date", "+", "delta", "self", ".", "save", "(", ")" ]
Extend expiration date a number of given years
[ "Extend", "expiration", "date", "a", "number", "of", "given", "years" ]
python
train
35.714286
oanda/v20-python
src/v20/account.py
https://github.com/oanda/v20-python/blob/f28192f4a31bce038cf6dfa302f5878bec192fe5/src/v20/account.py#L496-L589
def from_dict(data, ctx): """ Instantiate a new AccountChangesState from a dict (generally from loading a JSON response). The data used to instantiate the AccountChangesState is a shallow copy of the dict passed in, with any complex child types instantiated appropriately. """ data = data.copy() if data.get('unrealizedPL') is not None: data['unrealizedPL'] = ctx.convert_decimal_number( data.get('unrealizedPL') ) if data.get('NAV') is not None: data['NAV'] = ctx.convert_decimal_number( data.get('NAV') ) if data.get('marginUsed') is not None: data['marginUsed'] = ctx.convert_decimal_number( data.get('marginUsed') ) if data.get('marginAvailable') is not None: data['marginAvailable'] = ctx.convert_decimal_number( data.get('marginAvailable') ) if data.get('positionValue') is not None: data['positionValue'] = ctx.convert_decimal_number( data.get('positionValue') ) if data.get('marginCloseoutUnrealizedPL') is not None: data['marginCloseoutUnrealizedPL'] = ctx.convert_decimal_number( data.get('marginCloseoutUnrealizedPL') ) if data.get('marginCloseoutNAV') is not None: data['marginCloseoutNAV'] = ctx.convert_decimal_number( data.get('marginCloseoutNAV') ) if data.get('marginCloseoutMarginUsed') is not None: data['marginCloseoutMarginUsed'] = ctx.convert_decimal_number( data.get('marginCloseoutMarginUsed') ) if data.get('marginCloseoutPercent') is not None: data['marginCloseoutPercent'] = ctx.convert_decimal_number( data.get('marginCloseoutPercent') ) if data.get('marginCloseoutPositionValue') is not None: data['marginCloseoutPositionValue'] = ctx.convert_decimal_number( data.get('marginCloseoutPositionValue') ) if data.get('withdrawalLimit') is not None: data['withdrawalLimit'] = ctx.convert_decimal_number( data.get('withdrawalLimit') ) if data.get('marginCallMarginUsed') is not None: data['marginCallMarginUsed'] = ctx.convert_decimal_number( data.get('marginCallMarginUsed') ) if data.get('marginCallPercent') is not None: data['marginCallPercent'] = ctx.convert_decimal_number( data.get('marginCallPercent') ) if data.get('orders') is not None: data['orders'] = [ ctx.order.DynamicOrderState.from_dict(d, ctx) for d in data.get('orders') ] if data.get('trades') is not None: data['trades'] = [ ctx.trade.CalculatedTradeState.from_dict(d, ctx) for d in data.get('trades') ] if data.get('positions') is not None: data['positions'] = [ ctx.position.CalculatedPositionState.from_dict(d, ctx) for d in data.get('positions') ] return AccountChangesState(**data)
[ "def", "from_dict", "(", "data", ",", "ctx", ")", ":", "data", "=", "data", ".", "copy", "(", ")", "if", "data", ".", "get", "(", "'unrealizedPL'", ")", "is", "not", "None", ":", "data", "[", "'unrealizedPL'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'unrealizedPL'", ")", ")", "if", "data", ".", "get", "(", "'NAV'", ")", "is", "not", "None", ":", "data", "[", "'NAV'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'NAV'", ")", ")", "if", "data", ".", "get", "(", "'marginUsed'", ")", "is", "not", "None", ":", "data", "[", "'marginUsed'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginUsed'", ")", ")", "if", "data", ".", "get", "(", "'marginAvailable'", ")", "is", "not", "None", ":", "data", "[", "'marginAvailable'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginAvailable'", ")", ")", "if", "data", ".", "get", "(", "'positionValue'", ")", "is", "not", "None", ":", "data", "[", "'positionValue'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'positionValue'", ")", ")", "if", "data", ".", "get", "(", "'marginCloseoutUnrealizedPL'", ")", "is", "not", "None", ":", "data", "[", "'marginCloseoutUnrealizedPL'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginCloseoutUnrealizedPL'", ")", ")", "if", "data", ".", "get", "(", "'marginCloseoutNAV'", ")", "is", "not", "None", ":", "data", "[", "'marginCloseoutNAV'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginCloseoutNAV'", ")", ")", "if", "data", ".", "get", "(", "'marginCloseoutMarginUsed'", ")", "is", "not", "None", ":", "data", "[", "'marginCloseoutMarginUsed'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginCloseoutMarginUsed'", ")", ")", "if", "data", ".", "get", "(", "'marginCloseoutPercent'", ")", "is", "not", "None", ":", "data", "[", "'marginCloseoutPercent'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginCloseoutPercent'", ")", ")", "if", "data", ".", "get", "(", "'marginCloseoutPositionValue'", ")", "is", "not", "None", ":", "data", "[", "'marginCloseoutPositionValue'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginCloseoutPositionValue'", ")", ")", "if", "data", ".", "get", "(", "'withdrawalLimit'", ")", "is", "not", "None", ":", "data", "[", "'withdrawalLimit'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'withdrawalLimit'", ")", ")", "if", "data", ".", "get", "(", "'marginCallMarginUsed'", ")", "is", "not", "None", ":", "data", "[", "'marginCallMarginUsed'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginCallMarginUsed'", ")", ")", "if", "data", ".", "get", "(", "'marginCallPercent'", ")", "is", "not", "None", ":", "data", "[", "'marginCallPercent'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginCallPercent'", ")", ")", "if", "data", ".", "get", "(", "'orders'", ")", "is", "not", "None", ":", "data", "[", "'orders'", "]", "=", "[", "ctx", ".", "order", ".", "DynamicOrderState", ".", "from_dict", "(", "d", ",", "ctx", ")", "for", "d", "in", "data", ".", "get", "(", "'orders'", ")", "]", "if", "data", ".", "get", "(", "'trades'", ")", "is", "not", "None", ":", "data", "[", "'trades'", "]", "=", "[", "ctx", ".", "trade", ".", "CalculatedTradeState", ".", 
"from_dict", "(", "d", ",", "ctx", ")", "for", "d", "in", "data", ".", "get", "(", "'trades'", ")", "]", "if", "data", ".", "get", "(", "'positions'", ")", "is", "not", "None", ":", "data", "[", "'positions'", "]", "=", "[", "ctx", ".", "position", ".", "CalculatedPositionState", ".", "from_dict", "(", "d", ",", "ctx", ")", "for", "d", "in", "data", ".", "get", "(", "'positions'", ")", "]", "return", "AccountChangesState", "(", "*", "*", "data", ")" ]
Instantiate a new AccountChangesState from a dict (generally from loading a JSON response). The data used to instantiate the AccountChangesState is a shallow copy of the dict passed in, with any complex child types instantiated appropriately.
[ "Instantiate", "a", "new", "AccountChangesState", "from", "a", "dict", "(", "generally", "from", "loading", "a", "JSON", "response", ")", ".", "The", "data", "used", "to", "instantiate", "the", "AccountChangesState", "is", "a", "shallow", "copy", "of", "the", "dict", "passed", "in", "with", "any", "complex", "child", "types", "instantiated", "appropriately", "." ]
python
train
34.946809
datacats/datacats
datacats/environment.py
https://github.com/datacats/datacats/blob/e4bae503efa997660fb3f34fe166699569653157/datacats/environment.py#L496-L566
def _run_web_container(self, port, command, address, log_syslog=False, datapusher=True, interactive=False): """ Start web container on port with command """ if is_boot2docker(): ro = {} volumes_from = self._get_container_name('venv') else: ro = {self.datadir + '/venv': '/usr/lib/ckan'} volumes_from = None links = { self._get_container_name('solr'): 'solr', self._get_container_name('postgres'): 'db' } links.update({self._get_container_name(container): container for container in self.extra_containers}) if datapusher: if 'datapusher' not in self.containers_running(): raise DatacatsError(container_logs(self._get_container_name('datapusher'), "all", False, False)) links[self._get_container_name('datapusher')] = 'datapusher' ro = dict({ self.target: '/project/', scripts.get_script_path('web.sh'): '/scripts/web.sh', scripts.get_script_path('adjust_devini.py'): '/scripts/adjust_devini.py'}, **ro) rw = { self.sitedir + '/files': '/var/www/storage', self.sitedir + '/run/development.ini': '/project/development.ini' } try: if not interactive: run_container( name=self._get_container_name('web'), image='datacats/web', rw=rw, ro=ro, links=links, volumes_from=volumes_from, command=command, port_bindings={ 5000: port if is_boot2docker() else (address, port)}, log_syslog=log_syslog ) else: # FIXME: share more code with interactive_shell if is_boot2docker(): switches = ['--volumes-from', self._get_container_name('pgdata'), '--volumes-from', self._get_container_name('venv')] else: switches = [] switches += ['--volume={}:{}:ro'.format(vol, ro[vol]) for vol in ro] switches += ['--volume={}:{}'.format(vol, rw[vol]) for vol in rw] links = ['--link={}:{}'.format(link, links[link]) for link in links] args = ['docker', 'run', '-it', '--name', self._get_container_name('web'), '-p', '{}:5000'.format(port) if is_boot2docker() else '{}:{}:5000'.format(address, port)] + \ switches + links + ['datacats/web', ] + command subprocess.call(args) except APIError as e: if '409' in str(e): raise DatacatsError('Web container already running. ' 'Please stop_web before running.') else: raise
[ "def", "_run_web_container", "(", "self", ",", "port", ",", "command", ",", "address", ",", "log_syslog", "=", "False", ",", "datapusher", "=", "True", ",", "interactive", "=", "False", ")", ":", "if", "is_boot2docker", "(", ")", ":", "ro", "=", "{", "}", "volumes_from", "=", "self", ".", "_get_container_name", "(", "'venv'", ")", "else", ":", "ro", "=", "{", "self", ".", "datadir", "+", "'/venv'", ":", "'/usr/lib/ckan'", "}", "volumes_from", "=", "None", "links", "=", "{", "self", ".", "_get_container_name", "(", "'solr'", ")", ":", "'solr'", ",", "self", ".", "_get_container_name", "(", "'postgres'", ")", ":", "'db'", "}", "links", ".", "update", "(", "{", "self", ".", "_get_container_name", "(", "container", ")", ":", "container", "for", "container", "in", "self", ".", "extra_containers", "}", ")", "if", "datapusher", ":", "if", "'datapusher'", "not", "in", "self", ".", "containers_running", "(", ")", ":", "raise", "DatacatsError", "(", "container_logs", "(", "self", ".", "_get_container_name", "(", "'datapusher'", ")", ",", "\"all\"", ",", "False", ",", "False", ")", ")", "links", "[", "self", ".", "_get_container_name", "(", "'datapusher'", ")", "]", "=", "'datapusher'", "ro", "=", "dict", "(", "{", "self", ".", "target", ":", "'/project/'", ",", "scripts", ".", "get_script_path", "(", "'web.sh'", ")", ":", "'/scripts/web.sh'", ",", "scripts", ".", "get_script_path", "(", "'adjust_devini.py'", ")", ":", "'/scripts/adjust_devini.py'", "}", ",", "*", "*", "ro", ")", "rw", "=", "{", "self", ".", "sitedir", "+", "'/files'", ":", "'/var/www/storage'", ",", "self", ".", "sitedir", "+", "'/run/development.ini'", ":", "'/project/development.ini'", "}", "try", ":", "if", "not", "interactive", ":", "run_container", "(", "name", "=", "self", ".", "_get_container_name", "(", "'web'", ")", ",", "image", "=", "'datacats/web'", ",", "rw", "=", "rw", ",", "ro", "=", "ro", ",", "links", "=", "links", ",", "volumes_from", "=", "volumes_from", ",", "command", "=", "command", ",", "port_bindings", "=", "{", "5000", ":", "port", "if", "is_boot2docker", "(", ")", "else", "(", "address", ",", "port", ")", "}", ",", "log_syslog", "=", "log_syslog", ")", "else", ":", "# FIXME: share more code with interactive_shell", "if", "is_boot2docker", "(", ")", ":", "switches", "=", "[", "'--volumes-from'", ",", "self", ".", "_get_container_name", "(", "'pgdata'", ")", ",", "'--volumes-from'", ",", "self", ".", "_get_container_name", "(", "'venv'", ")", "]", "else", ":", "switches", "=", "[", "]", "switches", "+=", "[", "'--volume={}:{}:ro'", ".", "format", "(", "vol", ",", "ro", "[", "vol", "]", ")", "for", "vol", "in", "ro", "]", "switches", "+=", "[", "'--volume={}:{}'", ".", "format", "(", "vol", ",", "rw", "[", "vol", "]", ")", "for", "vol", "in", "rw", "]", "links", "=", "[", "'--link={}:{}'", ".", "format", "(", "link", ",", "links", "[", "link", "]", ")", "for", "link", "in", "links", "]", "args", "=", "[", "'docker'", ",", "'run'", ",", "'-it'", ",", "'--name'", ",", "self", ".", "_get_container_name", "(", "'web'", ")", ",", "'-p'", ",", "'{}:5000'", ".", "format", "(", "port", ")", "if", "is_boot2docker", "(", ")", "else", "'{}:{}:5000'", ".", "format", "(", "address", ",", "port", ")", "]", "+", "switches", "+", "links", "+", "[", "'datacats/web'", ",", "]", "+", "command", "subprocess", ".", "call", "(", "args", ")", "except", "APIError", "as", "e", ":", "if", "'409'", "in", "str", "(", "e", ")", ":", "raise", "DatacatsError", "(", "'Web container already running. 
'", "'Please stop_web before running.'", ")", "else", ":", "raise" ]
Start web container on port with command
[ "Start", "web", "container", "on", "port", "with", "command" ]
python
train
43.647887
quantopian/zipline
zipline/utils/cache.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/cache.py#L358-L368
def ensure_dir(self, *path_parts): """Ensures a subdirectory of the working directory. Parameters ---------- path_parts : iterable[str] The parts of the path after the working directory. """ path = self.getpath(*path_parts) ensure_directory(path) return path
[ "def", "ensure_dir", "(", "self", ",", "*", "path_parts", ")", ":", "path", "=", "self", ".", "getpath", "(", "*", "path_parts", ")", "ensure_directory", "(", "path", ")", "return", "path" ]
Ensures a subdirectory of the working directory. Parameters ---------- path_parts : iterable[str] The parts of the path after the working directory.
[ "Ensures", "a", "subdirectory", "of", "the", "working", "directory", "." ]
python
train
29.545455
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L10725-L10749
def position_target_global_int_encode(self, time_boot_ms, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate): ''' Reports the current commanded vehicle position, velocity, and acceleration as specified by the autopilot. This should match the commands sent in SET_POSITION_TARGET_GLOBAL_INT if the vehicle is being controlled this way. time_boot_ms : Timestamp in milliseconds since system boot. The rationale for the timestamp in the setpoint is to allow the system to compensate for the transport delay of the setpoint. This allows the system to compensate processing latency. (uint32_t) coordinate_frame : Valid options are: MAV_FRAME_GLOBAL_INT = 5, MAV_FRAME_GLOBAL_RELATIVE_ALT_INT = 6, MAV_FRAME_GLOBAL_TERRAIN_ALT_INT = 11 (uint8_t) type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t) lat_int : X Position in WGS84 frame in 1e7 * meters (int32_t) lon_int : Y Position in WGS84 frame in 1e7 * meters (int32_t) alt : Altitude in meters in AMSL altitude, not WGS84 if absolute or relative, above terrain if GLOBAL_TERRAIN_ALT_INT (float) vx : X velocity in NED frame in meter / s (float) vy : Y velocity in NED frame in meter / s (float) vz : Z velocity in NED frame in meter / s (float) afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) yaw : yaw setpoint in rad (float) yaw_rate : yaw rate setpoint in rad/s (float) ''' return MAVLink_position_target_global_int_message(time_boot_ms, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate)
[ "def", "position_target_global_int_encode", "(", "self", ",", "time_boot_ms", ",", "coordinate_frame", ",", "type_mask", ",", "lat_int", ",", "lon_int", ",", "alt", ",", "vx", ",", "vy", ",", "vz", ",", "afx", ",", "afy", ",", "afz", ",", "yaw", ",", "yaw_rate", ")", ":", "return", "MAVLink_position_target_global_int_message", "(", "time_boot_ms", ",", "coordinate_frame", ",", "type_mask", ",", "lat_int", ",", "lon_int", ",", "alt", ",", "vx", ",", "vy", ",", "vz", ",", "afx", ",", "afy", ",", "afz", ",", "yaw", ",", "yaw_rate", ")" ]
Reports the current commanded vehicle position, velocity, and acceleration as specified by the autopilot. This should match the commands sent in SET_POSITION_TARGET_GLOBAL_INT if the vehicle is being controlled this way. time_boot_ms : Timestamp in milliseconds since system boot. The rationale for the timestamp in the setpoint is to allow the system to compensate for the transport delay of the setpoint. This allows the system to compensate processing latency. (uint32_t) coordinate_frame : Valid options are: MAV_FRAME_GLOBAL_INT = 5, MAV_FRAME_GLOBAL_RELATIVE_ALT_INT = 6, MAV_FRAME_GLOBAL_TERRAIN_ALT_INT = 11 (uint8_t) type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t) lat_int : X Position in WGS84 frame in 1e7 * meters (int32_t) lon_int : Y Position in WGS84 frame in 1e7 * meters (int32_t) alt : Altitude in meters in AMSL altitude, not WGS84 if absolute or relative, above terrain if GLOBAL_TERRAIN_ALT_INT (float) vx : X velocity in NED frame in meter / s (float) vy : Y velocity in NED frame in meter / s (float) vz : Z velocity in NED frame in meter / s (float) afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) yaw : yaw setpoint in rad (float) yaw_rate : yaw rate setpoint in rad/s (float)
[ "Reports", "the", "current", "commanded", "vehicle", "position", "velocity", "and", "acceleration", "as", "specified", "by", "the", "autopilot", ".", "This", "should", "match", "the", "commands", "sent", "in", "SET_POSITION_TARGET_GLOBAL_INT", "if", "the", "vehicle", "is", "being", "controlled", "this", "way", "." ]
python
train
112.12
pytorch/vision
torchvision/models/inception.py
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/models/inception.py#L19-L49
def inception_v3(pretrained=False, **kwargs): r"""Inception v3 model architecture from `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_. .. note:: **Important**: In contrast to the other models the inception_v3 expects tensors with a size of N x 3 x 299 x 299, so ensure your images are sized accordingly. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet aux_logits (bool): If True, add an auxiliary branch that can improve training. Default: *True* transform_input (bool): If True, preprocesses the input according to the method with which it was trained on ImageNet. Default: *False* """ if pretrained: if 'transform_input' not in kwargs: kwargs['transform_input'] = True if 'aux_logits' in kwargs: original_aux_logits = kwargs['aux_logits'] kwargs['aux_logits'] = True else: original_aux_logits = True model = Inception3(**kwargs) model.load_state_dict(model_zoo.load_url(model_urls['inception_v3_google'])) if not original_aux_logits: model.aux_logits = False del model.AuxLogits return model return Inception3(**kwargs)
[ "def", "inception_v3", "(", "pretrained", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "pretrained", ":", "if", "'transform_input'", "not", "in", "kwargs", ":", "kwargs", "[", "'transform_input'", "]", "=", "True", "if", "'aux_logits'", "in", "kwargs", ":", "original_aux_logits", "=", "kwargs", "[", "'aux_logits'", "]", "kwargs", "[", "'aux_logits'", "]", "=", "True", "else", ":", "original_aux_logits", "=", "True", "model", "=", "Inception3", "(", "*", "*", "kwargs", ")", "model", ".", "load_state_dict", "(", "model_zoo", ".", "load_url", "(", "model_urls", "[", "'inception_v3_google'", "]", ")", ")", "if", "not", "original_aux_logits", ":", "model", ".", "aux_logits", "=", "False", "del", "model", ".", "AuxLogits", "return", "model", "return", "Inception3", "(", "*", "*", "kwargs", ")" ]
r"""Inception v3 model architecture from `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_. .. note:: **Important**: In contrast to the other models the inception_v3 expects tensors with a size of N x 3 x 299 x 299, so ensure your images are sized accordingly. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet aux_logits (bool): If True, add an auxiliary branch that can improve training. Default: *True* transform_input (bool): If True, preprocesses the input according to the method with which it was trained on ImageNet. Default: *False*
[ "r", "Inception", "v3", "model", "architecture", "from", "Rethinking", "the", "Inception", "Architecture", "for", "Computer", "Vision", "<http", ":", "//", "arxiv", ".", "org", "/", "abs", "/", "1512", ".", "00567", ">", "_", "." ]
python
test
41.612903
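A hedged usage sketch of the constructor documented above: it builds the architecture without downloading weights and only illustrates the 299x299 input requirement, assuming a torchvision version contemporary with the code above (which still accepts the pretrained keyword).

import torch
from torchvision.models import inception_v3

model = inception_v3(pretrained=False, aux_logits=True)
model.eval()  # in eval mode only the main logits are returned

# As the docstring stresses, inputs must be N x 3 x 299 x 299.
dummy = torch.randn(1, 3, 299, 299)
with torch.no_grad():
    logits = model(dummy)
print(logits.shape)  # torch.Size([1, 1000])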
koalalorenzo/python-digitalocean
digitalocean/Manager.py
https://github.com/koalalorenzo/python-digitalocean/blob/d0221b57856fb1e131cafecf99d826f7b07a947c/digitalocean/Manager.py#L266-L273
def get_load_balancer(self, id): """ Returns a Load Balancer object by its ID. Args: id (str): Load Balancer ID """ return LoadBalancer.get_object(api_token=self.token, id=id)
[ "def", "get_load_balancer", "(", "self", ",", "id", ")", ":", "return", "LoadBalancer", ".", "get_object", "(", "api_token", "=", "self", ".", "token", ",", "id", "=", "id", ")" ]
Returns a Load Balancer object by its ID. Args: id (str): Load Balancer ID
[ "Returns", "a", "Load", "Balancer", "object", "by", "its", "ID", "." ]
python
valid
29.125
hvac/hvac
hvac/api/system_backend/audit.py
https://github.com/hvac/hvac/blob/cce5b86889193f622c2a72a4a1b7e1c9c8aff1ce/hvac/api/system_backend/audit.py#L24-L59
def enable_audit_device(self, device_type, description=None, options=None, path=None): """Enable a new audit device at the supplied path. The path can be a single word name or a more complex, nested path. Supported methods: PUT: /sys/audit/{path}. Produces: 204 (empty body) :param device_type: Specifies the type of the audit device. :type device_type: str | unicode :param description: Human-friendly description of the audit device. :type description: str | unicode :param options: Configuration options to pass to the audit device itself. This is dependent on the audit device type. :type options: str | unicode :param path: Specifies the path in which to enable the audit device. This is part of the request URL. :type path: str | unicode :return: The response of the request. :rtype: requests.Response """ if path is None: path = device_type params = { 'type': device_type, 'description': description, 'options': options, } api_path = '/v1/sys/audit/{path}'.format(path=path) return self._adapter.post( url=api_path, json=params )
[ "def", "enable_audit_device", "(", "self", ",", "device_type", ",", "description", "=", "None", ",", "options", "=", "None", ",", "path", "=", "None", ")", ":", "if", "path", "is", "None", ":", "path", "=", "device_type", "params", "=", "{", "'type'", ":", "device_type", ",", "'description'", ":", "description", ",", "'options'", ":", "options", ",", "}", "api_path", "=", "'/v1/sys/audit/{path}'", ".", "format", "(", "path", "=", "path", ")", "return", "self", ".", "_adapter", ".", "post", "(", "url", "=", "api_path", ",", "json", "=", "params", ")" ]
Enable a new audit device at the supplied path. The path can be a single word name or a more complex, nested path. Supported methods: PUT: /sys/audit/{path}. Produces: 204 (empty body) :param device_type: Specifies the type of the audit device. :type device_type: str | unicode :param description: Human-friendly description of the audit device. :type description: str | unicode :param options: Configuration options to pass to the audit device itself. This is dependent on the audit device type. :type options: str | unicode :param path: Specifies the path in which to enable the audit device. This is part of the request URL. :type path: str | unicode :return: The response of the request. :rtype: requests.Response
[ "Enable", "a", "new", "audit", "device", "at", "the", "supplied", "path", "." ]
python
train
35.416667
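How this system-backend method is typically reached through an hvac client; the Vault address, token and file path below are placeholders, and options is shown as a dict even though the signature above documents it as a string.

import hvac

client = hvac.Client(url='http://127.0.0.1:8200', token='example-root-token')

# PUT /sys/audit/file -- enables a file audit device at the default path.
client.sys.enable_audit_device(
    device_type='file',
    description='example file audit log',
    options={'file_path': '/var/log/vault_audit.log'},
)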
jason-weirather/py-seq-tools
seqtools/range/multi.py
https://github.com/jason-weirather/py-seq-tools/blob/f642c2c73ffef2acc83656a78059a476fc734ca1/seqtools/range/multi.py#L7-L16
def sort_ranges(inranges): """from an array of ranges, make a sorted array of ranges :param inranges: List of GenomicRange data :type inranges: GenomicRange[] :returns: a new sorted GenomicRange list :rtype: GenomicRange[] """ return sorted(inranges,key=lambda x: (x.chr,x.start,x.end,x.direction))
[ "def", "sort_ranges", "(", "inranges", ")", ":", "return", "sorted", "(", "inranges", ",", "key", "=", "lambda", "x", ":", "(", "x", ".", "chr", ",", "x", ".", "start", ",", "x", ".", "end", ",", "x", ".", "direction", ")", ")" ]
from an array of ranges, make a sorted array of ranges :param inranges: List of GenomicRange data :type inranges: GenomicRange[] :returns: a new sorted GenomicRange list :rtype: GenomicRange[]
[ "from", "an", "array", "of", "ranges", "make", "a", "sorted", "array", "of", "ranges" ]
python
train
30.5
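The sort key above only needs chr/start/end/direction attributes; a namedtuple stand-in (hypothetical, not the real GenomicRange class) shows the resulting order.

from collections import namedtuple

GRange = namedtuple('GRange', ['chr', 'start', 'end', 'direction'])

ranges = [GRange('chr2', 50, 80, '+'),
          GRange('chr1', 200, 250, '-'),
          GRange('chr1', 10, 90, '+')]
ordered = sorted(ranges, key=lambda x: (x.chr, x.start, x.end, x.direction))
print([(r.chr, r.start) for r in ordered])
# [('chr1', 10), ('chr1', 200), ('chr2', 50)]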
MozillaSecurity/laniakea
laniakea/__init__.py
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/__init__.py#L111-L161
def main(cls): """Main entry point of Laniakea. """ args = cls.parse_args() if args.focus: Focus.init() else: Focus.disable() logging.basicConfig(format='[Laniakea] %(asctime)s %(levelname)s: %(message)s', level=args.verbosity * 10, datefmt='%Y-%m-%d %H:%M:%S') # Laniakea base configuration logger.info('Loading Laniakea configuration from %s', Focus.data(args.settings.name)) try: settings = json.loads(args.settings.read()) except ValueError as msg: logger.error('Unable to parse %s: %s', args.settings.name, msg) return 1 # UserData userdata = '' if args.userdata: logger.info('Reading user data script content from %s', Focus.info(args.userdata.name)) try: userdata = UserData.handle_import_tags(args.userdata.read(), os.path.dirname(args.userdata.name)) except UserDataException as msg: logging.error(msg) return 1 if args.list_userdata_macros: UserData.list_tags(userdata) return 0 if args.userdata_macros: args.userdata_macros = UserData.convert_pair_to_dict(args.userdata_macros or '') userdata = UserData.handle_tags(userdata, args.userdata_macros) if args.print_userdata: logger.info('Combined UserData script:\n%s', userdata) return 0 if args.provider: provider = getattr(globals()[args.provider], args.provider.title() + 'CommandLine') provider().main(args, settings, userdata) return 0
[ "def", "main", "(", "cls", ")", ":", "args", "=", "cls", ".", "parse_args", "(", ")", "if", "args", ".", "focus", ":", "Focus", ".", "init", "(", ")", "else", ":", "Focus", ".", "disable", "(", ")", "logging", ".", "basicConfig", "(", "format", "=", "'[Laniakea] %(asctime)s %(levelname)s: %(message)s'", ",", "level", "=", "args", ".", "verbosity", "*", "10", ",", "datefmt", "=", "'%Y-%m-%d %H:%M:%S'", ")", "# Laniakea base configuration", "logger", ".", "info", "(", "'Loading Laniakea configuration from %s'", ",", "Focus", ".", "data", "(", "args", ".", "settings", ".", "name", ")", ")", "try", ":", "settings", "=", "json", ".", "loads", "(", "args", ".", "settings", ".", "read", "(", ")", ")", "except", "ValueError", "as", "msg", ":", "logger", ".", "error", "(", "'Unable to parse %s: %s'", ",", "args", ".", "settings", ".", "name", ",", "msg", ")", "return", "1", "# UserData", "userdata", "=", "''", "if", "args", ".", "userdata", ":", "logger", ".", "info", "(", "'Reading user data script content from %s'", ",", "Focus", ".", "info", "(", "args", ".", "userdata", ".", "name", ")", ")", "try", ":", "userdata", "=", "UserData", ".", "handle_import_tags", "(", "args", ".", "userdata", ".", "read", "(", ")", ",", "os", ".", "path", ".", "dirname", "(", "args", ".", "userdata", ".", "name", ")", ")", "except", "UserDataException", "as", "msg", ":", "logging", ".", "error", "(", "msg", ")", "return", "1", "if", "args", ".", "list_userdata_macros", ":", "UserData", ".", "list_tags", "(", "userdata", ")", "return", "0", "if", "args", ".", "userdata_macros", ":", "args", ".", "userdata_macros", "=", "UserData", ".", "convert_pair_to_dict", "(", "args", ".", "userdata_macros", "or", "''", ")", "userdata", "=", "UserData", ".", "handle_tags", "(", "userdata", ",", "args", ".", "userdata_macros", ")", "if", "args", ".", "print_userdata", ":", "logger", ".", "info", "(", "'Combined UserData script:\\n%s'", ",", "userdata", ")", "return", "0", "if", "args", ".", "provider", ":", "provider", "=", "getattr", "(", "globals", "(", ")", "[", "args", ".", "provider", "]", ",", "args", ".", "provider", ".", "title", "(", ")", "+", "'CommandLine'", ")", "provider", "(", ")", ".", "main", "(", "args", ",", "settings", ",", "userdata", ")", "return", "0" ]
Main entry point of Laniakea.
[ "Main", "entry", "point", "of", "Laniakea", "." ]
python
train
34.392157
IdentityPython/SATOSA
src/satosa/micro_services/consent.py
https://github.com/IdentityPython/SATOSA/blob/49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb/src/satosa/micro_services/consent.py#L106-L141
def process(self, context, internal_response): """ Manage consent and attribute filtering :type context: satosa.context.Context :type internal_response: satosa.internal.InternalData :rtype: satosa.response.Response :param context: response context :param internal_response: the response :return: response """ consent_state = context.state[STATE_KEY] internal_response.attributes = self._filter_attributes(internal_response.attributes, consent_state["filter"]) id_hash = self._get_consent_id(internal_response.requester, internal_response.subject_id, internal_response.attributes) try: # Check if consent is already given consent_attributes = self._verify_consent(id_hash) except requests.exceptions.ConnectionError as e: satosa_logging(logger, logging.ERROR, "Consent service is not reachable, no consent given.", context.state) # Send an internal_response without any attributes internal_response.attributes = {} return self._end_consent(context, internal_response) # Previous consent was given if consent_attributes is not None: satosa_logging(logger, logging.DEBUG, "Previous consent was given", context.state) internal_response.attributes = self._filter_attributes(internal_response.attributes, consent_attributes) return self._end_consent(context, internal_response) # No previous consent, request consent by user return self._approve_new_consent(context, internal_response, id_hash)
[ "def", "process", "(", "self", ",", "context", ",", "internal_response", ")", ":", "consent_state", "=", "context", ".", "state", "[", "STATE_KEY", "]", "internal_response", ".", "attributes", "=", "self", ".", "_filter_attributes", "(", "internal_response", ".", "attributes", ",", "consent_state", "[", "\"filter\"", "]", ")", "id_hash", "=", "self", ".", "_get_consent_id", "(", "internal_response", ".", "requester", ",", "internal_response", ".", "subject_id", ",", "internal_response", ".", "attributes", ")", "try", ":", "# Check if consent is already given", "consent_attributes", "=", "self", ".", "_verify_consent", "(", "id_hash", ")", "except", "requests", ".", "exceptions", ".", "ConnectionError", "as", "e", ":", "satosa_logging", "(", "logger", ",", "logging", ".", "ERROR", ",", "\"Consent service is not reachable, no consent given.\"", ",", "context", ".", "state", ")", "# Send an internal_response without any attributes", "internal_response", ".", "attributes", "=", "{", "}", "return", "self", ".", "_end_consent", "(", "context", ",", "internal_response", ")", "# Previous consent was given", "if", "consent_attributes", "is", "not", "None", ":", "satosa_logging", "(", "logger", ",", "logging", ".", "DEBUG", ",", "\"Previous consent was given\"", ",", "context", ".", "state", ")", "internal_response", ".", "attributes", "=", "self", ".", "_filter_attributes", "(", "internal_response", ".", "attributes", ",", "consent_attributes", ")", "return", "self", ".", "_end_consent", "(", "context", ",", "internal_response", ")", "# No previous consent, request consent by user", "return", "self", ".", "_approve_new_consent", "(", "context", ",", "internal_response", ",", "id_hash", ")" ]
Manage consent and attribute filtering :type context: satosa.context.Context :type internal_response: satosa.internal.InternalData :rtype: satosa.response.Response :param context: response context :param internal_response: the response :return: response
[ "Manage", "consent", "and", "attribute", "filtering" ]
python
train
46.583333
neherlab/treetime
treetime/treeanc.py
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/treeanc.py#L181-L193
def gtr(self, value): """ Set a new GTR object Parameters ----------- value : GTR the new GTR object """ if not (isinstance(value, GTR) or isinstance(value, GTR_site_specific)): raise TypeError(" GTR instance expected") self._gtr = value
[ "def", "gtr", "(", "self", ",", "value", ")", ":", "if", "not", "(", "isinstance", "(", "value", ",", "GTR", ")", "or", "isinstance", "(", "value", ",", "GTR_site_specific", ")", ")", ":", "raise", "TypeError", "(", "\" GTR instance expected\"", ")", "self", ".", "_gtr", "=", "value" ]
Set a new GTR object Parameters ----------- value : GTR the new GTR object
[ "Set", "a", "new", "GTR", "object" ]
python
test
24.307692
gnosis/gnosis-py
gnosis/eth/ethereum_client.py
https://github.com/gnosis/gnosis-py/blob/2a9a5d75a375fc9813ac04df133e6910c82f9d49/gnosis/eth/ethereum_client.py#L518-L543
def send_eth_to(self, private_key: str, to: str, gas_price: int, value: int, gas: int=22000, retry: bool = False, block_identifier=None, max_eth_to_send: int = 0) -> bytes: """ Send ether using configured account :param to: to :param gas_price: gas_price :param value: value(wei) :param gas: gas, defaults to 22000 :param retry: Retry if a problem is found :param block_identifier: None default, 'pending' not confirmed txs :return: tx_hash """ assert check_checksum(to) if max_eth_to_send and value > self.w3.toWei(max_eth_to_send, 'ether'): raise EtherLimitExceeded('%d is bigger than %f' % (value, max_eth_to_send)) tx = { 'to': to, 'value': value, 'gas': gas, 'gasPrice': gas_price, } return self.send_unsigned_transaction(tx, private_key=private_key, retry=retry, block_identifier=block_identifier)
[ "def", "send_eth_to", "(", "self", ",", "private_key", ":", "str", ",", "to", ":", "str", ",", "gas_price", ":", "int", ",", "value", ":", "int", ",", "gas", ":", "int", "=", "22000", ",", "retry", ":", "bool", "=", "False", ",", "block_identifier", "=", "None", ",", "max_eth_to_send", ":", "int", "=", "0", ")", "->", "bytes", ":", "assert", "check_checksum", "(", "to", ")", "if", "max_eth_to_send", "and", "value", ">", "self", ".", "w3", ".", "toWei", "(", "max_eth_to_send", ",", "'ether'", ")", ":", "raise", "EtherLimitExceeded", "(", "'%d is bigger than %f'", "%", "(", "value", ",", "max_eth_to_send", ")", ")", "tx", "=", "{", "'to'", ":", "to", ",", "'value'", ":", "value", ",", "'gas'", ":", "gas", ",", "'gasPrice'", ":", "gas_price", ",", "}", "return", "self", ".", "send_unsigned_transaction", "(", "tx", ",", "private_key", "=", "private_key", ",", "retry", "=", "retry", ",", "block_identifier", "=", "block_identifier", ")" ]
Send ether using configured account :param to: to :param gas_price: gas_price :param value: value(wei) :param gas: gas, defaults to 22000 :param retry: Retry if a problem is found :param block_identifier: None default, 'pending' not confirmed txs :return: tx_hash
[ "Send", "ether", "using", "configured", "account", ":", "param", "to", ":", "to", ":", "param", "gas_price", ":", "gas_price", ":", "param", "value", ":", "value", "(", "wei", ")", ":", "param", "gas", ":", "gas", "defaults", "to", "22000", ":", "param", "retry", ":", "Retry", "if", "a", "problem", "is", "found", ":", "param", "block_identifier", ":", "None", "default", "pending", "not", "confirmed", "txs", ":", "return", ":", "tx_hash" ]
python
test
39.5
threeML/astromodels
astromodels/core/parameter.py
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/core/parameter.py#L325-L346
def in_unit_of(self, unit, as_quantity=False): """ Return the current value transformed to the new units :param unit: either an astropy.Unit instance, or a string which can be converted to an astropy.Unit instance, like "1 / (erg cm**2 s)" :param as_quantity: if True, the method return an astropy.Quantity, if False just a floating point number. Default is False :return: either a floating point or a astropy.Quantity depending on the value of "as_quantity" """ new_unit = u.Unit(unit) new_quantity = self.as_quantity.to(new_unit) if as_quantity: return new_quantity else: return new_quantity.value
[ "def", "in_unit_of", "(", "self", ",", "unit", ",", "as_quantity", "=", "False", ")", ":", "new_unit", "=", "u", ".", "Unit", "(", "unit", ")", "new_quantity", "=", "self", ".", "as_quantity", ".", "to", "(", "new_unit", ")", "if", "as_quantity", ":", "return", "new_quantity", "else", ":", "return", "new_quantity", ".", "value" ]
Return the current value transformed to the new units :param unit: either an astropy.Unit instance, or a string which can be converted to an astropy.Unit instance, like "1 / (erg cm**2 s)" :param as_quantity: if True, the method return an astropy.Quantity, if False just a floating point number. Default is False :return: either a floating point or a astropy.Quantity depending on the value of "as_quantity"
[ "Return", "the", "current", "value", "transformed", "to", "the", "new", "units" ]
python
train
32.409091
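Under the hood this is a plain astropy unit conversion; a minimal sketch of both return modes, assuming astropy is available (the quantity itself is invented).

import astropy.units as u

quantity = 2.5 * u.keV
converted = quantity.to(u.Unit('erg'))
print(converted)        # the as_quantity=True branch: an astropy Quantity
print(converted.value)  # the as_quantity=False branch: a plain float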
scour-project/scour
scour/scour.py
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L2685-L2690
def serializeTransform(transformObj): """ Reserializes the transform data with some cleanups. """ return ' '.join([command + '(' + ' '.join([scourUnitlessLength(number) for number in numbers]) + ')' for command, numbers in transformObj])
[ "def", "serializeTransform", "(", "transformObj", ")", ":", "return", "' '", ".", "join", "(", "[", "command", "+", "'('", "+", "' '", ".", "join", "(", "[", "scourUnitlessLength", "(", "number", ")", "for", "number", "in", "numbers", "]", ")", "+", "')'", "for", "command", ",", "numbers", "in", "transformObj", "]", ")" ]
Reserializes the transform data with some cleanups.
[ "Reserializes", "the", "transform", "data", "with", "some", "cleanups", "." ]
python
train
45.333333
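A small sketch of the input/output shape the one-liner above handles, with str() standing in for scourUnitlessLength purely for illustration.

def serialize_transform_demo(transform_obj):
    # transform_obj is a list of (command, numbers) pairs, as in scour.
    return ' '.join(cmd + '(' + ' '.join(str(n) for n in numbers) + ')'
                    for cmd, numbers in transform_obj)

print(serialize_transform_demo([('translate', [10, 20]), ('rotate', [45])]))
# translate(10 20) rotate(45)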
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L1092-L1096
def department_update(self, department_id, data, **kwargs): "https://developer.zendesk.com/rest_api/docs/chat/departments#update-department-by-id" api_path = "/api/v2/departments/{department_id}" api_path = api_path.format(department_id=department_id) return self.call(api_path, method="PUT", data=data, **kwargs)
[ "def", "department_update", "(", "self", ",", "department_id", ",", "data", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/departments/{department_id}\"", "api_path", "=", "api_path", ".", "format", "(", "department_id", "=", "department_id", ")", "return", "self", ".", "call", "(", "api_path", ",", "method", "=", "\"PUT\"", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/chat/departments#update-department-by-id
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "chat", "/", "departments#update", "-", "department", "-", "by", "-", "id" ]
python
train
68.2
saltstack/salt
salt/daemons/masterapi.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L698-L706
def _mine_flush(self, load, skip_verify=False):
    '''
    Allow the minion to delete all of its own mine contents
    '''
    if not skip_verify and 'id' not in load:
        return False
    if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
        return self.cache.flush('minions/{0}'.format(load['id']), 'mine')
    return True
[ "def", "_mine_flush", "(", "self", ",", "load", ",", "skip_verify", "=", "False", ")", ":", "if", "not", "skip_verify", "and", "'id'", "not", "in", "load", ":", "return", "False", "if", "self", ".", "opts", ".", "get", "(", "'minion_data_cache'", ",", "False", ")", "or", "self", ".", "opts", ".", "get", "(", "'enforce_mine_cache'", ",", "False", ")", ":", "return", "self", ".", "cache", ".", "flush", "(", "'minions/{0}'", ".", "format", "(", "load", "[", "'id'", "]", ")", ",", "'mine'", ")", "return", "True" ]
Allow the minion to delete all of its own mine contents
[ "Allow", "the", "minion", "to", "delete", "all", "of", "its", "own", "mine", "contents" ]
python
train
44.333333
hyperledger/indy-plenum
plenum/common/messages/client_request.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/common/messages/client_request.py#L78-L94
def validate(self, dct):
    """
    Choose a schema for client request operation and validate
    the operation field. If the schema is not found, skips validation.

    :param dct: an operation field from client request
    :return: raises exception if invalid request
    """
    if not isinstance(dct, dict):
        # TODO this check should be inside of the validator, not here
        self._raise_invalid_fields('', dct, 'wrong type')

    txn_type = dct.get(TXN_TYPE)
    if txn_type is None:
        self._raise_missed_fields(TXN_TYPE)
    if txn_type in self.operations:  # check only if the schema is defined
        op = self.operations[txn_type]
        op.validate(dct)
[ "def", "validate", "(", "self", ",", "dct", ")", ":", "if", "not", "isinstance", "(", "dct", ",", "dict", ")", ":", "# TODO this check should be in side of the validator not here", "self", ".", "_raise_invalid_fields", "(", "''", ",", "dct", ",", "'wrong type'", ")", "txn_type", "=", "dct", ".", "get", "(", "TXN_TYPE", ")", "if", "txn_type", "is", "None", ":", "self", ".", "_raise_missed_fields", "(", "TXN_TYPE", ")", "if", "txn_type", "in", "self", ".", "operations", ":", "# check only if the schema is defined", "op", "=", "self", ".", "operations", "[", "txn_type", "]", "op", ".", "validate", "(", "dct", ")" ]
Choose a schema for client request operation and validate
the operation field. If the schema is not found, skips validation.

:param dct: an operation field from client request
:return: raises exception if invalid request
[ "Choose", "a", "schema", "for", "client", "request", "operation", "and", "validate", "the", "operation", "field", ".", "If", "the", "schema", "is", "not", "found", "skips", "validation", ".", ":", "param", "dct", ":", "an", "operation", "field", "from", "client", "request", ":", "return", ":", "raises", "exception", "if", "invalid", "request" ]
python
train
43.176471
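A toy, self-contained sketch of the same dispatch-by-transaction-type pattern. The constant value, schema class and field names below are made up for illustration and are not plenum's real ones:

TXN_TYPE = 'type'  # placeholder constant

class NymOperation:
    """Pretend per-type schema with its own validate()."""
    def validate(self, dct):
        if 'dest' not in dct:
            raise ValueError("missing field 'dest'")

operations = {'1': NymOperation()}          # registered schemas, keyed by txn type

def validate_operation(dct):
    if not isinstance(dct, dict):
        raise TypeError('operation must be a dict')
    if TXN_TYPE not in dct:
        raise KeyError(TXN_TYPE)
    if dct[TXN_TYPE] in operations:         # unknown types skip validation, as in the method above
        operations[dct[TXN_TYPE]].validate(dct)

validate_operation({TXN_TYPE: '1', 'dest': 'some-identifier'})   # passes
validate_operation({TXN_TYPE: '999'})                            # unknown type, silently accepted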
Brightmd/TxPx
txpx/process.py
https://github.com/Brightmd/TxPx/blob/403c18b3006fc68842ec05b259e8611fe80763aa/txpx/process.py#L114-L125
def outLineReceived(self, line):
    """
    Handle data via stdout linewise. This is useful if you turned off
    buffering.

    In your subclass, override this if you want to handle the line as a
    protocol line in addition to logging it. (You may upcall this
    function safely.)
    """
    log_debug('<<< {name} stdout >>> {line}',
              name=self.name,
              line=self.outFilter(line))
[ "def", "outLineReceived", "(", "self", ",", "line", ")", ":", "log_debug", "(", "'<<< {name} stdout >>> {line}'", ",", "name", "=", "self", ".", "name", ",", "line", "=", "self", ".", "outFilter", "(", "line", ")", ")" ]
Handle data via stdout linewise. This is useful if you turned off buffering. In your subclass, override this if you want to handle the line as a protocol line in addition to logging it. (You may upcall this function safely.)
[ "Handle", "data", "via", "stdout", "linewise", ".", "This", "is", "useful", "if", "you", "turned", "off", "buffering", "." ]
python
train
36.416667
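A small sketch of the suggested override-and-upcall pattern. LoggingBase below is a stub standing in for the txpx class above, not the real one, and the STATUS handling is hypothetical:

class LoggingBase:
    name = 'demo'
    def outFilter(self, line):
        return line
    def outLineReceived(self, line):
        print('<<< {} stdout >>> {}'.format(self.name, self.outFilter(line)))

class BuildMonitor(LoggingBase):
    def outLineReceived(self, line):
        if line.startswith('STATUS'):
            pass  # hypothetical protocol handling would go here
        super().outLineReceived(line)  # safe upcall keeps the logging behaviour

BuildMonitor().outLineReceived('STATUS ok')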
tokibito/django-ftpserver
django_ftpserver/authorizers.py
https://github.com/tokibito/django-ftpserver/blob/18cf9f6645df9c2d9c5188bf21e74c188d55de47/django_ftpserver/authorizers.py#L57-L65
def validate_authentication(self, username, password, handler):
    """authenticate user with password
    """
    user = authenticate(
        **{self.username_field: username, 'password': password}
    )
    account = self.get_account(username)
    if not (user and account):
        raise AuthenticationFailed("Authentication failed.")
[ "def", "validate_authentication", "(", "self", ",", "username", ",", "password", ",", "handler", ")", ":", "user", "=", "authenticate", "(", "*", "*", "{", "self", ".", "username_field", ":", "username", ",", "'password'", ":", "password", "}", ")", "account", "=", "self", ".", "get_account", "(", "username", ")", "if", "not", "(", "user", "and", "account", ")", ":", "raise", "AuthenticationFailed", "(", "\"Authentication failed.\"", ")" ]
authenticate user with password
[ "authenticate", "user", "with", "password" ]
python
train
40.222222
BD2KGenomics/protect
src/protect/pipeline/ProTECT.py
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L957-L971
def get_patient_expression(job, patient_dict):
    """
    Convenience function to get the expression from the patient dict

    :param dict patient_dict: dict of patient info
    :return: The gene and isoform expression
    :rtype: toil.fileStore.FileID
    """
    expression_archive = job.fileStore.readGlobalFile(patient_dict['expression_files'])
    expression_archive = untargz(expression_archive, os.getcwd())
    output_dict = {}
    for filename in 'rsem.genes.results', 'rsem.isoforms.results':
        output_dict[filename] = job.fileStore.writeGlobalFile(os.path.join(expression_archive, filename))
    return output_dict
[ "def", "get_patient_expression", "(", "job", ",", "patient_dict", ")", ":", "expression_archive", "=", "job", ".", "fileStore", ".", "readGlobalFile", "(", "patient_dict", "[", "'expression_files'", "]", ")", "expression_archive", "=", "untargz", "(", "expression_archive", ",", "os", ".", "getcwd", "(", ")", ")", "output_dict", "=", "{", "}", "for", "filename", "in", "'rsem.genes.results'", ",", "'rsem.isoforms.results'", ":", "output_dict", "[", "filename", "]", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "os", ".", "path", ".", "join", "(", "expression_archive", ",", "filename", ")", ")", "return", "output_dict" ]
Convenience function to get the expression from the patient dict

:param dict patient_dict: dict of patient info
:return: The gene and isoform expression
:rtype: toil.fileStore.FileID
[ "Convenience", "function", "to", "get", "the", "expression", "from", "the", "patient", "dict" ]
python
train
46.266667
zyga/python-phablet
phablet.py
https://github.com/zyga/python-phablet/blob/c281045dfb8b55dd2888e1efe9631f72ffc77ac8/phablet.py#L165-L187
def run(self, cmd, timeout=None, key=None):
    """
    Run a command on the phablet device using ssh

    :param cmd: a list of strings to execute as a command
    :param timeout: a timeout (in seconds) for device discovery
    :param key: a path to a public ssh key to use for connection
    :returns: the exit code of the command

    This method will not allow you to capture stdout/stderr from the
    target process. If you wish to do that please consider switching to
    one of the subprocess functions along with :meth:`cmdline()`.
    """
    if not isinstance(cmd, list):
        raise TypeError("cmd needs to be a list")
    if not all(isinstance(item, str) for item in cmd):
        raise TypeError("cmd needs to be a list of strings")
    self.connect(timeout, key)
    return self._run_ssh(cmd)
[ "def", "run", "(", "self", ",", "cmd", ",", "timeout", "=", "None", ",", "key", "=", "None", ")", ":", "if", "not", "isinstance", "(", "cmd", ",", "list", ")", ":", "raise", "TypeError", "(", "\"cmd needs to be a list\"", ")", "if", "not", "all", "(", "isinstance", "(", "item", ",", "str", ")", "for", "item", "in", "cmd", ")", ":", "raise", "TypeError", "(", "\"cmd needs to be a list of strings\"", ")", "self", ".", "connect", "(", "timeout", ",", "key", ")", "return", "self", ".", "_run_ssh", "(", "cmd", ")" ]
Run a command on the phablet device using ssh

:param cmd: a list of strings to execute as a command
:param timeout: a timeout (in seconds) for device discovery
:param key: a path to a public ssh key to use for connection
:returns: the exit code of the command

This method will not allow you to capture stdout/stderr from the
target process. If you wish to do that please consider switching to
one of the subprocess functions along with :meth:`cmdline()`.
[ "Run", "a", "command", "on", "the", "phablet", "device", "using", "ssh" ]
python
train
38.956522
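A hypothetical usage sketch; the Phablet class name and the import path are assumptions based on the repository layout, and a provisioned device must be reachable for the call to succeed:

from phablet import Phablet   # assumed module/class names, not confirmed by the record

exit_code = Phablet().run(['uptime'], timeout=30)
print('remote command exited with', exit_code)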
xmikos/soapy_power
soapypower/psd.py
https://github.com/xmikos/soapy_power/blob/46e12659b8d08af764dc09a1f31b0e85a68f808f/soapypower/psd.py#L46-L62
def result(self, psd_state):
    """Return freqs and averaged PSD for given center frequency"""
    freq_array = numpy.fft.fftshift(psd_state['freq_array'])
    pwr_array = numpy.fft.fftshift(psd_state['pwr_array'])

    if self._crop_factor:
        crop_bins_half = round((self._crop_factor * self._bins) / 2)
        freq_array = freq_array[crop_bins_half:-crop_bins_half]
        pwr_array = pwr_array[crop_bins_half:-crop_bins_half]

    if psd_state['repeats'] > 1:
        pwr_array = pwr_array / psd_state['repeats']

    if self._log_scale:
        pwr_array = 10 * numpy.log10(pwr_array)

    return (freq_array, pwr_array)
[ "def", "result", "(", "self", ",", "psd_state", ")", ":", "freq_array", "=", "numpy", ".", "fft", ".", "fftshift", "(", "psd_state", "[", "'freq_array'", "]", ")", "pwr_array", "=", "numpy", ".", "fft", ".", "fftshift", "(", "psd_state", "[", "'pwr_array'", "]", ")", "if", "self", ".", "_crop_factor", ":", "crop_bins_half", "=", "round", "(", "(", "self", ".", "_crop_factor", "*", "self", ".", "_bins", ")", "/", "2", ")", "freq_array", "=", "freq_array", "[", "crop_bins_half", ":", "-", "crop_bins_half", "]", "pwr_array", "=", "pwr_array", "[", "crop_bins_half", ":", "-", "crop_bins_half", "]", "if", "psd_state", "[", "'repeats'", "]", ">", "1", ":", "pwr_array", "=", "pwr_array", "/", "psd_state", "[", "'repeats'", "]", "if", "self", ".", "_log_scale", ":", "pwr_array", "=", "10", "*", "numpy", ".", "log10", "(", "pwr_array", ")", "return", "(", "freq_array", ",", "pwr_array", ")" ]
Return freqs and averaged PSD for given center frequency
[ "Return", "freqs", "and", "averaged", "PSD", "for", "given", "center", "frequency" ]
python
test
39.117647
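A standalone numpy sketch of the post-processing steps above (fftshift, averaging over repeats, dB conversion) on toy data, assuming numpy is available; it does not use soapy_power itself and the numbers are arbitrary:

import numpy as np

repeats = 4
pwr_accum = np.array([4.0, 8.0, 16.0, 8.0])                       # toy accumulated PSD bins
freqs = np.fft.fftshift(np.fft.fftfreq(pwr_accum.size, d=1e-6))   # toy bin frequencies

pwr = np.fft.fftshift(pwr_accum) / repeats    # average over repeated sweeps
pwr_db = 10 * np.log10(pwr)                   # optional log scale, as in the _log_scale branch
print(freqs, pwr_db)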
mushkevych/scheduler
synergy/scheduler/tree.py
https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/scheduler/tree.py#L216-L221
def validate(self):
    """ method starts validation of the tree.
        @see TreeNode.validate """
    for timeperiod, child in self.root.children.items():
        child.validate()
    self.validation_timestamp = datetime.utcnow()
[ "def", "validate", "(", "self", ")", ":", "for", "timeperiod", ",", "child", "in", "self", ".", "root", ".", "children", ".", "items", "(", ")", ":", "child", ".", "validate", "(", ")", "self", ".", "validation_timestamp", "=", "datetime", ".", "utcnow", "(", ")" ]
method starts validation of the tree. @see TreeNode.validate
[ "method", "starts", "validation", "of", "the", "tree", "." ]
python
train
41.166667
luismsgomes/stringology
src/stringology/align.py
https://github.com/luismsgomes/stringology/blob/c627dc5a0d4c6af10946040a6463d5495d39d960/src/stringology/align.py#L47-L81
def mismatches(s1, s2, context=0, eq=operator.eq):
    '''extract mismatched segments from aligned strings

    >>> list(mismatches(*align('pharmacy', 'farmácia'), context=1))
    [('pha', ' fa'), ('mac', 'mác'), ('c y', 'cia')]
    >>> list(mismatches(*align('constitution', 'constituição'), context=1))
    [('ution', 'uição')]
    >>> list(mismatches(*align('idea', 'ideia'), context=1))
    [('e a', 'eia')]
    >>> list(mismatches(*align('instructed', 'instruído'), context=1))
    [('ucted', 'u ído')]
    >>> list(mismatches(*align('concluded', 'concluído'), context=1))
    [('uded', 'uído')]
    '''
    n = len(s1)
    assert(len(s2) == n)
    lct, rct = context, context if isinstance(context, int) else context
    i = None
    for j in range(n):
        if eq(s1[j], s2[j]):
            if i is not None:
                # report mismatch segment [i:j] with lct chars of left context
                # and rct chars of right context
                p, q = max(0, i-lct), min(j+rct, n)
                yield s1[p:q], s2[p:q]
                i = None
        elif i is None:
            i = j
    if i is not None:
        p = max(i-lct, 0)
        yield s1[p:], s2[p:]
[ "def", "mismatches", "(", "s1", ",", "s2", ",", "context", "=", "0", ",", "eq", "=", "operator", ".", "eq", ")", ":", "n", "=", "len", "(", "s1", ")", "assert", "(", "len", "(", "s2", ")", "==", "n", ")", "lct", ",", "rct", "=", "context", ",", "context", "if", "isinstance", "(", "context", ",", "int", ")", "else", "context", "i", "=", "None", "for", "j", "in", "range", "(", "n", ")", ":", "if", "eq", "(", "s1", "[", "j", "]", ",", "s2", "[", "j", "]", ")", ":", "if", "i", "is", "not", "None", ":", "# report mismatch segment [i:j] with lct chars of left context", "# and rct chars of right context", "p", ",", "q", "=", "max", "(", "0", ",", "i", "-", "lct", ")", ",", "min", "(", "j", "+", "rct", ",", "n", ")", "yield", "s1", "[", "p", ":", "q", "]", ",", "s2", "[", "p", ":", "q", "]", "i", "=", "None", "elif", "i", "is", "None", ":", "i", "=", "j", "if", "i", "is", "not", "None", ":", "p", "=", "max", "(", "i", "-", "lct", ",", "0", ")", "yield", "s1", "[", "p", ":", "]", ",", "s2", "[", "p", ":", "]" ]
extract mismatched segments from aligned strings

>>> list(mismatches(*align('pharmacy', 'farmácia'), context=1))
[('pha', ' fa'), ('mac', 'mác'), ('c y', 'cia')]
>>> list(mismatches(*align('constitution', 'constituição'), context=1))
[('ution', 'uição')]
>>> list(mismatches(*align('idea', 'ideia'), context=1))
[('e a', 'eia')]
>>> list(mismatches(*align('instructed', 'instruído'), context=1))
[('ucted', 'u ído')]
>>> list(mismatches(*align('concluded', 'concluído'), context=1))
[('uded', 'uído')]
[ "extract", "mismatched", "segments", "from", "aligned", "strings" ]
python
train
32.971429
lambdamusic/Ontospy
ontospy/core/utils.py
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/utils.py#L413-L440
def inferMainPropertyType(uriref):
    """
    Attempt to reduce the property types to 4 main types
    (without the OWL ontology - which would be the proper way)

    In [3]: for x in g.all_properties:
       ...:     print x.rdftype
       ...:
    http://www.w3.org/2002/07/owl#FunctionalProperty
    http://www.w3.org/2002/07/owl#FunctionalProperty
    http://www.w3.org/2002/07/owl#InverseFunctionalProperty
    http://www.w3.org/2002/07/owl#ObjectProperty
    http://www.w3.org/2002/07/owl#ObjectProperty
    http://www.w3.org/2002/07/owl#TransitiveProperty
    http://www.w3.org/2002/07/owl#TransitiveProperty
    etc.....
    """
    if uriref:
        if uriref == rdflib.OWL.DatatypeProperty:
            return uriref
        elif uriref == rdflib.OWL.AnnotationProperty:
            return uriref
        elif uriref == rdflib.RDF.Property:
            return uriref
        else:
            # hack..
            return rdflib.OWL.ObjectProperty
    else:
        return None
[ "def", "inferMainPropertyType", "(", "uriref", ")", ":", "if", "uriref", ":", "if", "uriref", "==", "rdflib", ".", "OWL", ".", "DatatypeProperty", ":", "return", "uriref", "elif", "uriref", "==", "rdflib", ".", "OWL", ".", "AnnotationProperty", ":", "return", "uriref", "elif", "uriref", "==", "rdflib", ".", "RDF", ".", "Property", ":", "return", "uriref", "else", ":", "# hack..", "return", "rdflib", ".", "OWL", ".", "ObjectProperty", "else", ":", "return", "None" ]
Attempt to reduce the property types to 4 main types
(without the OWL ontology - which would be the proper way)

In [3]: for x in g.all_properties:
   ...:     print x.rdftype
   ...:
http://www.w3.org/2002/07/owl#FunctionalProperty
http://www.w3.org/2002/07/owl#FunctionalProperty
http://www.w3.org/2002/07/owl#InverseFunctionalProperty
http://www.w3.org/2002/07/owl#ObjectProperty
http://www.w3.org/2002/07/owl#ObjectProperty
http://www.w3.org/2002/07/owl#TransitiveProperty
http://www.w3.org/2002/07/owl#TransitiveProperty
etc.....
[ "Attempt", "to", "reduce", "the", "property", "types", "to", "4", "main", "types", "(", "without", "the", "OWL", "ontology", "-", "which", "would", "be", "the", "propert", "way", ")" ]
python
train
33.857143
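A hedged usage sketch, assuming rdflib is installed and that the function is importable from ontospy.core.utils as the record's path suggests:

import rdflib
from ontospy.core.utils import inferMainPropertyType   # import path assumed from the record

print(inferMainPropertyType(rdflib.OWL.DatatypeProperty))    # returned unchanged
print(inferMainPropertyType(rdflib.OWL.TransitiveProperty))  # collapsed to owl:ObjectProperty
print(inferMainPropertyType(None))                           # None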
pantsbuild/pex
pex/third_party/__init__.py
https://github.com/pantsbuild/pex/blob/87b2129d860250d3b9edce75b9cb62f9789ee521/pex/third_party/__init__.py#L415-L431
def expose(dists):
    """Exposes vendored code in isolated chroots.

    Any vendored distributions listed in ``dists`` will be unpacked to individual chroots for
    addition to the ``sys.path``; ie: ``expose(['setuptools', 'wheel'])`` will unpack these
    vendored distributions and yield the two chroot paths they were unpacked to.

    :param dists: A list of vendored distribution names to expose.
    :type dists: list of str
    :raise: :class:`ValueError` if any distributions to expose cannot be found.
    :returns: An iterator of exposed vendored distribution chroot paths.
    """
    from pex.common import safe_delete
    for path in VendorImporter.expose(dists, root=isolated()):
        safe_delete(os.path.join(path, '__init__.py'))
        yield path
[ "def", "expose", "(", "dists", ")", ":", "from", "pex", ".", "common", "import", "safe_delete", "for", "path", "in", "VendorImporter", ".", "expose", "(", "dists", ",", "root", "=", "isolated", "(", ")", ")", ":", "safe_delete", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'__init__.py'", ")", ")", "yield", "path" ]
Exposes vendored code in isolated chroots.

Any vendored distributions listed in ``dists`` will be unpacked to individual chroots for
addition to the ``sys.path``; ie: ``expose(['setuptools', 'wheel'])`` will unpack these
vendored distributions and yield the two chroot paths they were unpacked to.

:param dists: A list of vendored distribution names to expose.
:type dists: list of str
:raise: :class:`ValueError` if any distributions to expose cannot be found.
:returns: An iterator of exposed vendored distribution chroot paths.
[ "Exposes", "vendored", "code", "in", "isolated", "chroots", "." ]
python
train
42.647059
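A usage sketch built directly from the docstring's own example; it assumes it runs inside a pex environment where the vendored distributions exist, and the import path is taken from the record's path field:

import sys
from pex.third_party import expose   # import path assumed from pex/third_party/__init__.py

for chroot in expose(['setuptools', 'wheel']):
    sys.path.append(chroot)          # make the vendored distributions importable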
ArchiveTeam/wpull
wpull/processor/web.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/processor/web.py#L360-L368
def _should_fetch_reason_with_robots(self, request: Request) -> Tuple[bool, str]:
    '''Return info whether the URL should be fetched including checking
    robots.txt.

    Coroutine.
    '''
    result = yield from \
        self._fetch_rule.check_initial_web_request(self._item_session, request)
    return result
[ "def", "_should_fetch_reason_with_robots", "(", "self", ",", "request", ":", "Request", ")", "->", "Tuple", "[", "bool", ",", "str", "]", ":", "result", "=", "yield", "from", "self", ".", "_fetch_rule", ".", "check_initial_web_request", "(", "self", ".", "_item_session", ",", "request", ")", "return", "result" ]
Return info whether the URL should be fetched including checking robots.txt. Coroutine.
[ "Return", "info", "whether", "the", "URL", "should", "be", "fetched", "including", "checking", "robots", ".", "txt", "." ]
python
train
37.444444