Column schema (name, type, min/max string length):

nwo                 stringlengths    5 .. 106
sha                 stringlengths   40 .. 40
path                stringlengths    4 .. 174
language            stringclasses    1 value
identifier          stringlengths    1 .. 140
parameters          stringlengths    0 .. 87.7k
argument_list       stringclasses    1 value
return_statement    stringlengths    0 .. 426k
docstring           stringlengths    0 .. 64.3k
docstring_summary   stringlengths    0 .. 26.3k
docstring_tokens    list
function            stringlengths   18 .. 4.83M
function_tokens     list
url                 stringlengths   83 .. 304
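The records below list these fields one per line, with empty fields omitted or shown as []. As a minimal sketch of consuming such a dump programmatically, assuming the records are serialized as JSON Lines with the column names above as keys (the filename functions.jsonl is hypothetical):

import json

# Hypothetical filename; the dump's actual serialization isn't stated,
# JSON Lines (one record per line) is assumed here.
with open("functions.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        record = json.loads(line)
        # The columns listed above become the record's keys.
        print(record["nwo"], record["path"], record["identifier"])
        print(record["docstring_summary"])
        break  # inspect just the first record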
andresriancho/w3af
cd22e5252243a87aaa6d0ddea47cf58dacfe00a9
w3af/core/data/parsers/doc/sgml.py
python
SGMLParser.get_references
(self)
return self.references
[]
def get_references(self): return self.references
[ "def", "get_references", "(", "self", ")", ":", "return", "self", ".", "references" ]
https://github.com/andresriancho/w3af/blob/cd22e5252243a87aaa6d0ddea47cf58dacfe00a9/w3af/core/data/parsers/doc/sgml.py#L473-L474
robinhood/faust
01b4c0ad8390221db71751d80001b0fd879291e2
faust/types/settings/settings.py
python
Settings.stream_ack_cancelled_tasks
(self)
Deprecated setting has no effect.
Deprecated setting has no effect.
[ "Deprecated", "setting", "has", "no", "effect", "." ]
def stream_ack_cancelled_tasks(self) -> bool:
    """Deprecated setting has no effect."""
[ "def", "stream_ack_cancelled_tasks", "(", "self", ")", "->", "bool", ":" ]
https://github.com/robinhood/faust/blob/01b4c0ad8390221db71751d80001b0fd879291e2/faust/types/settings/settings.py#L2315-L2316
AppScale/gts
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
AppServer/google/appengine/ext/ndb/key.py
python
Key.get_async
(self, **ctx_options)
return fut
Return a Future whose result is the entity for this Key. If no such entity exists, a Future is still returned, and the Future's eventual result will be None.
Return a Future whose result is the entity for this Key.
[ "Return", "a", "Future", "whose", "result", "is", "the", "entity", "for", "this", "Key", "." ]
def get_async(self, **ctx_options):
    """Return a Future whose result is the entity for this Key.

    If no such entity exists, a Future is still returned, and the
    Future's eventual result will be None.
    """
    from . import model, tasklets
    ctx = tasklets.get_context()
    cls = model.Model._kind_map.get(self.kind())
    if cls:
        cls._pre_get_hook(self)
    fut = ctx.get(self, **ctx_options)
    if cls:
        post_hook = cls._post_get_hook
        if not cls._is_default_hook(model.Model._default_post_get_hook,
                                    post_hook):
            fut.add_immediate_callback(post_hook, self, fut)
    return fut
[ "def", "get_async", "(", "self", ",", "*", "*", "ctx_options", ")", ":", "from", ".", "import", "model", ",", "tasklets", "ctx", "=", "tasklets", ".", "get_context", "(", ")", "cls", "=", "model", ".", "Model", ".", "_kind_map", ".", "get", "(", "self", ".", "kind", "(", ")", ")", "if", "cls", ":", "cls", ".", "_pre_get_hook", "(", "self", ")", "fut", "=", "ctx", ".", "get", "(", "self", ",", "*", "*", "ctx_options", ")", "if", "cls", ":", "post_hook", "=", "cls", ".", "_post_get_hook", "if", "not", "cls", ".", "_is_default_hook", "(", "model", ".", "Model", ".", "_default_post_get_hook", ",", "post_hook", ")", ":", "fut", ".", "add_immediate_callback", "(", "post_hook", ",", "self", ",", "fut", ")", "return", "fut" ]
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/google/appengine/ext/ndb/key.py#L534-L551
dhconnelly/paip-python
e6784004bea1d16c90a4ca75c798ae31e6ab698a
paip/examples/search/pathfinding.py
python
print_map
(map)
Pretty-prints the given map to standard output.
Pretty-prints the given map to standard output.
[ "Pretty", "-", "prints", "the", "given", "map", "to", "standard", "output", "." ]
def print_map(map):
    """Pretty-prints the given map to standard output."""
    print '-' * (2 * len(map) + 3)
    for row in map:
        print '|',
        for col in row:
            print '%s' % (col if col == 1 or col == 'X' else ' '),
        print '|'
    print '-' * (2 * len(map) + 3)
[ "def", "print_map", "(", "map", ")", ":", "print", "'-'", "*", "(", "2", "*", "len", "(", "map", ")", "+", "3", ")", "for", "row", "in", "map", ":", "print", "'|'", ",", "for", "col", "in", "row", ":", "print", "'%s'", "%", "(", "col", "if", "col", "==", "1", "or", "col", "==", "'X'", "else", "' '", ")", ",", "print", "'|'", "print", "'-'", "*", "(", "2", "*", "len", "(", "map", ")", "+", "3", ")" ]
https://github.com/dhconnelly/paip-python/blob/e6784004bea1d16c90a4ca75c798ae31e6ab698a/paip/examples/search/pathfinding.py#L35-L43
MeanEYE/Sunflower
1024bbdde3b8e202ddad3553b321a7b6230bffc9
sunflower/gui/input_dialog.py
python
CreateToolbarWidgetDialog.set_transient_for
(self, window)
Set dialog window transiency
Set dialog window transiency
[ "Set", "dialog", "window", "transiency" ]
def set_transient_for(self, window):
    """Set dialog window transiency"""
    self._dialog.set_transient_for(window)
[ "def", "set_transient_for", "(", "self", ",", "window", ")", ":", "self", ".", "_dialog", ".", "set_transient_for", "(", "window", ")" ]
https://github.com/MeanEYE/Sunflower/blob/1024bbdde3b8e202ddad3553b321a7b6230bffc9/sunflower/gui/input_dialog.py#L1589-L1591
janpipek/physt
e7bce911532fac5f96e4e2d54881152e7e668a41
physt/histogram_base.py
python
HistogramBase._apply_bin_map
( self, old_frequencies: np.ndarray, new_frequencies: np.ndarray, old_errors2: np.ndarray, new_errors2: np.ndarray, bin_map: Union[Iterable[Tuple[int, int]], int], axis: int, )
Fill new data arrays using a map. Parameters ---------- old_frequencies : Source of frequencies data new_frequencies : Target of frequencies data old_errors2 : Source of errors data new_errors2 : Target of errors data bin_map: Iterable[(old, new)] or int or None As in _reshape_data axis: On which axis to apply See also -------- HistogramBase._reshape_data
Fill new data arrays using a map.
[ "Fill", "new", "data", "arrays", "using", "a", "map", "." ]
def _apply_bin_map(
    self,
    old_frequencies: np.ndarray,
    new_frequencies: np.ndarray,
    old_errors2: np.ndarray,
    new_errors2: np.ndarray,
    bin_map: Union[Iterable[Tuple[int, int]], int],
    axis: int,
):
    """Fill new data arrays using a map.

    Parameters
    ----------
    old_frequencies : Source of frequencies data
    new_frequencies : Target of frequencies data
    old_errors2 : Source of errors data
    new_errors2 : Target of errors data
    bin_map: Iterable[(old, new)] or int or None
        As in _reshape_data
    axis: On which axis to apply

    See also
    --------
    HistogramBase._reshape_data
    """
    if old_frequencies is not None and old_frequencies.shape[axis] > 0:
        if isinstance(bin_map, int):
            new_index: List[Union[int, slice]] = [slice(None) for i in range(self.ndim)]
            new_index[axis] = slice(bin_map, bin_map + old_frequencies.shape[axis])
            new_frequencies[tuple(new_index)] += old_frequencies
            new_errors2[tuple(new_index)] += old_errors2
        else:
            for (old, new) in bin_map:  # Generic enough
                new_index = [slice(None) for i in range(self.ndim)]
                new_index[axis] = new
                old_index: List[Union[int, slice]] = [slice(None) for i in range(self.ndim)]
                old_index[axis] = old
                new_frequencies[tuple(new_index)] += old_frequencies[tuple(old_index)]
                new_errors2[tuple(new_index)] += old_errors2[tuple(old_index)]
[ "def", "_apply_bin_map", "(", "self", ",", "old_frequencies", ":", "np", ".", "ndarray", ",", "new_frequencies", ":", "np", ".", "ndarray", ",", "old_errors2", ":", "np", ".", "ndarray", ",", "new_errors2", ":", "np", ".", "ndarray", ",", "bin_map", ":", "Union", "[", "Iterable", "[", "Tuple", "[", "int", ",", "int", "]", "]", ",", "int", "]", ",", "axis", ":", "int", ",", ")", ":", "if", "old_frequencies", "is", "not", "None", "and", "old_frequencies", ".", "shape", "[", "axis", "]", ">", "0", ":", "if", "isinstance", "(", "bin_map", ",", "int", ")", ":", "new_index", ":", "List", "[", "Union", "[", "int", ",", "slice", "]", "]", "=", "[", "slice", "(", "None", ")", "for", "i", "in", "range", "(", "self", ".", "ndim", ")", "]", "new_index", "[", "axis", "]", "=", "slice", "(", "bin_map", ",", "bin_map", "+", "old_frequencies", ".", "shape", "[", "axis", "]", ")", "new_frequencies", "[", "tuple", "(", "new_index", ")", "]", "+=", "old_frequencies", "new_errors2", "[", "tuple", "(", "new_index", ")", "]", "+=", "old_errors2", "else", ":", "for", "(", "old", ",", "new", ")", "in", "bin_map", ":", "# Generic enough", "new_index", "=", "[", "slice", "(", "None", ")", "for", "i", "in", "range", "(", "self", ".", "ndim", ")", "]", "new_index", "[", "axis", "]", "=", "new", "old_index", ":", "List", "[", "Union", "[", "int", ",", "slice", "]", "]", "=", "[", "slice", "(", "None", ")", "for", "i", "in", "range", "(", "self", ".", "ndim", ")", "]", "old_index", "[", "axis", "]", "=", "old", "new_frequencies", "[", "tuple", "(", "new_index", ")", "]", "+=", "old_frequencies", "[", "tuple", "(", "old_index", ")", "]", "new_errors2", "[", "tuple", "(", "new_index", ")", "]", "+=", "old_errors2", "[", "tuple", "(", "old_index", ")", "]" ]
https://github.com/janpipek/physt/blob/e7bce911532fac5f96e4e2d54881152e7e668a41/physt/histogram_base.py#L584-L622
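The axis-generic indexing that _apply_bin_map relies on, building an index of slice(None) entries and then narrowing a single position, can be shown in isolation. A minimal sketch with made-up shapes and values:

import numpy as np

arr = np.zeros((3, 4))
axis, position = 1, 2
index = [slice(None)] * arr.ndim   # selects everything: like arr[:, :]
index[axis] = position             # narrow one axis: like arr[:, 2]
arr[tuple(index)] += 1.0           # increments column 2 of every row
print(arr)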
krintoxi/NoobSec-Toolkit
38738541cbc03cedb9a3b3ed13b629f781ad64f6
NoobSecToolkit /tools/sqli/plugins/dbms/firebird/syntax.py
python
Syntax.__init__
(self)
[]
def __init__(self): GenericSyntax.__init__(self)
[ "def", "__init__", "(", "self", ")", ":", "GenericSyntax", ".", "__init__", "(", "self", ")" ]
https://github.com/krintoxi/NoobSec-Toolkit/blob/38738541cbc03cedb9a3b3ed13b629f781ad64f6/NoobSecToolkit /tools/sqli/plugins/dbms/firebird/syntax.py#L13-L14
Kozea/WeasyPrint
6cce2978165134e37683cb5b3d156cac6a11a7f9
weasyprint/css/validation/properties.py
python
footnote_display
(keyword)
return keyword in ('block', 'inline', 'compact')
Validation for ``footnote-display``.
Validation for ``footnote-display``.
[ "Validation", "for", "footnote", "-", "display", "." ]
def footnote_display(keyword):
    """Validation for ``footnote-display``."""
    return keyword in ('block', 'inline', 'compact')
[ "def", "footnote_display", "(", "keyword", ")", ":", "return", "keyword", "in", "(", "'block'", ",", "'inline'", ",", "'compact'", ")" ]
https://github.com/Kozea/WeasyPrint/blob/6cce2978165134e37683cb5b3d156cac6a11a7f9/weasyprint/css/validation/properties.py#L1469-L1471
pandaproject/panda
133baa47882a289773a30c9656e2ea4efe569387
panda/models/task_status.py
python
TaskStatus.complete
(self, message)
Mark that task has completed.
Mark that task has completed.
[ "Mark", "that", "task", "has", "completed", "." ]
def complete(self, message):
    """
    Mark that task has completed.
    """
    self.status = 'SUCCESS'
    self.end = now()
    self.message = message
    self.save()
[ "def", "complete", "(", "self", ",", "message", ")", ":", "self", ".", "status", "=", "'SUCCESS'", "self", ".", "end", "=", "now", "(", ")", "self", ".", "message", "=", "message", "self", ".", "save", "(", ")" ]
https://github.com/pandaproject/panda/blob/133baa47882a289773a30c9656e2ea4efe569387/panda/models/task_status.py#L85-L92
mozillazg/pypy
2ff5cd960c075c991389f842c6d59e71cf0cb7d0
lib-python/2.7/urllib.py
python
toBytes
(url)
return url
toBytes(u"URL") --> 'URL'.
toBytes(u"URL") --> 'URL'.
[ "toBytes", "(", "u", "URL", ")", "--", ">", "URL", "." ]
def toBytes(url):
    """toBytes(u"URL") --> 'URL'."""
    # Most URL schemes require ASCII. If that changes, the conversion
    # can be relaxed
    if _is_unicode(url):
        try:
            url = url.encode("ASCII")
        except UnicodeError:
            raise UnicodeError("URL " + repr(url) +
                               " contains non-ASCII characters")
    return url
[ "def", "toBytes", "(", "url", ")", ":", "# Most URL schemes require ASCII. If that changes, the conversion", "# can be relaxed", "if", "_is_unicode", "(", "url", ")", ":", "try", ":", "url", "=", "url", ".", "encode", "(", "\"ASCII\"", ")", "except", "UnicodeError", ":", "raise", "UnicodeError", "(", "\"URL \"", "+", "repr", "(", "url", ")", "+", "\" contains non-ASCII characters\"", ")", "return", "url" ]
https://github.com/mozillazg/pypy/blob/2ff5cd960c075c991389f842c6d59e71cf0cb7d0/lib-python/2.7/urllib.py#L1065-L1075
HymanLiuTS/flaskTs
286648286976e85d9b9a5873632331efcafe0b21
flasky/lib/python2.7/site-packages/sqlalchemy/sql/elements.py
python
outparam
(key, type_=None)
return BindParameter( key, None, type_=type_, unique=False, isoutparam=True)
Create an 'OUT' parameter for usage in functions (stored procedures), for databases which support them. The ``outparam`` can be used like a regular function parameter. The "output" value will be available from the :class:`~sqlalchemy.engine.ResultProxy` object via its ``out_parameters`` attribute, which returns a dictionary containing the values.
Create an 'OUT' parameter for usage in functions (stored procedures), for databases which support them.
[ "Create", "an", "OUT", "parameter", "for", "usage", "in", "functions", "(", "stored", "procedures", ")", "for", "databases", "which", "support", "them", "." ]
def outparam(key, type_=None):
    """Create an 'OUT' parameter for usage in functions (stored procedures),
    for databases which support them.

    The ``outparam`` can be used like a regular function parameter.
    The "output" value will be available from the
    :class:`~sqlalchemy.engine.ResultProxy` object via its ``out_parameters``
    attribute, which returns a dictionary containing the values.

    """
    return BindParameter(
        key, None, type_=type_, unique=False, isoutparam=True)
[ "def", "outparam", "(", "key", ",", "type_", "=", "None", ")", ":", "return", "BindParameter", "(", "key", ",", "None", ",", "type_", "=", "type_", ",", "unique", "=", "False", ",", "isoutparam", "=", "True", ")" ]
https://github.com/HymanLiuTS/flaskTs/blob/286648286976e85d9b9a5873632331efcafe0b21/flasky/lib/python2.7/site-packages/sqlalchemy/sql/elements.py#L129-L140
FSecureLABS/Jandroid
e31d0dab58a2bfd6ed8e0a387172b8bd7c893436
libs/platform-tools/platform-tools_windows/systrace/catapult/devil/devil/base_error.py
python
BaseError.is_infra_error
(self)
return self._is_infra_error
Property to indicate if error was caused by an infrastructure issue.
Property to indicate if error was caused by an infrastructure issue.
[ "Property", "to", "indicate", "if", "error", "was", "caused", "by", "an", "infrastructure", "issue", "." ]
def is_infra_error(self):
    """Property to indicate if error was caused by an infrastructure issue."""
    return self._is_infra_error
[ "def", "is_infra_error", "(", "self", ")", ":", "return", "self", ".", "_is_infra_error" ]
https://github.com/FSecureLABS/Jandroid/blob/e31d0dab58a2bfd6ed8e0a387172b8bd7c893436/libs/platform-tools/platform-tools_windows/systrace/catapult/devil/devil/base_error.py#L21-L23
jotyGill/openpyn-nordvpn
74a2c4d8529f0a2ca138bbb0b338da02f3233d8c
openpyn/converter.py
python
Converter.set_certs_folder
(self, certs_output)
Sets the destination folder for the certificates
Sets the destination folder for the certificates
[ "Sets", "the", "destination", "folder", "for", "the", "certificates" ]
def set_certs_folder(self, certs_output):
    """Sets the destination folder for the certificates"""
    if not certs_output or not os.path.isdir(certs_output):
        raise RuntimeError("Please specify a valid path for the certificates.")
    self._certs_folder = certs_output
[ "def", "set_certs_folder", "(", "self", ",", "certs_output", ")", ":", "if", "not", "certs_output", "or", "not", "os", ".", "path", ".", "isdir", "(", "certs_output", ")", ":", "raise", "RuntimeError", "(", "\"Please specify a valid path for the certificates.\"", ")", "self", ".", "_certs_folder", "=", "certs_output" ]
https://github.com/jotyGill/openpyn-nordvpn/blob/74a2c4d8529f0a2ca138bbb0b338da02f3233d8c/openpyn/converter.py#L125-L130
heynemann/pyccuracy
0bbe3bcff4d13a6501bf77d5af9457f6a1491ab6
pyccuracy/airspeed.py
python
CachingFileLoader.load_template
(self, name)
return template
[]
def load_template(self, name):
    if self.debugging:
        print "Loading template...", name,
    mtime = os.path.getmtime(self.filename_of(name))
    if self.known_templates.has_key(name):
        template, prev_mtime = self.known_templates[name]
        if mtime <= prev_mtime:
            if self.debugging:
                print "loading parsed template from cache"
            return template
    if self.debugging:
        print "loading text from disk"
    template = Template(self.load_text(name))
    template.ensure_compiled()
    self.known_templates[name] = (template, mtime)
    return template
[ "def", "load_template", "(", "self", ",", "name", ")", ":", "if", "self", ".", "debugging", ":", "print", "\"Loading template...\"", ",", "name", ",", "mtime", "=", "os", ".", "path", ".", "getmtime", "(", "self", ".", "filename_of", "(", "name", ")", ")", "if", "self", ".", "known_templates", ".", "has_key", "(", "name", ")", ":", "template", ",", "prev_mtime", "=", "self", ".", "known_templates", "[", "name", "]", "if", "mtime", "<=", "prev_mtime", ":", "if", "self", ".", "debugging", ":", "print", "\"loading parsed template from cache\"", "return", "template", "if", "self", ".", "debugging", ":", "print", "\"loading text from disk\"", "template", "=", "Template", "(", "self", ".", "load_text", "(", "name", ")", ")", "template", ".", "ensure_compiled", "(", ")", "self", ".", "known_templates", "[", "name", "]", "=", "(", "template", ",", "mtime", ")", "return", "template" ]
https://github.com/heynemann/pyccuracy/blob/0bbe3bcff4d13a6501bf77d5af9457f6a1491ab6/pyccuracy/airspeed.py#L120-L132
jimgoo/zipline-tensorboard
2c1aa45569dfa9506ff9689c2decb56e9da21403
olmar_tensorboard.py
python
simplex_projection
(v, b=1)
return w
Projection vectors to the simplex domain Implemented according to the paper: Efficient projections onto the l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008. Implementation Time: 2011 June 17 by Bin@libin AT pmail.ntu.edu.sg Optimization Problem: min_{w}\| w - v \|_{2}^{2} s.t. sum_{i=1}^{m}=z, w_{i}\geq 0 Input: A vector v \in R^{m}, and a scalar z > 0 (default=1) Output: Projection vector w :Example: >>> proj = simplex_projection([.4 ,.3, -.4, .5]) >>> print(proj) array([ 0.33333333, 0.23333333, 0. , 0.43333333]) >>> print(proj.sum()) 1.0 Original matlab implementation: John Duchi ([email protected]) Python-port: Copyright 2013 by Thomas Wiecki ([email protected]).
Projection vectors to the simplex domain
[ "Projection", "vectors", "to", "the", "simplex", "domain" ]
def simplex_projection(v, b=1):
    """Projection vectors to the simplex domain

    Implemented according to the paper: Efficient projections onto the
    l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008.

    Implementation Time: 2011 June 17 by Bin@libin AT pmail.ntu.edu.sg
    Optimization Problem: min_{w}\| w - v \|_{2}^{2}
    s.t. sum_{i=1}^{m}=z, w_{i}\geq 0

    Input: A vector v \in R^{m}, and a scalar z > 0 (default=1)
    Output: Projection vector w

    :Example:
    >>> proj = simplex_projection([.4 ,.3, -.4, .5])
    >>> print(proj)
    array([ 0.33333333,  0.23333333,  0.        ,  0.43333333])
    >>> print(proj.sum())
    1.0

    Original matlab implementation: John Duchi ([email protected])
    Python-port: Copyright 2013 by Thomas Wiecki ([email protected]).
    """
    v = np.asarray(v)
    p = len(v)

    # Sort v into u in descending order
    v = (v > 0) * v
    u = np.sort(v)[::-1]
    sv = np.cumsum(u)

    rho = np.where(u > (sv - b) / np.arange(1, p + 1))[0][-1]
    theta = np.max([0, (sv[rho] - b) / (rho + 1)])
    w = (v - theta)
    w[w < 0] = 0
    return w
[ "def", "simplex_projection", "(", "v", ",", "b", "=", "1", ")", ":", "v", "=", "np", ".", "asarray", "(", "v", ")", "p", "=", "len", "(", "v", ")", "# Sort v into u in descending order", "v", "=", "(", "v", ">", "0", ")", "*", "v", "u", "=", "np", ".", "sort", "(", "v", ")", "[", ":", ":", "-", "1", "]", "sv", "=", "np", ".", "cumsum", "(", "u", ")", "rho", "=", "np", ".", "where", "(", "u", ">", "(", "sv", "-", "b", ")", "/", "np", ".", "arange", "(", "1", ",", "p", "+", "1", ")", ")", "[", "0", "]", "[", "-", "1", "]", "theta", "=", "np", ".", "max", "(", "[", "0", ",", "(", "sv", "[", "rho", "]", "-", "b", ")", "/", "(", "rho", "+", "1", ")", "]", ")", "w", "=", "(", "v", "-", "theta", ")", "w", "[", "w", "<", "0", "]", "=", "0", "return", "w" ]
https://github.com/jimgoo/zipline-tensorboard/blob/2c1aa45569dfa9506ff9689c2decb56e9da21403/olmar_tensorboard.py#L133-L168
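The docstring's doctest can be re-checked against the simplex constraints (non-negative entries summing to b). A short sketch, assuming simplex_projection above and numpy are in scope:

import numpy as np

proj = simplex_projection([.4, .3, -.4, .5])
assert np.all(proj >= 0)            # projection is non-negative
assert np.isclose(proj.sum(), 1.0)  # and sums to b (default 1)
print(proj)  # array([0.33333333, 0.23333333, 0.        , 0.43333333])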
analysiscenter/batchflow
294747da0bca309785f925be891441fdd824e9fa
batchflow/research/distributor.py
python
DynamicQueue.stop_workers
(self, n_workers)
Stop all workers by putting `None` task into queue.
Stop all workers by putting `None` task into queue.
[ "Stop", "all", "workers", "by", "putting", "None", "task", "into", "queue", "." ]
def stop_workers(self, n_workers):
    """ Stop all workers by putting `None` task into queue. """
    for _ in range(n_workers):
        self.put(None)
[ "def", "stop_workers", "(", "self", ",", "n_workers", ")", ":", "for", "_", "in", "range", "(", "n_workers", ")", ":", "self", ".", "put", "(", "None", ")" ]
https://github.com/analysiscenter/batchflow/blob/294747da0bca309785f925be891441fdd824e9fa/batchflow/research/distributor.py#L81-L84
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_vendored_deps/filter_plugins/openshift_master.py
python
FilterModule.translate_idps
(idps, api_version)
return u(yaml.dump([idp.to_dict() for idp in idp_list], allow_unicode=True, default_flow_style=False, width=float("inf"), Dumper=AnsibleDumper))
Translates a list of dictionaries into a valid identityProviders config
Translates a list of dictionaries into a valid identityProviders config
[ "Translates", "a", "list", "of", "dictionaries", "into", "a", "valid", "identityProviders", "config" ]
def translate_idps(idps, api_version):
    ''' Translates a list of dictionaries into a valid identityProviders config '''
    idp_list = []

    if not isinstance(idps, list):
        raise errors.AnsibleFilterError("|failed expects to filter on a list of identity providers")
    for idp in idps:
        if not isinstance(idp, dict):
            raise errors.AnsibleFilterError("|failed identity providers must be a list of dictionaries")

        cur_module = sys.modules[__name__]
        idp_class = getattr(cur_module, idp['kind'], None)
        idp_inst = idp_class(api_version, idp) if idp_class is not None else IdentityProviderBase(api_version, idp)
        idp_inst.set_provider_items()
        idp_list.append(idp_inst)

    IdentityProviderBase.validate_idp_list(idp_list)
    return u(yaml.dump([idp.to_dict() for idp in idp_list],
                       allow_unicode=True,
                       default_flow_style=False,
                       width=float("inf"),
                       Dumper=AnsibleDumper))
[ "def", "translate_idps", "(", "idps", ",", "api_version", ")", ":", "idp_list", "=", "[", "]", "if", "not", "isinstance", "(", "idps", ",", "list", ")", ":", "raise", "errors", ".", "AnsibleFilterError", "(", "\"|failed expects to filter on a list of identity providers\"", ")", "for", "idp", "in", "idps", ":", "if", "not", "isinstance", "(", "idp", ",", "dict", ")", ":", "raise", "errors", ".", "AnsibleFilterError", "(", "\"|failed identity providers must be a list of dictionaries\"", ")", "cur_module", "=", "sys", ".", "modules", "[", "__name__", "]", "idp_class", "=", "getattr", "(", "cur_module", ",", "idp", "[", "'kind'", "]", ",", "None", ")", "idp_inst", "=", "idp_class", "(", "api_version", ",", "idp", ")", "if", "idp_class", "is", "not", "None", "else", "IdentityProviderBase", "(", "api_version", ",", "idp", ")", "idp_inst", ".", "set_provider_items", "(", ")", "idp_list", ".", "append", "(", "idp_inst", ")", "IdentityProviderBase", ".", "validate_idp_list", "(", "idp_list", ")", "return", "u", "(", "yaml", ".", "dump", "(", "[", "idp", ".", "to_dict", "(", ")", "for", "idp", "in", "idp_list", "]", ",", "allow_unicode", "=", "True", ",", "default_flow_style", "=", "False", ",", "width", "=", "float", "(", "\"inf\"", ")", ",", "Dumper", "=", "AnsibleDumper", ")", ")" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_vendored_deps/filter_plugins/openshift_master.py#L461-L482
pyparallel/pyparallel
11e8c6072d48c8f13641925d17b147bf36ee0ba3
Lib/email/utils.py
python
unquote
(str)
return str
Remove quotes from a string.
Remove quotes from a string.
[ "Remove", "quotes", "from", "a", "string", "." ]
def unquote(str):
    """Remove quotes from a string."""
    if len(str) > 1:
        if str.startswith('"') and str.endswith('"'):
            return str[1:-1].replace('\\\\', '\\').replace('\\"', '"')
        if str.startswith('<') and str.endswith('>'):
            return str[1:-1]
    return str
[ "def", "unquote", "(", "str", ")", ":", "if", "len", "(", "str", ")", ">", "1", ":", "if", "str", ".", "startswith", "(", "'\"'", ")", "and", "str", ".", "endswith", "(", "'\"'", ")", ":", "return", "str", "[", "1", ":", "-", "1", "]", ".", "replace", "(", "'\\\\\\\\'", ",", "'\\\\'", ")", ".", "replace", "(", "'\\\\\"'", ",", "'\"'", ")", "if", "str", ".", "startswith", "(", "'<'", ")", "and", "str", ".", "endswith", "(", "'>'", ")", ":", "return", "str", "[", "1", ":", "-", "1", "]", "return", "str" ]
https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Lib/email/utils.py#L238-L245
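Both quoting styles the function strips can be exercised directly; the inputs below are made-up examples, assuming unquote above is in scope:

print(unquote('"Jane \\"JD\\" Doe"'))  # -> Jane "JD" Doe
print(unquote('<[email protected]>'))       # -> [email protected]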
apple/coremltools
141a83af482fcbdd5179807c9eaff9a7999c2c49
coremltools/models/neural_network/builder.py
python
NeuralNetworkBuilder.add_mvn
( self, name, input_name, output_name, across_channels=True, normalize_variance=True, epsilon=1e-5, )
return spec_layer
Add an MVN (mean variance normalization) layer. Computes mean, variance and normalizes the input. Refer to the ``MeanVarianceNormalizeLayerParams`` message in the specification (NeuralNetwork.proto) for more details. Parameters ---------- name: str The name of this layer. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. across_channels: boolean If False, each channel plane is normalized separately If True, mean/variance is computed across all C, H and W dimensions normalize_variance: boolean If False, only mean subtraction is performed. epsilon: float small bias to avoid division by zero. See Also -------- add_l2_normalize, add_lrn
Add an MVN (mean variance normalization) layer. Computes mean, variance and normalizes the input. Refer to the ``MeanVarianceNormalizeLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
[ "Add", "an", "MVN", "(", "mean", "variance", "normalization", ")", "layer", ".", "Computes", "mean", "variance", "and", "normalizes", "the", "input", ".", "Refer", "to", "the", "MeanVarianceNormalizeLayerParams", "message", "in", "the", "specification", "(", "NeuralNetwork", ".", "proto", ")", "for", "more", "details", "." ]
def add_mvn(
    self,
    name,
    input_name,
    output_name,
    across_channels=True,
    normalize_variance=True,
    epsilon=1e-5,
):
    """
    Add an MVN (mean variance normalization) layer. Computes mean,
    variance and normalizes the input.
    Refer to the ``MeanVarianceNormalizeLayerParams`` message in the
    specification (NeuralNetwork.proto) for more details.

    Parameters
    ----------
    name: str
        The name of this layer.

    input_name: str
        The input blob name of this layer.

    output_name: str
        The output blob name of this layer.

    across_channels: boolean
        If False, each channel plane is normalized separately
        If True, mean/variance is computed across all C, H and W dimensions

    normalize_variance: boolean
        If False, only mean subtraction is performed.

    epsilon: float
        small bias to avoid division by zero.

    See Also
    --------
    add_l2_normalize, add_lrn
    """
    spec_layer = self._add_generic_layer(name, [input_name], [output_name])
    spec_layer_params = spec_layer.mvn
    spec_layer_params.acrossChannels = across_channels
    spec_layer_params.normalizeVariance = normalize_variance
    spec_layer_params.epsilon = epsilon
    return spec_layer
[ "def", "add_mvn", "(", "self", ",", "name", ",", "input_name", ",", "output_name", ",", "across_channels", "=", "True", ",", "normalize_variance", "=", "True", ",", "epsilon", "=", "1e-5", ",", ")", ":", "spec_layer", "=", "self", ".", "_add_generic_layer", "(", "name", ",", "[", "input_name", "]", ",", "[", "output_name", "]", ")", "spec_layer_params", "=", "spec_layer", ".", "mvn", "spec_layer_params", ".", "acrossChannels", "=", "across_channels", "spec_layer_params", ".", "normalizeVariance", "=", "normalize_variance", "spec_layer_params", ".", "epsilon", "=", "epsilon", "return", "spec_layer" ]
https://github.com/apple/coremltools/blob/141a83af482fcbdd5179807c9eaff9a7999c2c49/coremltools/models/neural_network/builder.py#L4237-L4281
pyecharts/pyecharts
727339e6c21fab6197983acbd2ea04783bd647a3
pyecharts/datasets/__init__.py
python
FuzzyDict._search
(self, lookfor: typing.Any, stop_on_first: bool = False)
return best_ratio >= self.cutoff, best_key, best_match, best_ratio
Returns the value whose key best matches lookfor if stop_on_first is True then the method returns as soon as it finds the first item
Returns the value whose key best matches lookfor
[ "Returns", "the", "value", "whose", "key", "best", "matches", "lookfor" ]
def _search(self, lookfor: typing.Any, stop_on_first: bool = False):
    """Returns the value whose key best matches lookfor

    if stop_on_first is True then the method returns as soon
    as it finds the first item
    """
    # if the item is in the dictionary then just return it
    if self._dict_contains(lookfor):
        return True, lookfor, self._dict_getitem(lookfor), 1

    # set up the fuzzy matching tool
    ratio_calc = difflib.SequenceMatcher()
    ratio_calc.set_seq1(lookfor)

    # test each key in the dictionary
    best_ratio = 0
    best_match = None
    best_key = None
    for key in self:
        # if the current key is not a string
        # then we just skip it
        try:
            # set up the SequenceMatcher with other text
            ratio_calc.set_seq2(key)
        except TypeError:
            continue

        # we get an error here if the item to look for is not a
        # string - if it cannot be fuzzy matched and we are here
        # then it is definitely not in the dictionary
        try:
            # calculate the match value
            ratio = ratio_calc.ratio()
        except TypeError:
            break

        # if this is the best ratio so far - save it and the value
        if ratio > best_ratio:
            best_ratio = ratio
            best_key = key
            best_match = self._dict_getitem(key)

        if stop_on_first and ratio >= self.cutoff:
            break

    return best_ratio >= self.cutoff, best_key, best_match, best_ratio
[ "def", "_search", "(", "self", ",", "lookfor", ":", "typing", ".", "Any", ",", "stop_on_first", ":", "bool", "=", "False", ")", ":", "# if the item is in the dictionary then just return it", "if", "self", ".", "_dict_contains", "(", "lookfor", ")", ":", "return", "True", ",", "lookfor", ",", "self", ".", "_dict_getitem", "(", "lookfor", ")", ",", "1", "# set up the fuzzy matching tool", "ratio_calc", "=", "difflib", ".", "SequenceMatcher", "(", ")", "ratio_calc", ".", "set_seq1", "(", "lookfor", ")", "# test each key in the dictionary", "best_ratio", "=", "0", "best_match", "=", "None", "best_key", "=", "None", "for", "key", "in", "self", ":", "# if the current key is not a string", "# then we just skip it", "try", ":", "# set up the SequenceMatcher with other text", "ratio_calc", ".", "set_seq2", "(", "key", ")", "except", "TypeError", ":", "continue", "# we get an error here if the item to look for is not a", "# string - if it cannot be fuzzy matched and we are here", "# this it is definitely not in the dictionary", "try", ":", "# calculate the match value", "ratio", "=", "ratio_calc", ".", "ratio", "(", ")", "except", "TypeError", ":", "break", "# if this is the best ratio so far - save it and the value", "if", "ratio", ">", "best_ratio", ":", "best_ratio", "=", "ratio", "best_key", "=", "key", "best_match", "=", "self", ".", "_dict_getitem", "(", "key", ")", "if", "stop_on_first", "and", "ratio", ">=", "self", ".", "cutoff", ":", "break", "return", "best_ratio", ">=", "self", ".", "cutoff", ",", "best_key", ",", "best_match", ",", "best_ratio" ]
https://github.com/pyecharts/pyecharts/blob/727339e6c21fab6197983acbd2ea04783bd647a3/pyecharts/datasets/__init__.py#L26-L72
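The difflib scoring that _search performs per key can be shown standalone; the query and candidate keys below are made-up:

import difflib

matcher = difflib.SequenceMatcher()
matcher.set_seq1("shanxi")                       # the lookfor value
for key in ("shaanxi", "shanghai", "shandong"):  # candidate dictionary keys
    matcher.set_seq2(key)
    print(key, round(matcher.ratio(), 3))        # similarity in [0, 1]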
pypa/pipenv
b21baade71a86ab3ee1429f71fbc14d4f95fb75d
pipenv/patched/notpip/_internal/metadata/base.py
python
BaseDistribution.metadata_version
(self)
return self.metadata.get("Metadata-Version")
Value of "Metadata-Version:" in distribution metadata, if available.
Value of "Metadata-Version:" in distribution metadata, if available.
[ "Value", "of", "Metadata", "-", "Version", ":", "in", "distribution", "metadata", "if", "available", "." ]
def metadata_version(self) -> Optional[str]:
    """Value of "Metadata-Version:" in distribution metadata, if available."""
    return self.metadata.get("Metadata-Version")
[ "def", "metadata_version", "(", "self", ")", "->", "Optional", "[", "str", "]", ":", "return", "self", ".", "metadata", ".", "get", "(", "\"Metadata-Version\"", ")" ]
https://github.com/pypa/pipenv/blob/b21baade71a86ab3ee1429f71fbc14d4f95fb75d/pipenv/patched/notpip/_internal/metadata/base.py#L154-L156
wangzheng0822/algo
b2c1228ff915287ad7ebeae4355fa26854ea1557
python/39_back_track/01_bag.py
python
get_value
(items_info: List, pick_items: List)
return sum([a*b for a, b in zip(values, pick_items)])
[]
def get_value(items_info: List, pick_items: List):
    values = [_[1] for _ in items_info]
    return sum([a*b for a, b in zip(values, pick_items)])
[ "def", "get_value", "(", "items_info", ":", "List", ",", "pick_items", ":", "List", ")", ":", "values", "=", "[", "_", "[", "1", "]", "for", "_", "in", "items_info", "]", "return", "sum", "(", "[", "a", "*", "b", "for", "a", ",", "b", "in", "zip", "(", "values", ",", "pick_items", ")", "]", ")" ]
https://github.com/wangzheng0822/algo/blob/b2c1228ff915287ad7ebeae4355fa26854ea1557/python/39_back_track/01_bag.py#L36-L38
yinhm/datafeed
62193278212c2441d8e49b45d71b8d9d79aab31c
datafeed/dividend.py
python
Dividend.ex_date
(self)
return datetime.date.fromtimestamp(self._npd['time'])
[]
def ex_date(self): return datetime.date.fromtimestamp(self._npd['time'])
[ "def", "ex_date", "(", "self", ")", ":", "return", "datetime", ".", "date", ".", "fromtimestamp", "(", "self", ".", "_npd", "[", "'time'", "]", ")" ]
https://github.com/yinhm/datafeed/blob/62193278212c2441d8e49b45d71b8d9d79aab31c/datafeed/dividend.py#L81-L82
TencentCloud/tencentcloud-sdk-python
3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2
tencentcloud/live/v20180801/models.py
python
BillDataInfo.__init__
(self)
r""" :param Time: 时间点,格式: yyyy-mm-dd HH:MM:SS。 :type Time: str :param Bandwidth: 带宽,单位是 Mbps。 :type Bandwidth: float :param Flux: 流量,单位是 MB。 :type Flux: float :param PeakTime: 峰值时间点,格式: yyyy-mm-dd HH:MM:SS,原始数据为5分钟粒度,如果查询小时和天粒度数据,则返回对应粒度内的带宽峰值时间点。 :type PeakTime: str
r""" :param Time: 时间点,格式: yyyy-mm-dd HH:MM:SS。 :type Time: str :param Bandwidth: 带宽,单位是 Mbps。 :type Bandwidth: float :param Flux: 流量,单位是 MB。 :type Flux: float :param PeakTime: 峰值时间点,格式: yyyy-mm-dd HH:MM:SS,原始数据为5分钟粒度,如果查询小时和天粒度数据,则返回对应粒度内的带宽峰值时间点。 :type PeakTime: str
[ "r", ":", "param", "Time", ":", "时间点,格式", ":", "yyyy", "-", "mm", "-", "dd", "HH", ":", "MM", ":", "SS。", ":", "type", "Time", ":", "str", ":", "param", "Bandwidth", ":", "带宽,单位是", "Mbps。", ":", "type", "Bandwidth", ":", "float", ":", "param", "Flux", ":", "流量,单位是", "MB。", ":", "type", "Flux", ":", "float", ":", "param", "PeakTime", ":", "峰值时间点,格式", ":", "yyyy", "-", "mm", "-", "dd", "HH", ":", "MM", ":", "SS,原始数据为5分钟粒度,如果查询小时和天粒度数据,则返回对应粒度内的带宽峰值时间点。", ":", "type", "PeakTime", ":", "str" ]
def __init__(self):
    r"""
    :param Time: Time point, in the format yyyy-mm-dd HH:MM:SS.
    :type Time: str
    :param Bandwidth: Bandwidth, in Mbps.
    :type Bandwidth: float
    :param Flux: Traffic, in MB.
    :type Flux: float
    :param PeakTime: Peak time point, in the format yyyy-mm-dd HH:MM:SS. Raw data has 5-minute granularity;
        for queries at hourly or daily granularity, the peak bandwidth time point within that granularity is returned.
    :type PeakTime: str
    """
    self.Time = None
    self.Bandwidth = None
    self.Flux = None
    self.PeakTime = None
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "Time", "=", "None", "self", ".", "Bandwidth", "=", "None", "self", ".", "Flux", "=", "None", "self", ".", "PeakTime", "=", "None" ]
https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/live/v20180801/models.py#L319-L333
bruderstein/PythonScript
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
PythonLib/min/optparse.py
python
Option._check_callback
(self)
[]
def _check_callback(self):
    if self.action == "callback":
        if not callable(self.callback):
            raise OptionError(
                "callback not callable: %r" % self.callback, self)
        if (self.callback_args is not None and
            not isinstance(self.callback_args, tuple)):
            raise OptionError(
                "callback_args, if supplied, must be a tuple: not %r"
                % self.callback_args, self)
        if (self.callback_kwargs is not None and
            not isinstance(self.callback_kwargs, dict)):
            raise OptionError(
                "callback_kwargs, if supplied, must be a dict: not %r"
                % self.callback_kwargs, self)
    else:
        if self.callback is not None:
            raise OptionError(
                "callback supplied (%r) for non-callback option"
                % self.callback, self)
        if self.callback_args is not None:
            raise OptionError(
                "callback_args supplied for non-callback option", self)
        if self.callback_kwargs is not None:
            raise OptionError(
                "callback_kwargs supplied for non-callback option", self)
[ "def", "_check_callback", "(", "self", ")", ":", "if", "self", ".", "action", "==", "\"callback\"", ":", "if", "not", "callable", "(", "self", ".", "callback", ")", ":", "raise", "OptionError", "(", "\"callback not callable: %r\"", "%", "self", ".", "callback", ",", "self", ")", "if", "(", "self", ".", "callback_args", "is", "not", "None", "and", "not", "isinstance", "(", "self", ".", "callback_args", ",", "tuple", ")", ")", ":", "raise", "OptionError", "(", "\"callback_args, if supplied, must be a tuple: not %r\"", "%", "self", ".", "callback_args", ",", "self", ")", "if", "(", "self", ".", "callback_kwargs", "is", "not", "None", "and", "not", "isinstance", "(", "self", ".", "callback_kwargs", ",", "dict", ")", ")", ":", "raise", "OptionError", "(", "\"callback_kwargs, if supplied, must be a dict: not %r\"", "%", "self", ".", "callback_kwargs", ",", "self", ")", "else", ":", "if", "self", ".", "callback", "is", "not", "None", ":", "raise", "OptionError", "(", "\"callback supplied (%r) for non-callback option\"", "%", "self", ".", "callback", ",", "self", ")", "if", "self", ".", "callback_args", "is", "not", "None", ":", "raise", "OptionError", "(", "\"callback_args supplied for non-callback option\"", ",", "self", ")", "if", "self", ".", "callback_kwargs", "is", "not", "None", ":", "raise", "OptionError", "(", "\"callback_kwargs supplied for non-callback option\"", ",", "self", ")" ]
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/min/optparse.py#L705-L730
selfboot/LeetCode
473c0c5451651140d75cbd143309c51cd8fe1cf1
ToBeOptimized/18_4Sum.py
python
Solution.fourSum
(self, nums, target)
:type nums: List[int] :type target: int :rtype: List[List[int]]
:type nums: List[int] :type target: int :rtype: List[List[int]]
[ ":", "type", "nums", ":", "List", "[", "int", "]", ":", "type", "target", ":", "int", ":", "rtype", ":", "List", "[", "List", "[", "int", "]]" ]
def fourSum(self, nums, target):
    """
    :type nums: List[int]
    :type target: int
    :rtype: List[List[int]]
    """
    nums.sort()
    length = len(nums)

    # Get all the two sums and their two addend's index.
    two_sums_dict = {}
    for i in range(length):
        for j in range(i+1, length):
            two_sums = nums[i] + nums[j]
            if two_sums not in two_sums_dict:
                two_sums_dict[two_sums] = []
            two_sums_dict[two_sums].append([i, j])

    # keys() must be called, and wrapped in list() so sort() works on Python 3.
    sums_list = list(two_sums_dict.keys())
    sums_list.sort()
    solution = []
[ "def", "fourSum", "(", "self", ",", "nums", ",", "target", ")", ":", "nums", ".", "sort", "(", ")", "length", "=", "len", "(", "nums", ")", "# Get all the two sums and their two addend's index.", "two_sums_dict", "=", "{", "}", "for", "i", "in", "range", "(", "length", ")", ":", "for", "j", "in", "range", "(", "i", "+", "1", ",", "length", ")", ":", "two_sums", "=", "nums", "[", "i", "]", "+", "nums", "[", "j", "]", "if", "two_sums", "not", "in", "two_sums_dict", ":", "two_sums_dict", "[", "two_sums", "]", "=", "[", "]", "two_sums_dict", "[", "two_sums", "]", ".", "append", "(", "[", "i", ",", "j", "]", ")", "sums_list", "=", "two_sums_dict", ".", "keys", "sums_list", ".", "sort", "(", ")", "solution", "=", "[", "]" ]
https://github.com/selfboot/LeetCode/blob/473c0c5451651140d75cbd143309c51cd8fe1cf1/ToBeOptimized/18_4Sum.py#L6-L26
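The pair-sum index the method builds before the (truncated) matching step can be sketched on a tiny input; the names and values here are illustrative only:

from collections import defaultdict

nums = sorted([1, 0, -1, 0, -2, 2])
pair_sums = defaultdict(list)        # two-sum value -> list of index pairs
for i in range(len(nums)):
    for j in range(i + 1, len(nums)):
        pair_sums[nums[i] + nums[j]].append([i, j])
print(sorted(pair_sums))             # all distinct pair sums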
asweigart/PythonStdioGames
8bdabf93e6b1bb6af3e26fea24da93f85e8314b6
src/gamesbyexample/ninetyninebottles2.py
python
slowPrint
(text, pauseAmount=0.1)
Slowly print out the characters in text one at a time.
Slowly print out the characters in text one at a time.
[ "Slowly", "print", "out", "the", "characters", "in", "text", "one", "at", "a", "time", "." ]
def slowPrint(text, pauseAmount=0.1):
    """Slowly print out the characters in text one at a time."""
    for character in text:
        # Set flush=True here so the text is immediately printed:
        print(character, flush=True, end='')  # end='' means no newline.
        time.sleep(pauseAmount)  # Pause in between each character.
    print()
[ "def", "slowPrint", "(", "text", ",", "pauseAmount", "=", "0.1", ")", ":", "for", "character", "in", "text", ":", "# Set flush=True here so the text is immediately printed:", "print", "(", "character", ",", "flush", "=", "True", ",", "end", "=", "''", ")", "# end='' means no newline.", "time", ".", "sleep", "(", "pauseAmount", ")", "# Pause in between each character.", "print", "(", ")" ]
https://github.com/asweigart/PythonStdioGames/blob/8bdabf93e6b1bb6af3e26fea24da93f85e8314b6/src/gamesbyexample/ninetyninebottles2.py#L16-L22
twilio/twilio-python
6e1e811ea57a1edfadd5161ace87397c563f6915
twilio/rest/flex_api/v1/__init__.py
python
V1.web_channel
(self)
return self._web_channel
:rtype: twilio.rest.flex_api.v1.web_channel.WebChannelList
:rtype: twilio.rest.flex_api.v1.web_channel.WebChannelList
[ ":", "rtype", ":", "twilio", ".", "rest", ".", "flex_api", ".", "v1", ".", "web_channel", ".", "WebChannelList" ]
def web_channel(self):
    """
    :rtype: twilio.rest.flex_api.v1.web_channel.WebChannelList
    """
    if self._web_channel is None:
        self._web_channel = WebChannelList(self)
    return self._web_channel
[ "def", "web_channel", "(", "self", ")", ":", "if", "self", ".", "_web_channel", "is", "None", ":", "self", ".", "_web_channel", "=", "WebChannelList", "(", "self", ")", "return", "self", ".", "_web_channel" ]
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/flex_api/v1/__init__.py#L60-L66
OpenCobolIDE/OpenCobolIDE
c78d0d335378e5fe0a5e74f53c19b68b55e85388
open_cobol_ide/extlibs/future/backports/email/headerregistry.py
python
BaseHeader.defects
(self)
return tuple(self._defects)
[]
def defects(self): return tuple(self._defects)
[ "def", "defects", "(", "self", ")", ":", "return", "tuple", "(", "self", ".", "_defects", ")" ]
https://github.com/OpenCobolIDE/OpenCobolIDE/blob/c78d0d335378e5fe0a5e74f53c19b68b55e85388/open_cobol_ide/extlibs/future/backports/email/headerregistry.py#L220-L221
HariSekhon/Nagios-Plugins
a436fc63e10ab8a64d623df109777dea2eda5758
older/lib_nagios.py
python
NagiosTester.sighandler
(self, _discarded, _discarded2)
Function to be called by signal.alarm to kill the plugin
Function to be called by signal.alarm to kill the plugin
[ "Function", "to", "be", "called", "by", "signal", ".", "alarm", "to", "kill", "the", "plugin" ]
def sighandler(self, _discarded, _discarded2):
    """Function to be called by signal.alarm to kill the plugin"""
    if self.timeout == 1:
        timeout = "(1 second)"
    else:
        timeout = "(%s seconds)" % self.timeout

    if not CHECK_NAME:
        check_name = ""
    else:
        check_name = CHECK_NAME.lower().strip() + " "

    end(CRITICAL, "%splugin has self terminated after " % check_name \
                + "exceeding the timeout %s" % timeout)
[ "def", "sighandler", "(", "self", ",", "_discarded", ",", "_discarded2", ")", ":", "if", "self", ".", "timeout", "==", "1", ":", "timeout", "=", "\"(1 second)\"", "else", ":", "timeout", "=", "\"(%s seconds)\"", "%", "self", ".", "timeout", "if", "not", "CHECK_NAME", ":", "check_name", "=", "\"\"", "else", ":", "check_name", "=", "CHECK_NAME", ".", "lower", "(", ")", ".", "strip", "(", ")", "+", "\" \"", "end", "(", "CRITICAL", ",", "\"%splugin has self terminated after \"", "%", "check_name", "+", "\"exceeding the timeout %s\"", "%", "timeout", ")" ]
https://github.com/HariSekhon/Nagios-Plugins/blob/a436fc63e10ab8a64d623df109777dea2eda5758/older/lib_nagios.py#L210-L224
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/pip/_vendor/urllib3/connectionpool.py
python
HTTPSConnectionPool._prepare_proxy
(self, conn)
Establish tunnel connection early, because otherwise httplib would improperly set Host: header to proxy's IP:port.
Establish tunnel connection early, because otherwise httplib would improperly set Host: header to proxy's IP:port.
[ "Establish", "tunnel", "connection", "early", "because", "otherwise", "httplib", "would", "improperly", "set", "Host", ":", "header", "to", "proxy", "s", "IP", ":", "port", "." ]
def _prepare_proxy(self, conn):
    """
    Establish tunnel connection early, because otherwise httplib
    would improperly set Host: header to proxy's IP:port.
    """
    conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers)
    conn.connect()
[ "def", "_prepare_proxy", "(", "self", ",", "conn", ")", ":", "conn", ".", "set_tunnel", "(", "self", ".", "_proxy_host", ",", "self", ".", "port", ",", "self", ".", "proxy_headers", ")", "conn", ".", "connect", "(", ")" ]
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/pip/_vendor/urllib3/connectionpool.py#L799-L805
StyraHem/ShellyForHASS
902c04ab25b0a7667718eeef53bb6ad43614fe2f
custom_components/shelly/__init__.py
python
ShellyInstance.stop
(self, _=None)
Stop Shelly.
Stop Shelly.
[ "Stop", "Shelly", "." ]
async def stop(self, _=None):
    """Stop Shelly."""
    _LOGGER.info("Shutting down Shelly")
    entity_reg = \
        await self.hass.helpers.entity_registry.async_get_registry()
    #entities_to_remove = []
    #for entity in entity_reg.entities.values():
    #    if entity.platform == "shelly":
    #        entities_to_remove.append(entity.entity_id)
    #for entity_id in entities_to_remove:
    #    entity_reg.async_remove(entity_id)
    if self.cancel_update_listener:
        self.cancel_update_listener()
    if self.pys:
        self.pys.close()
[ "async", "def", "stop", "(", "self", ",", "_", "=", "None", ")", ":", "_LOGGER", ".", "info", "(", "\"Shutting down Shelly\"", ")", "entity_reg", "=", "await", "self", ".", "hass", ".", "helpers", ".", "entity_registry", ".", "async_get_registry", "(", ")", "#entities_to_remove = []", "#for entity in entity_reg.entities.values():", "# if entity.platform == \"shelly\":", "# entities_to_remove.append(entity.entity_id)", "#for entity_id in entities_to_remove:", "# entity_reg.async_remove(entity_id)", "if", "self", ".", "cancel_update_listener", ":", "self", ".", "cancel_update_listener", "(", ")", "if", "self", ".", "pys", ":", "self", ".", "pys", ".", "close", "(", ")" ]
https://github.com/StyraHem/ShellyForHASS/blob/902c04ab25b0a7667718eeef53bb6ad43614fe2f/custom_components/shelly/__init__.py#L407-L421
samuelcolvin/arq
5f6162c94816d4b4302e9c8666da1f4a46cac228
arq/worker.py
python
FailedJobs.__str__
(self)
[]
def __str__(self) -> str:
    if self.count == 1 and self.job_results:
        exc = self.job_results[0].result
        return f'1 job failed {exc!r}'
    else:
        return f'{self.count} jobs failed:\n' + '\n'.join(repr(r.result) for r in self.job_results)
[ "def", "__str__", "(", "self", ")", "->", "str", ":", "if", "self", ".", "count", "==", "1", "and", "self", ".", "job_results", ":", "exc", "=", "self", ".", "job_results", "[", "0", "]", ".", "result", "return", "f'1 job failed {exc!r}'", "else", ":", "return", "f'{self.count} jobs failed:\\n'", "+", "'\\n'", ".", "join", "(", "repr", "(", "r", ".", "result", ")", "for", "r", "in", "self", ".", "job_results", ")" ]
https://github.com/samuelcolvin/arq/blob/5f6162c94816d4b4302e9c8666da1f4a46cac228/arq/worker.py#L113-L118
richrd/suplemon
8bb67d6758e5bc5ca200fdce7a0fb6635abb66f4
suplemon/modules/battery.py
python
Battery.readf
(self, path)
return data
Read and return file contents at path.
Read and return file contents at path.
[ "Read", "and", "return", "file", "contents", "at", "path", "." ]
def readf(self, path):
    """Read and return file contents at path."""
    f = open(path)
    data = f.read()
    f.close()
    return data
[ "def", "readf", "(", "self", ",", "path", ")", ":", "f", "=", "open", "(", "path", ")", "data", "=", "f", ".", "read", "(", ")", "f", ".", "close", "(", ")", "return", "data" ]
https://github.com/richrd/suplemon/blob/8bb67d6758e5bc5ca200fdce7a0fb6635abb66f4/suplemon/modules/battery.py#L106-L111
linxid/Machine_Learning_Study_Path
558e82d13237114bbb8152483977806fc0c222af
Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/_weakrefset.py
python
WeakSet.discard
(self, item)
[]
def discard(self, item):
    if self._pending_removals:
        self._commit_removals()
    self.data.discard(ref(item))
[ "def", "discard", "(", "self", ",", "item", ")", ":", "if", "self", ".", "_pending_removals", ":", "self", ".", "_commit_removals", "(", ")", "self", ".", "data", ".", "discard", "(", "ref", "(", "item", ")", ")" ]
https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/_weakrefset.py#L111-L114
ilastik/ilastik
6acd2c554bc517e9c8ddad3623a7aaa2e6970c28
ilastik/shell/gui/startShellGui.py
python
_applyStyleSheet
(app)
Apply application-wide style-sheet rules.
Apply application-wide style-sheet rules.
[ "Apply", "application", "-", "wide", "style", "-", "sheet", "rules", "." ]
def _applyStyleSheet(app):
    """
    Apply application-wide style-sheet rules.
    """
    styleSheetPath = os.path.join(os.path.split(__file__)[0], "ilastik-style.qss")
    with open(styleSheetPath, "r") as f:
        styleSheetText = f.read()
        app.setStyleSheet(styleSheetText)
[ "def", "_applyStyleSheet", "(", "app", ")", ":", "styleSheetPath", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "split", "(", "__file__", ")", "[", "0", "]", ",", "\"ilastik-style.qss\"", ")", "with", "open", "(", "styleSheetPath", ",", "\"r\"", ")", "as", "f", ":", "styleSheetText", "=", "f", ".", "read", "(", ")", "app", ".", "setStyleSheet", "(", "styleSheetText", ")" ]
https://github.com/ilastik/ilastik/blob/6acd2c554bc517e9c8ddad3623a7aaa2e6970c28/ilastik/shell/gui/startShellGui.py#L73-L80
TurboWay/spiderman
168f18552e0abb06187388b542d6a0df057ba852
SP/pipelines/pipelines_hdfs.py
python
HdfsPipeline.process_item
(self, item, spider)
return item
:param item: :param spider: :return: data is split by table and written to the database
:param item: :param spider: :return: data is split by table and written to the database
[ ":", "param", "item", ":", ":", "param", "spider", ":", ":", "return", ":", "数据分表入库" ]
def process_item(self, item, spider):
    """
    :param item:
    :param spider:
    :return: data is split by table and written to the database
    """
    if item.tablename in self.buckets_map:
        self.buckets_map[item.tablename].append(item)
    else:
        cols, col_default = [], {}
        for field, value in item.fields.items():
            cols.append(field)
            col_default[field] = item.fields[field].get('default', '')
        cols.sort(key=lambda x: item.fields[x].get('idx', 1))
        self.table_cols_map.setdefault(item.tablename, (cols, col_default))  # define table structure, column order, defaults
        self.buckets_map.setdefault(item.tablename, [item])
        if self.hive_auto_create:
            self.checktable(item.tablename, cols)  # create the table
    self.buckets2db()  # flush buckets that meet the flush condition to the database
    return item
[ "def", "process_item", "(", "self", ",", "item", ",", "spider", ")", ":", "if", "item", ".", "tablename", "in", "self", ".", "buckets_map", ":", "self", ".", "buckets_map", "[", "item", ".", "tablename", "]", ".", "append", "(", "item", ")", "else", ":", "cols", ",", "col_default", "=", "[", "]", ",", "{", "}", "for", "field", ",", "value", "in", "item", ".", "fields", ".", "items", "(", ")", ":", "cols", ".", "append", "(", "field", ")", "col_default", "[", "field", "]", "=", "item", ".", "fields", "[", "field", "]", ".", "get", "(", "'default'", ",", "''", ")", "cols", ".", "sort", "(", "key", "=", "lambda", "x", ":", "item", ".", "fields", "[", "x", "]", ".", "get", "(", "'idx'", ",", "1", ")", ")", "self", ".", "table_cols_map", ".", "setdefault", "(", "item", ".", "tablename", ",", "(", "cols", ",", "col_default", ")", ")", "# 定义表结构、字段顺序、默认值", "self", ".", "buckets_map", ".", "setdefault", "(", "item", ".", "tablename", ",", "[", "item", "]", ")", "if", "self", ".", "hive_auto_create", ":", "self", ".", "checktable", "(", "item", ".", "tablename", ",", "cols", ")", "# 建表", "self", ".", "buckets2db", "(", ")", "# 将满足条件的桶 入库", "return", "item" ]
https://github.com/TurboWay/spiderman/blob/168f18552e0abb06187388b542d6a0df057ba852/SP/pipelines/pipelines_hdfs.py#L41-L60
p-christ/nn_builder
a79b45d15176b4d333dbed094e78bb3b216ed037
nn_builder/pytorch/CNN.py
python
CNN.check_CNN_layers_valid
(self)
Checks that the user inputs for cnn_hidden_layers were valid. cnn_hidden_layers must be a list of layers where each layer must be of one of these forms: - ["conv", channels, kernel_size, stride, padding] - ["maxpool", kernel_size, stride, padding] - ["avgpool", kernel_size, stride, padding] - ["adaptivemaxpool", output height, output width] - ["adaptiveavgpool", output height, output width] - ["linear", out]
Checks that the user inputs for cnn_hidden_layers were valid. cnn_hidden_layers must be a list of layers where each layer must be of one of these forms: - ["conv", channels, kernel_size, stride, padding] - ["maxpool", kernel_size, stride, padding] - ["avgpool", kernel_size, stride, padding] - ["adaptivemaxpool", output height, output width] - ["adaptiveavgpool", output height, output width] - ["linear", out]
[ "Checks", "that", "the", "user", "inputs", "for", "cnn_hidden_layers", "were", "valid", ".", "cnn_hidden_layers", "must", "be", "a", "list", "of", "layers", "where", "each", "layer", "must", "be", "of", "one", "of", "these", "forms", ":", "-", "[", "conv", "channels", "kernel_size", "stride", "padding", "]", "-", "[", "maxpool", "kernel_size", "stride", "padding", "]", "-", "[", "avgpool", "kernel_size", "stride", "padding", "]", "-", "[", "adaptivemaxpool", "output", "height", "output", "width", "]", "-", "[", "adaptiveavgpool", "output", "height", "output", "width", "]", "-", "[", "linear", "out", "]" ]
def check_CNN_layers_valid(self):
    """Checks that the user inputs for cnn_hidden_layers were valid. cnn_hidden_layers must be a list of layers where
    each layer must be of one of these forms:
    - ["conv", channels, kernel_size, stride, padding]
    - ["maxpool", kernel_size, stride, padding]
    - ["avgpool", kernel_size, stride, padding]
    - ["adaptivemaxpool", output height, output width]
    - ["adaptiveavgpool", output height, output width]
    - ["linear", out]
    """
    error_msg_layer_type = "First element in a layer specification must be one of {}".format(self.valid_cnn_hidden_layer_types)
    error_msg_conv_layer = """Conv layer must be of form ['conv', channels, kernel_size, stride, padding] where the
    final 4 elements are non-negative integers"""
    error_msg_maxpool_layer = """Maxpool layer must be of form ['maxpool', kernel_size, stride, padding] where the
    final 2 elements are non-negative integers"""
    error_msg_avgpool_layer = """Avgpool layer must be of form ['avgpool', kernel_size, stride, padding] where the
    final 2 elements are non-negative integers"""
    error_msg_adaptivemaxpool_layer = """Adaptivemaxpool layer must be of form ['adaptivemaxpool', output height, output width]"""
    error_msg_adaptiveavgpool_layer = """Adaptiveavgpool layer must be of form ['adaptiveavgpool', output height, output width]"""
    error_msg_linear_layer = """Linear layer must be of form ['linear', out] where out is a non-negative integers"""

    assert isinstance(self.layers_info, list), "layers must be a list"

    all_layers = self.layers_info[:-1]
    output_layer = self.layers_info[-1]
    assert isinstance(output_layer, list), "layers must be a list"
    if isinstance(output_layer[0], list):
        assert len(output_layer) == len(
            self.output_activation), "Number of output activations must equal number of output heads"
        for layer in output_layer:
            all_layers.append(layer)
            assert layer[0].lower() == "linear", "Final layer must be linear"
    else:
        all_layers.append(output_layer)
        assert isinstance(output_layer[0], str), error_msg_layer_type
        assert output_layer[0].lower() == "linear", "Final layer must be linear"

    for layer in all_layers:
        assert isinstance(layer, list), "Each layer must be a list"
        assert isinstance(layer[0], str), error_msg_layer_type
        layer_type_name = layer[0].lower()
        assert layer_type_name in self.valid_cnn_hidden_layer_types, "Layer name {} not valid, use one of {}".format(layer_type_name, self.valid_cnn_hidden_layer_types)
        if layer_type_name == "conv":
            assert len(layer) == 5, error_msg_conv_layer
            for ix in range(3):
                assert isinstance(layer[ix+1], int) and layer[ix+1] > 0, error_msg_conv_layer
            assert isinstance(layer[4], int) and layer[4] >= 0, error_msg_conv_layer
        elif layer_type_name == "maxpool":
            assert len(layer) == 4, error_msg_maxpool_layer
            for ix in range(2):
                assert isinstance(layer[ix + 1], int) and layer[ix + 1] > 0, error_msg_maxpool_layer
            if layer[1] != layer[2]:
                print("NOTE that your maxpool kernel size {} isn't the same as your stride {}".format(layer[1], layer[2]))
            assert isinstance(layer[3], int) and layer[3] >= 0, error_msg_conv_layer
        elif layer_type_name == "avgpool":
            assert len(layer) == 4, error_msg_avgpool_layer
            for ix in range(2):
                assert isinstance(layer[ix + 1], int) and layer[ix + 1] > 0, error_msg_avgpool_layer
            assert isinstance(layer[3], int) and layer[3] >= 0, error_msg_conv_layer
            if layer[1] != layer[2]:
                print("NOTE that your avgpool kernel size {} isn't the same as your stride {}".format(layer[1], layer[2]))
        elif layer_type_name == "adaptivemaxpool":
            assert len(layer) == 3, error_msg_adaptivemaxpool_layer
            for ix in range(2):
                assert isinstance(layer[ix + 1], int) and layer[ix + 1] > 0, error_msg_adaptivemaxpool_layer
        elif layer_type_name == "adaptiveavgpool":
            assert len(layer) == 3, error_msg_adaptiveavgpool_layer
            for ix in range(2):
                assert isinstance(layer[ix + 1], int) and layer[
                    ix + 1] > 0, error_msg_adaptiveavgpool_layer
        elif layer_type_name == "linear":
            assert len(layer) == 2, error_msg_linear_layer
            for ix in range(1):
                assert isinstance(layer[ix+1], int) and layer[ix+1] > 0
        else:
            raise ValueError("Invalid layer name")

    rest_must_be_linear = False
    for ix, layer in enumerate(all_layers):
        if rest_must_be_linear:
            assert layer[0].lower() == "linear", "If have linear layers then they must come at end"
        if layer[0].lower() == "linear":
            rest_must_be_linear = True
[ "def", "check_CNN_layers_valid", "(", "self", ")", ":", "error_msg_layer_type", "=", "\"First element in a layer specification must be one of {}\"", ".", "format", "(", "self", ".", "valid_cnn_hidden_layer_types", ")", "error_msg_conv_layer", "=", "\"\"\"Conv layer must be of form ['conv', channels, kernel_size, stride, padding] where the \n final 4 elements are non-negative integers\"\"\"", "error_msg_maxpool_layer", "=", "\"\"\"Maxpool layer must be of form ['maxpool', kernel_size, stride, padding] where the \n final 2 elements are non-negative integers\"\"\"", "error_msg_avgpool_layer", "=", "\"\"\"Avgpool layer must be of form ['avgpool', kernel_size, stride, padding] where the \n final 2 elements are non-negative integers\"\"\"", "error_msg_adaptivemaxpool_layer", "=", "\"\"\"Adaptivemaxpool layer must be of form ['adaptivemaxpool', output height, output width]\"\"\"", "error_msg_adaptiveavgpool_layer", "=", "\"\"\"Adaptiveavgpool layer must be of form ['adaptiveavgpool', output height, output width]\"\"\"", "error_msg_linear_layer", "=", "\"\"\"Linear layer must be of form ['linear', out] where out is a non-negative integers\"\"\"", "assert", "isinstance", "(", "self", ".", "layers_info", ",", "list", ")", ",", "\"layers must be a list\"", "all_layers", "=", "self", ".", "layers_info", "[", ":", "-", "1", "]", "output_layer", "=", "self", ".", "layers_info", "[", "-", "1", "]", "assert", "isinstance", "(", "output_layer", ",", "list", ")", ",", "\"layers must be a list\"", "if", "isinstance", "(", "output_layer", "[", "0", "]", ",", "list", ")", ":", "assert", "len", "(", "output_layer", ")", "==", "len", "(", "self", ".", "output_activation", ")", ",", "\"Number of output activations must equal number of output heads\"", "for", "layer", "in", "output_layer", ":", "all_layers", ".", "append", "(", "layer", ")", "assert", "layer", "[", "0", "]", ".", "lower", "(", ")", "==", "\"linear\"", ",", "\"Final layer must be linear\"", "else", ":", "all_layers", ".", "append", "(", "output_layer", ")", "assert", "isinstance", "(", "output_layer", "[", "0", "]", ",", "str", ")", ",", "error_msg_layer_type", "assert", "output_layer", "[", "0", "]", ".", "lower", "(", ")", "==", "\"linear\"", ",", "\"Final layer must be linear\"", "for", "layer", "in", "all_layers", ":", "assert", "isinstance", "(", "layer", ",", "list", ")", ",", "\"Each layer must be a list\"", "assert", "isinstance", "(", "layer", "[", "0", "]", ",", "str", ")", ",", "error_msg_layer_type", "layer_type_name", "=", "layer", "[", "0", "]", ".", "lower", "(", ")", "assert", "layer_type_name", "in", "self", ".", "valid_cnn_hidden_layer_types", ",", "\"Layer name {} not valid, use one of {}\"", ".", "format", "(", "layer_type_name", ",", "self", ".", "valid_cnn_hidden_layer_types", ")", "if", "layer_type_name", "==", "\"conv\"", ":", "assert", "len", "(", "layer", ")", "==", "5", ",", "error_msg_conv_layer", "for", "ix", "in", "range", "(", "3", ")", ":", "assert", "isinstance", "(", "layer", "[", "ix", "+", "1", "]", ",", "int", ")", "and", "layer", "[", "ix", "+", "1", "]", ">", "0", ",", "error_msg_conv_layer", "assert", "isinstance", "(", "layer", "[", "4", "]", ",", "int", ")", "and", "layer", "[", "4", "]", ">=", "0", ",", "error_msg_conv_layer", "elif", "layer_type_name", "==", "\"maxpool\"", ":", "assert", "len", "(", "layer", ")", "==", "4", ",", "error_msg_maxpool_layer", "for", "ix", "in", "range", "(", "2", ")", ":", "assert", "isinstance", "(", "layer", "[", "ix", "+", "1", "]", ",", "int", ")", "and", "layer", "[", 
"ix", "+", "1", "]", ">", "0", ",", "error_msg_maxpool_layer", "if", "layer", "[", "1", "]", "!=", "layer", "[", "2", "]", ":", "print", "(", "\"NOTE that your maxpool kernel size {} isn't the same as your stride {}\"", ".", "format", "(", "layer", "[", "1", "]", ",", "layer", "[", "2", "]", ")", ")", "assert", "isinstance", "(", "layer", "[", "3", "]", ",", "int", ")", "and", "layer", "[", "3", "]", ">=", "0", ",", "error_msg_conv_layer", "elif", "layer_type_name", "==", "\"avgpool\"", ":", "assert", "len", "(", "layer", ")", "==", "4", ",", "error_msg_avgpool_layer", "for", "ix", "in", "range", "(", "2", ")", ":", "assert", "isinstance", "(", "layer", "[", "ix", "+", "1", "]", ",", "int", ")", "and", "layer", "[", "ix", "+", "1", "]", ">", "0", ",", "error_msg_avgpool_layer", "assert", "isinstance", "(", "layer", "[", "3", "]", ",", "int", ")", "and", "layer", "[", "3", "]", ">=", "0", ",", "error_msg_conv_layer", "if", "layer", "[", "1", "]", "!=", "layer", "[", "2", "]", ":", "print", "(", "\"NOTE that your avgpool kernel size {} isn't the same as your stride {}\"", ".", "format", "(", "layer", "[", "1", "]", ",", "layer", "[", "2", "]", ")", ")", "elif", "layer_type_name", "==", "\"adaptivemaxpool\"", ":", "assert", "len", "(", "layer", ")", "==", "3", ",", "error_msg_adaptivemaxpool_layer", "for", "ix", "in", "range", "(", "2", ")", ":", "assert", "isinstance", "(", "layer", "[", "ix", "+", "1", "]", ",", "int", ")", "and", "layer", "[", "ix", "+", "1", "]", ">", "0", ",", "error_msg_adaptivemaxpool_layer", "elif", "layer_type_name", "==", "\"adaptiveavgpool\"", ":", "assert", "len", "(", "layer", ")", "==", "3", ",", "error_msg_adaptiveavgpool_layer", "for", "ix", "in", "range", "(", "2", ")", ":", "assert", "isinstance", "(", "layer", "[", "ix", "+", "1", "]", ",", "int", ")", "and", "layer", "[", "ix", "+", "1", "]", ">", "0", ",", "error_msg_adaptiveavgpool_layer", "elif", "layer_type_name", "==", "\"linear\"", ":", "assert", "len", "(", "layer", ")", "==", "2", ",", "error_msg_linear_layer", "for", "ix", "in", "range", "(", "1", ")", ":", "assert", "isinstance", "(", "layer", "[", "ix", "+", "1", "]", ",", "int", ")", "and", "layer", "[", "ix", "+", "1", "]", ">", "0", "else", ":", "raise", "ValueError", "(", "\"Invalid layer name\"", ")", "rest_must_be_linear", "=", "False", "for", "ix", ",", "layer", "in", "enumerate", "(", "all_layers", ")", ":", "if", "rest_must_be_linear", ":", "assert", "layer", "[", "0", "]", ".", "lower", "(", ")", "==", "\"linear\"", ",", "\"If have linear layers then they must come at end\"", "if", "layer", "[", "0", "]", ".", "lower", "(", ")", "==", "\"linear\"", ":", "rest_must_be_linear", "=", "True" ]
https://github.com/p-christ/nn_builder/blob/a79b45d15176b4d333dbed094e78bb3b216ed037/nn_builder/pytorch/CNN.py#L65-L137
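
For illustration, a layers_info value that would pass the checks above (a minimal sketch; the dimensions are hypothetical, not taken from the source):

layers_info = [
    ["conv", 32, 3, 1, 0],  # channels=32, kernel=3, stride=1, padding=0
    ["maxpool", 2, 2, 0],   # kernel=2, stride=2, padding=0 (kernel == stride, so no note is printed)
    ["linear", 64],         # once a linear layer appears, the rest must be linear...
    ["linear", 10],         # ...and the final layer must be linear
]
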
thusiyuan/holistic_scene_parsing
b4a73b2cfe218e1374a8a6e63f1df52ae57431d0
utils/plyfile.py
python
PlyProperty.dtype
(self, byte_order='=')
return byte_order + self.val_dtype
Return the numpy dtype description for this property (as a string).
Return the numpy dtype description for this property (as a string).
[ "Return", "the", "numpy", "dtype", "description", "for", "this", "property", "(", "as", "a", "string", ")", "." ]
def dtype(self, byte_order='='): ''' Return the numpy dtype description for this property (as a string). ''' return byte_order + self.val_dtype
[ "def", "dtype", "(", "self", ",", "byte_order", "=", "'='", ")", ":", "return", "byte_order", "+", "self", ".", "val_dtype" ]
https://github.com/thusiyuan/holistic_scene_parsing/blob/b4a73b2cfe218e1374a8a6e63f1df52ae57431d0/utils/plyfile.py#L770-L776
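
A minimal sketch of what this returns, assuming val_dtype holds a numpy type code such as 'f4' (the actual values are set elsewhere in plyfile.py):

import numpy as np

byte_order, val_dtype = '<', 'f4'        # hypothetical values
print(np.dtype(byte_order + val_dtype))  # float32, little-endian
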
microsoft/hummingbird
2705c42585ac1d3ee69e65a93e5f59d7d42f44d5
hummingbird/ml/operator_converters/sparkml/discretizer.py
python
convert_sparkml_bucketizer
(operator, device, extra_config)
return KBinsDiscretizer(operator, None, None, np.array(bin_edges), labels, device)
Converter for `pyspark.ml.feature.Bucketizer` Args: operator: An operator wrapping a `pyspark.ml.feature.Bucketizer` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model
Converter for `pyspark.ml.feature.Bucketizer`
[ "Converter", "for", "pyspark", ".", "ml", ".", "feature", ".", "Bucketizer" ]
def convert_sparkml_bucketizer(operator, device, extra_config): """ Converter for `pyspark.ml.feature.Bucketizer` Args: operator: An operator wrapping a `pyspark.ml.feature.QuantileDiscretizer` model device: String defining the type of device the converted operator should be run on extra_config: Extra configuration used to select the best conversion strategy Returns: A PyTorch model """ bin_edges = [operator.raw_operator.getSplits()] max_bin_edges = max([len(bins) for bins in bin_edges]) labels = [] for i in range(len(bin_edges)): labels.append(np.array([i for i in range(len(bin_edges[i]) - 1)])) if len(bin_edges[i]) < max_bin_edges: bin_edges[i] = bin_edges[i] + [np.inf for _ in range((max_bin_edges - len(bin_edges[i])))] return KBinsDiscretizer(operator, None, None, np.array(bin_edges), labels, device)
[ "def", "convert_sparkml_bucketizer", "(", "operator", ",", "device", ",", "extra_config", ")", ":", "bin_edges", "=", "[", "operator", ".", "raw_operator", ".", "getSplits", "(", ")", "]", "max_bin_edges", "=", "max", "(", "[", "len", "(", "bins", ")", "for", "bins", "in", "bin_edges", "]", ")", "labels", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "bin_edges", ")", ")", ":", "labels", ".", "append", "(", "np", ".", "array", "(", "[", "i", "for", "i", "in", "range", "(", "len", "(", "bin_edges", "[", "i", "]", ")", "-", "1", ")", "]", ")", ")", "if", "len", "(", "bin_edges", "[", "i", "]", ")", "<", "max_bin_edges", ":", "bin_edges", "[", "i", "]", "=", "bin_edges", "[", "i", "]", "+", "[", "np", ".", "inf", "for", "_", "in", "range", "(", "(", "max_bin_edges", "-", "len", "(", "bin_edges", "[", "i", "]", ")", ")", ")", "]", "return", "KBinsDiscretizer", "(", "operator", ",", "None", ",", "None", ",", "np", ".", "array", "(", "bin_edges", ")", ",", "labels", ",", "device", ")" ]
https://github.com/microsoft/hummingbird/blob/2705c42585ac1d3ee69e65a93e5f59d7d42f44d5/hummingbird/ml/operator_converters/sparkml/discretizer.py#L19-L40
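
The edge-padding step can be sketched in plain numpy (hypothetical splits; this shows the same rectangularization idea, not the hummingbird API):

import numpy as np

bin_edges = [[-np.inf, 0.0, 1.0, np.inf], [-np.inf, 0.5, np.inf]]
max_bin_edges = max(len(bins) for bins in bin_edges)
labels = [np.arange(len(bins) - 1) for bins in bin_edges]  # one label per bucket
# Pad shorter edge lists with +inf so np.array() gets a rectangular shape.
bin_edges = [bins + [np.inf] * (max_bin_edges - len(bins)) for bins in bin_edges]
print(np.array(bin_edges).shape)  # (2, 4)
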
KalleHallden/AutoTimer
2d954216700c4930baa154e28dbddc34609af7ce
env/lib/python2.7/site-packages/setuptools/_vendor/pyparsing.py
python
FollowedBy.parseImpl
( self, instring, loc, doActions=True )
return loc, []
[]
def parseImpl( self, instring, loc, doActions=True ): self.expr.tryParse( instring, loc ) return loc, []
[ "def", "parseImpl", "(", "self", ",", "instring", ",", "loc", ",", "doActions", "=", "True", ")", ":", "self", ".", "expr", ".", "tryParse", "(", "instring", ",", "loc", ")", "return", "loc", ",", "[", "]" ]
https://github.com/KalleHallden/AutoTimer/blob/2d954216700c4930baa154e28dbddc34609af7ce/env/lib/python2.7/site-packages/setuptools/_vendor/pyparsing.py#L3813-L3815
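
FollowedBy is a positive lookahead: tryParse must succeed, but the original loc is returned, so no input is consumed. A small usage sketch:

from pyparsing import Word, alphas, FollowedBy

# Match a word only when a colon immediately follows; the colon is not consumed.
label = Word(alphas) + FollowedBy(":")
print(label.parseString("width: 20"))  # -> ['width']
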
cool-RR/python_toolbox
cb9ef64b48f1d03275484d707dc5079b6701ad0c
python_toolbox/nifty_collections/lazy_tuple.py
python
_with_lock
(method, *args, **kwargs)
Decorator for using the `LazyTuple`'s lock.
Decorator for using the `LazyTuple`'s lock.
[ "Decorator", "for", "using", "the", "LazyTuple", "s", "lock", "." ]
def _with_lock(method, *args, **kwargs): '''Decorator for using the `LazyTuple`'s lock.''' self = args[0] with self.lock: return method(*args, **kwargs)
[ "def", "_with_lock", "(", "method", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", "=", "args", "[", "0", "]", "with", "self", ".", "lock", ":", "return", "method", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/cool-RR/python_toolbox/blob/cb9ef64b48f1d03275484d707dc5079b6701ad0c/python_toolbox/nifty_collections/lazy_tuple.py#L41-L45
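
The original is wired up through a decorator helper elsewhere in python_toolbox; the same idea as a self-contained standard decorator (a sketch, not the library's code):

import functools
import threading

def with_lock(method):
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        with self.lock:               # hold the instance's lock for the call
            return method(self, *args, **kwargs)
    return wrapper

class Counter:
    def __init__(self):
        self.lock = threading.RLock()
        self.value = 0

    @with_lock
    def increment(self):
        self.value += 1
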
SUSE/DeepSea
9c7fad93915ba1250c40d50c855011e9fe41ed21
cli/deepsea.py
python
cli
(log_level, log_file)
DeepSea CLI tool. Use this tool to visualize the execution progress of DeepSea, either by running the stages directly through "stage run" command, or by monitoring the salt-run execution using the "monitor" command.
DeepSea CLI tool.
[ "DeepSea", "CLI", "tool", "." ]
def cli(log_level, log_file): """ DeepSea CLI tool. Use this tool to visualize the execution progress of DeepSea, either by running the stages directly through "stage run" command, or by monitoring the salt-run execution using the "monitor" command. """ Config.LOG_LEVEL = log_level Config.LOG_FILE_PATH = log_file
[ "def", "cli", "(", "log_level", ",", "log_file", ")", ":", "Config", ".", "LOG_LEVEL", "=", "log_level", "Config", ".", "LOG_FILE_PATH", "=", "log_file" ]
https://github.com/SUSE/DeepSea/blob/9c7fad93915ba1250c40d50c855011e9fe41ed21/cli/deepsea.py#L256-L265
kovidgoyal/calibre
2b41671370f2a9eb1109b9ae901ccf915f1bd0c8
src/calibre/library/catalogs/epub_mobi_builder.py
python
CatalogBuilder.generate_html_description_header
(self, book)
return soup
Generate the HTML Description header from template. Create HTML Description from book metadata and template. Called by generate_html_descriptions() Args: book (dict): book metadata Return: soup (BeautifulSoup): HTML Description for book
Generate the HTML Description header from template.
[ "Generate", "the", "HTML", "Description", "header", "from", "template", "." ]
def generate_html_description_header(self, book): """ Generate the HTML Description header from template. Create HTML Description from book metadata and template. Called by generate_html_descriptions() Args: book (dict): book metadata Return: soup (BeautifulSoup): HTML Description for book """ from calibre.ebooks.oeb.base import XHTML_NS def _generate_html(): args = dict( author=escape(author), author_prefix=escape(author_prefix), comments=comments, css=css, formats=formats, genres=genres, note_content=note_content, note_source=note_source, pubdate=pubdate, publisher=publisher, pubmonth=pubmonth, pubyear=pubyear, rating=rating, series=escape(series), series_index=series_index, thumb=thumb, title=escape(title), title_str=escape(title_str), xmlns=XHTML_NS, ) for k, v in iteritems(args): if isbytestring(v): args[k] = v.decode('utf-8') generated_html = P('catalog/template.xhtml', data=True).decode('utf-8').format(**args) generated_html = substitute_entites(generated_html) return BeautifulSoup(generated_html) # Generate the template arguments css = P('catalog/stylesheet.css', data=True).decode('utf-8') title_str = title = book['title'] series = '' series_index = '' if book['series']: series = book['series'] series_index = str(book['series_index']) if series_index.endswith('.0'): series_index = series_index[:-2] # Author, author_prefix (read|reading|none symbol or missing symbol) author = book['author'] if book['prefix']: author_prefix = book['prefix'] + ' ' + _("by ") elif self.opts.connected_kindle and book['id'] in self.bookmarked_books: author_prefix = self.SYMBOL_READING + ' ' + _("by ") else: author_prefix = _("by ") # Genres genres = '' if 'genres' in book: _soup = BeautifulSoup('') genresTag = _soup.new_tag('p') gtc = 0 for (i, tag) in enumerate(sorted(book.get('genres', []))): aTag = _soup.new_tag('a') if self.opts.generate_genres: try: aTag['href'] = "Genre_%s.html" % self.genre_tags_dict[tag] except KeyError: pass aTag.insert(0, NavigableString(tag)) genresTag.insert(gtc, aTag) gtc += 1 if i < len(book['genres']) - 1: genresTag.insert(gtc, NavigableString(' · ')) gtc += 1 genres = genresTag.decode_contents() # Formats formats = [] if 'formats' in book: for format in sorted(book['formats']): formats.append(format.rpartition('.')[2].upper()) formats = ' · '.join(formats) # Date of publication if book['date']: pubdate = book['date'] try: pubmonth, pubyear = pubdate.split() except Exception: pubmonth = pubyear = '' else: pubdate = pubyear = pubmonth = '' # Thumb _soup = BeautifulSoup('<html>', selfClosingTags=['img']) thumb = _soup.new_tag("img") if 'cover' in book and book['cover']: thumb['src'] = "../images/thumbnail_%d.jpg" % int(book['id']) else: thumb['src'] = "../images/thumbnail_default.jpg" thumb['alt'] = "cover thumbnail" # Publisher publisher = ' ' if 'publisher' in book: publisher = book['publisher'] # Rating stars = int(book['rating']) // 2 rating = '' if stars: star_string = self.SYMBOL_FULL_RATING * stars empty_stars = self.SYMBOL_EMPTY_RATING * (5 - stars) rating = f'{star_string}{empty_stars} <br/>' # Notes note_source = '' note_content = '' if 'notes' in book: note_source = book['notes']['source'] note_content = book['notes']['content'] # Comments comments = '' if book.get('description'): comments = book['description'] # >>>> Populate the template <<<< soup = _generate_html() # >>>> Post-process the template <<<< body = soup.find('body') btc = 0 # Insert the title anchor for inbound links aTag = soup.new_tag("a") aTag['id'] = "book%d" % int(book['id']) divTag = 
soup.new_tag('div') divTag.insert(0, aTag) body.insert(btc, divTag) btc += 1 # Insert the link to the series or remove <a class="series"> aTag = body.find('a', attrs={'class': 'series_id'}) if aTag: if book['series']: if self.opts.generate_series: aTag['href'] = "{}.html#{}".format('BySeries', self.generate_series_anchor(book['series'])) else: aTag.extract() # Insert the author link aTag = body.find('a', attrs={'class': 'author'}) if self.opts.generate_authors and aTag: aTag['href'] = "{}.html#{}".format("ByAlphaAuthor", self.generate_author_anchor(book['author'])) if publisher == ' ': publisherTag = body.find('td', attrs={'class': 'publisher'}) if publisherTag: publisherTag.contents[0].replaceWith(NBSP) if not genres: genresTag = body.find('p', attrs={'class': 'genres'}) if genresTag: genresTag.extract() if not formats: formatsTag = body.find('p', attrs={'class': 'formats'}) if formatsTag: formatsTag.extract() if note_content == '': tdTag = body.find('td', attrs={'class': 'notes'}) if tdTag: tdTag.contents[0].replaceWith(NBSP) emptyTags = body.findAll('td', attrs={'class': 'empty'}) for mt in emptyTags: newEmptyTag = soup.new_tag('td') newEmptyTag.insert(0, NBSP) mt.replaceWith(newEmptyTag) return soup
[ "def", "generate_html_description_header", "(", "self", ",", "book", ")", ":", "from", "calibre", ".", "ebooks", ".", "oeb", ".", "base", "import", "XHTML_NS", "def", "_generate_html", "(", ")", ":", "args", "=", "dict", "(", "author", "=", "escape", "(", "author", ")", ",", "author_prefix", "=", "escape", "(", "author_prefix", ")", ",", "comments", "=", "comments", ",", "css", "=", "css", ",", "formats", "=", "formats", ",", "genres", "=", "genres", ",", "note_content", "=", "note_content", ",", "note_source", "=", "note_source", ",", "pubdate", "=", "pubdate", ",", "publisher", "=", "publisher", ",", "pubmonth", "=", "pubmonth", ",", "pubyear", "=", "pubyear", ",", "rating", "=", "rating", ",", "series", "=", "escape", "(", "series", ")", ",", "series_index", "=", "series_index", ",", "thumb", "=", "thumb", ",", "title", "=", "escape", "(", "title", ")", ",", "title_str", "=", "escape", "(", "title_str", ")", ",", "xmlns", "=", "XHTML_NS", ",", ")", "for", "k", ",", "v", "in", "iteritems", "(", "args", ")", ":", "if", "isbytestring", "(", "v", ")", ":", "args", "[", "k", "]", "=", "v", ".", "decode", "(", "'utf-8'", ")", "generated_html", "=", "P", "(", "'catalog/template.xhtml'", ",", "data", "=", "True", ")", ".", "decode", "(", "'utf-8'", ")", ".", "format", "(", "*", "*", "args", ")", "generated_html", "=", "substitute_entites", "(", "generated_html", ")", "return", "BeautifulSoup", "(", "generated_html", ")", "# Generate the template arguments", "css", "=", "P", "(", "'catalog/stylesheet.css'", ",", "data", "=", "True", ")", ".", "decode", "(", "'utf-8'", ")", "title_str", "=", "title", "=", "book", "[", "'title'", "]", "series", "=", "''", "series_index", "=", "''", "if", "book", "[", "'series'", "]", ":", "series", "=", "book", "[", "'series'", "]", "series_index", "=", "str", "(", "book", "[", "'series_index'", "]", ")", "if", "series_index", ".", "endswith", "(", "'.0'", ")", ":", "series_index", "=", "series_index", "[", ":", "-", "2", "]", "# Author, author_prefix (read|reading|none symbol or missing symbol)", "author", "=", "book", "[", "'author'", "]", "if", "book", "[", "'prefix'", "]", ":", "author_prefix", "=", "book", "[", "'prefix'", "]", "+", "' '", "+", "_", "(", "\"by \"", ")", "elif", "self", ".", "opts", ".", "connected_kindle", "and", "book", "[", "'id'", "]", "in", "self", ".", "bookmarked_books", ":", "author_prefix", "=", "self", ".", "SYMBOL_READING", "+", "' '", "+", "_", "(", "\"by \"", ")", "else", ":", "author_prefix", "=", "_", "(", "\"by \"", ")", "# Genres", "genres", "=", "''", "if", "'genres'", "in", "book", ":", "_soup", "=", "BeautifulSoup", "(", "''", ")", "genresTag", "=", "_soup", ".", "new_tag", "(", "'p'", ")", "gtc", "=", "0", "for", "(", "i", ",", "tag", ")", "in", "enumerate", "(", "sorted", "(", "book", ".", "get", "(", "'genres'", ",", "[", "]", ")", ")", ")", ":", "aTag", "=", "_soup", ".", "new_tag", "(", "'a'", ")", "if", "self", ".", "opts", ".", "generate_genres", ":", "try", ":", "aTag", "[", "'href'", "]", "=", "\"Genre_%s.html\"", "%", "self", ".", "genre_tags_dict", "[", "tag", "]", "except", "KeyError", ":", "pass", "aTag", ".", "insert", "(", "0", ",", "NavigableString", "(", "tag", ")", ")", "genresTag", ".", "insert", "(", "gtc", ",", "aTag", ")", "gtc", "+=", "1", "if", "i", "<", "len", "(", "book", "[", "'genres'", "]", ")", "-", "1", ":", "genresTag", ".", "insert", "(", "gtc", ",", "NavigableString", "(", "' · ')", ")", "", "gtc", "+=", "1", "genres", "=", "genresTag", ".", "decode_contents", "(", ")", "# Formats", 
"formats", "=", "[", "]", "if", "'formats'", "in", "book", ":", "for", "format", "in", "sorted", "(", "book", "[", "'formats'", "]", ")", ":", "formats", ".", "append", "(", "format", ".", "rpartition", "(", "'.'", ")", "[", "2", "]", ".", "upper", "(", ")", ")", "formats", "=", "' · '.", "j", "oin(", "f", "ormats)", "", "# Date of publication", "if", "book", "[", "'date'", "]", ":", "pubdate", "=", "book", "[", "'date'", "]", "try", ":", "pubmonth", ",", "pubyear", "=", "pubdate", ".", "split", "(", ")", "except", "Exception", ":", "pubmonth", "=", "pubyear", "=", "''", "else", ":", "pubdate", "=", "pubyear", "=", "pubmonth", "=", "''", "# Thumb", "_soup", "=", "BeautifulSoup", "(", "'<html>'", ",", "selfClosingTags", "=", "[", "'img'", "]", ")", "thumb", "=", "_soup", ".", "new_tag", "(", "\"img\"", ")", "if", "'cover'", "in", "book", "and", "book", "[", "'cover'", "]", ":", "thumb", "[", "'src'", "]", "=", "\"../images/thumbnail_%d.jpg\"", "%", "int", "(", "book", "[", "'id'", "]", ")", "else", ":", "thumb", "[", "'src'", "]", "=", "\"../images/thumbnail_default.jpg\"", "thumb", "[", "'alt'", "]", "=", "\"cover thumbnail\"", "# Publisher", "publisher", "=", "' '", "if", "'publisher'", "in", "book", ":", "publisher", "=", "book", "[", "'publisher'", "]", "# Rating", "stars", "=", "int", "(", "book", "[", "'rating'", "]", ")", "//", "2", "rating", "=", "''", "if", "stars", ":", "star_string", "=", "self", ".", "SYMBOL_FULL_RATING", "*", "stars", "empty_stars", "=", "self", ".", "SYMBOL_EMPTY_RATING", "*", "(", "5", "-", "stars", ")", "rating", "=", "f'{star_string}{empty_stars} <br/>'", "# Notes", "note_source", "=", "''", "note_content", "=", "''", "if", "'notes'", "in", "book", ":", "note_source", "=", "book", "[", "'notes'", "]", "[", "'source'", "]", "note_content", "=", "book", "[", "'notes'", "]", "[", "'content'", "]", "# Comments", "comments", "=", "''", "if", "book", ".", "get", "(", "'description'", ")", ":", "comments", "=", "book", "[", "'description'", "]", "# >>>> Populate the template <<<<", "soup", "=", "_generate_html", "(", ")", "# >>>> Post-process the template <<<<", "body", "=", "soup", ".", "find", "(", "'body'", ")", "btc", "=", "0", "# Insert the title anchor for inbound links", "aTag", "=", "soup", ".", "new_tag", "(", "\"a\"", ")", "aTag", "[", "'id'", "]", "=", "\"book%d\"", "%", "int", "(", "book", "[", "'id'", "]", ")", "divTag", "=", "soup", ".", "new_tag", "(", "'div'", ")", "divTag", ".", "insert", "(", "0", ",", "aTag", ")", "body", ".", "insert", "(", "btc", ",", "divTag", ")", "btc", "+=", "1", "# Insert the link to the series or remove <a class=\"series\">", "aTag", "=", "body", ".", "find", "(", "'a'", ",", "attrs", "=", "{", "'class'", ":", "'series_id'", "}", ")", "if", "aTag", ":", "if", "book", "[", "'series'", "]", ":", "if", "self", ".", "opts", ".", "generate_series", ":", "aTag", "[", "'href'", "]", "=", "\"{}.html#{}\"", ".", "format", "(", "'BySeries'", ",", "self", ".", "generate_series_anchor", "(", "book", "[", "'series'", "]", ")", ")", "else", ":", "aTag", ".", "extract", "(", ")", "# Insert the author link", "aTag", "=", "body", ".", "find", "(", "'a'", ",", "attrs", "=", "{", "'class'", ":", "'author'", "}", ")", "if", "self", ".", "opts", ".", "generate_authors", "and", "aTag", ":", "aTag", "[", "'href'", "]", "=", "\"{}.html#{}\"", ".", "format", "(", "\"ByAlphaAuthor\"", ",", "self", ".", "generate_author_anchor", "(", "book", "[", "'author'", "]", ")", ")", "if", "publisher", "==", "' '", ":", "publisherTag", "=", "body", ".", 
"find", "(", "'td'", ",", "attrs", "=", "{", "'class'", ":", "'publisher'", "}", ")", "if", "publisherTag", ":", "publisherTag", ".", "contents", "[", "0", "]", ".", "replaceWith", "(", "NBSP", ")", "if", "not", "genres", ":", "genresTag", "=", "body", ".", "find", "(", "'p'", ",", "attrs", "=", "{", "'class'", ":", "'genres'", "}", ")", "if", "genresTag", ":", "genresTag", ".", "extract", "(", ")", "if", "not", "formats", ":", "formatsTag", "=", "body", ".", "find", "(", "'p'", ",", "attrs", "=", "{", "'class'", ":", "'formats'", "}", ")", "if", "formatsTag", ":", "formatsTag", ".", "extract", "(", ")", "if", "note_content", "==", "''", ":", "tdTag", "=", "body", ".", "find", "(", "'td'", ",", "attrs", "=", "{", "'class'", ":", "'notes'", "}", ")", "if", "tdTag", ":", "tdTag", ".", "contents", "[", "0", "]", ".", "replaceWith", "(", "NBSP", ")", "emptyTags", "=", "body", ".", "findAll", "(", "'td'", ",", "attrs", "=", "{", "'class'", ":", "'empty'", "}", ")", "for", "mt", "in", "emptyTags", ":", "newEmptyTag", "=", "soup", ".", "new_tag", "(", "'td'", ")", "newEmptyTag", ".", "insert", "(", "0", ",", "NBSP", ")", "mt", ".", "replaceWith", "(", "newEmptyTag", ")", "return", "soup" ]
https://github.com/kovidgoyal/calibre/blob/2b41671370f2a9eb1109b9ae901ccf915f1bd0c8/src/calibre/library/catalogs/epub_mobi_builder.py#L2651-L2844
oracle/graalpython
577e02da9755d916056184ec441c26e00b70145c
graalpython/lib-python/3/ssl.py
python
_ASN1Object.fromnid
(cls, nid)
return super().__new__(cls, *_nid2obj(nid))
Create _ASN1Object from OpenSSL numeric ID
Create _ASN1Object from OpenSSL numeric ID
[ "Create", "_ASN1Object", "from", "OpenSSL", "numeric", "ID" ]
def fromnid(cls, nid): """Create _ASN1Object from OpenSSL numeric ID """ return super().__new__(cls, *_nid2obj(nid))
[ "def", "fromnid", "(", "cls", ",", "nid", ")", ":", "return", "super", "(", ")", ".", "__new__", "(", "cls", ",", "*", "_nid2obj", "(", "nid", ")", ")" ]
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/ssl.py#L456-L459
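
The public Purpose enum in ssl builds its members from _ASN1Object, so the namedtuple fields (nid, shortname, longname, oid) are visible there:

import ssl

print(ssl.Purpose.SERVER_AUTH.oid)        # '1.3.6.1.5.5.7.3.1'
print(ssl.Purpose.SERVER_AUTH.shortname)  # 'serverAuth'
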
fortharris/Pcode
147962d160a834c219e12cb456abc130826468e4
venv/__init__.py
python
EnvBuilder.create
(self, env_dir)
Create a virtual environment in a directory. :param env_dir: The target directory to create an environment in.
Create a virtual environment in a directory.
[ "Create", "a", "virtual", "environment", "in", "a", "directory", "." ]
def create(self, env_dir): """ Create a virtual environment in a directory. :param env_dir: The target directory to create an environment in. """ env_dir = os.path.abspath(env_dir) context = self.ensure_directories(env_dir) self.create_configuration(context) self.setup_python(context) if not self.upgrade: self.setup_scripts(context) self.post_setup(context)
[ "def", "create", "(", "self", ",", "env_dir", ")", ":", "env_dir", "=", "os", ".", "path", ".", "abspath", "(", "env_dir", ")", "context", "=", "self", ".", "ensure_directories", "(", "env_dir", ")", "self", ".", "create_configuration", "(", "context", ")", "self", ".", "setup_python", "(", "context", ")", "if", "not", "self", ".", "upgrade", ":", "self", ".", "setup_scripts", "(", "context", ")", "self", ".", "post_setup", "(", "context", ")" ]
https://github.com/fortharris/Pcode/blob/147962d160a834c219e12cb456abc130826468e4/venv/__init__.py#L81-L94
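
Typical use of EnvBuilder from the standard library (with_pip assumes Python 3.4+; the snippet above predates that option):

import venv

builder = venv.EnvBuilder(with_pip=True)
builder.create("demo-env")  # creates ./demo-env with pip installed
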
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/Python-2.7.9/Lib/distutils/msvc9compiler.py
python
normalize_and_reduce_paths
(paths)
return reduced_paths
Return a list of normalized paths with duplicates removed. The current order of paths is maintained.
Return a list of normalized paths with duplicates removed.
[ "Return", "a", "list", "of", "normalized", "paths", "with", "duplicates", "removed", "." ]
def normalize_and_reduce_paths(paths): """Return a list of normalized paths with duplicates removed. The current order of paths is maintained. """ # Paths are normalized so things like: /a and /a/ aren't both preserved. reduced_paths = [] for p in paths: np = os.path.normpath(p) # XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set. if np not in reduced_paths: reduced_paths.append(np) return reduced_paths
[ "def", "normalize_and_reduce_paths", "(", "paths", ")", ":", "# Paths are normalized so things like: /a and /a/ aren't both preserved.", "reduced_paths", "=", "[", "]", "for", "p", "in", "paths", ":", "np", "=", "os", ".", "path", ".", "normpath", "(", "p", ")", "# XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set.", "if", "np", "not", "in", "reduced_paths", ":", "reduced_paths", ".", "append", "(", "np", ")", "return", "reduced_paths" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/Python-2.7.9/Lib/distutils/msvc9compiler.py#L194-L206
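
For example, three spellings of the same directory collapse to one entry (the same logic, inlined so the snippet stands alone):

import os

paths = ["/usr//bin/", "/usr/bin", "/usr/bin/../bin"]
reduced = []
for p in paths:
    norm = os.path.normpath(p)
    if norm not in reduced:
        reduced.append(norm)
print(reduced)  # ['/usr/bin']
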
IJDykeman/wangTiles
7c1ee2095ebdf7f72bce07d94c6484915d5cae8b
experimental_code/tiles_3d/venv_mac/lib/python2.7/site-packages/setuptools/command/easy_install.py
python
get_exe_prefixes
(exe_filename)
return prefixes
Get exe->egg path translations for a given .exe file
Get exe->egg path translations for a given .exe file
[ "Get", "exe", "-", ">", "egg", "path", "translations", "for", "a", "given", ".", "exe", "file" ]
def get_exe_prefixes(exe_filename): """Get exe->egg path translations for a given .exe file""" prefixes = [ ('PURELIB/', ''), ('PLATLIB/pywin32_system32', ''), ('PLATLIB/', ''), ('SCRIPTS/', 'EGG-INFO/scripts/'), ('DATA/lib/site-packages', ''), ] z = zipfile.ZipFile(exe_filename) try: for info in z.infolist(): name = info.filename parts = name.split('/') if len(parts) == 3 and parts[2] == 'PKG-INFO': if parts[1].endswith('.egg-info'): prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/')) break if len(parts) != 2 or not name.endswith('.pth'): continue if name.endswith('-nspkg.pth'): continue if parts[0].upper() in ('PURELIB', 'PLATLIB'): contents = z.read(name) if six.PY3: contents = contents.decode() for pth in yield_lines(contents): pth = pth.strip().replace('\\', '/') if not pth.startswith('import'): prefixes.append((('%s/%s/' % (parts[0], pth)), '')) finally: z.close() prefixes = [(x.lower(), y) for x, y in prefixes] prefixes.sort() prefixes.reverse() return prefixes
[ "def", "get_exe_prefixes", "(", "exe_filename", ")", ":", "prefixes", "=", "[", "(", "'PURELIB/'", ",", "''", ")", ",", "(", "'PLATLIB/pywin32_system32'", ",", "''", ")", ",", "(", "'PLATLIB/'", ",", "''", ")", ",", "(", "'SCRIPTS/'", ",", "'EGG-INFO/scripts/'", ")", ",", "(", "'DATA/lib/site-packages'", ",", "''", ")", ",", "]", "z", "=", "zipfile", ".", "ZipFile", "(", "exe_filename", ")", "try", ":", "for", "info", "in", "z", ".", "infolist", "(", ")", ":", "name", "=", "info", ".", "filename", "parts", "=", "name", ".", "split", "(", "'/'", ")", "if", "len", "(", "parts", ")", "==", "3", "and", "parts", "[", "2", "]", "==", "'PKG-INFO'", ":", "if", "parts", "[", "1", "]", ".", "endswith", "(", "'.egg-info'", ")", ":", "prefixes", ".", "insert", "(", "0", ",", "(", "'/'", ".", "join", "(", "parts", "[", ":", "2", "]", ")", ",", "'EGG-INFO/'", ")", ")", "break", "if", "len", "(", "parts", ")", "!=", "2", "or", "not", "name", ".", "endswith", "(", "'.pth'", ")", ":", "continue", "if", "name", ".", "endswith", "(", "'-nspkg.pth'", ")", ":", "continue", "if", "parts", "[", "0", "]", ".", "upper", "(", ")", "in", "(", "'PURELIB'", ",", "'PLATLIB'", ")", ":", "contents", "=", "z", ".", "read", "(", "name", ")", "if", "six", ".", "PY3", ":", "contents", "=", "contents", ".", "decode", "(", ")", "for", "pth", "in", "yield_lines", "(", "contents", ")", ":", "pth", "=", "pth", ".", "strip", "(", ")", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "if", "not", "pth", ".", "startswith", "(", "'import'", ")", ":", "prefixes", ".", "append", "(", "(", "(", "'%s/%s/'", "%", "(", "parts", "[", "0", "]", ",", "pth", ")", ")", ",", "''", ")", ")", "finally", ":", "z", ".", "close", "(", ")", "prefixes", "=", "[", "(", "x", ".", "lower", "(", ")", ",", "y", ")", "for", "x", ",", "y", "in", "prefixes", "]", "prefixes", ".", "sort", "(", ")", "prefixes", ".", "reverse", "(", ")", "return", "prefixes" ]
https://github.com/IJDykeman/wangTiles/blob/7c1ee2095ebdf7f72bce07d94c6484915d5cae8b/experimental_code/tiles_3d/venv_mac/lib/python2.7/site-packages/setuptools/command/easy_install.py#L1534-L1570
Pyomo/pyomo
dbd4faee151084f343b893cc2b0c04cf2b76fd92
pyomo/dae/plugins/finitedifference.py
python
_central_transform_order2
(v, s)
return _ctr_fun2
Applies the Central Difference formula of order O(h^2) for second derivatives
Applies the Central Difference formula of order O(h^2) for second derivatives
[ "Applies", "the", "Central", "Difference", "formula", "of", "order", "O", "(", "h^2", ")", "for", "second", "derivatives" ]
def _central_transform_order2(v, s): """ Applies the Central Difference formula of order O(h^2) for second derivatives """ def _ctr_fun2(i): tmp = list(s) idx = s.ord(i)-1 if idx == 0: # Needed since '-1' is considered a valid index in Python raise IndexError("list index out of range") return 1 / ((tmp[idx + 1] - tmp[idx]) * (tmp[idx] - tmp[idx - 1])) * \ (v(tmp[idx + 1]) - 2 * v(tmp[idx]) + v(tmp[idx - 1])) return _ctr_fun2
[ "def", "_central_transform_order2", "(", "v", ",", "s", ")", ":", "def", "_ctr_fun2", "(", "i", ")", ":", "tmp", "=", "list", "(", "s", ")", "idx", "=", "s", ".", "ord", "(", "i", ")", "-", "1", "if", "idx", "==", "0", ":", "# Needed since '-1' is considered a valid index in Python", "raise", "IndexError", "(", "\"list index out of range\"", ")", "return", "1", "/", "(", "(", "tmp", "[", "idx", "+", "1", "]", "-", "tmp", "[", "idx", "]", ")", "*", "(", "tmp", "[", "idx", "]", "-", "tmp", "[", "idx", "-", "1", "]", ")", ")", "*", "(", "v", "(", "tmp", "[", "idx", "+", "1", "]", ")", "-", "2", "*", "v", "(", "tmp", "[", "idx", "]", ")", "+", "v", "(", "tmp", "[", "idx", "-", "1", "]", ")", ")", "return", "_ctr_fun2" ]
https://github.com/Pyomo/pyomo/blob/dbd4faee151084f343b893cc2b0c04cf2b76fd92/pyomo/dae/plugins/finitedifference.py#L45-L57
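
Written out, _ctr_fun2 implements the three-point central stencil on a (possibly non-uniform) grid t_{i-1} < t_i < t_{i+1}:

\[
\left.\frac{d^2 v}{dt^2}\right|_{t_i}
\approx
\frac{v(t_{i+1}) - 2\,v(t_i) + v(t_{i-1})}
     {(t_{i+1} - t_i)\,(t_i - t_{i-1})}
\]

On a uniform grid with spacing h this reduces to the classical second-order formula (v_{i+1} - 2 v_i + v_{i-1}) / h^2; the IndexError guard rejects the first grid point, where t_{i-1} does not exist.
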
karlicoss/HPI
be21606075cbc15018d1f36c2581ab138e4a44cc
my/core/__main__.py
python
module_cmd
(list_all: bool)
List available modules
List available modules
[ "List", "available", "modules" ]
def module_cmd(list_all: bool) -> None: '''List available modules''' list_modules(list_all=list_all)
[ "def", "module_cmd", "(", "list_all", ":", "bool", ")", "->", "None", ":", "list_modules", "(", "list_all", "=", "list_all", ")" ]
https://github.com/karlicoss/HPI/blob/be21606075cbc15018d1f36c2581ab138e4a44cc/my/core/__main__.py#L548-L550
DxCx/plugin.video.9anime
34358c2f701e5ddf19d3276926374a16f63f7b6a
resources/lib/ui/js2py/translators/friendly_nodes.py
python
compose_regex
(val)
return u'/%s/%s' % (reg, flags)
[]
def compose_regex(val): reg, flags = val #reg = REGEXP_CONVERTER._unescape_string(reg) return u'/%s/%s' % (reg, flags)
[ "def", "compose_regex", "(", "val", ")", ":", "reg", ",", "flags", "=", "val", "#reg = REGEXP_CONVERTER._unescape_string(reg)", "return", "u'/%s/%s'", "%", "(", "reg", ",", "flags", ")" ]
https://github.com/DxCx/plugin.video.9anime/blob/34358c2f701e5ddf19d3276926374a16f63f7b6a/resources/lib/ui/js2py/translators/friendly_nodes.py#L55-L58
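
A quick check of the composition (the helper inlined so the snippet stands alone):

def compose_regex(val):
    reg, flags = val
    return u'/%s/%s' % (reg, flags)

print(compose_regex(("[a-z]+", "gi")))  # /[a-z]+/gi
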
IronLanguages/ironpython3
7a7bb2a872eeab0d1009fc8a6e24dca43f65b693
Src/StdLib/Lib/xml/etree/ElementTree.py
python
ElementTree.getiterator
(self, tag=None)
return list(self.iter(tag))
[]
def getiterator(self, tag=None): # Change for a DeprecationWarning in 1.4 warnings.warn( "This method will be removed in future versions. " "Use 'tree.iter()' or 'list(tree.iter())' instead.", PendingDeprecationWarning, stacklevel=2 ) return list(self.iter(tag))
[ "def", "getiterator", "(", "self", ",", "tag", "=", "None", ")", ":", "# Change for a DeprecationWarning in 1.4", "warnings", ".", "warn", "(", "\"This method will be removed in future versions. \"", "\"Use 'tree.iter()' or 'list(tree.iter())' instead.\"", ",", "PendingDeprecationWarning", ",", "stacklevel", "=", "2", ")", "return", "list", "(", "self", ".", "iter", "(", "tag", ")", ")" ]
https://github.com/IronLanguages/ironpython3/blob/7a7bb2a872eeab0d1009fc8a6e24dca43f65b693/Src/StdLib/Lib/xml/etree/ElementTree.py#L624-L631
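
The replacement the warning points to, as a runnable sketch:

import xml.etree.ElementTree as ET

tree = ET.ElementTree(ET.fromstring("<root><a/><b><a/></b></root>"))
# Preferred over the deprecated tree.getiterator("a"):
print(len(list(tree.iter("a"))))  # 2
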
tenpy/tenpy
bbdd3dbbdb511948eb0e6ba7ff619ac6ca657fff
tenpy/linalg/np_conserved.py
python
grid_concat
(grid, axes, copy=True)
return _grid_concat_recursion(grid, axes, copy)
Given an np.array of npc.Arrays, performs a multi-dimensional concatenation along 'axes'. Similar to :func:`numpy.block`, but only for uniform blocking. Stacks the qind of the array, *without* sorting/blocking. Parameters ---------- grid : array_like of :class:`Array` The grid of arrays. axes : list of int The axes along which to concatenate the arrays, same len as the dimension of the grid. Concatenate arrays of the `i`th axis of the grid along the axis ``axes[i]`` copy : bool Whether the _data blocks are copied. Examples -------- Assume we have prepared rank 2 Arrays ``A, B, C, D`` sharing the legs of equal sizes and looking like this: .. testsetup :: grid_concat A = npc.Array.from_ndarray_trivial(np.arange(2).reshape(1, 2)) B = npc.Array.from_ndarray_trivial(np.arange(10, 14).reshape(1, 4)) C = npc.Array.from_ndarray_trivial(np.arange(20, 26).reshape(3, 2)) D = npc.Array.from_ndarray_trivial(np.arange(30, 42).reshape(3, 4)) .. doctest :: grid_concat >>> print(A.to_ndarray()) [[0 1]] >>> print(B.to_ndarray()) [[10 11 12 13]] >>> print(C.to_ndarray()) [[20 21] [22 23] [24 25]] >>> print(D.to_ndarray()) [[30 31 32 33] [34 35 36 37] [38 39 40 41]] Then the following grid will result in a ``(1+3, 2+4)`` shaped array: .. doctest :: grid_concat >>> g = npc.grid_concat([[A, B], ... [C, D]], axes=[0, 1]) >>> g.shape (4, 6) >>> print(g.to_ndarray()) [[ 0 1 10 11 12 13] [20 21 30 31 32 33] [22 23 34 35 36 37] [24 25 38 39 40 41]] If ``A, B, C, D`` were rank 4 arrays, with the first and last leg as before, and sharing *common* legs ``1`` and ``2`` of dimensions 1, 2, then you would get a rank-4 array: .. doctest :: grid_concat :options: +SKIP >>> g = grid_concat([[A, B], [C, D]], axes=[0, 3]) >>> g.shape (4, 1, 2, 6) See also -------- Array.sort_legcharge : can be used to block by charges.
Given an np.array of npc.Arrays, performs a multi-dimensional concatenation along 'axes'.
[ "Given", "an", "np", ".", "array", "of", "npc", ".", "Arrays", "performs", "a", "multi", "-", "dimensional", "concatenation", "along", "axes", "." ]
def grid_concat(grid, axes, copy=True): """Given an np.array of npc.Arrays, performs a multi-dimensional concatentation along 'axes'. Similar to :func:`numpy.block`, but only for uniform blocking. Stacks the qind of the array, *without* sorting/blocking. Parameters ---------- grid : array_like of :class:`Array` The grid of arrays. axes : list of int The axes along which to concatenate the arrays, same len as the dimension of the grid. Concatenate arrays of the `i`th axis of the grid along the axis ``axes[i]`` copy : bool Whether the _data blocks are copied. Examples -------- Assume we have prepared rank 2 Arrays ``A, B, C, D`` sharing the legs of equal sizes and looking like this: .. testsetup :: grid_concat A = npc.Array.from_ndarray_trivial(np.arange(2).reshape(1, 2)) B = npc.Array.from_ndarray_trivial(np.arange(10, 14).reshape(1, 4)) C = npc.Array.from_ndarray_trivial(np.arange(20, 26).reshape(3, 2)) D = npc.Array.from_ndarray_trivial(np.arange(30, 42).reshape(3, 4)) .. doctest :: grid_concat >>> print(A.to_ndarray()) [[0 1]] >>> print(B.to_ndarray()) [[10 11 12 13]] >>> print(C.to_ndarray()) [[20 21] [22 23] [24 25]] >>> print(D.to_ndarray()) [[30 31 32 33] [34 35 36 37] [38 39 40 41]] Then the following grid will result in a ``(1+3, 2+4)`` shaped array: .. doctest :: grid_concat >>> g = npc.grid_concat([[A, B], ... [C, D]], axes=[0, 1]) >>> g.shape (4, 6) >>> print(g.to_ndarray()) [[ 0 1 10 11 12 13] [20 21 30 31 32 33] [22 23 34 35 36 37] [24 25 38 39 40 41]] If ``A, B, C, D`` were rank 4 arrays, with the first and last leg as before, and sharing *common* legs ``1`` and ``2`` of dimensions 1, 2, then you would get a rank-4 array: .. doctest :: grid_concat :options: +SKIP >>> g = grid_concat([[A, B], [C, D]], axes=[0, 3]) >>> g.shape (4, 1, 2, 6) See also -------- Array.sort_legcharge : can be used to block by charges. """ grid = np.asarray(grid, dtype=object) if grid.ndim < 1 or grid.ndim != len(axes): raise ValueError("grid has wrong dimension") if grid.ndim == 1: if any([g is None for g in grid]): raise ValueError("`None` entry in 1D grid") return concatenate(grid, axes[0], copy) if any([g is None for g in grid.flat]): new_legs = [] for a, ax in enumerate(axes): tr = [a] + [i for i in range(grid.ndim) if i != a] grid_tr = np.transpose(grid, tr) leg = [] for grid_tr_row in grid_tr: # get first g which is not None in grid_tr_row first_g = next((g for g in grid_tr_row.flat if g is not None), None) if first_g is None: raise ValueError("Full row/column with only `None` entries") else: leg.append(first_g.get_leg(ax)) new_legs.append(leg) assert first_g is not None labels = first_g.get_leg_labels() axes = first_g.get_leg_indices(axes) zeros_legs = first_g.legs[:] for idx, entry in np.ndenumerate(grid): if entry is None: for ax, new_leg, i in zip(axes, new_legs, idx): zeros_legs[ax] = new_leg[i] entry = zeros(zeros_legs, first_g.dtype, first_g.qtotal) entry.iset_leg_labels(labels) grid[idx] = entry return _grid_concat_recursion(grid, axes, copy)
[ "def", "grid_concat", "(", "grid", ",", "axes", ",", "copy", "=", "True", ")", ":", "grid", "=", "np", ".", "asarray", "(", "grid", ",", "dtype", "=", "object", ")", "if", "grid", ".", "ndim", "<", "1", "or", "grid", ".", "ndim", "!=", "len", "(", "axes", ")", ":", "raise", "ValueError", "(", "\"grid has wrong dimension\"", ")", "if", "grid", ".", "ndim", "==", "1", ":", "if", "any", "(", "[", "g", "is", "None", "for", "g", "in", "grid", "]", ")", ":", "raise", "ValueError", "(", "\"`None` entry in 1D grid\"", ")", "return", "concatenate", "(", "grid", ",", "axes", "[", "0", "]", ",", "copy", ")", "if", "any", "(", "[", "g", "is", "None", "for", "g", "in", "grid", ".", "flat", "]", ")", ":", "new_legs", "=", "[", "]", "for", "a", ",", "ax", "in", "enumerate", "(", "axes", ")", ":", "tr", "=", "[", "a", "]", "+", "[", "i", "for", "i", "in", "range", "(", "grid", ".", "ndim", ")", "if", "i", "!=", "a", "]", "grid_tr", "=", "np", ".", "transpose", "(", "grid", ",", "tr", ")", "leg", "=", "[", "]", "for", "grid_tr_row", "in", "grid_tr", ":", "# get first g which is not None in grid_tr_row", "first_g", "=", "next", "(", "(", "g", "for", "g", "in", "grid_tr_row", ".", "flat", "if", "g", "is", "not", "None", ")", ",", "None", ")", "if", "first_g", "is", "None", ":", "raise", "ValueError", "(", "\"Full row/column with only `None` entries\"", ")", "else", ":", "leg", ".", "append", "(", "first_g", ".", "get_leg", "(", "ax", ")", ")", "new_legs", ".", "append", "(", "leg", ")", "assert", "first_g", "is", "not", "None", "labels", "=", "first_g", ".", "get_leg_labels", "(", ")", "axes", "=", "first_g", ".", "get_leg_indices", "(", "axes", ")", "zeros_legs", "=", "first_g", ".", "legs", "[", ":", "]", "for", "idx", ",", "entry", "in", "np", ".", "ndenumerate", "(", "grid", ")", ":", "if", "entry", "is", "None", ":", "for", "ax", ",", "new_leg", ",", "i", "in", "zip", "(", "axes", ",", "new_legs", ",", "idx", ")", ":", "zeros_legs", "[", "ax", "]", "=", "new_leg", "[", "i", "]", "entry", "=", "zeros", "(", "zeros_legs", ",", "first_g", ".", "dtype", ",", "first_g", ".", "qtotal", ")", "entry", ".", "iset_leg_labels", "(", "labels", ")", "grid", "[", "idx", "]", "=", "entry", "return", "_grid_concat_recursion", "(", "grid", ",", "axes", ",", "copy", ")" ]
https://github.com/tenpy/tenpy/blob/bbdd3dbbdb511948eb0e6ba7ff619ac6ca657fff/tenpy/linalg/np_conserved.py#L2958-L3062
rembo10/headphones
b3199605be1ebc83a7a8feab6b1e99b64014187c
lib/oauth2/__init__.py
python
Client.__init__
(self, consumer, token=None, cache=None, timeout=None, proxy_info=None)
[]
def __init__(self, consumer, token=None, cache=None, timeout=None, proxy_info=None): if consumer is not None and not isinstance(consumer, Consumer): raise ValueError("Invalid consumer.") if token is not None and not isinstance(token, Token): raise ValueError("Invalid token.") self.consumer = consumer self.token = token self.method = SignatureMethod_HMAC_SHA1() httplib2.Http.__init__(self, cache=cache, timeout=timeout, proxy_info=proxy_info)
[ "def", "__init__", "(", "self", ",", "consumer", ",", "token", "=", "None", ",", "cache", "=", "None", ",", "timeout", "=", "None", ",", "proxy_info", "=", "None", ")", ":", "if", "consumer", "is", "not", "None", "and", "not", "isinstance", "(", "consumer", ",", "Consumer", ")", ":", "raise", "ValueError", "(", "\"Invalid consumer.\"", ")", "if", "token", "is", "not", "None", "and", "not", "isinstance", "(", "token", ",", "Token", ")", ":", "raise", "ValueError", "(", "\"Invalid token.\"", ")", "self", ".", "consumer", "=", "consumer", "self", ".", "token", "=", "token", "self", ".", "method", "=", "SignatureMethod_HMAC_SHA1", "(", ")", "httplib2", ".", "Http", ".", "__init__", "(", "self", ",", "cache", "=", "cache", ",", "timeout", "=", "timeout", ",", "proxy_info", "=", "proxy_info", ")" ]
https://github.com/rembo10/headphones/blob/b3199605be1ebc83a7a8feab6b1e99b64014187c/lib/oauth2/__init__.py#L557-L571
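
Typical two-legged usage of this client (the credentials and URL are placeholders):

import oauth2 as oauth

consumer = oauth.Consumer(key="your-key", secret="your-secret")
client = oauth.Client(consumer)  # signs with HMAC-SHA1 by default, per __init__ above
resp, content = client.request("https://api.example.com/resource", "GET")
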
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/astropy-4.0-py3.7-macosx-10.9-x86_64.egg/astropy/io/ascii/core.py
python
BaseHeader.get_cols
(self, lines)
Initialize the header Column objects from the table ``lines``. Based on the previously set Header attributes find or create the column names. Sets ``self.cols`` with the list of Columns. Parameters ---------- lines : list List of table lines
Initialize the header Column objects from the table ``lines``.
[ "Initialize", "the", "header", "Column", "objects", "from", "the", "table", "lines", "." ]
def get_cols(self, lines): """Initialize the header Column objects from the table ``lines``. Based on the previously set Header attributes find or create the column names. Sets ``self.cols`` with the list of Columns. Parameters ---------- lines : list List of table lines """ start_line = _get_line_index(self.start_line, self.process_lines(lines)) if start_line is None: # No header line so auto-generate names from n_data_cols # Get the data values from the first line of table data to determine n_data_cols try: first_data_vals = next(self.data.get_str_vals()) except StopIteration: raise InconsistentTableError('No data lines found so cannot autogenerate ' 'column names') n_data_cols = len(first_data_vals) self.names = [self.auto_format.format(i) for i in range(1, n_data_cols + 1)] else: for i, line in enumerate(self.process_lines(lines)): if i == start_line: break else: # No header line matching raise ValueError('No header line found in table') self.names = next(self.splitter([line])) self._set_cols_from_names()
[ "def", "get_cols", "(", "self", ",", "lines", ")", ":", "start_line", "=", "_get_line_index", "(", "self", ".", "start_line", ",", "self", ".", "process_lines", "(", "lines", ")", ")", "if", "start_line", "is", "None", ":", "# No header line so auto-generate names from n_data_cols", "# Get the data values from the first line of table data to determine n_data_cols", "try", ":", "first_data_vals", "=", "next", "(", "self", ".", "data", ".", "get_str_vals", "(", ")", ")", "except", "StopIteration", ":", "raise", "InconsistentTableError", "(", "'No data lines found so cannot autogenerate '", "'column names'", ")", "n_data_cols", "=", "len", "(", "first_data_vals", ")", "self", ".", "names", "=", "[", "self", ".", "auto_format", ".", "format", "(", "i", ")", "for", "i", "in", "range", "(", "1", ",", "n_data_cols", "+", "1", ")", "]", "else", ":", "for", "i", ",", "line", "in", "enumerate", "(", "self", ".", "process_lines", "(", "lines", ")", ")", ":", "if", "i", "==", "start_line", ":", "break", "else", ":", "# No header line matching", "raise", "ValueError", "(", "'No header line found in table'", ")", "self", ".", "names", "=", "next", "(", "self", ".", "splitter", "(", "[", "line", "]", ")", ")", "self", ".", "_set_cols_from_names", "(", ")" ]
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/astropy-4.0-py3.7-macosx-10.9-x86_64.egg/astropy/io/ascii/core.py#L557-L592
learningequality/ka-lite
571918ea668013dcf022286ea85eff1c5333fb8b
kalite/packages/bundled/django/contrib/gis/db/models/fields.py
python
GeometryField.get_srid
(self, geom)
Returns the default SRID for the given geometry, taking into account the SRID set for the field. For example, if the input geometry has no SRID, then that of the field will be returned.
Returns the default SRID for the given geometry, taking into account the SRID set for the field. For example, if the input geometry has no SRID, then that of the field will be returned.
[ "Returns", "the", "default", "SRID", "for", "the", "given", "geometry", "taking", "into", "account", "the", "SRID", "set", "for", "the", "field", ".", "For", "example", "if", "the", "input", "geometry", "has", "no", "SRID", "then", "that", "of", "the", "field", "will", "be", "returned", "." ]
def get_srid(self, geom): """ Returns the default SRID for the given geometry, taking into account the SRID set for the field. For example, if the input geometry has no SRID, then that of the field will be returned. """ gsrid = geom.srid # SRID of given geometry. if gsrid is None or self.srid == -1 or (gsrid == -1 and self.srid != -1): return self.srid else: return gsrid
[ "def", "get_srid", "(", "self", ",", "geom", ")", ":", "gsrid", "=", "geom", ".", "srid", "# SRID of given geometry.", "if", "gsrid", "is", "None", "or", "self", ".", "srid", "==", "-", "1", "or", "(", "gsrid", "==", "-", "1", "and", "self", ".", "srid", "!=", "-", "1", ")", ":", "return", "self", ".", "srid", "else", ":", "return", "gsrid" ]
https://github.com/learningequality/ka-lite/blob/571918ea668013dcf022286ea85eff1c5333fb8b/kalite/packages/bundled/django/contrib/gis/db/models/fields.py#L181-L191
open-mmlab/OpenPCDet
0f4d3f1f5c1fbe551c35917220e75eb90e28035f
pcdet/models/dense_heads/target_assigner/atss_target_assigner.py
python
ATSSTargetAssigner.assign_targets
(self, anchors_list, gt_boxes_with_classes, use_multihead=False)
return ret_dict
Args: anchors: [(N, 7), ...] gt_boxes: (B, M, 8) Returns:
Args: anchors: [(N, 7), ...] gt_boxes: (B, M, 8) Returns:
[ "Args", ":", "anchors", ":", "[", "(", "N", "7", ")", "...", "]", "gt_boxes", ":", "(", "B", "M", "8", ")", "Returns", ":" ]
def assign_targets(self, anchors_list, gt_boxes_with_classes, use_multihead=False): """ Args: anchors: [(N, 7), ...] gt_boxes: (B, M, 8) Returns: """ if not isinstance(anchors_list, list): anchors_list = [anchors_list] single_set_of_anchor = True else: single_set_of_anchor = len(anchors_list) == 1 cls_labels_list, reg_targets_list, reg_weights_list = [], [], [] for anchors in anchors_list: batch_size = gt_boxes_with_classes.shape[0] gt_classes = gt_boxes_with_classes[:, :, -1] gt_boxes = gt_boxes_with_classes[:, :, :-1] if use_multihead: anchors = anchors.permute(3, 4, 0, 1, 2, 5).contiguous().view(-1, anchors.shape[-1]) else: anchors = anchors.view(-1, anchors.shape[-1]) cls_labels, reg_targets, reg_weights = [], [], [] for k in range(batch_size): cur_gt = gt_boxes[k] cnt = cur_gt.__len__() - 1 while cnt > 0 and cur_gt[cnt].sum() == 0: cnt -= 1 cur_gt = cur_gt[:cnt + 1] cur_gt_classes = gt_classes[k][:cnt + 1] cur_cls_labels, cur_reg_targets, cur_reg_weights = self.assign_targets_single( anchors, cur_gt, cur_gt_classes ) cls_labels.append(cur_cls_labels) reg_targets.append(cur_reg_targets) reg_weights.append(cur_reg_weights) cls_labels = torch.stack(cls_labels, dim=0) reg_targets = torch.stack(reg_targets, dim=0) reg_weights = torch.stack(reg_weights, dim=0) cls_labels_list.append(cls_labels) reg_targets_list.append(reg_targets) reg_weights_list.append(reg_weights) if single_set_of_anchor: ret_dict = { 'box_cls_labels': cls_labels_list[0], 'box_reg_targets': reg_targets_list[0], 'reg_weights': reg_weights_list[0] } else: ret_dict = { 'box_cls_labels': torch.cat(cls_labels_list, dim=1), 'box_reg_targets': torch.cat(reg_targets_list, dim=1), 'reg_weights': torch.cat(reg_weights_list, dim=1) } return ret_dict
[ "def", "assign_targets", "(", "self", ",", "anchors_list", ",", "gt_boxes_with_classes", ",", "use_multihead", "=", "False", ")", ":", "if", "not", "isinstance", "(", "anchors_list", ",", "list", ")", ":", "anchors_list", "=", "[", "anchors_list", "]", "single_set_of_anchor", "=", "True", "else", ":", "single_set_of_anchor", "=", "len", "(", "anchors_list", ")", "==", "1", "cls_labels_list", ",", "reg_targets_list", ",", "reg_weights_list", "=", "[", "]", ",", "[", "]", ",", "[", "]", "for", "anchors", "in", "anchors_list", ":", "batch_size", "=", "gt_boxes_with_classes", ".", "shape", "[", "0", "]", "gt_classes", "=", "gt_boxes_with_classes", "[", ":", ",", ":", ",", "-", "1", "]", "gt_boxes", "=", "gt_boxes_with_classes", "[", ":", ",", ":", ",", ":", "-", "1", "]", "if", "use_multihead", ":", "anchors", "=", "anchors", ".", "permute", "(", "3", ",", "4", ",", "0", ",", "1", ",", "2", ",", "5", ")", ".", "contiguous", "(", ")", ".", "view", "(", "-", "1", ",", "anchors", ".", "shape", "[", "-", "1", "]", ")", "else", ":", "anchors", "=", "anchors", ".", "view", "(", "-", "1", ",", "anchors", ".", "shape", "[", "-", "1", "]", ")", "cls_labels", ",", "reg_targets", ",", "reg_weights", "=", "[", "]", ",", "[", "]", ",", "[", "]", "for", "k", "in", "range", "(", "batch_size", ")", ":", "cur_gt", "=", "gt_boxes", "[", "k", "]", "cnt", "=", "cur_gt", ".", "__len__", "(", ")", "-", "1", "while", "cnt", ">", "0", "and", "cur_gt", "[", "cnt", "]", ".", "sum", "(", ")", "==", "0", ":", "cnt", "-=", "1", "cur_gt", "=", "cur_gt", "[", ":", "cnt", "+", "1", "]", "cur_gt_classes", "=", "gt_classes", "[", "k", "]", "[", ":", "cnt", "+", "1", "]", "cur_cls_labels", ",", "cur_reg_targets", ",", "cur_reg_weights", "=", "self", ".", "assign_targets_single", "(", "anchors", ",", "cur_gt", ",", "cur_gt_classes", ")", "cls_labels", ".", "append", "(", "cur_cls_labels", ")", "reg_targets", ".", "append", "(", "cur_reg_targets", ")", "reg_weights", ".", "append", "(", "cur_reg_weights", ")", "cls_labels", "=", "torch", ".", "stack", "(", "cls_labels", ",", "dim", "=", "0", ")", "reg_targets", "=", "torch", ".", "stack", "(", "reg_targets", ",", "dim", "=", "0", ")", "reg_weights", "=", "torch", ".", "stack", "(", "reg_weights", ",", "dim", "=", "0", ")", "cls_labels_list", ".", "append", "(", "cls_labels", ")", "reg_targets_list", ".", "append", "(", "reg_targets", ")", "reg_weights_list", ".", "append", "(", "reg_weights", ")", "if", "single_set_of_anchor", ":", "ret_dict", "=", "{", "'box_cls_labels'", ":", "cls_labels_list", "[", "0", "]", ",", "'box_reg_targets'", ":", "reg_targets_list", "[", "0", "]", ",", "'reg_weights'", ":", "reg_weights_list", "[", "0", "]", "}", "else", ":", "ret_dict", "=", "{", "'box_cls_labels'", ":", "torch", ".", "cat", "(", "cls_labels_list", ",", "dim", "=", "1", ")", ",", "'box_reg_targets'", ":", "torch", ".", "cat", "(", "reg_targets_list", ",", "dim", "=", "1", ")", ",", "'reg_weights'", ":", "torch", ".", "cat", "(", "reg_weights_list", ",", "dim", "=", "1", ")", "}", "return", "ret_dict" ]
https://github.com/open-mmlab/OpenPCDet/blob/0f4d3f1f5c1fbe551c35917220e75eb90e28035f/pcdet/models/dense_heads/target_assigner/atss_target_assigner.py#L16-L73
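The trimming loop in assign_targets above walks back from the end of each batch item's ground-truth array to drop all-zero padding rows before target assignment. A minimal, self-contained sketch of just that trim, assuming PyTorch is installed; the tensor values are illustrative:

import torch

gt = torch.tensor([[1., 2., 3.],
                   [4., 5., 6.],
                   [0., 0., 0.],   # zero padding
                   [0., 0., 0.]])  # zero padding
cnt = len(gt) - 1
while cnt > 0 and gt[cnt].sum() == 0:
    cnt -= 1
print(gt[:cnt + 1])  # keeps only the two real boxes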
jgagneastro/coffeegrindsize
22661ebd21831dba4cf32bfc6ba59fe3d49f879c
App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/scipy/integrate/_ode.py
python
IntegratorBase.reset
(self, n, has_jac)
Prepare integrator for call: allocate memory, set flags, etc. n - number of equations. has_jac - if user has supplied function for evaluating Jacobian.
Prepare integrator for call: allocate memory, set flags, etc. n - number of equations. has_jac - if user has supplied function for evaluating Jacobian.
[ "Prepare", "integrator", "for", "call", ":", "allocate", "memory", "set", "flags", "etc", ".", "n", "-", "number", "of", "equations", ".", "has_jac", "-", "if", "user", "has", "supplied", "function", "for", "evaluating", "Jacobian", "." ]
def reset(self, n, has_jac): """Prepare integrator for call: allocate memory, set flags, etc. n - number of equations. has_jac - if user has supplied function for evaluating Jacobian. """
[ "def", "reset", "(", "self", ",", "n", ",", "has_jac", ")", ":" ]
https://github.com/jgagneastro/coffeegrindsize/blob/22661ebd21831dba4cf32bfc6ba59fe3d49f879c/App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/scipy/integrate/_ode.py#L793-L797
n1nj4sec/pupy
a5d766ea81fdfe3bc2c38c9bdaf10e9b75af3b39
pupy/network/lib/picocmd/picocmd.py
python
PortQuizPort.__init__
(self, ports)
[]
def __init__(self, ports): self.ports = [int(x) for x in ports]
[ "def", "__init__", "(", "self", ",", "ports", ")", ":", "self", ".", "ports", "=", "[", "int", "(", "x", ")", "for", "x", "in", "ports", "]" ]
https://github.com/n1nj4sec/pupy/blob/a5d766ea81fdfe3bc2c38c9bdaf10e9b75af3b39/pupy/network/lib/picocmd/picocmd.py#L1027-L1028
tdamdouni/Pythonista
3e082d53b6b9b501a3c8cf3251a8ad4c8be9c2ad
markdown/markdown2pdf/PyPDF2/generic.py
python
PdfObject.getObject
(self)
return self
Resolves indirect references.
Resolves indirect references.
[ "Resolves", "indirect", "references", "." ]
def getObject(self): """Resolves indirect references.""" return self
[ "def", "getObject", "(", "self", ")", ":", "return", "self" ]
https://github.com/tdamdouni/Pythonista/blob/3e082d53b6b9b501a3c8cf3251a8ad4c8be9c2ad/markdown/markdown2pdf/PyPDF2/generic.py#L94-L96
zhanlaoban/Transformers_for_Text_Classification
5e12b21616b29e445e11fe307948e5c55084bb0e
transformers/modeling_ctrl.py
python
CTRLPreTrainedModel._init_weights
(self, module)
Initialize the weights.
Initialize the weights.
[ "Initialize", "the", "weights", "." ]
def _init_weights(self, module): """ Initialize the weights. """ if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0)
[ "def", "_init_weights", "(", "self", ",", "module", ")", ":", "if", "isinstance", "(", "module", ",", "(", "nn", ".", "Linear", ",", "nn", ".", "Embedding", ",", "Conv1D", ")", ")", ":", "# Slightly different from the TF version which uses truncated_normal for initialization", "# cf https://github.com/pytorch/pytorch/pull/5617", "module", ".", "weight", ".", "data", ".", "normal_", "(", "mean", "=", "0.0", ",", "std", "=", "self", ".", "config", ".", "initializer_range", ")", "if", "isinstance", "(", "module", ",", "(", "nn", ".", "Linear", ",", "Conv1D", ")", ")", "and", "module", ".", "bias", "is", "not", "None", ":", "module", ".", "bias", ".", "data", ".", "zero_", "(", ")", "elif", "isinstance", "(", "module", ",", "nn", ".", "LayerNorm", ")", ":", "module", ".", "bias", ".", "data", ".", "zero_", "(", ")", "module", ".", "weight", ".", "data", ".", "fill_", "(", "1.0", ")" ]
https://github.com/zhanlaoban/Transformers_for_Text_Classification/blob/5e12b21616b29e445e11fe307948e5c55084bb0e/transformers/modeling_ctrl.py#L178-L189
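A standalone sketch of the same initialization pattern applied to plain torch layers, assuming PyTorch is installed; initializer_range here is an illustrative value rather than a config field:

import torch.nn as nn

initializer_range = 0.02  # illustrative value, not a config default

linear = nn.Linear(4, 4)
linear.weight.data.normal_(mean=0.0, std=initializer_range)  # normal init for weights
linear.bias.data.zero_()                                     # zero init for bias

norm = nn.LayerNorm(4)
norm.bias.data.zero_()        # LayerNorm bias -> 0
norm.weight.data.fill_(1.0)   # LayerNorm scale -> 1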
zhou13/lcnn
e07e48b98d9c5b067a63d48445e757c2460ba4b6
lcnn/utils.py
python
np_softmax
(x, axis=0)
return e_x / e_x.sum(axis=axis, keepdims=True)
Compute softmax values for each set of scores in x.
Compute softmax values for each set of scores in x.
[ "Compute", "softmax", "values", "for", "each", "set", "of", "scores", "in", "x", "." ]
def np_softmax(x, axis=0):
    """Compute softmax values for each set of scores in x."""
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum(axis=axis, keepdims=True)
[ "def", "np_softmax", "(", "x", ",", "axis", "=", "0", ")", ":", "e_x", "=", "np", ".", "exp", "(", "x", "-", "np", ".", "max", "(", "x", ")", ")", "return", "e_x", "/", "e_x", ".", "sum", "(", "axis", "=", "axis", ",", "keepdims", "=", "True", ")" ]
https://github.com/zhou13/lcnn/blob/e07e48b98d9c5b067a63d48445e757c2460ba4b6/lcnn/utils.py#L60-L63
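A minimal usage sketch of np_softmax, with the body copied from the record above; it shows that entries along the chosen axis normalize to 1 (the max is taken over the whole array rather than per axis, which still gives numerical stability):

import numpy as np

def np_softmax(x, axis=0):
    # body copied from the record above
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum(axis=axis, keepdims=True)

scores = np.array([[1.0, 2.0],
                   [3.0, 0.5]])
probs = np_softmax(scores, axis=0)
print(probs.sum(axis=0))  # -> [1. 1.]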
standardebooks/tools
f57af3c5938a9aeed9e97e82b2c130424f6033e5
se/commands/create_draft.py
python
_generate_contributor_string
(contributors: List[Dict], include_xhtml: bool)
return output
Given a list of contributors, generate a contributor string like `Bob Smith, Jane Doe, and Sam Johnson`. With include_xhtml, the string looks like: `<b epub:type="z3998:personal-name">Bob Smith</b>, <a href="https://en.wikipedia.org/wiki/Jane_Doe">Jane Doe</a>, and <b epub:type="z3998:personal-name">Sam Johnson</b>` INPUTS contributors: A list of contributor dicts include_xhtml: Include <b> or <a> for each contributor, making the output suitable for the colophon OUTPUTS A string of XML representing the contributors
Given a list of contributors, generate a contributor string like `Bob Smith, Jane Doe, and Sam Johnson`. With include_xhtml, the string looks like: `<b epub:type="z3998:personal-name">Bob Smith</b>, <a href="https://en.wikipedia.org/wiki/Jane_Doe">Jane Doe</a>, and <b epub:type="z3998:personal-name">Sam Johnson</b>`
[ "Given", "a", "list", "of", "contributors", "generate", "a", "contributor", "string", "like", "Bob", "Smith", "Jane", "Doe", "and", "Sam", "Johnson", ".", "With", "include_xhtml", "the", "string", "looks", "like", ":", "<b", "epub", ":", "type", "=", "z3998", ":", "personal", "-", "name", ">", "Bob", "Smith<", "/", "b", ">", "<a", "href", "=", "https", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "wiki", "/", "Jane_Doe", ">", "Jane", "Doe<", "/", "a", ">", "and", "<b", "epub", ":", "type", "=", "z3998", ":", "personal", "-", "name", ">", "Sam", "Johnson<", "/", "b", ">" ]
def _generate_contributor_string(contributors: List[Dict], include_xhtml: bool) -> str:
	"""
	Given a list of contributors, generate a contributor string like `Bob Smith, Jane Doe, and Sam Johnson`.
	With include_xhtml, the string looks like:
	`<b epub:type="z3998:personal-name">Bob Smith</b>, <a href="https://en.wikipedia.org/wiki/Jane_Doe">Jane Doe</a>, and <b epub:type="z3998:personal-name">Sam Johnson</b>`
	INPUTS
	contributors: A list of contributor dicts
	include_xhtml: Include <b> or <a> for each contributor, making the output suitable for the colophon
	OUTPUTS
	A string of XML representing the contributors
	"""
	output = ""
	# Don't include "anonymous" contributors
	contributors = [contributor for contributor in contributors if contributor["name"].lower() != "anonymous"]
	if len(contributors) == 1:
		if include_xhtml:
			if contributors[0]["wiki_url"]:
				output += f"""<a href="{contributors[0]['wiki_url']}">{_add_name_abbr(escape(contributors[0]['name']))}</a>"""
			else:
				output += f"""<b epub:type="z3998:personal-name">{_add_name_abbr(escape(contributors[0]['name']))}</b>"""
		else:
			output += contributors[0]["name"]
	elif len(contributors) == 2:
		if include_xhtml:
			if contributors[0]["wiki_url"]:
				output += f"""<a href="{contributors[0]['wiki_url']}">{_add_name_abbr(escape(contributors[0]['name']))}</a>"""
			else:
				output += f"""<b epub:type="z3998:personal-name">{_add_name_abbr(escape(contributors[0]['name']))}</b>"""
			output += " and "
			if contributors[1]["wiki_url"]:
				output += f"""<a href="{contributors[1]['wiki_url']}">{_add_name_abbr(escape(contributors[1]['name']))}</a>"""
			else:
				output += f"""<b epub:type="z3998:personal-name">{_add_name_abbr(escape(contributors[1]['name']))}</b>"""
		else:
			output += contributors[0]["name"] + " and " + contributors[1]["name"]
	else:
		for i, contributor in enumerate(contributors):
			if 0 < i <= len(contributors) - 2:
				output += ", "
			if i > 0 and i == len(contributors) - 1:
				output += ", and "
			if include_xhtml:
				if contributor["wiki_url"]:
					output += f"""<a href="{contributor['wiki_url']}">{_add_name_abbr(escape(contributor['name']))}</a>"""
				else:
					output += f"""<b epub:type="z3998:personal-name">{_add_name_abbr(escape(contributor['name']))}</b>"""
			else:
				output += contributor["name"]
	return output
[ "def", "_generate_contributor_string", "(", "contributors", ":", "List", "[", "Dict", "]", ",", "include_xhtml", ":", "bool", ")", "->", "str", ":", "output", "=", "\"\"", "# Don't include \"anonymous\" contributors", "contributors", "=", "[", "contributor", "for", "contributor", "in", "contributors", "if", "contributor", "[", "\"name\"", "]", ".", "lower", "(", ")", "!=", "\"anonymous\"", "]", "if", "len", "(", "contributors", ")", "==", "1", ":", "if", "include_xhtml", ":", "if", "contributors", "[", "0", "]", "[", "\"wiki_url\"", "]", ":", "output", "+=", "f\"\"\"<a href=\"{contributors[0]['wiki_url']}\">{_add_name_abbr(escape(contributors[0]['name']))}</a>\"\"\"", "else", ":", "output", "+=", "f\"\"\"<b epub:type=\"z3998:personal-name\">{_add_name_abbr(escape(contributors[0]['name']))}</b>\"\"\"", "else", ":", "output", "+=", "contributors", "[", "0", "]", "[", "\"name\"", "]", "elif", "len", "(", "contributors", ")", "==", "2", ":", "if", "include_xhtml", ":", "if", "contributors", "[", "0", "]", "[", "\"wiki_url\"", "]", ":", "output", "+=", "f\"\"\"<a href=\"{contributors[0]['wiki_url']}\">{_add_name_abbr(escape(contributors[0]['name']))}</a>\"\"\"", "else", ":", "output", "+=", "f\"\"\"<b epub:type=\"z3998:personal-name\">{_add_name_abbr(escape(contributors[0]['name']))}</b>\"\"\"", "output", "+=", "\" and \"", "if", "contributors", "[", "1", "]", "[", "\"wiki_url\"", "]", ":", "output", "+=", "f\"\"\"<a href=\"{contributors[1]['wiki_url']}\">{_add_name_abbr(escape(contributors[1]['name']))}</a>\"\"\"", "else", ":", "output", "+=", "f\"\"\"<b epub:type=\"z3998:personal-name\">{_add_name_abbr(escape(contributors[1]['name']))}</b>\"\"\"", "else", ":", "output", "+=", "contributors", "[", "0", "]", "[", "\"name\"", "]", "+", "\" and \"", "+", "contributors", "[", "1", "]", "[", "\"name\"", "]", "else", ":", "for", "i", ",", "contributor", "in", "enumerate", "(", "contributors", ")", ":", "if", "0", "<", "i", "<=", "len", "(", "contributors", ")", "-", "2", ":", "output", "+=", "\", \"", "if", "i", ">", "0", "and", "i", "==", "len", "(", "contributors", ")", "-", "1", ":", "output", "+=", "\", and \"", "if", "include_xhtml", ":", "if", "contributor", "[", "\"wiki_url\"", "]", ":", "output", "+=", "f\"\"\"<a href=\"{contributor['wiki_url']}\">{_add_name_abbr(escape(contributor['name']))}</a>\"\"\"", "else", ":", "output", "+=", "f\"\"\"<b epub:type=\"z3998:personal-name\">{_add_name_abbr(escape(contributor['name']))}</b>\"\"\"", "else", ":", "output", "+=", "contributor", "[", "\"name\"", "]", "return", "output" ]
https://github.com/standardebooks/tools/blob/f57af3c5938a9aeed9e97e82b2c130424f6033e5/se/commands/create_draft.py#L416-L475
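The plain-text branch of _generate_contributor_string implements a serial-comma join. A self-contained sketch of just that joining rule; oxford_join is a hypothetical helper name, not part of the se tools:

def oxford_join(names):
    # one name: no connective; two: "X and Y"; more: "X, Y, and Z"
    if len(names) == 1:
        return names[0]
    if len(names) == 2:
        return names[0] + " and " + names[1]
    return ", ".join(names[:-1]) + ", and " + names[-1]

assert oxford_join(["Bob Smith", "Jane Doe", "Sam Johnson"]) == "Bob Smith, Jane Doe, and Sam Johnson"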
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
ansible/roles/lib_oa_openshift/library/oc_objectvalidator.py
python
locate_oc_binary
()
return oc_binary
Find and return oc binary file
Find and return oc binary file
[ "Find", "and", "return", "oc", "binary", "file" ]
def locate_oc_binary(): ''' Find and return oc binary file ''' # https://github.com/openshift/openshift-ansible/issues/3410 # oc can be in /usr/local/bin in some cases, but that may not # be in $PATH due to ansible/sudo paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS oc_binary = 'oc' # Use shutil.which if it is available, otherwise fallback to a naive path search try: which_result = shutil.which(oc_binary, path=os.pathsep.join(paths)) if which_result is not None: oc_binary = which_result except AttributeError: for path in paths: if os.path.exists(os.path.join(path, oc_binary)): oc_binary = os.path.join(path, oc_binary) break return oc_binary
[ "def", "locate_oc_binary", "(", ")", ":", "# https://github.com/openshift/openshift-ansible/issues/3410", "# oc can be in /usr/local/bin in some cases, but that may not", "# be in $PATH due to ansible/sudo", "paths", "=", "os", ".", "environ", ".", "get", "(", "\"PATH\"", ",", "os", ".", "defpath", ")", ".", "split", "(", "os", ".", "pathsep", ")", "+", "ADDITIONAL_PATH_LOOKUPS", "oc_binary", "=", "'oc'", "# Use shutil.which if it is available, otherwise fallback to a naive path search", "try", ":", "which_result", "=", "shutil", ".", "which", "(", "oc_binary", ",", "path", "=", "os", ".", "pathsep", ".", "join", "(", "paths", ")", ")", "if", "which_result", "is", "not", "None", ":", "oc_binary", "=", "which_result", "except", "AttributeError", ":", "for", "path", "in", "paths", ":", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "path", ",", "oc_binary", ")", ")", ":", "oc_binary", "=", "os", ".", "path", ".", "join", "(", "path", ",", "oc_binary", ")", "break", "return", "oc_binary" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/ansible/roles/lib_oa_openshift/library/oc_objectvalidator.py#L803-L823
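The lookup in locate_oc_binary is a general pattern: prefer shutil.which, and fall back to a manual PATH scan on Python 2, where shutil has no which. A generic sketch with illustrative names:

import os
import shutil

def locate_binary(name, extra_paths=()):
    # search PATH plus any extra lookup directories
    paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + list(extra_paths)
    try:
        found = shutil.which(name, path=os.pathsep.join(paths))
        if found is not None:
            return found
    except AttributeError:  # Python 2: shutil has no which()
        for path in paths:
            candidate = os.path.join(path, name)
            if os.path.exists(candidate):
                return candidate
    return name  # fall back to the bare name

print(locate_binary("python3"))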
nicolas-chaulet/torch-points3d
8e4c19ecb81926626231bf185e9eca77d92a0606
torch_points3d/models/base_architectures/unet.py
python
UnwrappedUnetBasedModel._fetch_arguments
(self, conv_opt, index, flow)
return args
Fetches arguments for building a convolution (up or down) Arguments: conv_opt index in sequential order (as they come in the config) flow "UP" or "DOWN"
Fetches arguments for building a convolution (up or down)
[ "Fetches", "arguments", "for", "building", "a", "convolution", "(", "up", "or", "down", ")" ]
def _fetch_arguments(self, conv_opt, index, flow): """Fetches arguments for building a convolution (up or down) Arguments: conv_opt index in sequential order (as they come in the config) flow "UP" or "DOWN" """ args = self._fetch_arguments_from_list(conv_opt, index) args["conv_cls"] = self._factory_module.get_module(flow) args["index"] = index return args
[ "def", "_fetch_arguments", "(", "self", ",", "conv_opt", ",", "index", ",", "flow", ")", ":", "args", "=", "self", ".", "_fetch_arguments_from_list", "(", "conv_opt", ",", "index", ")", "args", "[", "\"conv_cls\"", "]", "=", "self", ".", "_factory_module", ".", "get_module", "(", "flow", ")", "args", "[", "\"index\"", "]", "=", "index", "return", "args" ]
https://github.com/nicolas-chaulet/torch-points3d/blob/8e4c19ecb81926626231bf185e9eca77d92a0606/torch_points3d/models/base_architectures/unet.py#L476-L487
mrkipling/maraschino
c6be9286937783ae01df2d6d8cebfc8b2734a7d7
lib/werkzeug/wrappers.py
python
ETagResponseMixin.add_etag
(self, overwrite=False, weak=False)
Add an etag for the current response if there is none yet.
Add an etag for the current response if there is none yet.
[ "Add", "an", "etag", "for", "the", "current", "response", "if", "there", "is", "none", "yet", "." ]
def add_etag(self, overwrite=False, weak=False): """Add an etag for the current response if there is none yet.""" if overwrite or 'etag' not in self.headers: self.set_etag(generate_etag(self.data), weak)
[ "def", "add_etag", "(", "self", ",", "overwrite", "=", "False", ",", "weak", "=", "False", ")", ":", "if", "overwrite", "or", "'etag'", "not", "in", "self", ".", "headers", ":", "self", ".", "set_etag", "(", "generate_etag", "(", "self", ".", "data", ")", ",", "weak", ")" ]
https://github.com/mrkipling/maraschino/blob/c6be9286937783ae01df2d6d8cebfc8b2734a7d7/lib/werkzeug/wrappers.py#L1278-L1281
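A hedged usage sketch for add_etag, assuming Werkzeug is installed; the tag is derived from the response body, and a second call is a no-op unless overwrite=True:

from werkzeug.wrappers import Response

resp = Response("hello world")
resp.add_etag()                   # hashes resp.data into an ETag header
print(resp.headers.get("ETag"))
resp.add_etag()                   # already set: does nothing without overwrite=True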
AppScale/gts
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
AppServer/google/appengine/datastore/datastore_v4_pb.py
python
DatastoreV4Service.BeginTransaction
(self, rpc, request, response)
Handles a BeginTransaction RPC call. You should override this. Args: rpc: a Stubby RPC object request: a BeginTransactionRequest that contains the client request response: a BeginTransactionResponse that should be modified to send the response
Handles a BeginTransaction RPC call. You should override this.
[ "Handles", "a", "BeginTransaction", "RPC", "call", ".", "You", "should", "override", "this", "." ]
def BeginTransaction(self, rpc, request, response): """Handles a BeginTransaction RPC call. You should override this. Args: rpc: a Stubby RPC object request: a BeginTransactionRequest that contains the client request response: a BeginTransactionResponse that should be modified to send the response """ raise NotImplementedError
[ "def", "BeginTransaction", "(", "self", ",", "rpc", ",", "request", ",", "response", ")", ":", "raise", "NotImplementedError" ]
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/google/appengine/datastore/datastore_v4_pb.py#L6630-L6638
timkpaine/paperboy
6c0854b2c0dad139c25153e520ca79ffed820fa4
paperboy/config/user.py
python
UserConfig.store
(self)
return ret
Generate response modal for client when saving a User object
Generate response modal for client when saving a User object
[ "Generate", "response", "modal", "for", "client", "when", "saving", "a", "User", "object" ]
def store(self): '''Generate response modal for client when saving a User object''' ret = Response() ret.entries = [ DOMEntry(type='p', value='Success!'), DOMEntry(type='p', value='Successfully stored user {}'.format(self.name)), ] return ret
[ "def", "store", "(", "self", ")", ":", "ret", "=", "Response", "(", ")", "ret", ".", "entries", "=", "[", "DOMEntry", "(", "type", "=", "'p'", ",", "value", "=", "'Success!'", ")", ",", "DOMEntry", "(", "type", "=", "'p'", ",", "value", "=", "'Successfully stored user {}'", ".", "format", "(", "self", ".", "name", ")", ")", ",", "]", "return", "ret" ]
https://github.com/timkpaine/paperboy/blob/6c0854b2c0dad139c25153e520ca79ffed820fa4/paperboy/config/user.py#L45-L52
ratschlab/RGAN
f41731b965348259dcd94b0dcb1374d3e1c4ca7d
data_utils.py
python
get_data
(data_type, data_options=None)
return samples, pdf, labels
Helper/wrapper function to get the requested data.
Helper/wrapper function to get the requested data.
[ "Helper", "/", "wrapper", "function", "to", "get", "the", "requested", "data", "." ]
def get_data(data_type, data_options=None): """ Helper/wrapper function to get the requested data. """ labels = None pdf = None if data_type == 'load': data_dict = np.load(data_options).item() samples = data_dict['samples'] pdf = data_dict['pdf'] labels = data_dict['labels'] elif data_type == 'sine': samples = sine_wave(**data_options) elif data_type == 'mnist': if data_options['full_mnist']: samples, labels = mnist() else: #samples, labels = load_resized_mnist_0_5(14) samples, labels = load_resized_mnist(14) # this is the 0-2 setting elif data_type == 'gp_rbf': print(data_options) samples, pdf = GP(**data_options, kernel='rbf') elif data_type == 'linear': samples, pdf = linear(**data_options) elif data_type == 'eICU_task': samples, labels = eICU_task() elif data_type == 'resampled_eICU': samples, labels = resampled_eICU(**data_options) else: raise ValueError(data_type) print('Generated/loaded', len(samples), 'samples from data-type', data_type) return samples, pdf, labels
[ "def", "get_data", "(", "data_type", ",", "data_options", "=", "None", ")", ":", "labels", "=", "None", "pdf", "=", "None", "if", "data_type", "==", "'load'", ":", "data_dict", "=", "np", ".", "load", "(", "data_options", ")", ".", "item", "(", ")", "samples", "=", "data_dict", "[", "'samples'", "]", "pdf", "=", "data_dict", "[", "'pdf'", "]", "labels", "=", "data_dict", "[", "'labels'", "]", "elif", "data_type", "==", "'sine'", ":", "samples", "=", "sine_wave", "(", "*", "*", "data_options", ")", "elif", "data_type", "==", "'mnist'", ":", "if", "data_options", "[", "'full_mnist'", "]", ":", "samples", ",", "labels", "=", "mnist", "(", ")", "else", ":", "#samples, labels = load_resized_mnist_0_5(14)", "samples", ",", "labels", "=", "load_resized_mnist", "(", "14", ")", "# this is the 0-2 setting", "elif", "data_type", "==", "'gp_rbf'", ":", "print", "(", "data_options", ")", "samples", ",", "pdf", "=", "GP", "(", "*", "*", "data_options", ",", "kernel", "=", "'rbf'", ")", "elif", "data_type", "==", "'linear'", ":", "samples", ",", "pdf", "=", "linear", "(", "*", "*", "data_options", ")", "elif", "data_type", "==", "'eICU_task'", ":", "samples", ",", "labels", "=", "eICU_task", "(", ")", "elif", "data_type", "==", "'resampled_eICU'", ":", "samples", ",", "labels", "=", "resampled_eICU", "(", "*", "*", "data_options", ")", "else", ":", "raise", "ValueError", "(", "data_type", ")", "print", "(", "'Generated/loaded'", ",", "len", "(", "samples", ")", ",", "'samples from data-type'", ",", "data_type", ")", "return", "samples", ",", "pdf", ",", "labels" ]
https://github.com/ratschlab/RGAN/blob/f41731b965348259dcd94b0dcb1374d3e1c4ca7d/data_utils.py#L110-L141
google/budou
d45791a244e00d84f87da2a4678da2b63a9c232f
budou/chunk.py
python
Chunk.is_space
(self)
return self.pos == self._SPACE_POS
Whether the chunk is a space. Returns: bool: True if it is a space.
Whether the chunk is a space.
[ "Whether", "the", "chunk", "is", "a", "space", "." ]
def is_space(self): """Whether the chunk is a space. Returns: bool: True if it is a space. """ return self.pos == self._SPACE_POS
[ "def", "is_space", "(", "self", ")", ":", "return", "self", ".", "pos", "==", "self", ".", "_SPACE_POS" ]
https://github.com/google/budou/blob/d45791a244e00d84f87da2a4678da2b63a9c232f/budou/chunk.py#L93-L99
urwid/urwid
e2423b5069f51d318ea1ac0f355a0efe5448f7eb
urwid/lcd_display.py
python
KeyRepeatSimulator.release
(self, key)
[]
def release(self, key): if key not in self.pressed: return # ignore extra release events del self.pressed[key] if not self.pressed: self.multiple_pressed = False
[ "def", "release", "(", "self", ",", "key", ")", ":", "if", "key", "not", "in", "self", ".", "pressed", ":", "return", "# ignore extra release events", "del", "self", ".", "pressed", "[", "key", "]", "if", "not", "self", ".", "pressed", ":", "self", ".", "multiple_pressed", "=", "False" ]
https://github.com/urwid/urwid/blob/e2423b5069f51d318ea1ac0f355a0efe5448f7eb/urwid/lcd_display.py#L222-L227
rm-hull/luma.examples
3a5110ab27a2d4f10cd62dc0156f439f4decd1e7
examples/maze.py
python
Maze.coords
(self, offset)
return (offset % self.width, offset // self.width)
Converts offset to [x,y] co-ords
Converts offset to [x,y] co-ords
[ "Converts", "offset", "to", "[", "x", "y", "]", "co", "-", "ords" ]
def coords(self, offset): """ Converts offset to [x,y] co-ords """ return (offset % self.width, offset // self.width)
[ "def", "coords", "(", "self", ",", "offset", ")", ":", "return", "(", "offset", "%", "self", ".", "width", ",", "offset", "//", "self", ".", "width", ")" ]
https://github.com/rm-hull/luma.examples/blob/3a5110ab27a2d4f10cd62dc0156f439f4decd1e7/examples/maze.py#L35-L37
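A standalone sketch of the offset-to-coordinate mapping in Maze.coords, assuming row-major cell storage; the width value is illustrative:

width = 8  # illustrative maze width

def coords(offset):
    # row-major: x is the column within a row, y is the row index
    return (offset % width, offset // width)

assert coords(0) == (0, 0)
assert coords(7) == (7, 0)   # end of the first row
assert coords(8) == (0, 1)   # wraps to the second row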
bloomberg/phabricator-tools
09bd1587fe8945d93a891162fd4c89640c6fada7
py/abd/abdt_naming.py
python
isStatusBad
(working_branch)
return working_branch.status.startswith(WB_STATUS_PREFIX_BAD)
Return True if the status of 'working_branch' is bad. :working_branch: a WorkingBranch :returns: True if the branch is bad
Return True if the status of 'working_branch' is bad.
[ "Return", "True", "if", "the", "status", "of", "working_branch", "is", "bad", "." ]
def isStatusBad(working_branch): """Return True if the status of 'working_branch' is bad. :working_branch: a WorkingBranch :returns: True if the branch is bad """ return working_branch.status.startswith(WB_STATUS_PREFIX_BAD)
[ "def", "isStatusBad", "(", "working_branch", ")", ":", "return", "working_branch", ".", "status", ".", "startswith", "(", "WB_STATUS_PREFIX_BAD", ")" ]
https://github.com/bloomberg/phabricator-tools/blob/09bd1587fe8945d93a891162fd4c89640c6fada7/py/abd/abdt_naming.py#L93-L100
w3h/isf
6faf0a3df185465ec17369c90ccc16e2a03a1870
lib/thirdparty/DateTime/interfaces.py
python
IDateTime.__int__
()
Convert to an integer number of seconds since the epoch (gmt)
Convert to an integer number of seconds since the epoch (gmt)
[ "Convert", "to", "an", "integer", "number", "of", "seconds", "since", "the", "epoch", "(", "gmt", ")" ]
def __int__(): """Convert to an integer number of seconds since the epoch (gmt)"""
[ "def", "__int__", "(", ")", ":" ]
https://github.com/w3h/isf/blob/6faf0a3df185465ec17369c90ccc16e2a03a1870/lib/thirdparty/DateTime/interfaces.py#L368-L369
calico/basenji
2dae9b54744bd0495041c4259c22593054eef50b
basenji/archive/seqnn_util.py
python
SeqNNModel.test_h5
(self, sess, batcher, test_batches=None)
return acc
Compute model accuracy on a test set. Args: sess: TensorFlow session batcher: Batcher object to provide data test_batches: Number of test batches Returns: acc: Accuracy object
Compute model accuracy on a test set.
[ "Compute", "model", "accuracy", "on", "a", "test", "set", "." ]
def test_h5(self, sess, batcher, test_batches=None): """ Compute model accuracy on a test set. Args: sess: TensorFlow session batcher: Batcher object to provide data test_batches: Number of test batches Returns: acc: Accuracy object """ # setup feed dict fd = self.set_mode('test') # initialize prediction and target arrays preds = [] targets = [] targets_na = [] batch_losses = [] batch_target_losses = [] batch_sizes = [] # get first batch batch_num = 0 Xb, Yb, NAb, Nb = batcher.next() while Xb is not None and (test_batches is None or batch_num < test_batches): # update feed dict fd[self.inputs_ph] = Xb fd[self.targets_ph] = Yb # make predictions run_ops = [self.targets_eval, self.preds_eval_loss, self.loss_eval, self.loss_eval_targets] run_returns = sess.run(run_ops, feed_dict=fd) targets_batch, preds_batch, loss_batch, target_losses_batch = run_returns # accumulate predictions and targets preds.append(preds_batch[:Nb,:,:].astype('float16')) targets.append(targets_batch[:Nb,:,:].astype('float16')) targets_na.append(np.zeros([Nb, self.preds_length], dtype='bool')) # accumulate loss batch_losses.append(loss_batch) batch_target_losses.append(target_losses_batch) batch_sizes.append(Nb) # next batch batch_num += 1 Xb, Yb, NAb, Nb = batcher.next() # reset batcher batcher.reset() # construct arrays targets = np.concatenate(targets, axis=0) preds = np.concatenate(preds, axis=0) targets_na = np.concatenate(targets_na, axis=0) # mean across batches batch_losses = np.array(batch_losses, dtype='float64') batch_losses = np.average(batch_losses, weights=batch_sizes) batch_target_losses = np.array(batch_target_losses, dtype='float64') batch_target_losses = np.average(batch_target_losses, axis=0, weights=batch_sizes) # instantiate accuracy object acc = accuracy.Accuracy(targets, preds, targets_na, batch_losses, batch_target_losses) return acc
[ "def", "test_h5", "(", "self", ",", "sess", ",", "batcher", ",", "test_batches", "=", "None", ")", ":", "# setup feed dict", "fd", "=", "self", ".", "set_mode", "(", "'test'", ")", "# initialize prediction and target arrays", "preds", "=", "[", "]", "targets", "=", "[", "]", "targets_na", "=", "[", "]", "batch_losses", "=", "[", "]", "batch_target_losses", "=", "[", "]", "batch_sizes", "=", "[", "]", "# get first batch", "batch_num", "=", "0", "Xb", ",", "Yb", ",", "NAb", ",", "Nb", "=", "batcher", ".", "next", "(", ")", "while", "Xb", "is", "not", "None", "and", "(", "test_batches", "is", "None", "or", "batch_num", "<", "test_batches", ")", ":", "# update feed dict", "fd", "[", "self", ".", "inputs_ph", "]", "=", "Xb", "fd", "[", "self", ".", "targets_ph", "]", "=", "Yb", "# make predictions", "run_ops", "=", "[", "self", ".", "targets_eval", ",", "self", ".", "preds_eval_loss", ",", "self", ".", "loss_eval", ",", "self", ".", "loss_eval_targets", "]", "run_returns", "=", "sess", ".", "run", "(", "run_ops", ",", "feed_dict", "=", "fd", ")", "targets_batch", ",", "preds_batch", ",", "loss_batch", ",", "target_losses_batch", "=", "run_returns", "# accumulate predictions and targets", "preds", ".", "append", "(", "preds_batch", "[", ":", "Nb", ",", ":", ",", ":", "]", ".", "astype", "(", "'float16'", ")", ")", "targets", ".", "append", "(", "targets_batch", "[", ":", "Nb", ",", ":", ",", ":", "]", ".", "astype", "(", "'float16'", ")", ")", "targets_na", ".", "append", "(", "np", ".", "zeros", "(", "[", "Nb", ",", "self", ".", "preds_length", "]", ",", "dtype", "=", "'bool'", ")", ")", "# accumulate loss", "batch_losses", ".", "append", "(", "loss_batch", ")", "batch_target_losses", ".", "append", "(", "target_losses_batch", ")", "batch_sizes", ".", "append", "(", "Nb", ")", "# next batch", "batch_num", "+=", "1", "Xb", ",", "Yb", ",", "NAb", ",", "Nb", "=", "batcher", ".", "next", "(", ")", "# reset batcher", "batcher", ".", "reset", "(", ")", "# construct arrays", "targets", "=", "np", ".", "concatenate", "(", "targets", ",", "axis", "=", "0", ")", "preds", "=", "np", ".", "concatenate", "(", "preds", ",", "axis", "=", "0", ")", "targets_na", "=", "np", ".", "concatenate", "(", "targets_na", ",", "axis", "=", "0", ")", "# mean across batches", "batch_losses", "=", "np", ".", "array", "(", "batch_losses", ",", "dtype", "=", "'float64'", ")", "batch_losses", "=", "np", ".", "average", "(", "batch_losses", ",", "weights", "=", "batch_sizes", ")", "batch_target_losses", "=", "np", ".", "array", "(", "batch_target_losses", ",", "dtype", "=", "'float64'", ")", "batch_target_losses", "=", "np", ".", "average", "(", "batch_target_losses", ",", "axis", "=", "0", ",", "weights", "=", "batch_sizes", ")", "# instantiate accuracy object", "acc", "=", "accuracy", ".", "Accuracy", "(", "targets", ",", "preds", ",", "targets_na", ",", "batch_losses", ",", "batch_target_losses", ")", "return", "acc" ]
https://github.com/calico/basenji/blob/2dae9b54744bd0495041c4259c22593054eef50b/basenji/archive/seqnn_util.py#L1096-L1167
pyg-team/pytorch_geometric
b920e9a3a64e22c8356be55301c88444ff051cae
benchmark/citation/arma.py
python
Net.reset_parameters
(self)
[]
def reset_parameters(self): self.conv1.reset_parameters() self.conv2.reset_parameters()
[ "def", "reset_parameters", "(", "self", ")", ":", "self", ".", "conv1", ".", "reset_parameters", "(", ")", "self", ".", "conv2", ".", "reset_parameters", "(", ")" ]
https://github.com/pyg-team/pytorch_geometric/blob/b920e9a3a64e22c8356be55301c88444ff051cae/benchmark/citation/arma.py#L36-L38
luispedro/jug
e967c6388ca69c78698e1522b9535d647a2c5b22
jug/backends/base.py
python
base_store.getlock
(self, name)
lock = store.getlock(name) Retrieve a lock object associated with ``name``. Parameters ---------- name : str Key Returns ------- lock : Lock object This should obey the Lock Interface See Also -------- base_lock : Generic lock
lock = store.getlock(name)
[ "lock", "=", "store", ".", "getlock", "(", "name", ")" ]
def getlock(self, name): ''' lock = store.getlock(name) Retrieve a lock object associated with ``name``. Parameters ---------- name : str Key Returns ------- lock : Lock object This should obey the Lock Interface See Also -------- base_lock : Generic lock '''
[ "def", "getlock", "(", "self", ",", "name", ")", ":" ]
https://github.com/luispedro/jug/blob/e967c6388ca69c78698e1522b9535d647a2c5b22/jug/backends/base.py#L166-L185
oracle/oci-python-sdk
3c1604e4e212008fb6718e2f68cdb5ef71fd5793
src/oci/database_management/db_management_client.py
python
DbManagementClient.get_work_request
(self, work_request_id, **kwargs)
Gets the status of the work request with the given Work Request ID :param str work_request_id: (required) The `OCID`__ of the asynchronous work request. __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm :param str opc_request_id: (optional) The client request ID for tracing. :param obj retry_strategy: (optional) A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level. This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__. To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`. :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.database_management.models.WorkRequest` :rtype: :class:`~oci.response.Response` :example: Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/databasemanagement/get_work_request.py.html>`__ to see an example of how to use get_work_request API.
Gets the status of the work request with the given Work Request ID
[ "Gets", "the", "status", "of", "the", "work", "request", "with", "the", "given", "Work", "Request", "ID" ]
def get_work_request(self, work_request_id, **kwargs): """ Gets the status of the work request with the given Work Request ID :param str work_request_id: (required) The `OCID`__ of the asynchronous work request. __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm :param str opc_request_id: (optional) The client request ID for tracing. :param obj retry_strategy: (optional) A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level. This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__. To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`. :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.database_management.models.WorkRequest` :rtype: :class:`~oci.response.Response` :example: Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/databasemanagement/get_work_request.py.html>`__ to see an example of how to use get_work_request API. """ resource_path = "/workRequests/{workRequestId}" method = "GET" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "opc_request_id" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "get_work_request got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "workRequestId": work_request_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "opc-request-id": kwargs.get("opc_request_id", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.base_client.get_preferred_retry_strategy( operation_retry_strategy=kwargs.get('retry_strategy'), client_retry_strategy=self.retry_strategy ) if retry_strategy: if not isinstance(retry_strategy, retry.NoneRetryStrategy): self.base_client.add_opc_client_retries_header(header_params) retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback) return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, response_type="WorkRequest") else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, response_type="WorkRequest")
[ "def", "get_work_request", "(", "self", ",", "work_request_id", ",", "*", "*", "kwargs", ")", ":", "resource_path", "=", "\"/workRequests/{workRequestId}\"", "method", "=", "\"GET\"", "# Don't accept unknown kwargs", "expected_kwargs", "=", "[", "\"retry_strategy\"", ",", "\"opc_request_id\"", "]", "extra_kwargs", "=", "[", "_key", "for", "_key", "in", "six", ".", "iterkeys", "(", "kwargs", ")", "if", "_key", "not", "in", "expected_kwargs", "]", "if", "extra_kwargs", ":", "raise", "ValueError", "(", "\"get_work_request got unknown kwargs: {!r}\"", ".", "format", "(", "extra_kwargs", ")", ")", "path_params", "=", "{", "\"workRequestId\"", ":", "work_request_id", "}", "path_params", "=", "{", "k", ":", "v", "for", "(", "k", ",", "v", ")", "in", "six", ".", "iteritems", "(", "path_params", ")", "if", "v", "is", "not", "missing", "}", "for", "(", "k", ",", "v", ")", "in", "six", ".", "iteritems", "(", "path_params", ")", ":", "if", "v", "is", "None", "or", "(", "isinstance", "(", "v", ",", "six", ".", "string_types", ")", "and", "len", "(", "v", ".", "strip", "(", ")", ")", "==", "0", ")", ":", "raise", "ValueError", "(", "'Parameter {} cannot be None, whitespace or empty string'", ".", "format", "(", "k", ")", ")", "header_params", "=", "{", "\"accept\"", ":", "\"application/json\"", ",", "\"content-type\"", ":", "\"application/json\"", ",", "\"opc-request-id\"", ":", "kwargs", ".", "get", "(", "\"opc_request_id\"", ",", "missing", ")", "}", "header_params", "=", "{", "k", ":", "v", "for", "(", "k", ",", "v", ")", "in", "six", ".", "iteritems", "(", "header_params", ")", "if", "v", "is", "not", "missing", "and", "v", "is", "not", "None", "}", "retry_strategy", "=", "self", ".", "base_client", ".", "get_preferred_retry_strategy", "(", "operation_retry_strategy", "=", "kwargs", ".", "get", "(", "'retry_strategy'", ")", ",", "client_retry_strategy", "=", "self", ".", "retry_strategy", ")", "if", "retry_strategy", ":", "if", "not", "isinstance", "(", "retry_strategy", ",", "retry", ".", "NoneRetryStrategy", ")", ":", "self", ".", "base_client", ".", "add_opc_client_retries_header", "(", "header_params", ")", "retry_strategy", ".", "add_circuit_breaker_callback", "(", "self", ".", "circuit_breaker_callback", ")", "return", "retry_strategy", ".", "make_retrying_call", "(", "self", ".", "base_client", ".", "call_api", ",", "resource_path", "=", "resource_path", ",", "method", "=", "method", ",", "path_params", "=", "path_params", ",", "header_params", "=", "header_params", ",", "response_type", "=", "\"WorkRequest\"", ")", "else", ":", "return", "self", ".", "base_client", ".", "call_api", "(", "resource_path", "=", "resource_path", ",", "method", "=", "method", ",", "path_params", "=", "path_params", ",", "header_params", "=", "header_params", ",", "response_type", "=", "\"WorkRequest\"", ")" ]
https://github.com/oracle/oci-python-sdk/blob/3c1604e4e212008fb6718e2f68cdb5ef71fd5793/src/oci/database_management/db_management_client.py#L2447-L2526
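A hedged usage sketch for get_work_request, assuming the oci SDK is installed and a standard ~/.oci/config file exists; the work request OCID is a placeholder:

import oci

config = oci.config.from_file()  # reads ~/.oci/config by default
client = oci.database_management.DbManagementClient(config)
response = client.get_work_request("ocid1.databasemanagementworkrequest.oc1..example")
print(response.data.status)  # response.data is a WorkRequest model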
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/yeelight/light.py
python
YeelightGenericLight.async_set_rgb
(self, rgb, duration)
Set bulb's color.
Set bulb's color.
[ "Set", "bulb", "s", "color", "." ]
async def async_set_rgb(self, rgb, duration) -> None: """Set bulb's color.""" if not rgb or COLOR_MODE_RGB not in self.supported_color_modes: return if ( not self.device.is_color_flow_enabled and self.color_mode == COLOR_MODE_RGB and self.rgb_color == rgb ): _LOGGER.debug("RGB already set to: %s", rgb) # Already set, and since we get pushed updates # we avoid setting it again to ensure we do not # hit the rate limit return _LOGGER.debug("Setting RGB: %s", rgb) await self._bulb.async_set_rgb( *rgb, duration=duration, light_type=self.light_type )
[ "async", "def", "async_set_rgb", "(", "self", ",", "rgb", ",", "duration", ")", "->", "None", ":", "if", "not", "rgb", "or", "COLOR_MODE_RGB", "not", "in", "self", ".", "supported_color_modes", ":", "return", "if", "(", "not", "self", ".", "device", ".", "is_color_flow_enabled", "and", "self", ".", "color_mode", "==", "COLOR_MODE_RGB", "and", "self", ".", "rgb_color", "==", "rgb", ")", ":", "_LOGGER", ".", "debug", "(", "\"RGB already set to: %s\"", ",", "rgb", ")", "# Already set, and since we get pushed updates", "# we avoid setting it again to ensure we do not", "# hit the rate limit", "return", "_LOGGER", ".", "debug", "(", "\"Setting RGB: %s\"", ",", "rgb", ")", "await", "self", ".", "_bulb", ".", "async_set_rgb", "(", "*", "rgb", ",", "duration", "=", "duration", ",", "light_type", "=", "self", ".", "light_type", ")" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/yeelight/light.py#L656-L674
huggingface/transformers
623b4f7c63f60cce917677ee704d6c93ee960b4b
src/transformers/pipelines/conversational.py
python
Conversation.__eq__
(self, other)
return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses )
[]
def __eq__(self, other): if not isinstance(other, Conversation): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses )
[ "def", "__eq__", "(", "self", ",", "other", ")", ":", "if", "not", "isinstance", "(", "other", ",", "Conversation", ")", ":", "return", "False", "if", "self", ".", "uuid", "==", "other", ".", "uuid", ":", "return", "True", "return", "(", "self", ".", "new_user_input", "==", "other", ".", "new_user_input", "and", "self", ".", "past_user_inputs", "==", "other", ".", "past_user_inputs", "and", "self", ".", "generated_responses", "==", "other", ".", "generated_responses", ")" ]
https://github.com/huggingface/transformers/blob/623b4f7c63f60cce917677ee704d6c93ee960b4b/src/transformers/pipelines/conversational.py#L72-L81
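A hedged usage sketch of the equality semantics, assuming the transformers version in this record, where Conversation takes the initial user input as its first constructor argument; content-equal conversations compare equal even though each instance gets a fresh uuid:

from transformers import Conversation

a = Conversation("Hello there")
b = Conversation("Hello there")
print(a == a)  # True via the uuid short-circuit
print(a == b)  # True: same inputs and (empty) response histories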
spesmilo/electrum
bdbd59300fbd35b01605e66145458e5f396108e8
electrum/daemon.py
python
Daemon._run
(self, jobs: Iterable = None)
[]
async def _run(self, jobs: Iterable = None): if jobs is None: jobs = [] self.logger.info("starting taskgroup.") try: async with self.taskgroup as group: [await group.spawn(job) for job in jobs] await group.spawn(asyncio.Event().wait) # run forever (until cancel) except asyncio.CancelledError: raise except Exception as e: self.logger.exception("taskgroup died.") util.send_exception_to_crash_reporter(e) finally: self.logger.info("taskgroup stopped.") # note: we could just "await self.stop()", but in that case GUI users would # not see the exception (especially if the GUI did not start yet). self._stopping_soon_or_errored.set()
[ "async", "def", "_run", "(", "self", ",", "jobs", ":", "Iterable", "=", "None", ")", ":", "if", "jobs", "is", "None", ":", "jobs", "=", "[", "]", "self", ".", "logger", ".", "info", "(", "\"starting taskgroup.\"", ")", "try", ":", "async", "with", "self", ".", "taskgroup", "as", "group", ":", "[", "await", "group", ".", "spawn", "(", "job", ")", "for", "job", "in", "jobs", "]", "await", "group", ".", "spawn", "(", "asyncio", ".", "Event", "(", ")", ".", "wait", ")", "# run forever (until cancel)", "except", "asyncio", ".", "CancelledError", ":", "raise", "except", "Exception", "as", "e", ":", "self", ".", "logger", ".", "exception", "(", "\"taskgroup died.\"", ")", "util", ".", "send_exception_to_crash_reporter", "(", "e", ")", "finally", ":", "self", ".", "logger", ".", "info", "(", "\"taskgroup stopped.\"", ")", "# note: we could just \"await self.stop()\", but in that case GUI users would", "# not see the exception (especially if the GUI did not start yet).", "self", ".", "_stopping_soon_or_errored", ".", "set", "(", ")" ]
https://github.com/spesmilo/electrum/blob/bdbd59300fbd35b01605e66145458e5f396108e8/electrum/daemon.py#L500-L517
tensorflow/mesh
57ed4018e6a173952501b074daabad32b6449f3d
mesh_tensorflow/ops.py
python
random_uniform
(mesh, shape, **kwargs)
return RandomOperation(mesh, shape, tf.random.uniform, **kwargs).outputs[0]
Random uniform. Args: mesh: a Mesh shape: a Shape **kwargs: keyword args for tf.random.uniform, except seed Returns: a Tensor
Random uniform.
[ "Random", "uniform", "." ]
def random_uniform(mesh, shape, **kwargs): """Random uniform. Args: mesh: a Mesh shape: a Shape **kwargs: keyword args for tf.random.uniform, except seed Returns: a Tensor """ shape = convert_to_shape(shape) return RandomOperation(mesh, shape, tf.random.uniform, **kwargs).outputs[0]
[ "def", "random_uniform", "(", "mesh", ",", "shape", ",", "*", "*", "kwargs", ")", ":", "shape", "=", "convert_to_shape", "(", "shape", ")", "return", "RandomOperation", "(", "mesh", ",", "shape", ",", "tf", ".", "random", ".", "uniform", ",", "*", "*", "kwargs", ")", ".", "outputs", "[", "0", "]" ]
https://github.com/tensorflow/mesh/blob/57ed4018e6a173952501b074daabad32b6449f3d/mesh_tensorflow/ops.py#L5850-L5862
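A hedged construction sketch for random_uniform, assuming mesh-tensorflow (and its TF1 dependency) is installed; the mesh and dimension names are illustrative, and the keyword arguments pass straight through to tf.random.uniform:

import mesh_tensorflow as mtf

graph = mtf.Graph()
mesh = mtf.Mesh(graph, "demo_mesh")
shape = mtf.Shape([mtf.Dimension("batch", 4), mtf.Dimension("length", 8)])
noise = mtf.random_uniform(mesh, shape, minval=0.0, maxval=1.0)  # builds the op; lowering/session run happens later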
openstack/cinder
23494a6d6c51451688191e1847a458f1d3cdcaa5
cinder/volume/drivers/dell_emc/powermax/masking.py
python
PowerMaxMasking._check_adding_volume_to_storage_group
( self, serial_number, device_id, storagegroup_name, volume_name, extra_specs)
return msg
Check if a volume is part of an sg and add it if not. :param serial_number: the array serial number :param device_id: the device id :param storagegroup_name: the storage group name :param volume_name: volume name :param extra_specs: extra specifications :returns: msg
Check if a volume is part of an sg and add it if not.
[ "Check", "if", "a", "volume", "is", "part", "of", "an", "sg", "and", "add", "it", "if", "not", "." ]
def _check_adding_volume_to_storage_group( self, serial_number, device_id, storagegroup_name, volume_name, extra_specs): """Check if a volume is part of an sg and add it if not. :param serial_number: the array serial number :param device_id: the device id :param storagegroup_name: the storage group name :param volume_name: volume name :param extra_specs: extra specifications :returns: msg """ msg = None if self.rest.is_volume_in_storagegroup( serial_number, device_id, storagegroup_name): LOG.info("Volume: %(volume_name)s is already part " "of storage group %(sg_name)s.", {'volume_name': volume_name, 'sg_name': storagegroup_name}) else: try: force = True if extra_specs.get(utils.IS_RE) else False self.add_volume_to_storage_group( serial_number, device_id, storagegroup_name, volume_name, extra_specs, force) except Exception as e: msg = ("Exception adding volume %(vol)s to %(sg)s. " "Exception received was %(e)s." % {'vol': volume_name, 'sg': storagegroup_name, 'e': six.text_type(e)}) LOG.error(msg) return msg
[ "def", "_check_adding_volume_to_storage_group", "(", "self", ",", "serial_number", ",", "device_id", ",", "storagegroup_name", ",", "volume_name", ",", "extra_specs", ")", ":", "msg", "=", "None", "if", "self", ".", "rest", ".", "is_volume_in_storagegroup", "(", "serial_number", ",", "device_id", ",", "storagegroup_name", ")", ":", "LOG", ".", "info", "(", "\"Volume: %(volume_name)s is already part \"", "\"of storage group %(sg_name)s.\"", ",", "{", "'volume_name'", ":", "volume_name", ",", "'sg_name'", ":", "storagegroup_name", "}", ")", "else", ":", "try", ":", "force", "=", "True", "if", "extra_specs", ".", "get", "(", "utils", ".", "IS_RE", ")", "else", "False", "self", ".", "add_volume_to_storage_group", "(", "serial_number", ",", "device_id", ",", "storagegroup_name", ",", "volume_name", ",", "extra_specs", ",", "force", ")", "except", "Exception", "as", "e", ":", "msg", "=", "(", "\"Exception adding volume %(vol)s to %(sg)s. \"", "\"Exception received was %(e)s.\"", "%", "{", "'vol'", ":", "volume_name", ",", "'sg'", ":", "storagegroup_name", ",", "'e'", ":", "six", ".", "text_type", "(", "e", ")", "}", ")", "LOG", ".", "error", "(", "msg", ")", "return", "msg" ]
https://github.com/openstack/cinder/blob/23494a6d6c51451688191e1847a458f1d3cdcaa5/cinder/volume/drivers/dell_emc/powermax/masking.py#L708-L739
tp4a/teleport
1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad
server/www/packages/packages-linux/x64/psutil/__init__.py
python
Process.name
(self)
return name
The process name. The return value is cached after first call.
The process name. The return value is cached after first call.
[ "The", "process", "name", ".", "The", "return", "value", "is", "cached", "after", "first", "call", "." ]
def name(self): """The process name. The return value is cached after first call.""" # Process name is only cached on Windows as on POSIX it may # change, see: # https://github.com/giampaolo/psutil/issues/692 if WINDOWS and self._name is not None: return self._name name = self._proc.name() if POSIX and len(name) >= 15: # On UNIX the name gets truncated to the first 15 characters. # If it matches the first part of the cmdline we return that # one instead because it's usually more explicative. # Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon". try: cmdline = self.cmdline() except AccessDenied: pass else: if cmdline: extended_name = os.path.basename(cmdline[0]) if extended_name.startswith(name): name = extended_name self._name = name self._proc._name = name return name
[ "def", "name", "(", "self", ")", ":", "# Process name is only cached on Windows as on POSIX it may", "# change, see:", "# https://github.com/giampaolo/psutil/issues/692", "if", "WINDOWS", "and", "self", ".", "_name", "is", "not", "None", ":", "return", "self", ".", "_name", "name", "=", "self", ".", "_proc", ".", "name", "(", ")", "if", "POSIX", "and", "len", "(", "name", ")", ">=", "15", ":", "# On UNIX the name gets truncated to the first 15 characters.", "# If it matches the first part of the cmdline we return that", "# one instead because it's usually more explicative.", "# Examples are \"gnome-keyring-d\" vs. \"gnome-keyring-daemon\".", "try", ":", "cmdline", "=", "self", ".", "cmdline", "(", ")", "except", "AccessDenied", ":", "pass", "else", ":", "if", "cmdline", ":", "extended_name", "=", "os", ".", "path", ".", "basename", "(", "cmdline", "[", "0", "]", ")", "if", "extended_name", ".", "startswith", "(", "name", ")", ":", "name", "=", "extended_name", "self", ".", "_name", "=", "name", "self", ".", "_proc", ".", "_name", "=", "name", "return", "name" ]
https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-linux/x64/psutil/__init__.py#L623-L647
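A hedged usage sketch for Process.name, assuming psutil is installed:

import os
import psutil

proc = psutil.Process(os.getpid())
print(proc.name())  # e.g. "python3"; cached on Windows after the first call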
buke/GreenOdoo
3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df
runtime/python/lib/python2.7/os2emxpath.py
python
splitunc
(p)
return '', p
Split a pathname into UNC mount point and relative path specifiers. Return a 2-tuple (unc, rest); either part may be empty. If unc is not empty, it has the form '//host/mount' (or similar using backslashes). unc+rest is always the input path. Paths containing drive letters never have a UNC part.
Split a pathname into UNC mount point and relative path specifiers.
[ "Split", "a", "pathname", "into", "UNC", "mount", "point", "and", "relative", "path", "specifiers", "." ]
def splitunc(p):
    """Split a pathname into UNC mount point and relative path specifiers. Return a 2-tuple (unc, rest); either part may be empty. If unc is not empty, it has the form '//host/mount' (or similar using backslashes). unc+rest is always the input path. Paths containing drive letters never have a UNC part. """ if p[1:2] == ':': return '', p # Drive letter present firstTwo = p[0:2] if firstTwo == '/' * 2 or firstTwo == '\\' * 2: # is a UNC path: # vvvvvvvvvvvvvvvvvvvv equivalent to drive letter # \\machine\mountpoint\directories... # directory ^^^^^^^^^^^^^^^ normp = normcase(p) index = normp.find('/', 2) if index == -1: ##raise RuntimeError, 'illegal UNC path: "' + p + '"' return ("", p) index = normp.find('/', index + 1) if index == -1: index = len(p) return p[:index], p[index:] return '', p
[ "def", "splitunc", "(", "p", ")", ":", "if", "p", "[", "1", ":", "2", "]", "==", "':'", ":", "return", "''", ",", "p", "# Drive letter present", "firstTwo", "=", "p", "[", "0", ":", "2", "]", "if", "firstTwo", "==", "'/'", "*", "2", "or", "firstTwo", "==", "'\\\\'", "*", "2", ":", "# is a UNC path:", "# vvvvvvvvvvvvvvvvvvvv equivalent to drive letter", "# \\\\machine\\mountpoint\\directories...", "# directory ^^^^^^^^^^^^^^^", "normp", "=", "normcase", "(", "p", ")", "index", "=", "normp", ".", "find", "(", "'/'", ",", "2", ")", "if", "index", "==", "-", "1", ":", "##raise RuntimeError, 'illegal UNC path: \"' + p + '\"'", "return", "(", "\"\"", ",", "p", ")", "index", "=", "normp", ".", "find", "(", "'/'", ",", "index", "+", "1", ")", "if", "index", "==", "-", "1", ":", "index", "=", "len", "(", "p", ")", "return", "p", "[", ":", "index", "]", ",", "p", "[", "index", ":", "]", "return", "''", ",", "p" ]
https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/runtime/python/lib/python2.7/os2emxpath.py#L58-L83
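A self-contained sketch of the documented split behavior, approximating normcase with lower() plus backslash-to-slash conversion; splitunc_sketch is a hypothetical name, not the library function:

def splitunc_sketch(p):
    if p[1:2] == ':':
        return '', p                  # drive letter: never a UNC part
    if p[:2] in ('//', '\\\\'):
        normp = p.lower().replace('\\', '/')
        i = normp.find('/', 2)        # end of the host component
        if i == -1:
            return '', p              # malformed UNC path
        j = normp.find('/', i + 1)    # end of the mount component
        if j == -1:
            j = len(p)
        return p[:j], p[j:]
    return '', p

assert splitunc_sketch('//host/mount/dir/file') == ('//host/mount', '/dir/file')
assert splitunc_sketch('C:/tmp') == ('', 'C:/tmp')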
PrefectHQ/prefect
67bdc94e2211726d99561f6f52614bec8970e981
src/prefect/engine/results/local_result.py
python
LocalResult.read
(self, location: str)
return new
Reads a result from the local file system and returns the corresponding `Result` instance. Args: - location (str): the location to read from Returns: - Result: a new result instance with the data represented by the location
Reads a result from the local file system and returns the corresponding `Result` instance.
[ "Reads", "a", "result", "from", "the", "local", "file", "system", "and", "returns", "the", "corresponding", "Result", "instance", "." ]
def read(self, location: str) -> Result: """ Reads a result from the local file system and returns the corresponding `Result` instance. Args: - location (str): the location to read from Returns: - Result: a new result instance with the data represented by the location """ new = self.copy() new.location = location self.logger.debug("Starting to read result from {}...".format(location)) with open(os.path.join(self.dir, location), "rb") as f: value = f.read() new.value = self.serializer.deserialize(value) self.logger.debug("Finished reading result from {}...".format(location)) return new
[ "def", "read", "(", "self", ",", "location", ":", "str", ")", "->", "Result", ":", "new", "=", "self", ".", "copy", "(", ")", "new", ".", "location", "=", "location", "self", ".", "logger", ".", "debug", "(", "\"Starting to read result from {}...\"", ".", "format", "(", "location", ")", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "dir", ",", "location", ")", ",", "\"rb\"", ")", "as", "f", ":", "value", "=", "f", ".", "read", "(", ")", "new", ".", "value", "=", "self", ".", "serializer", ".", "deserialize", "(", "value", ")", "self", ".", "logger", ".", "debug", "(", "\"Finished reading result from {}...\"", ".", "format", "(", "location", ")", ")", "return", "new" ]
https://github.com/PrefectHQ/prefect/blob/67bdc94e2211726d99561f6f52614bec8970e981/src/prefect/engine/results/local_result.py#L68-L90
google/grr
8ad8a4d2c5a93c92729206b7771af19d92d4f915
grr/server/grr_response_server/databases/mysql_flows.py
python
MySQLDBFlowMixin._FlowObjectFromRow
(self, row)
return flow_obj
Generates a flow object from a database row.
Generates a flow object from a database row.
[ "Generates", "a", "flow", "object", "from", "a", "database", "row", "." ]
def _FlowObjectFromRow(self, row): """Generates a flow object from a database row.""" datetime = mysql_utils.TimestampToRDFDatetime cpu_time = db_utils.MicrosToSeconds # pyformat: disable (client_id, flow_id, long_flow_id, parent_flow_id, parent_hunt_id, name, creator, flow, flow_state, client_crash_info, pending_termination, next_request_to_process, processing_deadline, processing_on, processing_since, user_cpu_time, system_cpu_time, network_bytes_sent, num_replies_sent, timestamp, last_update_timestamp) = row # pyformat: enable flow_obj = rdf_flow_objects.Flow.FromSerializedBytes(flow) # We treat column values as the source of truth, not the proto. flow_obj.client_id = db_utils.IntToClientID(client_id) flow_obj.flow_id = db_utils.IntToFlowID(flow_id) flow_obj.long_flow_id = long_flow_id if parent_flow_id is not None: flow_obj.parent_flow_id = db_utils.IntToFlowID(parent_flow_id) if parent_hunt_id is not None: flow_obj.parent_hunt_id = db_utils.IntToHuntID(parent_hunt_id) if name is not None: flow_obj.flow_class_name = name if creator is not None: flow_obj.creator = creator if flow_state not in [None, rdf_flow_objects.Flow.FlowState.UNSET]: flow_obj.flow_state = flow_state if client_crash_info is not None: deserialize = rdf_client.ClientCrash.FromSerializedBytes flow_obj.client_crash_info = deserialize(client_crash_info) if pending_termination is not None: deserialize = rdf_flow_objects.PendingFlowTermination.FromSerializedBytes flow_obj.pending_termination = deserialize(pending_termination) if next_request_to_process: flow_obj.next_request_to_process = next_request_to_process if processing_deadline is not None: flow_obj.processing_deadline = datetime(processing_deadline) if processing_on is not None: flow_obj.processing_on = processing_on if processing_since is not None: flow_obj.processing_since = datetime(processing_since) flow_obj.cpu_time_used.user_cpu_time = cpu_time(user_cpu_time) flow_obj.cpu_time_used.system_cpu_time = cpu_time(system_cpu_time) flow_obj.network_bytes_sent = network_bytes_sent if num_replies_sent: flow_obj.num_replies_sent = num_replies_sent flow_obj.last_update_time = datetime(last_update_timestamp) # In case the create time is not stored in the serialized flow (which might # be the case), we fallback to the timestamp information stored in the # column. if flow_obj.create_time is None: flow_obj.create_time = datetime(timestamp) return flow_obj
[ "def", "_FlowObjectFromRow", "(", "self", ",", "row", ")", ":", "datetime", "=", "mysql_utils", ".", "TimestampToRDFDatetime", "cpu_time", "=", "db_utils", ".", "MicrosToSeconds", "# pyformat: disable", "(", "client_id", ",", "flow_id", ",", "long_flow_id", ",", "parent_flow_id", ",", "parent_hunt_id", ",", "name", ",", "creator", ",", "flow", ",", "flow_state", ",", "client_crash_info", ",", "pending_termination", ",", "next_request_to_process", ",", "processing_deadline", ",", "processing_on", ",", "processing_since", ",", "user_cpu_time", ",", "system_cpu_time", ",", "network_bytes_sent", ",", "num_replies_sent", ",", "timestamp", ",", "last_update_timestamp", ")", "=", "row", "# pyformat: enable", "flow_obj", "=", "rdf_flow_objects", ".", "Flow", ".", "FromSerializedBytes", "(", "flow", ")", "# We treat column values as the source of truth, not the proto.", "flow_obj", ".", "client_id", "=", "db_utils", ".", "IntToClientID", "(", "client_id", ")", "flow_obj", ".", "flow_id", "=", "db_utils", ".", "IntToFlowID", "(", "flow_id", ")", "flow_obj", ".", "long_flow_id", "=", "long_flow_id", "if", "parent_flow_id", "is", "not", "None", ":", "flow_obj", ".", "parent_flow_id", "=", "db_utils", ".", "IntToFlowID", "(", "parent_flow_id", ")", "if", "parent_hunt_id", "is", "not", "None", ":", "flow_obj", ".", "parent_hunt_id", "=", "db_utils", ".", "IntToHuntID", "(", "parent_hunt_id", ")", "if", "name", "is", "not", "None", ":", "flow_obj", ".", "flow_class_name", "=", "name", "if", "creator", "is", "not", "None", ":", "flow_obj", ".", "creator", "=", "creator", "if", "flow_state", "not", "in", "[", "None", ",", "rdf_flow_objects", ".", "Flow", ".", "FlowState", ".", "UNSET", "]", ":", "flow_obj", ".", "flow_state", "=", "flow_state", "if", "client_crash_info", "is", "not", "None", ":", "deserialize", "=", "rdf_client", ".", "ClientCrash", ".", "FromSerializedBytes", "flow_obj", ".", "client_crash_info", "=", "deserialize", "(", "client_crash_info", ")", "if", "pending_termination", "is", "not", "None", ":", "deserialize", "=", "rdf_flow_objects", ".", "PendingFlowTermination", ".", "FromSerializedBytes", "flow_obj", ".", "pending_termination", "=", "deserialize", "(", "pending_termination", ")", "if", "next_request_to_process", ":", "flow_obj", ".", "next_request_to_process", "=", "next_request_to_process", "if", "processing_deadline", "is", "not", "None", ":", "flow_obj", ".", "processing_deadline", "=", "datetime", "(", "processing_deadline", ")", "if", "processing_on", "is", "not", "None", ":", "flow_obj", ".", "processing_on", "=", "processing_on", "if", "processing_since", "is", "not", "None", ":", "flow_obj", ".", "processing_since", "=", "datetime", "(", "processing_since", ")", "flow_obj", ".", "cpu_time_used", ".", "user_cpu_time", "=", "cpu_time", "(", "user_cpu_time", ")", "flow_obj", ".", "cpu_time_used", ".", "system_cpu_time", "=", "cpu_time", "(", "system_cpu_time", ")", "flow_obj", ".", "network_bytes_sent", "=", "network_bytes_sent", "if", "num_replies_sent", ":", "flow_obj", ".", "num_replies_sent", "=", "num_replies_sent", "flow_obj", ".", "last_update_time", "=", "datetime", "(", "last_update_timestamp", ")", "# In case the create time is not stored in the serialized flow (which might", "# be the case), we fallback to the timestamp information stored in the", "# column.", "if", "flow_obj", ".", "create_time", "is", "None", ":", "flow_obj", ".", "create_time", "=", "datetime", "(", "timestamp", ")", "return", "flow_obj" ]
https://github.com/google/grr/blob/8ad8a4d2c5a93c92729206b7771af19d92d4f915/grr/server/grr_response_server/databases/mysql_flows.py#L347-L408
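The GRR method above follows a common pattern: unpack a wide row tuple, deserialize a blob, then let column values override the deserialized fields. A self-contained analogue of that pattern, using a plain dataclass rather than GRR's RDF types (all names here are hypothetical):

```
from dataclasses import dataclass
from typing import Optional

@dataclass
class Flow:
    client_id: str = ""
    flow_id: str = ""
    creator: Optional[str] = None

def flow_from_row(row):
    client_id, flow_id, creator = row
    flow = Flow()
    # Treat column values as the source of truth, as the method above does.
    flow.client_id = "C.%016x" % client_id
    flow.flow_id = "%08X" % flow_id
    if creator is not None:
        flow.creator = creator
    return flow

print(flow_from_row((1, 255, "alice")))
```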
wandb/client
3963364d8112b7dedb928fa423b6878ea1b467d9
wandb/sdk/interface/artifacts.py
python
Artifact.__setitem__
(self, name: str, item: WBValue)
Adds `item` to the artifact at path `name` Arguments: name: (str) The path within the artifact to add the object. item: (wandb.WBValue) The object to add. Returns: ArtifactManifestEntry: the added manifest entry Examples: Basic usage ``` artifact = wandb.Artifact('my_table', 'dataset') table = wandb.Table(columns=["a", "b", "c"], data=[[i, i*2, 2**i]]) artifact["my_table"] = table wandb.log_artifact(artifact) ``` Retrieving an object: ``` artifact = wandb.use_artifact('my_table:latest') table = artifact["my_table"] ```
Adds `item` to the artifact at path `name`
[ "Adds", "item", "to", "the", "artifact", "at", "path", "name" ]
def __setitem__(self, name: str, item: WBValue): """ Adds `item` to the artifact at path `name` Arguments: name: (str) The path within the artifact to add the object. item: (wandb.WBValue) The object to add. Returns: ArtifactManifestEntry: the added manifest entry Examples: Basic usage ``` artifact = wandb.Artifact('my_table', 'dataset') table = wandb.Table(columns=["a", "b", "c"], data=[[i, i*2, 2**i]]) artifact["my_table"] = table wandb.log_artifact(artifact) ``` Retrieving an object: ``` artifact = wandb.use_artifact('my_table:latest') table = artifact["my_table"] ``` """ raise NotImplementedError
[ "def", "__setitem__", "(", "self", ",", "name", ":", "str", ",", "item", ":", "WBValue", ")", ":", "raise", "NotImplementedError" ]
https://github.com/wandb/client/blob/3963364d8112b7dedb928fa423b6878ea1b467d9/wandb/sdk/interface/artifacts.py#L700-L727
FSecureLABS/Jandroid
e31d0dab58a2bfd6ed8e0a387172b8bd7c893436
libs/platform-tools/platform-tools_windows/systrace/catapult/devil/devil/utils/geometry.py
python
Point.__add__
(self, other)
Sum of two points, e.g. p + q.
Sum of two points, e.g. p + q.
[ "Sum", "of", "two", "points", "e", ".", "g", ".", "p", "+", "q", "." ]
def __add__(self, other): """Sum of two points, e.g. p + q.""" if isinstance(other, Point): return Point(self.x + other.x, self.y + other.y) else: return NotImplemented
[ "def", "__add__", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "Point", ")", ":", "return", "Point", "(", "self", ".", "x", "+", "other", ".", "x", ",", "self", ".", "y", "+", "other", ".", "y", ")", "else", ":", "return", "NotImplemented" ]
https://github.com/FSecureLABS/Jandroid/blob/e31d0dab58a2bfd6ed8e0a387172b8bd7c893436/libs/platform-tools/platform-tools_windows/systrace/catapult/devil/devil/utils/geometry.py#L22-L27
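A self-contained sketch of the `NotImplemented` convention used by `Point.__add__` above, which lets Python fall back to the right operand's `__radd__` (or raise `TypeError`) for unsupported types:

```
class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y
    def __add__(self, other):
        if isinstance(other, Point):
            return Point(self.x + other.x, self.y + other.y)
        return NotImplemented  # defer to other.__radd__, else TypeError

p = Point(1, 2) + Point(3, 4)
print(p.x, p.y)  # 4 6
```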
scrapy/scrapy
b04cfa48328d5d5749dca6f50fa34e0cfc664c89
scrapy/utils/python.py
python
MutableChain.extend
(self, *iterables: Iterable)
[]
def extend(self, *iterables: Iterable): self.data = chain(self.data, chain.from_iterable(iterables))
[ "def", "extend", "(", "self", ",", "*", "iterables", ":", "Iterable", ")", ":", "self", ".", "data", "=", "chain", "(", "self", ".", "data", ",", "chain", ".", "from_iterable", "(", "iterables", ")", ")" ]
https://github.com/scrapy/scrapy/blob/b04cfa48328d5d5749dca6f50fa34e0cfc664c89/scrapy/utils/python.py#L347-L348
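`MutableChain.extend` appends more iterables to a chain that may already be mid-iteration. A hedged usage sketch (requires Scrapy installed; constructing `MutableChain` directly with several iterables is an assumption about its constructor):

```
from scrapy.utils.python import MutableChain

c = MutableChain([1, 2], [3])
print(next(c))         # 1
c.extend([4, 5], [6])  # the method shown in this record
print(list(c))         # [2, 3, 4, 5, 6]
```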
alteryx/featuretools
d59e11082962f163540fd6e185901f65c506326a
featuretools/synthesis/deep_feature_synthesis.py
python
_match_contains_numeric_foreign_key
(match)
return any(is_valid_input(f.column_schema, match_schema) for f in match)
[]
def _match_contains_numeric_foreign_key(match): match_schema = ColumnSchema(semantic_tags={'foreign_key', 'numeric'}) return any(is_valid_input(f.column_schema, match_schema) for f in match)
[ "def", "_match_contains_numeric_foreign_key", "(", "match", ")", ":", "match_schema", "=", "ColumnSchema", "(", "semantic_tags", "=", "{", "'foreign_key'", ",", "'numeric'", "}", ")", "return", "any", "(", "is_valid_input", "(", "f", ".", "column_schema", ",", "match_schema", ")", "for", "f", "in", "match", ")" ]
https://github.com/alteryx/featuretools/blob/d59e11082962f163540fd6e185901f65c506326a/featuretools/synthesis/deep_feature_synthesis.py#L842-L844
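An illustrative stand-in for the check above, using plain tag sets instead of woodwork's `ColumnSchema`/`is_valid_input` (the `Feature` class here is hypothetical):

```
class Feature:
    def __init__(self, tags):
        self.tags = set(tags)

def match_contains_numeric_foreign_key(match):
    required = {"foreign_key", "numeric"}
    return any(required <= f.tags for f in match)

match = [Feature({"index"}), Feature({"foreign_key", "numeric"})]
print(match_contains_numeric_foreign_key(match))  # True
```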
bitcraze/crazyflie-lib-python
876f0dc003b91ba5e4de05daae9d0b79cf600f81
cflib/crtp/udpdriver.py
python
UdpDriver.get_name
(self)
return 'udp'
[]
def get_name(self): return 'udp'
[ "def", "get_name", "(", "self", ")", ":", "return", "'udp'" ]
https://github.com/bitcraze/crazyflie-lib-python/blob/876f0dc003b91ba5e4de05daae9d0b79cf600f81/cflib/crtp/udpdriver.py#L100-L101
cloudera/hue
23f02102d4547c17c32bd5ea0eb24e9eadd657a4
desktop/core/ext-py/Babel-2.5.1/babel/messages/frontend.py
python
CommandLineInterface._configure_command
(self, cmdname, argv)
return cmdinst
:type cmdname: str :type argv: list[str]
:type cmdname: str :type argv: list[str]
[ ":", "type", "cmdname", ":", "str", ":", "type", "argv", ":", "list", "[", "str", "]" ]
def _configure_command(self, cmdname, argv): """ :type cmdname: str :type argv: list[str] """ cmdclass = self.command_classes[cmdname] cmdinst = cmdclass() if self.log: cmdinst.log = self.log # Use our logger, not distutils'. assert isinstance(cmdinst, Command) cmdinst.initialize_options() parser = optparse.OptionParser( usage=self.usage % (cmdname, ''), description=self.commands[cmdname] ) as_args = getattr(cmdclass, "as_args", ()) for long, short, help in cmdclass.user_options: name = long.strip("=") default = getattr(cmdinst, name.replace('-', '_')) strs = ["--%s" % name] if short: strs.append("-%s" % short) strs.extend(cmdclass.option_aliases.get(name, ())) choices = cmdclass.option_choices.get(name, None) if name == as_args: parser.usage += "<%s>" % name elif name in cmdclass.boolean_options: parser.add_option(*strs, action="store_true", help=help) elif name in cmdclass.multiple_value_options: parser.add_option(*strs, action="append", help=help, choices=choices) else: parser.add_option(*strs, help=help, default=default, choices=choices) options, args = parser.parse_args(argv) if as_args: setattr(options, as_args.replace('-', '_'), args) for key, value in vars(options).items(): setattr(cmdinst, key, value) try: cmdinst.ensure_finalized() except DistutilsOptionError as err: parser.error(str(err)) return cmdinst
[ "def", "_configure_command", "(", "self", ",", "cmdname", ",", "argv", ")", ":", "cmdclass", "=", "self", ".", "command_classes", "[", "cmdname", "]", "cmdinst", "=", "cmdclass", "(", ")", "if", "self", ".", "log", ":", "cmdinst", ".", "log", "=", "self", ".", "log", "# Use our logger, not distutils'.", "assert", "isinstance", "(", "cmdinst", ",", "Command", ")", "cmdinst", ".", "initialize_options", "(", ")", "parser", "=", "optparse", ".", "OptionParser", "(", "usage", "=", "self", ".", "usage", "%", "(", "cmdname", ",", "''", ")", ",", "description", "=", "self", ".", "commands", "[", "cmdname", "]", ")", "as_args", "=", "getattr", "(", "cmdclass", ",", "\"as_args\"", ",", "(", ")", ")", "for", "long", ",", "short", ",", "help", "in", "cmdclass", ".", "user_options", ":", "name", "=", "long", ".", "strip", "(", "\"=\"", ")", "default", "=", "getattr", "(", "cmdinst", ",", "name", ".", "replace", "(", "'-'", ",", "'_'", ")", ")", "strs", "=", "[", "\"--%s\"", "%", "name", "]", "if", "short", ":", "strs", ".", "append", "(", "\"-%s\"", "%", "short", ")", "strs", ".", "extend", "(", "cmdclass", ".", "option_aliases", ".", "get", "(", "name", ",", "(", ")", ")", ")", "choices", "=", "cmdclass", ".", "option_choices", ".", "get", "(", "name", ",", "None", ")", "if", "name", "==", "as_args", ":", "parser", ".", "usage", "+=", "\"<%s>\"", "%", "name", "elif", "name", "in", "cmdclass", ".", "boolean_options", ":", "parser", ".", "add_option", "(", "*", "strs", ",", "action", "=", "\"store_true\"", ",", "help", "=", "help", ")", "elif", "name", "in", "cmdclass", ".", "multiple_value_options", ":", "parser", ".", "add_option", "(", "*", "strs", ",", "action", "=", "\"append\"", ",", "help", "=", "help", ",", "choices", "=", "choices", ")", "else", ":", "parser", ".", "add_option", "(", "*", "strs", ",", "help", "=", "help", ",", "default", "=", "default", ",", "choices", "=", "choices", ")", "options", ",", "args", "=", "parser", ".", "parse_args", "(", "argv", ")", "if", "as_args", ":", "setattr", "(", "options", ",", "as_args", ".", "replace", "(", "'-'", ",", "'_'", ")", ",", "args", ")", "for", "key", ",", "value", "in", "vars", "(", "options", ")", ".", "items", "(", ")", ":", "setattr", "(", "cmdinst", ",", "key", ",", "value", ")", "try", ":", "cmdinst", ".", "ensure_finalized", "(", ")", "except", "DistutilsOptionError", "as", "err", ":", "parser", ".", "error", "(", "str", "(", "err", ")", ")", "return", "cmdinst" ]
https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/Babel-2.5.1/babel/messages/frontend.py#L858-L904
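The core of `_configure_command` is translating a distutils-style `(long, short, help)` option table into `optparse` options. A self-contained stdlib sketch of that translation (the option names are made up):

```
import optparse

user_options = [("output-dir=", "o", "directory to write to"),
                ("verbose", "v", "print more")]
boolean_options = ["verbose"]

parser = optparse.OptionParser()
for long, short, help in user_options:
    name = long.strip("=")
    strs = ["--%s" % name] + (["-%s" % short] if short else [])
    if name in boolean_options:
        parser.add_option(*strs, action="store_true", help=help)
    else:
        parser.add_option(*strs, help=help)

options, args = parser.parse_args(["-o", "build", "--verbose"])
print(options.output_dir, options.verbose)  # build True
```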
pawamoy/aria2p
2855c6a9a38e36278671258439f6caf59c39cfc3
src/aria2p/downloads.py
python
Download.move_up
(self, pos: int = 1)
return self.api.move_up(self, pos)
Move the download up in the queue. Arguments: pos: Number of times to move up. Returns: The new position of the download.
Move the download up in the queue.
[ "Move", "the", "download", "up", "in", "the", "queue", "." ]
def move_up(self, pos: int = 1) -> int: """ Move the download up in the queue. Arguments: pos: Number of times to move up. Returns: The new position of the download. """ return self.api.move_up(self, pos)
[ "def", "move_up", "(", "self", ",", "pos", ":", "int", "=", "1", ")", "->", "int", ":", "return", "self", ".", "api", ".", "move_up", "(", "self", ",", "pos", ")" ]
https://github.com/pawamoy/aria2p/blob/2855c6a9a38e36278671258439f6caf59c39cfc3/src/aria2p/downloads.py#L969-L979
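A hedged usage sketch for `Download.move_up`: it needs a running `aria2c --enable-rpc` daemon, and the host/port/secret values below are illustrative:

```
import aria2p

api = aria2p.API(aria2p.Client(host="http://localhost", port=6800, secret=""))
downloads = api.get_downloads()
if downloads:
    new_pos = downloads[0].move_up(2)  # the method shown in this record
    print("now at position", new_pos)
```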
pyansys/pymapdl
c07291fc062b359abf0e92b95a92d753a95ef3d7
ansys/mapdl/core/logging.py
python
PymapdlCustomAdapter.log_to_stdout
(self, level=LOG_LEVEL)
Add standard output handler to the logger. Parameters ---------- level : str, optional Level of logging record. By default LOG_LEVEL
Add standard output handler to the logger.
[ "Add", "standard", "output", "handler", "to", "the", "logger", "." ]
def log_to_stdout(self, level=LOG_LEVEL): """Add standard output handler to the logger. Parameters ---------- level : str, optional Level of logging record. By default LOG_LEVEL """ if self.std_out_handler: raise Exception('Stdout logger already defined.') self.logger = add_stdout_handler(self.logger, level=level) self.std_out_handler = self.logger.std_out_handler
[ "def", "log_to_stdout", "(", "self", ",", "level", "=", "LOG_LEVEL", ")", ":", "if", "self", ".", "std_out_handler", ":", "raise", "Exception", "(", "'Stdout logger already defined.'", ")", "self", ".", "logger", "=", "add_stdout_handler", "(", "self", ".", "logger", ",", "level", "=", "level", ")", "self", ".", "std_out_handler", "=", "self", ".", "logger", ".", "std_out_handler" ]
https://github.com/pyansys/pymapdl/blob/c07291fc062b359abf0e92b95a92d753a95ef3d7/ansys/mapdl/core/logging.py#L198-L210
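Since the adapter above is internal to PyMAPDL, here is a generic stdlib analogue of `log_to_stdout` (not the PyMAPDL API itself), showing the same guard-then-attach pattern:

```
import logging
import sys

def add_stdout_handler(logger, level="DEBUG"):
    if any(isinstance(h, logging.StreamHandler) for h in logger.handlers):
        raise RuntimeError("Stdout handler already defined.")
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(level)
    logger.addHandler(handler)
    return logger

log = add_stdout_handler(logging.getLogger("mapdl-demo"))
log.warning("visible on stdout")
```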
vially/googlemusic-xbmc
ecdc7748356307008bfc18297da5ffb943665685
resources/Lib/gmusicapi/protocol/metadata.py
python
detail_line
(e)
return line
Given an expectation, return a readable one-line explanation of it.
Given an expectation, return a readable one-line explanation of it.
[ "Given", "an", "expectation", "return", "a", "readable", "one", "-", "line", "explanation", "of", "it", "." ]
def detail_line(e): """Given an expectation, return a readable one-line explanation of it.""" fields = [fname for fname in ('mutable', 'optional', 'volatile') if getattr(e, fname, None)] if e.depends_on: fields.append("depends_on=%s" % e.depends_on) line = ', '.join(fields) if line: line = "*(%s)*" % line return line
[ "def", "detail_line", "(", "e", ")", ":", "fields", "=", "[", "fname", "for", "fname", "in", "(", "'mutable'", ",", "'optional'", ",", "'volatile'", ")", "if", "getattr", "(", "e", ",", "fname", ",", "None", ")", "]", "if", "e", ".", "depends_on", ":", "fields", ".", "append", "(", "\"depends_on=%s\"", "%", "e", ".", "depends_on", ")", "line", "=", "', '", ".", "join", "(", "fields", ")", "if", "line", ":", "line", "=", "\"*(%s)*\"", "%", "line", "return", "line" ]
https://github.com/vially/googlemusic-xbmc/blob/ecdc7748356307008bfc18297da5ffb943665685/resources/Lib/gmusicapi/protocol/metadata.py#L204-L216
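A quick check of `detail_line` (as defined in this record) with a stand-in expectation object:

```
from types import SimpleNamespace

e = SimpleNamespace(mutable=True, optional=False, volatile=True,
                    depends_on="id")
print(detail_line(e))  # *(mutable, volatile, depends_on=id)*
```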
biolab/orange3
41685e1c7b1d1babe680113685a2d44bcc9fec0b
Orange/widgets/visualize/owscatterplot.py
python
OWScatterPlot.attr_changed
(self)
[]
def attr_changed(self): self.cb_reg_line.setEnabled(self.can_draw_regresssion_line()) self.setup_plot() self.commit.deferred()
[ "def", "attr_changed", "(", "self", ")", ":", "self", ".", "cb_reg_line", ".", "setEnabled", "(", "self", ".", "can_draw_regresssion_line", "(", ")", ")", "self", ".", "setup_plot", "(", ")", "self", ".", "commit", ".", "deferred", "(", ")" ]
https://github.com/biolab/orange3/blob/41685e1c7b1d1babe680113685a2d44bcc9fec0b/Orange/widgets/visualize/owscatterplot.py#L607-L610
rdiff-backup/rdiff-backup
321e0cd6e5e47d4c158a0172e47ab38240a8b653
src/rdiff_backup/Main.py
python
main_run_and_exit
(arglist)
Main function to be called with arguments list without the name of the program, aka $0 resp. sys.argv[0]. The function simply calls the internal function '_main_run' and exits with the code returned.
Main function to be called with arguments list without the name of the program, aka $0 resp. sys.argv[0].
[ "Main", "function", "to", "be", "called", "with", "arguments", "list", "without", "the", "name", "of", "the", "program", "aka", "$0", "resp", ".", "sys", ".", "argv", "[", "0", "]", "." ]
def main_run_and_exit(arglist): """ Main function to be called with arguments list without the name of the program, aka $0 resp. sys.argv[0]. The function simply calls the internal function '_main_run' and exits with the code returned. """ sys.exit(_main_run(arglist))
[ "def", "main_run_and_exit", "(", "arglist", ")", ":", "sys", ".", "exit", "(", "_main_run", "(", "arglist", ")", ")" ]
https://github.com/rdiff-backup/rdiff-backup/blob/321e0cd6e5e47d4c158a0172e47ab38240a8b653/src/rdiff_backup/Main.py#L29-L37
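A hedged sketch of how `main_run_and_exit` is typically wired up as a script entry point (the import path mirrors `src/rdiff_backup/Main.py`):

```
import sys
from rdiff_backup.Main import main_run_and_exit

if __name__ == "__main__":
    # Drop argv[0] (the program name), as the docstring requires.
    main_run_and_exit(sys.argv[1:])
```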
rowliny/DiffHelper
ab3a96f58f9579d0023aed9ebd785f4edf26f8af
Tool/SitePackages/nltk/draw/util.py
python
CanvasWidget._manage
(self)
Arrange the child widgets of this canvas widget. This method is called when the canvas widget is initially created. It is also called if the user calls the ``manage`` method on this canvas widget or any of its ancestors. :rtype: None
Arrange the child widgets of this canvas widget. This method is called when the canvas widget is initially created. It is also called if the user calls the ``manage`` method on this canvas widget or any of its ancestors.
[ "Arrange", "the", "child", "widgets", "of", "this", "canvas", "widget", ".", "This", "method", "is", "called", "when", "the", "canvas", "widget", "is", "initially", "created", ".", "It", "is", "also", "called", "if", "the", "user", "calls", "the", "manage", "method", "on", "this", "canvas", "widget", "or", "any", "of", "its", "ancestors", "." ]
def _manage(self): """ Arrange the child widgets of this canvas widget. This method is called when the canvas widget is initially created. It is also called if the user calls the ``manage`` method on this canvas widget or any of its ancestors. :rtype: None """
[ "def", "_manage", "(", "self", ")", ":" ]
https://github.com/rowliny/DiffHelper/blob/ab3a96f58f9579d0023aed9ebd785f4edf26f8af/Tool/SitePackages/nltk/draw/util.py#L716-L724
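A hedged sketch of overriding `_manage` in a `CanvasWidget` subclass; it assumes NLTK's documented subclassing contract (register children with `_add_child_widget`, call `CanvasWidget.__init__` last, define `_tags`) and uses the `bbox()`/`move()` methods child widgets expose:

```
from nltk.draw.util import CanvasWidget

class RowWidget(CanvasWidget):
    def __init__(self, canvas, *children, **attribs):
        self._children = list(children)
        for child in children:
            self._add_child_widget(child)
        CanvasWidget.__init__(self, canvas, **attribs)

    def _manage(self):
        # Lay the children out left-to-right with a small gap.
        x = 0
        for child in self._children:
            left, top, right, bot = child.bbox()
            child.move(x - left, -top)
            x += (right - left) + 5

    def _tags(self):
        return []
```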
chribsen/simple-machine-learning-examples
dc94e52a4cebdc8bb959ff88b81ff8cfeca25022
venv/lib/python2.7/site-packages/pandas/indexes/base.py
python
Index.get_slice_bound
(self, label, side, kind)
Calculate slice bound that corresponds to given label. Returns leftmost (one-past-the-rightmost if ``side=='right'``) position of given label. Parameters ---------- label : object side : {'left', 'right'} kind : {'ix', 'loc', 'getitem'}
Calculate slice bound that corresponds to given label.
[ "Calculate", "slice", "bound", "that", "corresponds", "to", "given", "label", "." ]
def get_slice_bound(self, label, side, kind): """ Calculate slice bound that corresponds to given label. Returns leftmost (one-past-the-rightmost if ``side=='right'``) position of given label. Parameters ---------- label : object side : {'left', 'right'} kind : {'ix', 'loc', 'getitem'} """ assert kind in ['ix', 'loc', 'getitem', None] if side not in ('left', 'right'): raise ValueError("Invalid value for side kwarg," " must be either 'left' or 'right': %s" % (side, )) original_label = label # For datetime indices label may be a string that has to be converted # to datetime boundary according to its resolution. label = self._maybe_cast_slice_bound(label, side, kind) # we need to look up the label try: slc = self.get_loc(label) except KeyError as err: try: return self._searchsorted_monotonic(label, side) except ValueError: # raise the original KeyError raise err if isinstance(slc, np.ndarray): # get_loc may return a boolean array or an array of indices, which # is OK as long as they are representable by a slice. if is_bool_dtype(slc): slc = lib.maybe_booleans_to_slice(slc.view('u1')) else: slc = lib.maybe_indices_to_slice(slc.astype('i8'), len(self)) if isinstance(slc, np.ndarray): raise KeyError("Cannot get %s slice bound for non-unique " "label: %r" % (side, original_label)) if isinstance(slc, slice): if side == 'left': return slc.start else: return slc.stop else: if side == 'right': return slc + 1 else: return slc
[ "def", "get_slice_bound", "(", "self", ",", "label", ",", "side", ",", "kind", ")", ":", "assert", "kind", "in", "[", "'ix'", ",", "'loc'", ",", "'getitem'", ",", "None", "]", "if", "side", "not", "in", "(", "'left'", ",", "'right'", ")", ":", "raise", "ValueError", "(", "\"Invalid value for side kwarg,\"", "\" must be either 'left' or 'right': %s\"", "%", "(", "side", ",", ")", ")", "original_label", "=", "label", "# For datetime indices label may be a string that has to be converted", "# to datetime boundary according to its resolution.", "label", "=", "self", ".", "_maybe_cast_slice_bound", "(", "label", ",", "side", ",", "kind", ")", "# we need to look up the label", "try", ":", "slc", "=", "self", ".", "get_loc", "(", "label", ")", "except", "KeyError", "as", "err", ":", "try", ":", "return", "self", ".", "_searchsorted_monotonic", "(", "label", ",", "side", ")", "except", "ValueError", ":", "# raise the original KeyError", "raise", "err", "if", "isinstance", "(", "slc", ",", "np", ".", "ndarray", ")", ":", "# get_loc may return a boolean array or an array of indices, which", "# is OK as long as they are representable by a slice.", "if", "is_bool_dtype", "(", "slc", ")", ":", "slc", "=", "lib", ".", "maybe_booleans_to_slice", "(", "slc", ".", "view", "(", "'u1'", ")", ")", "else", ":", "slc", "=", "lib", ".", "maybe_indices_to_slice", "(", "slc", ".", "astype", "(", "'i8'", ")", ",", "len", "(", "self", ")", ")", "if", "isinstance", "(", "slc", ",", "np", ".", "ndarray", ")", ":", "raise", "KeyError", "(", "\"Cannot get %s slice bound for non-unique \"", "\"label: %r\"", "%", "(", "side", ",", "original_label", ")", ")", "if", "isinstance", "(", "slc", ",", "slice", ")", ":", "if", "side", "==", "'left'", ":", "return", "slc", ".", "start", "else", ":", "return", "slc", ".", "stop", "else", ":", "if", "side", "==", "'right'", ":", "return", "slc", "+", "1", "else", ":", "return", "slc" ]
https://github.com/chribsen/simple-machine-learning-examples/blob/dc94e52a4cebdc8bb959ff88b81ff8cfeca25022/venv/lib/python2.7/site-packages/pandas/indexes/base.py#L3090-L3147
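A usage sketch against an older pandas matching this record's `pandas/indexes/base.py` layout; note that newer pandas versions have removed the `kind` argument:

```
import pandas as pd

idx = pd.Index(list("abcdef"))
print(idx.get_slice_bound("c", side="left", kind="loc"))   # 2
print(idx.get_slice_bound("c", side="right", kind="loc"))  # 3
```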