Dataset columns:
repo: string (lengths 7-55)
path: string (lengths 4-223)
url: string (lengths 87-315)
code: string (lengths 75-104k)
code_tokens: list
docstring: string (lengths 1-46.9k)
docstring_tokens: list
language: string (1 class)
partition: string (3 classes)
avg_line_len: float64 (7.91-980)
peterbrittain/asciimatics
samples/maps.py
https://github.com/peterbrittain/asciimatics/blob/f471427d7786ce2d5f1eeb2dae0e67d19e46e085/samples/maps.py#L301-L312
def _draw_polygons(self, feature, bg, colour, extent, polygons, xo, yo):
    """Draw a set of polygons from a vector tile."""
    coords = []
    for polygon in polygons:
        coords.append([self._scale_coords(x, y, extent, xo, yo) for x, y in polygon])

    # Polygons are expensive to draw and the buildings layer is huge - so we convert to
    # lines in order to process updates fast enough to animate.
    if "type" in feature["properties"] and "building" in feature["properties"]["type"]:
        for line in coords:
            self._draw_lines_internal(line, colour, bg)
    else:
        self._screen.fill_polygon(coords, colour=colour, bg=bg)
[ "def", "_draw_polygons", "(", "self", ",", "feature", ",", "bg", ",", "colour", ",", "extent", ",", "polygons", ",", "xo", ",", "yo", ")", ":", "coords", "=", "[", "]", "for", "polygon", "in", "polygons", ":", "coords", ".", "append", "(", "[", "self", ".", "_scale_coords", "(", "x", ",", "y", ",", "extent", ",", "xo", ",", "yo", ")", "for", "x", ",", "y", "in", "polygon", "]", ")", "# Polygons are expensive to draw and the buildings layer is huge - so we convert to", "# lines in order to process updates fast enough to animate.", "if", "\"type\"", "in", "feature", "[", "\"properties\"", "]", "and", "\"building\"", "in", "feature", "[", "\"properties\"", "]", "[", "\"type\"", "]", ":", "for", "line", "in", "coords", ":", "self", ".", "_draw_lines_internal", "(", "line", ",", "colour", ",", "bg", ")", "else", ":", "self", ".", "_screen", ".", "fill_polygon", "(", "coords", ",", "colour", "=", "colour", ",", "bg", "=", "bg", ")" ]
Draw a set of polygons from a vector tile.
[ "Draw", "a", "set", "of", "polygons", "from", "a", "vector", "tile", "." ]
python
train
57.25
dddomodossola/remi
examples/examples_from_contributors/Display_TreeTable.py
https://github.com/dddomodossola/remi/blob/85206f62220662bb7ecd471042268def71ccad28/examples/examples_from_contributors/Display_TreeTable.py#L33-L49
def Define_TreeTable(self, heads, heads2=None):
    ''' Define a TreeTable with a heading row and optionally a second heading row. '''
    display_heads = []
    display_heads.append(tuple(heads[2:]))
    self.tree_table = TreeTable()
    self.tree_table.append_from_list(display_heads, fill_title=True)
    if heads2 is not None:
        heads2_color = heads2[1]
        row_widget = gui.TableRow()
        for index, field in enumerate(heads2[2:]):
            row_item = gui.TableItem(text=field, style={'background-color': heads2_color})
            row_widget.append(row_item, field)
        self.tree_table.append(row_widget, heads2[0])
    self.wid.append(self.tree_table)
[ "def", "Define_TreeTable", "(", "self", ",", "heads", ",", "heads2", "=", "None", ")", ":", "display_heads", "=", "[", "]", "display_heads", ".", "append", "(", "tuple", "(", "heads", "[", "2", ":", "]", ")", ")", "self", ".", "tree_table", "=", "TreeTable", "(", ")", "self", ".", "tree_table", ".", "append_from_list", "(", "display_heads", ",", "fill_title", "=", "True", ")", "if", "heads2", "is", "not", "None", ":", "heads2_color", "=", "heads2", "[", "1", "]", "row_widget", "=", "gui", ".", "TableRow", "(", ")", "for", "index", ",", "field", "in", "enumerate", "(", "heads2", "[", "2", ":", "]", ")", ":", "row_item", "=", "gui", ".", "TableItem", "(", "text", "=", "field", ",", "style", "=", "{", "'background-color'", ":", "heads2_color", "}", ")", "row_widget", ".", "append", "(", "row_item", ",", "field", ")", "self", ".", "tree_table", ".", "append", "(", "row_widget", ",", "heads2", "[", "0", "]", ")", "self", ".", "wid", ".", "append", "(", "self", ".", "tree_table", ")" ]
Define a TreeTable with a heading row and optionally a second heading row.
[ "Define", "a", "TreeTable", "with", "a", "heading", "row", "and", "optionally", "a", "second", "heading", "row", "." ]
python
train
45.647059
harlowja/fasteners
fasteners/process_lock.py
https://github.com/harlowja/fasteners/blob/8f3bbab0204a50037448a8fad7a6bf12eb1a2695/fasteners/process_lock.py#L265-L280
def interprocess_locked(path):
    """Acquires & releases an interprocess lock around a call into the decorated function."""
    lock = InterProcessLock(path)

    def decorator(f):

        @six.wraps(f)
        def wrapper(*args, **kwargs):
            with lock:
                return f(*args, **kwargs)

        return wrapper

    return decorator
[ "def", "interprocess_locked", "(", "path", ")", ":", "lock", "=", "InterProcessLock", "(", "path", ")", "def", "decorator", "(", "f", ")", ":", "@", "six", ".", "wraps", "(", "f", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "with", "lock", ":", "return", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper", "return", "decorator" ]
Acquires & releases a interprocess lock around call into decorated function.
[ "Acquires", "&", "releases", "a", "interprocess", "lock", "around", "call", "into", "decorated", "function", "." ]
python
train
21.1875
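A usage sketch for the decorator above, assuming the fasteners package is installed; the lock-file and counter paths are hypothetical examples:

import fasteners

@fasteners.interprocess_locked('/tmp/hypothetical.lock')
def bump_counter():
    # Only one process at a time runs this body; other processes block
    # on the file lock until it is released.
    with open('/tmp/counter.txt', 'a') as f:
        f.write('tick\n')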
renalreg/cornflake
cornflake/serializers.py
https://github.com/renalreg/cornflake/blob/ce0c0b260c95e84046f108d05773f1f130ae886c/cornflake/serializers.py#L95-L113
def _merge_fields(a, b):
    """Merge two lists of fields.

    Fields in `b` override fields in `a`. Fields in `a` are output first.
    """
    a_names = set(x[0] for x in a)
    b_names = set(x[0] for x in b)
    a_keep = a_names - b_names

    fields = []
    for name, field in a:
        if name in a_keep:
            fields.append((name, field))
    fields.extend(b)

    return fields
[ "def", "_merge_fields", "(", "a", ",", "b", ")", ":", "a_names", "=", "set", "(", "x", "[", "0", "]", "for", "x", "in", "a", ")", "b_names", "=", "set", "(", "x", "[", "0", "]", "for", "x", "in", "b", ")", "a_keep", "=", "a_names", "-", "b_names", "fields", "=", "[", "]", "for", "name", ",", "field", "in", "a", ":", "if", "name", "in", "a_keep", ":", "fields", ".", "append", "(", "(", "name", ",", "field", ")", ")", "fields", ".", "extend", "(", "b", ")", "return", "fields" ]
Merge two lists of fields. Fields in `b` override fields in `a`. Fields in `a` are output first.
[ "Merge", "two", "lists", "of", "fields", "." ]
python
train
19.894737
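A quick illustration of the merge semantics above, using plain strings as placeholder field objects:

a = [('id', 'IntegerField'), ('name', 'StringField')]
b = [('name', 'TrimmedStringField'), ('email', 'EmailField')]
print(_merge_fields(a, b))
# [('id', 'IntegerField'), ('name', 'TrimmedStringField'), ('email', 'EmailField')]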
ontio/ontology-python-sdk
ontology/smart_contract/neo_contract/abi/abi_function.py
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/smart_contract/neo_contract/abi/abi_function.py#L12-L22
def set_params_value(self, *params):
    """
    This interface is used to set the parameter values for a function in an abi file.
    """
    if len(params) != len(self.parameters):
        raise Exception("parameter error")
    temp = self.parameters
    self.parameters = []
    for i in range(len(params)):
        self.parameters.append(Parameter(temp[i]['name'], temp[i]['type']))
        self.parameters[i].set_value(params[i])
[ "def", "set_params_value", "(", "self", ",", "*", "params", ")", ":", "if", "len", "(", "params", ")", "!=", "len", "(", "self", ".", "parameters", ")", ":", "raise", "Exception", "(", "\"parameter error\"", ")", "temp", "=", "self", ".", "parameters", "self", ".", "parameters", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "params", ")", ")", ":", "self", ".", "parameters", ".", "append", "(", "Parameter", "(", "temp", "[", "i", "]", "[", "'name'", "]", ",", "temp", "[", "i", "]", "[", "'type'", "]", ")", ")", "self", ".", "parameters", "[", "i", "]", ".", "set_value", "(", "params", "[", "i", "]", ")" ]
This interface is used to set the parameter values for a function in an abi file.
[ "This", "interface", "is", "used", "to", "set", "parameter", "value", "for", "an", "function", "in", "abi", "file", "." ]
python
train
41.545455
gabstopper/smc-python
smc/core/collection.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/core/collection.py#L107-L119
def get_all_loopbacks(engine):
    """
    Get all loopback interfaces for a given engine
    """
    data = []
    if 'fw_cluster' in engine.type:
        for cvi in engine.data.get('loopback_cluster_virtual_interface', []):
            data.append(
                LoopbackClusterInterface(cvi, engine))
    for node in engine.nodes:
        for lb in node.data.get('loopback_node_dedicated_interface', []):
            data.append(LoopbackInterface(lb, engine))
    return data
[ "def", "get_all_loopbacks", "(", "engine", ")", ":", "data", "=", "[", "]", "if", "'fw_cluster'", "in", "engine", ".", "type", ":", "for", "cvi", "in", "engine", ".", "data", ".", "get", "(", "'loopback_cluster_virtual_interface'", ",", "[", "]", ")", ":", "data", ".", "append", "(", "LoopbackClusterInterface", "(", "cvi", ",", "engine", ")", ")", "for", "node", "in", "engine", ".", "nodes", ":", "for", "lb", "in", "node", ".", "data", ".", "get", "(", "'loopback_node_dedicated_interface'", ",", "[", "]", ")", ":", "data", ".", "append", "(", "LoopbackInterface", "(", "lb", ",", "engine", ")", ")", "return", "data" ]
Get all loopback interfaces for a given engine
[ "Get", "all", "loopback", "interfaces", "for", "a", "given", "engine" ]
python
train
36.076923
mgedmin/check-manifest
check_manifest.py
https://github.com/mgedmin/check-manifest/blob/7f787e8272f56c5750670bfb3223509e0df72708/check_manifest.py#L138-L160
def run(command, encoding=None, decode=True, cwd=None):
    """Run a command [cmd, arg1, arg2, ...].

    Returns the output (stdout + stderr).

    Raises CommandFailed in cases of error.
    """
    if not encoding:
        encoding = locale.getpreferredencoding()
    try:
        with open(os.devnull, 'rb') as devnull:
            pipe = subprocess.Popen(command, stdin=devnull,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT, cwd=cwd)
    except OSError as e:
        raise Failure("could not run %s: %s" % (command, e))
    output = pipe.communicate()[0]
    if decode:
        output = output.decode(encoding)
    status = pipe.wait()
    if status != 0:
        raise CommandFailed(command, status, output)
    return output
[ "def", "run", "(", "command", ",", "encoding", "=", "None", ",", "decode", "=", "True", ",", "cwd", "=", "None", ")", ":", "if", "not", "encoding", ":", "encoding", "=", "locale", ".", "getpreferredencoding", "(", ")", "try", ":", "with", "open", "(", "os", ".", "devnull", ",", "'rb'", ")", "as", "devnull", ":", "pipe", "=", "subprocess", ".", "Popen", "(", "command", ",", "stdin", "=", "devnull", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ",", "cwd", "=", "cwd", ")", "except", "OSError", "as", "e", ":", "raise", "Failure", "(", "\"could not run %s: %s\"", "%", "(", "command", ",", "e", ")", ")", "output", "=", "pipe", ".", "communicate", "(", ")", "[", "0", "]", "if", "decode", ":", "output", "=", "output", ".", "decode", "(", "encoding", ")", "status", "=", "pipe", ".", "wait", "(", ")", "if", "status", "!=", "0", ":", "raise", "CommandFailed", "(", "command", ",", "status", ",", "output", ")", "return", "output" ]
Run a command [cmd, arg1, arg2, ...]. Returns the output (stdout + stderr). Raises CommandFailed in cases of error.
[ "Run", "a", "command", "[", "cmd", "arg1", "arg2", "...", "]", "." ]
python
train
34.130435
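A usage sketch for the helper above, assuming git is on PATH and the working directory is a git checkout; a non-zero exit status raises CommandFailed:

files = run(['git', 'ls-files'])
for name in files.splitlines():
    print(name)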
mfcloud/python-zvm-sdk
zvmsdk/vmops.py
https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/zvmsdk/vmops.py#L134-L138
def guest_reboot(self, userid):
    """Reboot a guest vm."""
    LOG.info("Begin to reboot vm %s", userid)
    self._smtclient.guest_reboot(userid)
    LOG.info("Complete reboot vm %s", userid)
[ "def", "guest_reboot", "(", "self", ",", "userid", ")", ":", "LOG", ".", "info", "(", "\"Begin to reboot vm %s\"", ",", "userid", ")", "self", ".", "_smtclient", ".", "guest_reboot", "(", "userid", ")", "LOG", ".", "info", "(", "\"Complete reboot vm %s\"", ",", "userid", ")" ]
Reboot a guest vm.
[ "Reboot", "a", "guest", "vm", "." ]
python
train
41
bitesofcode/projexui
projexui/widgets/xswitchbutton.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xswitchbutton.py#L47-L61
def currentPixmapRect(self):
    """
    Returns the rect that defines the boundary for the current pixmap
    based on the size of the button and the size of the pixmap.

    :return     <QtCore.QRect>
    """
    pixmap = self.currentPixmap()
    rect = self.rect()
    size = pixmap.size()
    x = rect.center().x() - (size.width() / 2.0)
    y = rect.center().y() - (size.height() / 2.0)
    return QtCore.QRect(x, y, size.width(), size.height())
[ "def", "currentPixmapRect", "(", "self", ")", ":", "pixmap", "=", "self", ".", "currentPixmap", "(", ")", "rect", "=", "self", ".", "rect", "(", ")", "size", "=", "pixmap", ".", "size", "(", ")", "x", "=", "rect", ".", "center", "(", ")", ".", "x", "(", ")", "-", "(", "size", ".", "width", "(", ")", "/", "2.0", ")", "y", "=", "rect", ".", "center", "(", ")", ".", "y", "(", ")", "-", "(", "size", ".", "height", "(", ")", "/", "2.0", ")", "return", "QtCore", ".", "QRect", "(", "x", ",", "y", ",", "size", ".", "width", "(", ")", ",", "size", ".", "height", "(", ")", ")" ]
Returns the rect that defines the boundary for the current pixmap based on the size of the button and the size of the pixmap.

:return     <QtCore.QRect>
[ "Returns", "the", "rect", "that", "defines", "the", "boundary", "for", "the", "current", "pixmap", "based", "on", "the", "size", "of", "the", "button", "and", "the", "size", "of", "the", "pixmap", ".", ":", "return", "<QtCore", ".", "QRect", ">" ]
python
train
34.933333
DarkEnergySurvey/ugali
ugali/analysis/model.py
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/model.py#L98-L113
def getp(self, name):
    """
    Get the named parameter.

    Parameters
    ----------
    name : string
        The parameter name.

    Returns
    -------
    param :
        The parameter object.
    """
    name = self._mapping.get(name, name)
    return self.params[name]
[ "def", "getp", "(", "self", ",", "name", ")", ":", "name", "=", "self", ".", "_mapping", ".", "get", "(", "name", ",", "name", ")", "return", "self", ".", "params", "[", "name", "]" ]
Get the named parameter.

Parameters
----------
name : string
    The parameter name.

Returns
-------
param :
    The parameter object.
[ "Get", "the", "named", "parameter", "." ]
python
train
19.875
pycontribs/pyrax
pyrax/cloudblockstorage.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/cloudblockstorage.py#L483-L490
def update(self, volume, display_name=None, display_description=None):
    """
    Update the specified values on the specified volume. You may specify
    one or more values to update. If no values are specified as non-None,
    the call is a no-op; no exception will be raised.
    """
    return volume.update(display_name=display_name,
            display_description=display_description)
[ "def", "update", "(", "self", ",", "volume", ",", "display_name", "=", "None", ",", "display_description", "=", "None", ")", ":", "return", "volume", ".", "update", "(", "display_name", "=", "display_name", ",", "display_description", "=", "display_description", ")" ]
Update the specified values on the specified volume. You may specify one or more values to update. If no values are specified as non-None, the call is a no-op; no exception will be raised.
[ "Update", "the", "specified", "values", "on", "the", "specified", "volume", ".", "You", "may", "specify", "one", "or", "more", "values", "to", "update", ".", "If", "no", "values", "are", "specified", "as", "non", "-", "None", "the", "call", "is", "a", "no", "-", "op", ";", "no", "exception", "will", "be", "raised", "." ]
python
train
51.625
datadesk/django-bakery
bakery/views/dates.py
https://github.com/datadesk/django-bakery/blob/e2feb13a66552a388fbcfaaacdd504bba08d3c69/bakery/views/dates.py#L103-L111
def build_year(self, dt):
    """
    Build the page for the provided year.
    """
    self.year = str(dt.year)
    logger.debug("Building %s" % self.year)
    self.request = self.create_request(self.get_url())
    target_path = self.get_build_path()
    self.build_file(target_path, self.get_content())
[ "def", "build_year", "(", "self", ",", "dt", ")", ":", "self", ".", "year", "=", "str", "(", "dt", ".", "year", ")", "logger", ".", "debug", "(", "\"Building %s\"", "%", "self", ".", "year", ")", "self", ".", "request", "=", "self", ".", "create_request", "(", "self", ".", "get_url", "(", ")", ")", "target_path", "=", "self", ".", "get_build_path", "(", ")", "self", ".", "build_file", "(", "target_path", ",", "self", ".", "get_content", "(", ")", ")" ]
Build the page for the provided year.
[ "Build", "the", "page", "for", "the", "provided", "year", "." ]
python
train
36.444444
azraq27/neural
neural/freesurfer.py
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/freesurfer.py#L34-L41
def mgz_to_nifti(filename, prefix=None, gzip=True):
    '''Convert ``filename`` to a NIFTI file using ``mri_convert``'''
    setup_freesurfer()
    if prefix is None:
        prefix = nl.prefix(filename) + '.nii'
    if gzip and not prefix.endswith('.gz'):
        prefix += '.gz'
    nl.run([os.path.join(freesurfer_home, 'bin', 'mri_convert'), filename, prefix], products=prefix)
[ "def", "mgz_to_nifti", "(", "filename", ",", "prefix", "=", "None", ",", "gzip", "=", "True", ")", ":", "setup_freesurfer", "(", ")", "if", "prefix", "==", "None", ":", "prefix", "=", "nl", ".", "prefix", "(", "filename", ")", "+", "'.nii'", "if", "gzip", "and", "not", "prefix", ".", "endswith", "(", "'.gz'", ")", ":", "prefix", "+=", "'.gz'", "nl", ".", "run", "(", "[", "os", ".", "path", ".", "join", "(", "freesurfer_home", ",", "'bin'", ",", "'mri_convert'", ")", ",", "filename", ",", "prefix", "]", ",", "products", "=", "prefix", ")" ]
Convert ``filename`` to a NIFTI file using ``mri_convert``
[ "Convert", "filename", "to", "a", "NIFTI", "file", "using", "mri_convert" ]
python
train
45.625
StackStorm/pybind
pybind/nos/v6_0_2f/zoning/defined_configuration/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/zoning/defined_configuration/__init__.py#L96-L117
def _set_cfg(self, v, load=False):
    """
    Setter method for cfg, mapped from YANG variable /zoning/defined_configuration/cfg (list)
    If this variable is read-only (config: false) in the source YANG file, then _set_cfg is
    considered as a private method. Backends looking to populate this variable should do so
    via calling thisObj._set_cfg() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=YANGListType("cfg_name", cfg.cfg, yang_name="cfg", rest_name="cfg", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='cfg-name', extensions={u'tailf-common': {u'info': u'List of defined CFGs', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'zone_defined_cfg'}}), is_container='list', yang_name="cfg", rest_name="cfg", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'List of defined CFGs', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'zone_defined_cfg'}}, namespace='urn:brocade.com:mgmt:brocade-zone', defining_module='brocade-zone', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """cfg must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("cfg_name",cfg.cfg, yang_name="cfg", rest_name="cfg", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='cfg-name', extensions={u'tailf-common': {u'info': u'List of defined CFGs', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'zone_defined_cfg'}}), is_container='list', yang_name="cfg", rest_name="cfg", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'List of defined CFGs', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'zone_defined_cfg'}}, namespace='urn:brocade.com:mgmt:brocade-zone', defining_module='brocade-zone', yang_type='list', is_config=True)""",
        })
    self.__cfg = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_cfg", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "YANGListType", "(", "\"cfg_name\"", ",", "cfg", ".", "cfg", ",", "yang_name", "=", "\"cfg\"", ",", "rest_name", "=", "\"cfg\"", ",", "parent", "=", "self", ",", "is_container", "=", "'list'", ",", "user_ordered", "=", "False", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "yang_keys", "=", "'cfg-name'", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'List of defined CFGs'", ",", "u'cli-no-key-completion'", ":", "None", ",", "u'cli-full-no'", ":", "None", ",", "u'cli-suppress-key-abbreviation'", ":", "None", ",", "u'cli-full-command'", ":", "None", ",", "u'callpoint'", ":", "u'zone_defined_cfg'", "}", "}", ")", ",", "is_container", "=", "'list'", ",", "yang_name", "=", "\"cfg\"", ",", "rest_name", "=", "\"cfg\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'List of defined CFGs'", ",", "u'cli-no-key-completion'", ":", "None", ",", "u'cli-full-no'", ":", "None", ",", "u'cli-suppress-key-abbreviation'", ":", "None", ",", "u'cli-full-command'", ":", "None", ",", "u'callpoint'", ":", "u'zone_defined_cfg'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-zone'", ",", "defining_module", "=", "'brocade-zone'", ",", "yang_type", "=", "'list'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"cfg must be of a type compatible with list\"\"\"", ",", "'defined-type'", ":", "\"list\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=YANGListType(\"cfg_name\",cfg.cfg, yang_name=\"cfg\", rest_name=\"cfg\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='cfg-name', extensions={u'tailf-common': {u'info': u'List of defined CFGs', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'zone_defined_cfg'}}), is_container='list', yang_name=\"cfg\", rest_name=\"cfg\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'List of defined CFGs', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'zone_defined_cfg'}}, namespace='urn:brocade.com:mgmt:brocade-zone', defining_module='brocade-zone', yang_type='list', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__cfg", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for cfg, mapped from YANG variable /zoning/defined_configuration/cfg (list)
If this variable is read-only (config: false) in the source YANG file, then _set_cfg is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_cfg() directly.
[ "Setter", "method", "for", "cfg", "mapped", "from", "YANG", "variable", "/", "zoning", "/", "defined_configuration", "/", "cfg", "(", "list", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_cfg", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_cfg", "()", "directly", "." ]
python
train
114.136364
kensho-technologies/graphql-compiler
graphql_compiler/compiler/emit_sql.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/emit_sql.py#L93-L108
def _create_query(node, context):
    """Create a query from a SqlNode.

    Args:
        node: SqlNode, the current node.
        context: CompilationContext, global compilation state and metadata.

    Returns:
        Selectable, selectable of the generated query.
    """
    visited_nodes = [node]
    output_columns = _get_output_columns(visited_nodes, context)
    filters = _get_filters(visited_nodes, context)
    selectable = sql_context_helpers.get_node_selectable(node, context)
    query = select(output_columns).select_from(selectable).where(and_(*filters))
    return query
[ "def", "_create_query", "(", "node", ",", "context", ")", ":", "visited_nodes", "=", "[", "node", "]", "output_columns", "=", "_get_output_columns", "(", "visited_nodes", ",", "context", ")", "filters", "=", "_get_filters", "(", "visited_nodes", ",", "context", ")", "selectable", "=", "sql_context_helpers", ".", "get_node_selectable", "(", "node", ",", "context", ")", "query", "=", "select", "(", "output_columns", ")", ".", "select_from", "(", "selectable", ")", ".", "where", "(", "and_", "(", "*", "filters", ")", ")", "return", "query" ]
Create a query from a SqlNode.

Args:
    node: SqlNode, the current node.
    context: CompilationContext, global compilation state and metadata.

Returns:
    Selectable, selectable of the generated query.
[ "Create", "a", "query", "from", "a", "SqlNode", "." ]
python
train
35.875
jaraco/hgtools
hgtools/versioning.py
https://github.com/jaraco/hgtools/blob/bf5fe2324e5ae15e012487f95f0c97c3775c5d2e/hgtools/versioning.py#L70-L77
def as_number(self):
    """
    >>> round(SummableVersion('1.9.3').as_number(), 12)
    1.93
    """
    def combine(subver, ver):
        return subver / 10 + ver
    return reduce(combine, reversed(self.version))
[ "def", "as_number", "(", "self", ")", ":", "def", "combine", "(", "subver", ",", "ver", ")", ":", "return", "subver", "/", "10", "+", "ver", "return", "reduce", "(", "combine", ",", "reversed", "(", "self", ".", "version", ")", ")" ]
>>> round(SummableVersion('1.9.3').as_number(), 12) 1.93
[ ">>>", "round", "(", "SummableVersion", "(", "1", ".", "9", ".", "3", ")", ".", "as_number", "()", "12", ")", "1", ".", "93" ]
python
train
23.875
SuperCowPowers/workbench
workbench_apps/workbench_cli/help_content.py
https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench_apps/workbench_cli/help_content.py#L39-L50
def help_cli_search(self):
    """ Help for Workbench CLI Search """
    help = '%sSearch: %s returns sample_sets, a sample_set is a set/list of md5s.' % (color.Yellow, color.Green)
    help += '\n\n\t%sSearch for all samples in the database that are known bad pe files,' % (color.Green)
    help += '\n\t%sthis command returns the sample_set containing the matching items' % (color.Green)
    help += '\n\t%s> my_bad_exes = search([\'bad\', \'exe\'])' % (color.LightBlue)
    help += '\n\n\t%sRun workers on this sample_set:' % (color.Green)
    help += '\n\t%s> pe_outputs = pe_features(my_bad_exes) %s' % (color.LightBlue, color.Normal)
    help += '\n\n\t%sLoop on the generator (or make a DataFrame see >help dataframe)' % (color.Green)
    help += '\n\t%s> for output in pe_outputs: %s' % (color.LightBlue, color.Normal)
    help += '\n\t\t%s print output %s' % (color.LightBlue, color.Normal)
    return help
[ "def", "help_cli_search", "(", "self", ")", ":", "help", "=", "'%sSearch: %s returns sample_sets, a sample_set is a set/list of md5s.'", "%", "(", "color", ".", "Yellow", ",", "color", ".", "Green", ")", "help", "+=", "'\\n\\n\\t%sSearch for all samples in the database that are known bad pe files,'", "%", "(", "color", ".", "Green", ")", "help", "+=", "'\\n\\t%sthis command returns the sample_set containing the matching items'", "%", "(", "color", ".", "Green", ")", "help", "+=", "'\\n\\t%s> my_bad_exes = search([\\'bad\\', \\'exe\\'])'", "%", "(", "color", ".", "LightBlue", ")", "help", "+=", "'\\n\\n\\t%sRun workers on this sample_set:'", "%", "(", "color", ".", "Green", ")", "help", "+=", "'\\n\\t%s> pe_outputs = pe_features(my_bad_exes) %s'", "%", "(", "color", ".", "LightBlue", ",", "color", ".", "Normal", ")", "help", "+=", "'\\n\\n\\t%sLoop on the generator (or make a DataFrame see >help dataframe)'", "%", "(", "color", ".", "Green", ")", "help", "+=", "'\\n\\t%s> for output in pe_outputs: %s'", "%", "(", "color", ".", "LightBlue", ",", "color", ".", "Normal", ")", "help", "+=", "'\\n\\t\\t%s print output %s'", "%", "(", "color", ".", "LightBlue", ",", "color", ".", "Normal", ")", "return", "help" ]
Help for Workbench CLI Search
[ "Help", "for", "Workbench", "CLI", "Search" ]
python
train
79.25
jtambasco/gnuplotpy
gnuplotpy/gnuplot.py
https://github.com/jtambasco/gnuplotpy/blob/0e67fa0b839f94981f8e18dfd42c30f98b68f500/gnuplotpy/gnuplot.py#L98-L140
def gnuplot_2d(x, y, filename, title='', x_label='', y_label=''):
    '''
    Function to produce a general 2D plot.

    Args:
        x (list): x points.
        y (list): y points.
        filename (str): Filename of the output image.
        title (str): Title of the plot. Default is '' (no title).
        x_label (str): x-axis label.
        y_label (str): y-axis label.
    '''
    _, ext = os.path.splitext(filename)
    if ext != '.png':
        filename += '.png'

    gnuplot_cmds = \
    '''
    set datafile separator ","
    set term pngcairo size 30cm,25cm
    set out filename

    unset key
    set border lw 1.5
    set grid lt -1 lc rgb "gray80"

    set title title
    set xlabel x_label
    set ylabel y_label

    plot filename_data u 1:2 w lp pt 6 ps 0.5
    '''
    scr = _GnuplotScriptTemp(gnuplot_cmds)
    data = _GnuplotDataTemp(x, y)

    args_dict = {
        'filename': filename,
        'filename_data': data.name,
        'title': title,
        'x_label': x_label,
        'y_label': y_label
    }
    gnuplot(scr.name, args_dict)
[ "def", "gnuplot_2d", "(", "x", ",", "y", ",", "filename", ",", "title", "=", "''", ",", "x_label", "=", "''", ",", "y_label", "=", "''", ")", ":", "_", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "if", "ext", "!=", "'.png'", ":", "filename", "+=", "'.png'", "gnuplot_cmds", "=", "'''\n set datafile separator \",\"\n set term pngcairo size 30cm,25cm\n set out filename\n\n unset key\n set border lw 1.5\n set grid lt -1 lc rgb \"gray80\"\n\n set title title\n set xlabel x_label\n set ylabel y_label\n\n plot filename_data u 1:2 w lp pt 6 ps 0.5\n '''", "scr", "=", "_GnuplotScriptTemp", "(", "gnuplot_cmds", ")", "data", "=", "_GnuplotDataTemp", "(", "x", ",", "y", ")", "args_dict", "=", "{", "'filename'", ":", "filename", ",", "'filename_data'", ":", "data", ".", "name", ",", "'title'", ":", "title", ",", "'x_label'", ":", "x_label", ",", "'y_label'", ":", "y_label", "}", "gnuplot", "(", "scr", ".", "name", ",", "args_dict", ")" ]
Function to produce a general 2D plot.

Args:
    x (list): x points.
    y (list): y points.
    filename (str): Filename of the output image.
    title (str): Title of the plot. Default is '' (no title).
    x_label (str): x-axis label.
    y_label (str): y-axis label.
[ "Function", "to", "produce", "a", "general", "2D", "plot", "." ]
python
train
23.860465
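A usage sketch for gnuplot_2d, assuming the gnuplot binary (with the pngcairo terminal) is installed:

import math

x = [i * 0.1 for i in range(100)]
y = [math.sin(v) for v in x]
# The '.png' extension is appended automatically, so this writes sine.png.
gnuplot_2d(x, y, 'sine', title='sin(x)', x_label='x', y_label='y')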
prompt-toolkit/pymux
pymux/layout.py
https://github.com/prompt-toolkit/pymux/blob/3f66e62b9de4b2251c7f9afad6c516dc5a30ec67/pymux/layout.py#L317-L339
def _get_status_tokens(self):
    " The tokens for the status bar. "
    result = []

    # Display panes.
    for i, w in enumerate(self.pymux.arrangement.windows):
        if i > 0:
            result.append(('', ' '))

        if w == self.pymux.arrangement.get_active_window():
            style = 'class:window.current'
            format_str = self.pymux.window_status_current_format
        else:
            style = 'class:window'
            format_str = self.pymux.window_status_format

        result.append((
            style,
            format_pymux_string(self.pymux, format_str, window=w),
            self._create_select_window_handler(w)))

    return result
[ "def", "_get_status_tokens", "(", "self", ")", ":", "result", "=", "[", "]", "# Display panes.", "for", "i", ",", "w", "in", "enumerate", "(", "self", ".", "pymux", ".", "arrangement", ".", "windows", ")", ":", "if", "i", ">", "0", ":", "result", ".", "append", "(", "(", "''", ",", "' '", ")", ")", "if", "w", "==", "self", ".", "pymux", ".", "arrangement", ".", "get_active_window", "(", ")", ":", "style", "=", "'class:window.current'", "format_str", "=", "self", ".", "pymux", ".", "window_status_current_format", "else", ":", "style", "=", "'class:window'", "format_str", "=", "self", ".", "pymux", ".", "window_status_format", "result", ".", "append", "(", "(", "style", ",", "format_pymux_string", "(", "self", ".", "pymux", ",", "format_str", ",", "window", "=", "w", ")", ",", "self", ".", "_create_select_window_handler", "(", "w", ")", ")", ")", "return", "result" ]
The tokens for the status bar.
[ "The", "tokens", "for", "the", "status", "bar", "." ]
python
train
31.478261
gnosis/gnosis-py
gnosis/safe/safe_creation_tx.py
https://github.com/gnosis/gnosis-py/blob/2a9a5d75a375fc9813ac04df133e6910c82f9d49/gnosis/safe/safe_creation_tx.py#L210-L236
def _build_contract_creation_tx_with_valid_signature(self, tx_dict: Dict[str, None], s: int) -> Transaction:
    """
    Use pyethereum `Transaction` to generate valid tx using a random signature
    :param tx_dict: Web3 tx dictionary
    :param s: Signature s value
    :return: PyEthereum creation tx for the proxy contract
    """
    zero_address = HexBytes('0x' + '0' * 40)
    f_address = HexBytes('0x' + 'f' * 40)
    nonce = tx_dict['nonce']
    gas_price = tx_dict['gasPrice']
    gas = tx_dict['gas']
    to = tx_dict.get('to', b'')  # Contract creation should always have `to` empty
    value = tx_dict['value']
    data = tx_dict['data']
    for _ in range(100):
        try:
            v, r = self.find_valid_random_signature(s)
            contract_creation_tx = Transaction(nonce, gas_price, gas, to, value, HexBytes(data), v=v, r=r, s=s)
            sender_address = contract_creation_tx.sender
            contract_address = contract_creation_tx.creates
            if sender_address in (zero_address, f_address) or contract_address in (zero_address, f_address):
                raise InvalidTransaction
            return contract_creation_tx
        except InvalidTransaction:
            pass
    raise ValueError('Valid signature not found with s=%d', s)
[ "def", "_build_contract_creation_tx_with_valid_signature", "(", "self", ",", "tx_dict", ":", "Dict", "[", "str", ",", "None", "]", ",", "s", ":", "int", ")", "->", "Transaction", ":", "zero_address", "=", "HexBytes", "(", "'0x'", "+", "'0'", "*", "40", ")", "f_address", "=", "HexBytes", "(", "'0x'", "+", "'f'", "*", "40", ")", "nonce", "=", "tx_dict", "[", "'nonce'", "]", "gas_price", "=", "tx_dict", "[", "'gasPrice'", "]", "gas", "=", "tx_dict", "[", "'gas'", "]", "to", "=", "tx_dict", ".", "get", "(", "'to'", ",", "b''", ")", "# Contract creation should always have `to` empty", "value", "=", "tx_dict", "[", "'value'", "]", "data", "=", "tx_dict", "[", "'data'", "]", "for", "_", "in", "range", "(", "100", ")", ":", "try", ":", "v", ",", "r", "=", "self", ".", "find_valid_random_signature", "(", "s", ")", "contract_creation_tx", "=", "Transaction", "(", "nonce", ",", "gas_price", ",", "gas", ",", "to", ",", "value", ",", "HexBytes", "(", "data", ")", ",", "v", "=", "v", ",", "r", "=", "r", ",", "s", "=", "s", ")", "sender_address", "=", "contract_creation_tx", ".", "sender", "contract_address", "=", "contract_creation_tx", ".", "creates", "if", "sender_address", "in", "(", "zero_address", ",", "f_address", ")", "or", "contract_address", "in", "(", "zero_address", ",", "f_address", ")", ":", "raise", "InvalidTransaction", "return", "contract_creation_tx", "except", "InvalidTransaction", ":", "pass", "raise", "ValueError", "(", "'Valid signature not found with s=%d'", ",", "s", ")" ]
Use pyethereum `Transaction` to generate valid tx using a random signature

:param tx_dict: Web3 tx dictionary
:param s: Signature s value
:return: PyEthereum creation tx for the proxy contract
[ "Use", "pyethereum", "Transaction", "to", "generate", "valid", "tx", "using", "a", "random", "signature", ":", "param", "tx_dict", ":", "Web3", "tx", "dictionary", ":", "param", "s", ":", "Signature", "s", "value", ":", "return", ":", "PyEthereum", "creation", "tx", "for", "the", "proxy", "contract" ]
python
test
50.148148
archman/beamline
beamline/models.py
https://github.com/archman/beamline/blob/417bc5dc13e754bc89d246427984590fced64d07/beamline/models.py#L146-L159
def getAllConfig(self, fmt='json'):
    """ return all element configurations as json string file.

    could be further processed by beamline.Lattice class

    :param fmt: 'json' (default) or 'dict'
    """
    for e in self.getCtrlConf(msgout=False):
        self._lattice_confdict.update(e.dumpConfig(type='simu'))
    self._lattice_confdict.update(self._lattice.dumpConfig())
    if fmt == 'json':
        return json.dumps(self._lattice_confdict)
    else:
        return self._lattice_confdict
[ "def", "getAllConfig", "(", "self", ",", "fmt", "=", "'json'", ")", ":", "for", "e", "in", "self", ".", "getCtrlConf", "(", "msgout", "=", "False", ")", ":", "self", ".", "_lattice_confdict", ".", "update", "(", "e", ".", "dumpConfig", "(", "type", "=", "'simu'", ")", ")", "self", ".", "_lattice_confdict", ".", "update", "(", "self", ".", "_lattice", ".", "dumpConfig", "(", ")", ")", "if", "fmt", "==", "'json'", ":", "return", "json", ".", "dumps", "(", "self", ".", "_lattice_confdict", ")", "else", ":", "return", "self", ".", "_lattice_confdict" ]
return all element configurations as json string file.
could be further processed by beamline.Lattice class

:param fmt: 'json' (default) or 'dict'
[ "return", "all", "element", "configurations", "as", "json", "string", "file", ".", "could", "be", "further", "processed", "by", "beamline", ".", "Lattice", "class" ]
python
train
39.285714
RazerM/parver
src/parver/_helpers.py
https://github.com/RazerM/parver/blob/e5133085ac8c4619e7c392223a2e78ba394c2a10/src/parver/_helpers.py#L123-L169
def kwonly_args(kws, required, withdefaults=(), leftovers=False):
    """
    Based on the snippet by Eric Snow
    http://code.activestate.com/recipes/577940

    SPDX-License-Identifier: MIT
    """
    if hasattr(withdefaults, 'items'):
        # allows for OrderedDict to be passed
        withdefaults = withdefaults.items()

    kwonly = []

    # extract the required keyword-only arguments
    missing = []
    for name in required:
        if name not in kws:
            missing.append(name)
        else:
            kwonly.append(kws.pop(name))

    # validate required keyword-only arguments
    if missing:
        if len(missing) > 2:
            end = 's: %s, and %s' % (', '.join(missing[:-1]), missing[-1])
        elif len(missing) == 2:
            end = 's: %s and %s' % tuple(missing)
        else:
            end = ': %s' % tuple(missing)
        msg = 'missing %s required keyword-only argument%s'
        raise TypeError(msg % (len(missing), end))

    # handle the withdefaults
    for name, value in withdefaults:
        if name not in kws:
            kwonly.append(value)
        else:
            kwonly.append(kws.pop(name))

    # handle any leftovers
    if not leftovers and kws:
        msg = "got an unexpected keyword argument '%s'"
        raise TypeError(msg % (kws.keys()[0]))

    return [kws] + kwonly
[ "def", "kwonly_args", "(", "kws", ",", "required", ",", "withdefaults", "=", "(", ")", ",", "leftovers", "=", "False", ")", ":", "if", "hasattr", "(", "withdefaults", ",", "'items'", ")", ":", "# allows for OrderedDict to be passed", "withdefaults", "=", "withdefaults", ".", "items", "(", ")", "kwonly", "=", "[", "]", "# extract the required keyword-only arguments", "missing", "=", "[", "]", "for", "name", "in", "required", ":", "if", "name", "not", "in", "kws", ":", "missing", ".", "append", "(", "name", ")", "else", ":", "kwonly", ".", "append", "(", "kws", ".", "pop", "(", "name", ")", ")", "# validate required keyword-only arguments", "if", "missing", ":", "if", "len", "(", "missing", ")", ">", "2", ":", "end", "=", "'s: %s, and %s'", "%", "(", "', '", ".", "join", "(", "missing", "[", ":", "-", "1", "]", ")", ",", "missing", "[", "-", "1", "]", ")", "elif", "len", "(", "missing", ")", "==", "2", ":", "end", "=", "'s: %s and %s'", "%", "tuple", "(", "missing", ")", "else", ":", "end", "=", "': %s'", "%", "tuple", "(", "missing", ")", "msg", "=", "'missing %s required keyword-only argument%s'", "raise", "TypeError", "(", "msg", "%", "(", "len", "(", "missing", ")", ",", "end", ")", ")", "# handle the withdefaults", "for", "name", ",", "value", "in", "withdefaults", ":", "if", "name", "not", "in", "kws", ":", "kwonly", ".", "append", "(", "value", ")", "else", ":", "kwonly", ".", "append", "(", "kws", ".", "pop", "(", "name", ")", ")", "# handle any leftovers", "if", "not", "leftovers", "and", "kws", ":", "msg", "=", "\"got an unexpected keyword argument '%s'\"", "raise", "TypeError", "(", "msg", "%", "(", "kws", ".", "keys", "(", ")", "[", "0", "]", ")", ")", "return", "[", "kws", "]", "+", "kwonly" ]
Based on the snippet by Eric Snow
http://code.activestate.com/recipes/577940

SPDX-License-Identifier: MIT
[ "Based", "on", "the", "snippet", "by", "Eric", "Snow", "http", ":", "//", "code", ".", "activestate", ".", "com", "/", "recipes", "/", "577940" ]
python
train
27.680851
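A usage sketch of the helper above: 'port' is required, 'timeout' is optional with a default of 30, and any unrecognised keyword raises TypeError:

def connect(host, **kwargs):
    # Returns [remaining kwargs] followed by the extracted values, in order.
    kwargs, port, timeout = kwonly_args(kwargs, ['port'], [('timeout', 30)])
    return (host, port, timeout)

print(connect('example.com', port=8080))             # ('example.com', 8080, 30)
print(connect('example.com', port=8080, timeout=5))  # ('example.com', 8080, 5)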
AguaClara/aide_document-DEPRECATED
aide_document/translate.py
https://github.com/AguaClara/aide_document-DEPRECATED/blob/3f3b5c9f321264e0e4d8ed68dfbc080762579815/aide_document/translate.py#L6-L47
def replace(dict, line):
    """
    Find and replace the special words according to the dictionary.

    Parameters
    ==========
    dict : Dictionary
        A dictionary derived from a yaml file. Source language as keys and the target language as values.
    line : String
        A string that needs to be processed.
    """
    words = line.split()
    new_line = ""
    for word in words:
        fst = word[0]
        last = word[-1]
        # Check if the word ends with a punctuation
        if last == "," or last == ";" or last == ".":
            clean_word = word[0:-1]
            last = last + " "
        elif last == "]":
            clean_word = word[0:-1]
        else:
            clean_word = word
            last = " "
        # Check if the word starts with "["
        if fst == "[":
            clean_word = clean_word[1:]
        else:
            clean_word = clean_word
            fst = ""
        find = dict.get(clean_word)
        if find is None:
            new_line = new_line + fst + str(clean_word) + last
        else:
            new_line = new_line + fst + str(find) + last
    return new_line
[ "def", "replace", "(", "dict", ",", "line", ")", ":", "words", "=", "line", ".", "split", "(", ")", "new_line", "=", "\"\"", "for", "word", "in", "words", ":", "fst", "=", "word", "[", "0", "]", "last", "=", "word", "[", "-", "1", "]", "# Check if the word ends with a punctuation", "if", "last", "==", "\",\"", "or", "last", "==", "\";\"", "or", "last", "==", "\".\"", ":", "clean_word", "=", "word", "[", "0", ":", "-", "1", "]", "last", "=", "last", "+", "\" \"", "elif", "last", "==", "\"]\"", ":", "clean_word", "=", "word", "[", "0", ":", "-", "1", "]", "else", ":", "clean_word", "=", "word", "last", "=", "\" \"", "# Check if the word starts with \"[\"", "if", "fst", "==", "\"[\"", ":", "clean_word", "=", "clean_word", "[", "1", ":", "]", "else", ":", "clean_word", "=", "clean_word", "fst", "=", "\"\"", "find", "=", "dict", ".", "get", "(", "clean_word", ")", "if", "find", "==", "None", ":", "new_line", "=", "new_line", "+", "fst", "+", "str", "(", "clean_word", ")", "+", "last", "else", ":", "new_line", "=", "new_line", "+", "fst", "+", "str", "(", "find", ")", "+", "last", "return", "new_line" ]
Find and replace the special words according to the dictionary.

Parameters
==========
dict : Dictionary
    A dictionary derived from a yaml file. Source language as keys and the target language as values.
line : String
    A string that needs to be processed.
[ "Find", "and", "replace", "the", "special", "words", "according", "to", "the", "dictionary", "." ]
python
train
26.309524
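A small worked example of the function above, with a hypothetical two-entry dictionary:

dictionary = {'hello': 'hola', 'world': 'mundo'}
print(replace(dictionary, 'hello world, friend.'))
# 'hola mundo, friend. ' (every word is emitted with a trailing separator)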
jnrbsn/daemonocle
daemonocle/core.py
https://github.com/jnrbsn/daemonocle/blob/a1e09bc99608eab8dfe024c6741b7ecb7143f717/daemonocle/core.py#L78-L86
def _setup_piddir(self):
    """Create the directory for the PID file if necessary."""
    if self.pidfile is None:
        return
    piddir = os.path.dirname(self.pidfile)
    if not os.path.isdir(piddir):
        # Create the directory with sensible mode and ownership
        os.makedirs(piddir, 0o777 & ~self.umask)
        os.chown(piddir, self.uid, self.gid)
[ "def", "_setup_piddir", "(", "self", ")", ":", "if", "self", ".", "pidfile", "is", "None", ":", "return", "piddir", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "pidfile", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "piddir", ")", ":", "# Create the directory with sensible mode and ownership", "os", ".", "makedirs", "(", "piddir", ",", "0o777", "&", "~", "self", ".", "umask", ")", "os", ".", "chown", "(", "piddir", ",", "self", ".", "uid", ",", "self", ".", "gid", ")" ]
Create the directory for the PID file if necessary.
[ "Create", "the", "directory", "for", "the", "PID", "file", "if", "necessary", "." ]
python
train
43.222222
klahnakoski/pyLibrary
mo_threads/lock.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/mo_threads/lock.py#L75-L109
def wait(self, till=None):
    """
    THE ASSUMPTION IS wait() WILL ALWAYS RETURN WITH THE LOCK ACQUIRED
    :param till: WHEN TO GIVE UP WAITING FOR ANOTHER THREAD TO SIGNAL
    :return: True IF SIGNALED TO GO, False IF till WAS SIGNALED
    """
    waiter = Signal()
    if self.waiting:
        DEBUG and _Log.note("waiting with {{num}} others on {{name|quote}}", num=len(self.waiting), name=self.name, stack_depth=1)
        self.waiting.insert(0, waiter)
    else:
        DEBUG and _Log.note("waiting by self on {{name|quote}}", name=self.name)
        self.waiting = [waiter]

    try:
        self.lock.release()
        DEBUG and _Log.note("out of lock {{name|quote}}", name=self.name)
        (waiter | till).wait()
        if DEBUG:
            _Log.note("done minimum wait (for signal {{till|quote}})", till=till.name if till else "", name=self.name)
    except Exception as e:
        if not _Log:
            _late_import()
        _Log.warning("problem", cause=e)
    finally:
        self.lock.acquire()
        DEBUG and _Log.note("re-acquired lock {{name|quote}}", name=self.name)
        try:
            self.waiting.remove(waiter)
            DEBUG and _Log.note("removed own signal from {{name|quote}}", name=self.name)
        except Exception:
            pass

    return bool(waiter)
[ "def", "wait", "(", "self", ",", "till", "=", "None", ")", ":", "waiter", "=", "Signal", "(", ")", "if", "self", ".", "waiting", ":", "DEBUG", "and", "_Log", ".", "note", "(", "\"waiting with {{num}} others on {{name|quote}}\"", ",", "num", "=", "len", "(", "self", ".", "waiting", ")", ",", "name", "=", "self", ".", "name", ",", "stack_depth", "=", "1", ")", "self", ".", "waiting", ".", "insert", "(", "0", ",", "waiter", ")", "else", ":", "DEBUG", "and", "_Log", ".", "note", "(", "\"waiting by self on {{name|quote}}\"", ",", "name", "=", "self", ".", "name", ")", "self", ".", "waiting", "=", "[", "waiter", "]", "try", ":", "self", ".", "lock", ".", "release", "(", ")", "DEBUG", "and", "_Log", ".", "note", "(", "\"out of lock {{name|quote}}\"", ",", "name", "=", "self", ".", "name", ")", "(", "waiter", "|", "till", ")", ".", "wait", "(", ")", "if", "DEBUG", ":", "_Log", ".", "note", "(", "\"done minimum wait (for signal {{till|quote}})\"", ",", "till", "=", "till", ".", "name", "if", "till", "else", "\"\"", ",", "name", "=", "self", ".", "name", ")", "except", "Exception", "as", "e", ":", "if", "not", "_Log", ":", "_late_import", "(", ")", "_Log", ".", "warning", "(", "\"problem\"", ",", "cause", "=", "e", ")", "finally", ":", "self", ".", "lock", ".", "acquire", "(", ")", "DEBUG", "and", "_Log", ".", "note", "(", "\"re-acquired lock {{name|quote}}\"", ",", "name", "=", "self", ".", "name", ")", "try", ":", "self", ".", "waiting", ".", "remove", "(", "waiter", ")", "DEBUG", "and", "_Log", ".", "note", "(", "\"removed own signal from {{name|quote}}\"", ",", "name", "=", "self", ".", "name", ")", "except", "Exception", ":", "pass", "return", "bool", "(", "waiter", ")" ]
THE ASSUMPTION IS wait() WILL ALWAYS RETURN WITH THE LOCK ACQUIRED

:param till: WHEN TO GIVE UP WAITING FOR ANOTHER THREAD TO SIGNAL
:return: True IF SIGNALED TO GO, False IF till WAS SIGNALED
[ "THE", "ASSUMPTION", "IS", "wait", "()", "WILL", "ALWAYS", "RETURN", "WITH", "THE", "LOCK", "ACQUIRED", ":", "param", "till", ":", "WHEN", "TO", "GIVE", "UP", "WAITING", "FOR", "ANOTHER", "THREAD", "TO", "SIGNAL", ":", "return", ":", "True", "IF", "SIGNALED", "TO", "GO", "False", "IF", "till", "WAS", "SIGNALED" ]
python
train
39.457143
PMEAL/porespy
porespy/filters/__funcs__.py
https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/filters/__funcs__.py#L1271-L1321
def nphase_border(im, include_diagonals=False):
    r'''
    Identifies the voxels in regions that border *N* other regions.

    Useful for finding triple-phase boundaries.

    Parameters
    ----------
    im : ND-array
        An ND image of the porous material containing discrete values in
        the pore space identifying different regions. e.g. the result of a
        snow-partition

    include_diagonals : boolean
        When identifying bordering pixels (2D) and voxels (3D) include
        those shifted along more than one axis

    Returns
    -------
    image : ND-array
        A copy of ``im`` with voxel values equal to the number of uniquely
        different bordering values
    '''
    if im.ndim != im.squeeze().ndim:
        warnings.warn('Input image contains a singleton axis:' + str(im.shape) +
                      ' Reduce dimensionality with np.squeeze(im) to avoid' +
                      ' unexpected behavior.')
    # Get dimension of image
    ndim = len(np.shape(im))
    if ndim not in [2, 3]:
        raise NotImplementedError("Function only works for 2d and 3d images")
    # Pad image to handle edges
    im = np.pad(im, pad_width=1, mode='edge')
    # Stack rolled images for each neighbor to be inspected
    stack = _make_stack(im, include_diagonals)
    # Sort the stack along the last axis
    stack.sort()
    out = np.ones_like(im)
    # Run through stack recording when neighbor id changes
    # Number of changes is number of unique bordering regions
    for k in range(np.shape(stack)[ndim])[1:]:
        if ndim == 2:
            mask = stack[:, :, k] != stack[:, :, k-1]
        elif ndim == 3:
            mask = stack[:, :, :, k] != stack[:, :, :, k-1]
        out += mask
    # Un-pad
    if ndim == 2:
        return out[1:-1, 1:-1].copy()
    else:
        return out[1:-1, 1:-1, 1:-1].copy()
[ "def", "nphase_border", "(", "im", ",", "include_diagonals", "=", "False", ")", ":", "if", "im", ".", "ndim", "!=", "im", ".", "squeeze", "(", ")", ".", "ndim", ":", "warnings", ".", "warn", "(", "'Input image conains a singleton axis:'", "+", "str", "(", "im", ".", "shape", ")", "+", "' Reduce dimensionality with np.squeeze(im) to avoid'", "+", "' unexpected behavior.'", ")", "# Get dimension of image", "ndim", "=", "len", "(", "np", ".", "shape", "(", "im", ")", ")", "if", "ndim", "not", "in", "[", "2", ",", "3", "]", ":", "raise", "NotImplementedError", "(", "\"Function only works for 2d and 3d images\"", ")", "# Pad image to handle edges", "im", "=", "np", ".", "pad", "(", "im", ",", "pad_width", "=", "1", ",", "mode", "=", "'edge'", ")", "# Stack rolled images for each neighbor to be inspected", "stack", "=", "_make_stack", "(", "im", ",", "include_diagonals", ")", "# Sort the stack along the last axis", "stack", ".", "sort", "(", ")", "out", "=", "np", ".", "ones_like", "(", "im", ")", "# Run through stack recording when neighbor id changes", "# Number of changes is number of unique bordering regions", "for", "k", "in", "range", "(", "np", ".", "shape", "(", "stack", ")", "[", "ndim", "]", ")", "[", "1", ":", "]", ":", "if", "ndim", "==", "2", ":", "mask", "=", "stack", "[", ":", ",", ":", ",", "k", "]", "!=", "stack", "[", ":", ",", ":", ",", "k", "-", "1", "]", "elif", "ndim", "==", "3", ":", "mask", "=", "stack", "[", ":", ",", ":", ",", ":", ",", "k", "]", "!=", "stack", "[", ":", ",", ":", ",", ":", ",", "k", "-", "1", "]", "out", "+=", "mask", "# Un-pad", "if", "ndim", "==", "2", ":", "return", "out", "[", "1", ":", "-", "1", ",", "1", ":", "-", "1", "]", ".", "copy", "(", ")", "else", ":", "return", "out", "[", "1", ":", "-", "1", ",", "1", ":", "-", "1", ",", "1", ":", "-", "1", "]", ".", "copy", "(", ")" ]
Identifies the voxels in regions that border *N* other regions.

Useful for finding triple-phase boundaries.

Parameters
----------
im : ND-array
    An ND image of the porous material containing discrete values in the pore space identifying different regions. e.g. the result of a snow-partition
include_diagonals : boolean
    When identifying bordering pixels (2D) and voxels (3D) include those shifted along more than one axis

Returns
-------
image : ND-array
    A copy of ``im`` with voxel values equal to the number of uniquely different bordering values
[ "r", "Identifies", "the", "voxels", "in", "regions", "that", "border", "*", "N", "*", "other", "regions", "." ]
python
train
35.372549
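A usage sketch, under the assumption that this function is exported as porespy.filters.nphase_border: a small 2D image with three labelled regions.

import numpy as np
import porespy as ps

im = np.zeros((6, 6), dtype=int)
im[:, :3] = 1   # region 1 fills the left half
im[:3, 3:] = 2  # region 2 fills the top-right quadrant
im[3:, 3:] = 3  # region 3 fills the bottom-right quadrant
out = ps.filters.nphase_border(im)
# Interior pixels come back as 1, two-phase borders as 2, and pixels in
# contact with all three regions as 3.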
mozilla-releng/mozapkpublisher
mozapkpublisher/check_rollout.py
https://github.com/mozilla-releng/mozapkpublisher/blob/df61034220153cbb98da74c8ef6de637f9185e12/mozapkpublisher/check_rollout.py#L19-L34
def check_rollout(edits_service, package_name, days):
    """Check if package_name has a release on staged rollout for too long"""
    edit = edits_service.insert(body={}, packageName=package_name).execute()
    response = edits_service.tracks().get(editId=edit['id'], track='production', packageName=package_name).execute()
    releases = response['releases']
    for release in releases:
        if release['status'] == 'inProgress':
            url = 'https://archive.mozilla.org/pub/mobile/releases/{}/SHA512SUMS'.format(release['name'])
            resp = requests.head(url)
            if resp.status_code != 200:
                if resp.status_code != 404:  # 404 is expected for release candidates
                    logger.warning("Could not check %s: %s", url, resp.status_code)
                continue
            age = time.time() - calendar.timegm(eu.parsedate(resp.headers['Last-Modified']))
            if age >= days * DAY:
                yield release, age
[ "def", "check_rollout", "(", "edits_service", ",", "package_name", ",", "days", ")", ":", "edit", "=", "edits_service", ".", "insert", "(", "body", "=", "{", "}", ",", "packageName", "=", "package_name", ")", ".", "execute", "(", ")", "response", "=", "edits_service", ".", "tracks", "(", ")", ".", "get", "(", "editId", "=", "edit", "[", "'id'", "]", ",", "track", "=", "'production'", ",", "packageName", "=", "package_name", ")", ".", "execute", "(", ")", "releases", "=", "response", "[", "'releases'", "]", "for", "release", "in", "releases", ":", "if", "release", "[", "'status'", "]", "==", "'inProgress'", ":", "url", "=", "'https://archive.mozilla.org/pub/mobile/releases/{}/SHA512SUMS'", ".", "format", "(", "release", "[", "'name'", "]", ")", "resp", "=", "requests", ".", "head", "(", "url", ")", "if", "resp", ".", "status_code", "!=", "200", ":", "if", "resp", ".", "status_code", "!=", "404", ":", "# 404 is expected for release candidates", "logger", ".", "warning", "(", "\"Could not check %s: %s\"", ",", "url", ",", "resp", ".", "status_code", ")", "continue", "age", "=", "time", ".", "time", "(", ")", "-", "calendar", ".", "timegm", "(", "eu", ".", "parsedate", "(", "resp", ".", "headers", "[", "'Last-Modified'", "]", ")", ")", "if", "age", ">=", "days", "*", "DAY", ":", "yield", "release", ",", "age" ]
Check if package_name has a release on staged rollout for too long
[ "Check", "if", "package_name", "has", "a", "release", "on", "staged", "rollout", "for", "too", "long" ]
python
train
60.0625
pasztorpisti/json-cfg
src/jsoncfg/tree_python.py
https://github.com/pasztorpisti/json-cfg/blob/4627b14a92521ef8a39bbedaa7af8d380d406d07/src/jsoncfg/tree_python.py#L61-L69
def default_number_converter(number_str):
    """
    Converts the string representation of a json number into its python object equivalent,
    an int, long, float or whatever type suits.
    """
    is_int = (number_str.startswith('-') and number_str[1:].isdigit()) or number_str.isdigit()
    # FIXME: this handles a wider range of numbers than allowed by the json standard,
    # etc.: float('nan') and float('inf'). But is this a problem?
    return int(number_str) if is_int else float(number_str)
[ "def", "default_number_converter", "(", "number_str", ")", ":", "is_int", "=", "(", "number_str", ".", "startswith", "(", "'-'", ")", "and", "number_str", "[", "1", ":", "]", ".", "isdigit", "(", ")", ")", "or", "number_str", ".", "isdigit", "(", ")", "# FIXME: this handles a wider range of numbers than allowed by the json standard,", "# etc.: float('nan') and float('inf'). But is this a problem?", "return", "int", "(", "number_str", ")", "if", "is_int", "else", "float", "(", "number_str", ")" ]
Converts the string representation of a json number into its python object equivalent, an int, long, float or whatever type suits.
[ "Converts", "the", "string", "representation", "of", "a", "json", "number", "into", "its", "python", "object", "equivalent", "an", "int", "long", "float", "or", "whatever", "type", "suits", "." ]
python
train
55
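A few calls illustrating the dispatch above, including exponent notation, which fails isdigit() and falls through to float():

print(default_number_converter('-12'))   # -12 (int)
print(default_number_converter('3.14'))  # 3.14 (float)
print(default_number_converter('2e3'))   # 2000.0 (float)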
VikParuchuri/percept
percept/tasks/validate.py
https://github.com/VikParuchuri/percept/blob/90304ba82053e2a9ad2bacaab3479403d3923bcf/percept/tasks/validate.py#L78-L85
def train(self, data, target, **kwargs):
    """
    Used in the training phase. Override.
    """
    non_predictors = [i.replace(" ", "_").lower() for i in list(set(data['team']))] + ["team", "next_year_wins"]
    self.column_names = [l for l in list(data.columns) if l not in non_predictors]
    results, folds = self.cross_validate(data, non_predictors, **kwargs)
    self.gather_results(results, folds, data)
[ "def", "train", "(", "self", ",", "data", ",", "target", ",", "*", "*", "kwargs", ")", ":", "non_predictors", "=", "[", "i", ".", "replace", "(", "\" \"", ",", "\"_\"", ")", ".", "lower", "(", ")", "for", "i", "in", "list", "(", "set", "(", "data", "[", "'team'", "]", ")", ")", "]", "+", "[", "\"team\"", ",", "\"next_year_wins\"", "]", "self", ".", "column_names", "=", "[", "l", "for", "l", "in", "list", "(", "data", ".", "columns", ")", "if", "l", "not", "in", "non_predictors", "]", "results", ",", "folds", "=", "self", ".", "cross_validate", "(", "data", ",", "non_predictors", ",", "*", "*", "kwargs", ")", "self", ".", "gather_results", "(", "results", ",", "folds", ",", "data", ")" ]
Used in the training phase. Override.
[ "Used", "in", "the", "training", "phase", ".", "Override", "." ]
python
train
54.375
pyviz/holoviews
holoviews/core/util.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/util.py#L637-L642
def add_aliases(self_or_cls, **kwargs): """ Conveniently add new aliases as keyword arguments. For instance you can add a new alias with add_aliases(short='Longer string') """ self_or_cls.aliases.update({v:k for k,v in kwargs.items()})
[ "def", "add_aliases", "(", "self_or_cls", ",", "*", "*", "kwargs", ")", ":", "self_or_cls", ".", "aliases", ".", "update", "(", "{", "v", ":", "k", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", "}", ")" ]
Conveniently add new aliases as keyword arguments. For instance you can add a new alias with add_aliases(short='Longer string')
[ "Conveniently", "add", "new", "aliases", "as", "keyword", "arguments", ".", "For", "instance", "you", "can", "add", "a", "new", "alias", "with", "add_aliases", "(", "short", "=", "Longer", "string", ")" ]
python
train
45
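A short sketch of the reversed-mapping behaviour above; the Demo class is a hypothetical stand-in, not part of holoviews.

class Demo:
    aliases = {}

    @classmethod
    def add_aliases(cls, **kwargs):
        # identical body to the method above: stores long-string -> alias
        cls.aliases.update({v: k for k, v in kwargs.items()})

Demo.add_aliases(short='Longer string')
assert Demo.aliases == {'Longer string': 'short'}   # lookup by the long form yields the alias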
OnroerendErfgoed/crabpy
crabpy/gateway/crab.py
https://github.com/OnroerendErfgoed/crabpy/blob/3a6fd8bc5aca37c2a173e3ea94e4e468b8aa79c1/crabpy/gateway/crab.py#L1211-L1243
def get_gebouw_by_id(self, id):
    '''
    Retrieve a `Gebouw` by the Id.

    :param integer id: the Id of the `Gebouw`
    :rtype: :class:`Gebouw`
    '''
    def creator():
        res = crab_gateway_request(
            self.client, 'GetGebouwByIdentificatorGebouw', id
        )
        if res is None:
            raise GatewayResourceNotFoundException()
        return Gebouw(
            res.IdentificatorGebouw,
            res.AardGebouw,
            res.StatusGebouw,
            res.GeometriemethodeGebouw,
            res.Geometrie,
            Metadata(
                res.BeginDatum,
                res.BeginTijd,
                self.get_bewerking(res.BeginBewerking),
                self.get_organisatie(res.BeginOrganisatie)
            )
        )
    if self.caches['short'].is_configured:
        key = 'GetGebouwByIdentificatorGebouw#%s' % (id)
        gebouw = self.caches['short'].get_or_create(key, creator)
    else:
        gebouw = creator()
    gebouw.set_gateway(self)
    return gebouw
[ "def", "get_gebouw_by_id", "(", "self", ",", "id", ")", ":", "def", "creator", "(", ")", ":", "res", "=", "crab_gateway_request", "(", "self", ".", "client", ",", "'GetGebouwByIdentificatorGebouw'", ",", "id", ")", "if", "res", "is", "None", ":", "raise", "GatewayResourceNotFoundException", "(", ")", "return", "Gebouw", "(", "res", ".", "IdentificatorGebouw", ",", "res", ".", "AardGebouw", ",", "res", ".", "StatusGebouw", ",", "res", ".", "GeometriemethodeGebouw", ",", "res", ".", "Geometrie", ",", "Metadata", "(", "res", ".", "BeginDatum", ",", "res", ".", "BeginTijd", ",", "self", ".", "get_bewerking", "(", "res", ".", "BeginBewerking", ")", ",", "self", ".", "get_organisatie", "(", "res", ".", "BeginOrganisatie", ")", ")", ")", "if", "self", ".", "caches", "[", "'short'", "]", ".", "is_configured", ":", "key", "=", "'GetGebouwByIdentificatorGebouw#%s'", "%", "(", "id", ")", "gebouw", "=", "self", ".", "caches", "[", "'short'", "]", ".", "get_or_create", "(", "key", ",", "creator", ")", "else", ":", "gebouw", "=", "creator", "(", ")", "gebouw", ".", "set_gateway", "(", "self", ")", "return", "gebouw" ]
Retrieve a `Gebouw` by the Id. :param integer id: the Id of the `Gebouw` :rtype: :class:`Gebouw`
[ "Retrieve", "a", "Gebouw", "by", "the", "Id", "." ]
python
train
33.69697
jantman/awslimitchecker
awslimitchecker/services/cloudformation.py
https://github.com/jantman/awslimitchecker/blob/e50197f70f3d0abcc5cfc7fde6336f548b790e34/awslimitchecker/services/cloudformation.py#L102-L115
def _update_limits_from_api(self): """ Call the service's API action to retrieve limit/quota information, and update AwsLimit objects in ``self.limits`` with this information. """ logger.debug('Setting CloudFormation limits from API') self.connect() resp = self.conn.describe_account_limits() for lim in resp['AccountLimits']: if lim['Name'] == 'StackLimit': self.limits['Stacks']._set_api_limit(lim['Value']) continue logger.debug('API response contained unknown CloudFormation ' 'limit: %s', lim['Name'])
[ "def", "_update_limits_from_api", "(", "self", ")", ":", "logger", ".", "debug", "(", "'Setting CloudFormation limits from API'", ")", "self", ".", "connect", "(", ")", "resp", "=", "self", ".", "conn", ".", "describe_account_limits", "(", ")", "for", "lim", "in", "resp", "[", "'AccountLimits'", "]", ":", "if", "lim", "[", "'Name'", "]", "==", "'StackLimit'", ":", "self", ".", "limits", "[", "'Stacks'", "]", ".", "_set_api_limit", "(", "lim", "[", "'Value'", "]", ")", "continue", "logger", ".", "debug", "(", "'API response contained unknown CloudFormation '", "'limit: %s'", ",", "lim", "[", "'Name'", "]", ")" ]
Call the service's API action to retrieve limit/quota information, and update AwsLimit objects in ``self.limits`` with this information.
[ "Call", "the", "service", "s", "API", "action", "to", "retrieve", "limit", "/", "quota", "information", "and", "update", "AwsLimit", "objects", "in", "self", ".", "limits", "with", "this", "information", "." ]
python
train
45.571429
paxosglobal/subconscious
subconscious/model.py
https://github.com/paxosglobal/subconscious/blob/bc4feabde574462ff59009b32181d12867f0aa3d/subconscious/model.py#L227-L251
async def load(cls, db, identifier=None, redis_key=None): """Load the object from redis. Use the identifier (colon-separated composite keys or the primary key) or the redis_key. """ if not identifier and not redis_key: raise InvalidQuery('Must supply identifier or redis_key') if redis_key is None: redis_key = cls.make_key(identifier) if await db.exists(redis_key): data = await db.hgetall(redis_key) kwargs = {} for key_bin, value_bin in data.items(): key, value = key_bin, value_bin column = getattr(cls, key, False) if not column or (column.field_type == str): kwargs[key] = value elif column.field_type == datetime: kwargs[key] = datetime.strptime(value, DATETIME_FORMAT) else: kwargs[key] = column.field_type(value) kwargs['loading'] = True return cls(**kwargs) else: logger.debug("No Redis key found: {}".format(redis_key)) return None
[ "async", "def", "load", "(", "cls", ",", "db", ",", "identifier", "=", "None", ",", "redis_key", "=", "None", ")", ":", "if", "not", "identifier", "and", "not", "redis_key", ":", "raise", "InvalidQuery", "(", "'Must supply identifier or redis_key'", ")", "if", "redis_key", "is", "None", ":", "redis_key", "=", "cls", ".", "make_key", "(", "identifier", ")", "if", "await", "db", ".", "exists", "(", "redis_key", ")", ":", "data", "=", "await", "db", ".", "hgetall", "(", "redis_key", ")", "kwargs", "=", "{", "}", "for", "key_bin", ",", "value_bin", "in", "data", ".", "items", "(", ")", ":", "key", ",", "value", "=", "key_bin", ",", "value_bin", "column", "=", "getattr", "(", "cls", ",", "key", ",", "False", ")", "if", "not", "column", "or", "(", "column", ".", "field_type", "==", "str", ")", ":", "kwargs", "[", "key", "]", "=", "value", "elif", "column", ".", "field_type", "==", "datetime", ":", "kwargs", "[", "key", "]", "=", "datetime", ".", "strptime", "(", "value", ",", "DATETIME_FORMAT", ")", "else", ":", "kwargs", "[", "key", "]", "=", "column", ".", "field_type", "(", "value", ")", "kwargs", "[", "'loading'", "]", "=", "True", "return", "cls", "(", "*", "*", "kwargs", ")", "else", ":", "logger", ".", "debug", "(", "\"No Redis key found: {}\"", ".", "format", "(", "redis_key", ")", ")", "return", "None" ]
Load the object from redis. Use the identifier (colon-separated composite keys or the primary key) or the redis_key.
[ "Load", "the", "object", "from", "redis", ".", "Use", "the", "identifier", "(", "colon", "-", "separated", "composite", "keys", "or", "the", "primary", "key", ")", "or", "the", "redis_key", "." ]
python
train
44.88
gwpy/gwpy
gwpy/utils/lal.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/utils/lal.py#L212-L234
def from_lal_unit(lunit):
    """Convert a `LALUnit` into a `~astropy.units.Unit`

    Parameters
    ----------
    lunit : `lal.Unit`
        the input unit

    Returns
    -------
    unit : `~astropy.units.Unit`
        the Astropy representation of the input

    Raises
    ------
    TypeError
        if ``lunit`` cannot be converted to `lal.Unit`

    ValueError
        if Astropy doesn't understand the base units for the input
    """
    return reduce(operator.mul, (
        units.Unit(str(LAL_UNIT_INDEX[i])) ** exp
        for i, exp in enumerate(lunit.unitNumerator)))
[ "def", "from_lal_unit", "(", "lunit", ")", ":", "return", "reduce", "(", "operator", ".", "mul", ",", "(", "units", ".", "Unit", "(", "str", "(", "LAL_UNIT_INDEX", "[", "i", "]", ")", ")", "**", "exp", "for", "i", ",", "exp", "in", "enumerate", "(", "lunit", ".", "unitNumerator", ")", ")", ")" ]
Convert a `LALUnit` into a `~astropy.units.Unit`

Parameters
----------
lunit : `lal.Unit`
    the input unit

Returns
-------
unit : `~astropy.units.Unit`
    the Astropy representation of the input

Raises
------
TypeError
    if ``lunit`` cannot be converted to `lal.Unit`

ValueError
    if Astropy doesn't understand the base units for the input
[ "Convert", "a", "LALUnit", "into", "a", "~astropy", ".", "units", ".", "Unit" ]
python
train
24.434783
bokeh/bokeh
bokeh/util/serialization.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/util/serialization.py#L494-L530
def encode_binary_dict(array, buffers):
    ''' Send a numpy array as an unencoded binary buffer

    The encoded format is a dict with the following structure:

    .. code:: python

        {
            '__buffer__' :  << an ID to locate the buffer >>,
            'shape'      : << array shape >>,
            'dtype'      : << dtype name >>,
            'order'      : << byte order at origin (little or big)>>
        }

    Args:
        array (np.ndarray) : an array to encode

        buffers (list) :
            List to add buffers to

            **This is an "out" parameter**. The values it contains will be
            modified in-place.

    Returns:
        dict

    '''
    buffer_id = make_id()
    buf = (dict(id=buffer_id), array.tobytes())
    buffers.append(buf)

    return {
        '__buffer__'  : buffer_id,
        'shape'       : array.shape,
        'dtype'       : array.dtype.name,
        'order'       : sys.byteorder
    }
[ "def", "encode_binary_dict", "(", "array", ",", "buffers", ")", ":", "buffer_id", "=", "make_id", "(", ")", "buf", "=", "(", "dict", "(", "id", "=", "buffer_id", ")", ",", "array", ".", "tobytes", "(", ")", ")", "buffers", ".", "append", "(", "buf", ")", "return", "{", "'__buffer__'", ":", "buffer_id", ",", "'shape'", ":", "array", ".", "shape", ",", "'dtype'", ":", "array", ".", "dtype", ".", "name", ",", "'order'", ":", "sys", ".", "byteorder", "}" ]
Send a numpy array as an unencoded binary buffer

The encoded format is a dict with the following structure:

.. code:: python

    {
        '__buffer__' :  << an ID to locate the buffer >>,
        'shape'      : << array shape >>,
        'dtype'      : << dtype name >>,
        'order'      : << byte order at origin (little or big)>>
    }

Args:
    array (np.ndarray) : an array to encode

    buffers (list) :
        List to add buffers to

        **This is an "out" parameter**. The values it contains will be
        modified in-place.

Returns:
    dict
[ "Send", "a", "numpy", "array", "as", "an", "unencoded", "binary", "buffer" ]
python
train
24.864865
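A hedged driving sketch for encode_binary_dict above, assuming the function and Bokeh's make_id() are importable as in the module; array shape and dtype are arbitrary.

import numpy as np

buffers = []                                  # the function appends, so pass a list
arr = np.arange(6, dtype='float64').reshape(2, 3)
header = encode_binary_dict(arr, buffers)
# header is the JSON-safe locator: {'__buffer__': <id>, 'shape': (2, 3),
#                                   'dtype': 'float64', 'order': 'little' or 'big'}
# buffers now holds one ({'id': <same id>}, <48 raw bytes>) pair to ship out-of-band
assert buffers[0][1] == arr.tobytes()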
swgillespie/tsquare
tsquare/core.py
https://github.com/swgillespie/tsquare/blob/242adb2c27e6c65a1f75db32a4636ea3f1d22a3a/tsquare/core.py#L180-L206
def get_assignments(self, site):
    """
    Gets a list of assignments associated with a site (class). Returns
    a list of TSquareAssignment objects.

    @param site (TSquareSite) - The site to use with the assignment query

    @returns - A list of TSquareAssignment objects. May be an empty list
               if the site has defined no assignments.
    """
    tools = self.get_tools(site)
    assignment_tool_filter = [x.href for x in tools
                              if x.name == 'assignment-grades']
    if not assignment_tool_filter:
        return []
    assignment_tool_url = assignment_tool_filter[0]
    response = self._session.get(assignment_tool_url)
    response.raise_for_status()
    iframes = self._html_iface.get_iframes(response.text)
    iframe_url = ''
    for frame in iframes:
        if frame['title'] == 'Assignments ':
            iframe_url = frame['src']
    if iframe_url == '':
        print("WARNING: NO ASSIGNMENT IFRAMES FOUND")
    response = self._session.get(iframe_url)
    response.raise_for_status()
    assignment_dict_list = self._html_iface.get_assignments(response.text)
    return [TSquareAssignment(**x) for x in assignment_dict_list]
[ "def", "get_assignments", "(", "self", ",", "site", ")", ":", "tools", "=", "self", ".", "get_tools", "(", "site", ")", "assignment_tool_filter", "=", "[", "x", ".", "href", "for", "x", "in", "tools", "if", "x", ".", "name", "==", "'assignment-grades'", "]", "if", "not", "assignment_tool_filter", ":", "return", "[", "]", "assignment_tool_url", "=", "assignment_tool_filter", "[", "0", "]", "response", "=", "self", ".", "_session", ".", "get", "(", "assignment_tool_url", ")", "response", ".", "raise_for_status", "(", ")", "iframes", "=", "self", ".", "_html_iface", ".", "get_iframes", "(", "response", ".", "text", ")", "iframe_url", "=", "''", "for", "frame", "in", "iframes", ":", "if", "frame", "[", "'title'", "]", "==", "'Assignments '", ":", "iframe_url", "=", "frame", "[", "'src'", "]", "if", "iframe_url", "==", "''", ":", "print", "(", "\"WARNING: NO ASSIGNMENT IFRAMES FOUND\"", ")", "response", "=", "self", ".", "_session", ".", "get", "(", "iframe_url", ")", "response", ".", "raise_for_status", "(", ")", "assignment_dict_list", "=", "self", ".", "_html_iface", ".", "get_assignments", "(", "response", ".", "text", ")", "return", "[", "TSquareAssignment", "(", "*", "*", "x", ")", "for", "x", "in", "assignment_dict_list", "]" ]
Gets a list of assignments associated with a site (class). Returns
a list of TSquareAssignment objects.

@param site (TSquareSite) - The site to use with the assignment query

@returns - A list of TSquareAssignment objects. May be an empty list if the
           site has defined no assignments.
[ "Gets", "a", "list", "of", "assignments", "associated", "with", "a", "site", "(", "class", ")", ".", "Returns", "a", "list", "of", "TSquareAssignment", "objects", ".", "@param", "site", "(", "TSquareSite", ")", "-", "The", "site", "to", "use", "with", "the", "assignment", "query" ]
python
train
45.518519
usc-isi-i2/dig-dictionary-extractor
digDictionaryExtractor/name_dictionary_extractor.py
https://github.com/usc-isi-i2/dig-dictionary-extractor/blob/1fe4f6c121fd09a8f194ccd419284d3c3760195d/digDictionaryExtractor/name_dictionary_extractor.py#L11-L18
def get_name_dictionary_extractor(name_trie): """Method for creating default name dictionary extractor""" return DictionaryExtractor()\ .set_trie(name_trie)\ .set_pre_filter(VALID_TOKEN_RE.match)\ .set_pre_process(lambda x: x.lower())\ .set_metadata({'extractor': 'dig_name_dictionary_extractor'})
[ "def", "get_name_dictionary_extractor", "(", "name_trie", ")", ":", "return", "DictionaryExtractor", "(", ")", ".", "set_trie", "(", "name_trie", ")", ".", "set_pre_filter", "(", "VALID_TOKEN_RE", ".", "match", ")", ".", "set_pre_process", "(", "lambda", "x", ":", "x", ".", "lower", "(", ")", ")", ".", "set_metadata", "(", "{", "'extractor'", ":", "'dig_name_dictionary_extractor'", "}", ")" ]
Method for creating default name dictionary extractor
[ "Method", "for", "creating", "default", "name", "dictionary", "extractor" ]
python
train
41.375
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_tunnels_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_tunnels_ext.py#L441-L453
def get_tunnel_statistics_output_tunnel_stat_rx_bytes(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_tunnel_statistics = ET.Element("get_tunnel_statistics") config = get_tunnel_statistics output = ET.SubElement(get_tunnel_statistics, "output") tunnel_stat = ET.SubElement(output, "tunnel-stat") rx_bytes = ET.SubElement(tunnel_stat, "rx-bytes") rx_bytes.text = kwargs.pop('rx_bytes') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_tunnel_statistics_output_tunnel_stat_rx_bytes", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_tunnel_statistics", "=", "ET", ".", "Element", "(", "\"get_tunnel_statistics\"", ")", "config", "=", "get_tunnel_statistics", "output", "=", "ET", ".", "SubElement", "(", "get_tunnel_statistics", ",", "\"output\"", ")", "tunnel_stat", "=", "ET", ".", "SubElement", "(", "output", ",", "\"tunnel-stat\"", ")", "rx_bytes", "=", "ET", ".", "SubElement", "(", "tunnel_stat", ",", "\"rx-bytes\"", ")", "rx_bytes", ".", "text", "=", "kwargs", ".", "pop", "(", "'rx_bytes'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
43.461538
thombashi/SimpleSQLite
simplesqlite/converter.py
https://github.com/thombashi/SimpleSQLite/blob/b16f212132b9b98773e68bf7395abc2f60f56fe5/simplesqlite/converter.py#L21-L47
def to_record(cls, attr_names, values): """ Convert values to a record to be inserted into a database. :param list attr_names: List of attributes for the converting record. :param values: Values to be converted. :type values: |dict|/|namedtuple|/|list|/|tuple| :raises ValueError: If the ``values`` is invalid. """ try: # from a namedtuple to a dict values = values._asdict() except AttributeError: pass try: # from a dictionary to a list return [cls.__to_sqlite_element(values.get(attr_name)) for attr_name in attr_names] except AttributeError: pass if isinstance(values, (tuple, list)): return [cls.__to_sqlite_element(value) for value in values] raise ValueError("cannot convert from {} to list".format(type(values)))
[ "def", "to_record", "(", "cls", ",", "attr_names", ",", "values", ")", ":", "try", ":", "# from a namedtuple to a dict", "values", "=", "values", ".", "_asdict", "(", ")", "except", "AttributeError", ":", "pass", "try", ":", "# from a dictionary to a list", "return", "[", "cls", ".", "__to_sqlite_element", "(", "values", ".", "get", "(", "attr_name", ")", ")", "for", "attr_name", "in", "attr_names", "]", "except", "AttributeError", ":", "pass", "if", "isinstance", "(", "values", ",", "(", "tuple", ",", "list", ")", ")", ":", "return", "[", "cls", ".", "__to_sqlite_element", "(", "value", ")", "for", "value", "in", "values", "]", "raise", "ValueError", "(", "\"cannot convert from {} to list\"", ".", "format", "(", "type", "(", "values", ")", ")", ")" ]
Convert values to a record to be inserted into a database. :param list attr_names: List of attributes for the converting record. :param values: Values to be converted. :type values: |dict|/|namedtuple|/|list|/|tuple| :raises ValueError: If the ``values`` is invalid.
[ "Convert", "values", "to", "a", "record", "to", "be", "inserted", "into", "a", "database", "." ]
python
train
33.296296
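A self-contained sketch of the three input shapes to_record above accepts. The private __to_sqlite_element is replaced by an identity stand-in, so this mirrors the dispatch only, not SimpleSQLite's actual value normalisation.

from collections import namedtuple

def to_record(attr_names, values, elem=lambda v: v):
    # stand-in with the same dispatch as above; elem replaces the private element converter
    try:
        values = values._asdict()             # namedtuple -> dict
    except AttributeError:
        pass
    try:
        return [elem(values.get(a)) for a in attr_names]   # dict -> list, missing keys -> None
    except AttributeError:
        pass
    if isinstance(values, (tuple, list)):
        return [elem(v) for v in values]
    raise ValueError("cannot convert from {} to list".format(type(values)))

Row = namedtuple('Row', ['id', 'name'])
assert to_record(['id', 'name'], Row(1, 'a')) == [1, 'a']
assert to_record(['id', 'name'], {'name': 'b'}) == [None, 'b']
assert to_record(['id', 'name'], (3, 'c')) == [3, 'c']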
jealous/stockstats
stockstats.py
https://github.com/jealous/stockstats/blob/a479a504ea1906955feeb8519c34ef40eb48ec9b/stockstats.py#L470-L486
def _get_pdm(cls, df, windows): """ +DM, positive directional moving If window is not 1, calculate the SMMA of +DM :param df: data :param windows: range :return: """ window = cls.get_only_one_positive_int(windows) column_name = 'pdm_{}'.format(window) um, dm = df['um'], df['dm'] df['pdm'] = np.where(um > dm, um, 0) if window > 1: pdm = df['pdm_{}_ema'.format(window)] else: pdm = df['pdm'] df[column_name] = pdm
[ "def", "_get_pdm", "(", "cls", ",", "df", ",", "windows", ")", ":", "window", "=", "cls", ".", "get_only_one_positive_int", "(", "windows", ")", "column_name", "=", "'pdm_{}'", ".", "format", "(", "window", ")", "um", ",", "dm", "=", "df", "[", "'um'", "]", ",", "df", "[", "'dm'", "]", "df", "[", "'pdm'", "]", "=", "np", ".", "where", "(", "um", ">", "dm", ",", "um", ",", "0", ")", "if", "window", ">", "1", ":", "pdm", "=", "df", "[", "'pdm_{}_ema'", ".", "format", "(", "window", ")", "]", "else", ":", "pdm", "=", "df", "[", "'pdm'", "]", "df", "[", "column_name", "]", "=", "pdm" ]
+DM, positive directional moving If window is not 1, calculate the SMMA of +DM :param df: data :param windows: range :return:
[ "+", "DM", "positive", "directional", "moving", "If", "window", "is", "not", "1", "calculate", "the", "SMMA", "of", "+", "DM", ":", "param", "df", ":", "data", ":", "param", "windows", ":", "range", ":", "return", ":" ]
python
train
31.882353
pyvisa/pyvisa
pyvisa/shell.py
https://github.com/pyvisa/pyvisa/blob/b8b2d4371e1f00782856aa9176ff1ced6bcb3798/pyvisa/shell.py#L362-L409
def do_termchar(self, args): """Get or set termination character for resource in use. <termchar> can be one of: CR, LF, CRLF, NUL or None. None is used to disable termination character Get termination character: termchar Set termination character read or read+write: termchar <termchar> [<termchar>] """ if not self.current: print('There are no resources in use. Use the command "open".') return args = args.strip() if not args: try: charmap = { u'\r': 'CR', u'\n': 'LF', u'\r\n': 'CRLF', u'\0': 'NUL' } chr = self.current.read_termination if chr in charmap: chr = charmap[chr] chw = self.current.write_termination if chw in charmap: chw = charmap[chw] print('Termchar read: {} write: {}'.format(chr, chw)) except Exception as e: print(e) else: args = args.split(' ') charmap = { 'CR': u'\r', 'LF': u'\n', 'CRLF': u'\r\n', 'NUL': u'\0', 'None': None } chr = args[0] chw = args[0 if len(args) == 1 else 1] if chr in charmap and chw in charmap: try: self.current.read_termination = charmap[chr] self.current.write_termination = charmap[chw] print('Done') except Exception as e: print(e) else: print('use CR, LF, CRLF, NUL or None to set termchar') return
[ "def", "do_termchar", "(", "self", ",", "args", ")", ":", "if", "not", "self", ".", "current", ":", "print", "(", "'There are no resources in use. Use the command \"open\".'", ")", "return", "args", "=", "args", ".", "strip", "(", ")", "if", "not", "args", ":", "try", ":", "charmap", "=", "{", "u'\\r'", ":", "'CR'", ",", "u'\\n'", ":", "'LF'", ",", "u'\\r\\n'", ":", "'CRLF'", ",", "u'\\0'", ":", "'NUL'", "}", "chr", "=", "self", ".", "current", ".", "read_termination", "if", "chr", "in", "charmap", ":", "chr", "=", "charmap", "[", "chr", "]", "chw", "=", "self", ".", "current", ".", "write_termination", "if", "chw", "in", "charmap", ":", "chw", "=", "charmap", "[", "chw", "]", "print", "(", "'Termchar read: {} write: {}'", ".", "format", "(", "chr", ",", "chw", ")", ")", "except", "Exception", "as", "e", ":", "print", "(", "e", ")", "else", ":", "args", "=", "args", ".", "split", "(", "' '", ")", "charmap", "=", "{", "'CR'", ":", "u'\\r'", ",", "'LF'", ":", "u'\\n'", ",", "'CRLF'", ":", "u'\\r\\n'", ",", "'NUL'", ":", "u'\\0'", ",", "'None'", ":", "None", "}", "chr", "=", "args", "[", "0", "]", "chw", "=", "args", "[", "0", "if", "len", "(", "args", ")", "==", "1", "else", "1", "]", "if", "chr", "in", "charmap", "and", "chw", "in", "charmap", ":", "try", ":", "self", ".", "current", ".", "read_termination", "=", "charmap", "[", "chr", "]", "self", ".", "current", ".", "write_termination", "=", "charmap", "[", "chw", "]", "print", "(", "'Done'", ")", "except", "Exception", "as", "e", ":", "print", "(", "e", ")", "else", ":", "print", "(", "'use CR, LF, CRLF, NUL or None to set termchar'", ")", "return" ]
Get or set termination character for resource in use. <termchar> can be one of: CR, LF, CRLF, NUL or None. None is used to disable termination character Get termination character: termchar Set termination character read or read+write: termchar <termchar> [<termchar>]
[ "Get", "or", "set", "termination", "character", "for", "resource", "in", "use", ".", "<termchar", ">", "can", "be", "one", "of", ":", "CR", "LF", "CRLF", "NUL", "or", "None", ".", "None", "is", "used", "to", "disable", "termination", "character", "Get", "termination", "character", ":" ]
python
train
34.354167
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/ext/_bundled/mplexporter.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/ext/_bundled/mplexporter.py#L301-L308
def draw_image(self, ax, image): """Process a matplotlib image object and call renderer.draw_image""" self.renderer.draw_image(imdata=utils.image_to_base64(image), extent=image.get_extent(), coordinates="data", style={"alpha": image.get_alpha(), "zorder": image.get_zorder()}, mplobj=image)
[ "def", "draw_image", "(", "self", ",", "ax", ",", "image", ")", ":", "self", ".", "renderer", ".", "draw_image", "(", "imdata", "=", "utils", ".", "image_to_base64", "(", "image", ")", ",", "extent", "=", "image", ".", "get_extent", "(", ")", ",", "coordinates", "=", "\"data\"", ",", "style", "=", "{", "\"alpha\"", ":", "image", ".", "get_alpha", "(", ")", ",", "\"zorder\"", ":", "image", ".", "get_zorder", "(", ")", "}", ",", "mplobj", "=", "image", ")" ]
Process a matplotlib image object and call renderer.draw_image
[ "Process", "a", "matplotlib", "image", "object", "and", "call", "renderer", ".", "draw_image" ]
python
train
58.875
basho/riak-python-client
riak/content.py
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/content.py#L99-L118
def add_index(self, field, value): """ add_index(field, value) Tag this object with the specified field/value pair for indexing. :param field: The index field. :type field: string :param value: The index value. :type value: string or integer :rtype: :class:`RiakObject <riak.riak_object.RiakObject>` """ if field[-4:] not in ("_bin", "_int"): raise RiakError("Riak 2i fields must end with either '_bin'" " or '_int'.") self.indexes.add((field, value)) return self._robject
[ "def", "add_index", "(", "self", ",", "field", ",", "value", ")", ":", "if", "field", "[", "-", "4", ":", "]", "not", "in", "(", "\"_bin\"", ",", "\"_int\"", ")", ":", "raise", "RiakError", "(", "\"Riak 2i fields must end with either '_bin'\"", "\" or '_int'.\"", ")", "self", ".", "indexes", ".", "add", "(", "(", "field", ",", "value", ")", ")", "return", "self", ".", "_robject" ]
add_index(field, value) Tag this object with the specified field/value pair for indexing. :param field: The index field. :type field: string :param value: The index value. :type value: string or integer :rtype: :class:`RiakObject <riak.riak_object.RiakObject>`
[ "add_index", "(", "field", "value", ")" ]
python
train
30.05
wolfhong/formic
formic/treewalk.py
https://github.com/wolfhong/formic/blob/0d81eb88dcbb6fa705194fc6ccf2993f4abbaa76/formic/treewalk.py#L34-L45
def tree_walk(cls, directory, tree): """Walks a tree returned by `cls.list_to_tree` returning a list of 3-tuples as if from os.walk().""" results = [] dirs = [d for d in tree if d != FILE_MARKER] files = tree[FILE_MARKER] results.append((directory, dirs, files)) for d in dirs: subdir = os.path.join(directory, d) subtree = tree[d] results.extend(cls.tree_walk(subdir, subtree)) return results
[ "def", "tree_walk", "(", "cls", ",", "directory", ",", "tree", ")", ":", "results", "=", "[", "]", "dirs", "=", "[", "d", "for", "d", "in", "tree", "if", "d", "!=", "FILE_MARKER", "]", "files", "=", "tree", "[", "FILE_MARKER", "]", "results", ".", "append", "(", "(", "directory", ",", "dirs", ",", "files", ")", ")", "for", "d", "in", "dirs", ":", "subdir", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "d", ")", "subtree", "=", "tree", "[", "d", "]", "results", ".", "extend", "(", "cls", ".", "tree_walk", "(", "subdir", ",", "subtree", ")", ")", "return", "results" ]
Walks a tree returned by `cls.list_to_tree` returning a list of 3-tuples as if from os.walk().
[ "Walks", "a", "tree", "returned", "by", "cls", ".", "list_to_tree", "returning", "a", "list", "of", "3", "-", "tuples", "as", "if", "from", "os", ".", "walk", "()", "." ]
python
train
40.166667
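A self-contained sketch of the nested-dict layout tree_walk above consumes; FILE_MARKER is assumed to be the sentinel key formic's list_to_tree uses for the per-directory file list.

import os

FILE_MARKER = '<files>'   # assumed sentinel; the real constant lives in formic.treewalk

# tree as list_to_tree might produce for: a/x.txt, a/b/y.txt
tree = {
    FILE_MARKER: [],
    'a': {
        FILE_MARKER: ['x.txt'],
        'b': {FILE_MARKER: ['y.txt']},
    },
}

def tree_walk(directory, tree):
    # standalone copy of the method above, minus the class wrapper
    results = []
    dirs = [d for d in tree if d != FILE_MARKER]
    files = tree[FILE_MARKER]
    results.append((directory, dirs, files))
    for d in dirs:
        results.extend(tree_walk(os.path.join(directory, d), tree[d]))
    return results

# yields os.walk()-style 3-tuples:
# [('.', ['a'], []), ('./a', ['b'], ['x.txt']), ('./a/b', [], ['y.txt'])]
print(tree_walk('.', tree))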
cokelaer/spectrum
src/spectrum/correlation.py
https://github.com/cokelaer/spectrum/blob/bad6c32e3f10e185098748f67bb421b378b06afe/src/spectrum/correlation.py#L151-L226
def xcorr(x, y=None, maxlags=None, norm='biased'):
    """Cross-correlation using numpy.correlate

    Estimates the cross-correlation (and autocorrelation) sequence of a
    random process of length N. By default, there is no normalisation and the
    output sequence of the cross-correlation has a length 2*N-1.

    :param array x: first data array of length N
    :param array y: second data array of length N. If not specified, computes the
        autocorrelation.
    :param int maxlags: compute cross correlation between [-maxlags:maxlags]
        when maxlags is not specified, the range of lags is [-N+1:N-1].
    :param str norm: normalisation in ['biased', 'unbiased', None, 'coeff']

    The true cross-correlation sequence is

    .. math:: r_{xy}[m] = E(x[n+m].y^*[n]) = E(x[n].y^*[n-m])

    However, in practice, only a finite segment of one realization of the
    infinite-length random process is available.

    The correlation is estimated using numpy.correlate(x,y,'full').
    Normalisation is handled by this function using the following cases:

        * 'biased': Biased estimate of the cross-correlation function
        * 'unbiased': Unbiased estimate of the cross-correlation function
        * 'coeff': Normalizes the sequence so the autocorrelations at zero
           lag is 1.0.

    :return:
        * a numpy.array containing the cross-correlation sequence (length 2*N-1)
        * lags vector

    .. note:: If x and y are not the same length, the shorter vector is
        zero-padded to the length of the longer vector.

    .. rubric:: Examples

    .. doctest::

        >>> from spectrum import xcorr
        >>> x = [1,2,3,4,5]
        >>> c, l = xcorr(x,x, maxlags=0, norm='biased')
        >>> c
        array([ 11.])

    .. seealso:: :func:`CORRELATION`.
    """
    N = len(x)
    if y is None:
        y = x
    assert len(x) == len(y), 'x and y must have the same length. Add zeros if needed'

    if maxlags is None:
        maxlags = N-1
        lags = np.arange(0, 2*N-1)
    else:
        assert maxlags <= N, 'maxlags must be less than data length'
        lags = np.arange(N-maxlags-1, N+maxlags)

    res = np.correlate(x, y, mode='full')

    if norm == 'biased':
        Nf = float(N)
        res = res[lags] / float(N)    # do not use /= !!
    elif norm == 'unbiased':
        res = res[lags] / (float(N)-abs(np.arange(-N+1, N)))[lags]
    elif norm == 'coeff':
        Nf = float(N)
        rms = pylab_rms_flat(x) * pylab_rms_flat(y)
        res = res[lags] / rms / Nf
    else:
        res = res[lags]

    lags = np.arange(-maxlags, maxlags+1)
    return res, lags
[ "def", "xcorr", "(", "x", ",", "y", "=", "None", ",", "maxlags", "=", "None", ",", "norm", "=", "'biased'", ")", ":", "N", "=", "len", "(", "x", ")", "if", "y", "is", "None", ":", "y", "=", "x", "assert", "len", "(", "x", ")", "==", "len", "(", "y", ")", ",", "'x and y must have the same length. Add zeros if needed'", "if", "maxlags", "is", "None", ":", "maxlags", "=", "N", "-", "1", "lags", "=", "np", ".", "arange", "(", "0", ",", "2", "*", "N", "-", "1", ")", "else", ":", "assert", "maxlags", "<=", "N", ",", "'maxlags must be less than data length'", "lags", "=", "np", ".", "arange", "(", "N", "-", "maxlags", "-", "1", ",", "N", "+", "maxlags", ")", "res", "=", "np", ".", "correlate", "(", "x", ",", "y", ",", "mode", "=", "'full'", ")", "if", "norm", "==", "'biased'", ":", "Nf", "=", "float", "(", "N", ")", "res", "=", "res", "[", "lags", "]", "/", "float", "(", "N", ")", "# do not use /= !!", "elif", "norm", "==", "'unbiased'", ":", "res", "=", "res", "[", "lags", "]", "/", "(", "float", "(", "N", ")", "-", "abs", "(", "np", ".", "arange", "(", "-", "N", "+", "1", ",", "N", ")", ")", ")", "[", "lags", "]", "elif", "norm", "==", "'coeff'", ":", "Nf", "=", "float", "(", "N", ")", "rms", "=", "pylab_rms_flat", "(", "x", ")", "*", "pylab_rms_flat", "(", "y", ")", "res", "=", "res", "[", "lags", "]", "/", "rms", "/", "Nf", "else", ":", "res", "=", "res", "[", "lags", "]", "lags", "=", "np", ".", "arange", "(", "-", "maxlags", ",", "maxlags", "+", "1", ")", "return", "res", ",", "lags" ]
Cross-correlation using numpy.correlate

Estimates the cross-correlation (and autocorrelation) sequence of a
random process of length N. By default, there is no normalisation and the
output sequence of the cross-correlation has a length 2*N-1.

:param array x: first data array of length N
:param array y: second data array of length N. If not specified, computes the
    autocorrelation.
:param int maxlags: compute cross correlation between [-maxlags:maxlags]
    when maxlags is not specified, the range of lags is [-N+1:N-1].
:param str norm: normalisation in ['biased', 'unbiased', None, 'coeff']

The true cross-correlation sequence is

.. math:: r_{xy}[m] = E(x[n+m].y^*[n]) = E(x[n].y^*[n-m])

However, in practice, only a finite segment of one realization of the
infinite-length random process is available.

The correlation is estimated using numpy.correlate(x,y,'full').
Normalisation is handled by this function using the following cases:

    * 'biased': Biased estimate of the cross-correlation function
    * 'unbiased': Unbiased estimate of the cross-correlation function
    * 'coeff': Normalizes the sequence so the autocorrelations at zero
       lag is 1.0.

:return:
    * a numpy.array containing the cross-correlation sequence (length 2*N-1)
    * lags vector

.. note:: If x and y are not the same length, the shorter vector is
    zero-padded to the length of the longer vector.

.. rubric:: Examples

.. doctest::

    >>> from spectrum import xcorr
    >>> x = [1,2,3,4,5]
    >>> c, l = xcorr(x,x, maxlags=0, norm='biased')
    >>> c
    array([ 11.])

.. seealso:: :func:`CORRELATION`.
[ "Cross", "-", "correlation", "using", "numpy", ".", "correlate" ]
python
valid
33.618421
Kane610/axis
axis/rtsp.py
https://github.com/Kane610/axis/blob/b2b44ce595c7b722b5e13eabcab7b91f048e1808/axis/rtsp.py#L358-L371
def state(self): """Which state the session is in. Starting - all messages needed to get stream started. Playing - keep-alive messages every self.session_timeout. """ if self.method in ['OPTIONS', 'DESCRIBE', 'SETUP', 'PLAY']: state = STATE_STARTING elif self.method in ['KEEP-ALIVE']: state = STATE_PLAYING else: state = STATE_STOPPED _LOGGER.debug('RTSP session (%s) state %s', self.host, state) return state
[ "def", "state", "(", "self", ")", ":", "if", "self", ".", "method", "in", "[", "'OPTIONS'", ",", "'DESCRIBE'", ",", "'SETUP'", ",", "'PLAY'", "]", ":", "state", "=", "STATE_STARTING", "elif", "self", ".", "method", "in", "[", "'KEEP-ALIVE'", "]", ":", "state", "=", "STATE_PLAYING", "else", ":", "state", "=", "STATE_STOPPED", "_LOGGER", ".", "debug", "(", "'RTSP session (%s) state %s'", ",", "self", ".", "host", ",", "state", ")", "return", "state" ]
Which state the session is in. Starting - all messages needed to get stream started. Playing - keep-alive messages every self.session_timeout.
[ "Which", "state", "the", "session", "is", "in", "." ]
python
train
36.142857
AnalogJ/lexicon
lexicon/providers/hetzner.py
https://github.com/AnalogJ/lexicon/blob/9330b871988753cad44fe2876a217b4c67b1fa0e/lexicon/providers/hetzner.py#L29-L54
def provider_parser(subparser): """Configure a provider parser for Hetzner""" subparser.add_argument('--auth-account', help='specify type of Hetzner account: by default Hetzner Robot ' '(robot) or Hetzner konsoleH (konsoleh)') subparser.add_argument('--auth-username', help='specify username of Hetzner account') subparser.add_argument('--auth-password', help='specify password of Hetzner account') subparser.add_argument('--linked', help='if exists, uses linked CNAME as A|AAAA|TXT record name for edit ' 'actions: by default (yes); Further restriction: Only enabled if ' 'record name or raw FQDN record identifier \'type/name/content\' is ' 'specified, and additionally for update actions the record name ' 'remains the same', default=str('yes'), choices=['yes', 'no']) subparser.add_argument('--propagated', help='waits until record is publicly propagated after succeeded ' 'create|update actions: by default (yes)', default=str('yes'), choices=['yes', 'no']) subparser.add_argument('--latency', help='specify latency, used during checks for publicly propagation ' 'and additionally for Hetzner Robot after record edits: by default ' '30s (30)', default=int(30), type=int)
[ "def", "provider_parser", "(", "subparser", ")", ":", "subparser", ".", "add_argument", "(", "'--auth-account'", ",", "help", "=", "'specify type of Hetzner account: by default Hetzner Robot '", "'(robot) or Hetzner konsoleH (konsoleh)'", ")", "subparser", ".", "add_argument", "(", "'--auth-username'", ",", "help", "=", "'specify username of Hetzner account'", ")", "subparser", ".", "add_argument", "(", "'--auth-password'", ",", "help", "=", "'specify password of Hetzner account'", ")", "subparser", ".", "add_argument", "(", "'--linked'", ",", "help", "=", "'if exists, uses linked CNAME as A|AAAA|TXT record name for edit '", "'actions: by default (yes); Further restriction: Only enabled if '", "'record name or raw FQDN record identifier \\'type/name/content\\' is '", "'specified, and additionally for update actions the record name '", "'remains the same'", ",", "default", "=", "str", "(", "'yes'", ")", ",", "choices", "=", "[", "'yes'", ",", "'no'", "]", ")", "subparser", ".", "add_argument", "(", "'--propagated'", ",", "help", "=", "'waits until record is publicly propagated after succeeded '", "'create|update actions: by default (yes)'", ",", "default", "=", "str", "(", "'yes'", ")", ",", "choices", "=", "[", "'yes'", ",", "'no'", "]", ")", "subparser", ".", "add_argument", "(", "'--latency'", ",", "help", "=", "'specify latency, used during checks for publicly propagation '", "'and additionally for Hetzner Robot after record edits: by default '", "'30s (30)'", ",", "default", "=", "int", "(", "30", ")", ",", "type", "=", "int", ")" ]
Configure a provider parser for Hetzner
[ "Configure", "a", "provider", "parser", "for", "Hetzner" ]
python
train
64
mar10/wsgidav
wsgidav/samples/mysql_dav_provider.py
https://github.com/mar10/wsgidav/blob/cec0d84222fc24bea01be1cea91729001963f172/wsgidav/samples/mysql_dav_provider.py#L211-L245
def get_content(self): """Open content as a stream for reading. See DAVResource.get_content() """ filestream = compat.StringIO() tableName, primKey = self.provider._split_path(self.path) if primKey is not None: conn = self.provider._init_connection() listFields = self.provider._get_field_list(conn, tableName) csvwriter = csv.DictWriter(filestream, listFields, extrasaction="ignore") dictFields = {} for field_name in listFields: dictFields[field_name] = field_name csvwriter.writerow(dictFields) if primKey == "_ENTIRE_CONTENTS": cursor = conn.cursor(MySQLdb.cursors.DictCursor) cursor.execute("SELECT * from " + self.provider._db + "." + tableName) result_set = cursor.fetchall() for row in result_set: csvwriter.writerow(row) cursor.close() else: row = self.provider._get_record_by_primary_key(conn, tableName, primKey) if row is not None: csvwriter.writerow(row) conn.close() # this suffices for small dbs, but # for a production big database, I imagine you would have a FileMixin that # does the retrieving and population even as the file object is being read filestream.seek(0) return filestream
[ "def", "get_content", "(", "self", ")", ":", "filestream", "=", "compat", ".", "StringIO", "(", ")", "tableName", ",", "primKey", "=", "self", ".", "provider", ".", "_split_path", "(", "self", ".", "path", ")", "if", "primKey", "is", "not", "None", ":", "conn", "=", "self", ".", "provider", ".", "_init_connection", "(", ")", "listFields", "=", "self", ".", "provider", ".", "_get_field_list", "(", "conn", ",", "tableName", ")", "csvwriter", "=", "csv", ".", "DictWriter", "(", "filestream", ",", "listFields", ",", "extrasaction", "=", "\"ignore\"", ")", "dictFields", "=", "{", "}", "for", "field_name", "in", "listFields", ":", "dictFields", "[", "field_name", "]", "=", "field_name", "csvwriter", ".", "writerow", "(", "dictFields", ")", "if", "primKey", "==", "\"_ENTIRE_CONTENTS\"", ":", "cursor", "=", "conn", ".", "cursor", "(", "MySQLdb", ".", "cursors", ".", "DictCursor", ")", "cursor", ".", "execute", "(", "\"SELECT * from \"", "+", "self", ".", "provider", ".", "_db", "+", "\".\"", "+", "tableName", ")", "result_set", "=", "cursor", ".", "fetchall", "(", ")", "for", "row", "in", "result_set", ":", "csvwriter", ".", "writerow", "(", "row", ")", "cursor", ".", "close", "(", ")", "else", ":", "row", "=", "self", ".", "provider", ".", "_get_record_by_primary_key", "(", "conn", ",", "tableName", ",", "primKey", ")", "if", "row", "is", "not", "None", ":", "csvwriter", ".", "writerow", "(", "row", ")", "conn", ".", "close", "(", ")", "# this suffices for small dbs, but", "# for a production big database, I imagine you would have a FileMixin that", "# does the retrieving and population even as the file object is being read", "filestream", ".", "seek", "(", "0", ")", "return", "filestream" ]
Open content as a stream for reading. See DAVResource.get_content()
[ "Open", "content", "as", "a", "stream", "for", "reading", "." ]
python
valid
41.028571
crunchyroll/ef-open
efopen/ef_aws_resolver.py
https://github.com/crunchyroll/ef-open/blob/59fff3761af07a59f8f1c1682f2be004bdac15f7/efopen/ef_aws_resolver.py#L182-L197
def ec2_network_network_acl_id(self, lookup, default=None): """ Args: lookup: the friendly name of the network ACL we are looking up default: the optional value to return if lookup failed; returns None if not set Returns: the ID of the network ACL, or None if no match found """ network_acl_id = EFAwsResolver.__CLIENTS["ec2"].describe_network_acls(Filters=[{ 'Name': 'tag:Name', 'Values': [lookup] }]) if len(network_acl_id["NetworkAcls"]) > 0: return network_acl_id["NetworkAcls"][0]["NetworkAclId"] else: return default
[ "def", "ec2_network_network_acl_id", "(", "self", ",", "lookup", ",", "default", "=", "None", ")", ":", "network_acl_id", "=", "EFAwsResolver", ".", "__CLIENTS", "[", "\"ec2\"", "]", ".", "describe_network_acls", "(", "Filters", "=", "[", "{", "'Name'", ":", "'tag:Name'", ",", "'Values'", ":", "[", "lookup", "]", "}", "]", ")", "if", "len", "(", "network_acl_id", "[", "\"NetworkAcls\"", "]", ")", ">", "0", ":", "return", "network_acl_id", "[", "\"NetworkAcls\"", "]", "[", "0", "]", "[", "\"NetworkAclId\"", "]", "else", ":", "return", "default" ]
Args: lookup: the friendly name of the network ACL we are looking up default: the optional value to return if lookup failed; returns None if not set Returns: the ID of the network ACL, or None if no match found
[ "Args", ":", "lookup", ":", "the", "friendly", "name", "of", "the", "network", "ACL", "we", "are", "looking", "up", "default", ":", "the", "optional", "value", "to", "return", "if", "lookup", "failed", ";", "returns", "None", "if", "not", "set", "Returns", ":", "the", "ID", "of", "the", "network", "ACL", "or", "None", "if", "no", "match", "found" ]
python
train
36.3125
nicolargo/glances
glances/outputs/glances_curses.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/outputs/glances_curses.py#L442-L453
def end(self): """Shutdown the curses window.""" if hasattr(curses, 'echo'): curses.echo() if hasattr(curses, 'nocbreak'): curses.nocbreak() if hasattr(curses, 'curs_set'): try: curses.curs_set(1) except Exception: pass curses.endwin()
[ "def", "end", "(", "self", ")", ":", "if", "hasattr", "(", "curses", ",", "'echo'", ")", ":", "curses", ".", "echo", "(", ")", "if", "hasattr", "(", "curses", ",", "'nocbreak'", ")", ":", "curses", ".", "nocbreak", "(", ")", "if", "hasattr", "(", "curses", ",", "'curs_set'", ")", ":", "try", ":", "curses", ".", "curs_set", "(", "1", ")", "except", "Exception", ":", "pass", "curses", ".", "endwin", "(", ")" ]
Shutdown the curses window.
[ "Shutdown", "the", "curses", "window", "." ]
python
train
28.666667
RockFeng0/rtsf
rtsf/p_report.py
https://github.com/RockFeng0/rtsf/blob/fbc0d57edaeca86418af3942472fcc6d3e9ce591/rtsf/p_report.py#L134-L193
def get_summary(list_all=[], **kwargs): ''' summarize the report data @param list_all: a list which save the report data @param kwargs: such as show_all: True/False report show all status cases proj_name: project name home_page: home page url ''' all_summary = [] for module in list_all: summary = { "module_name" : module['Name'], "show_all" : kwargs.get("show_all",True), "project_name" : kwargs.get("proj_name","TestProject"), "home_page" : kwargs.get("home_page",__about__.HOME_PAGE), "start_time" : "", "end_time" : "", "duration_seconds" : "", "total_case_num" : len(module["TestCases"]), "pass_cases_num" : 0, "fail_cases_num" : 0, "details" : [] } for case in module["TestCases"]: case_detail = {} case_detail["linkurl"] = "./caselogs/%s_%s.log" %(case["case_name"],case["exec_date"]) if case["status"].lower() == "pass": summary["pass_cases_num"] += 1 case_detail["c_style"] = "tr_pass" else: summary["fail_cases_num"] += 1 case_detail["c_style"] = "tr_fail" case_detail.update(case) summary["details"].append(case_detail) try: st = module["TestCases"][0].get("start_at") et = module["TestCases"][-1].get("end_at") summary["start_time"] = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(st)) summary["end_time"] = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(et)) summary["duration_seconds"] = float("%.2f" %(et - st)) except Exception as _: logger.log_warning("Will set 'start_at' and 'end_at' to 'None'") (summary["start_time"], summary["end_time"], summary["duration_seconds"]) = (None,None,None) if summary["fail_cases_num"] > 0: summary["dict_report"] = {"result":0,"message":"failure","pass":summary["pass_cases_num"],"fail":summary["fail_cases_num"]} else: summary["dict_report"] = {"result":1,"message":"success","pass":summary["pass_cases_num"],"fail":summary["fail_cases_num"]} all_summary.append(summary) return all_summary
[ "def", "get_summary", "(", "list_all", "=", "[", "]", ",", "*", "*", "kwargs", ")", ":", "all_summary", "=", "[", "]", "for", "module", "in", "list_all", ":", "summary", "=", "{", "\"module_name\"", ":", "module", "[", "'Name'", "]", ",", "\"show_all\"", ":", "kwargs", ".", "get", "(", "\"show_all\"", ",", "True", ")", ",", "\"project_name\"", ":", "kwargs", ".", "get", "(", "\"proj_name\"", ",", "\"TestProject\"", ")", ",", "\"home_page\"", ":", "kwargs", ".", "get", "(", "\"home_page\"", ",", "__about__", ".", "HOME_PAGE", ")", ",", "\"start_time\"", ":", "\"\"", ",", "\"end_time\"", ":", "\"\"", ",", "\"duration_seconds\"", ":", "\"\"", ",", "\"total_case_num\"", ":", "len", "(", "module", "[", "\"TestCases\"", "]", ")", ",", "\"pass_cases_num\"", ":", "0", ",", "\"fail_cases_num\"", ":", "0", ",", "\"details\"", ":", "[", "]", "}", "for", "case", "in", "module", "[", "\"TestCases\"", "]", ":", "case_detail", "=", "{", "}", "case_detail", "[", "\"linkurl\"", "]", "=", "\"./caselogs/%s_%s.log\"", "%", "(", "case", "[", "\"case_name\"", "]", ",", "case", "[", "\"exec_date\"", "]", ")", "if", "case", "[", "\"status\"", "]", ".", "lower", "(", ")", "==", "\"pass\"", ":", "summary", "[", "\"pass_cases_num\"", "]", "+=", "1", "case_detail", "[", "\"c_style\"", "]", "=", "\"tr_pass\"", "else", ":", "summary", "[", "\"fail_cases_num\"", "]", "+=", "1", "case_detail", "[", "\"c_style\"", "]", "=", "\"tr_fail\"", "case_detail", ".", "update", "(", "case", ")", "summary", "[", "\"details\"", "]", ".", "append", "(", "case_detail", ")", "try", ":", "st", "=", "module", "[", "\"TestCases\"", "]", "[", "0", "]", ".", "get", "(", "\"start_at\"", ")", "et", "=", "module", "[", "\"TestCases\"", "]", "[", "-", "1", "]", ".", "get", "(", "\"end_at\"", ")", "summary", "[", "\"start_time\"", "]", "=", "time", ".", "strftime", "(", "\"%Y-%m-%d %H:%M:%S\"", ",", "time", ".", "localtime", "(", "st", ")", ")", "summary", "[", "\"end_time\"", "]", "=", "time", ".", "strftime", "(", "\"%Y-%m-%d %H:%M:%S\"", ",", "time", ".", "localtime", "(", "et", ")", ")", "summary", "[", "\"duration_seconds\"", "]", "=", "float", "(", "\"%.2f\"", "%", "(", "et", "-", "st", ")", ")", "except", "Exception", "as", "_", ":", "logger", ".", "log_warning", "(", "\"Will set 'start_at' and 'end_at' to 'None'\"", ")", "(", "summary", "[", "\"start_time\"", "]", ",", "summary", "[", "\"end_time\"", "]", ",", "summary", "[", "\"duration_seconds\"", "]", ")", "=", "(", "None", ",", "None", ",", "None", ")", "if", "summary", "[", "\"fail_cases_num\"", "]", ">", "0", ":", "summary", "[", "\"dict_report\"", "]", "=", "{", "\"result\"", ":", "0", ",", "\"message\"", ":", "\"failure\"", ",", "\"pass\"", ":", "summary", "[", "\"pass_cases_num\"", "]", ",", "\"fail\"", ":", "summary", "[", "\"fail_cases_num\"", "]", "}", "else", ":", "summary", "[", "\"dict_report\"", "]", "=", "{", "\"result\"", ":", "1", ",", "\"message\"", ":", "\"success\"", ",", "\"pass\"", ":", "summary", "[", "\"pass_cases_num\"", "]", ",", "\"fail\"", ":", "summary", "[", "\"fail_cases_num\"", "]", "}", "all_summary", ".", "append", "(", "summary", ")", "return", "all_summary" ]
summarize the report data @param list_all: a list which save the report data @param kwargs: such as show_all: True/False report show all status cases proj_name: project name home_page: home page url
[ "summarize", "the", "report", "data" ]
python
train
47.85
samastur/pyimagediet
pyimagediet/process.py
https://github.com/samastur/pyimagediet/blob/480c6e171577df36e166590b031bc8891b3c9e7b/pyimagediet/process.py#L46-L55
def determine_type(filename): '''Determine the file type and return it.''' ftype = magic.from_file(filename, mime=True).decode('utf8') if ftype == 'text/plain': ftype = 'text' elif ftype == 'image/svg+xml': ftype = 'svg' else: ftype = ftype.split('/')[1] return ftype
[ "def", "determine_type", "(", "filename", ")", ":", "ftype", "=", "magic", ".", "from_file", "(", "filename", ",", "mime", "=", "True", ")", ".", "decode", "(", "'utf8'", ")", "if", "ftype", "==", "'text/plain'", ":", "ftype", "=", "'text'", "elif", "ftype", "==", "'image/svg+xml'", ":", "ftype", "=", "'svg'", "else", ":", "ftype", "=", "ftype", ".", "split", "(", "'/'", ")", "[", "1", "]", "return", "ftype" ]
Determine the file type and return it.
[ "Determine", "the", "file", "type", "and", "return", "it", "." ]
python
train
30.6
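A hedged runnable sketch for determine_type above. It assumes a python-magic version whose from_file(mime=True) returns bytes, as the .decode('utf8') in the function expects, and writes a throwaway file so there is something to sniff.

import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
    f.write('hello')
    path = f.name
try:
    print(determine_type(path))   # expected 'text', via the 'text/plain' special case
finally:
    os.unlink(path)
# other mappings per the branches above (illustrative):
#   image/jpeg    -> 'jpeg'   (generic split on '/')
#   image/svg+xml -> 'svg'    (special-cased)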
veripress/veripress
veripress/model/storages.py
https://github.com/veripress/veripress/blob/9e3df3a10eb1db32da596bf52118fe6acbe4b14a/veripress/model/storages.py#L350-L382
def get_posts(self, include_draft=False, filter_functions=None): """ Get all posts from filesystem. :param include_draft: return draft posts or not :param filter_functions: filter to apply BEFORE result being sorted :return: an iterable of Post objects (the first is the latest post) """ def posts_generator(path): """Loads valid posts one by one in the given path.""" if os.path.isdir(path): for file in os.listdir(path): filename, ext = os.path.splitext(file) format_name = get_standard_format_name(ext[1:]) if format_name is not None and re.match( r'\d{4}-\d{2}-\d{2}-.+', filename): # the format is supported and the filename is valid, # so load this post post = Post() post.format = format_name post.meta, post.raw_content = FileStorage.read_file( os.path.join(path, file)) post.rel_url = filename.replace('-', '/', 3) + '/' post.unique_key = '/post/' + post.rel_url yield post posts_path = os.path.join(current_app.instance_path, 'posts') result = filter(lambda p: include_draft or not p.is_draft, posts_generator(posts_path)) result = self._filter_result(result, filter_functions) return sorted(result, key=lambda p: p.created, reverse=True)
[ "def", "get_posts", "(", "self", ",", "include_draft", "=", "False", ",", "filter_functions", "=", "None", ")", ":", "def", "posts_generator", "(", "path", ")", ":", "\"\"\"Loads valid posts one by one in the given path.\"\"\"", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "for", "file", "in", "os", ".", "listdir", "(", "path", ")", ":", "filename", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "file", ")", "format_name", "=", "get_standard_format_name", "(", "ext", "[", "1", ":", "]", ")", "if", "format_name", "is", "not", "None", "and", "re", ".", "match", "(", "r'\\d{4}-\\d{2}-\\d{2}-.+'", ",", "filename", ")", ":", "# the format is supported and the filename is valid,", "# so load this post", "post", "=", "Post", "(", ")", "post", ".", "format", "=", "format_name", "post", ".", "meta", ",", "post", ".", "raw_content", "=", "FileStorage", ".", "read_file", "(", "os", ".", "path", ".", "join", "(", "path", ",", "file", ")", ")", "post", ".", "rel_url", "=", "filename", ".", "replace", "(", "'-'", ",", "'/'", ",", "3", ")", "+", "'/'", "post", ".", "unique_key", "=", "'/post/'", "+", "post", ".", "rel_url", "yield", "post", "posts_path", "=", "os", ".", "path", ".", "join", "(", "current_app", ".", "instance_path", ",", "'posts'", ")", "result", "=", "filter", "(", "lambda", "p", ":", "include_draft", "or", "not", "p", ".", "is_draft", ",", "posts_generator", "(", "posts_path", ")", ")", "result", "=", "self", ".", "_filter_result", "(", "result", ",", "filter_functions", ")", "return", "sorted", "(", "result", ",", "key", "=", "lambda", "p", ":", "p", ".", "created", ",", "reverse", "=", "True", ")" ]
Get all posts from filesystem. :param include_draft: return draft posts or not :param filter_functions: filter to apply BEFORE result being sorted :return: an iterable of Post objects (the first is the latest post)
[ "Get", "all", "posts", "from", "filesystem", "." ]
python
train
47.878788
blockstack/virtualchain
virtualchain/lib/ecdsalib.py
https://github.com/blockstack/virtualchain/blob/fcfc970064ca7dfcab26ebd3ab955870a763ea39/virtualchain/lib/ecdsalib.py#L65-L73
def finalize(self): """ Get the base64-encoded signature itself. Can only be called once. """ signature = self.signer.finalize() sig_r, sig_s = decode_dss_signature(signature) sig_b64 = encode_signature(sig_r, sig_s) return sig_b64
[ "def", "finalize", "(", "self", ")", ":", "signature", "=", "self", ".", "signer", ".", "finalize", "(", ")", "sig_r", ",", "sig_s", "=", "decode_dss_signature", "(", "signature", ")", "sig_b64", "=", "encode_signature", "(", "sig_r", ",", "sig_s", ")", "return", "sig_b64" ]
Get the base64-encoded signature itself. Can only be called once.
[ "Get", "the", "base64", "-", "encoded", "signature", "itself", ".", "Can", "only", "be", "called", "once", "." ]
python
train
31.888889
abseil/abseil-py
absl/logging/__init__.py
https://github.com/abseil/abseil-py/blob/9d73fdaa23a6b6726aa5731390f388c0c6250ee5/absl/logging/__init__.py#L801-L841
def emit(self, record): """Prints a record out to some streams. If FLAGS.logtostderr is set, it will print to sys.stderr ONLY. If FLAGS.alsologtostderr is set, it will print to sys.stderr. If FLAGS.logtostderr is not set, it will log to the stream associated with the current thread. Args: record: logging.LogRecord, the record to emit. """ # People occasionally call logging functions at import time before # our flags may have even been defined yet, let alone even parsed, as we # rely on the C++ side to define some flags for us and app init to # deal with parsing. Match the C++ library behavior of notify and emit # such messages to stderr. It encourages people to clean-up and does # not hide the message. level = record.levelno if not FLAGS.is_parsed(): # Also implies "before flag has been defined". global _warn_preinit_stderr if _warn_preinit_stderr: sys.stderr.write( 'WARNING: Logging before flag parsing goes to stderr.\n') _warn_preinit_stderr = False self._log_to_stderr(record) elif FLAGS['logtostderr'].value: self._log_to_stderr(record) else: super(PythonHandler, self).emit(record) stderr_threshold = converter.string_to_standard( FLAGS['stderrthreshold'].value) if ((FLAGS['alsologtostderr'].value or level >= stderr_threshold) and self.stream != sys.stderr): self._log_to_stderr(record) # Die when the record is created from ABSLLogger and level is FATAL. if _is_absl_fatal_record(record): self.flush() # Flush the log before dying. # In threaded python, sys.exit() from a non-main thread only # exits the thread in question. os.abort()
[ "def", "emit", "(", "self", ",", "record", ")", ":", "# People occasionally call logging functions at import time before", "# our flags may have even been defined yet, let alone even parsed, as we", "# rely on the C++ side to define some flags for us and app init to", "# deal with parsing. Match the C++ library behavior of notify and emit", "# such messages to stderr. It encourages people to clean-up and does", "# not hide the message.", "level", "=", "record", ".", "levelno", "if", "not", "FLAGS", ".", "is_parsed", "(", ")", ":", "# Also implies \"before flag has been defined\".", "global", "_warn_preinit_stderr", "if", "_warn_preinit_stderr", ":", "sys", ".", "stderr", ".", "write", "(", "'WARNING: Logging before flag parsing goes to stderr.\\n'", ")", "_warn_preinit_stderr", "=", "False", "self", ".", "_log_to_stderr", "(", "record", ")", "elif", "FLAGS", "[", "'logtostderr'", "]", ".", "value", ":", "self", ".", "_log_to_stderr", "(", "record", ")", "else", ":", "super", "(", "PythonHandler", ",", "self", ")", ".", "emit", "(", "record", ")", "stderr_threshold", "=", "converter", ".", "string_to_standard", "(", "FLAGS", "[", "'stderrthreshold'", "]", ".", "value", ")", "if", "(", "(", "FLAGS", "[", "'alsologtostderr'", "]", ".", "value", "or", "level", ">=", "stderr_threshold", ")", "and", "self", ".", "stream", "!=", "sys", ".", "stderr", ")", ":", "self", ".", "_log_to_stderr", "(", "record", ")", "# Die when the record is created from ABSLLogger and level is FATAL.", "if", "_is_absl_fatal_record", "(", "record", ")", ":", "self", ".", "flush", "(", ")", "# Flush the log before dying.", "# In threaded python, sys.exit() from a non-main thread only", "# exits the thread in question.", "os", ".", "abort", "(", ")" ]
Prints a record out to some streams. If FLAGS.logtostderr is set, it will print to sys.stderr ONLY. If FLAGS.alsologtostderr is set, it will print to sys.stderr. If FLAGS.logtostderr is not set, it will log to the stream associated with the current thread. Args: record: logging.LogRecord, the record to emit.
[ "Prints", "a", "record", "out", "to", "some", "streams", "." ]
python
train
42.195122
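A minimal sketch of the pre-parse fallback described in the docstring above; the absl imports are the library's real entry points, everything else is illustrative.

from absl import app, logging

def main(argv):
    # flags are parsed by now; emit() routes this through the normal
    # handler stream, honoring --logtostderr / --alsologtostderr
    logging.info('inside main')

if __name__ == '__main__':
    # flags not parsed yet: emit() falls back to stderr and prints the
    # one-time 'Logging before flag parsing goes to stderr.' warning
    logging.info('before app.run()')
    app.run(main)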
lk-geimfari/mimesis
mimesis/providers/person.py
https://github.com/lk-geimfari/mimesis/blob/4b16ee7a8dba6281a904654a88dbb4b052869fc5/mimesis/providers/person.py#L115-L134
def title(self, gender: Optional[Gender] = None, title_type: Optional[TitleType] = None) -> str: """Generate a random title for name. You can generate random prefix or suffix for name using this method. :param gender: The gender. :param title_type: TitleType enum object. :return: The title. :raises NonEnumerableError: if gender or title_type in incorrect format. :Example: PhD. """ gender_key = self._validate_enum(gender, Gender) title_key = self._validate_enum(title_type, TitleType) titles = self._data['title'][gender_key][title_key] return self.random.choice(titles)
[ "def", "title", "(", "self", ",", "gender", ":", "Optional", "[", "Gender", "]", "=", "None", ",", "title_type", ":", "Optional", "[", "TitleType", "]", "=", "None", ")", "->", "str", ":", "gender_key", "=", "self", ".", "_validate_enum", "(", "gender", ",", "Gender", ")", "title_key", "=", "self", ".", "_validate_enum", "(", "title_type", ",", "TitleType", ")", "titles", "=", "self", ".", "_data", "[", "'title'", "]", "[", "gender_key", "]", "[", "title_key", "]", "return", "self", ".", "random", ".", "choice", "(", "titles", ")" ]
Generate a random title for a name.

You can generate a random prefix or suffix
for a name using this method.

:param gender: The gender.
:param title_type: TitleType enum object.
:return: The title.
:raises NonEnumerableError: if gender or title_type is in an incorrect format.

:Example:
PhD.
[ "Generate", "a", "random", "title", "for", "a", "name", "." ]
python
train
34.4
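A short usage sketch for the provider above; the `Person` class and the enum import paths follow mimesis' public API, and the locale string is an arbitrary choice.

from mimesis import Person
from mimesis.enums import Gender, TitleType

person = Person('en')
# both arguments are optional; passing None lets _validate_enum pick a
# random enum member internally
person.title(gender=Gender.FEMALE, title_type=TitleType.ACADEMIC)  # e.g. 'PhD'
person.title()  # random gender and random title type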
intel-analytics/BigDL
pyspark/bigdl/nn/criterion.py
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/criterion.py#L44-L63
def forward(self, input, target): """ NB: It's for debug only, please use optimizer.optimize() in production. Takes an input object, and computes the corresponding loss of the criterion, compared with `target` :param input: ndarray or list of ndarray :param target: ndarray or list of ndarray :return: value of loss """ jinput, input_is_table = Layer.check_input(input) jtarget, target_is_table = Layer.check_input(target) output = callBigDlFunc(self.bigdl_type, "criterionForward", self.value, jinput, input_is_table, jtarget, target_is_table) return output
[ "def", "forward", "(", "self", ",", "input", ",", "target", ")", ":", "jinput", ",", "input_is_table", "=", "Layer", ".", "check_input", "(", "input", ")", "jtarget", ",", "target_is_table", "=", "Layer", ".", "check_input", "(", "target", ")", "output", "=", "callBigDlFunc", "(", "self", ".", "bigdl_type", ",", "\"criterionForward\"", ",", "self", ".", "value", ",", "jinput", ",", "input_is_table", ",", "jtarget", ",", "target_is_table", ")", "return", "output" ]
NB: It's for debug only; please use optimizer.optimize() in production.
Takes an input object and computes the corresponding loss of the criterion,
compared with `target`.

:param input: ndarray or list of ndarray
:param target: ndarray or list of ndarray
:return: value of loss
[ "NB", ":", "It", "s", "for", "debug", "only", "please", "use", "optimizer", ".", "optimize", "()", "in", "production", ".", "Takes", "an", "input", "object", "and", "computes", "the", "corresponding", "loss", "of", "the", "criterion", "compared", "with", "target" ]
python
test
41.1
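A debug-only usage sketch, as the docstring advises; `MSECriterion` is assumed to live in the same `bigdl.nn.criterion` module, and running it requires an initialized BigDL engine.

import numpy as np
from bigdl.nn.criterion import MSECriterion

criterion = MSECriterion()
# forward() ships both ndarrays to the JVM side and returns the scalar loss
loss = criterion.forward(np.array([1.0, 2.0]), np.array([1.5, 2.5]))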
bpython/curtsies
curtsies/input.py
https://github.com/bpython/curtsies/blob/223e42b97fbf6c86b479ed4f0963a067333c5a63/curtsies/input.py#L129-L162
def _wait_for_read_ready_or_timeout(self, timeout): """Returns tuple of whether stdin is ready to read and an event. If an event is returned, that event is more pressing than reading bytes on stdin to create a keyboard input event. If stdin is ready, either there are bytes to read or a SIGTSTP triggered by dsusp has been received""" remaining_timeout = timeout t0 = time.time() while True: try: (rs, _, _) = select.select( [self.in_stream.fileno()] + self.readers, [], [], remaining_timeout) if not rs: return False, None r = rs[0] # if there's more than one, get it in the next loop if r == self.in_stream.fileno(): return True, None else: os.read(r, 1024) if self.queued_interrupting_events: return False, self.queued_interrupting_events.pop(0) elif remaining_timeout is not None: remaining_timeout = max(0, t0 + timeout - time.time()) continue else: continue except select.error: if self.sigints: return False, self.sigints.pop() if remaining_timeout is not None: remaining_timeout = max(timeout - (time.time() - t0), 0)
[ "def", "_wait_for_read_ready_or_timeout", "(", "self", ",", "timeout", ")", ":", "remaining_timeout", "=", "timeout", "t0", "=", "time", ".", "time", "(", ")", "while", "True", ":", "try", ":", "(", "rs", ",", "_", ",", "_", ")", "=", "select", ".", "select", "(", "[", "self", ".", "in_stream", ".", "fileno", "(", ")", "]", "+", "self", ".", "readers", ",", "[", "]", ",", "[", "]", ",", "remaining_timeout", ")", "if", "not", "rs", ":", "return", "False", ",", "None", "r", "=", "rs", "[", "0", "]", "# if there's more than one, get it in the next loop", "if", "r", "==", "self", ".", "in_stream", ".", "fileno", "(", ")", ":", "return", "True", ",", "None", "else", ":", "os", ".", "read", "(", "r", ",", "1024", ")", "if", "self", ".", "queued_interrupting_events", ":", "return", "False", ",", "self", ".", "queued_interrupting_events", ".", "pop", "(", "0", ")", "elif", "remaining_timeout", "is", "not", "None", ":", "remaining_timeout", "=", "max", "(", "0", ",", "t0", "+", "timeout", "-", "time", ".", "time", "(", ")", ")", "continue", "else", ":", "continue", "except", "select", ".", "error", ":", "if", "self", ".", "sigints", ":", "return", "False", ",", "self", ".", "sigints", ".", "pop", "(", ")", "if", "remaining_timeout", "is", "not", "None", ":", "remaining_timeout", "=", "max", "(", "timeout", "-", "(", "time", ".", "time", "(", ")", "-", "t0", ")", ",", "0", ")" ]
Returns tuple of whether stdin is ready to read and an event. If an event is returned, that event is more pressing than reading bytes on stdin to create a keyboard input event. If stdin is ready, either there are bytes to read or a SIGTSTP triggered by dsusp has been received
[ "Returns", "tuple", "of", "whether", "stdin", "is", "ready", "to", "read", "and", "an", "event", "." ]
python
train
43.852941
projectshift/shift-schema
shiftschema/schema.py
https://github.com/projectshift/shift-schema/blob/07787b540d3369bb37217ffbfbe629118edaf0eb/shiftschema/schema.py#L41-L54
def has_property(self, property_name): """ Check if schema has property :param property_name: str, name to check :return: bool """ if property_name in self.properties: return True elif property_name in self.entities: return True elif property_name in self.collections: return True else: return False
[ "def", "has_property", "(", "self", ",", "property_name", ")", ":", "if", "property_name", "in", "self", ".", "properties", ":", "return", "True", "elif", "property_name", "in", "self", ".", "entities", ":", "return", "True", "elif", "property_name", "in", "self", ".", "collections", ":", "return", "True", "else", ":", "return", "False" ]
Check if schema has property :param property_name: str, name to check :return: bool
[ "Check", "if", "schema", "has", "property", ":", "param", "property_name", ":", "str", "name", "to", "check", ":", "return", ":", "bool" ]
python
train
29
ngmarchant/oasis
oasis/kad.py
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/kad.py#L133-L145
def _update_estimate_and_sampler(self, ell, ell_hat, weight, extra_info, **kwargs): """Update the BB models and the estimates""" stratum_idx = extra_info['stratum'] self._BB_TP.update(ell*ell_hat, stratum_idx) self._BB_PP.update(ell_hat, stratum_idx) self._BB_P.update(ell, stratum_idx) # Update model covariance matrix for stratum_idx self._update_cov_model(strata_to_update = [stratum_idx]) # Update F-measure estimate, estimator variance, exp. variance decrease self._update_estimates()
[ "def", "_update_estimate_and_sampler", "(", "self", ",", "ell", ",", "ell_hat", ",", "weight", ",", "extra_info", ",", "*", "*", "kwargs", ")", ":", "stratum_idx", "=", "extra_info", "[", "'stratum'", "]", "self", ".", "_BB_TP", ".", "update", "(", "ell", "*", "ell_hat", ",", "stratum_idx", ")", "self", ".", "_BB_PP", ".", "update", "(", "ell_hat", ",", "stratum_idx", ")", "self", ".", "_BB_P", ".", "update", "(", "ell", ",", "stratum_idx", ")", "# Update model covariance matrix for stratum_idx", "self", ".", "_update_cov_model", "(", "strata_to_update", "=", "[", "stratum_idx", "]", ")", "# Update F-measure estimate, estimator variance, exp. variance decrease", "self", ".", "_update_estimates", "(", ")" ]
Update the BB models and the estimates
[ "Update", "the", "BB", "models", "and", "the", "estimates" ]
python
train
45.230769
connectordb/connectordb-python
connectordb/_datapointarray.py
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_datapointarray.py#L86-L93
def loadJSON(self, filename): """Adds the data from a JSON file. The file is expected to be in datapoint format:: d = DatapointArray().loadJSON("myfile.json") """ with open(filename, "r") as f: self.merge(json.load(f)) return self
[ "def", "loadJSON", "(", "self", ",", "filename", ")", ":", "with", "open", "(", "filename", ",", "\"r\"", ")", "as", "f", ":", "self", ".", "merge", "(", "json", ".", "load", "(", "f", ")", ")", "return", "self" ]
Adds the data from a JSON file. The file is expected to be in datapoint format:: d = DatapointArray().loadJSON("myfile.json")
[ "Adds", "the", "data", "from", "a", "JSON", "file", ".", "The", "file", "is", "expected", "to", "be", "in", "datapoint", "format", "::" ]
python
test
35
timofurrer/w1thermsensor
w1thermsensor/cli.py
https://github.com/timofurrer/w1thermsensor/blob/8ac4fbb85e0c247dbb39e8b178cca0a975adc332/w1thermsensor/cli.py#L175-L213
def get(id_, hwid, type_, unit, precision, as_json): """Get temperature of a specific sensor""" if id_ and (hwid or type_): raise click.BadOptionUsage( "If --id is given --hwid and --type are not allowed." ) if id_: try: sensor = W1ThermSensor.get_available_sensors()[id_ - 1] except IndexError: raise click.BadOptionUsage( "No sensor with id {0} available. " "Use the ls command to show all available sensors.".format(id_) ) else: sensor = W1ThermSensor(type_, hwid) if precision: sensor.set_precision(precision, persist=False) temperature = sensor.get_temperature(unit) if as_json: data = { "hwid": sensor.id, "type": sensor.type_name, "temperature": temperature, "unit": unit, } click.echo(json.dumps(data, indent=4, sort_keys=True)) else: click.echo( "Sensor {0} measured temperature: {1} {2}".format( click.style(sensor.id, bold=True), click.style(str(temperature), bold=True), click.style(unit, bold=True), ) )
[ "def", "get", "(", "id_", ",", "hwid", ",", "type_", ",", "unit", ",", "precision", ",", "as_json", ")", ":", "if", "id_", "and", "(", "hwid", "or", "type_", ")", ":", "raise", "click", ".", "BadOptionUsage", "(", "\"If --id is given --hwid and --type are not allowed.\"", ")", "if", "id_", ":", "try", ":", "sensor", "=", "W1ThermSensor", ".", "get_available_sensors", "(", ")", "[", "id_", "-", "1", "]", "except", "IndexError", ":", "raise", "click", ".", "BadOptionUsage", "(", "\"No sensor with id {0} available. \"", "\"Use the ls command to show all available sensors.\"", ".", "format", "(", "id_", ")", ")", "else", ":", "sensor", "=", "W1ThermSensor", "(", "type_", ",", "hwid", ")", "if", "precision", ":", "sensor", ".", "set_precision", "(", "precision", ",", "persist", "=", "False", ")", "temperature", "=", "sensor", ".", "get_temperature", "(", "unit", ")", "if", "as_json", ":", "data", "=", "{", "\"hwid\"", ":", "sensor", ".", "id", ",", "\"type\"", ":", "sensor", ".", "type_name", ",", "\"temperature\"", ":", "temperature", ",", "\"unit\"", ":", "unit", ",", "}", "click", ".", "echo", "(", "json", ".", "dumps", "(", "data", ",", "indent", "=", "4", ",", "sort_keys", "=", "True", ")", ")", "else", ":", "click", ".", "echo", "(", "\"Sensor {0} measured temperature: {1} {2}\"", ".", "format", "(", "click", ".", "style", "(", "sensor", ".", "id", ",", "bold", "=", "True", ")", ",", "click", ".", "style", "(", "str", "(", "temperature", ")", ",", "bold", "=", "True", ")", ",", "click", ".", "style", "(", "unit", ",", "bold", "=", "True", ")", ",", ")", ")" ]
Get temperature of a specific sensor
[ "Get", "temperature", "of", "a", "specific", "sensor" ]
python
train
30.820513
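The Python API the `get` command wraps, for comparison; `get_available_sensors` is the same classmethod the command indexes into with its 1-based `--id`.

from w1thermsensor import W1ThermSensor

for i, sensor in enumerate(W1ThermSensor.get_available_sensors(), start=1):
    # i mirrors the CLI's 1-based --id over this same list
    print(i, sensor.id, sensor.get_temperature())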
numba/llvmlite
llvmlite/ir/builder.py
https://github.com/numba/llvmlite/blob/fcadf8af11947f3fd041c5d6526c5bf231564883/llvmlite/ir/builder.py#L860-L868
def gep(self, ptr, indices, inbounds=False, name=''): """ Compute effective address (getelementptr): name = getelementptr ptr, <indices...> """ instr = instructions.GEPInstr(self.block, ptr, indices, inbounds=inbounds, name=name) self._insert(instr) return instr
[ "def", "gep", "(", "self", ",", "ptr", ",", "indices", ",", "inbounds", "=", "False", ",", "name", "=", "''", ")", ":", "instr", "=", "instructions", ".", "GEPInstr", "(", "self", ".", "block", ",", "ptr", ",", "indices", ",", "inbounds", "=", "inbounds", ",", "name", "=", "name", ")", "self", ".", "_insert", "(", "instr", ")", "return", "instr" ]
Compute effective address (getelementptr): name = getelementptr ptr, <indices...>
[ "Compute", "effective", "address", "(", "getelementptr", ")", ":", "name", "=", "getelementptr", "ptr", "<indices", "...", ">" ]
python
train
39.111111
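A self-contained sketch that uses `gep` to index the first element of an array argument; these are standard `llvmlite.ir` calls, and the function name is arbitrary.

import llvmlite.ir as ir

i32 = ir.IntType(32)
module = ir.Module(name='demo')
fnty = ir.FunctionType(i32, [ir.ArrayType(i32, 4).as_pointer()])
func = ir.Function(module, fnty, name='first')
builder = ir.IRBuilder(func.append_basic_block('entry'))

zero = ir.Constant(i32, 0)
# emits: %ptr = getelementptr inbounds [4 x i32], [4 x i32]* %arg, i32 0, i32 0
ptr = builder.gep(func.args[0], [zero, zero], inbounds=True, name='ptr')
builder.ret(builder.load(ptr))
print(module)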
pyviz/holoviews
holoviews/plotting/bokeh/util.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/bokeh/util.py#L600-L609
def cds_column_replace(source, data): """ Determine if the CDS.data requires a full replacement or simply needs to be updated. A replacement is required if untouched columns are not the same length as the columns being updated. """ current_length = [len(v) for v in source.data.values() if isinstance(v, (list, np.ndarray))] new_length = [len(v) for v in data.values() if isinstance(v, (list, np.ndarray))] untouched = [k for k in source.data if k not in data] return bool(untouched and current_length and new_length and current_length[0] != new_length[0])
[ "def", "cds_column_replace", "(", "source", ",", "data", ")", ":", "current_length", "=", "[", "len", "(", "v", ")", "for", "v", "in", "source", ".", "data", ".", "values", "(", ")", "if", "isinstance", "(", "v", ",", "(", "list", ",", "np", ".", "ndarray", ")", ")", "]", "new_length", "=", "[", "len", "(", "v", ")", "for", "v", "in", "data", ".", "values", "(", ")", "if", "isinstance", "(", "v", ",", "(", "list", ",", "np", ".", "ndarray", ")", ")", "]", "untouched", "=", "[", "k", "for", "k", "in", "source", ".", "data", "if", "k", "not", "in", "data", "]", "return", "bool", "(", "untouched", "and", "current_length", "and", "new_length", "and", "current_length", "[", "0", "]", "!=", "new_length", "[", "0", "]", ")" ]
Determine if the CDS.data requires a full replacement or simply needs to be updated. A replacement is required if untouched columns are not the same length as the columns being updated.
[ "Determine", "if", "the", "CDS", ".", "data", "requires", "a", "full", "replacement", "or", "simply", "needs", "to", "be", "updated", ".", "A", "replacement", "is", "required", "if", "untouched", "columns", "are", "not", "the", "same", "length", "as", "the", "columns", "being", "updated", "." ]
python
train
58.3
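The length check in isolation; a `SimpleNamespace` stands in for a bokeh `ColumnDataSource`, since only its `.data` mapping is consulted.

import numpy as np
from types import SimpleNamespace

source = SimpleNamespace(data={'x': np.arange(3), 'y': np.arange(3)})
# 'y' is untouched but would be shorter than the new 'x': full replace needed
cds_column_replace(source, {'x': np.arange(5)})   # True
# lengths still agree, so an in-place update suffices
cds_column_replace(source, {'x': np.zeros(3)})    # False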
pmacosta/pcsv
pcsv/dsort.py
https://github.com/pmacosta/pcsv/blob/cd1588c19b0cd58c38bc672e396db940f88ffbd7/pcsv/dsort.py#L37-L93
def dsort(fname, order, has_header=True, frow=0, ofname=None): r""" Sort file data. :param fname: Name of the comma-separated values file to sort :type fname: FileNameExists_ :param order: Sort order :type order: :ref:`CsvColFilter` :param has_header: Flag that indicates whether the comma-separated values file to sort has column headers in its first line (True) or not (False) :type has_header: boolean :param frow: First data row (starting from 1). If 0 the row where data starts is auto-detected as the first row that has a number (integer of float) in at least one of its columns :type frow: NonNegativeInteger_ :param ofname: Name of the output comma-separated values file, the file that will contain the sorted data. If None the sorting is done "in place" :type ofname: FileName_ or None .. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]] .. Auto-generated exceptions documentation for pcsv.dsort.dsort :raises: * OSError (File *[fname]* could not be found) * RuntimeError (Argument \`fname\` is not valid) * RuntimeError (Argument \`frow\` is not valid) * RuntimeError (Argument \`has_header\` is not valid) * RuntimeError (Argument \`ofname\` is not valid) * RuntimeError (Argument \`order\` is not valid) * RuntimeError (Column headers are not unique in file *[fname]*) * RuntimeError (File *[fname]* has no valid data) * RuntimeError (File *[fname]* is empty) * RuntimeError (Invalid column specification) * ValueError (Column *[column_identifier]* not found) .. [[[end]]] """ ofname = fname if ofname is None else ofname obj = CsvFile(fname=fname, has_header=has_header, frow=frow) obj.dsort(order) obj.write(fname=ofname, header=has_header, append=False)
[ "def", "dsort", "(", "fname", ",", "order", ",", "has_header", "=", "True", ",", "frow", "=", "0", ",", "ofname", "=", "None", ")", ":", "ofname", "=", "fname", "if", "ofname", "is", "None", "else", "ofname", "obj", "=", "CsvFile", "(", "fname", "=", "fname", ",", "has_header", "=", "has_header", ",", "frow", "=", "frow", ")", "obj", ".", "dsort", "(", "order", ")", "obj", ".", "write", "(", "fname", "=", "ofname", ",", "header", "=", "has_header", ",", "append", "=", "False", ")" ]
r""" Sort file data. :param fname: Name of the comma-separated values file to sort :type fname: FileNameExists_ :param order: Sort order :type order: :ref:`CsvColFilter` :param has_header: Flag that indicates whether the comma-separated values file to sort has column headers in its first line (True) or not (False) :type has_header: boolean :param frow: First data row (starting from 1). If 0 the row where data starts is auto-detected as the first row that has a number (integer of float) in at least one of its columns :type frow: NonNegativeInteger_ :param ofname: Name of the output comma-separated values file, the file that will contain the sorted data. If None the sorting is done "in place" :type ofname: FileName_ or None .. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]] .. Auto-generated exceptions documentation for pcsv.dsort.dsort :raises: * OSError (File *[fname]* could not be found) * RuntimeError (Argument \`fname\` is not valid) * RuntimeError (Argument \`frow\` is not valid) * RuntimeError (Argument \`has_header\` is not valid) * RuntimeError (Argument \`ofname\` is not valid) * RuntimeError (Argument \`order\` is not valid) * RuntimeError (Column headers are not unique in file *[fname]*) * RuntimeError (File *[fname]* has no valid data) * RuntimeError (File *[fname]* is empty) * RuntimeError (Invalid column specification) * ValueError (Column *[column_identifier]* not found) .. [[[end]]]
[ "r", "Sort", "file", "data", "." ]
python
train
33.350877
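A usage sketch under the assumption that a bare column name is a valid sort order; pcsv's `CsvColFilter` also supports richer per-column specifications, and the file names here are hypothetical.

from pcsv.dsort import dsort

# sort data.csv by the 'name' column into a new file; omitting ofname
# would rewrite data.csv in place
dsort(fname='data.csv', order='name', ofname='sorted.csv')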
alertot/detectem
detectem/core.py
https://github.com/alertot/detectem/blob/b1ecc3543b7c44ee76c4cac0d3896a7747bf86c1/detectem/core.py#L67-L93
def mark_entries(self, entries): ''' Mark one entry as main entry and the rest as resource entry. Main entry is the entry that contain response's body of the requested URL. ''' for entry in entries: self._set_entry_type(entry, RESOURCE_ENTRY) # If first entry doesn't have a redirect, set is as main entry main_entry = entries[0] main_location = self._get_location(main_entry) if not main_location: self._set_entry_type(main_entry, MAIN_ENTRY) return # Resolve redirected URL and see if it's in the rest of entries main_url = urllib.parse.urljoin(get_url(main_entry), main_location) for entry in entries[1:]: url = get_url(entry) if url == main_url: self._set_entry_type(entry, MAIN_ENTRY) break else: # In fail case, set the first entry self._set_entry_type(main_entry, MAIN_ENTRY)
[ "def", "mark_entries", "(", "self", ",", "entries", ")", ":", "for", "entry", "in", "entries", ":", "self", ".", "_set_entry_type", "(", "entry", ",", "RESOURCE_ENTRY", ")", "# If first entry doesn't have a redirect, set is as main entry", "main_entry", "=", "entries", "[", "0", "]", "main_location", "=", "self", ".", "_get_location", "(", "main_entry", ")", "if", "not", "main_location", ":", "self", ".", "_set_entry_type", "(", "main_entry", ",", "MAIN_ENTRY", ")", "return", "# Resolve redirected URL and see if it's in the rest of entries", "main_url", "=", "urllib", ".", "parse", ".", "urljoin", "(", "get_url", "(", "main_entry", ")", ",", "main_location", ")", "for", "entry", "in", "entries", "[", "1", ":", "]", ":", "url", "=", "get_url", "(", "entry", ")", "if", "url", "==", "main_url", ":", "self", ".", "_set_entry_type", "(", "entry", ",", "MAIN_ENTRY", ")", "break", "else", ":", "# In fail case, set the first entry", "self", ".", "_set_entry_type", "(", "main_entry", ",", "MAIN_ENTRY", ")" ]
Mark one entry as main entry and the rest as resource entry.

Main entry is the entry that contains the response's body of the
requested URL.
[ "Mark", "one", "entry", "as", "main", "entry", "and", "the", "rest", "as", "resource", "entry", "." ]
python
train
36.592593
pjuren/pyokit
src/pyokit/io/genomeAlignment.py
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/io/genomeAlignment.py#L204-L235
def build_genome_alignment_from_file(ga_path, ref_spec, idx_path=None, verbose=False): """ build a genome alignment by loading from a single MAF file. :param ga_path: the path to the file to load. :param ref_spec: which species in the MAF file is the reference? :param idx_path: if provided, use this index to generate a just-in-time genome alignment, instead of loading the file immediately. """ blocks = [] if (idx_path is not None): bound_iter = functools.partial(genome_alignment_iterator, reference_species=ref_spec) hash_func = JustInTimeGenomeAlignmentBlock.build_hash factory = IndexedFile(None, bound_iter, hash_func) factory.read_index(idx_path, ga_path, verbose=verbose) pind = None for k in factory: if verbose: if pind is None: total = len(factory) pind = ProgressIndicator(totalToDo=total, messagePrefix="completed", messageSuffix="building alignment blocks ") pind.done += 1 pind.showProgress() blocks.append(JustInTimeGenomeAlignmentBlock(factory, k)) else: for b in genome_alignment_iterator(ga_path, ref_spec, verbose=verbose): blocks.append(b) return GenomeAlignment(blocks, verbose)
[ "def", "build_genome_alignment_from_file", "(", "ga_path", ",", "ref_spec", ",", "idx_path", "=", "None", ",", "verbose", "=", "False", ")", ":", "blocks", "=", "[", "]", "if", "(", "idx_path", "is", "not", "None", ")", ":", "bound_iter", "=", "functools", ".", "partial", "(", "genome_alignment_iterator", ",", "reference_species", "=", "ref_spec", ")", "hash_func", "=", "JustInTimeGenomeAlignmentBlock", ".", "build_hash", "factory", "=", "IndexedFile", "(", "None", ",", "bound_iter", ",", "hash_func", ")", "factory", ".", "read_index", "(", "idx_path", ",", "ga_path", ",", "verbose", "=", "verbose", ")", "pind", "=", "None", "for", "k", "in", "factory", ":", "if", "verbose", ":", "if", "pind", "is", "None", ":", "total", "=", "len", "(", "factory", ")", "pind", "=", "ProgressIndicator", "(", "totalToDo", "=", "total", ",", "messagePrefix", "=", "\"completed\"", ",", "messageSuffix", "=", "\"building alignment blocks \"", ")", "pind", ".", "done", "+=", "1", "pind", ".", "showProgress", "(", ")", "blocks", ".", "append", "(", "JustInTimeGenomeAlignmentBlock", "(", "factory", ",", "k", ")", ")", "else", ":", "for", "b", "in", "genome_alignment_iterator", "(", "ga_path", ",", "ref_spec", ",", "verbose", "=", "verbose", ")", ":", "blocks", ".", "append", "(", "b", ")", "return", "GenomeAlignment", "(", "blocks", ",", "verbose", ")" ]
build a genome alignment by loading from a single MAF file. :param ga_path: the path to the file to load. :param ref_spec: which species in the MAF file is the reference? :param idx_path: if provided, use this index to generate a just-in-time genome alignment, instead of loading the file immediately.
[ "build", "a", "genome", "alignment", "by", "loading", "from", "a", "single", "MAF", "file", "." ]
python
train
40.9375
mvn23/pyotgw
pyotgw/pyotgw.py
https://github.com/mvn23/pyotgw/blob/7612378ef4332b250176505af33e7536d6c9da78/pyotgw/pyotgw.py#L800-L810
async def _send_report(self, status): """ Call all subscribed coroutines in _notify whenever a status update occurs. This method is a coroutine """ if len(self._notify) > 0: # Each client gets its own copy of the dict. asyncio.gather(*[coro(dict(status)) for coro in self._notify], loop=self.loop)
[ "async", "def", "_send_report", "(", "self", ",", "status", ")", ":", "if", "len", "(", "self", ".", "_notify", ")", ">", "0", ":", "# Each client gets its own copy of the dict.", "asyncio", ".", "gather", "(", "*", "[", "coro", "(", "dict", "(", "status", ")", ")", "for", "coro", "in", "self", ".", "_notify", "]", ",", "loop", "=", "self", ".", "loop", ")" ]
Call all subscribed coroutines in _notify whenever a status
update occurs.

This method is a coroutine.
[ "Call", "all", "subscribed", "coroutines", "in", "_notify", "whenever", "a", "status", "update", "occurs", "." ]
python
train
35.181818
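The fan-out pattern above, reduced to a runnable standalone sketch: every subscriber gets its own copy of the status dict, so one callback mutating it cannot affect the others. The names are illustrative, not pyotgw API.

import asyncio

async def subscriber_a(status):
    status['seen_by'] = 'a'   # safe: mutates a private copy only

async def subscriber_b(status):
    print('b got', status)

async def send_report(notify, status):
    if notify:
        await asyncio.gather(*[coro(dict(status)) for coro in notify])

asyncio.run(send_report([subscriber_a, subscriber_b], {'ch': 21.5}))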
chaoss/grimoirelab-perceval
perceval/backends/core/gerrit.py
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/gerrit.py#L357-L366
def reviews(self, last_item, filter_=None): """Get the reviews starting from last_item.""" cmd = self._get_gerrit_cmd(last_item, filter_) logger.debug("Getting reviews with command: %s", cmd) raw_data = self.__execute(cmd) raw_data = str(raw_data, "UTF-8") return raw_data
[ "def", "reviews", "(", "self", ",", "last_item", ",", "filter_", "=", "None", ")", ":", "cmd", "=", "self", ".", "_get_gerrit_cmd", "(", "last_item", ",", "filter_", ")", "logger", ".", "debug", "(", "\"Getting reviews with command: %s\"", ",", "cmd", ")", "raw_data", "=", "self", ".", "__execute", "(", "cmd", ")", "raw_data", "=", "str", "(", "raw_data", ",", "\"UTF-8\"", ")", "return", "raw_data" ]
Get the reviews starting from last_item.
[ "Get", "the", "reviews", "starting", "from", "last_item", "." ]
python
test
31.4
usc-isi-i2/etk
etk/cli/reonto.py
https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/cli/reonto.py#L50-L93
def run(args): """ Args: args (argparse.Namespace) """ with warnings.catch_warnings(): warnings.simplefilter('ignore') query = prepareQuery(args.query_file.read()) ds = Dataset() res_indices_prev = set() # de-duplication res_indices = set() # create sub graphs for f in args.graphs: g = Graph(identifier=os.path.basename(f.name)) g.parse(data=f.read(), format='n3') ds.add_graph(g) # create and query data graph for data in read_by_chunk(args.input_file, int(args.chunk_size)): g = Graph(identifier='data') g.parse(data=data, format=args.input_type) ds.add_graph(g) res = ds.query(query) dedup_res_graph = Graph() if len(res) != 0: for r in res: tid = generate_index(r) res_indices.add(tid) if tid in res_indices_prev: # duplicated continue dedup_res_graph.add(r) if len(dedup_res_graph) > 0: ret = dedup_res_graph.serialize(format=args.output_type, encoding='utf-8') args.output_file.write(ret) ds.remove_graph(g) res_indices_prev = res_indices res_indices = set()
[ "def", "run", "(", "args", ")", ":", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "'ignore'", ")", "query", "=", "prepareQuery", "(", "args", ".", "query_file", ".", "read", "(", ")", ")", "ds", "=", "Dataset", "(", ")", "res_indices_prev", "=", "set", "(", ")", "# de-duplication", "res_indices", "=", "set", "(", ")", "# create sub graphs", "for", "f", "in", "args", ".", "graphs", ":", "g", "=", "Graph", "(", "identifier", "=", "os", ".", "path", ".", "basename", "(", "f", ".", "name", ")", ")", "g", ".", "parse", "(", "data", "=", "f", ".", "read", "(", ")", ",", "format", "=", "'n3'", ")", "ds", ".", "add_graph", "(", "g", ")", "# create and query data graph", "for", "data", "in", "read_by_chunk", "(", "args", ".", "input_file", ",", "int", "(", "args", ".", "chunk_size", ")", ")", ":", "g", "=", "Graph", "(", "identifier", "=", "'data'", ")", "g", ".", "parse", "(", "data", "=", "data", ",", "format", "=", "args", ".", "input_type", ")", "ds", ".", "add_graph", "(", "g", ")", "res", "=", "ds", ".", "query", "(", "query", ")", "dedup_res_graph", "=", "Graph", "(", ")", "if", "len", "(", "res", ")", "!=", "0", ":", "for", "r", "in", "res", ":", "tid", "=", "generate_index", "(", "r", ")", "res_indices", ".", "add", "(", "tid", ")", "if", "tid", "in", "res_indices_prev", ":", "# duplicated", "continue", "dedup_res_graph", ".", "add", "(", "r", ")", "if", "len", "(", "dedup_res_graph", ")", ">", "0", ":", "ret", "=", "dedup_res_graph", ".", "serialize", "(", "format", "=", "args", ".", "output_type", ",", "encoding", "=", "'utf-8'", ")", "args", ".", "output_file", ".", "write", "(", "ret", ")", "ds", ".", "remove_graph", "(", "g", ")", "res_indices_prev", "=", "res_indices", "res_indices", "=", "set", "(", ")" ]
Args: args (argparse.Namespace)
[ "Args", ":", "args", "(", "argparse", ".", "Namespace", ")" ]
python
train
30.636364
mretegan/crispy
crispy/modules/orca/parser.py
https://github.com/mretegan/crispy/blob/7e241ac1a48d34ca769f3a6183c430360b5f6725/crispy/modules/orca/parser.py#L157-L174
def _parse_tensor(self, indices=False): '''Parse a tensor.''' if indices: self.line = self._skip_lines(1) tensor = np.zeros((3, 3)) for i in range(3): tokens = self.line.split() if indices: tensor[i][0] = float(tokens[1]) tensor[i][1] = float(tokens[2]) tensor[i][2] = float(tokens[3]) else: tensor[i][0] = float(tokens[0]) tensor[i][1] = float(tokens[1]) tensor[i][2] = float(tokens[2]) self.line = self._skip_lines(1) return tensor
[ "def", "_parse_tensor", "(", "self", ",", "indices", "=", "False", ")", ":", "if", "indices", ":", "self", ".", "line", "=", "self", ".", "_skip_lines", "(", "1", ")", "tensor", "=", "np", ".", "zeros", "(", "(", "3", ",", "3", ")", ")", "for", "i", "in", "range", "(", "3", ")", ":", "tokens", "=", "self", ".", "line", ".", "split", "(", ")", "if", "indices", ":", "tensor", "[", "i", "]", "[", "0", "]", "=", "float", "(", "tokens", "[", "1", "]", ")", "tensor", "[", "i", "]", "[", "1", "]", "=", "float", "(", "tokens", "[", "2", "]", ")", "tensor", "[", "i", "]", "[", "2", "]", "=", "float", "(", "tokens", "[", "3", "]", ")", "else", ":", "tensor", "[", "i", "]", "[", "0", "]", "=", "float", "(", "tokens", "[", "0", "]", ")", "tensor", "[", "i", "]", "[", "1", "]", "=", "float", "(", "tokens", "[", "1", "]", ")", "tensor", "[", "i", "]", "[", "2", "]", "=", "float", "(", "tokens", "[", "2", "]", ")", "self", ".", "line", "=", "self", ".", "_skip_lines", "(", "1", ")", "return", "tensor" ]
Parse a tensor.
[ "Parse", "a", "tensor", "." ]
python
train
34.055556
tensorpack/tensorpack
tensorpack/dataflow/serialize.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/dataflow/serialize.py#L167-L189
def save(df, path, data_paths): """ Args: df (DataFlow): the DataFlow to serialize. path (str): output hdf5 file. data_paths (list[str]): list of h5 paths. It should have the same length as each datapoint, and each path should correspond to one component of the datapoint. """ size = _reset_df_and_get_size(df) buffer = defaultdict(list) with get_tqdm(total=size) as pbar: for dp in df: assert len(dp) == len(data_paths), "Datapoint has {} components!".format(len(dp)) for k, el in zip(data_paths, dp): buffer[k].append(el) pbar.update() with h5py.File(path, 'w') as hf, get_tqdm(total=len(data_paths)) as pbar: for data_path in data_paths: hf.create_dataset(data_path, data=buffer[data_path]) pbar.update()
[ "def", "save", "(", "df", ",", "path", ",", "data_paths", ")", ":", "size", "=", "_reset_df_and_get_size", "(", "df", ")", "buffer", "=", "defaultdict", "(", "list", ")", "with", "get_tqdm", "(", "total", "=", "size", ")", "as", "pbar", ":", "for", "dp", "in", "df", ":", "assert", "len", "(", "dp", ")", "==", "len", "(", "data_paths", ")", ",", "\"Datapoint has {} components!\"", ".", "format", "(", "len", "(", "dp", ")", ")", "for", "k", ",", "el", "in", "zip", "(", "data_paths", ",", "dp", ")", ":", "buffer", "[", "k", "]", ".", "append", "(", "el", ")", "pbar", ".", "update", "(", ")", "with", "h5py", ".", "File", "(", "path", ",", "'w'", ")", "as", "hf", ",", "get_tqdm", "(", "total", "=", "len", "(", "data_paths", ")", ")", "as", "pbar", ":", "for", "data_path", "in", "data_paths", ":", "hf", ".", "create_dataset", "(", "data_path", ",", "data", "=", "buffer", "[", "data_path", "]", ")", "pbar", ".", "update", "(", ")" ]
Args: df (DataFlow): the DataFlow to serialize. path (str): output hdf5 file. data_paths (list[str]): list of h5 paths. It should have the same length as each datapoint, and each path should correspond to one component of the datapoint.
[ "Args", ":", "df", "(", "DataFlow", ")", ":", "the", "DataFlow", "to", "serialize", ".", "path", "(", "str", ")", ":", "output", "hdf5", "file", ".", "data_paths", "(", "list", "[", "str", "]", ")", ":", "list", "of", "h5", "paths", ".", "It", "should", "have", "the", "same", "length", "as", "each", "datapoint", "and", "each", "path", "should", "correspond", "to", "one", "component", "of", "the", "datapoint", "." ]
python
train
40.652174
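A usage sketch assuming the function above is exposed as `HDF5Serializer.save`, which its location in `tensorpack.dataflow.serialize` suggests; the dataflow and the h5 path names are illustrative.

from tensorpack.dataflow import FakeData
from tensorpack.dataflow.serialize import HDF5Serializer

# each datapoint has two components, so exactly two h5 paths are required
df = FakeData(shapes=[[28, 28], [1]], size=100)
HDF5Serializer.save(df, 'out.h5', data_paths=['image', 'label'])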
spyder-ide/spyder
spyder/plugins/projects/plugin.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/projects/plugin.py#L398-L403
def set_project_filenames(self, recent_files): """Set the list of open file names in a project""" if (self.current_active_project and self.is_valid_project( self.current_active_project.root_path)): self.current_active_project.set_recent_files(recent_files)
[ "def", "set_project_filenames", "(", "self", ",", "recent_files", ")", ":", "if", "(", "self", ".", "current_active_project", "and", "self", ".", "is_valid_project", "(", "self", ".", "current_active_project", ".", "root_path", ")", ")", ":", "self", ".", "current_active_project", ".", "set_recent_files", "(", "recent_files", ")" ]
Set the list of open file names in a project
[ "Set", "the", "list", "of", "open", "file", "names", "in", "a", "project" ]
python
train
54
openwisp/django-x509
django_x509/base/models.py
https://github.com/openwisp/django-x509/blob/7f6cc937d6b13a10ce6511e0bb2a9a1345e45a2c/django_x509/base/models.py#L490-L498
def revoke(self): """ * flag certificate as revoked * fill in revoked_at DateTimeField """ now = timezone.now() self.revoked = True self.revoked_at = now self.save()
[ "def", "revoke", "(", "self", ")", ":", "now", "=", "timezone", ".", "now", "(", ")", "self", ".", "revoked", "=", "True", "self", ".", "revoked_at", "=", "now", "self", ".", "save", "(", ")" ]
* flag certificate as revoked * fill in revoked_at DateTimeField
[ "*", "flag", "certificate", "as", "revoked", "*", "fill", "in", "revoked_at", "DateTimeField" ]
python
train
24.555556
jobovy/galpy
galpy/actionAngle/actionAngleStaeckelGrid.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/actionAngle/actionAngleStaeckelGrid.py#L600-L617
def calcu0(self,E,Lz): """ NAME: calcu0 PURPOSE: calculate the minimum of the u potential INPUT: E - energy Lz - angular momentum OUTPUT: u0 HISTORY: 2012-11-29 - Written - Bovy (IAS) """ logu0= optimize.brent(_u0Eq, args=(self._delta,self._pot, E,Lz**2./2.)) return numpy.exp(logu0)
[ "def", "calcu0", "(", "self", ",", "E", ",", "Lz", ")", ":", "logu0", "=", "optimize", ".", "brent", "(", "_u0Eq", ",", "args", "=", "(", "self", ".", "_delta", ",", "self", ".", "_pot", ",", "E", ",", "Lz", "**", "2.", "/", "2.", ")", ")", "return", "numpy", ".", "exp", "(", "logu0", ")" ]
NAME: calcu0 PURPOSE: calculate the minimum of the u potential INPUT: E - energy Lz - angular momentum OUTPUT: u0 HISTORY: 2012-11-29 - Written - Bovy (IAS)
[ "NAME", ":", "calcu0", "PURPOSE", ":", "calculate", "the", "minimum", "of", "the", "u", "potential", "INPUT", ":", "E", "-", "energy", "Lz", "-", "angular", "momentum", "OUTPUT", ":", "u0", "HISTORY", ":", "2012", "-", "11", "-", "29", "-", "Written", "-", "Bovy", "(", "IAS", ")" ]
python
train
27.611111
DLR-RM/RAFCON
source/rafcon/gui/helpers/state_machine.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/helpers/state_machine.py#L82-L120
def open_state_machine(path=None, recent_opened_notification=False): """ Open a state machine from respective file system path :param str path: file system path to the state machine :param bool recent_opened_notification: flags that indicates that this call also should update recently open :rtype rafcon.core.state_machine.StateMachine :return: opened state machine """ start_time = time.time() if path is None: if interface.open_folder_func is None: logger.error("No function defined for opening a folder") return load_path = interface.open_folder_func("Please choose the folder of the state machine") if load_path is None: return else: load_path = path if state_machine_manager.is_state_machine_open(load_path): logger.info("State machine already open. Select state machine instance from path {0}.".format(load_path)) sm = state_machine_manager.get_open_state_machine_of_file_system_path(load_path) gui_helper_state.gui_singletons.state_machine_manager_model.selected_state_machine_id = sm.state_machine_id return state_machine_manager.get_open_state_machine_of_file_system_path(load_path) state_machine = None try: state_machine = storage.load_state_machine_from_path(load_path) state_machine_manager.add_state_machine(state_machine) if recent_opened_notification: global_runtime_config.update_recently_opened_state_machines_with(state_machine) duration = time.time() - start_time stat = state_machine.root_state.get_states_statistics(0) logger.info("It took {0:.2}s to load {1} states with {2} hierarchy levels.".format(duration, stat[0], stat[1])) except (AttributeError, ValueError, IOError) as e: logger.error('Error while trying to open state machine: {0}'.format(e)) return state_machine
[ "def", "open_state_machine", "(", "path", "=", "None", ",", "recent_opened_notification", "=", "False", ")", ":", "start_time", "=", "time", ".", "time", "(", ")", "if", "path", "is", "None", ":", "if", "interface", ".", "open_folder_func", "is", "None", ":", "logger", ".", "error", "(", "\"No function defined for opening a folder\"", ")", "return", "load_path", "=", "interface", ".", "open_folder_func", "(", "\"Please choose the folder of the state machine\"", ")", "if", "load_path", "is", "None", ":", "return", "else", ":", "load_path", "=", "path", "if", "state_machine_manager", ".", "is_state_machine_open", "(", "load_path", ")", ":", "logger", ".", "info", "(", "\"State machine already open. Select state machine instance from path {0}.\"", ".", "format", "(", "load_path", ")", ")", "sm", "=", "state_machine_manager", ".", "get_open_state_machine_of_file_system_path", "(", "load_path", ")", "gui_helper_state", ".", "gui_singletons", ".", "state_machine_manager_model", ".", "selected_state_machine_id", "=", "sm", ".", "state_machine_id", "return", "state_machine_manager", ".", "get_open_state_machine_of_file_system_path", "(", "load_path", ")", "state_machine", "=", "None", "try", ":", "state_machine", "=", "storage", ".", "load_state_machine_from_path", "(", "load_path", ")", "state_machine_manager", ".", "add_state_machine", "(", "state_machine", ")", "if", "recent_opened_notification", ":", "global_runtime_config", ".", "update_recently_opened_state_machines_with", "(", "state_machine", ")", "duration", "=", "time", ".", "time", "(", ")", "-", "start_time", "stat", "=", "state_machine", ".", "root_state", ".", "get_states_statistics", "(", "0", ")", "logger", ".", "info", "(", "\"It took {0:.2}s to load {1} states with {2} hierarchy levels.\"", ".", "format", "(", "duration", ",", "stat", "[", "0", "]", ",", "stat", "[", "1", "]", ")", ")", "except", "(", "AttributeError", ",", "ValueError", ",", "IOError", ")", "as", "e", ":", "logger", ".", "error", "(", "'Error while trying to open state machine: {0}'", ".", "format", "(", "e", ")", ")", "return", "state_machine" ]
Open a state machine from respective file system path

:param str path: file system path to the state machine
:param bool recent_opened_notification: flag that indicates that this call should also update the list of recently opened state machines
:rtype rafcon.core.state_machine.StateMachine
:return: opened state machine
[ "Open", "a", "state", "machine", "from", "respective", "file", "system", "path" ]
python
train
48.410256
asyncee/django-easy-select2
easy_select2/widgets.py
https://github.com/asyncee/django-easy-select2/blob/f81bbaa91d0266029be7ef6d075d85f13273e3a5/easy_select2/widgets.py#L87-L93
def render_js_code(self, id_, *args, **kwargs): """Render html container for Select2 widget with options.""" if id_: options = self.render_select2_options_code( dict(self.get_options()), id_) return mark_safe(self.html.format(id=id_, options=options)) return u''
[ "def", "render_js_code", "(", "self", ",", "id_", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "id_", ":", "options", "=", "self", ".", "render_select2_options_code", "(", "dict", "(", "self", ".", "get_options", "(", ")", ")", ",", "id_", ")", "return", "mark_safe", "(", "self", ".", "html", ".", "format", "(", "id", "=", "id_", ",", "options", "=", "options", ")", ")", "return", "u''" ]
Render html container for Select2 widget with options.
[ "Render", "html", "container", "for", "Select2", "widget", "with", "options", "." ]
python
train
46.285714
genialis/resolwe
resolwe/flow/managers/dispatcher.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/dispatcher.py#L631-L634
def _ensure_counter(self): """Ensure the sync counter is a valid non-dummy object.""" if not isinstance(self.sync_counter, self._SynchronizationManager): self.sync_counter = self._SynchronizationManager()
[ "def", "_ensure_counter", "(", "self", ")", ":", "if", "not", "isinstance", "(", "self", ".", "sync_counter", ",", "self", ".", "_SynchronizationManager", ")", ":", "self", ".", "sync_counter", "=", "self", ".", "_SynchronizationManager", "(", ")" ]
Ensure the sync counter is a valid non-dummy object.
[ "Ensure", "the", "sync", "counter", "is", "a", "valid", "non", "-", "dummy", "object", "." ]
python
train
57.25
rbarrois/throttle
throttle/api.py
https://github.com/rbarrois/throttle/blob/cc00e6b446f3938c81826ee258975ebdc12511a2/throttle/api.py#L57-L63
def throttle(self, key, amount=1, rate=None, capacity=None, exc_class=Throttled, **kwargs): """Consume an amount for a given key, or raise a Throttled exception.""" if not self.consume(key, amount, rate, capacity, **kwargs): raise exc_class("Request of %d unit for %s exceeds capacity." % (amount, key))
[ "def", "throttle", "(", "self", ",", "key", ",", "amount", "=", "1", ",", "rate", "=", "None", ",", "capacity", "=", "None", ",", "exc_class", "=", "Throttled", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "consume", "(", "key", ",", "amount", ",", "rate", ",", "capacity", ",", "*", "*", "kwargs", ")", ":", "raise", "exc_class", "(", "\"Request of %d unit for %s exceeds capacity.\"", "%", "(", "amount", ",", "key", ")", ")" ]
Consume an amount for a given key, or raise a Throttled exception.
[ "Consume", "an", "amount", "for", "a", "given", "key", "or", "raise", "a", "Throttled", "exception", "." ]
python
train
51.142857
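The contract in isolation: `throttle` is `consume` plus an exception. Below is a minimal stand-in with toy accounting, since the real bucket class in throttle/api.py is not shown here; only the `throttle` body mirrors the method above.

class Throttled(Exception):
    pass

class Bucket:
    """Toy stand-in; the real consume() does leaky-bucket accounting."""
    def __init__(self, capacity):
        self.capacity = capacity
        self.level = 0

    def consume(self, key, amount=1, rate=None, capacity=None, **kwargs):
        if self.level + amount > self.capacity:
            return False
        self.level += amount
        return True

    def throttle(self, key, amount=1, rate=None, capacity=None,
                 exc_class=Throttled, **kwargs):
        if not self.consume(key, amount, rate, capacity, **kwargs):
            raise exc_class("Request of %d unit for %s exceeds capacity."
                            % (amount, key))

b = Bucket(capacity=2)
b.throttle('user:42')
b.throttle('user:42')
b.throttle('user:42')  # raises Throttled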
molpopgen/fwdpy11
fwdpy11/_tables_to_tskit.py
https://github.com/molpopgen/fwdpy11/blob/7a5905f0f0a09e24ae5b0f39d22017499e81ea9e/fwdpy11/_tables_to_tskit.py#L93-L145
def dump_tables_to_tskit(pop): """ Converts fwdpy11.TableCollection to an tskit.TreeSequence """ node_view = np.array(pop.tables.nodes, copy=True) node_view['time'] -= node_view['time'].max() node_view['time'][np.where(node_view['time'] != 0.0)[0]] *= -1.0 edge_view = np.array(pop.tables.edges, copy=False) mut_view = np.array(pop.tables.mutations, copy=False) tc = tskit.TableCollection(pop.tables.genome_length) # We must initialize population and individual # tables before we can do anything else. # Attempting to set population to anything # other than -1 in an tskit.NodeTable will # raise an exception if the PopulationTable # isn't set up. _initializePopulationTable(node_view, tc) node_to_individual = _initializeIndividualTable(pop, tc) individual = [-1 for i in range(len(node_view))] for k, v in node_to_individual.items(): individual[k] = v flags = [1]*2*pop.N + [0]*(len(node_view) - 2*pop.N) # Bug fixed in 0.3.1: add preserved nodes to samples list for i in pop.tables.preserved_nodes: flags[i] = 1 tc.nodes.set_columns(flags=flags, time=node_view['time'], population=node_view['population'], individual=individual) tc.edges.set_columns(left=edge_view['left'], right=edge_view['right'], parent=edge_view['parent'], child=edge_view['child']) mpos = np.array([pop.mutations[i].pos for i in mut_view['key']]) ancestral_state = np.zeros(len(mut_view), dtype=np.int8)+ord('0') ancestral_state_offset = np.arange(len(mut_view)+1, dtype=np.uint32) tc.sites.set_columns(position=mpos, ancestral_state=ancestral_state, ancestral_state_offset=ancestral_state_offset) derived_state = np.zeros(len(mut_view), dtype=np.int8)+ord('1') md, mdo = _generate_mutation_metadata(pop) tc.mutations.set_columns(site=np.arange(len(mpos), dtype=np.int32), node=mut_view['node'], derived_state=derived_state, derived_state_offset=ancestral_state_offset, metadata=md, metadata_offset=mdo) return tc.tree_sequence()
[ "def", "dump_tables_to_tskit", "(", "pop", ")", ":", "node_view", "=", "np", ".", "array", "(", "pop", ".", "tables", ".", "nodes", ",", "copy", "=", "True", ")", "node_view", "[", "'time'", "]", "-=", "node_view", "[", "'time'", "]", ".", "max", "(", ")", "node_view", "[", "'time'", "]", "[", "np", ".", "where", "(", "node_view", "[", "'time'", "]", "!=", "0.0", ")", "[", "0", "]", "]", "*=", "-", "1.0", "edge_view", "=", "np", ".", "array", "(", "pop", ".", "tables", ".", "edges", ",", "copy", "=", "False", ")", "mut_view", "=", "np", ".", "array", "(", "pop", ".", "tables", ".", "mutations", ",", "copy", "=", "False", ")", "tc", "=", "tskit", ".", "TableCollection", "(", "pop", ".", "tables", ".", "genome_length", ")", "# We must initialize population and individual", "# tables before we can do anything else.", "# Attempting to set population to anything", "# other than -1 in an tskit.NodeTable will", "# raise an exception if the PopulationTable", "# isn't set up.", "_initializePopulationTable", "(", "node_view", ",", "tc", ")", "node_to_individual", "=", "_initializeIndividualTable", "(", "pop", ",", "tc", ")", "individual", "=", "[", "-", "1", "for", "i", "in", "range", "(", "len", "(", "node_view", ")", ")", "]", "for", "k", ",", "v", "in", "node_to_individual", ".", "items", "(", ")", ":", "individual", "[", "k", "]", "=", "v", "flags", "=", "[", "1", "]", "*", "2", "*", "pop", ".", "N", "+", "[", "0", "]", "*", "(", "len", "(", "node_view", ")", "-", "2", "*", "pop", ".", "N", ")", "# Bug fixed in 0.3.1: add preserved nodes to samples list", "for", "i", "in", "pop", ".", "tables", ".", "preserved_nodes", ":", "flags", "[", "i", "]", "=", "1", "tc", ".", "nodes", ".", "set_columns", "(", "flags", "=", "flags", ",", "time", "=", "node_view", "[", "'time'", "]", ",", "population", "=", "node_view", "[", "'population'", "]", ",", "individual", "=", "individual", ")", "tc", ".", "edges", ".", "set_columns", "(", "left", "=", "edge_view", "[", "'left'", "]", ",", "right", "=", "edge_view", "[", "'right'", "]", ",", "parent", "=", "edge_view", "[", "'parent'", "]", ",", "child", "=", "edge_view", "[", "'child'", "]", ")", "mpos", "=", "np", ".", "array", "(", "[", "pop", ".", "mutations", "[", "i", "]", ".", "pos", "for", "i", "in", "mut_view", "[", "'key'", "]", "]", ")", "ancestral_state", "=", "np", ".", "zeros", "(", "len", "(", "mut_view", ")", ",", "dtype", "=", "np", ".", "int8", ")", "+", "ord", "(", "'0'", ")", "ancestral_state_offset", "=", "np", ".", "arange", "(", "len", "(", "mut_view", ")", "+", "1", ",", "dtype", "=", "np", ".", "uint32", ")", "tc", ".", "sites", ".", "set_columns", "(", "position", "=", "mpos", ",", "ancestral_state", "=", "ancestral_state", ",", "ancestral_state_offset", "=", "ancestral_state_offset", ")", "derived_state", "=", "np", ".", "zeros", "(", "len", "(", "mut_view", ")", ",", "dtype", "=", "np", ".", "int8", ")", "+", "ord", "(", "'1'", ")", "md", ",", "mdo", "=", "_generate_mutation_metadata", "(", "pop", ")", "tc", ".", "mutations", ".", "set_columns", "(", "site", "=", "np", ".", "arange", "(", "len", "(", "mpos", ")", ",", "dtype", "=", "np", ".", "int32", ")", ",", "node", "=", "mut_view", "[", "'node'", "]", ",", "derived_state", "=", "derived_state", ",", "derived_state_offset", "=", "ancestral_state_offset", ",", "metadata", "=", "md", ",", "metadata_offset", "=", "mdo", ")", "return", "tc", ".", "tree_sequence", "(", ")" ]
Converts fwdpy11.TableCollection to a tskit.TreeSequence
[ "Converts", "fwdpy11", ".", "TableCollection", "to", "a", "tskit", ".", "TreeSequence" ]
python
train
43.886792
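The node-time flip at the top of the function, demonstrated in isolation: fwdpy11 records forward birth times, while tskit wants time-ago values with the newest generation at 0.

import numpy as np

t = np.array([0.0, 3.0, 7.0, 10.0])    # forward birth times
t -= t.max()                            # newest generation -> 0
t[np.where(t != 0.0)[0]] *= -1.0        # older nodes -> positive time-ago
print(t)                                # [10.  7.  3.  0.]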
BlueHack-Core/blueforge
blueforge/util/file.py
https://github.com/BlueHack-Core/blueforge/blob/ac40a888ee9c388638a8f312c51f7500b8891b6c/blueforge/util/file.py#L84-L96
def uncompress_files(original, destination): """ Move file from original path to destination path. :type original: str :param original: The location of zip file :type destination: str :param destination: The extract path """ with zipfile.ZipFile(original) as zips: extract_path = os.path.join(destination) zips.extractall(extract_path)
[ "def", "uncompress_files", "(", "original", ",", "destination", ")", ":", "with", "zipfile", ".", "ZipFile", "(", "original", ")", "as", "zips", ":", "extract_path", "=", "os", ".", "path", ".", "join", "(", "destination", ")", "zips", ".", "extractall", "(", "extract_path", ")" ]
Uncompress a zip file from the original path into the destination path.

:type original: str
:param original: The location of the zip file

:type destination: str
:param destination: The extract path
[ "Uncompress", "a", "zip", "file", "from", "the", "original", "path", "into", "the", "destination", "path", "." ]
python
train
30.230769
andymccurdy/redis-py
redis/connection.py
https://github.com/andymccurdy/redis-py/blob/cdfe2befbe00db4a3c48c9ddd6d64dea15f6f0db/redis/connection.py#L474-L498
def connect(self): "Connects to the Redis server if not already connected" if self._sock: return try: sock = self._connect() except socket.timeout: raise TimeoutError("Timeout connecting to server") except socket.error: e = sys.exc_info()[1] raise ConnectionError(self._error_message(e)) self._sock = sock self._selector = DefaultSelector(sock) try: self.on_connect() except RedisError: # clean up after any error in on_connect self.disconnect() raise # run any user callbacks. right now the only internal callback # is for pubsub channel/pattern resubscription for callback in self._connect_callbacks: callback(self)
[ "def", "connect", "(", "self", ")", ":", "if", "self", ".", "_sock", ":", "return", "try", ":", "sock", "=", "self", ".", "_connect", "(", ")", "except", "socket", ".", "timeout", ":", "raise", "TimeoutError", "(", "\"Timeout connecting to server\"", ")", "except", "socket", ".", "error", ":", "e", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "raise", "ConnectionError", "(", "self", ".", "_error_message", "(", "e", ")", ")", "self", ".", "_sock", "=", "sock", "self", ".", "_selector", "=", "DefaultSelector", "(", "sock", ")", "try", ":", "self", ".", "on_connect", "(", ")", "except", "RedisError", ":", "# clean up after any error in on_connect", "self", ".", "disconnect", "(", ")", "raise", "# run any user callbacks. right now the only internal callback", "# is for pubsub channel/pattern resubscription", "for", "callback", "in", "self", ".", "_connect_callbacks", ":", "callback", "(", "self", ")" ]
Connects to the Redis server if not already connected
[ "Connects", "to", "the", "Redis", "server", "if", "not", "already", "connected" ]
python
train
32.44
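From the client side the method runs lazily on the first command, so the error translation above surfaces as `redis.TimeoutError` / `redis.ConnectionError`; only the host and port values are hypothetical.

import redis

r = redis.Redis(host='localhost', port=6379)
try:
    r.ping()  # first command triggers Connection.connect()
except redis.TimeoutError:       # subclass of ConnectionError, so check it first
    print('server did not answer in time')
except redis.ConnectionError as exc:
    print('could not connect:', exc)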
hsolbrig/PyShEx
pyshex/utils/datatype_utils.py
https://github.com/hsolbrig/PyShEx/blob/9d659cc36e808afd66d4a6d60e8ea21cb12eb744/pyshex/utils/datatype_utils.py#L12-L19
def can_cast_to(v: Literal, dt: str) -> bool: """ 5.4.3 Datatype Constraints Determine whether "a value of the lexical form of n can be cast to the target type v per XPath Functions 3.1 section 19 Casting[xpath-functions]." """ # TODO: rdflib doesn't appear to pay any attention to lengths (e.g. 257 is a valid XSD.byte) return v.value is not None and Literal(str(v), datatype=dt).value is not None
[ "def", "can_cast_to", "(", "v", ":", "Literal", ",", "dt", ":", "str", ")", "->", "bool", ":", "# TODO: rdflib doesn't appear to pay any attention to lengths (e.g. 257 is a valid XSD.byte)", "return", "v", ".", "value", "is", "not", "None", "and", "Literal", "(", "str", "(", "v", ")", ",", "datatype", "=", "dt", ")", ".", "value", "is", "not", "None" ]
5.4.3 Datatype Constraints Determine whether "a value of the lexical form of n can be cast to the target type v per XPath Functions 3.1 section 19 Casting[xpath-functions]."
[ "5", ".", "4", ".", "3", "Datatype", "Constraints" ]
python
train
52
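A quick check of the cast rule with real rdflib literals; the import path matches the module shown above.

from rdflib import Literal
from rdflib.namespace import XSD
from pyshex.utils.datatype_utils import can_cast_to

can_cast_to(Literal('42'), XSD.integer)    # True: the cast produces a value
can_cast_to(Literal('abc'), XSD.integer)   # False: the recast literal's .value is None
can_cast_to(Literal('1e3'), XSD.double)    # True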
SmileyChris/easy-thumbnails
easy_thumbnails/files.py
https://github.com/SmileyChris/easy-thumbnails/blob/b08ab44883bf7b221a98dadb9b589cb95d35b0bf/easy_thumbnails/files.py#L212-L240
def tag(self, alt='', use_size=None, **attrs): """ Return a standard XHTML ``<img ... />`` tag for this field. :param alt: The ``alt=""`` text for the tag. Defaults to ``''``. :param use_size: Whether to get the size of the thumbnail image for use in the tag attributes. If ``None`` (default), the size will only be used it if won't result in a remote file retrieval. All other keyword parameters are added as (properly escaped) extra attributes to the `img` tag. """ if use_size is None: if getattr(self, '_dimensions_cache', None): use_size = True else: try: self.storage.path(self.name) use_size = True except NotImplementedError: use_size = False attrs['alt'] = alt attrs['src'] = self.url if use_size: attrs.update(dict(width=self.width, height=self.height)) attrs = ' '.join(['%s="%s"' % (key, escape(value)) for key, value in sorted(attrs.items())]) return mark_safe('<img %s />' % attrs)
[ "def", "tag", "(", "self", ",", "alt", "=", "''", ",", "use_size", "=", "None", ",", "*", "*", "attrs", ")", ":", "if", "use_size", "is", "None", ":", "if", "getattr", "(", "self", ",", "'_dimensions_cache'", ",", "None", ")", ":", "use_size", "=", "True", "else", ":", "try", ":", "self", ".", "storage", ".", "path", "(", "self", ".", "name", ")", "use_size", "=", "True", "except", "NotImplementedError", ":", "use_size", "=", "False", "attrs", "[", "'alt'", "]", "=", "alt", "attrs", "[", "'src'", "]", "=", "self", ".", "url", "if", "use_size", ":", "attrs", ".", "update", "(", "dict", "(", "width", "=", "self", ".", "width", ",", "height", "=", "self", ".", "height", ")", ")", "attrs", "=", "' '", ".", "join", "(", "[", "'%s=\"%s\"'", "%", "(", "key", ",", "escape", "(", "value", ")", ")", "for", "key", ",", "value", "in", "sorted", "(", "attrs", ".", "items", "(", ")", ")", "]", ")", "return", "mark_safe", "(", "'<img %s />'", "%", "attrs", ")" ]
Return a standard XHTML ``<img ... />`` tag for this field.

:param alt: The ``alt=""`` text for the tag. Defaults to ``''``.

:param use_size: Whether to get the size of the thumbnail image for use
    in the tag attributes. If ``None`` (default), the size will only
    be used if it won't result in a remote file retrieval.

All other keyword parameters are added as (properly escaped) extra
attributes to the `img` tag.
[ "Return", "a", "standard", "XHTML", "<img", "...", "/", ">", "tag", "for", "this", "field", "." ]
python
train
40.275862
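A usage sketch; `get_thumbnailer` and `get_thumbnail` follow easy-thumbnails' documented API, while the model field and the extra attributes are illustrative.

from easy_thumbnails.files import get_thumbnailer

thumb = get_thumbnailer(profile.photo).get_thumbnail({'size': (100, 100)})
# extra keyword arguments become escaped, alphabetically sorted attributes
html = thumb.tag(alt='Profile photo', title='Jane')
# e.g. '<img alt="Profile photo" height="100" src="..." title="Jane" width="100" />'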
Alignak-monitoring/alignak
alignak/daemon.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daemon.py#L2095-L2114
def exit_on_error(self, message, exit_code=1): # pylint: disable=no-self-use """Log generic message when getting an error and exit :param exit_code: if not None, exit with the provided value as exit code :type exit_code: int :param message: message for the exit reason :type message: str :return: None """ log = "I got an unrecoverable error. I have to exit." if message: log += "\n-----\nError message: %s" % message print("Error message: %s" % message) log += "-----\n" log += "You can get help at https://github.com/Alignak-monitoring/alignak\n" log += "If you think this is a bug, create a new issue including as much " \ "details as possible (version, configuration,...)" if exit_code is not None: exit(exit_code)
[ "def", "exit_on_error", "(", "self", ",", "message", ",", "exit_code", "=", "1", ")", ":", "# pylint: disable=no-self-use", "log", "=", "\"I got an unrecoverable error. I have to exit.\"", "if", "message", ":", "log", "+=", "\"\\n-----\\nError message: %s\"", "%", "message", "print", "(", "\"Error message: %s\"", "%", "message", ")", "log", "+=", "\"-----\\n\"", "log", "+=", "\"You can get help at https://github.com/Alignak-monitoring/alignak\\n\"", "log", "+=", "\"If you think this is a bug, create a new issue including as much \"", "\"details as possible (version, configuration,...)\"", "if", "exit_code", "is", "not", "None", ":", "exit", "(", "exit_code", ")" ]
Log generic message when getting an error and exit :param exit_code: if not None, exit with the provided value as exit code :type exit_code: int :param message: message for the exit reason :type message: str :return: None
[ "Log", "generic", "message", "when", "getting", "an", "error", "and", "exit" ]
python
train
43.15
sorgerlab/indra
rest_api/api.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L348-L359
def assemble_cx(): """Assemble INDRA Statements and return CX network json.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') stmts = stmts_from_json(stmts_json) ca = CxAssembler(stmts) model_str = ca.make_model() res = {'model': model_str} return res
[ "def", "assemble_cx", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "stmts_json", "=", "body", ".", "get", "(", "'statements'", ")", "stmts", "=", "stmts_from_json", "(", "stmts_json", ")", "ca", "=", "CxAssembler", "(", "stmts", ")", "model_str", "=", "ca", ".", "make_model", "(", ")", "res", "=", "{", "'model'", ":", "model_str", "}", "return", "res" ]
Assemble INDRA Statements and return CX network json.
[ "Assemble", "INDRA", "Statements", "and", "return", "CX", "network", "json", "." ]
python
train
32.833333
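A client-side sketch for exercising the endpoint above. The host, port, and route are assumptions, and the empty statements list stands in for real INDRA Statement JSON (e.g. the output of indra.statements.stmts_to_json).

import json
import requests

payload = {'statements': []}  # placeholder; normally stmts_to_json(...) output
resp = requests.post('http://localhost:8080/assemblers/cx',  # assumed route
                     data=json.dumps(payload))
print(resp.json()['model'][:100])  # the CX network JSON, returned as a string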
mfcovington/djangocms-shiny-app
cms_shiny/menu.py
https://github.com/mfcovington/djangocms-shiny-app/blob/67eb7996c9a26abd14b3eb8b6b6aabdf5d626685/cms_shiny/menu.py#L15-L27
def get_nodes(self, request): """ This method is used to build the menu tree. """ nodes = [] for shiny_app in ShinyApp.objects.all(): node = NavigationNode( shiny_app.name, reverse('cms_shiny:shiny_detail', args=(shiny_app.slug,)), shiny_app.slug ) nodes.append(node) return nodes
[ "def", "get_nodes", "(", "self", ",", "request", ")", ":", "nodes", "=", "[", "]", "for", "shiny_app", "in", "ShinyApp", ".", "objects", ".", "all", "(", ")", ":", "node", "=", "NavigationNode", "(", "shiny_app", ".", "name", ",", "reverse", "(", "'cms_shiny:shiny_detail'", ",", "args", "=", "(", "shiny_app", ".", "slug", ",", ")", ")", ",", "shiny_app", ".", "slug", ")", "nodes", ".", "append", "(", "node", ")", "return", "nodes" ]
This method is used to build the menu tree.
[ "This", "method", "is", "used", "to", "build", "the", "menu", "tree", "." ]
python
train
30.769231
bitesofcode/projexui
projexui/widgets/xactiongroupwidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xactiongroupwidget.py#L191-L295
def reset( self ): """ Resets the user interface buttons for this widget. """ # clear previous widgets for btn in self.findChildren(QToolButton): btn.close() btn.setParent(None) btn.deleteLater() # determine coloring options palette = self.palette() unchecked = palette.color(palette.Button) # determine if this is a dark or light scheme avg = (unchecked.red() + unchecked.green() + unchecked.blue()) / 3.0 if ( avg < 140 ): checked = unchecked.lighter(115) checked_clr = self.colorString(unchecked.lighter(120)) border_clr = self.colorString(unchecked.darker(140)) unchecked_clr = self.colorString(checked.lighter(140)) unchecked_clr_alt = self.colorString(checked.lighter(120)) checked_clr_alt = self.colorString(unchecked) else: checked = unchecked.lighter(120) checked_clr = self.colorString(unchecked) border_clr = self.colorString(unchecked.darker(160)) unchecked_clr = self.colorString(checked) unchecked_clr_alt = self.colorString(checked.darker(130)) checked_clr_alt = self.colorString(unchecked.darker(120)) # define the stylesheet options options = {} options['top_left_radius'] = 0 options['top_right_radius'] = 0 options['bot_left_radius'] = 0 options['bot_right_radius'] = 0 options['border_color'] = border_clr options['checked_clr'] = checked_clr options['checked_clr_alt'] = checked_clr_alt options['unchecked_clr'] = unchecked_clr options['unchecked_clr_alt'] = unchecked_clr_alt options['padding_top'] = 1 options['padding_bottom'] = 1 options['padding_left'] = 1 options['padding_right'] = 1 horiz = self.direction() in (QBoxLayout.LeftToRight, QBoxLayout.RightToLeft) if ( horiz ): options['x1'] = 0 options['y1'] = 0 options['x2'] = 0 options['y2'] = 1 else: options['x1'] = 0 options['y1'] = 0 options['x2'] = 1 options['y2'] = 1 actions = self.actionGroup().actions() count = len(actions) for i, action in enumerate(actions): btn = QToolButton(self) btn.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred) btn.setDefaultAction(action) self.layout().insertWidget(i, btn) options['top_left_radius'] = 1 options['bot_left_radius'] = 1 options['top_right_radius'] = 1 options['bot_right_radius'] = 1 if ( horiz ): options['padding_left'] = self._padding options['padding_right'] = self._padding else: options['padding_top'] = self._padding options['padding_bottom'] = self._padding if ( not i ): if ( horiz ): options['top_left_radius'] = self.cornerRadius() options['bot_left_radius'] = self.cornerRadius() options['padding_left'] += self.cornerRadius() / 3.0 else: options['top_left_radius'] = self.cornerRadius() options['top_right_radius'] = self.cornerRadius() options['padding_top'] += self.cornerRadius() / 3.0 elif ( i == count - 1 ): if ( horiz ): options['top_right_radius'] = self.cornerRadius() options['bot_right_radius'] = self.cornerRadius() options['padding_right'] += self.cornerRadius() / 3.0 else: options['bot_left_radius'] = self.cornerRadius() options['bot_right_radius'] = self.cornerRadius() options['padding_bottom'] += self.cornerRadius() / 3.0 btn.setStyleSheet(TOOLBUTTON_STYLE % options) btn.setAutoFillBackground(True)
[ "def", "reset", "(", "self", ")", ":", "# clear previous widgets", "for", "btn", "in", "self", ".", "findChildren", "(", "QToolButton", ")", ":", "btn", ".", "close", "(", ")", "btn", ".", "setParent", "(", "None", ")", "btn", ".", "deleteLater", "(", ")", "# determine coloring options", "palette", "=", "self", ".", "palette", "(", ")", "unchecked", "=", "palette", ".", "color", "(", "palette", ".", "Button", ")", "# determine if this is a dark or light scheme", "avg", "=", "(", "unchecked", ".", "red", "(", ")", "+", "unchecked", ".", "green", "(", ")", "+", "unchecked", ".", "blue", "(", ")", ")", "/", "3.0", "if", "(", "avg", "<", "140", ")", ":", "checked", "=", "unchecked", ".", "lighter", "(", "115", ")", "checked_clr", "=", "self", ".", "colorString", "(", "unchecked", ".", "lighter", "(", "120", ")", ")", "border_clr", "=", "self", ".", "colorString", "(", "unchecked", ".", "darker", "(", "140", ")", ")", "unchecked_clr", "=", "self", ".", "colorString", "(", "checked", ".", "lighter", "(", "140", ")", ")", "unchecked_clr_alt", "=", "self", ".", "colorString", "(", "checked", ".", "lighter", "(", "120", ")", ")", "checked_clr_alt", "=", "self", ".", "colorString", "(", "unchecked", ")", "else", ":", "checked", "=", "unchecked", ".", "lighter", "(", "120", ")", "checked_clr", "=", "self", ".", "colorString", "(", "unchecked", ")", "border_clr", "=", "self", ".", "colorString", "(", "unchecked", ".", "darker", "(", "160", ")", ")", "unchecked_clr", "=", "self", ".", "colorString", "(", "checked", ")", "unchecked_clr_alt", "=", "self", ".", "colorString", "(", "checked", ".", "darker", "(", "130", ")", ")", "checked_clr_alt", "=", "self", ".", "colorString", "(", "unchecked", ".", "darker", "(", "120", ")", ")", "# define the stylesheet options", "options", "=", "{", "}", "options", "[", "'top_left_radius'", "]", "=", "0", "options", "[", "'top_right_radius'", "]", "=", "0", "options", "[", "'bot_left_radius'", "]", "=", "0", "options", "[", "'bot_right_radius'", "]", "=", "0", "options", "[", "'border_color'", "]", "=", "border_clr", "options", "[", "'checked_clr'", "]", "=", "checked_clr", "options", "[", "'checked_clr_alt'", "]", "=", "checked_clr_alt", "options", "[", "'unchecked_clr'", "]", "=", "unchecked_clr", "options", "[", "'unchecked_clr_alt'", "]", "=", "unchecked_clr_alt", "options", "[", "'padding_top'", "]", "=", "1", "options", "[", "'padding_bottom'", "]", "=", "1", "options", "[", "'padding_left'", "]", "=", "1", "options", "[", "'padding_right'", "]", "=", "1", "horiz", "=", "self", ".", "direction", "(", ")", "in", "(", "QBoxLayout", ".", "LeftToRight", ",", "QBoxLayout", ".", "RightToLeft", ")", "if", "(", "horiz", ")", ":", "options", "[", "'x1'", "]", "=", "0", "options", "[", "'y1'", "]", "=", "0", "options", "[", "'x2'", "]", "=", "0", "options", "[", "'y2'", "]", "=", "1", "else", ":", "options", "[", "'x1'", "]", "=", "0", "options", "[", "'y1'", "]", "=", "0", "options", "[", "'x2'", "]", "=", "1", "options", "[", "'y2'", "]", "=", "1", "actions", "=", "self", ".", "actionGroup", "(", ")", ".", "actions", "(", ")", "count", "=", "len", "(", "actions", ")", "for", "i", ",", "action", "in", "enumerate", "(", "actions", ")", ":", "btn", "=", "QToolButton", "(", "self", ")", "btn", ".", "setSizePolicy", "(", "QSizePolicy", ".", "Expanding", ",", "QSizePolicy", ".", "Preferred", ")", "btn", ".", "setDefaultAction", "(", "action", ")", "self", ".", "layout", "(", ")", ".", "insertWidget", "(", "i", ",", "btn", ")", "options", "[", "'top_left_radius'", "]", "=", 
"1", "options", "[", "'bot_left_radius'", "]", "=", "1", "options", "[", "'top_right_radius'", "]", "=", "1", "options", "[", "'bot_right_radius'", "]", "=", "1", "if", "(", "horiz", ")", ":", "options", "[", "'padding_left'", "]", "=", "self", ".", "_padding", "options", "[", "'padding_right'", "]", "=", "self", ".", "_padding", "else", ":", "options", "[", "'padding_top'", "]", "=", "self", ".", "_padding", "options", "[", "'padding_bottom'", "]", "=", "self", ".", "_padding", "if", "(", "not", "i", ")", ":", "if", "(", "horiz", ")", ":", "options", "[", "'top_left_radius'", "]", "=", "self", ".", "cornerRadius", "(", ")", "options", "[", "'bot_left_radius'", "]", "=", "self", ".", "cornerRadius", "(", ")", "options", "[", "'padding_left'", "]", "+=", "self", ".", "cornerRadius", "(", ")", "/", "3.0", "else", ":", "options", "[", "'top_left_radius'", "]", "=", "self", ".", "cornerRadius", "(", ")", "options", "[", "'top_right_radius'", "]", "=", "self", ".", "cornerRadius", "(", ")", "options", "[", "'padding_top'", "]", "+=", "self", ".", "cornerRadius", "(", ")", "/", "3.0", "elif", "(", "i", "==", "count", "-", "1", ")", ":", "if", "(", "horiz", ")", ":", "options", "[", "'top_right_radius'", "]", "=", "self", ".", "cornerRadius", "(", ")", "options", "[", "'bot_right_radius'", "]", "=", "self", ".", "cornerRadius", "(", ")", "options", "[", "'padding_right'", "]", "+=", "self", ".", "cornerRadius", "(", ")", "/", "3.0", "else", ":", "options", "[", "'bot_left_radius'", "]", "=", "self", ".", "cornerRadius", "(", ")", "options", "[", "'bot_right_radius'", "]", "=", "self", ".", "cornerRadius", "(", ")", "options", "[", "'padding_bottom'", "]", "+=", "self", ".", "cornerRadius", "(", ")", "/", "3.0", "btn", ".", "setStyleSheet", "(", "TOOLBUTTON_STYLE", "%", "options", ")", "btn", ".", "setAutoFillBackground", "(", "True", ")" ]
Resets the user interface buttons for this widget.
[ "Resets", "the", "user", "interface", "buttons", "for", "this", "widget", "." ]
python
train
42.4
driftx/Telephus
telephus/cassandra/Cassandra.py
https://github.com/driftx/Telephus/blob/860a03a0fafe71605e1a4316dfdd8d0c29094703/telephus/cassandra/Cassandra.py#L676-L689
def multiget_slice(self, keys, column_parent, predicate, consistency_level): """ Performs a get_slice for column_parent and predicate for the given keys in parallel. Parameters: - keys - column_parent - predicate - consistency_level """ self._seqid += 1 d = self._reqs[self._seqid] = defer.Deferred() self.send_multiget_slice(keys, column_parent, predicate, consistency_level) return d
[ "def", "multiget_slice", "(", "self", ",", "keys", ",", "column_parent", ",", "predicate", ",", "consistency_level", ")", ":", "self", ".", "_seqid", "+=", "1", "d", "=", "self", ".", "_reqs", "[", "self", ".", "_seqid", "]", "=", "defer", ".", "Deferred", "(", ")", "self", ".", "send_multiget_slice", "(", "keys", ",", "column_parent", ",", "predicate", ",", "consistency_level", ")", "return", "d" ]
Performs a get_slice for column_parent and predicate for the given keys in parallel. Parameters: - keys - column_parent - predicate - consistency_level
[ "Performs", "a", "get_slice", "for", "column_parent", "and", "predicate", "for", "the", "given", "keys", "in", "parallel", "." ]
python
train
30.357143
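A hedged Twisted usage sketch for the method above. The keyspace, column family, and keys are placeholders, `client` stands for an already-connected instance of the Thrift client defined above (connection setup omitted), and the struct names come from the generated telephus.cassandra.ttypes module.

from telephus.cassandra.ttypes import (ColumnParent, SlicePredicate,
                                       SliceRange, ConsistencyLevel)

def on_result(result):
    # result maps each requested key to its list of ColumnOrSuperColumn objects
    for key, columns in result.items():
        print(key, len(columns))

predicate = SlicePredicate(slice_range=SliceRange(start='', finish='', count=100))
d = client.multiget_slice(['key1', 'key2'], ColumnParent('Standard1'),
                          predicate, ConsistencyLevel.ONE)
d.addCallback(on_result)  # the Deferred fires when send_multiget_slice completes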
nosedjango/nosedjango
nosedjango/plugins/sphinxsearch_plugin.py
https://github.com/nosedjango/nosedjango/blob/cd4d06857c88291769bc38e5c9573f43b7ffcd6a/nosedjango/plugins/sphinxsearch_plugin.py#L31-L51
def options(self, parser, env=None): """ Sphinx config file that can optionally take the following python template string arguments: ``database_name`` ``database_password`` ``database_username`` ``database_host`` ``database_port`` ``sphinx_search_data_dir`` ``searchd_log_dir`` """ if env is None: env = os.environ parser.add_option( '--sphinx-config-tpl', help='Path to the Sphinx configuration file template.', ) super(SphinxSearchPlugin, self).options(parser, env)
[ "def", "options", "(", "self", ",", "parser", ",", "env", "=", "None", ")", ":", "if", "env", "is", "None", ":", "env", "=", "os", ".", "environ", "parser", ".", "add_option", "(", "'--sphinx-config-tpl'", ",", "help", "=", "'Path to the Sphinx configuration file template.'", ",", ")", "super", "(", "SphinxSearchPlugin", ",", "self", ")", ".", "options", "(", "parser", ",", "env", ")" ]
Sphinx config file that can optionally take the following python template string arguments: ``database_name`` ``database_password`` ``database_username`` ``database_host`` ``database_port`` ``sphinx_search_data_dir`` ``searchd_log_dir``
[ "Sphinx", "config", "file", "that", "can", "optionally", "take", "the", "following", "python", "template", "string", "arguments", ":" ]
python
valid
28.809524
PolicyStat/jobtastic
jobtastic/cache/base.py
https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/cache/base.py#L51-L96
def lock(self, lock_name, timeout=900): """ Attempt to use lock and unlock, which will work if the Cache is Redis, but fall back to a memcached-compliant add/delete approach. If the Jobtastic Cache isn't Redis or Memcache, or another product with a compatible lock or add/delete API, then a custom locking function will be required. However, Redis and Memcache are expected to account for the vast majority of installations. See: - http://loose-bits.com/2010/10/distributed-task-locking-in-celery.html - http://celery.readthedocs.org/en/latest/tutorials/task-cookbook.html#ensuring-a-task-is-only-executed-one-at-a-time # NOQA """ # Try Redis first try: try: lock = self.cache.lock except AttributeError: try: # Possibly using old Django-Redis lock = self.cache.client.lock except AttributeError: # Possibly using Werkzeug + Redis lock = self.cache._client.lock have_lock = False lock = lock(lock_name, timeout=timeout) try: have_lock = lock.acquire(blocking=True) if have_lock: yield finally: if have_lock: lock.release() except AttributeError: # No lock method on the cache, so fall back to add have_lock = False try: while not have_lock: have_lock = self.cache.add(lock_name, 'locked', timeout) if have_lock: yield finally: if have_lock: self.cache.delete(lock_name)
[ "def", "lock", "(", "self", ",", "lock_name", ",", "timeout", "=", "900", ")", ":", "# Try Redis first", "try", ":", "try", ":", "lock", "=", "self", ".", "cache", ".", "lock", "except", "AttributeError", ":", "try", ":", "# Possibly using old Django-Redis", "lock", "=", "self", ".", "cache", ".", "client", ".", "lock", "except", "AttributeError", ":", "# Possibly using Werkzeug + Redis", "lock", "=", "self", ".", "cache", ".", "_client", ".", "lock", "have_lock", "=", "False", "lock", "=", "lock", "(", "lock_name", ",", "timeout", "=", "timeout", ")", "try", ":", "have_lock", "=", "lock", ".", "acquire", "(", "blocking", "=", "True", ")", "if", "have_lock", ":", "yield", "finally", ":", "if", "have_lock", ":", "lock", ".", "release", "(", ")", "except", "AttributeError", ":", "# No lock method on the cache, so fall back to add", "have_lock", "=", "False", "try", ":", "while", "not", "have_lock", ":", "have_lock", "=", "self", ".", "cache", ".", "add", "(", "lock_name", ",", "'locked'", ",", "timeout", ")", "if", "have_lock", ":", "yield", "finally", ":", "if", "have_lock", ":", "self", ".", "cache", ".", "delete", "(", "lock_name", ")" ]
Attempt to use lock and unlock, which will work if the Cache is Redis, but fall back to a memcached-compliant add/delete approach. If the Jobtastic Cache isn't Redis or Memcache, or another product with a compatible lock or add/delete API, then a custom locking function will be required. However, Redis and Memcache are expected to account for the vast majority of installations. See: - http://loose-bits.com/2010/10/distributed-task-locking-in-celery.html - http://celery.readthedocs.org/en/latest/tutorials/task-cookbook.html#ensuring-a-task-is-only-executed-one-at-a-time # NOQA
[ "Attempt", "to", "use", "lock", "and", "unlock", "which", "will", "work", "if", "the", "Cache", "is", "Redis", "but", "fall", "back", "to", "a", "memcached", "-", "compliant", "add", "/", "delete", "approach", "." ]
python
train
38.782609
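A minimal sketch of the context-manager pattern the lock() method above enables; the cache object and the work function are assumed stand-ins.

def refresh_report(cache, report_id):
    # Only one worker runs the block at a time: the Redis path blocks on
    # acquire(), the memcached-style path polls cache.add() until it wins.
    with cache.lock('refresh-report-%s' % report_id, timeout=300):
        build_report(report_id)  # hypothetical unit of work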
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/job.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/job.py#L1692-L1727
def from_api_repr(cls, resource, client): """Factory: construct a job given its API representation .. note: This method assumes that the project found in the resource matches the client's project. :type resource: dict :param resource: dataset job representation returned from the API :type client: :class:`google.cloud.bigquery.client.Client` :param client: Client which holds credentials and project configuration for the dataset. :rtype: :class:`google.cloud.bigquery.job.CopyJob` :returns: Job parsed from ``resource``. """ job_id, config_resource = cls._get_resource_config(resource) config = CopyJobConfig.from_api_repr(config_resource) # Copy required fields to the job. copy_resource = config_resource["copy"] destination = TableReference.from_api_repr(copy_resource["destinationTable"]) sources = [] source_configs = copy_resource.get("sourceTables") if source_configs is None: single = copy_resource.get("sourceTable") if single is None: raise KeyError("Resource missing 'sourceTables' / 'sourceTable'") source_configs = [single] for source_config in source_configs: table_ref = TableReference.from_api_repr(source_config) sources.append(table_ref) job = cls(job_id, sources, destination, client=client, job_config=config) job._set_properties(resource) return job
[ "def", "from_api_repr", "(", "cls", ",", "resource", ",", "client", ")", ":", "job_id", ",", "config_resource", "=", "cls", ".", "_get_resource_config", "(", "resource", ")", "config", "=", "CopyJobConfig", ".", "from_api_repr", "(", "config_resource", ")", "# Copy required fields to the job.", "copy_resource", "=", "config_resource", "[", "\"copy\"", "]", "destination", "=", "TableReference", ".", "from_api_repr", "(", "copy_resource", "[", "\"destinationTable\"", "]", ")", "sources", "=", "[", "]", "source_configs", "=", "copy_resource", ".", "get", "(", "\"sourceTables\"", ")", "if", "source_configs", "is", "None", ":", "single", "=", "copy_resource", ".", "get", "(", "\"sourceTable\"", ")", "if", "single", "is", "None", ":", "raise", "KeyError", "(", "\"Resource missing 'sourceTables' / 'sourceTable'\"", ")", "source_configs", "=", "[", "single", "]", "for", "source_config", "in", "source_configs", ":", "table_ref", "=", "TableReference", ".", "from_api_repr", "(", "source_config", ")", "sources", ".", "append", "(", "table_ref", ")", "job", "=", "cls", "(", "job_id", ",", "sources", ",", "destination", ",", "client", "=", "client", ",", "job_config", "=", "config", ")", "job", ".", "_set_properties", "(", "resource", ")", "return", "job" ]
Factory: construct a job given its API representation .. note: This method assumes that the project found in the resource matches the client's project. :type resource: dict :param resource: dataset job representation returned from the API :type client: :class:`google.cloud.bigquery.client.Client` :param client: Client which holds credentials and project configuration for the dataset. :rtype: :class:`google.cloud.bigquery.job.CopyJob` :returns: Job parsed from ``resource``.
[ "Factory", ":", "construct", "a", "job", "given", "its", "API", "representation" ]
python
train
42.555556
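An illustrative resource payload for the factory above. The project, dataset, and table ids are placeholders, client construction is omitted, and the top-level keys are assumed to follow the BigQuery REST jobs resource shape that _get_resource_config reads.

resource = {
    'jobReference': {'projectId': 'my-project', 'jobId': 'copy-123'},
    'configuration': {
        'copy': {
            'sourceTable': {'projectId': 'my-project',
                            'datasetId': 'src_dataset', 'tableId': 'events'},
            'destinationTable': {'projectId': 'my-project',
                                 'datasetId': 'dst_dataset', 'tableId': 'events_copy'},
        },
    },
}
job = CopyJob.from_api_repr(resource, client)  # `client` is a bigquery.Client
print(job.job_id)  # 'copy-123'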
Linaro/squad
squad/core/plugins.py
https://github.com/Linaro/squad/blob/27da5375e119312a86f231df95f99c979b9f48f0/squad/core/plugins.py#L48-L58
def get_plugins_by_feature(features):
    """
    Returns a list of plugin names where the plugins implement at least one
    of the *features*. *features* must be a list of Plugin methods, e.g.
    [Plugin.postprocess_testrun, Plugin.postprocess_testjob]
    """
    if not features:
        return get_all_plugins()
    plugins = PluginLoader.load_all().items()
    names = set([f.__name__ for f in features])
    return [e for e, plugin in plugins if names & set(plugin.__dict__.keys())]
[ "def", "get_plugins_by_feature", "(", "features", ")", ":", "if", "not", "features", ":", "return", "get_all_plugins", "(", ")", "plugins", "=", "PluginLoader", ".", "load_all", "(", ")", ".", "items", "(", ")", "names", "=", "set", "(", "[", "f", ".", "__name__", "for", "f", "in", "features", "]", ")", "return", "[", "e", "for", "e", ",", "plugin", "in", "plugins", "if", "names", "&", "set", "(", "plugin", ".", "__dict__", ".", "keys", "(", ")", ")", "]" ]
Returns a list of plugin names where the plugins implement at least one of the *features*. *features* must be a list of Plugin methods, e.g. [Plugin.postprocess_testrun, Plugin.postprocess_testjob]
[ "Returns", "a", "list", "of", "plugin", "names", "where", "the", "plugins", "implement", "at", "least", "one", "of", "the", "*", "features", "*", ".", "*", "features", "*", "must", "a", "list", "of", "Plugin", "methods", "e", ".", "g", ".", "[", "Plugin", ".", "postprocess_testrun", "Plugin", ".", "postprocess_testjob", "]" ]
python
train
43.363636
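A usage sketch for the function above, assuming the Plugin class is importable from the same squad.core.plugins module the record points at.

from squad.core.plugins import Plugin, get_plugins_by_feature

# Names of plugins that post-process test runs:
testrun_plugins = get_plugins_by_feature([Plugin.postprocess_testrun])
# Falsy input falls back to every registered plugin:
everything = get_plugins_by_feature(None)
print(testrun_plugins, everything)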
mikedh/trimesh
trimesh/units.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/units.py#L18-L37
def unit_conversion(current, desired): """ Calculate the conversion from one set of units to another. Parameters --------- current : str Unit system values are in now (eg 'millimeters') desired : str Unit system we'd like values in (eg 'inches') Returns --------- conversion : float Number to multiply by to put values into desired units """ current = str(current).strip().lower() desired = str(desired).strip().lower() conversion = TO_INCH[current] / TO_INCH[desired] return conversion
[ "def", "unit_conversion", "(", "current", ",", "desired", ")", ":", "current", "=", "str", "(", "current", ")", ".", "strip", "(", ")", ".", "lower", "(", ")", "desired", "=", "str", "(", "desired", ")", ".", "strip", "(", ")", ".", "lower", "(", ")", "conversion", "=", "TO_INCH", "[", "current", "]", "/", "TO_INCH", "[", "desired", "]", "return", "conversion" ]
Calculate the conversion from one set of units to another. Parameters --------- current : str Unit system values are in now (eg 'millimeters') desired : str Unit system we'd like values in (eg 'inches') Returns --------- conversion : float Number to multiply by to put values into desired units
[ "Calculate", "the", "conversion", "from", "one", "set", "of", "units", "to", "another", "." ]
python
train
27.45
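A direct demonstration of unit_conversion(); the 25.4 factor follows from the TO_INCH table the function divides through (1 inch = 25.4 mm).

from trimesh.units import unit_conversion

scale = unit_conversion('inches', 'millimeters')
print(scale)        # 25.4
print(2.0 * scale)  # a 2 in dimension becomes 50.8 mm
# Unknown unit strings raise KeyError from the TO_INCH lookup.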
urinieto/msaf
msaf/pymf/sub.py
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/pymf/sub.py#L206-L223
def factorize(self): """Do factorization s.t. data = dot(dot(data,beta),H), under the convexity constraint beta >=0, sum(beta)=1, H >=0, sum(H)=1 """ # compute new coefficients for reconstructing data points self.update_w() # for CHNMF it is sometimes useful to only compute # the basis vectors if self._compute_h: self.update_h() self.W = self.mdl.W self.H = self.mdl.H self.ferr = np.zeros(1) self.ferr[0] = self.mdl.frobenius_norm() self._print_cur_status(' Fro:' + str(self.ferr[0]))
[ "def", "factorize", "(", "self", ")", ":", "# compute new coefficients for reconstructing data points", "self", ".", "update_w", "(", ")", "# for CHNMF it is sometimes useful to only compute", "# the basis vectors", "if", "self", ".", "_compute_h", ":", "self", ".", "update_h", "(", ")", "self", ".", "W", "=", "self", ".", "mdl", ".", "W", "self", ".", "H", "=", "self", ".", "mdl", ".", "H", "self", ".", "ferr", "=", "np", ".", "zeros", "(", "1", ")", "self", ".", "ferr", "[", "0", "]", "=", "self", ".", "mdl", ".", "frobenius_norm", "(", ")", "self", ".", "_print_cur_status", "(", "' Fro:'", "+", "str", "(", "self", ".", "ferr", "[", "0", "]", ")", ")" ]
Do factorization s.t. data = dot(dot(data,beta),H), under the convexity constraint beta >=0, sum(beta)=1, H >=0, sum(H)=1
[ "Do", "factorization", "s", ".", "t", ".", "data", "=", "dot", "(", "dot", "(", "data", "beta", ")", "H", ")", "under", "the", "convexity", "constraint", "beta", ">", "=", "0", "sum", "(", "beta", ")", "=", "1", "H", ">", "=", "0", "sum", "(", "H", ")", "=", "1" ]
python
test
32.944444
SKA-ScienceDataProcessor/integration-prototype
sip/examples/log_spammer/log_spammer.py
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/examples/log_spammer/log_spammer.py#L11-L24
def main(sleep_length=0.1): """Log to stdout using python logging in a while loop""" log = logging.getLogger('sip.examples.log_spammer') log.info('Starting to spam log messages every %fs', sleep_length) counter = 0 try: while True: log.info('Hello %06i (log_spammer: %s, sip logging: %s)', counter, _version.__version__, __version__) counter += 1 time.sleep(sleep_length) except KeyboardInterrupt: log.info('Exiting...')
[ "def", "main", "(", "sleep_length", "=", "0.1", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "'sip.examples.log_spammer'", ")", "log", ".", "info", "(", "'Starting to spam log messages every %fs'", ",", "sleep_length", ")", "counter", "=", "0", "try", ":", "while", "True", ":", "log", ".", "info", "(", "'Hello %06i (log_spammer: %s, sip logging: %s)'", ",", "counter", ",", "_version", ".", "__version__", ",", "__version__", ")", "counter", "+=", "1", "time", ".", "sleep", "(", "sleep_length", ")", "except", "KeyboardInterrupt", ":", "log", ".", "info", "(", "'Exiting...'", ")" ]
Log to stdout using python logging in a while loop
[ "Log", "to", "stdout", "using", "python", "logging", "in", "a", "while", "loop" ]
python
train
36.071429
xmikos/reparser
reparser.py
https://github.com/xmikos/reparser/blob/0668112a15b9e8e9355a1261040c36b4a6034020/reparser.py#L93-L101
def build_groups(self, tokens): """Build dict of groups from list of tokens""" groups = {} for token in tokens: match_type = MatchType.start if token.group_end else MatchType.single groups[token.group_start] = (token, match_type) if token.group_end: groups[token.group_end] = (token, MatchType.end) return groups
[ "def", "build_groups", "(", "self", ",", "tokens", ")", ":", "groups", "=", "{", "}", "for", "token", "in", "tokens", ":", "match_type", "=", "MatchType", ".", "start", "if", "token", ".", "group_end", "else", "MatchType", ".", "single", "groups", "[", "token", ".", "group_start", "]", "=", "(", "token", ",", "match_type", ")", "if", "token", ".", "group_end", ":", "groups", "[", "token", ".", "group_end", "]", "=", "(", "token", ",", "MatchType", ".", "end", ")", "return", "groups" ]
Build dict of groups from list of tokens
[ "Build", "dict", "of", "groups", "from", "list", "of", "tokens" ]
python
train
43.111111
IndicoDataSolutions/IndicoIo-python
indicoio/text/keywords.py
https://github.com/IndicoDataSolutions/IndicoIo-python/blob/6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa/indicoio/text/keywords.py#L6-L28
def keywords(text, cloud=None, batch=False, api_key=None, version=2, batch_size=None, **kwargs): """ Given input text, returns series of keywords and associated scores Example usage: .. code-block:: python >>> import indicoio >>> import numpy as np >>> text = 'Monday: Delightful with mostly sunny skies. Highs in the low 70s.' >>> keywords = indicoio.keywords(text, top_n=3) >>> print "The keywords are: "+str(keywords.keys()) u'The keywords are ['delightful', 'highs', 'skies'] :param text: The text to be analyzed. :type text: str or unicode :rtype: Dictionary of feature score pairs """ if kwargs.get("language", "english") != "english": version = 1 url_params = {"batch": batch, "api_key": api_key, "version": version} return api_handler(text, cloud=cloud, api="keywords", url_params=url_params, batch_size=batch_size, **kwargs)
[ "def", "keywords", "(", "text", ",", "cloud", "=", "None", ",", "batch", "=", "False", ",", "api_key", "=", "None", ",", "version", "=", "2", ",", "batch_size", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ".", "get", "(", "\"language\"", ",", "\"english\"", ")", "!=", "\"english\"", ":", "version", "=", "1", "url_params", "=", "{", "\"batch\"", ":", "batch", ",", "\"api_key\"", ":", "api_key", ",", "\"version\"", ":", "version", "}", "return", "api_handler", "(", "text", ",", "cloud", "=", "cloud", ",", "api", "=", "\"keywords\"", ",", "url_params", "=", "url_params", ",", "batch_size", "=", "batch_size", ",", "*", "*", "kwargs", ")" ]
Given input text, returns series of keywords and associated scores Example usage: .. code-block:: python >>> import indicoio >>> import numpy as np >>> text = 'Monday: Delightful with mostly sunny skies. Highs in the low 70s.' >>> keywords = indicoio.keywords(text, top_n=3) >>> print "The keywords are: "+str(keywords.keys()) u'The keywords are ['delightful', 'highs', 'skies'] :param text: The text to be analyzed. :type text: str or unicode :rtype: Dictionary of feature score pairs
[ "Given", "input", "text", "returns", "series", "of", "keywords", "and", "associated", "scores" ]
python
train
39.521739
aws/sagemaker-python-sdk
src/sagemaker/utils.py
https://github.com/aws/sagemaker-python-sdk/blob/a9e724c7d3f5572b68c3903548c792a59d99799a/src/sagemaker/utils.py#L141-L148
def extract_name_from_job_arn(arn): """Returns the name used in the API given a full ARN for a training job or hyperparameter tuning job. """ slash_pos = arn.find('/') if slash_pos == -1: raise ValueError("Cannot parse invalid ARN: %s" % arn) return arn[(slash_pos + 1):]
[ "def", "extract_name_from_job_arn", "(", "arn", ")", ":", "slash_pos", "=", "arn", ".", "find", "(", "'/'", ")", "if", "slash_pos", "==", "-", "1", ":", "raise", "ValueError", "(", "\"Cannot parse invalid ARN: %s\"", "%", "arn", ")", "return", "arn", "[", "(", "slash_pos", "+", "1", ")", ":", "]" ]
Returns the name used in the API given a full ARN for a training job or hyperparameter tuning job.
[ "Returns", "the", "name", "used", "in", "the", "API", "given", "a", "full", "ARN", "for", "a", "training", "job", "or", "hyperparameter", "tuning", "job", "." ]
python
train
37
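A short demonstration of the ARN helper above; the ARN is made up but correctly shaped.

from sagemaker.utils import extract_name_from_job_arn

arn = 'arn:aws:sagemaker:us-west-2:123456789012:training-job/my-train-job'
print(extract_name_from_job_arn(arn))  # 'my-train-job'
# A slash-free string raises ValueError('Cannot parse invalid ARN: ...')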
manns/pyspread
pyspread/src/lib/vlc.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/vlc.py#L6193-L6201
def libvlc_audio_set_mute(p_mi, status):
    '''Set mute status.
    @param p_mi: media player.
    @param status: If status is true then mute, otherwise unmute
    @warning This function does not always work. If there is no active audio playback stream, the mute status might not be available. If digital pass-through (S/PDIF, HDMI...) is in use, muting may be inapplicable. Also some audio output plugins do not support muting at all.
    @note To force silent playback, disable all audio tracks. This is more efficient and reliable than mute.
    '''
    f = _Cfunctions.get('libvlc_audio_set_mute', None) or \
        _Cfunction('libvlc_audio_set_mute', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_int)
    return f(p_mi, status)
[ "def", "libvlc_audio_set_mute", "(", "p_mi", ",", "status", ")", ":", "f", "=", "_Cfunctions", ".", "get", "(", "'libvlc_audio_set_mute'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_audio_set_mute'", ",", "(", "(", "1", ",", ")", ",", "(", "1", ",", ")", ",", ")", ",", "None", ",", "None", ",", "MediaPlayer", ",", "ctypes", ".", "c_int", ")", "return", "f", "(", "p_mi", ",", "status", ")" ]
Set mute status. @param p_mi: media player. @param status: If status is true then mute, otherwise unmute @warning This function does not always work. If there is no active audio playback stream, the mute status might not be available. If digital pass-through (S/PDIF, HDMI...) is in use, muting may be inapplicable. Also some audio output plugins do not support muting at all. @note To force silent playback, disable all audio tracks. This is more efficient and reliable than mute.
[ "Set", "mute", "status", "." ]
python
train
82.555556
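A hedged sketch via the python-vlc wrapper, whose MediaPlayer.audio_set_mute() bottoms out in the ctypes binding above; the media path is a placeholder.

import vlc

player = vlc.MediaPlayer('example.mp3')  # placeholder media path
player.play()
player.audio_set_mute(True)   # mute, subject to the caveats in the docstring
player.audio_set_mute(False)  # unmute
# To guarantee silence regardless of output plugin, disable audio tracks instead.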