Dataset schema (field name, type, observed range):

    repo              string, length 7-55
    path              string, length 4-223
    url               string, length 87-315
    code              string, length 75-104k
    code_tokens       list
    docstring         string, length 1-46.9k
    docstring_tokens  list
    language          string, 1 distinct value
    partition         string, 3 distinct values
    avg_line_len      float64, 7.91-980
pysathq/pysat
examples/lsu.py
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/examples/lsu.py#L358-L368
def parse_formula(fml_file):
    """
    Parse and return MaxSAT formula.
    """

    if re.search('\.wcnf(\.(gz|bz2|lzma|xz))?$', fml_file):
        fml = WCNF(from_file=fml_file)
    else:  # expecting '*.cnf'
        fml = CNF(from_file=fml_file).weighted()

    return fml
[ "def", "parse_formula", "(", "fml_file", ")", ":", "if", "re", ".", "search", "(", "'\\.wcnf(\\.(gz|bz2|lzma|xz))?$'", ",", "fml_file", ")", ":", "fml", "=", "WCNF", "(", "from_file", "=", "fml_file", ")", "else", ":", "# expecting '*.cnf'", "fml", "=", "CNF", "(", "from_file", "=", "fml_file", ")", ".", "weighted", "(", ")", "return", "fml" ]
Parse and return MaxSAT formula.
[ "Parse", "and", "return", "MaxSAT", "formula", "." ]
python
train
24.636364
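Note on the record above: the file-type dispatch in parse_formula is a single regular-expression test on the filename. A minimal self-contained sketch of that pattern (standard library only; pick_parser is a hypothetical stand-in for the WCNF/CNF constructors):

import re

# Files ending in .wcnf, optionally compressed (.gz/.bz2/.lzma/.xz), are
# treated as weighted MaxSAT input; anything else is parsed as plain CNF.
def pick_parser(fml_file):
    if re.search(r'\.wcnf(\.(gz|bz2|lzma|xz))?$', fml_file):
        return 'wcnf'
    return 'cnf'

assert pick_parser('formula.wcnf.gz') == 'wcnf'
assert pick_parser('formula.cnf') == 'cnf'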
angr/angr
angr/simos/javavm.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/simos/javavm.py#L265-L297
def get_default_value_by_type(type_, state=None):
    """
    Java specifies default values for primitive and reference types. This
    method returns the default value for a given type.

    :param str type_: Name of type.
    :return: Default value for this type.
    """
    if type_ in ['byte', 'char', 'short', 'int', 'boolean']:
        return BVS('default_value_{}'.format(type_), 32)
    elif type_ == "long":
        return BVS('default_value_{}'.format(type_), 64)
    elif type_ == 'float':
        return FPS('default_value_{}'.format(type_), FSORT_FLOAT)
    elif type_ == 'double':
        return FPS('default_value_{}'.format(type_), FSORT_DOUBLE)
    elif state is not None:
        if type_ == 'java.lang.String':
            return SimSootValue_StringRef.new_string(
                state, StringS('default_value_{}'.format(type_), 1000))
        if type_.endswith('[][]'):
            raise NotImplementedError
            # multiarray = SimSootExpr_NewMultiArray.new_array(self.state, element_type, size)
            # multiarray.add_default_value_generator(lambda s: SimSootExpr_NewMultiArray._generate_inner_array(s, element_type, sizes))
            # return multiarray
        elif type_.endswith('[]'):
            array = SimSootExpr_NewArray.new_array(state, type_[:-2], BVV(2, 32))
            return array
        else:
            return SimSootValue_ThisRef.new_object(state, type_, symbolic=True, init_object=False)
    else:
        # not a primitive type
        # => treat it as a reference
        return SootNullConstant()
[ "def", "get_default_value_by_type", "(", "type_", ",", "state", "=", "None", ")", ":", "if", "type_", "in", "[", "'byte'", ",", "'char'", ",", "'short'", ",", "'int'", ",", "'boolean'", "]", ":", "return", "BVS", "(", "'default_value_{}'", ".", "format", "(", "type_", ")", ",", "32", ")", "elif", "type_", "==", "\"long\"", ":", "return", "BVS", "(", "'default_value_{}'", ".", "format", "(", "type_", ")", ",", "64", ")", "elif", "type_", "==", "'float'", ":", "return", "FPS", "(", "'default_value_{}'", ".", "format", "(", "type_", ")", ",", "FSORT_FLOAT", ")", "elif", "type_", "==", "'double'", ":", "return", "FPS", "(", "'default_value_{}'", ".", "format", "(", "type_", ")", ",", "FSORT_DOUBLE", ")", "elif", "state", "is", "not", "None", ":", "if", "type_", "==", "'java.lang.String'", ":", "return", "SimSootValue_StringRef", ".", "new_string", "(", "state", ",", "StringS", "(", "'default_value_{}'", ".", "format", "(", "type_", ")", ",", "1000", ")", ")", "if", "type_", ".", "endswith", "(", "'[][]'", ")", ":", "raise", "NotImplementedError", "# multiarray = SimSootExpr_NewMultiArray.new_array(self.state, element_type, size)", "# multiarray.add_default_value_generator(lambda s: SimSootExpr_NewMultiArray._generate_inner_array(s, element_type, sizes))", "# return multiarray", "elif", "type_", ".", "endswith", "(", "'[]'", ")", ":", "array", "=", "SimSootExpr_NewArray", ".", "new_array", "(", "state", ",", "type_", "[", ":", "-", "2", "]", ",", "BVV", "(", "2", ",", "32", ")", ")", "return", "array", "else", ":", "return", "SimSootValue_ThisRef", ".", "new_object", "(", "state", ",", "type_", ",", "symbolic", "=", "True", ",", "init_object", "=", "False", ")", "else", ":", "# not a primitive type", "# => treat it as a reference", "return", "SootNullConstant", "(", ")" ]
Java specifies default values for primitive and reference types. This method returns the default value for a given type.

:param str type_: Name of type.
:return: Default value for this type.
[ "Java", "specify", "defaults", "values", "for", "primitive", "and", "reference", "types", ".", "This", "method", "returns", "the", "default", "value", "for", "a", "given", "type", "." ]
python
train
49.909091
gwpy/gwpy
gwpy/table/io/hacr.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/hacr.py#L75-L88
def get_hacr_channels(db=None, gps=None, connection=None, **conectkwargs):
    """Return the names of all channels present in the given HACR database
    """
    # connect if needed
    if connection is None:
        if gps is None:
            gps = from_gps('now')
        if db is None:
            db = get_database_names(gps, gps)[0]
        connection = connect(db=db, **conectkwargs)
    # query
    out = query("select channel from job where monitorName = 'chacr'")
    return [r[0] for r in out]
[ "def", "get_hacr_channels", "(", "db", "=", "None", ",", "gps", "=", "None", ",", "connection", "=", "None", ",", "*", "*", "conectkwargs", ")", ":", "# connect if needed", "if", "connection", "is", "None", ":", "if", "gps", "is", "None", ":", "gps", "=", "from_gps", "(", "'now'", ")", "if", "db", "is", "None", ":", "db", "=", "get_database_names", "(", "gps", ",", "gps", ")", "[", "0", "]", "connection", "=", "connect", "(", "db", "=", "db", ",", "*", "*", "conectkwargs", ")", "# query", "out", "=", "query", "(", "\"select channel from job where monitorName = 'chacr'\"", ")", "return", "[", "r", "[", "0", "]", "for", "r", "in", "out", "]" ]
Return the names of all channels present in the given HACR database
[ "Return", "the", "names", "of", "all", "channels", "present", "in", "the", "given", "HACR", "database" ]
python
train
36.642857
nugget/python-insteonplm
insteonplm/messages/userdata.py
https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/messages/userdata.py#L53-L63
def human(self):
    """Emit the address in human-readable format (AA.BB.CC)."""
    strout = ''
    first = True
    for i in range(0, 28, 2):
        if first:
            first = False
        else:
            strout = strout + '.'
        strout = strout + self.hex[i:i + 2]
    return strout
[ "def", "human", "(", "self", ")", ":", "strout", "=", "''", "first", "=", "True", "for", "i", "in", "range", "(", "0", ",", "28", ",", "2", ")", ":", "if", "first", ":", "first", "=", "False", "else", ":", "strout", "=", "strout", "+", "'.'", "strout", "=", "strout", "+", "self", ".", "hex", "[", "i", ":", "i", "+", "2", "]", "return", "strout" ]
Emit the address in human-readable format (AA.BB.CC).
[ "Emit", "the", "address", "in", "human", "-", "readible", "format", "(", "AA", ".", "BB", ".", "CC", ")", "." ]
python
train
29.727273
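Note on the record above: the first/dot bookkeeping in human() is the classic join-over-slices pattern. An equivalent sketch for a plain hex string (assuming, as range(0, 28, 2) implies, a fixed-width hex payload):

# Split a hex string into two-character bytes and join with dots (AA.BB.CC).
def human(hexstr):
    return '.'.join(hexstr[i:i + 2] for i in range(0, len(hexstr), 2))

assert human('0a0b0c') == '0a.0b.0c'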
watchforstock/evohome-client
evohomeclient/__init__.py
https://github.com/watchforstock/evohome-client/blob/f1cb9273e97946d79c0651f00a218abbf7ada53a/evohomeclient/__init__.py#L266-L275
def set_temperature(self, zone, temperature, until=None):
    """Sets the temperature of the given zone."""
    if until is None:
        data = {"Value": temperature, "Status": "Hold", "NextTime": None}
    else:
        data = {"Value": temperature, "Status": "Temporary",
                "NextTime": until.strftime('%Y-%m-%dT%H:%M:%SZ')}
    self._set_heat_setpoint(zone, data)
[ "def", "set_temperature", "(", "self", ",", "zone", ",", "temperature", ",", "until", "=", "None", ")", ":", "if", "until", "is", "None", ":", "data", "=", "{", "\"Value\"", ":", "temperature", ",", "\"Status\"", ":", "\"Hold\"", ",", "\"NextTime\"", ":", "None", "}", "else", ":", "data", "=", "{", "\"Value\"", ":", "temperature", ",", "\"Status\"", ":", "\"Temporary\"", ",", "\"NextTime\"", ":", "until", ".", "strftime", "(", "'%Y-%m-%dT%H:%M:%SZ'", ")", "}", "self", ".", "_set_heat_setpoint", "(", "zone", ",", "data", ")" ]
Sets the temperature of the given zone.
[ "Sets", "the", "temperature", "of", "the", "given", "zone", "." ]
python
train
42
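Note on the record above: the two branches of set_temperature differ only in Status and the formatted NextTime. A quick illustration of the payload shapes (the datetime and setpoint values are made up):

from datetime import datetime

until = datetime(2024, 1, 1, 12, 0, 0)
hold = {"Value": 21.5, "Status": "Hold", "NextTime": None}
temporary = {"Value": 21.5, "Status": "Temporary",
             "NextTime": until.strftime('%Y-%m-%dT%H:%M:%SZ')}
assert temporary["NextTime"] == '2024-01-01T12:00:00Z'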
LuminosoInsight/ordered-set
ordered_set.py
https://github.com/LuminosoInsight/ordered-set/blob/a29eaedcedfe5072bcee11bdef61dea321d5e9f9/ordered_set.py#L209-L226
def pop(self):
    """
    Remove and return the last element from the set.

    Raises KeyError if the set is empty.

    Example:
        >>> oset = OrderedSet([1, 2, 3])
        >>> oset.pop()
        3
    """
    if not self.items:
        raise KeyError("Set is empty")

    elem = self.items[-1]
    del self.items[-1]
    del self.map[elem]
    return elem
[ "def", "pop", "(", "self", ")", ":", "if", "not", "self", ".", "items", ":", "raise", "KeyError", "(", "\"Set is empty\"", ")", "elem", "=", "self", ".", "items", "[", "-", "1", "]", "del", "self", ".", "items", "[", "-", "1", "]", "del", "self", ".", "map", "[", "elem", "]", "return", "elem" ]
Remove and return the last element from the set.

Raises KeyError if the set is empty.

Example:
    >>> oset = OrderedSet([1, 2, 3])
    >>> oset.pop()
    3
[ "Remove", "and", "return", "the", "last", "element", "from", "the", "set", "." ]
python
train
22.388889
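Note on the record above: the pair of del statements in OrderedSet.pop() can collapse to one call, since list.pop() already removes and returns the last item. A self-contained sketch of the same bookkeeping on plain structures:

# items/mapping mimic OrderedSet's self.items list and self.map index.
items = [1, 2, 3]
mapping = {1: 0, 2: 1, 3: 2}

elem = items.pop()   # replaces: elem = items[-1]; del items[-1]
del mapping[elem]
assert elem == 3 and items == [1, 2] and 3 not in mapping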
ziwenxie/netease-dl
netease/logger.py
https://github.com/ziwenxie/netease-dl/blob/84b226fc07b10f7f66580f0fc69f10356f66b5c3/netease/logger.py#L24-L38
def get_logger(name):
    """Return a logger with a file handler."""
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)

    # File output handler
    file_handler = logging.FileHandler(log_path)
    file_handler.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s %(name)12s %(levelname)8s %(lineno)s %(message)s',
        datefmt='%m/%d/%Y %I:%M:%S %p')
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    return logger
[ "def", "get_logger", "(", "name", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "name", ")", "logger", ".", "setLevel", "(", "logging", ".", "INFO", ")", "# File output handler", "file_handler", "=", "logging", ".", "FileHandler", "(", "log_path", ")", "file_handler", ".", "setLevel", "(", "logging", ".", "INFO", ")", "formatter", "=", "logging", ".", "Formatter", "(", "'%(asctime)s %(name)12s %(levelname)8s %(lineno)s %(message)s'", ",", "datefmt", "=", "'%m/%d/%Y %I:%M:%S %p'", ")", "file_handler", ".", "setFormatter", "(", "formatter", ")", "logger", ".", "addHandler", "(", "file_handler", ")", "return", "logger" ]
Return a logger with a file handler.
[ "Return", "a", "logger", "with", "a", "file", "handler", "." ]
python
train
32.266667
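Note on the record above: logging.getLogger(name) returns a cached logger per name, so calling get_logger twice attaches two FileHandlers and duplicates every record. A hedged sketch of a guard (log_path here is a made-up temporary path, since the original reads it from module scope):

import logging
import os
import tempfile

log_path = os.path.join(tempfile.gettempdir(), 'demo.log')  # hypothetical

def get_logger(name):
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    if not logger.handlers:  # skip re-adding the handler on repeated calls
        file_handler = logging.FileHandler(log_path)
        file_handler.setLevel(logging.INFO)
        file_handler.setFormatter(logging.Formatter(
            '%(asctime)s %(name)12s %(levelname)8s %(lineno)s %(message)s',
            datefmt='%m/%d/%Y %I:%M:%S %p'))
        logger.addHandler(file_handler)
    return logger

assert get_logger('demo') is get_logger('demo')
assert len(logging.getLogger('demo').handlers) == 1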
spacetelescope/drizzlepac
drizzlepac/wcs_functions.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/wcs_functions.py#L806-L858
def fitlin(imgarr, refarr):
    """
    Compute the least-squares fit between two arrays.

    A Python translation of 'FITLIN' from 'drutil.f' (Drizzle V2.9).
    """
    # Initialize variables
    _mat = np.zeros((3, 3), dtype=np.float64)
    _xorg = imgarr[0][0]
    _yorg = imgarr[0][1]
    _xoorg = refarr[0][0]
    _yoorg = refarr[0][1]
    _sigxox = 0.
    _sigxoy = 0.
    _sigxo = 0.
    _sigyox = 0.
    _sigyoy = 0.
    _sigyo = 0.
    _npos = len(imgarr)

    # Populate matrices
    for i in range(_npos):
        _mat[0][0] += np.power((imgarr[i][0] - _xorg), 2)
        _mat[0][1] += (imgarr[i][0] - _xorg) * (imgarr[i][1] - _yorg)
        _mat[0][2] += (imgarr[i][0] - _xorg)
        _mat[1][1] += np.power((imgarr[i][1] - _yorg), 2)
        _mat[1][2] += imgarr[i][1] - _yorg

        _sigxox += (refarr[i][0] - _xoorg) * (imgarr[i][0] - _xorg)
        _sigxoy += (refarr[i][0] - _xoorg) * (imgarr[i][1] - _yorg)
        _sigxo += refarr[i][0] - _xoorg
        _sigyox += (refarr[i][1] - _yoorg) * (imgarr[i][0] - _xorg)
        _sigyoy += (refarr[i][1] - _yoorg) * (imgarr[i][1] - _yorg)
        _sigyo += refarr[i][1] - _yoorg

    _mat[2][2] = _npos
    _mat[1][0] = _mat[0][1]
    _mat[2][0] = _mat[0][2]
    _mat[2][1] = _mat[1][2]

    # Now invert this matrix
    _mat = linalg.inv(_mat)

    _a = _sigxox * _mat[0][0] + _sigxoy * _mat[0][1] + _sigxo * _mat[0][2]
    _b = -1 * (_sigxox * _mat[1][0] + _sigxoy * _mat[1][1] + _sigxo * _mat[1][2])
    # _x0 = _sigxox*_mat[2][0]+_sigxoy*_mat[2][1]+_sigxo*_mat[2][2]

    _c = _sigyox * _mat[1][0] + _sigyoy * _mat[1][1] + _sigyo * _mat[1][2]
    _d = _sigyox * _mat[0][0] + _sigyoy * _mat[0][1] + _sigyo * _mat[0][2]
    # _y0 = _sigyox*_mat[2][0]+_sigyoy*_mat[2][1]+_sigyo*_mat[2][2]

    _xt = _xoorg - _a * _xorg + _b * _yorg
    _yt = _yoorg - _d * _xorg - _c * _yorg

    return [_a, _b, _xt], [_c, _d, _yt]
[ "def", "fitlin", "(", "imgarr", ",", "refarr", ")", ":", "# Initialize variables", "_mat", "=", "np", ".", "zeros", "(", "(", "3", ",", "3", ")", ",", "dtype", "=", "np", ".", "float64", ")", "_xorg", "=", "imgarr", "[", "0", "]", "[", "0", "]", "_yorg", "=", "imgarr", "[", "0", "]", "[", "1", "]", "_xoorg", "=", "refarr", "[", "0", "]", "[", "0", "]", "_yoorg", "=", "refarr", "[", "0", "]", "[", "1", "]", "_sigxox", "=", "0.", "_sigxoy", "=", "0.", "_sigxo", "=", "0.", "_sigyox", "=", "0.", "_sigyoy", "=", "0.", "_sigyo", "=", "0.", "_npos", "=", "len", "(", "imgarr", ")", "# Populate matrices", "for", "i", "in", "range", "(", "_npos", ")", ":", "_mat", "[", "0", "]", "[", "0", "]", "+=", "np", ".", "power", "(", "(", "imgarr", "[", "i", "]", "[", "0", "]", "-", "_xorg", ")", ",", "2", ")", "_mat", "[", "0", "]", "[", "1", "]", "+=", "(", "imgarr", "[", "i", "]", "[", "0", "]", "-", "_xorg", ")", "*", "(", "imgarr", "[", "i", "]", "[", "1", "]", "-", "_yorg", ")", "_mat", "[", "0", "]", "[", "2", "]", "+=", "(", "imgarr", "[", "i", "]", "[", "0", "]", "-", "_xorg", ")", "_mat", "[", "1", "]", "[", "1", "]", "+=", "np", ".", "power", "(", "(", "imgarr", "[", "i", "]", "[", "1", "]", "-", "_yorg", ")", ",", "2", ")", "_mat", "[", "1", "]", "[", "2", "]", "+=", "imgarr", "[", "i", "]", "[", "1", "]", "-", "_yorg", "_sigxox", "+=", "(", "refarr", "[", "i", "]", "[", "0", "]", "-", "_xoorg", ")", "*", "(", "imgarr", "[", "i", "]", "[", "0", "]", "-", "_xorg", ")", "_sigxoy", "+=", "(", "refarr", "[", "i", "]", "[", "0", "]", "-", "_xoorg", ")", "*", "(", "imgarr", "[", "i", "]", "[", "1", "]", "-", "_yorg", ")", "_sigxo", "+=", "refarr", "[", "i", "]", "[", "0", "]", "-", "_xoorg", "_sigyox", "+=", "(", "refarr", "[", "i", "]", "[", "1", "]", "-", "_yoorg", ")", "*", "(", "imgarr", "[", "i", "]", "[", "0", "]", "-", "_xorg", ")", "_sigyoy", "+=", "(", "refarr", "[", "i", "]", "[", "1", "]", "-", "_yoorg", ")", "*", "(", "imgarr", "[", "i", "]", "[", "1", "]", "-", "_yorg", ")", "_sigyo", "+=", "refarr", "[", "i", "]", "[", "1", "]", "-", "_yoorg", "_mat", "[", "2", "]", "[", "2", "]", "=", "_npos", "_mat", "[", "1", "]", "[", "0", "]", "=", "_mat", "[", "0", "]", "[", "1", "]", "_mat", "[", "2", "]", "[", "0", "]", "=", "_mat", "[", "0", "]", "[", "2", "]", "_mat", "[", "2", "]", "[", "1", "]", "=", "_mat", "[", "1", "]", "[", "2", "]", "# Now invert this matrix", "_mat", "=", "linalg", ".", "inv", "(", "_mat", ")", "_a", "=", "_sigxox", "*", "_mat", "[", "0", "]", "[", "0", "]", "+", "_sigxoy", "*", "_mat", "[", "0", "]", "[", "1", "]", "+", "_sigxo", "*", "_mat", "[", "0", "]", "[", "2", "]", "_b", "=", "-", "1", "*", "(", "_sigxox", "*", "_mat", "[", "1", "]", "[", "0", "]", "+", "_sigxoy", "*", "_mat", "[", "1", "]", "[", "1", "]", "+", "_sigxo", "*", "_mat", "[", "1", "]", "[", "2", "]", ")", "#_x0 = _sigxox*_mat[2][0]+_sigxoy*_mat[2][1]+_sigxo*_mat[2][2]", "_c", "=", "_sigyox", "*", "_mat", "[", "1", "]", "[", "0", "]", "+", "_sigyoy", "*", "_mat", "[", "1", "]", "[", "1", "]", "+", "_sigyo", "*", "_mat", "[", "1", "]", "[", "2", "]", "_d", "=", "_sigyox", "*", "_mat", "[", "0", "]", "[", "0", "]", "+", "_sigyoy", "*", "_mat", "[", "0", "]", "[", "1", "]", "+", "_sigyo", "*", "_mat", "[", "0", "]", "[", "2", "]", "#_y0 = _sigyox*_mat[2][0]+_sigyoy*_mat[2][1]+_sigyo*_mat[2][2]", "_xt", "=", "_xoorg", "-", "_a", "*", "_xorg", "+", "_b", "*", "_yorg", "_yt", "=", "_yoorg", "-", "_d", "*", "_xorg", "-", "_c", "*", "_yorg", "return", "[", "_a", ",", "_b", ",", "_xt", "]", ",", "[", "_c", 
",", "_d", ",", "_yt", "]" ]
Compute the least-squares fit between two arrays. A Python translation of 'FITLIN' from 'drutil.f' (Drizzle V2.9).
[ "Compute", "the", "least", "-", "squares", "fit", "between", "two", "arrays", ".", "A", "Python", "translation", "of", "FITLIN", "from", "drutil", ".", "f", "(", "Drizzle", "V2", ".", "9", ")", "." ]
python
train
33.207547
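Note on the record above: fitlin builds and inverts a 3x3 system of normal equations by hand to fit an affine transform mapping imgarr onto refarr (the sign convention and coefficient packing follow the Fortran original). The same fit can be cross-checked with a direct least-squares solve; a NumPy-only sketch with made-up sample points:

import numpy as np

# Fit x' = a*x + b*y + t for each reference coordinate via lstsq.
img = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])
ref = np.stack([5. - img[:, 1], 7. + img[:, 0]], axis=1)  # rotate 90 deg, shift (5, 7)

design = np.column_stack([img, np.ones(len(img))])  # columns: x, y, 1
coef_x, *_ = np.linalg.lstsq(design, ref[:, 0], rcond=None)
coef_y, *_ = np.linalg.lstsq(design, ref[:, 1], rcond=None)
# coef_x ~ [0, -1, 5] and coef_y ~ [1, 0, 7]: the rotation+shift is recovered.
assert np.allclose(coef_x, [0., -1., 5.]) and np.allclose(coef_y, [1., 0., 7.])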
mdgoldberg/sportsref
sportsref/nfl/boxscores.py
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/boxscores.py#L370-L378
def ref_info(self):
    """Gets a dictionary of ref positions and the ref IDs of the refs for
    that game.

    :returns: A dictionary of ref positions and IDs.
    """
    doc = self.get_doc()
    table = doc('table#officials')
    return sportsref.utils.parse_info_table(table)
[ "def", "ref_info", "(", "self", ")", ":", "doc", "=", "self", ".", "get_doc", "(", ")", "table", "=", "doc", "(", "'table#officials'", ")", "return", "sportsref", ".", "utils", ".", "parse_info_table", "(", "table", ")" ]
Gets a dictionary of ref positions and the ref IDs of the refs for that game.

:returns: A dictionary of ref positions and IDs.
[ "Gets", "a", "dictionary", "of", "ref", "positions", "and", "the", "ref", "IDs", "of", "the", "refs", "for", "that", "game", "." ]
python
test
33.444444
scour-project/scour
scour/scour.py
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L1885-L1894
def taint(taintedSet, taintedAttribute):
    u"""Adds an attribute to a set of attributes.

    Related attributes are also included."""
    taintedSet.add(taintedAttribute)
    if taintedAttribute == 'marker':
        taintedSet |= set(['marker-start', 'marker-mid', 'marker-end'])
    if taintedAttribute in ['marker-start', 'marker-mid', 'marker-end']:
        taintedSet.add('marker')
    return taintedSet
[ "def", "taint", "(", "taintedSet", ",", "taintedAttribute", ")", ":", "taintedSet", ".", "add", "(", "taintedAttribute", ")", "if", "taintedAttribute", "==", "'marker'", ":", "taintedSet", "|=", "set", "(", "[", "'marker-start'", ",", "'marker-mid'", ",", "'marker-end'", "]", ")", "if", "taintedAttribute", "in", "[", "'marker-start'", ",", "'marker-mid'", ",", "'marker-end'", "]", ":", "taintedSet", ".", "add", "(", "'marker'", ")", "return", "taintedSet" ]
u"""Adds an attribute to a set of attributes. Related attributes are also included.
[ "u", "Adds", "an", "attribute", "to", "a", "set", "of", "attributes", "." ]
python
train
40.1
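Note on the record above: taint() both mutates and returns the set, and the two if blocks make the marker relation symmetric in both directions. A short demonstration (assuming the function is importable as scour.scour.taint, where it lives in the real package):

from scour.scour import taint  # assumption: the function shown in the record above

assert taint(set(), 'marker') == {'marker', 'marker-start', 'marker-mid', 'marker-end'}
assert taint(set(), 'marker-mid') == {'marker-mid', 'marker'}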
saltstack/salt
salt/modules/nfs3.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nfs3.py#L69-L82
def del_export(exports='/etc/exports', path=None):
    '''
    Remove an export

    CLI Example:

    .. code-block:: bash

        salt '*' nfs.del_export /media/storage
    '''
    edict = list_exports(exports)
    del edict[path]
    _write_exports(exports, edict)
    return edict
[ "def", "del_export", "(", "exports", "=", "'/etc/exports'", ",", "path", "=", "None", ")", ":", "edict", "=", "list_exports", "(", "exports", ")", "del", "edict", "[", "path", "]", "_write_exports", "(", "exports", ",", "edict", ")", "return", "edict" ]
Remove an export

CLI Example:

.. code-block:: bash

    salt '*' nfs.del_export /media/storage
[ "Remove", "an", "export" ]
python
train
19.428571
hotdoc/hotdoc
hotdoc/utils/loggable.py
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/utils/loggable.py#L213-L222
def _log(code, message, level, domain):
    """Call this to add an entry in the journal"""
    entry = LogEntry(level, domain, code, message)
    Logger.journal.append(entry)
    if Logger.silent:
        return
    if level >= Logger._verbosity:
        _print_entry(entry)
[ "def", "_log", "(", "code", ",", "message", ",", "level", ",", "domain", ")", ":", "entry", "=", "LogEntry", "(", "level", ",", "domain", ",", "code", ",", "message", ")", "Logger", ".", "journal", ".", "append", "(", "entry", ")", "if", "Logger", ".", "silent", ":", "return", "if", "level", ">=", "Logger", ".", "_verbosity", ":", "_print_entry", "(", "entry", ")" ]
Call this to add an entry in the journal
[ "Call", "this", "to", "add", "an", "entry", "in", "the", "journal" ]
python
train
29.5
pasztorpisti/py-flags
src/flags.py
https://github.com/pasztorpisti/py-flags/blob/bc48adb5edd7340ea1a686622d7993b4bcf4bfc2/src/flags.py#L95-L112
def process_inline_members_definition(members):
    """
    :param members: this can be any of the following:
        - a string containing a space and/or comma separated list of names:
          e.g.: "item1 item2 item3" OR "item1,item2,item3" OR "item1, item2, item3"
        - tuple/list/Set of strings (names)
        - Mapping of (name, data) pairs
        - any kind of iterable that yields (name, data) pairs
    :return: An iterable of (name, data) pairs.
    """
    if isinstance(members, str):
        members = ((name, UNDEFINED) for name in members.replace(',', ' ').split())
    elif isinstance(members, (tuple, list, collections.Set)):
        if members and isinstance(next(iter(members)), str):
            members = ((name, UNDEFINED) for name in members)
    elif isinstance(members, collections.Mapping):
        members = members.items()
    return members
[ "def", "process_inline_members_definition", "(", "members", ")", ":", "if", "isinstance", "(", "members", ",", "str", ")", ":", "members", "=", "(", "(", "name", ",", "UNDEFINED", ")", "for", "name", "in", "members", ".", "replace", "(", "','", ",", "' '", ")", ".", "split", "(", ")", ")", "elif", "isinstance", "(", "members", ",", "(", "tuple", ",", "list", ",", "collections", ".", "Set", ")", ")", ":", "if", "members", "and", "isinstance", "(", "next", "(", "iter", "(", "members", ")", ")", ",", "str", ")", ":", "members", "=", "(", "(", "name", ",", "UNDEFINED", ")", "for", "name", "in", "members", ")", "elif", "isinstance", "(", "members", ",", "collections", ".", "Mapping", ")", ":", "members", "=", "members", ".", "items", "(", ")", "return", "members" ]
:param members: this can be any of the following:
    - a string containing a space and/or comma separated list of names:
      e.g.: "item1 item2 item3" OR "item1,item2,item3" OR "item1, item2, item3"
    - tuple/list/Set of strings (names)
    - Mapping of (name, data) pairs
    - any kind of iterable that yields (name, data) pairs
:return: An iterable of (name, data) pairs.
[ ":", "param", "members", ":", "this", "can", "be", "any", "of", "the", "following", ":", "-", "a", "string", "containing", "a", "space", "and", "/", "or", "comma", "separated", "list", "of", "names", ":", "e", ".", "g", ".", ":", "item1", "item2", "item3", "OR", "item1", "item2", "item3", "OR", "item1", "item2", "item3", "-", "tuple", "/", "list", "/", "Set", "of", "strings", "(", "names", ")", "-", "Mapping", "of", "(", "name", "data", ")", "pairs", "-", "any", "kind", "of", "iterable", "that", "yields", "(", "name", "data", ")", "pairs", ":", "return", ":", "An", "iterable", "of", "(", "name", "data", ")", "pairs", "." ]
python
train
46.666667
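Note on the record above: collections.Set and collections.Mapping are the pre-3.3 aliases for the ABCs; they were removed in Python 3.10, where this code raises AttributeError. A sketch of the same dispatch against collections.abc (UNDEFINED is the module's own sentinel; a plain object() stands in here):

from collections.abc import Mapping, Set

UNDEFINED = object()  # stand-in for the flags module's sentinel

def process_inline_members_definition(members):
    if isinstance(members, str):
        members = ((name, UNDEFINED) for name in members.replace(',', ' ').split())
    elif isinstance(members, (tuple, list, Set)):
        if members and isinstance(next(iter(members)), str):
            members = ((name, UNDEFINED) for name in members)
    elif isinstance(members, Mapping):
        members = members.items()
    return members

assert [n for n, _ in process_inline_members_definition('a, b c')] == ['a', 'b', 'c']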
twilio/twilio-python
twilio/rest/autopilot/v1/assistant/model_build.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/autopilot/v1/assistant/model_build.py#L119-L137
def create(self, status_callback=values.unset, unique_name=values.unset):
    """
    Create a new ModelBuildInstance

    :param unicode status_callback: The URL we should call using a POST method to send status information to your application
    :param unicode unique_name: An application-defined string that uniquely identifies the new resource

    :returns: Newly created ModelBuildInstance
    :rtype: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildInstance
    """
    data = values.of({'StatusCallback': status_callback, 'UniqueName': unique_name, })

    payload = self._version.create(
        'POST',
        self._uri,
        data=data,
    )

    return ModelBuildInstance(self._version, payload, assistant_sid=self._solution['assistant_sid'], )
[ "def", "create", "(", "self", ",", "status_callback", "=", "values", ".", "unset", ",", "unique_name", "=", "values", ".", "unset", ")", ":", "data", "=", "values", ".", "of", "(", "{", "'StatusCallback'", ":", "status_callback", ",", "'UniqueName'", ":", "unique_name", ",", "}", ")", "payload", "=", "self", ".", "_version", ".", "create", "(", "'POST'", ",", "self", ".", "_uri", ",", "data", "=", "data", ",", ")", "return", "ModelBuildInstance", "(", "self", ".", "_version", ",", "payload", ",", "assistant_sid", "=", "self", ".", "_solution", "[", "'assistant_sid'", "]", ",", ")" ]
Create a new ModelBuildInstance

:param unicode status_callback: The URL we should call using a POST method to send status information to your application
:param unicode unique_name: An application-defined string that uniquely identifies the new resource

:returns: Newly created ModelBuildInstance
:rtype: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildInstance
[ "Create", "a", "new", "ModelBuildInstance" ]
python
train
42.526316
brutasse/graphite-api
graphite_api/functions.py
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3298-L3335
def aggregateLine(requestContext, seriesList, func='avg'):
    """
    Takes a metric or wildcard seriesList and draws a horizontal line
    based on the function applied to each series.

    Note: By default, the graphite renderer consolidates data points by
    averaging data points over time. If you are using the 'min' or 'max'
    function for aggregateLine, this can cause an unusual gap in the line
    drawn by this function and the data itself. To fix this, you should
    use the consolidateBy() function with the same function argument you
    are using for aggregateLine. This will ensure that the proper data
    points are retained and the graph should line up correctly.

    Example::

        &target=aggregateLine(server01.connections.total, 'avg')
        &target=aggregateLine(server*.connections.total, 'avg')
    """
    t_funcs = {'avg': safeAvg, 'min': safeMin, 'max': safeMax}

    if func not in t_funcs:
        raise ValueError("Invalid function %s" % func)

    results = []
    for series in seriesList:
        value = t_funcs[func](series)
        if value is not None:
            name = 'aggregateLine(%s, %g)' % (series.name, value)
        else:
            name = 'aggregateLine(%s, None)' % (series.name)

        [series] = constantLine(requestContext, value)
        series.name = name
        series.pathExpression = series.name
        results.append(series)
    return results
[ "def", "aggregateLine", "(", "requestContext", ",", "seriesList", ",", "func", "=", "'avg'", ")", ":", "t_funcs", "=", "{", "'avg'", ":", "safeAvg", ",", "'min'", ":", "safeMin", ",", "'max'", ":", "safeMax", "}", "if", "func", "not", "in", "t_funcs", ":", "raise", "ValueError", "(", "\"Invalid function %s\"", "%", "func", ")", "results", "=", "[", "]", "for", "series", "in", "seriesList", ":", "value", "=", "t_funcs", "[", "func", "]", "(", "series", ")", "if", "value", "is", "not", "None", ":", "name", "=", "'aggregateLine(%s, %g)'", "%", "(", "series", ".", "name", ",", "value", ")", "else", ":", "name", "=", "'aggregateLine(%s, None)'", "%", "(", "series", ".", "name", ")", "[", "series", "]", "=", "constantLine", "(", "requestContext", ",", "value", ")", "series", ".", "name", "=", "name", "series", ".", "pathExpression", "=", "series", ".", "name", "results", ".", "append", "(", "series", ")", "return", "results" ]
Takes a metric or wildcard seriesList and draws a horizontal line based on the function applied to each series.

Note: By default, the graphite renderer consolidates data points by averaging data points over time. If you are using the 'min' or 'max' function for aggregateLine, this can cause an unusual gap in the line drawn by this function and the data itself. To fix this, you should use the consolidateBy() function with the same function argument you are using for aggregateLine. This will ensure that the proper data points are retained and the graph should line up correctly.

Example::

    &target=aggregateLine(server01.connections.total, 'avg')
    &target=aggregateLine(server*.connections.total, 'avg')
[ "Takes", "a", "metric", "or", "wildcard", "seriesList", "and", "draws", "a", "horizontal", "line", "based", "on", "the", "function", "applied", "to", "each", "series", "." ]
python
train
36.526316
opendatateam/udata
udata/core/tags/tasks.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/tags/tasks.py#L38-L48
def count_tags(self):
    '''Count tag occurrences by type and update the tag collection'''
    for key, model in TAGGED.items():
        collection = '{0}_tags'.format(key)
        results = (model.objects(tags__exists=True)
                   .map_reduce(map_tags, reduce_tags, collection))
        for result in results:
            tag, created = Tag.objects.get_or_create(name=result.key,
                                                     auto_save=False)
            tag.counts[key] = int(result.value) if result.value else 0
            tag.save()
[ "def", "count_tags", "(", "self", ")", ":", "for", "key", ",", "model", "in", "TAGGED", ".", "items", "(", ")", ":", "collection", "=", "'{0}_tags'", ".", "format", "(", "key", ")", "results", "=", "(", "model", ".", "objects", "(", "tags__exists", "=", "True", ")", ".", "map_reduce", "(", "map_tags", ",", "reduce_tags", ",", "collection", ")", ")", "for", "result", "in", "results", ":", "tag", ",", "created", "=", "Tag", ".", "objects", ".", "get_or_create", "(", "name", "=", "result", ".", "key", ",", "auto_save", "=", "False", ")", "tag", ".", "counts", "[", "key", "]", "=", "int", "(", "result", ".", "value", ")", "if", "result", ".", "value", "else", "0", "tag", ".", "save", "(", ")" ]
Count tag occurrences by type and update the tag collection
[ "Count", "tag", "occurences", "by", "type", "and", "update", "the", "tag", "collection" ]
python
train
50.090909
gabstopper/smc-python
smc/examples/ip_lists.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/examples/ip_lists.py#L113-L125
def download_as_json(name):
    """
    Download IPList as json. This would allow for easy manipulation of
    the IPList, but is generally recommended only for smaller lists.

    :param str name: name of IPList
    :return: None
    """
    location = list(IPList.objects.filter(name))
    if location:
        iplist = location[0]
        return iplist.download(as_type='json')
[ "def", "download_as_json", "(", "name", ")", ":", "location", "=", "list", "(", "IPList", ".", "objects", ".", "filter", "(", "name", ")", ")", "if", "location", ":", "iplist", "=", "location", "[", "0", "]", "return", "iplist", ".", "download", "(", "as_type", "=", "'json'", ")" ]
Download IPList as json. This would allow for easy manipulation of the IPList, but is generally recommended only for smaller lists.

:param str name: name of IPList
:return: None
[ "Download", "IPList", "as", "json", ".", "This", "would", "allow", "for", "easily", "manipulation", "of", "the", "IPList", "but", "generally", "recommended", "only", "for", "smaller", "lists" ]
python
train
28.538462
pandas-dev/pandas
pandas/io/excel/_openpyxl.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/excel/_openpyxl.py#L98-L123
def _convert_to_color(cls, color_spec):
    """
    Convert ``color_spec`` to an openpyxl v2 Color object

    Parameters
    ----------
    color_spec : str, dict
        A 32-bit ARGB hex string, or a dict with zero or more of the
        following keys.
            'rgb', 'indexed', 'auto', 'theme', 'tint', 'index', 'type'

    Returns
    -------
    color : openpyxl.styles.Color
    """
    from openpyxl.styles import Color

    if isinstance(color_spec, str):
        return Color(color_spec)
    else:
        return Color(**color_spec)
[ "def", "_convert_to_color", "(", "cls", ",", "color_spec", ")", ":", "from", "openpyxl", ".", "styles", "import", "Color", "if", "isinstance", "(", "color_spec", ",", "str", ")", ":", "return", "Color", "(", "color_spec", ")", "else", ":", "return", "Color", "(", "*", "*", "color_spec", ")" ]
Convert ``color_spec`` to an openpyxl v2 Color object

Parameters
----------
color_spec : str, dict
    A 32-bit ARGB hex string, or a dict with zero or more of the following keys.
        'rgb', 'indexed', 'auto', 'theme', 'tint', 'index', 'type'

Returns
-------
color : openpyxl.styles.Color
[ "Convert", "color_spec", "to", "an", "openpyxl", "v2", "Color", "object", "Parameters", "----------", "color_spec", ":", "str", "dict", "A", "32", "-", "bit", "ARGB", "hex", "string", "or", "a", "dict", "with", "zero", "or", "more", "of", "the", "following", "keys", ".", "rgb", "indexed", "auto", "theme", "tint", "index", "type", "Returns", "-------", "color", ":", "openpyxl", ".", "styles", ".", "Color" ]
python
train
26.115385
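Note on the record above: the two branches mirror openpyxl's Color constructor, which takes an ARGB string as its first positional argument (rgb) or keyword fields such as theme and tint. A short illustration (requires openpyxl; the values are made up):

from openpyxl.styles import Color

red = Color('00FF0000')                      # str spec, passed positionally as rgb
themed = Color(**{'theme': 4, 'tint': 0.4})  # dict spec, unpacked as kwargs
assert red.rgb == '00FF0000'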
pjuren/pyokit
src/pyokit/datastruct/genomicInterval.py
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/datastruct/genomicInterval.py#L650-L664
def subtract(self, es):
    """
    Subtract the BED elements in es from self.

    :param es: a list of BED elements (or anything with chrom, start, end)
    :return: a list of BED elements which represent what is left of self
             after the subtraction. This might be an empty list.
    """
    workingSet = [self]
    for e in es:
        newWorkingSet = []
        for w in workingSet:
            newWorkingSet += w.__singleIntersect(e)
        workingSet = newWorkingSet
    return workingSet
[ "def", "subtract", "(", "self", ",", "es", ")", ":", "workingSet", "=", "[", "self", "]", "for", "e", "in", "es", ":", "newWorkingSet", "=", "[", "]", "for", "w", "in", "workingSet", ":", "newWorkingSet", "+=", "w", ".", "__singleIntersect", "(", "e", ")", "workingSet", "=", "newWorkingSet", "return", "workingSet" ]
Subtract the BED elements in es from self.

:param es: a list of BED elements (or anything with chrom, start, end)
:return: a list of BED elements which represent what is left of self after the subtraction. This might be an empty list.
[ "Subtract", "the", "BED", "elements", "in", "es", "from", "self", "." ]
python
train
32.133333
KE-works/pykechain
pykechain/helpers.py
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/helpers.py#L9-L118
def get_project(url=None, username=None, password=None, token=None, scope=None, scope_id=None,
                env_filename=None, status=ScopeStatus.ACTIVE):
    """
    Retrieve and return the KE-chain project to be used throughout an app.

    This helper is made to bootstrap a pykechain enabled python script or a jupyter notebook
    with the correct project (technically this is a `pykechain.models.Scope` model).

    When no parameters are passed in this function, it will try to retrieve `url`, `token`,
    `scope` (or `scope_id`) from the environment variables or a neatly placed '.env' file.

    When the environment variable KECHAIN_FORCE_ENV_USE is set to true (or ok, on, 1, yes),
    then the use of environment variables for the retrieval of the scope is enforced.

    The following environment variables can be set::

        KECHAIN_URL           - full url of KE-chain where to connect to
                                eg: 'https://<some>.ke-chain.com'
        KECHAIN_TOKEN         - authentication token for the KE-chain user provided from
                                KE-chain user account control
        KECHAIN_USERNAME      - the username for the credentials
        KECHAIN_PASSWORD      - the password for the credentials
        KECHAIN_SCOPE         - the name of the project / scope. Should be unique, otherwise
                                use scope_id
        KECHAIN_SCOPE_ID      - the UUID of the project / scope.
        KECHAIN_FORCE_ENV_USE - set to 'true', '1', 'ok', or 'yes' to always use the
                                environment variables.
        KECHAIN_SCOPE_STATUS  - the status of the Scope to retrieve, defaults to None to
                                retrieve all scopes

    .. versionadded:: 1.12

    :param url: (optional) url of KE-chain
    :type url: basestring or None
    :param username: (optional) username for authentication (together with password, if not token)
    :type username: basestring or None
    :param password: (optional) password for username/password authentication (together with username, if not token)
    :type password: basestring or None
    :param token: (optional) token for authentication (if not username/password)
    :type token: basestring or None
    :param scope: (optional) name of the scope to retrieve from KE-chain.
    :type scope: basestring or None
    :param scope_id: (optional) UUID of the scope to retrieve and return from KE-chain
    :type scope_id: basestring or None
    :param env_filename: (optional) name of the environment filename to bootstrap the Client
    :type env_filename: basestring or None
    :param status: (optional) status of the scope to retrieve, defaults to :attr:`enums.Scopestatus.ACTIVE`
    :type status: basestring or None
    :return: pykechain.models.Scope
    :raises NotFoundError: If the scope could not be found
    :raises ClientError: If the client connection to KE-chain was unsuccessful
    :raises APIError: If other Errors occur to retrieve the scope

    Example
    -------
    An example with parameters provided

    >>> from pykechain import get_project
    >>> project = get_project(url='http://localhost:8000',
    ...     username='foo', password='bar', scope='1st!')
    >>> print(project.name)
    1st

    An example with a .env file on disk::

        # This is an .env file on disk.
        KECHAIN_TOKEN=bd9377793f7e74a29dbb11fce969
        KECHAIN_URL=http://localhost:8080
        KECHAIN_SCOPE_ID=c9f0-228e-4d3a-9dc0-ec5a75d7

    >>> project = get_project(env_filename='/path/to/.env')
    >>> project.id
    c9f0-228e-4d3a-9dc0-ec5a75d7

    An example for get_project that will extract all from the environment variables

    >>> env_vars = os.environ
    >>> env_vars.get('KECHAIN_TOKEN')
    bd9377793f7e74a29dbb11fce969
    >>> env_vars.get('KECHAIN_URL')
    http://localhost:8080
    >>> env_vars.get('KECHAIN_SCOPE')
    Bike Project
    >>> project = get_project()
    >>> project.name
    Bike Project
    """
    if env.bool(kecenv.KECHAIN_FORCE_ENV_USE, default=False):
        if not os.getenv(kecenv.KECHAIN_URL):
            raise ClientError(
                "Error: KECHAIN_URL should be provided as environment variable (use of env vars is enforced)")
        if not (os.getenv(kecenv.KECHAIN_TOKEN) or
                (os.getenv(kecenv.KECHAIN_USERNAME) and os.getenv(kecenv.KECHAIN_PASSWORD))):
            raise ClientError("Error: KECHAIN_TOKEN or KECHAIN_USERNAME and KECHAIN_PASSWORD should be provided as "
                              "environment variable(s) (use of env vars is enforced)")
        if not (os.getenv(kecenv.KECHAIN_SCOPE) or os.getenv(kecenv.KECHAIN_SCOPE_ID)):
            raise ClientError("Error: KECHAIN_SCOPE or KECHAIN_SCOPE_ID should be provided as environment variable "
                              "(use of env vars is enforced)")

    if env.bool(kecenv.KECHAIN_FORCE_ENV_USE, default=False) or \
            not any((url, username, password, token, scope, scope_id)):
        client = Client.from_env(env_filename=env_filename)
        scope_id = env(kecenv.KECHAIN_SCOPE_ID, default=None)
        scope = env(kecenv.KECHAIN_SCOPE, default=None)
        status = env(kecenv.KECHAIN_SCOPE_STATUS, default=None)
    elif (url and ((username and password) or (token)) and (scope or scope_id)) and \
            not env.bool(kecenv.KECHAIN_FORCE_ENV_USE, default=False):
        client = Client(url=url)
        client.login(username=username, password=password, token=token)
    else:
        raise ClientError("Error: insufficient arguments to connect to KE-chain. "
                          "See documentation of `pykechain.get_project()`")

    if scope_id:
        return client.scope(pk=scope_id, status=status)
    else:
        return client.scope(name=scope, status=status)
[ "def", "get_project", "(", "url", "=", "None", ",", "username", "=", "None", ",", "password", "=", "None", ",", "token", "=", "None", ",", "scope", "=", "None", ",", "scope_id", "=", "None", ",", "env_filename", "=", "None", ",", "status", "=", "ScopeStatus", ".", "ACTIVE", ")", ":", "if", "env", ".", "bool", "(", "kecenv", ".", "KECHAIN_FORCE_ENV_USE", ",", "default", "=", "False", ")", ":", "if", "not", "os", ".", "getenv", "(", "kecenv", ".", "KECHAIN_URL", ")", ":", "raise", "ClientError", "(", "\"Error: KECHAIN_URL should be provided as environment variable (use of env vars is enforced)\"", ")", "if", "not", "(", "os", ".", "getenv", "(", "kecenv", ".", "KECHAIN_TOKEN", ")", "or", "(", "os", ".", "getenv", "(", "kecenv", ".", "KECHAIN_PASSWORD", ")", "and", "os", ".", "getenv", "(", "kecenv", ".", "KECHAIN_PASSWORD", ")", ")", ")", ":", "raise", "ClientError", "(", "\"Error: KECHAIN_TOKEN or KECHAIN_USERNAME and KECHAIN_PASSWORD should be provided as \"", "\"environment variable(s) (use of env vars is enforced)\"", ")", "if", "not", "(", "os", ".", "getenv", "(", "kecenv", ".", "KECHAIN_SCOPE", ")", "or", "os", ".", "getenv", "(", "kecenv", ".", "KECHAIN_SCOPE_ID", ")", ")", ":", "raise", "ClientError", "(", "\"Error: KECHAIN_SCOPE or KECHAIN_SCOPE_ID should be provided as environment variable \"", "\"(use of env vars is enforced)\"", ")", "if", "env", ".", "bool", "(", "kecenv", ".", "KECHAIN_FORCE_ENV_USE", ",", "default", "=", "False", ")", "or", "not", "any", "(", "(", "url", ",", "username", ",", "password", ",", "token", ",", "scope", ",", "scope_id", ")", ")", ":", "client", "=", "Client", ".", "from_env", "(", "env_filename", "=", "env_filename", ")", "scope_id", "=", "env", "(", "kecenv", ".", "KECHAIN_SCOPE_ID", ",", "default", "=", "None", ")", "scope", "=", "env", "(", "kecenv", ".", "KECHAIN_SCOPE", ",", "default", "=", "None", ")", "status", "=", "env", "(", "kecenv", ".", "KECHAIN_SCOPE_STATUS", ",", "default", "=", "None", ")", "elif", "(", "url", "and", "(", "(", "username", "and", "password", ")", "or", "(", "token", ")", ")", "and", "(", "scope", "or", "scope_id", ")", ")", "and", "not", "env", ".", "bool", "(", "kecenv", ".", "KECHAIN_FORCE_ENV_USE", ",", "default", "=", "False", ")", ":", "client", "=", "Client", "(", "url", "=", "url", ")", "client", ".", "login", "(", "username", "=", "username", ",", "password", "=", "password", ",", "token", "=", "token", ")", "else", ":", "raise", "ClientError", "(", "\"Error: insufficient arguments to connect to KE-chain. \"", "\"See documentation of `pykechain.get_project()`\"", ")", "if", "scope_id", ":", "return", "client", ".", "scope", "(", "pk", "=", "scope_id", ",", "status", "=", "status", ")", "else", ":", "return", "client", ".", "scope", "(", "name", "=", "scope", ",", "status", "=", "status", ")" ]
Retrieve and return the KE-chain project to be used throughout an app.

This helper is made to bootstrap a pykechain enabled python script or a jupyter notebook with the correct project (technically this is a `pykechain.models.Scope` model).

When no parameters are passed in this function, it will try to retrieve `url`, `token`, `scope` (or `scope_id`) from the environment variables or a neatly placed '.env' file.

When the environment variable KECHAIN_FORCE_ENV_USE is set to true (or ok, on, 1, yes), then the use of environment variables for the retrieval of the scope is enforced.

The following environment variables can be set::

    KECHAIN_URL - full url of KE-chain where to connect to eg: 'https://<some>.ke-chain.com'
    KECHAIN_TOKEN - authentication token for the KE-chain user provided from KE-chain user account control
    KECHAIN_USERNAME - the username for the credentials
    KECHAIN_PASSWORD - the password for the credentials
    KECHAIN_SCOPE - the name of the project / scope. Should be unique, otherwise use scope_id
    KECHAIN_SCOPE_ID - the UUID of the project / scope.
    KECHAIN_FORCE_ENV_USE - set to 'true', '1', 'ok', or 'yes' to always use the environment variables.
    KECHAIN_SCOPE_STATUS - the status of the Scope to retrieve, defaults to None to retrieve all scopes

.. versionadded:: 1.12

:param url: (optional) url of KE-chain
:type url: basestring or None
:param username: (optional) username for authentication (together with password, if not token)
:type username: basestring or None
:param password: (optional) password for username/password authentication (together with username, if not token)
:type password: basestring or None
:param token: (optional) token for authentication (if not username/password)
:type token: basestring or None
:param scope: (optional) name of the scope to retrieve from KE-chain.
:type scope: basestring or None
:param scope_id: (optional) UUID of the scope to retrieve and return from KE-chain
:type scope_id: basestring or None
:param env_filename: (optional) name of the environment filename to bootstrap the Client
:type env_filename: basestring or None
:param status: (optional) status of the scope to retrieve, defaults to :attr:`enums.Scopestatus.ACTIVE`
:type status: basestring or None
:return: pykechain.models.Scope
:raises NotFoundError: If the scope could not be found
:raises ClientError: If the client connection to KE-chain was unsuccessful
:raises APIError: If other Errors occur to retrieve the scope

Example
-------
An example with parameters provided

>>> from pykechain import get_project
>>> project = get_project(url='http://localhost:8000',
...     username='foo', password='bar', scope='1st!')
>>> print(project.name)
1st

An example with a .env file on disk::

    # This is an .env file on disk.
    KECHAIN_TOKEN=bd9377793f7e74a29dbb11fce969
    KECHAIN_URL=http://localhost:8080
    KECHAIN_SCOPE_ID=c9f0-228e-4d3a-9dc0-ec5a75d7

>>> project = get_project(env_filename='/path/to/.env')
>>> project.id
c9f0-228e-4d3a-9dc0-ec5a75d7

An example for get_project that will extract all from the environment variables

>>> env_vars = os.environ
>>> env_vars.get('KECHAIN_TOKEN')
bd9377793f7e74a29dbb11fce969
>>> env_vars.get('KECHAIN_URL')
http://localhost:8080
>>> env_vars.get('KECHAIN_SCOPE')
Bike Project
>>> project = get_project()
>>> project.name
Bike Project
[ "Retrieve", "and", "return", "the", "KE", "-", "chain", "project", "to", "be", "used", "throughout", "an", "app", "." ]
python
train
50.427273
zhanglab/psamm
psamm/command.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/command.py#L248-L268
def _create_executor(self, handler, args, cpus_per_worker=1):
    """Return a new :class:`.Executor` instance."""
    if self._args.parallel > 0:
        workers = self._args.parallel
    else:
        try:
            workers = mp.cpu_count() // cpus_per_worker
        except NotImplementedError:
            workers = 1

    if workers != 1:
        logger.info('Using {} parallel worker processes...'.format(
            workers))
        executor = ProcessPoolExecutor(
            processes=workers, handler_init=handler, handler_args=args)
    else:
        logger.info('Using single worker...')
        executor = SequentialExecutor(
            handler_init=handler, handler_args=args)

    return executor
[ "def", "_create_executor", "(", "self", ",", "handler", ",", "args", ",", "cpus_per_worker", "=", "1", ")", ":", "if", "self", ".", "_args", ".", "parallel", ">", "0", ":", "workers", "=", "self", ".", "_args", ".", "parallel", "else", ":", "try", ":", "workers", "=", "mp", ".", "cpu_count", "(", ")", "//", "cpus_per_worker", "except", "NotImplementedError", ":", "workers", "=", "1", "if", "workers", "!=", "1", ":", "logger", ".", "info", "(", "'Using {} parallel worker processes...'", ".", "format", "(", "workers", ")", ")", "executor", "=", "ProcessPoolExecutor", "(", "processes", "=", "workers", ",", "handler_init", "=", "handler", ",", "handler_args", "=", "args", ")", "else", ":", "logger", ".", "info", "(", "'Using single worker...'", ")", "executor", "=", "SequentialExecutor", "(", "handler_init", "=", "handler", ",", "handler_args", "=", "args", ")", "return", "executor" ]
Return a new :class:`.Executor` instance.
[ "Return", "a", "new", ":", "class", ":", ".", "Executor", "instance", "." ]
python
train
36.52381
Kozea/cairocffi
cairocffi/context.py
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/context.py#L1629-L1643
def clip_extents(self):
    """Computes a bounding box in user coordinates
    covering the area inside the current clip.

    :return:
        A ``(x1, y1, x2, y2)`` tuple of floats:
        the left, top, right and bottom of the resulting extents,
        respectively.
    """
    extents = ffi.new('double[4]')
    cairo.cairo_clip_extents(
        self._pointer, extents + 0, extents + 1, extents + 2, extents + 3)
    self._check_status()
    return tuple(extents)
[ "def", "clip_extents", "(", "self", ")", ":", "extents", "=", "ffi", ".", "new", "(", "'double[4]'", ")", "cairo", ".", "cairo_clip_extents", "(", "self", ".", "_pointer", ",", "extents", "+", "0", ",", "extents", "+", "1", ",", "extents", "+", "2", ",", "extents", "+", "3", ")", "self", ".", "_check_status", "(", ")", "return", "tuple", "(", "extents", ")" ]
Computes a bounding box in user coordinates covering the area inside the current clip. :return: A ``(x1, y1, x2, y2)`` tuple of floats: the left, top, right and bottom of the resulting extents, respectively.
[ "Computes", "a", "bounding", "box", "in", "user", "coordinates", "covering", "the", "area", "inside", "the", "current", "clip", "." ]
python
train
33.666667
cytoscape/py2cytoscape
py2cytoscape/cyrest/layout.py
https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/layout.py#L380-L452
def fruchterman_rheingold(self, attraction_multiplier=None, conflict_avoidance=None,
                          defaultEdgeWeight=None, EdgeAttribute=None, gravity_multiplier=None,
                          layout3D=None, max_distance_factor=None, maxWeightCutoff=None,
                          minWeightCutoff=None, network=None, nIterations=None,
                          NodeAttribute=None, nodeList=None, randomize=None,
                          repulsion_multiplier=None, singlePartition=None, spread_factor=None,
                          temperature=None, Type=None, update_iterations=None, verbose=None):
    """
    Execute the Edge-weighted Force directed (BioLayout) on a network

    :param attraction_multiplier (string, optional): Divisor to calculate the attraction force, in numeric value
    :param conflict_avoidance (string, optional): Constant force applied to avoid conflicts, in numeric value
    :param defaultEdgeWeight (string, optional): The default edge weight to consider, default is 0.5
    :param EdgeAttribute (string, optional): The name of the edge column containing numeric values that will be used as weights in the layout algorithm. Only columns containing numeric values are shown
    :param gravity_multiplier (string, optional): Multiplier to calculate the gravity force, in numeric value
    :param layout3D (string, optional): Layout nodes in 3D; boolean values only, true or false; defaults to true
    :param max_distance_factor (string, optional): Percent of graph used for node repulsion calculations, in numeric value
    :param maxWeightCutoff (string, optional): The maximum edge weight to consider, default to the Double.MAX value
    :param minWeightCutoff (string, optional): The minimum edge weight to consider, numeric values, default is 0
    :param network (string, optional): Specifies a network by name, or by SUID if the prefix SUID: is used. The keyword CURRENT, or a blank value can also be used to specify the current network.
    :param nIterations (string, optional): Number of iterations, in numeric value
    :param NodeAttribute (string, optional): The name of the node column containing numeric values that will be used as weights in the layout algorithm. Only columns containing numeric values are shown
    :param nodeList (string, optional): Specifies a list of nodes. The keywords all, selected, or unselected can be used to specify nodes by their selection state. The pattern COLUMN:VALUE sets this parameter to any rows that contain the specified column value; if the COLUMN prefix is not used, the NAME column is matched by default. A list of COLUMN:VALUE pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to match multiple values.
    :param randomize (string, optional): Randomize graph before layout; boolean values only, true or false; defaults to true
    :param repulsion_multiplier (string, optional): Multiplier to calculate the repulsion force, in numeric value
    :param singlePartition (string, optional): Don't partition graph before layout; boolean values only, true or false; defaults to false
    :param spread_factor (string, optional): Amount of extra room for layout, in numeric value
    :param temperature (string, optional): Initial temperature, in numeric value
    :param Type (string, optional): How to interpret weight values; must be one of Heuristic, -Log(value), 1 - normalized value and normalized value. Defaults to Heuristic = ['Heuristic', '-Log(value)', '1 - normalized value', 'normalized value']
    :param update_iterations (string, optional): Number of iterations before updating display, in numeric value (0: update only at end)
    """
    network = check_network(self, network, verbose=verbose)
    PARAMS = set_param(
        ['attraction_multiplier', 'conflict_avoidance', 'defaultEdgeWeight',
         'EdgeAttribute', 'gravity_multiplier', 'layout3D', 'max_distance_factor',
         'maxWeightCutoff', 'minWeightCutoff', 'network', 'nIterations',
         'NodeAttribute', 'nodeList', 'randomize', 'repulsion_multiplier',
         'singlePartition', 'spread_factor', 'temperature', 'Type',
         'update_iterations'],
        [attraction_multiplier, conflict_avoidance, defaultEdgeWeight, EdgeAttribute,
         gravity_multiplier, layout3D, max_distance_factor, maxWeightCutoff,
         minWeightCutoff, network, nIterations, NodeAttribute, nodeList, randomize,
         repulsion_multiplier, singlePartition, spread_factor, temperature, Type,
         update_iterations])
    response = api(url=self.__url + "/fruchterman-rheingold", PARAMS=PARAMS,
                   method="POST", verbose=verbose)
    return response
[ "def", "fruchterman_rheingold", "(", "self", ",", "attraction_multiplier", "=", "None", ",", "conflict_avoidance", "=", "None", ",", "defaultEdgeWeight", "=", "None", ",", "EdgeAttribute", "=", "None", ",", "gravity_multiplier", "=", "None", ",", "layout3D", "=", "None", ",", "max_distance_factor", "=", "None", ",", "maxWeightCutoff", "=", "None", ",", "minWeightCutoff", "=", "None", ",", "network", "=", "None", ",", "nIterations", "=", "None", ",", "NodeAttribute", "=", "None", ",", "nodeList", "=", "None", ",", "randomize", "=", "None", ",", "repulsion_multiplier", "=", "None", ",", "singlePartition", "=", "None", ",", "spread_factor", "=", "None", ",", "temperature", "=", "None", ",", "Type", "=", "None", ",", "update_iterations", "=", "None", ",", "verbose", "=", "None", ")", ":", "network", "=", "check_network", "(", "self", ",", "network", ",", "verbose", "=", "verbose", ")", "PARAMS", "=", "set_param", "(", "[", "'attraction_multiplier'", ",", "'conflict_avoidance'", ",", "'defaultEdgeWeight'", ",", "'EdgeAttribute'", ",", "'gravity_multiplier'", ",", "'layout3D'", ",", "'max_distance_factor'", ",", "'maxWeightCutoff'", ",", "'minWeightCutoff'", ",", "'network'", ",", "'nIterations'", ",", "'NodeAttribute'", ",", "'nodeList'", ",", "'randomize'", ",", "'repulsion_multiplier'", ",", "'singlePartition'", ",", "'spread_factor'", ",", "'temperature'", ",", "'Type'", ",", "'update_iterations'", "]", ",", "[", "attraction_multiplier", ",", "conflict_avoidance", ",", "defaultEdgeWeight", ",", "EdgeAttribute", ",", "gravity_multiplier", ",", "layout3D", ",", "max_distance_factor", ",", "maxWeightCutoff", ",", "minWeightCutoff", ",", "network", ",", "nIterations", ",", "NodeAttribute", ",", "nodeList", ",", "randomize", ",", "repulsion_multiplier", ",", "singlePartition", ",", "spread_factor", ",", "temperature", ",", "Type", ",", "update_iterations", "]", ")", "response", "=", "api", "(", "url", "=", "self", ".", "__url", "+", "\"/fruchterman-rheingold\"", ",", "PARAMS", "=", "PARAMS", ",", "method", "=", "\"POST\"", ",", "verbose", "=", "verbose", ")", "return", "response" ]
Execute the Edge-weighted Force directed (BioLayout) on a network

:param attraction_multiplier (string, optional): Divisor to calculate the attraction force, in numeric value
:param conflict_avoidance (string, optional): Constant force applied to avoid conflicts, in numeric value
:param defaultEdgeWeight (string, optional): The default edge weight to consider, default is 0.5
:param EdgeAttribute (string, optional): The name of the edge column containing numeric values that will be used as weights in the layout algorithm. Only columns containing numeric values are shown
:param gravity_multiplier (string, optional): Multiplier to calculate the gravity force, in numeric value
:param layout3D (string, optional): Layout nodes in 3D; boolean values only, true or false; defaults to true
:param max_distance_factor (string, optional): Percent of graph used for node repulsion calculations, in numeric value
:param maxWeightCutoff (string, optional): The maximum edge weight to consider, default to the Double.MAX value
:param minWeightCutoff (string, optional): The minimum edge weight to consider, numeric values, default is 0
:param network (string, optional): Specifies a network by name, or by SUID if the prefix SUID: is used. The keyword CURRENT, or a blank value can also be used to specify the current network.
:param nIterations (string, optional): Number of iterations, in numeric value
:param NodeAttribute (string, optional): The name of the node column containing numeric values that will be used as weights in the layout algorithm. Only columns containing numeric values are shown
:param nodeList (string, optional): Specifies a list of nodes. The keywords all, selected, or unselected can be used to specify nodes by their selection state. The pattern COLUMN:VALUE sets this parameter to any rows that contain the specified column value; if the COLUMN prefix is not used, the NAME column is matched by default. A list of COLUMN:VALUE pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to match multiple values.
:param randomize (string, optional): Randomize graph before layout; boolean values only, true or false; defaults to true
:param repulsion_multiplier (string, optional): Multiplier to calculate the repulsion force, in numeric value
:param singlePartition (string, optional): Don't partition graph before layout; boolean values only, true or false; defaults to false
:param spread_factor (string, optional): Amount of extra room for layout, in numeric value
:param temperature (string, optional): Initial temperature, in numeric value
:param Type (string, optional): How to interpret weight values; must be one of Heuristic, -Log(value), 1 - normalized value and normalized value. Defaults to Heuristic = ['Heuristic', '-Log(value)', '1 - normalized value', 'normalized value']
:param update_iterations (string, optional): Number of iterations before updating display, in numeric value (0: update only at end)
[ "Execute", "the", "Edge", "-", "weighted", "Force", "directed", "(", "BioLayout", ")", "on", "a", "network" ]
python
train
59.821918
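A hedged sketch of issuing this kind of layout command directly over HTTP. The record above builds PARAMS and POSTs to self.__url + "/fruchterman-rheingold" through the library's api() helper; the base URL, port, and JSON body shape below are assumptions for illustration, not the library's documented transport.

import requests

BASE = "http://localhost:1234/v1/commands/layout"   # assumed CyREST base URL
params = {
    "network": "current",     # keyword for the current network
    "nIterations": "100",
    "randomize": "true",
    "layout3D": "false",
}
resp = requests.post(BASE + "/fruchterman-rheingold", json=params)
resp.raise_for_status()
print(resp.text)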
lawsie/guizero
guizero/Combo.py
https://github.com/lawsie/guizero/blob/84c7f0b314fa86f9fc88eb11c9a0f6c4b57155e2/guizero/Combo.py#L284-L292
def _set_option_by_index(self, index):
        """
        Sets a single option in the Combo by its index, returning True if it was able to.
        """
        if index < len(self._options):
            self._selected.set(self._options[index])
            return True
        else:
            return False
[ "def", "_set_option_by_index", "(", "self", ",", "index", ")", ":", "if", "index", "<", "len", "(", "self", ".", "_options", ")", ":", "self", ".", "_selected", ".", "set", "(", "self", ".", "_options", "[", "index", "]", ")", "return", "True", "else", ":", "return", "False" ]
Sets a single option in the Combo by its index, returning True if it was able to.
[ "Sets", "a", "single", "option", "in", "the", "Combo", "by", "its", "index", "returning", "True", "if", "it", "was", "able", "to", "." ]
python
train
33.333333
spyder-ide/spyder
spyder/plugins/variableexplorer/widgets/dataframeeditor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/variableexplorer/widgets/dataframeeditor.py#L1249-L1257
def get_value(self):
        """Return modified Dataframe -- this is *not* a copy"""
        # It is important to avoid accessing the Qt C++ object as it has probably
        # already been destroyed, due to the Qt.WA_DeleteOnClose attribute
        df = self.dataModel.get_data()
        if self.is_series:
            return df.iloc[:, 0]
        else:
            return df
[ "def", "get_value", "(", "self", ")", ":", "# It is important to avoid accessing the Qt C++ object as it has probably\r", "# already been destroyed, due to the Qt.WA_DeleteOnClose attribute\r", "df", "=", "self", ".", "dataModel", ".", "get_data", "(", ")", "if", "self", ".", "is_series", ":", "return", "df", ".", "iloc", "[", ":", ",", "0", "]", "else", ":", "return", "df" ]
Return modified Dataframe -- this is *not* a copy
[ "Return", "modified", "Dataframe", "--", "this", "is", "*", "not", "*", "a", "copy" ]
python
train
41
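A small, Qt-free illustration of the df.iloc[:, 0] branch above: selecting the first column of a one-column frame returns it as a Series.

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3]})
first_col = df.iloc[:, 0]               # first column, as a Series
assert isinstance(first_col, pd.Series)
assert list(first_col) == [1, 2, 3]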
thomasdelaet/python-velbus
velbus/messages/channel_name_request.py
https://github.com/thomasdelaet/python-velbus/blob/af2f8af43f1a24bf854eff9f3126fd7b5c41b3dd/velbus/messages/channel_name_request.py#L56-L65
def data_to_binary(self): """ :return: bytes """ tmp = 0x00 if 1 in self.channels: tmp += 0x03 if 2 in self.channels: tmp += 0x0c return bytes([COMMAND_CODE, tmp])
[ "def", "data_to_binary", "(", "self", ")", ":", "tmp", "=", "0x00", "if", "1", "in", "self", ".", "channels", ":", "tmp", "+=", "0x03", "if", "2", "in", "self", ".", "channels", ":", "tmp", "+=", "0x0c", "return", "bytes", "(", "[", "COMMAND_CODE", ",", "tmp", "]", ")" ]
:return: bytes
[ ":", "return", ":", "bytes" ]
python
train
23.4
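The byte built above packs channel 1 into bits 0-1 and channel 2 into bits 2-3. A standalone sketch of the same mapping, with a few spot checks:

def channels_to_byte(channels):
    # Mirrors data_to_binary: channel 1 -> 0x03 (bits 0-1),
    # channel 2 -> 0x0c (bits 2-3).
    tmp = 0x00
    if 1 in channels:
        tmp += 0x03
    if 2 in channels:
        tmp += 0x0c
    return tmp

assert channels_to_byte(set()) == 0x00
assert channels_to_byte({1}) == 0x03
assert channels_to_byte({2}) == 0x0c
assert channels_to_byte({1, 2}) == 0x0f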
genialis/resolwe
resolwe/elastic/builder.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/elastic/builder.py#L310-L315
def process_delete(self, obj, pk_set=None, action=None, update_fields=None, **kwargs): """Recreate queryset from the index and rebuild the index.""" build_kwargs = self.delete_cache.take(obj) if build_kwargs: self.index.build(**build_kwargs)
[ "def", "process_delete", "(", "self", ",", "obj", ",", "pk_set", "=", "None", ",", "action", "=", "None", ",", "update_fields", "=", "None", ",", "*", "*", "kwargs", ")", ":", "build_kwargs", "=", "self", ".", "delete_cache", ".", "take", "(", "obj", ")", "if", "build_kwargs", ":", "self", ".", "index", ".", "build", "(", "*", "*", "build_kwargs", ")" ]
Recreate queryset from the index and rebuild the index.
[ "Recreate", "queryset", "from", "the", "index", "and", "rebuild", "the", "index", "." ]
python
train
45.5
msztolcman/versionner
versionner/vcs/git.py
https://github.com/msztolcman/versionner/blob/78fca02859e3e3eb71c9eb7ea230758944177c54/versionner/vcs/git.py#L93-L109
def raise_if_cant_commit(self): """Verify VCS status and raise an error if commit is disallowed :return: """ cmd = self._command.status() (code, stdout, stderr) = self._exec(cmd) if code: raise errors.VCSError('Can\'t verify VCS status. Process exited with code %d and message: %s' % ( code, stderr or stdout)) for line in stdout.splitlines(): if line.startswith(('??', '!!')): continue raise errors.VCSStateError("VCS status doesn't allow to commit. Please commit or stash your changes and try again")
[ "def", "raise_if_cant_commit", "(", "self", ")", ":", "cmd", "=", "self", ".", "_command", ".", "status", "(", ")", "(", "code", ",", "stdout", ",", "stderr", ")", "=", "self", ".", "_exec", "(", "cmd", ")", "if", "code", ":", "raise", "errors", ".", "VCSError", "(", "'Can\\'t verify VCS status. Process exited with code %d and message: %s'", "%", "(", "code", ",", "stderr", "or", "stdout", ")", ")", "for", "line", "in", "stdout", ".", "splitlines", "(", ")", ":", "if", "line", ".", "startswith", "(", "(", "'??'", ",", "'!!'", ")", ")", ":", "continue", "raise", "errors", ".", "VCSStateError", "(", "\"VCS status doesn't allow to commit. Please commit or stash your changes and try again\"", ")" ]
Verify VCS status and raise an error if commit is disallowed :return:
[ "Verify", "VCS", "status", "and", "raise", "an", "error", "if", "commit", "is", "disallowed" ]
python
train
36.117647
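The check above treats untracked ('??') and ignored ('!!') entries as harmless and any other status line as blocking. A self-contained sketch of the same rule; the record only shows an abstract self._command.status(), so the use of git status --porcelain here is an assumption:

import subprocess

out = subprocess.run(
    ["git", "status", "--porcelain"],
    capture_output=True, text=True, check=True,
).stdout
blocking = [line for line in out.splitlines()
            if not line.startswith(("??", "!!"))]
if blocking:
    raise RuntimeError("uncommitted changes present: %r" % blocking)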
ansible/molecule
molecule/provisioner/ansible.py
https://github.com/ansible/molecule/blob/766dc35b0b0ce498cd5e3a62b40f828742d0d08c/molecule/provisioner/ansible.py#L807-L822
def _link_or_update_vars(self): """ Creates or updates the symlink to group_vars and returns None. :returns: None """ for d, source in self.links.items(): target = os.path.join(self.inventory_directory, d) source = os.path.join(self._config.scenario.directory, source) if not os.path.exists(source): msg = "The source path '{}' does not exist.".format(source) util.sysexit_with_message(msg) msg = "Inventory {} linked to {}".format(source, target) LOG.info(msg) os.symlink(source, target)
[ "def", "_link_or_update_vars", "(", "self", ")", ":", "for", "d", ",", "source", "in", "self", ".", "links", ".", "items", "(", ")", ":", "target", "=", "os", ".", "path", ".", "join", "(", "self", ".", "inventory_directory", ",", "d", ")", "source", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_config", ".", "scenario", ".", "directory", ",", "source", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "source", ")", ":", "msg", "=", "\"The source path '{}' does not exist.\"", ".", "format", "(", "source", ")", "util", ".", "sysexit_with_message", "(", "msg", ")", "msg", "=", "\"Inventory {} linked to {}\"", ".", "format", "(", "source", ",", "target", ")", "LOG", ".", "info", "(", "msg", ")", "os", ".", "symlink", "(", "source", ",", "target", ")" ]
Creates or updates the symlink to group_vars and returns None. :returns: None
[ "Creates", "or", "updates", "the", "symlink", "to", "group_vars", "and", "returns", "None", "." ]
python
train
38.6875
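A standalone sketch of the linking step above, stripped of the molecule context. The islink guard is an addition of this sketch, not part of the original, which simply calls os.symlink:

import os

def link_vars(inventory_dir, scenario_dir, links):
    for name, rel_source in links.items():
        target = os.path.join(inventory_dir, name)
        source = os.path.join(scenario_dir, rel_source)
        if not os.path.exists(source):
            raise FileNotFoundError("source path does not exist: " + source)
        if os.path.islink(target):     # guard added in this sketch only
            os.unlink(target)
        os.symlink(source, target)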
pebble/libpebble2
libpebble2/protocol/base/__init__.py
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/protocol/base/__init__.py#L143-L153
def serialise_packet(self): """ Serialise a message, including framing information inferred from the ``Meta`` inner class of the packet. ``self.Meta.endpoint`` must be defined to call this method. :return: A serialised message, ready to be sent to the Pebble. """ if not hasattr(self, '_Meta'): raise ReferenceError("Can't serialise a packet that doesn't have an endpoint ID.") serialised = self.serialise() return struct.pack('!HH', len(serialised), self._Meta['endpoint']) + serialised
[ "def", "serialise_packet", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_Meta'", ")", ":", "raise", "ReferenceError", "(", "\"Can't serialise a packet that doesn't have an endpoint ID.\"", ")", "serialised", "=", "self", ".", "serialise", "(", ")", "return", "struct", ".", "pack", "(", "'!HH'", ",", "len", "(", "serialised", ")", ",", "self", ".", "_Meta", "[", "'endpoint'", "]", ")", "+", "serialised" ]
Serialise a message, including framing information inferred from the ``Meta`` inner class of the packet. ``self.Meta.endpoint`` must be defined to call this method. :return: A serialised message, ready to be sent to the Pebble.
[ "Serialise", "a", "message", "including", "framing", "information", "inferred", "from", "the", "Meta", "inner", "class", "of", "the", "packet", ".", "self", ".", "Meta", ".", "endpoint", "must", "be", "defined", "to", "call", "this", "method", "." ]
python
train
50.363636
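The frame layout is two big-endian unsigned shorts (payload length, then endpoint ID) followed by the payload itself; a quick check with a made-up endpoint ID:

import struct

payload = b"\x01\x02\x03"
endpoint = 48                     # hypothetical endpoint ID
frame = struct.pack("!HH", len(payload), endpoint) + payload
# length 3 -> 00 03, endpoint 48 -> 00 30, then the payload bytes
assert frame == b"\x00\x03\x00\x30\x01\x02\x03"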
redbridge/molnctrl
molnctrl/csobjects.py
https://github.com/redbridge/molnctrl/blob/9990ae7e522ce364bb61a735f774dc28de5f8e60/molnctrl/csobjects.py#L155-L158
def update(self): """ Update the state """ vm = self._cs_api.list_virtualmachines(id=self.id)[0] self.is_running = self._is_running(vm.state)
[ "def", "update", "(", "self", ")", ":", "vm", "=", "self", ".", "_cs_api", ".", "list_virtualmachines", "(", "id", "=", "self", ".", "id", ")", "[", "0", "]", "self", ".", "is_running", "=", "self", ".", "_is_running", "(", "vm", ".", "state", ")" ]
Update the state
[ "Update", "the", "state" ]
python
train
40.5
zero-os/0-core
client/py-client/zeroos/core0/client/client.py
https://github.com/zero-os/0-core/blob/69f6ce845ab8b8ad805a79a415227e7ac566c218/client/py-client/zeroos/core0/client/client.py#L626-L640
def download(self, remote, writer):
        """
        Downloads a file

        :param remote: remote file name
        :param writer: an object that implements the write(bytes) interface (typically a file descriptor)
        :return:
        """
        fd = self.open(remote)

        while True:
            chunk = self.read(fd)
            if chunk == b'':
                break
            writer.write(chunk)

        self.close(fd)
[ "def", "download", "(", "self", ",", "remote", ",", "writer", ")", ":", "fd", "=", "self", ".", "open", "(", "remote", ")", "while", "True", ":", "chunk", "=", "self", ".", "read", "(", "fd", ")", "if", "chunk", "==", "b''", ":", "break", "writer", ".", "write", "(", "chunk", ")", "self", ".", "close", "(", "fd", ")" ]
Downloads a file
:param remote: remote file name
:param writer: an object that implements the write(bytes) interface (typically a file descriptor)
:return:
[ "Downloads", "a", "file", ":", "param", "remote", ":", "remote", "file", "name", ":", "param", "writer", ":", "an", "object", "that", "implements", "the", "write", "(", "bytes", ")", "interface", "(", "typically", "a", "file", "descriptor", ")", ":", "return", ":" ]
python
train
28.133333
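The loop above stops as soon as a read returns an empty bytes object. The same pattern, exercised against in-memory streams so it runs anywhere:

import io

def copy_chunks(read_chunk, writer):
    # Pull chunks until the reader signals EOF with b''.
    while True:
        chunk = read_chunk()
        if chunk == b"":
            break
        writer.write(chunk)

src = io.BytesIO(b"hello world")
dst = io.BytesIO()
copy_chunks(lambda: src.read(4), dst)
assert dst.getvalue() == b"hello world"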
enkore/i3pystatus
i3pystatus/weather/__init__.py
https://github.com/enkore/i3pystatus/blob/14cfde967cecf79b40e223e35a04600f4c875af7/i3pystatus/weather/__init__.py#L276-L306
def get_color_data(self, condition): ''' Disambiguate similarly-named weather conditions, and return the icon and color that match. ''' if condition not in self.color_icons: # Check for similarly-named conditions if no exact match found condition_lc = condition.lower() if 'cloudy' in condition_lc or 'clouds' in condition_lc: if 'partly' in condition_lc: condition = 'Partly Cloudy' else: condition = 'Cloudy' elif condition_lc == 'overcast': condition = 'Cloudy' elif 'thunder' in condition_lc or 't-storm' in condition_lc: condition = 'Thunderstorm' elif 'snow' in condition_lc: condition = 'Snow' elif 'rain' in condition_lc or 'showers' in condition_lc: condition = 'Rainy' elif 'sunny' in condition_lc: condition = 'Sunny' elif 'clear' in condition_lc or 'fair' in condition_lc: condition = 'Fair' elif 'fog' in condition_lc: condition = 'Fog' return self.color_icons['default'] \ if condition not in self.color_icons \ else self.color_icons[condition]
[ "def", "get_color_data", "(", "self", ",", "condition", ")", ":", "if", "condition", "not", "in", "self", ".", "color_icons", ":", "# Check for similarly-named conditions if no exact match found", "condition_lc", "=", "condition", ".", "lower", "(", ")", "if", "'cloudy'", "in", "condition_lc", "or", "'clouds'", "in", "condition_lc", ":", "if", "'partly'", "in", "condition_lc", ":", "condition", "=", "'Partly Cloudy'", "else", ":", "condition", "=", "'Cloudy'", "elif", "condition_lc", "==", "'overcast'", ":", "condition", "=", "'Cloudy'", "elif", "'thunder'", "in", "condition_lc", "or", "'t-storm'", "in", "condition_lc", ":", "condition", "=", "'Thunderstorm'", "elif", "'snow'", "in", "condition_lc", ":", "condition", "=", "'Snow'", "elif", "'rain'", "in", "condition_lc", "or", "'showers'", "in", "condition_lc", ":", "condition", "=", "'Rainy'", "elif", "'sunny'", "in", "condition_lc", ":", "condition", "=", "'Sunny'", "elif", "'clear'", "in", "condition_lc", "or", "'fair'", "in", "condition_lc", ":", "condition", "=", "'Fair'", "elif", "'fog'", "in", "condition_lc", ":", "condition", "=", "'Fog'", "return", "self", ".", "color_icons", "[", "'default'", "]", "if", "condition", "not", "in", "self", ".", "color_icons", "else", "self", ".", "color_icons", "[", "condition", "]" ]
Disambiguate similarly-named weather conditions, and return the icon and color that match.
[ "Disambiguate", "similarly", "-", "named", "weather", "conditions", "and", "return", "the", "icon", "and", "color", "that", "match", "." ]
python
train
42.096774
wummel/dosage
dosagelib/util.py
https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/dosagelib/util.py#L268-L275
def check_robotstxt(url, session): """Check if robots.txt allows our user agent for the given URL. @raises: IOError if URL is not allowed """ roboturl = get_roboturl(url) rp = get_robotstxt_parser(roboturl, session=session) if not rp.can_fetch(UserAgent, str(url)): raise IOError("%s is disallowed by %s" % (url, roboturl))
[ "def", "check_robotstxt", "(", "url", ",", "session", ")", ":", "roboturl", "=", "get_roboturl", "(", "url", ")", "rp", "=", "get_robotstxt_parser", "(", "roboturl", ",", "session", "=", "session", ")", "if", "not", "rp", ".", "can_fetch", "(", "UserAgent", ",", "str", "(", "url", ")", ")", ":", "raise", "IOError", "(", "\"%s is disallowed by %s\"", "%", "(", "url", ",", "roboturl", ")", ")" ]
Check if robots.txt allows our user agent for the given URL. @raises: IOError if URL is not allowed
[ "Check", "if", "robots", ".", "txt", "allows", "our", "user", "agent", "for", "the", "given", "URL", "." ]
python
train
43.5
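The standard library's urllib.robotparser provides the can_fetch primitive used above; a sketch against an inline robots.txt rather than a fetched one:

import urllib.robotparser

rp = urllib.robotparser.RobotFileParser()
rp.parse([
    "User-agent: *",
    "Disallow: /private/",
])
assert rp.can_fetch("dosage", "http://example.com/comic/1")
if not rp.can_fetch("dosage", "http://example.com/private/x"):
    print("disallowed -- check_robotstxt would raise IOError here")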
inveniosoftware/invenio-oauthclient
invenio_oauthclient/handlers.py
https://github.com/inveniosoftware/invenio-oauthclient/blob/2500dc6935738107617aeade79e050d7608004bb/invenio_oauthclient/handlers.py#L351-L373
def disconnect_handler(remote, *args, **kwargs): """Handle unlinking of remote account. This default handler will just delete the remote account link. You may wish to extend this module to perform clean-up in the remote service before removing the link (e.g. removing install webhooks). :param remote: The remote application. :returns: Redirect response. """ if not current_user.is_authenticated: return current_app.login_manager.unauthorized() with db.session.begin_nested(): account = RemoteAccount.get( user_id=current_user.get_id(), client_id=remote.consumer_key ) if account: account.delete() db.session.commit() return redirect(url_for('invenio_oauthclient_settings.index'))
[ "def", "disconnect_handler", "(", "remote", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "current_user", ".", "is_authenticated", ":", "return", "current_app", ".", "login_manager", ".", "unauthorized", "(", ")", "with", "db", ".", "session", ".", "begin_nested", "(", ")", ":", "account", "=", "RemoteAccount", ".", "get", "(", "user_id", "=", "current_user", ".", "get_id", "(", ")", ",", "client_id", "=", "remote", ".", "consumer_key", ")", "if", "account", ":", "account", ".", "delete", "(", ")", "db", ".", "session", ".", "commit", "(", ")", "return", "redirect", "(", "url_for", "(", "'invenio_oauthclient_settings.index'", ")", ")" ]
Handle unlinking of remote account. This default handler will just delete the remote account link. You may wish to extend this module to perform clean-up in the remote service before removing the link (e.g. removing install webhooks). :param remote: The remote application. :returns: Redirect response.
[ "Handle", "unlinking", "of", "remote", "account", "." ]
python
train
33.652174
openpermissions/perch
perch/views.py
https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/views.py#L50-L65
def create_design_doc(self): """Create a design document from a Python map function""" source = [x for x in inspect.getsourcelines(self.func)[0] if not x.startswith('@')] doc = { '_id': '_design/{}'.format(self.name), 'language': 'python', 'views': { self.name: { 'map': ''.join(source) } } } return doc
[ "def", "create_design_doc", "(", "self", ")", ":", "source", "=", "[", "x", "for", "x", "in", "inspect", ".", "getsourcelines", "(", "self", ".", "func", ")", "[", "0", "]", "if", "not", "x", ".", "startswith", "(", "'@'", ")", "]", "doc", "=", "{", "'_id'", ":", "'_design/{}'", ".", "format", "(", "self", ".", "name", ")", ",", "'language'", ":", "'python'", ",", "'views'", ":", "{", "self", ".", "name", ":", "{", "'map'", ":", "''", ".", "join", "(", "source", ")", "}", "}", "}", "return", "doc" ]
Create a design document from a Python map function
[ "Create", "a", "design", "document", "from", "a", "Python", "map", "function" ]
python
train
28
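A runnable sketch of lifting a function's source into a CouchDB-style design document, as create_design_doc does. The map function is a toy; run this from a file, since inspect.getsourcelines needs source on disk:

import inspect

def by_type(doc):                 # hypothetical map function
    yield doc["type"], 1

source = [line for line in inspect.getsourcelines(by_type)[0]
          if not line.startswith("@")]
design = {
    "_id": "_design/by_type",
    "language": "python",
    "views": {"by_type": {"map": "".join(source)}},
}
print(design["views"]["by_type"]["map"])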
bwhite/hadoopy
hadoopy/_hdfs.py
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/_hdfs.py#L193-L201
def put(local_path, hdfs_path): """Put a file on hdfs :param local_path: Source (str) :param hdfs_path: Destination (str) :raises: IOError: If unsuccessful """ cmd = "hadoop fs -put %s %s" % (local_path, hdfs_path) rcode, stdout, stderr = _checked_hadoop_fs_command(cmd)
[ "def", "put", "(", "local_path", ",", "hdfs_path", ")", ":", "cmd", "=", "\"hadoop fs -put %s %s\"", "%", "(", "local_path", ",", "hdfs_path", ")", "rcode", ",", "stdout", ",", "stderr", "=", "_checked_hadoop_fs_command", "(", "cmd", ")" ]
Put a file on hdfs :param local_path: Source (str) :param hdfs_path: Destination (str) :raises: IOError: If unsuccessful
[ "Put", "a", "file", "on", "hdfs" ]
python
train
32.333333
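A sketch of the same operation using an argument list rather than an interpolated command string, which sidesteps shell-quoting issues with unusual file names; it still assumes the hadoop CLI is on PATH:

import subprocess

def hdfs_put(local_path, hdfs_path):
    proc = subprocess.run(
        ["hadoop", "fs", "-put", local_path, hdfs_path],
        capture_output=True, text=True,
    )
    if proc.returncode != 0:
        raise IOError("hadoop fs -put failed: " + proc.stderr)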
StackStorm/pybind
pybind/slxos/v17s_1_02/qos_mpls/map_apply/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/qos_mpls/map_apply/__init__.py#L100-L121
def _set_apply_exp_traffic_class_map_name(self, v, load=False): """ Setter method for apply_exp_traffic_class_map_name, mapped from YANG variable /qos_mpls/map_apply/apply_exp_traffic_class_map_name (container) If this variable is read-only (config: false) in the source YANG file, then _set_apply_exp_traffic_class_map_name is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_apply_exp_traffic_class_map_name() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=apply_exp_traffic_class_map_name.apply_exp_traffic_class_map_name, is_container='container', presence=False, yang_name="apply-exp-traffic-class-map-name", rest_name="exp-traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply exp traffic class map', u'cli-sequence-commands': None, u'alt-name': u'exp-traffic-class', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """apply_exp_traffic_class_map_name must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=apply_exp_traffic_class_map_name.apply_exp_traffic_class_map_name, is_container='container', presence=False, yang_name="apply-exp-traffic-class-map-name", rest_name="exp-traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply exp traffic class map', u'cli-sequence-commands': None, u'alt-name': u'exp-traffic-class', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='container', is_config=True)""", }) self.__apply_exp_traffic_class_map_name = t if hasattr(self, '_set'): self._set()
[ "def", "_set_apply_exp_traffic_class_map_name", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "apply_exp_traffic_class_map_name", ".", "apply_exp_traffic_class_map_name", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"apply-exp-traffic-class-map-name\"", ",", "rest_name", "=", "\"exp-traffic-class\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Apply exp traffic class map'", ",", "u'cli-sequence-commands'", ":", "None", ",", "u'alt-name'", ":", "u'exp-traffic-class'", ",", "u'cli-incomplete-no'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-apply-qos-mpls'", ",", "defining_module", "=", "'brocade-apply-qos-mpls'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"apply_exp_traffic_class_map_name must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=apply_exp_traffic_class_map_name.apply_exp_traffic_class_map_name, is_container='container', presence=False, yang_name=\"apply-exp-traffic-class-map-name\", rest_name=\"exp-traffic-class\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply exp traffic class map', u'cli-sequence-commands': None, u'alt-name': u'exp-traffic-class', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__apply_exp_traffic_class_map_name", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for apply_exp_traffic_class_map_name, mapped from YANG variable /qos_mpls/map_apply/apply_exp_traffic_class_map_name (container) If this variable is read-only (config: false) in the source YANG file, then _set_apply_exp_traffic_class_map_name is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_apply_exp_traffic_class_map_name() directly.
[ "Setter", "method", "for", "apply_exp_traffic_class_map_name", "mapped", "from", "YANG", "variable", "/", "qos_mpls", "/", "map_apply", "/", "apply_exp_traffic_class_map_name", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_apply_exp_traffic_class_map_name", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_apply_exp_traffic_class_map_name", "()", "directly", "." ]
python
train
96.772727
mlperf/training
rnn_translator/pytorch/seq2seq/train/fp_optimizers.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/rnn_translator/pytorch/seq2seq/train/fp_optimizers.py#L149-L163
def step(self, loss, optimizer, scheduler, update=True): """ Performs one step of the optimizer. :param loss: value of loss function :param optimizer: optimizer :param update: if True executes weight update """ loss.backward() if update: if self.grad_clip != float('inf'): clip_grad_norm_(self.model.parameters(), self.grad_clip) scheduler.step() optimizer.step() self.model.zero_grad()
[ "def", "step", "(", "self", ",", "loss", ",", "optimizer", ",", "scheduler", ",", "update", "=", "True", ")", ":", "loss", ".", "backward", "(", ")", "if", "update", ":", "if", "self", ".", "grad_clip", "!=", "float", "(", "'inf'", ")", ":", "clip_grad_norm_", "(", "self", ".", "model", ".", "parameters", "(", ")", ",", "self", ".", "grad_clip", ")", "scheduler", ".", "step", "(", ")", "optimizer", ".", "step", "(", ")", "self", ".", "model", ".", "zero_grad", "(", ")" ]
Performs one step of the optimizer.
:param loss: value of loss function
:param optimizer: optimizer
:param scheduler: learning rate scheduler
:param update: if True executes weight update
[ "Performs", "one", "step", "of", "the", "optimizer", "." ]
python
train
33.4
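The update order above is backward pass, optional gradient clipping, scheduler step, optimizer step, zero_grad. A self-contained PyTorch sketch of the same sequence, minus the scheduler:

import torch
from torch.nn.utils import clip_grad_norm_

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

loss = model(torch.randn(8, 4)).pow(2).mean()
loss.backward()
clip_grad_norm_(model.parameters(), max_norm=1.0)   # plays the grad_clip role
optimizer.step()
model.zero_grad()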
f3at/feat
tools/pep8.py
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/tools/pep8.py#L608-L647
def check_all(self): """ Run all checks on the input file. """ self.file_errors = 0 self.line_number = 0 self.indent_char = None self.indent_level = 0 self.previous_logical = '' self.blank_lines = 0 self.tokens = [] parens = 0 for token in tokenize.generate_tokens(self.readline_check_physical): # print tokenize.tok_name[token[0]], repr(token) self.tokens.append(token) token_type, text = token[0:2] if token_type == tokenize.OP and text in '([{': parens += 1 if token_type == tokenize.OP and text in '}])': parens -= 1 if token_type == tokenize.NEWLINE and not parens: self.check_logical() self.blank_lines = 0 self.tokens = [] if token_type == tokenize.NL and not parens: if len(self.tokens) <= 1: # The physical line contains only this token. self.blank_lines += 1 self.tokens = [] if token_type == tokenize.COMMENT: source_line = token[4] token_start = token[2][1] if source_line[:token_start].strip() == '': self.blank_lines = 0 if text.endswith('\n') and not parens: # The comment also ends a physical line. This works around # Python < 2.6 behaviour, which does not generate NL after # a comment which is on a line by itself. self.tokens = [] return self.file_errors
[ "def", "check_all", "(", "self", ")", ":", "self", ".", "file_errors", "=", "0", "self", ".", "line_number", "=", "0", "self", ".", "indent_char", "=", "None", "self", ".", "indent_level", "=", "0", "self", ".", "previous_logical", "=", "''", "self", ".", "blank_lines", "=", "0", "self", ".", "tokens", "=", "[", "]", "parens", "=", "0", "for", "token", "in", "tokenize", ".", "generate_tokens", "(", "self", ".", "readline_check_physical", ")", ":", "# print tokenize.tok_name[token[0]], repr(token)", "self", ".", "tokens", ".", "append", "(", "token", ")", "token_type", ",", "text", "=", "token", "[", "0", ":", "2", "]", "if", "token_type", "==", "tokenize", ".", "OP", "and", "text", "in", "'([{'", ":", "parens", "+=", "1", "if", "token_type", "==", "tokenize", ".", "OP", "and", "text", "in", "'}])'", ":", "parens", "-=", "1", "if", "token_type", "==", "tokenize", ".", "NEWLINE", "and", "not", "parens", ":", "self", ".", "check_logical", "(", ")", "self", ".", "blank_lines", "=", "0", "self", ".", "tokens", "=", "[", "]", "if", "token_type", "==", "tokenize", ".", "NL", "and", "not", "parens", ":", "if", "len", "(", "self", ".", "tokens", ")", "<=", "1", ":", "# The physical line contains only this token.", "self", ".", "blank_lines", "+=", "1", "self", ".", "tokens", "=", "[", "]", "if", "token_type", "==", "tokenize", ".", "COMMENT", ":", "source_line", "=", "token", "[", "4", "]", "token_start", "=", "token", "[", "2", "]", "[", "1", "]", "if", "source_line", "[", ":", "token_start", "]", ".", "strip", "(", ")", "==", "''", ":", "self", ".", "blank_lines", "=", "0", "if", "text", ".", "endswith", "(", "'\\n'", ")", "and", "not", "parens", ":", "# The comment also ends a physical line. This works around", "# Python < 2.6 behaviour, which does not generate NL after", "# a comment which is on a line by itself.", "self", ".", "tokens", "=", "[", "]", "return", "self", ".", "file_errors" ]
Run all checks on the input file.
[ "Run", "all", "checks", "on", "the", "input", "file", "." ]
python
train
41.3
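The paren counter above defers logical-line handling until all brackets are balanced; the bookkeeping is easy to reproduce with tokenize on a small snippet:

import io
import tokenize

src = "x = (1 +\n     2)\ny = [3]\n"
depth = 0
for tok in tokenize.generate_tokens(io.StringIO(src).readline):
    if tok.type == tokenize.OP and tok.string in "([{":
        depth += 1
    elif tok.type == tokenize.OP and tok.string in ")]}":
        depth -= 1
    elif tok.type == tokenize.NEWLINE and depth == 0:
        print("logical line ends at", tok.start)
assert depth == 0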
pypa/pipenv
pipenv/patched/notpip/_vendor/pkg_resources/__init__.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_vendor/pkg_resources/__init__.py#L1028-L1035
def add(self, dist): """Add `dist` if we ``can_add()`` it and it has not already been added """ if self.can_add(dist) and dist.has_version(): dists = self._distmap.setdefault(dist.key, []) if dist not in dists: dists.append(dist) dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
[ "def", "add", "(", "self", ",", "dist", ")", ":", "if", "self", ".", "can_add", "(", "dist", ")", "and", "dist", ".", "has_version", "(", ")", ":", "dists", "=", "self", ".", "_distmap", ".", "setdefault", "(", "dist", ".", "key", ",", "[", "]", ")", "if", "dist", "not", "in", "dists", ":", "dists", ".", "append", "(", "dist", ")", "dists", ".", "sort", "(", "key", "=", "operator", ".", "attrgetter", "(", "'hashcmp'", ")", ",", "reverse", "=", "True", ")" ]
Add `dist` if we ``can_add()`` it and it has not already been added
[ "Add", "dist", "if", "we", "can_add", "()", "it", "and", "it", "has", "not", "already", "been", "added" ]
python
train
45.375
xtrementl/focus
focus/plugin/modules/im.py
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/plugin/modules/im.py#L93-L119
def _pidgin_status(status, message): """ Updates status and message for Pidgin IM application. `status` Status type. `message` Status message. """ try: iface = _dbus_get_interface('im.pidgin.purple.PurpleService', '/im/pidgin/purple/PurpleObject', 'im.pidgin.purple.PurpleInterface') if iface: # create new transient status code = PIDGIN_CODE_MAP[status] saved_status = iface.PurpleSavedstatusNew('', code) # set the message, if provided iface.PurpleSavedstatusSetMessage(saved_status, message) # activate status iface.PurpleSavedstatusActivate(saved_status) except dbus.exceptions.DBusException: pass
[ "def", "_pidgin_status", "(", "status", ",", "message", ")", ":", "try", ":", "iface", "=", "_dbus_get_interface", "(", "'im.pidgin.purple.PurpleService'", ",", "'/im/pidgin/purple/PurpleObject'", ",", "'im.pidgin.purple.PurpleInterface'", ")", "if", "iface", ":", "# create new transient status", "code", "=", "PIDGIN_CODE_MAP", "[", "status", "]", "saved_status", "=", "iface", ".", "PurpleSavedstatusNew", "(", "''", ",", "code", ")", "# set the message, if provided", "iface", ".", "PurpleSavedstatusSetMessage", "(", "saved_status", ",", "message", ")", "# activate status", "iface", ".", "PurpleSavedstatusActivate", "(", "saved_status", ")", "except", "dbus", ".", "exceptions", ".", "DBusException", ":", "pass" ]
Updates status and message for Pidgin IM application. `status` Status type. `message` Status message.
[ "Updates", "status", "and", "message", "for", "Pidgin", "IM", "application", "." ]
python
train
30.407407
oauthlib/oauthlib
oauthlib/oauth1/rfc5849/signature.py
https://github.com/oauthlib/oauthlib/blob/30321dd3c0ca784d3508a1970cf90d9f76835c79/oauthlib/oauth1/rfc5849/signature.py#L634-L661
def verify_hmac_sha1(request, client_secret=None, resource_owner_secret=None): """Verify a HMAC-SHA1 signature. Per `section 3.4`_ of the spec. .. _`section 3.4`: https://tools.ietf.org/html/rfc5849#section-3.4 To satisfy `RFC2616 section 5.2`_ item 1, the request argument's uri attribute MUST be an absolute URI whose netloc part identifies the origin server or gateway on which the resource resides. Any Host item of the request argument's headers dict attribute will be ignored. .. _`RFC2616 section 5.2`: https://tools.ietf.org/html/rfc2616#section-5.2 """ norm_params = normalize_parameters(request.params) bs_uri = base_string_uri(request.uri) sig_base_str = signature_base_string(request.http_method, bs_uri, norm_params) signature = sign_hmac_sha1(sig_base_str, client_secret, resource_owner_secret) match = safe_string_equals(signature, request.signature) if not match: log.debug('Verify HMAC-SHA1 failed: signature base string: %s', sig_base_str) return match
[ "def", "verify_hmac_sha1", "(", "request", ",", "client_secret", "=", "None", ",", "resource_owner_secret", "=", "None", ")", ":", "norm_params", "=", "normalize_parameters", "(", "request", ".", "params", ")", "bs_uri", "=", "base_string_uri", "(", "request", ".", "uri", ")", "sig_base_str", "=", "signature_base_string", "(", "request", ".", "http_method", ",", "bs_uri", ",", "norm_params", ")", "signature", "=", "sign_hmac_sha1", "(", "sig_base_str", ",", "client_secret", ",", "resource_owner_secret", ")", "match", "=", "safe_string_equals", "(", "signature", ",", "request", ".", "signature", ")", "if", "not", "match", ":", "log", ".", "debug", "(", "'Verify HMAC-SHA1 failed: signature base string: %s'", ",", "sig_base_str", ")", "return", "match" ]
Verify a HMAC-SHA1 signature. Per `section 3.4`_ of the spec. .. _`section 3.4`: https://tools.ietf.org/html/rfc5849#section-3.4 To satisfy `RFC2616 section 5.2`_ item 1, the request argument's uri attribute MUST be an absolute URI whose netloc part identifies the origin server or gateway on which the resource resides. Any Host item of the request argument's headers dict attribute will be ignored. .. _`RFC2616 section 5.2`: https://tools.ietf.org/html/rfc2616#section-5.2
[ "Verify", "a", "HMAC", "-", "SHA1", "signature", "." ]
python
train
40.535714
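The signing half of this verification, sketched with the standard library. Per RFC 5849 the HMAC key is the client secret and token secret joined with '&'; the percent-encoding of the secrets required by the spec is omitted here for brevity:

import base64
import hashlib
import hmac

def sign_hmac_sha1(base_string, client_secret, resource_owner_secret):
    key = "&".join([client_secret or "", resource_owner_secret or ""])
    digest = hmac.new(key.encode(), base_string.encode(), hashlib.sha1)
    return base64.b64encode(digest.digest()).decode()

sig = sign_hmac_sha1("POST&https%3A%2F%2Fapi.example.com%2F&a%3D1", "secret", "")
# verification compares in constant time, like safe_string_equals above
assert hmac.compare_digest(sig, sig)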
d0c-s4vage/pfp
pfp/fields.py
https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/fields.py#L759-L777
def _pfp__build(self, stream=None, save_offset=False):
        """Build the field and write the result into the stream

        :stream: An IO stream that can be written to
        :returns: Number of bytes written if a stream was supplied, otherwise the built data
        """
        if save_offset and stream is not None:
            self._pfp__offset = stream.tell()
        # returns either num bytes written or total data
        res = utils.binary("") if stream is None else 0

        # iterate IN ORDER
        for child in self._pfp__children:
            child_res = child._pfp__build(stream, save_offset)
            res += child_res

        return res
[ "def", "_pfp__build", "(", "self", ",", "stream", "=", "None", ",", "save_offset", "=", "False", ")", ":", "if", "save_offset", "and", "stream", "is", "not", "None", ":", "self", ".", "_pfp__offset", "=", "stream", ".", "tell", "(", ")", "# returns either num bytes written or total data", "res", "=", "utils", ".", "binary", "(", "\"\"", ")", "if", "stream", "is", "None", "else", "0", "# iterate IN ORDER", "for", "child", "in", "self", ".", "_pfp__children", ":", "child_res", "=", "child", ".", "_pfp__build", "(", "stream", ",", "save_offset", ")", "res", "+=", "child_res", "return", "res" ]
Build the field and write the result into the stream
:stream: An IO stream that can be written to
:returns: Number of bytes written if a stream was supplied, otherwise the built data
[ "Build", "the", "field", "and", "write", "the", "result", "into", "the", "stream" ]
python
train
30.473684
neurosynth/neurosynth
neurosynth/analysis/classify.py
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/classify.py#L150-L209
def classify_regions(dataset, masks, method='ERF', threshold=0.08,
                     remove_overlap=True, regularization='scale',
                     output='summary', studies=None, features=None,
                     class_weight='auto', classifier=None,
                     cross_val='4-Fold', param_grid=None, scoring='accuracy'):
    """ Perform classification on specified regions

    Given a set of masks, this function retrieves studies associated with each
    mask at the specified threshold, optionally removes overlap and filters by
    studies and features. Then it trains an algorithm to classify studies
    based on features and tests performance.

    Args:
        dataset: a Neurosynth dataset
        masks: a list of paths to Nifti masks
        method: a string indicating which method to use.
            'SVM': Support Vector Classifier with rbf kernel
            'ERF': Extremely Randomized Forest classifier
            'Dummy': A dummy classifier using stratified classes as predictor
        threshold: percentage of voxels active within the mask for study
            to be included
        remove_overlap: A boolean indicating if studies that appear in more
            than one mask should be excluded
        regularization: A string indicating type of regularization to use.
            If None, performs no regularization.
            'scale': Unit scale without demeaning
        output: A string indicating output type
            'summary': Dictionary with summary statistics including score and n
            'summary_clf': Same as above but also includes classifier
            'clf': Only returns classifier
            Warning: using cv without grid will return an untrained classifier
        studies: An optional list of study names used to constrain the set
            used in classification. If None, will use all features in the
            dataset.
        features: An optional list of feature names used to constrain the
            set used in classification. If None, will use all features in
            the dataset.
        class_weight: Parameter to pass to classifier determining how to
            weight classes
        classifier: An optional sci-kit learn classifier to use instead of
            pre-set up classifiers set up using 'method'
        cross_val: A string indicating type of cross validation to use.
            Can also pass a scikit_classifier
        param_grid: A dictionary indicating which parameters to optimize
            using GridSearchCV. If None, no GridSearch will be used

    Returns:
        The output of classify(): depending on `output`, a summary
        dictionary, a summary including the classifier, or the classifier
        itself
    """
    (X, y) = get_studies_by_regions(dataset, masks, threshold,
                                    remove_overlap, studies, features,
                                    regularization=regularization)

    return classify(X, y, method, classifier, output, cross_val,
                    class_weight, scoring=scoring, param_grid=param_grid)
[ "def", "classify_regions", "(", "dataset", ",", "masks", ",", "method", "=", "'ERF'", ",", "threshold", "=", "0.08", ",", "remove_overlap", "=", "True", ",", "regularization", "=", "'scale'", ",", "output", "=", "'summary'", ",", "studies", "=", "None", ",", "features", "=", "None", ",", "class_weight", "=", "'auto'", ",", "classifier", "=", "None", ",", "cross_val", "=", "'4-Fold'", ",", "param_grid", "=", "None", ",", "scoring", "=", "'accuracy'", ")", ":", "(", "X", ",", "y", ")", "=", "get_studies_by_regions", "(", "dataset", ",", "masks", ",", "threshold", ",", "remove_overlap", ",", "studies", ",", "features", ",", "regularization", "=", "regularization", ")", "return", "classify", "(", "X", ",", "y", ",", "method", ",", "classifier", ",", "output", ",", "cross_val", ",", "class_weight", ",", "scoring", "=", "scoring", ",", "param_grid", "=", "param_grid", ")" ]
Perform classification on specified regions

Given a set of masks, this function retrieves studies associated with each
mask at the specified threshold, optionally removes overlap and filters by
studies and features. Then it trains an algorithm to classify studies
based on features and tests performance.

Args:
    dataset: a Neurosynth dataset
    masks: a list of paths to Nifti masks
    method: a string indicating which method to use.
        'SVM': Support Vector Classifier with rbf kernel
        'ERF': Extremely Randomized Forest classifier
        'Dummy': A dummy classifier using stratified classes as predictor
    threshold: percentage of voxels active within the mask for study to be included
    remove_overlap: A boolean indicating if studies that appear in more than one mask should be excluded
    regularization: A string indicating type of regularization to use. If None, performs no regularization.
        'scale': Unit scale without demeaning
    output: A string indicating output type
        'summary': Dictionary with summary statistics including score and n
        'summary_clf': Same as above but also includes classifier
        'clf': Only returns classifier
        Warning: using cv without grid will return an untrained classifier
    studies: An optional list of study names used to constrain the set used in classification. If None, will use all features in the dataset.
    features: An optional list of feature names used to constrain the set used in classification. If None, will use all features in the dataset.
    class_weight: Parameter to pass to classifier determining how to weight classes
    classifier: An optional sci-kit learn classifier to use instead of pre-set up classifiers set up using 'method'
    cross_val: A string indicating type of cross validation to use. Can also pass a scikit_classifier
    param_grid: A dictionary indicating which parameters to optimize using GridSearchCV. If None, no GridSearch will be used

Returns:
    The output of classify(): depending on `output`, a summary dictionary, a summary including the classifier, or the classifier itself
[ "Perform", "classification", "on", "specified", "regions" ]
python
test
53.45
jrief/djangocms-cascade
cmsplugin_cascade/plugin_base.py
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/plugin_base.py#L480-L488
def in_edit_mode(self, request, placeholder):
        """
        Returns True if the plugin is in "edit mode".
        """
        toolbar = getattr(request, 'toolbar', None)
        edit_mode = getattr(toolbar, 'edit_mode', False) and getattr(placeholder, 'is_editable', True)
        if edit_mode:
            edit_mode = placeholder.has_change_permission(request.user)
        return edit_mode
[ "def", "in_edit_mode", "(", "self", ",", "request", ",", "placeholder", ")", ":", "toolbar", "=", "getattr", "(", "request", ",", "'toolbar'", ",", "None", ")", "edit_mode", "=", "getattr", "(", "toolbar", ",", "'edit_mode'", ",", "False", ")", "and", "getattr", "(", "placeholder", ",", "'is_editable'", ",", "True", ")", "if", "edit_mode", ":", "edit_mode", "=", "placeholder", ".", "has_change_permission", "(", "request", ".", "user", ")", "return", "edit_mode" ]
Returns True if the plugin is in "edit mode".
[ "Returns", "True", "if", "the", "plugin", "is", "in", "edit", "mode", "." ]
python
train
43.333333
ehansis/ozelot
ozelot/etl/tasks.py
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/ozelot/etl/tasks.py#L18-L32
def get_task_param_string(task): """Get all parameters of a task as one string Returns: str: task parameter string """ # get dict str -> str from luigi param_dict = task.to_str_params() # sort keys, serialize items = [] for key in sorted(param_dict.keys()): items.append("'{:s}': '{:s}'".format(key, param_dict[key])) return "{" + ", ".join(items) + "}"
[ "def", "get_task_param_string", "(", "task", ")", ":", "# get dict str -> str from luigi", "param_dict", "=", "task", ".", "to_str_params", "(", ")", "# sort keys, serialize", "items", "=", "[", "]", "for", "key", "in", "sorted", "(", "param_dict", ".", "keys", "(", ")", ")", ":", "items", ".", "append", "(", "\"'{:s}': '{:s}'\"", ".", "format", "(", "key", ",", "param_dict", "[", "key", "]", ")", ")", "return", "\"{\"", "+", "\", \"", ".", "join", "(", "items", ")", "+", "\"}\"" ]
Get all parameters of a task as one string Returns: str: task parameter string
[ "Get", "all", "parameters", "of", "a", "task", "as", "one", "string" ]
python
train
26.266667
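Sorting the keys makes the serialized string stable across runs, which matters if it is used for logging or deduplication. The same construction without luigi:

param_dict = {"name": "load", "date": "2024-01-01"}   # stand-in for task.to_str_params()
items = ["'{:s}': '{:s}'".format(key, param_dict[key])
         for key in sorted(param_dict.keys())]
assert "{" + ", ".join(items) + "}" == "{'date': '2024-01-01', 'name': 'load'}"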
xtuml/pyxtuml
bridgepoint/oal.py
https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/bridgepoint/oal.py#L1634-L1638
def p_navigation_step_2(self, p): '''navigation_step : ARROW identifier LSQBR identifier DOT phrase RSQBR''' p[0] = NavigationStepNode(key_letter=p[2], rel_id=p[4], phrase=p[6])
[ "def", "p_navigation_step_2", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "NavigationStepNode", "(", "key_letter", "=", "p", "[", "2", "]", ",", "rel_id", "=", "p", "[", "4", "]", ",", "phrase", "=", "p", "[", "6", "]", ")" ]
navigation_step : ARROW identifier LSQBR identifier DOT phrase RSQBR
[ "navigation_step", ":", "ARROW", "identifier", "LSQBR", "identifier", "DOT", "phrase", "RSQBR" ]
python
test
51.4
aiogram/aiogram
aiogram/contrib/middlewares/logging.py
https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/contrib/middlewares/logging.py#L233-L245
def make_prefix(self, prefix, iterable):
        """
        Add prefix to the label

        :param prefix:
        :param iterable:
        :return:
        """
        if not prefix:
            yield from iterable
            return
        for key, value in iterable:
            yield f"{prefix}_{key}", value
[ "def", "make_prefix", "(", "self", ",", "prefix", ",", "iterable", ")", ":", "if", "not", "prefix", ":", "yield", "from", "iterable", "return", "for", "key", ",", "value", "in", "iterable", ":", "yield", "f\"{prefix}_{key}\"", ",", "value" ]
Add prefix to the label :param prefix: :param iterable: :return:
[ "Add", "prefix", "to", "the", "label" ]
python
train
21.923077
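With the early return after yield from (added above; without it, a falsy prefix would fall through into the loop and emit each item a second time with a bare leading underscore), the generator behaves like this standalone version:

def make_prefix(prefix, iterable):
    if not prefix:
        yield from iterable
        return
    for key, value in iterable:
        yield f"{prefix}_{key}", value

assert list(make_prefix("msg", [("id", 1)])) == [("msg_id", 1)]
assert list(make_prefix("", [("id", 1)])) == [("id", 1)]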
bunq/sdk_python
bunq/sdk/model/generated/endpoint.py
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/model/generated/endpoint.py#L6532-L6544
def get_referenced_object(self): """ :rtype: core.BunqModel :raise: BunqException """ if self._TabUsageSingle is not None: return self._TabUsageSingle if self._TabUsageMultiple is not None: return self._TabUsageMultiple raise exception.BunqException(self._ERROR_NULL_FIELDS)
[ "def", "get_referenced_object", "(", "self", ")", ":", "if", "self", ".", "_TabUsageSingle", "is", "not", "None", ":", "return", "self", ".", "_TabUsageSingle", "if", "self", ".", "_TabUsageMultiple", "is", "not", "None", ":", "return", "self", ".", "_TabUsageMultiple", "raise", "exception", ".", "BunqException", "(", "self", ".", "_ERROR_NULL_FIELDS", ")" ]
:rtype: core.BunqModel :raise: BunqException
[ ":", "rtype", ":", "core", ".", "BunqModel", ":", "raise", ":", "BunqException" ]
python
train
26.538462
yyuu/botornado
boto/dynamodb/layer2.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/dynamodb/layer2.py#L299-L316
def list_tables(self, limit=None, start_table=None):
        """
        Return a list of the names of all Tables associated with the
        current account and region.
        TODO - Layer2 should probably automatically handle pagination.

        :type limit: int
        :param limit: The maximum number of tables to return.

        :type start_table: str
        :param start_table: The name of the table that starts the
            list.  If you ran a previous list_tables and not
            all results were returned, the response dict would
            include a LastEvaluatedTableName attribute.  Use
            that value here to continue the listing.
        """
        result = self.layer1.list_tables(limit, start_table)
        return result['TableNames']
[ "def", "list_tables", "(", "self", ",", "limit", "=", "None", ",", "start_table", "=", "None", ")", ":", "result", "=", "self", ".", "layer1", ".", "list_tables", "(", "limit", ",", "start_table", ")", "return", "result", "[", "'TableNames'", "]" ]
Return a list of the names of all Tables associated with the
current account and region.
TODO - Layer2 should probably automatically handle pagination.

:type limit: int
:param limit: The maximum number of tables to return.

:type start_table: str
:param start_table: The name of the table that starts the
    list.  If you ran a previous list_tables and not
    all results were returned, the response dict would
    include a LastEvaluatedTableName attribute.  Use
    that value here to continue the listing.
[ "Return", "a", "list", "of", "the", "names", "of", "all", "Tables", "associated", "with", "the", "current", "account", "and", "region", ".", "TODO", "-", "Layer2", "should", "probably", "automatically", "handle", "pagination", "." ]
python
train
41.666667
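The docstring mentions continuing a listing via LastEvaluatedTableName. A hedged sketch of that loop driven through layer1 directly, assuming (as the record implies) that layer1.list_tables returns the raw response dict:

def iter_all_tables(layer1, page_size=100):
    start = None
    while True:
        result = layer1.list_tables(page_size, start)
        for name in result["TableNames"]:
            yield name
        start = result.get("LastEvaluatedTableName")
        if start is None:
            break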
geophysics-ubonn/reda
lib/reda/containers/SIP.py
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/SIP.py#L92-L153
def reduce_duplicate_frequencies(self): """In case multiple frequencies were measured, average them and compute std, min, max values for zt. In case timesteps were added (i.e., multiple separate measurements), group over those and average for each timestep. Examples -------- :: import tempfile import reda with tempfile.TemporaryDirectory() as fid: reda.data.download_data('sip04_fs_06', fid) sip = reda.SIP() sip.import_sip04(fid + '/sip_dataA.mat', timestep=0) # well, add the spectrum again as another timestep sip.import_sip04(fid + '/sip_dataA.mat', timestep=1) df = sip.reduce_duplicate_frequencies() """ group_keys = ['frequency', ] if 'timestep' in self.data.columns: group_keys = group_keys + ['timestep', ] g = self.data.groupby(group_keys) def group_apply(item): y = item[['zt_1', 'zt_2', 'zt_3']].values.flatten() zt_imag_std = np.std(y.imag) zt_real_std = np.std(y.real) zt_imag_min = np.min(y.imag) zt_real_min = np.min(y.real) zt_imag_max = np.max(y.imag) zt_real_max = np.max(y.real) zt_imag_mean = np.mean(y.imag) zt_real_mean = np.mean(y.real) dfn = pd.DataFrame( { 'zt_real_mean': zt_real_mean, 'zt_real_std': zt_real_std, 'zt_real_min': zt_real_min, 'zt_real_max': zt_real_max, 'zt_imag_mean': zt_imag_mean, 'zt_imag_std': zt_imag_std, 'zt_imag_min': zt_imag_min, 'zt_imag_max': zt_imag_max, }, index=[0, ] ) dfn['count'] = len(y) dfn.index.name = 'index' return dfn p = g.apply(group_apply) p.index = p.index.droplevel('index') if len(group_keys) > 1: p = p.swaplevel(0, 1).sort_index() return p
[ "def", "reduce_duplicate_frequencies", "(", "self", ")", ":", "group_keys", "=", "[", "'frequency'", ",", "]", "if", "'timestep'", "in", "self", ".", "data", ".", "columns", ":", "group_keys", "=", "group_keys", "+", "[", "'timestep'", ",", "]", "g", "=", "self", ".", "data", ".", "groupby", "(", "group_keys", ")", "def", "group_apply", "(", "item", ")", ":", "y", "=", "item", "[", "[", "'zt_1'", ",", "'zt_2'", ",", "'zt_3'", "]", "]", ".", "values", ".", "flatten", "(", ")", "zt_imag_std", "=", "np", ".", "std", "(", "y", ".", "imag", ")", "zt_real_std", "=", "np", ".", "std", "(", "y", ".", "real", ")", "zt_imag_min", "=", "np", ".", "min", "(", "y", ".", "imag", ")", "zt_real_min", "=", "np", ".", "min", "(", "y", ".", "real", ")", "zt_imag_max", "=", "np", ".", "max", "(", "y", ".", "imag", ")", "zt_real_max", "=", "np", ".", "max", "(", "y", ".", "real", ")", "zt_imag_mean", "=", "np", ".", "mean", "(", "y", ".", "imag", ")", "zt_real_mean", "=", "np", ".", "mean", "(", "y", ".", "real", ")", "dfn", "=", "pd", ".", "DataFrame", "(", "{", "'zt_real_mean'", ":", "zt_real_mean", ",", "'zt_real_std'", ":", "zt_real_std", ",", "'zt_real_min'", ":", "zt_real_min", ",", "'zt_real_max'", ":", "zt_real_max", ",", "'zt_imag_mean'", ":", "zt_imag_mean", ",", "'zt_imag_std'", ":", "zt_imag_std", ",", "'zt_imag_min'", ":", "zt_imag_min", ",", "'zt_imag_max'", ":", "zt_imag_max", ",", "}", ",", "index", "=", "[", "0", ",", "]", ")", "dfn", "[", "'count'", "]", "=", "len", "(", "y", ")", "dfn", ".", "index", ".", "name", "=", "'index'", "return", "dfn", "p", "=", "g", ".", "apply", "(", "group_apply", ")", "p", ".", "index", "=", "p", ".", "index", ".", "droplevel", "(", "'index'", ")", "if", "len", "(", "group_keys", ")", ">", "1", ":", "p", "=", "p", ".", "swaplevel", "(", "0", ",", "1", ")", ".", "sort_index", "(", ")", "return", "p" ]
In case multiple frequencies were measured, average them and compute std, min, max values for zt. In case timesteps were added (i.e., multiple separate measurements), group over those and average for each timestep. Examples -------- :: import tempfile import reda with tempfile.TemporaryDirectory() as fid: reda.data.download_data('sip04_fs_06', fid) sip = reda.SIP() sip.import_sip04(fid + '/sip_dataA.mat', timestep=0) # well, add the spectrum again as another timestep sip.import_sip04(fid + '/sip_dataA.mat', timestep=1) df = sip.reduce_duplicate_frequencies()
[ "In", "case", "multiple", "frequencies", "were", "measured", "average", "them", "and", "compute", "std", "min", "max", "values", "for", "zt", "." ]
python
train
34.209677
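The core reduction above, shrunk to one complex-valued column and two statistics; returning a Series from the group function gives one row per frequency:

import numpy as np
import pandas as pd

df = pd.DataFrame({
    "frequency": [1.0, 1.0, 10.0],
    "zt": [1 + 2j, 3 + 4j, 5 + 6j],
})

def stats(item):
    y = item["zt"].values
    return pd.Series({
        "zt_real_mean": np.mean(y.real),
        "zt_imag_mean": np.mean(y.imag),
        "count": len(y),
    })

print(df.groupby("frequency").apply(stats))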
google/transitfeed
kmlwriter.py
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/kmlwriter.py#L415-L436
def _CreateStopPlacemark(self, stop_folder, stop, style_id): """Creates a new stop <Placemark/> element. Args: stop_folder: the KML folder the placemark will be added to. stop: the actual Stop to create a placemark for. style_id: optional argument indicating a style id to add to the placemark. """ desc_items = [] desc_items.append("Stop id: %s" % stop.stop_id) if stop.stop_desc: desc_items.append(stop.stop_desc) if stop.stop_url: desc_items.append('Stop info page: <a href="%s">%s</a>' % ( stop.stop_url, stop.stop_url)) description = '<br/>'.join(desc_items) or None placemark = self._CreatePlacemark(stop_folder, stop.stop_name, description=description, style_id=style_id) point = ET.SubElement(placemark, 'Point') coordinates = ET.SubElement(point, 'coordinates') coordinates.text = '%.6f,%.6f' % (stop.stop_lon, stop.stop_lat)
[ "def", "_CreateStopPlacemark", "(", "self", ",", "stop_folder", ",", "stop", ",", "style_id", ")", ":", "desc_items", "=", "[", "]", "desc_items", ".", "append", "(", "\"Stop id: %s\"", "%", "stop", ".", "stop_id", ")", "if", "stop", ".", "stop_desc", ":", "desc_items", ".", "append", "(", "stop", ".", "stop_desc", ")", "if", "stop", ".", "stop_url", ":", "desc_items", ".", "append", "(", "'Stop info page: <a href=\"%s\">%s</a>'", "%", "(", "stop", ".", "stop_url", ",", "stop", ".", "stop_url", ")", ")", "description", "=", "'<br/>'", ".", "join", "(", "desc_items", ")", "or", "None", "placemark", "=", "self", ".", "_CreatePlacemark", "(", "stop_folder", ",", "stop", ".", "stop_name", ",", "description", "=", "description", ",", "style_id", "=", "style_id", ")", "point", "=", "ET", ".", "SubElement", "(", "placemark", ",", "'Point'", ")", "coordinates", "=", "ET", ".", "SubElement", "(", "point", ",", "'coordinates'", ")", "coordinates", ".", "text", "=", "'%.6f,%.6f'", "%", "(", "stop", ".", "stop_lon", ",", "stop", ".", "stop_lat", ")" ]
Creates a new stop <Placemark/> element. Args: stop_folder: the KML folder the placemark will be added to. stop: the actual Stop to create a placemark for. style_id: optional argument indicating a style id to add to the placemark.
[ "Creates", "a", "new", "stop", "<Placemark", "/", ">", "element", "." ]
python
train
44.636364
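The ElementTree calls above emit standard KML; a minimal placemark with made-up stop data shows the shape of the output:

import xml.etree.ElementTree as ET

placemark = ET.Element("Placemark")
ET.SubElement(placemark, "name").text = "Main St Station"   # made-up stop
ET.SubElement(placemark, "description").text = "Stop id: 42"
point = ET.SubElement(placemark, "Point")
ET.SubElement(point, "coordinates").text = "%.6f,%.6f" % (-122.4194, 37.7749)
print(ET.tostring(placemark, encoding="unicode"))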
qweeze/wex-api-client
wex/client.py
https://github.com/qweeze/wex-api-client/blob/e84d139be229aab2c7c5eda5976b812be651807b/wex/client.py#L233-L250
def create_coupon(self, currency, amount, receiver): """ This method allows you to create Coupons. Please, note: In order to use this method, you need the Coupon key privilege. You can make a request to enable it by submitting a ticket to Support.. You need to create the API key that you are going to use for this method in advance. Please provide the first 8 characters of the key (e.g. HKG82W66) in your ticket to support. We'll enable the Coupon privilege for this key. You must also provide us the IP-addresses from which you will be accessing the API. When using this method, there will be no additional confirmations of transactions. Please note that you are fully responsible for keeping the secret of the API key safe after we have enabled the Withdraw privilege for it. :param str currency: currency (ex. 'BTC') :param int amount: withdrawal amount :param str receiver: name of user who is allowed to redeem the code """ return self._trade_api_call('CreateCoupon', currency=currency, amount=amount, receiver=receiver)
[ "def", "create_coupon", "(", "self", ",", "currency", ",", "amount", ",", "receiver", ")", ":", "return", "self", ".", "_trade_api_call", "(", "'CreateCoupon'", ",", "currency", "=", "currency", ",", "amount", "=", "amount", ",", "receiver", "=", "receiver", ")" ]
This method allows you to create Coupons. Please, note: In order to use this method, you need the Coupon key privilege. You can make a request to enable it by submitting a ticket to Support.. You need to create the API key that you are going to use for this method in advance. Please provide the first 8 characters of the key (e.g. HKG82W66) in your ticket to support. We'll enable the Coupon privilege for this key. You must also provide us the IP-addresses from which you will be accessing the API. When using this method, there will be no additional confirmations of transactions. Please note that you are fully responsible for keeping the secret of the API key safe after we have enabled the Withdraw privilege for it. :param str currency: currency (ex. 'BTC') :param int amount: withdrawal amount :param str receiver: name of user who is allowed to redeem the code
[ "This", "method", "allows", "you", "to", "create", "Coupons", ".", "Please", "note", ":", "In", "order", "to", "use", "this", "method", "you", "need", "the", "Coupon", "key", "privilege", ".", "You", "can", "make", "a", "request", "to", "enable", "it", "by", "submitting", "a", "ticket", "to", "Support", "..", "You", "need", "to", "create", "the", "API", "key", "that", "you", "are", "going", "to", "use", "for", "this", "method", "in", "advance", ".", "Please", "provide", "the", "first", "8", "characters", "of", "the", "key", "(", "e", ".", "g", ".", "HKG82W66", ")", "in", "your", "ticket", "to", "support", ".", "We", "ll", "enable", "the", "Coupon", "privilege", "for", "this", "key", ".", "You", "must", "also", "provide", "us", "the", "IP", "-", "addresses", "from", "which", "you", "will", "be", "accessing", "the", "API", ".", "When", "using", "this", "method", "there", "will", "be", "no", "additional", "confirmations", "of", "transactions", ".", "Please", "note", "that", "you", "are", "fully", "responsible", "for", "keeping", "the", "secret", "of", "the", "API", "key", "safe", "after", "we", "have", "enabled", "the", "Withdraw", "privilege", "for", "it", "." ]
python
train
63.277778
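A minimal usage sketch for the create_coupon record above. The import path follows the repository's path field, but the Client constructor and its credential parameter names are assumptions, not verified against the library:

# Hypothetical usage of Client.create_coupon (constructor arguments assumed)
from wex.client import Client

client = Client('API_KEY', 'API_SECRET')  # credential handling here is an assumption
# Create a 0.1 BTC coupon that only user 'alice' may redeem
result = client.create_coupon(currency='BTC', amount=0.1, receiver='alice')
print(result)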
Rapptz/discord.py
discord/shard.py
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/shard.py#L297-L359
async def change_presence(self, *, activity=None, status=None, afk=False, shard_id=None): """|coro| Changes the client's presence. The activity parameter is a :class:`Activity` object (not a string) that represents the activity being done currently. This could also be the slimmed down versions, :class:`Game` and :class:`Streaming`. Example: :: game = discord.Game("with the API") await client.change_presence(status=discord.Status.idle, activity=game) Parameters ---------- activity: Optional[Union[:class:`Game`, :class:`Streaming`, :class:`Activity`]] The activity being done. ``None`` if no currently active activity is done. status: Optional[:class:`Status`] Indicates what status to change to. If None, then :attr:`Status.online` is used. afk: :class:`bool` Indicates if you are going AFK. This allows the discord client to know how to handle push notifications better for you in case you are actually idle and not lying. shard_id: Optional[:class:`int`] The shard_id to change the presence to. If not specified or ``None``, then it will change the presence of every shard the bot can see. Raises ------ InvalidArgument If the ``activity`` parameter is not of proper type. """ if status is None: status = 'online' status_enum = Status.online elif status is Status.offline: status = 'invisible' status_enum = Status.offline else: status_enum = status status = str(status) if shard_id is None: for shard in self.shards.values(): await shard.ws.change_presence(activity=activity, status=status, afk=afk) guilds = self._connection.guilds else: shard = self.shards[shard_id] await shard.ws.change_presence(activity=activity, status=status, afk=afk) guilds = [g for g in self._connection.guilds if g.shard_id == shard_id] for guild in guilds: me = guild.me if me is None: continue me.activities = (activity,) me.status = status_enum
[ "async", "def", "change_presence", "(", "self", ",", "*", ",", "activity", "=", "None", ",", "status", "=", "None", ",", "afk", "=", "False", ",", "shard_id", "=", "None", ")", ":", "if", "status", "is", "None", ":", "status", "=", "'online'", "status_enum", "=", "Status", ".", "online", "elif", "status", "is", "Status", ".", "offline", ":", "status", "=", "'invisible'", "status_enum", "=", "Status", ".", "offline", "else", ":", "status_enum", "=", "status", "status", "=", "str", "(", "status", ")", "if", "shard_id", "is", "None", ":", "for", "shard", "in", "self", ".", "shards", ".", "values", "(", ")", ":", "await", "shard", ".", "ws", ".", "change_presence", "(", "activity", "=", "activity", ",", "status", "=", "status", ",", "afk", "=", "afk", ")", "guilds", "=", "self", ".", "_connection", ".", "guilds", "else", ":", "shard", "=", "self", ".", "shards", "[", "shard_id", "]", "await", "shard", ".", "ws", ".", "change_presence", "(", "activity", "=", "activity", ",", "status", "=", "status", ",", "afk", "=", "afk", ")", "guilds", "=", "[", "g", "for", "g", "in", "self", ".", "_connection", ".", "guilds", "if", "g", ".", "shard_id", "==", "shard_id", "]", "for", "guild", "in", "guilds", ":", "me", "=", "guild", ".", "me", "if", "me", "is", "None", ":", "continue", "me", ".", "activities", "=", "(", "activity", ",", ")", "me", ".", "status", "=", "status_enum" ]
|coro| Changes the client's presence. The activity parameter is a :class:`Activity` object (not a string) that represents the activity being done currently. This could also be the slimmed down versions, :class:`Game` and :class:`Streaming`. Example: :: game = discord.Game("with the API") await client.change_presence(status=discord.Status.idle, activity=game) Parameters ---------- activity: Optional[Union[:class:`Game`, :class:`Streaming`, :class:`Activity`]] The activity being done. ``None`` if no currently active activity is done. status: Optional[:class:`Status`] Indicates what status to change to. If None, then :attr:`Status.online` is used. afk: :class:`bool` Indicates if you are going AFK. This allows the discord client to know how to handle push notifications better for you in case you are actually idle and not lying. shard_id: Optional[:class:`int`] The shard_id to change the presence to. If not specified or ``None``, then it will change the presence of every shard the bot can see. Raises ------ InvalidArgument If the ``activity`` parameter is not of proper type.
[ "|coro|" ]
python
train
36.730159
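The docstring above already carries a basic example; the sketch below exercises the shard_id branch of the code, changing presence on a single shard while the others are left untouched. It assumes an AutoShardedClient instance named client and must run inside a coroutine:

# Change presence on shard 0 only; remaining shards keep their current presence.
game = discord.Game("with shard 0")
await client.change_presence(status=discord.Status.online, activity=game, shard_id=0)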
AtteqCom/zsl
src/zsl/utils/cache_helper.py
https://github.com/AtteqCom/zsl/blob/ab51a96da1780ff642912396d4b85bdcb72560c1/src/zsl/utils/cache_helper.py#L181-L194
def create_key_for_data(prefix, data, key_params): """ From ``data`` params in task create corresponding key with help of ``key_params`` (defined in decorator) """ d = data.get_data() values = [] for k in key_params: if k in d and type(d[k]) is list: values.append("{0}:{1}".format(k, " -".join(d[k]))) else: value = d[k] if k in d else '' values.append("{0}:{1}".format(k, value)) return "{0}-{1}".format(prefix, "-".join(values))
[ "def", "create_key_for_data", "(", "prefix", ",", "data", ",", "key_params", ")", ":", "d", "=", "data", ".", "get_data", "(", ")", "values", "=", "[", "]", "for", "k", "in", "key_params", ":", "if", "k", "in", "d", "and", "type", "(", "d", "[", "k", "]", ")", "is", "list", ":", "values", ".", "append", "(", "\"{0}:{1}\"", ".", "format", "(", "k", ",", "\" -\"", ".", "join", "(", "d", "[", "k", "]", ")", ")", ")", "else", ":", "value", "=", "d", "[", "k", "]", "if", "k", "in", "d", "else", "''", "values", ".", "append", "(", "\"{0}:{1}\"", ".", "format", "(", "k", ",", "value", ")", ")", "return", "\"{0}-{1}\"", ".", "format", "(", "prefix", ",", "\"-\"", ".", "join", "(", "values", ")", ")" ]
From ``data`` params in task create corresponding key with help of ``key_params`` (defined in decorator)
[ "From", "data", "params", "in", "task", "create", "corresponding", "key", "with", "help", "of", "key_params", "(", "defined", "in", "decorator", ")" ]
python
train
35.642857
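A standalone sketch of the key format the function above produces. FakeData is a stub standing in for the zsl task-data object; get_data() is the only method the function relies on:

class FakeData:
    # Stub for the task data object; only get_data() is needed here.
    def __init__(self, d):
        self._d = d

    def get_data(self):
        return self._d

data = FakeData({'user': 'bob', 'tags': ['a', 'b'], 'page': 2})
# List values are joined with ' -', scalar values are rendered as-is:
print(create_key_for_data('cache', data, ('user', 'tags')))
# -> cache-user:bob-tags:a -b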
baliame/http-hmac-python
httphmac/v2.py
https://github.com/baliame/http-hmac-python/blob/9884c0cbfdb712f9f37080a8efbfdce82850785f/httphmac/v2.py#L165-L177
def unroll_auth_headers(self, authheaders, exclude_signature=False, sep=",", quote=True): """Converts an authorization header dict-like object into a string representing the authorization. Keyword arguments: authheaders -- A string-indexable object which contains the headers appropriate for this signature version. """ res = "" ordered = collections.OrderedDict(sorted(authheaders.items())) form = '{0}=\"{1}\"' if quote else '{0}={1}' if exclude_signature: return sep.join([form.format(k, urlquote(str(v), safe='')) for k, v in ordered.items() if k != 'signature']) else: return sep.join([form.format(k, urlquote(str(v), safe='') if k != 'signature' else str(v)) for k, v in ordered.items()])
[ "def", "unroll_auth_headers", "(", "self", ",", "authheaders", ",", "exclude_signature", "=", "False", ",", "sep", "=", "\",\"", ",", "quote", "=", "True", ")", ":", "res", "=", "\"\"", "ordered", "=", "collections", ".", "OrderedDict", "(", "sorted", "(", "authheaders", ".", "items", "(", ")", ")", ")", "form", "=", "'{0}=\\\"{1}\\\"'", "if", "quote", "else", "'{0}={1}'", "if", "exclude_signature", ":", "return", "sep", ".", "join", "(", "[", "form", ".", "format", "(", "k", ",", "urlquote", "(", "str", "(", "v", ")", ",", "safe", "=", "''", ")", ")", "for", "k", ",", "v", "in", "ordered", ".", "items", "(", ")", "if", "k", "!=", "'signature'", "]", ")", "else", ":", "return", "sep", ".", "join", "(", "[", "form", ".", "format", "(", "k", ",", "urlquote", "(", "str", "(", "v", ")", ",", "safe", "=", "''", ")", "if", "k", "!=", "'signature'", "else", "str", "(", "v", ")", ")", "for", "k", ",", "v", "in", "ordered", ".", "items", "(", ")", "]", ")" ]
Converts an authorization header dict-like object into a string representing the authorization. Keyword arguments: authheaders -- A string-indexable object which contains the headers appropriate for this signature version.
[ "Converts", "an", "authorization", "header", "dict", "-", "like", "object", "into", "a", "string", "representing", "the", "authorization", "." ]
python
train
59.923077
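A quick self-contained check of the serialization above, reproducing its core rules (keys sorted, values URL-encoded, the signature value left unencoded):

import collections
from urllib.parse import quote as urlquote

authheaders = {'id': 'abc 123', 'nonce': 'n1', 'signature': 'sig=='}
ordered = collections.OrderedDict(sorted(authheaders.items()))
parts = ['{0}="{1}"'.format(k, urlquote(str(v), safe='') if k != 'signature' else str(v))
         for k, v in ordered.items()]
print(','.join(parts))
# -> id="abc%20123",nonce="n1",signature="sig=="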
materialsproject/pymatgen
pymatgen/io/abinit/flows.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/flows.py#L268-L275
def reload(self): """ Reload the flow from the pickle file. Used when we are monitoring the flow executed by the scheduler. In this case, indeed, the flow might have been changed by the scheduler and we have to reload the new flow in memory. """ new = self.__class__.pickle_load(self.workdir) self = new
[ "def", "reload", "(", "self", ")", ":", "new", "=", "self", ".", "__class__", ".", "pickle_load", "(", "self", ".", "workdir", ")", "self", "=", "new" ]
Reload the flow from the pickle file. Used when we are monitoring the flow executed by the scheduler. In this case, indeed, the flow might have been changed by the scheduler and we have to reload the new flow in memory.
[ "Reload", "the", "flow", "from", "the", "pickle", "file", ".", "Used", "when", "we", "are", "monitoring", "the", "flow", "executed", "by", "the", "scheduler", ".", "In", "this", "case", "indeed", "the", "flow", "might", "have", "been", "changed", "by", "the", "scheduler", "and", "we", "have", "to", "reload", "the", "new", "flow", "in", "memory", "." ]
python
train
44
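Worth noting about the record above: `self = new` only rebinds the local name, so callers holding the old flow object never observe the reloaded state. A hedged sketch of an in-place variant, assuming the unpickled object's attributes can simply be copied over:

def reload(self):
    """Reload the flow from the pickle file, updating this object in place."""
    new = self.__class__.pickle_load(self.workdir)
    # Copy the freshly unpickled state onto the existing instance so that
    # external references (e.g. the scheduler's) observe the reloaded flow.
    self.__dict__.update(new.__dict__)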
fabiobatalha/crossrefapi
crossref/restful.py
https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L1594-L1603
def works(self, prefix_id): """ This method retrieve a iterable of Works of the given prefix. args: Crossref Prefix (String) return: Works() """ context = '%s/%s' % (self.ENDPOINT, str(prefix_id)) return Works(context=context)
[ "def", "works", "(", "self", ",", "prefix_id", ")", ":", "context", "=", "'%s/%s'", "%", "(", "self", ".", "ENDPOINT", ",", "str", "(", "prefix_id", ")", ")", "return", "Works", "(", "context", "=", "context", ")" ]
This method retrieves an iterable of Works of the given prefix. args: Crossref Prefix (String) return: Works()
[ "This", "method", "retrieve", "a", "iterable", "of", "Works", "of", "the", "given", "prefix", "." ]
python
train
27.5
cloudendpoints/endpoints-python
endpoints/endpoints_dispatcher.py
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/endpoints_dispatcher.py#L683-L696
def transform_rest_response(self, response_body): """Translates an apiserving REST response so it's ready to return. Currently, the only thing that needs to be fixed here is indentation, so it's consistent with what the live app will return. Args: response_body: A string containing the backend response. Returns: A reformatted version of the response JSON. """ body_json = json.loads(response_body) return json.dumps(body_json, indent=1, sort_keys=True)
[ "def", "transform_rest_response", "(", "self", ",", "response_body", ")", ":", "body_json", "=", "json", ".", "loads", "(", "response_body", ")", "return", "json", ".", "dumps", "(", "body_json", ",", "indent", "=", "1", ",", "sort_keys", "=", "True", ")" ]
Translates an apiserving REST response so it's ready to return. Currently, the only thing that needs to be fixed here is indentation, so it's consistent with what the live app will return. Args: response_body: A string containing the backend response. Returns: A reformatted version of the response JSON.
[ "Translates", "an", "apiserving", "REST", "response", "so", "it", "s", "ready", "to", "return", "." ]
python
train
34.857143
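The reformatting above is a plain JSON round-trip; in isolation:

import json

response_body = '{"b": 2, "a": 1}'
print(json.dumps(json.loads(response_body), indent=1, sort_keys=True))
# {
#  "a": 1,
#  "b": 2
# }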
chaoss/grimoirelab-manuscripts
manuscripts/report.py
https://github.com/chaoss/grimoirelab-manuscripts/blob/94a3ad4f11bfbcd6c5190e01cb5d3e47a5187cd9/manuscripts/report.py#L556-L616
def sec_project_community(self, project=None): """ Generate the data for the Communication section in a Project report :return: """ def create_csv(metric1, csv_labels, file_label): esfilters = None csv_labels = csv_labels.replace("_", "") # LaTeX not supports "_" if project != self.GLOBAL_PROJECT: esfilters = {"project": project} data_path = os.path.join(self.data_dir, "data") file_name = os.path.join(data_path, file_label + "_" + project + ".csv") logger.debug("CSV file %s generation in progress", file_name) m1 = metric1(self.es_url, self.get_metric_index(metric1), esfilters=esfilters, start=self.end_prev_month, end=self.end) top = m1.get_list() csv = csv_labels + '\n' for i in range(0, len(top['value'])): if i > self.TOP_MAX: break csv += top[metric1.FIELD_NAME][i] + "," + self.str_val(top['value'][i]) csv += "\n" with open(file_name, "w") as f: f.write(csv) logger.debug("CSV file %s was generated", file_name) logger.info("Community data for: %s", project) author = self.config['project_community']['author_metrics'][0] csv_labels = 'labels,' + author.id file_label = author.ds.name + "_" + author.id title_label = author.name + " per " + self.interval self.__create_csv_eps(author, None, csv_labels, file_label, title_label, project) """ Main developers """ metric = self.config['project_community']['people_top_metrics'][0] # TODO: Commits must be extracted from metric csv_labels = author.id + ",commits" file_label = author.ds.name + "_top_" + author.id create_csv(metric, csv_labels, file_label) """ Main organizations """ orgs = self.config['project_community']['orgs_top_metrics'][0] # TODO: Commits must be extracted from metric csv_labels = orgs.id + ",commits" file_label = orgs.ds.name + "_top_" + orgs.id create_csv(orgs, csv_labels, file_label)
[ "def", "sec_project_community", "(", "self", ",", "project", "=", "None", ")", ":", "def", "create_csv", "(", "metric1", ",", "csv_labels", ",", "file_label", ")", ":", "esfilters", "=", "None", "csv_labels", "=", "csv_labels", ".", "replace", "(", "\"_\"", ",", "\"\"", ")", "# LaTeX not supports \"_\"", "if", "project", "!=", "self", ".", "GLOBAL_PROJECT", ":", "esfilters", "=", "{", "\"project\"", ":", "project", "}", "data_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "data_dir", ",", "\"data\"", ")", "file_name", "=", "os", ".", "path", ".", "join", "(", "data_path", ",", "file_label", "+", "\"_\"", "+", "project", "+", "\".csv\"", ")", "logger", ".", "debug", "(", "\"CSV file %s generation in progress\"", ",", "file_name", ")", "m1", "=", "metric1", "(", "self", ".", "es_url", ",", "self", ".", "get_metric_index", "(", "metric1", ")", ",", "esfilters", "=", "esfilters", ",", "start", "=", "self", ".", "end_prev_month", ",", "end", "=", "self", ".", "end", ")", "top", "=", "m1", ".", "get_list", "(", ")", "csv", "=", "csv_labels", "+", "'\\n'", "for", "i", "in", "range", "(", "0", ",", "len", "(", "top", "[", "'value'", "]", ")", ")", ":", "if", "i", ">", "self", ".", "TOP_MAX", ":", "break", "csv", "+=", "top", "[", "metric1", ".", "FIELD_NAME", "]", "[", "i", "]", "+", "\",\"", "+", "self", ".", "str_val", "(", "top", "[", "'value'", "]", "[", "i", "]", ")", "csv", "+=", "\"\\n\"", "with", "open", "(", "file_name", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "csv", ")", "logger", ".", "debug", "(", "\"CSV file %s was generated\"", ",", "file_name", ")", "logger", ".", "info", "(", "\"Community data for: %s\"", ",", "project", ")", "author", "=", "self", ".", "config", "[", "'project_community'", "]", "[", "'author_metrics'", "]", "[", "0", "]", "csv_labels", "=", "'labels,'", "+", "author", ".", "id", "file_label", "=", "author", ".", "ds", ".", "name", "+", "\"_\"", "+", "author", ".", "id", "title_label", "=", "author", ".", "name", "+", "\" per \"", "+", "self", ".", "interval", "self", ".", "__create_csv_eps", "(", "author", ",", "None", ",", "csv_labels", ",", "file_label", ",", "title_label", ",", "project", ")", "\"\"\"\n Main developers\n\n \"\"\"", "metric", "=", "self", ".", "config", "[", "'project_community'", "]", "[", "'people_top_metrics'", "]", "[", "0", "]", "# TODO: Commits must be extracted from metric", "csv_labels", "=", "author", ".", "id", "+", "\",commits\"", "file_label", "=", "author", ".", "ds", ".", "name", "+", "\"_top_\"", "+", "author", ".", "id", "create_csv", "(", "metric", ",", "csv_labels", ",", "file_label", ")", "\"\"\"\n Main organizations\n\n \"\"\"", "orgs", "=", "self", ".", "config", "[", "'project_community'", "]", "[", "'orgs_top_metrics'", "]", "[", "0", "]", "# TODO: Commits must be extracted from metric", "csv_labels", "=", "orgs", ".", "id", "+", "\",commits\"", "file_label", "=", "orgs", ".", "ds", ".", "name", "+", "\"_top_\"", "+", "orgs", ".", "id", "create_csv", "(", "orgs", ",", "csv_labels", ",", "file_label", ")" ]
Generate the data for the Communication section in a Project report :return:
[ "Generate", "the", "data", "for", "the", "Communication", "section", "in", "a", "Project", "report", ":", "return", ":" ]
python
train
36.754098
StackStorm/pybind
pybind/slxos/v17s_1_02/bd_vc_peer_state/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/bd_vc_peer_state/__init__.py#L141-L164
def _set_bd_vc_peer_counter(self, v, load=False): """ Setter method for bd_vc_peer_counter, mapped from YANG variable /bd_vc_peer_state/bd_vc_peer_counter (container) If this variable is read-only (config: false) in the source YANG file, then _set_bd_vc_peer_counter is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_bd_vc_peer_counter() directly. YANG Description: VC peer counters """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=bd_vc_peer_counter.bd_vc_peer_counter, is_container='container', presence=False, yang_name="bd-vc-peer-counter", rest_name="bd-vc-peer-counter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'pwm-bd-vc-peer-counter', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-pwm-operational', defining_module='brocade-pwm-operational', yang_type='container', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """bd_vc_peer_counter must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=bd_vc_peer_counter.bd_vc_peer_counter, is_container='container', presence=False, yang_name="bd-vc-peer-counter", rest_name="bd-vc-peer-counter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'pwm-bd-vc-peer-counter', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-pwm-operational', defining_module='brocade-pwm-operational', yang_type='container', is_config=False)""", }) self.__bd_vc_peer_counter = t if hasattr(self, '_set'): self._set()
[ "def", "_set_bd_vc_peer_counter", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "bd_vc_peer_counter", ".", "bd_vc_peer_counter", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"bd-vc-peer-counter\"", ",", "rest_name", "=", "\"bd-vc-peer-counter\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'callpoint'", ":", "u'pwm-bd-vc-peer-counter'", ",", "u'cli-suppress-show-path'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-pwm-operational'", ",", "defining_module", "=", "'brocade-pwm-operational'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "False", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"bd_vc_peer_counter must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=bd_vc_peer_counter.bd_vc_peer_counter, is_container='container', presence=False, yang_name=\"bd-vc-peer-counter\", rest_name=\"bd-vc-peer-counter\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'pwm-bd-vc-peer-counter', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-pwm-operational', defining_module='brocade-pwm-operational', yang_type='container', is_config=False)\"\"\"", ",", "}", ")", "self", ".", "__bd_vc_peer_counter", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for bd_vc_peer_counter, mapped from YANG variable /bd_vc_peer_state/bd_vc_peer_counter (container) If this variable is read-only (config: false) in the source YANG file, then _set_bd_vc_peer_counter is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_bd_vc_peer_counter() directly. YANG Description: VC peer counters
[ "Setter", "method", "for", "bd_vc_peer_counter", "mapped", "from", "YANG", "variable", "/", "bd_vc_peer_state", "/", "bd_vc_peer_counter", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_bd_vc_peer_counter", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_bd_vc_peer_counter", "()", "directly", "." ]
python
train
77.833333
ensime/ensime-vim
ensime_shared/client.py
https://github.com/ensime/ensime-vim/blob/caa734e84f002b25446c615706283a74edd4ecfe/ensime_shared/client.py#L272-L279
def set_position(self, decl_pos): """Set editor position from ENSIME declPos data.""" if decl_pos["typehint"] == "LineSourcePosition": self.editor.set_cursor(decl_pos['line'], 0) else: # OffsetSourcePosition point = decl_pos["offset"] row, col = self.editor.point2pos(point + 1) self.editor.set_cursor(row, col)
[ "def", "set_position", "(", "self", ",", "decl_pos", ")", ":", "if", "decl_pos", "[", "\"typehint\"", "]", "==", "\"LineSourcePosition\"", ":", "self", ".", "editor", ".", "set_cursor", "(", "decl_pos", "[", "'line'", "]", ",", "0", ")", "else", ":", "# OffsetSourcePosition", "point", "=", "decl_pos", "[", "\"offset\"", "]", "row", ",", "col", "=", "self", ".", "editor", ".", "point2pos", "(", "point", "+", "1", ")", "self", ".", "editor", ".", "set_cursor", "(", "row", ",", "col", ")" ]
Set editor position from ENSIME declPos data.
[ "Set", "editor", "position", "from", "ENSIME", "declPos", "data", "." ]
python
train
47.125
Erotemic/utool
utool/util_alg.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L2632-L2728
def solve_boolexpr(): """ sudo pip install git+https://github.com/tpircher/quine-mccluskey.git sudo pip uninstall quine_mccluskey pip uninstall quine_mccluskey pip install git+https://github.com/tpircher/quine-mccluskey.git Args: varnames (?): Returns: ?: CommandLine: python -m utool.util_alg solve_boolexpr --show Example: >>> # DISABLE_DOCTEST >>> from utool.util_alg import * # NOQA >>> import utool as ut >>> varnames = ['sa', 'said', 'aid'] >>> result = solve_boolexpr() >>> print(result) """ #false_cases = [ # int('111', 2), # int('011', 2), # int('001', 2), #] #true_cases = list(set(range(2 ** 3)) - set(false_cases)) varnames = ['sa', 'said', 'aid'] #import utool as ut truth_table = [ dict(sa=True, said=True, aid=True, output=False), dict(sa=True, said=True, aid=False, output=True), dict(sa=True, said=False, aid=True, output=True), dict(sa=True, said=False, aid=False, output=True), dict(sa=False, said=True, aid=True, output=False), dict(sa=False, said=True, aid=False, output=True), dict(sa=False, said=False, aid=True, output=False), dict(sa=False, said=False, aid=False, output=True), ] truth_tuples = [ut.dict_take(d, varnames) for d in truth_table] outputs = [d['output'] for d in truth_table] true_tuples = ut.compress(truth_tuples, outputs) #false_tuples = ut.compress(truth_tuples, ut.not_list(outputs)) true_cases = [''.join([str(int(t)) for t in tup]) for tup in true_tuples] true_cases = [''.join([str(int(t)) for t in tup]) for tup in true_tuples] #truth_nums = [int(s, 2) for s in true_cases] from quine_mccluskey.qm import QuineMcCluskey qm = QuineMcCluskey(use_xor=False) result = qm.simplify_los(true_cases, num_bits=len(varnames)) print(result) #ut.chr_range(3) #symbol_map = { # '-': '', # '1': '{v}', # '0': 'not {v}', # '^': '^', #} #'-' don't care: this bit can be either zero or one. #'1' the bit must be one. #'0' the bit must be zero. #'^' all bits with the caret are XOR-ed together. #'~' all bits with the tilde are XNOR-ed together. #formulas = [[symbol_map[r].format(v=v) for v, r in zip(varnames, rs)] for rs in result] grouped_terms = [dict(ut.group_items(varnames, rs)) for rs in result] def parenjoin(char, list_): if len(list_) == 0: return '' else: return '(' + char.join(list_) + ')' expanded_terms = [ ( term.get('1', []) + ['(not ' + b + ')' for b in term.get('0', [])] + [ parenjoin(' ^ ', term.get('^', [])), parenjoin(' ~ ', term.get('~', [])), ] ) for term in grouped_terms ] final_terms = [[t for t in term if t] for term in expanded_terms] products = [parenjoin(' and ', [f for f in form if f]) for form in final_terms] final_expr = ' or '.join(products) print(final_expr)
[ "def", "solve_boolexpr", "(", ")", ":", "#false_cases = [", "# int('111', 2),", "# int('011', 2),", "# int('001', 2),", "#]", "#true_cases = list(set(range(2 ** 3)) - set(false_cases))", "varnames", "=", "[", "'sa'", ",", "'said'", ",", "'aid'", "]", "#import utool as ut", "truth_table", "=", "[", "dict", "(", "sa", "=", "True", ",", "said", "=", "True", ",", "aid", "=", "True", ",", "output", "=", "False", ")", ",", "dict", "(", "sa", "=", "True", ",", "said", "=", "True", ",", "aid", "=", "False", ",", "output", "=", "True", ")", ",", "dict", "(", "sa", "=", "True", ",", "said", "=", "False", ",", "aid", "=", "True", ",", "output", "=", "True", ")", ",", "dict", "(", "sa", "=", "True", ",", "said", "=", "False", ",", "aid", "=", "False", ",", "output", "=", "True", ")", ",", "dict", "(", "sa", "=", "False", ",", "said", "=", "True", ",", "aid", "=", "True", ",", "output", "=", "False", ")", ",", "dict", "(", "sa", "=", "False", ",", "said", "=", "True", ",", "aid", "=", "False", ",", "output", "=", "True", ")", ",", "dict", "(", "sa", "=", "False", ",", "said", "=", "False", ",", "aid", "=", "True", ",", "output", "=", "False", ")", ",", "dict", "(", "sa", "=", "False", ",", "said", "=", "False", ",", "aid", "=", "False", ",", "output", "=", "True", ")", ",", "]", "truth_tuples", "=", "[", "ut", ".", "dict_take", "(", "d", ",", "varnames", ")", "for", "d", "in", "truth_table", "]", "outputs", "=", "[", "d", "[", "'output'", "]", "for", "d", "in", "truth_table", "]", "true_tuples", "=", "ut", ".", "compress", "(", "truth_tuples", ",", "outputs", ")", "#false_tuples = ut.compress(truth_tuples, ut.not_list(outputs))", "true_cases", "=", "[", "''", ".", "join", "(", "[", "str", "(", "int", "(", "t", ")", ")", "for", "t", "in", "tup", "]", ")", "for", "tup", "in", "true_tuples", "]", "true_cases", "=", "[", "''", ".", "join", "(", "[", "str", "(", "int", "(", "t", ")", ")", "for", "t", "in", "tup", "]", ")", "for", "tup", "in", "true_tuples", "]", "#truth_nums = [int(s, 2) for s in true_cases]", "from", "quine_mccluskey", ".", "qm", "import", "QuineMcCluskey", "qm", "=", "QuineMcCluskey", "(", "use_xor", "=", "False", ")", "result", "=", "qm", ".", "simplify_los", "(", "true_cases", ",", "num_bits", "=", "len", "(", "varnames", ")", ")", "print", "(", "result", ")", "#ut.chr_range(3)", "#symbol_map = {", "# '-': '',", "# '1': '{v}',", "# '0': 'not {v}',", "# '^': '^',", "#}", "#'-' don't care: this bit can be either zero or one.", "#'1' the bit must be one.", "#'0' the bit must be zero.", "#'^' all bits with the caret are XOR-ed together.", "#'~' all bits with the tilde are XNOR-ed together.", "#formulas = [[symbol_map[r].format(v=v) for v, r in zip(varnames, rs)] for rs in result]", "grouped_terms", "=", "[", "dict", "(", "ut", ".", "group_items", "(", "varnames", ",", "rs", ")", ")", "for", "rs", "in", "result", "]", "def", "parenjoin", "(", "char", ",", "list_", ")", ":", "if", "len", "(", "list_", ")", "==", "0", ":", "return", "''", "else", ":", "return", "'('", "+", "char", ".", "join", "(", "list_", ")", "+", "')'", "expanded_terms", "=", "[", "(", "term", ".", "get", "(", "'1'", ",", "[", "]", ")", "+", "[", "'(not '", "+", "b", "+", "')'", "for", "b", "in", "term", ".", "get", "(", "'0'", ",", "[", "]", ")", "]", "+", "[", "parenjoin", "(", "' ^ '", ",", "term", ".", "get", "(", "'^'", ",", "[", "]", ")", ")", ",", "parenjoin", "(", "' ~ '", ",", "term", ".", "get", "(", "'~'", ",", "[", "]", ")", ")", ",", "]", ")", "for", "term", "in", "grouped_terms", "]", "final_terms", "=", 
"[", "[", "t", "for", "t", "in", "term", "if", "t", "]", "for", "term", "in", "expanded_terms", "]", "products", "=", "[", "parenjoin", "(", "' and '", ",", "[", "f", "for", "f", "in", "form", "if", "f", "]", ")", "for", "form", "in", "final_terms", "]", "final_expr", "=", "' or '", ".", "join", "(", "products", ")", "print", "(", "final_expr", ")" ]
sudo pip install git+https://github.com/tpircher/quine-mccluskey.git sudo pip uninstall quine_mccluskey pip uninstall quine_mccluskey pip install git+https://github.com/tpircher/quine-mccluskey.git Args: varnames (?): Returns: ?: CommandLine: python -m utool.util_alg solve_boolexpr --show Example: >>> # DISABLE_DOCTEST >>> from utool.util_alg import * # NOQA >>> import utool as ut >>> varnames = ['sa', 'said', 'aid'] >>> result = solve_boolexpr() >>> print(result)
[ "sudo", "pip", "install", "git", "+", "https", ":", "//", "github", ".", "com", "/", "tpircher", "/", "quine", "-", "mccluskey", ".", "git", "sudo", "pip", "uninstall", "quine_mccluskey", "pip", "uninstall", "quine_mccluskey" ]
python
train
31.587629
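The Quine-McCluskey call at the heart of the record can be exercised directly with the same quine_mccluskey package the code imports. The minterms below are the True rows of the (sa, said, aid) truth table above, written as bit strings in that variable order:

from quine_mccluskey.qm import QuineMcCluskey

true_cases = ['110', '101', '100', '010', '000']
qm = QuineMcCluskey(use_xor=False)
print(qm.simplify_los(true_cases, num_bits=3))
# A set of patterns over '0', '1' and '-' (don't care), here {'--0', '10-'},
# i.e. (not aid) or (sa and not said).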
Gandi/gandi.cli
gandi/cli/modules/disk.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/disk.py#L236-L272
def create(cls, name, vm, size, snapshotprofile, datacenter, source, disk_type='data', background=False): """ Create a disk and attach it to a vm. """ if isinstance(size, tuple): prefix, size = size if source: size = None disk_params = cls.disk_param(name, size, snapshotprofile) disk_params['datacenter_id'] = int(Datacenter.usable_id(datacenter)) disk_params['type'] = disk_type if source: disk_id = int(Image.usable_id(source, disk_params['datacenter_id'])) result = cls.call('hosting.disk.create_from', disk_params, disk_id) else: result = cls.call('hosting.disk.create', disk_params) if background and not vm: return result # interactive mode, run a progress bar cls.echo('Creating your disk.') cls.display_progress(result) if not vm: return vm_id = Iaas.usable_id(vm) result = cls._attach(result['disk_id'], vm_id) if background: return result cls.echo('Attaching your disk.') cls.display_progress(result)
[ "def", "create", "(", "cls", ",", "name", ",", "vm", ",", "size", ",", "snapshotprofile", ",", "datacenter", ",", "source", ",", "disk_type", "=", "'data'", ",", "background", "=", "False", ")", ":", "if", "isinstance", "(", "size", ",", "tuple", ")", ":", "prefix", ",", "size", "=", "size", "if", "source", ":", "size", "=", "None", "disk_params", "=", "cls", ".", "disk_param", "(", "name", ",", "size", ",", "snapshotprofile", ")", "disk_params", "[", "'datacenter_id'", "]", "=", "int", "(", "Datacenter", ".", "usable_id", "(", "datacenter", ")", ")", "disk_params", "[", "'type'", "]", "=", "disk_type", "if", "source", ":", "disk_id", "=", "int", "(", "Image", ".", "usable_id", "(", "source", ",", "disk_params", "[", "'datacenter_id'", "]", ")", ")", "result", "=", "cls", ".", "call", "(", "'hosting.disk.create_from'", ",", "disk_params", ",", "disk_id", ")", "else", ":", "result", "=", "cls", ".", "call", "(", "'hosting.disk.create'", ",", "disk_params", ")", "if", "background", "and", "not", "vm", ":", "return", "result", "# interactive mode, run a progress bar", "cls", ".", "echo", "(", "'Creating your disk.'", ")", "cls", ".", "display_progress", "(", "result", ")", "if", "not", "vm", ":", "return", "vm_id", "=", "Iaas", ".", "usable_id", "(", "vm", ")", "result", "=", "cls", ".", "_attach", "(", "result", "[", "'disk_id'", "]", ",", "vm_id", ")", "if", "background", ":", "return", "result", "cls", ".", "echo", "(", "'Attaching your disk.'", ")", "cls", ".", "display_progress", "(", "result", ")" ]
Create a disk and attach it to a vm.
[ "Create", "a", "disk", "and", "attach", "it", "to", "a", "vm", "." ]
python
train
31.891892
dcwatson/drill
drill.py
https://github.com/dcwatson/drill/blob/b8a30ec0fd5b5bf55154bd44c1c75f5f5945691b/drill.py#L307-L312
def items(self): """ A generator yielding ``(key, value)`` attribute pairs, sorted by key name. """ for key in sorted(self.attrs): yield key, self.attrs[key]
[ "def", "items", "(", "self", ")", ":", "for", "key", "in", "sorted", "(", "self", ".", "attrs", ")", ":", "yield", "key", ",", "self", ".", "attrs", "[", "key", "]" ]
A generator yielding ``(key, value)`` attribute pairs, sorted by key name.
[ "A", "generator", "yielding", "(", "key", "value", ")", "attribute", "pairs", "sorted", "by", "key", "name", "." ]
python
valid
32.666667
divio/cmsplugin-filer
cmsplugin_filer_teaser/cms_plugins.py
https://github.com/divio/cmsplugin-filer/blob/4f9b0307dd768852ead64e651b743a165b3efccb/cmsplugin_filer_teaser/cms_plugins.py#L42-L75
def _get_thumbnail_options(self, context, instance): """ Return the size and options of the thumbnail that should be inserted """ width, height = None, None subject_location = False placeholder_width = context.get('width', None) placeholder_height = context.get('height', None) if instance.use_autoscale and placeholder_width: # use the placeholder width as a hint for sizing width = int(placeholder_width) if instance.use_autoscale and placeholder_height: height = int(placeholder_height) elif instance.width: width = instance.width if instance.height: height = instance.height if instance.image: if instance.image.subject_location: subject_location = instance.image.subject_location if not height and width: # height was not externally defined: use ratio to scale it by the width height = int(float(width) * float(instance.image.height) / float(instance.image.width)) if not width and height: # width was not externally defined: use ratio to scale it by the height width = int(float(height) * float(instance.image.width) / float(instance.image.height)) if not width: # width is still not defined. fallback the actual image width width = instance.image.width if not height: # height is still not defined. fallback the actual image height height = instance.image.height return {'size': (width, height), 'subject_location': subject_location}
[ "def", "_get_thumbnail_options", "(", "self", ",", "context", ",", "instance", ")", ":", "width", ",", "height", "=", "None", ",", "None", "subject_location", "=", "False", "placeholder_width", "=", "context", ".", "get", "(", "'width'", ",", "None", ")", "placeholder_height", "=", "context", ".", "get", "(", "'height'", ",", "None", ")", "if", "instance", ".", "use_autoscale", "and", "placeholder_width", ":", "# use the placeholder width as a hint for sizing", "width", "=", "int", "(", "placeholder_width", ")", "if", "instance", ".", "use_autoscale", "and", "placeholder_height", ":", "height", "=", "int", "(", "placeholder_height", ")", "elif", "instance", ".", "width", ":", "width", "=", "instance", ".", "width", "if", "instance", ".", "height", ":", "height", "=", "instance", ".", "height", "if", "instance", ".", "image", ":", "if", "instance", ".", "image", ".", "subject_location", ":", "subject_location", "=", "instance", ".", "image", ".", "subject_location", "if", "not", "height", "and", "width", ":", "# height was not externally defined: use ratio to scale it by the width", "height", "=", "int", "(", "float", "(", "width", ")", "*", "float", "(", "instance", ".", "image", ".", "height", ")", "/", "float", "(", "instance", ".", "image", ".", "width", ")", ")", "if", "not", "width", "and", "height", ":", "# width was not externally defined: use ratio to scale it by the height", "width", "=", "int", "(", "float", "(", "height", ")", "*", "float", "(", "instance", ".", "image", ".", "width", ")", "/", "float", "(", "instance", ".", "image", ".", "height", ")", ")", "if", "not", "width", ":", "# width is still not defined. fallback the actual image width", "width", "=", "instance", ".", "image", ".", "width", "if", "not", "height", ":", "# height is still not defined. fallback the actual image height", "height", "=", "instance", ".", "image", ".", "height", "return", "{", "'size'", ":", "(", "width", ",", "height", ")", ",", "'subject_location'", ":", "subject_location", "}" ]
Return the size and options of the thumbnail that should be inserted
[ "Return", "the", "size", "and", "options", "of", "the", "thumbnail", "that", "should", "be", "inserted" ]
python
train
49.735294
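The aspect-ratio arithmetic above in isolation: scaling a 400x300 image to fit a 200-pixel-wide placeholder yields a 150-pixel height.

image_width, image_height = 400, 300
width = 200  # hint taken from the placeholder
height = int(float(width) * float(image_height) / float(image_width))
print(height)  # 150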
f3at/feat
tools/pep8.py
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/tools/pep8.py#L510-L518
def readline_check_physical(self): """ Check and return the next physical line. This method can be used to feed tokenize.generate_tokens. """ line = self.readline() if line: self.check_physical(line) return line
[ "def", "readline_check_physical", "(", "self", ")", ":", "line", "=", "self", ".", "readline", "(", ")", "if", "line", ":", "self", ".", "check_physical", "(", "line", ")", "return", "line" ]
Check and return the next physical line. This method can be used to feed tokenize.generate_tokens.
[ "Check", "and", "return", "the", "next", "physical", "line", ".", "This", "method", "can", "be", "used", "to", "feed", "tokenize", ".", "generate_tokens", "." ]
python
train
30.111111
juju/charm-helpers
charmhelpers/fetch/centos.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/fetch/centos.py#L140-L171
def _run_yum_command(cmd, fatal=False): """Run an YUM command. Checks the output and retry if the fatal flag is set to True. :param: cmd: str: The yum command to run. :param: fatal: bool: Whether the command's output should be checked and retried. """ env = os.environ.copy() if fatal: retry_count = 0 result = None # If the command is considered "fatal", we need to retry if the yum # lock was not acquired. while result is None or result == YUM_NO_LOCK: try: result = subprocess.check_call(cmd, env=env) except subprocess.CalledProcessError as e: retry_count = retry_count + 1 if retry_count > YUM_NO_LOCK_RETRY_COUNT: raise result = e.returncode log("Couldn't acquire YUM lock. Will retry in {} seconds." "".format(YUM_NO_LOCK_RETRY_DELAY)) time.sleep(YUM_NO_LOCK_RETRY_DELAY) else: subprocess.call(cmd, env=env)
[ "def", "_run_yum_command", "(", "cmd", ",", "fatal", "=", "False", ")", ":", "env", "=", "os", ".", "environ", ".", "copy", "(", ")", "if", "fatal", ":", "retry_count", "=", "0", "result", "=", "None", "# If the command is considered \"fatal\", we need to retry if the yum", "# lock was not acquired.", "while", "result", "is", "None", "or", "result", "==", "YUM_NO_LOCK", ":", "try", ":", "result", "=", "subprocess", ".", "check_call", "(", "cmd", ",", "env", "=", "env", ")", "except", "subprocess", ".", "CalledProcessError", "as", "e", ":", "retry_count", "=", "retry_count", "+", "1", "if", "retry_count", ">", "YUM_NO_LOCK_RETRY_COUNT", ":", "raise", "result", "=", "e", ".", "returncode", "log", "(", "\"Couldn't acquire YUM lock. Will retry in {} seconds.\"", "\"\"", ".", "format", "(", "YUM_NO_LOCK_RETRY_DELAY", ")", ")", "time", ".", "sleep", "(", "YUM_NO_LOCK_RETRY_DELAY", ")", "else", ":", "subprocess", ".", "call", "(", "cmd", ",", "env", "=", "env", ")" ]
Run a YUM command. Checks the output and retries if the fatal flag is set to True. :param: cmd: str: The yum command to run. :param: fatal: bool: Whether the command's output should be checked and retried.
[ "Run", "an", "YUM", "command", "." ]
python
train
32.4375
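The retry loop above in generic form. YUM_NO_LOCK and the retry constants are module-level values in charmhelpers; the numbers below are stand-ins for illustration, not the real ones:

import subprocess
import time

YUM_NO_LOCK = 200              # stand-in for the lock-held exit code
YUM_NO_LOCK_RETRY_COUNT = 30   # stand-in retry budget
YUM_NO_LOCK_RETRY_DELAY = 10   # stand-in delay in seconds

def run_fatal(cmd):
    retry_count = 0
    result = None
    # Keep retrying while the command fails with the lock-held exit code.
    while result is None or result == YUM_NO_LOCK:
        try:
            result = subprocess.check_call(cmd)
        except subprocess.CalledProcessError as e:
            retry_count += 1
            if retry_count > YUM_NO_LOCK_RETRY_COUNT:
                raise
            result = e.returncode
            time.sleep(YUM_NO_LOCK_RETRY_DELAY)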
Iotic-Labs/py-IoticAgent
src/IoticAgent/IOT/Client.py
https://github.com/Iotic-Labs/py-IoticAgent/blob/893e8582ad1dacfe32dfc0ee89452bbd6f57d28d/src/IoticAgent/IOT/Client.py#L621-L627
def _get_point_data_handler_for(self, point): """Used by point instances and data callbacks""" with self.__point_data_handlers: try: return self.__point_data_handlers[point] except KeyError: return self.__point_data_handlers.setdefault(point, PointDataObjectHandler(point, self))
[ "def", "_get_point_data_handler_for", "(", "self", ",", "point", ")", ":", "with", "self", ".", "__point_data_handlers", ":", "try", ":", "return", "self", ".", "__point_data_handlers", "[", "point", "]", "except", "KeyError", ":", "return", "self", ".", "__point_data_handlers", ".", "setdefault", "(", "point", ",", "PointDataObjectHandler", "(", "point", ",", "self", ")", ")" ]
Used by point instances and data callbacks
[ "Used", "by", "point", "instances", "and", "data", "callbacks" ]
python
train
49.285714
juju/charm-helpers
charmhelpers/contrib/openstack/audits/openstack_security_guide.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/audits/openstack_security_guide.py#L104-L116
def _stat(file): """ Get the Ownership information from a file. :param file: The path to a file to stat :type file: str :returns: owner, group, and mode of the specified file :rtype: Ownership :raises subprocess.CalledProcessError: If the underlying stat fails """ out = subprocess.check_output( ['stat', '-c', '%U %G %a', file]).decode('utf-8') return Ownership(*out.strip().split(' '))
[ "def", "_stat", "(", "file", ")", ":", "out", "=", "subprocess", ".", "check_output", "(", "[", "'stat'", ",", "'-c'", ",", "'%U %G %a'", ",", "file", "]", ")", ".", "decode", "(", "'utf-8'", ")", "return", "Ownership", "(", "*", "out", ".", "strip", "(", ")", ".", "split", "(", "' '", ")", ")" ]
Get the Ownership information from a file. :param file: The path to a file to stat :type file: str :returns: owner, group, and mode of the specified file :rtype: Ownership :raises subprocess.CalledProcessError: If the underlying stat fails
[ "Get", "the", "Ownership", "information", "from", "a", "file", "." ]
python
train
32.615385
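Ownership in the record is presumably a namedtuple with owner, group and mode fields; a self-contained rendering of the same parsing (GNU stat flags, so Linux-specific):

import collections
import subprocess

Ownership = collections.namedtuple('Ownership', ['owner', 'group', 'mode'])

def stat_file(path):
    # GNU stat: %U user name, %G group name, %a mode in octal
    out = subprocess.check_output(['stat', '-c', '%U %G %a', path]).decode('utf-8')
    return Ownership(*out.strip().split(' '))

print(stat_file('/etc/passwd'))  # e.g. Ownership(owner='root', group='root', mode='644')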
totalgood/nlpia
src/nlpia/loaders.py
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L549-L567
def series_strip(series, startswith=None, endswith=None, startsorendswith=None, ignorecase=True): """ Strip a suffix/prefix str (`endswith`/`startswith` str) from a `df` columns or pd.Series of type str """ if ignorecase: mask = series.str.lower() endswith = endswith.lower() else: mask = series if not (startsorendswith or endswith or startswith): logger.warning('In series_strip(): You must specify endswith, startswith, or startsorendswith string arguments.') return series if startsorendswith: startswith = endswith = startsorendswith if endswith: mask = mask.str.endswith(endswith) series[mask] = series[mask].str[:-len(endswith)] if startswith: mask = mask.str.endswith(startswith) series[mask] = series[mask].str[len(startswith):] return series
[ "def", "series_strip", "(", "series", ",", "startswith", "=", "None", ",", "endswith", "=", "None", ",", "startsorendswith", "=", "None", ",", "ignorecase", "=", "True", ")", ":", "if", "ignorecase", ":", "mask", "=", "series", ".", "str", ".", "lower", "(", ")", "endswith", "=", "endswith", ".", "lower", "(", ")", "else", ":", "mask", "=", "series", "if", "not", "(", "startsorendswith", "or", "endswith", "or", "startswith", ")", ":", "logger", ".", "warning", "(", "'In series_strip(): You must specify endswith, startswith, or startsorendswith string arguments.'", ")", "return", "series", "if", "startsorendswith", ":", "startswith", "=", "endswith", "=", "startsorendswith", "if", "endswith", ":", "mask", "=", "mask", ".", "str", ".", "endswith", "(", "endswith", ")", "series", "[", "mask", "]", "=", "series", "[", "mask", "]", ".", "str", "[", ":", "-", "len", "(", "endswith", ")", "]", "if", "startswith", ":", "mask", "=", "mask", ".", "str", ".", "endswith", "(", "startswith", ")", "series", "[", "mask", "]", "=", "series", "[", "mask", "]", ".", "str", "[", "len", "(", "startswith", ")", ":", "]", "return", "series" ]
Strip a suffix/prefix str (`endswith`/`startswith` str) from a `df` columns or pd.Series of type str
[ "Strip", "a", "suffix", "/", "prefix", "str", "(", "endswith", "/", "startswith", "str", ")", "from", "a", "df", "columns", "or", "pd", ".", "Series", "of", "type", "str" ]
python
train
44.473684
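Two things worth flagging in the record above: with ignorecase=True and endswith=None, the endswith.lower() call raises, and the startswith branch tests str.endswith where str.startswith is presumably intended. A hedged, corrected sketch of just the prefix branch:

import pandas as pd

def strip_prefix(series, prefix, ignorecase=True):
    # Match case-insensitively, but slice the original (unlowered) values.
    haystack = series.str.lower() if ignorecase else series
    needle = prefix.lower() if ignorecase else prefix
    mask = haystack.str.startswith(needle)
    series = series.copy()
    series[mask] = series[mask].str[len(prefix):]
    return series

s = pd.Series(['FooBar', 'Bazfoo', 'foobaz'])
print(strip_prefix(s, 'foo').tolist())  # ['Bar', 'Bazfoo', 'baz']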
pymupdf/PyMuPDF
fitz/fitz.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L2488-L2492
def _forget_page(self, page): """Remove a page from document page dict.""" pid = id(page) if pid in self._page_refs: self._page_refs[pid] = None
[ "def", "_forget_page", "(", "self", ",", "page", ")", ":", "pid", "=", "id", "(", "page", ")", "if", "pid", "in", "self", ".", "_page_refs", ":", "self", ".", "_page_refs", "[", "pid", "]", "=", "None" ]
Remove a page from document page dict.
[ "Remove", "a", "page", "from", "document", "page", "dict", "." ]
python
train
35.2
pyvisa/pyvisa
pyvisa/shell.py
https://github.com/pyvisa/pyvisa/blob/b8b2d4371e1f00782856aa9176ff1ced6bcb3798/pyvisa/shell.py#L121-L136
def do_list(self, args): """List all connected resources.""" try: resources = self.resource_manager.list_resources_info() except Exception as e: print(e) else: self.resources = [] for ndx, (resource_name, value) in enumerate(resources.items()): if not args: print('({0:2d}) {1}'.format(ndx, resource_name)) if value.alias: print(' alias: {}'.format(value.alias)) self.resources.append((resource_name, value.alias or None))
[ "def", "do_list", "(", "self", ",", "args", ")", ":", "try", ":", "resources", "=", "self", ".", "resource_manager", ".", "list_resources_info", "(", ")", "except", "Exception", "as", "e", ":", "print", "(", "e", ")", "else", ":", "self", ".", "resources", "=", "[", "]", "for", "ndx", ",", "(", "resource_name", ",", "value", ")", "in", "enumerate", "(", "resources", ".", "items", "(", ")", ")", ":", "if", "not", "args", ":", "print", "(", "'({0:2d}) {1}'", ".", "format", "(", "ndx", ",", "resource_name", ")", ")", "if", "value", ".", "alias", ":", "print", "(", "' alias: {}'", ".", "format", "(", "value", ".", "alias", ")", ")", "self", ".", "resources", ".", "append", "(", "(", "resource_name", ",", "value", ".", "alias", "or", "None", ")", ")" ]
List all connected resources.
[ "List", "all", "connected", "resources", "." ]
python
train
36.8125
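The resource_manager in the record is PyVISA's ResourceManager; the same listing outside the shell looks roughly like this (a VISA backend must be installed):

import pyvisa

rm = pyvisa.ResourceManager()
for ndx, (name, info) in enumerate(rm.list_resources_info().items()):
    print('({0:2d}) {1}'.format(ndx, name))
    if info.alias:
        print('     alias: {}'.format(info.alias))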
pyviz/holoviews
holoviews/plotting/links.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/links.py#L62-L78
def link(self): """ Registers the Link """ if self.source in self.registry: links = self.registry[self.source] params = { k: v for k, v in self.get_param_values() if k != 'name'} for link in links: link_params = { k: v for k, v in link.get_param_values() if k != 'name'} if (type(link) is type(self) and link.source is self.source and link.target is self.target and params == link_params): return self.registry[self.source].append(self) else: self.registry[self.source] = [self]
[ "def", "link", "(", "self", ")", ":", "if", "self", ".", "source", "in", "self", ".", "registry", ":", "links", "=", "self", ".", "registry", "[", "self", ".", "source", "]", "params", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "self", ".", "get_param_values", "(", ")", "if", "k", "!=", "'name'", "}", "for", "link", "in", "links", ":", "link_params", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "link", ".", "get_param_values", "(", ")", "if", "k", "!=", "'name'", "}", "if", "(", "type", "(", "link", ")", "is", "type", "(", "self", ")", "and", "link", ".", "source", "is", "self", ".", "source", "and", "link", ".", "target", "is", "self", ".", "target", "and", "params", "==", "link_params", ")", ":", "return", "self", ".", "registry", "[", "self", ".", "source", "]", ".", "append", "(", "self", ")", "else", ":", "self", ".", "registry", "[", "self", ".", "source", "]", "=", "[", "self", "]" ]
Registers the Link
[ "Registers", "the", "Link" ]
python
train
39.411765
denisenkom/django-sqlserver
sqlserver/base.py
https://github.com/denisenkom/django-sqlserver/blob/f5d5dc8637799746f1bd11bd8c479d3acd468581/sqlserver/base.py#L87-L92
def __get_dbms_version(self, make_connection=True): """ Returns the 'DBMS Version' string """ major, minor, _, _ = self.get_server_version(make_connection=make_connection) return '{}.{}'.format(major, minor)
[ "def", "__get_dbms_version", "(", "self", ",", "make_connection", "=", "True", ")", ":", "major", ",", "minor", ",", "_", ",", "_", "=", "self", ".", "get_server_version", "(", "make_connection", "=", "make_connection", ")", "return", "'{}.{}'", ".", "format", "(", "major", ",", "minor", ")" ]
Returns the 'DBMS Version' string
[ "Returns", "the", "DBMS", "Version", "string" ]
python
train
40.333333
Esri/ArcREST
src/arcrest/manageportal/administration.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageportal/administration.py#L745-L754
def SSLCertificates(self): """ Lists certificates. """ url = self._url + "/SSLCertificate" params = {"f" : "json"} return self._post(url=url, param_dict=params, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
[ "def", "SSLCertificates", "(", "self", ")", ":", "url", "=", "self", ".", "_url", "+", "\"/SSLCertificate\"", "params", "=", "{", "\"f\"", ":", "\"json\"", "}", "return", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")" ]
Lists certificates.
[ "Lists", "certificates", "." ]
python
train
33.3
inasafe/inasafe
safe/report/extractors/action_notes.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/report/extractors/action_notes.py#L54-L201
def notes_assumptions_extractor(impact_report, component_metadata): """Extracting notes and assumptions of the exposure layer :param impact_report: the impact report that acts as a proxy to fetch all the data that extractor needed :type impact_report: safe.report.impact_report.ImpactReport :param component_metadata: the component metadata. Used to obtain information about the component we want to render :type component_metadata: safe.report.report_metadata. ReportComponentsMetadata :return: context for rendering phase :rtype: dict .. versionadded:: 4.0 """ context = {} provenance = impact_report.impact_function.provenance extra_args = component_metadata.extra_args hazard_keywords = provenance['hazard_keywords'] exposure_keywords = provenance['exposure_keywords'] exposure_type = definition(exposure_keywords['exposure']) analysis_note_dict = resolve_from_dictionary(extra_args, 'analysis_notes') context['items'] = [analysis_note_dict] context['component_key'] = component_metadata.key context['header'] = resolve_from_dictionary(extra_args, 'header') context['items'] += provenance['notes'] # Get hazard classification hazard_classification = definition( active_classification(hazard_keywords, exposure_keywords['exposure'])) # Check hazard affected class affected_classes = [] for hazard_class in hazard_classification['classes']: if exposure_keywords['exposure'] == exposure_population['key']: # Taking from profile is_affected_class = is_affected( hazard=hazard_keywords['hazard'], classification=hazard_classification['key'], hazard_class=hazard_class['key'], ) if is_affected_class: affected_classes.append(hazard_class) else: if hazard_class.get('affected', False): affected_classes.append(hazard_class) if affected_classes: affected_note_dict = resolve_from_dictionary( extra_args, 'affected_note_format') # generate hazard classes hazard_classes = ', '.join([ c['name'] for c in affected_classes ]) for index, affected_note in enumerate(affected_note_dict['item_list']): affected_note_dict['item_list'][index] = ( affected_note.format(hazard_classes=hazard_classes) ) context['items'].append(affected_note_dict) # Check hazard have displacement rate for hazard_class in hazard_classification['classes']: if hazard_class.get('displacement_rate', 0) > 0: have_displacement_rate = True break else: have_displacement_rate = False # Only show displacement note if analysis about population exposure if have_displacement_rate and exposure_type == exposure_population: # add notes for displacement rate used displacement_note_dict = resolve_from_dictionary( extra_args, 'displacement_rates_note_format') # generate rate description displacement_rates_note_format = resolve_from_dictionary( extra_args, 'hazard_displacement_rates_note_format') displacement_rates_note = [] for hazard_class in hazard_classification['classes']: the_hazard_class = deepcopy(hazard_class) the_hazard_class['displacement_rate'] = get_displacement_rate( hazard=hazard_keywords['hazard'], classification=hazard_classification['key'], hazard_class=the_hazard_class['key'] ) displacement_rates_note.append( displacement_rates_note_format.format(**the_hazard_class)) rate_description = ', '.join(displacement_rates_note) for index, displacement_note in enumerate( displacement_note_dict['item_list']): displacement_note_dict['item_list'][index] = ( displacement_note.format(rate_description=rate_description) ) context['items'].append(displacement_note_dict) # Check hazard have displacement rate have_fatality_rate = False for hazard_class in hazard_classification['classes']: if hazard_class.get('fatality_rate', None) is not None and \ hazard_class.get('fatality_rate', 0) > 0: have_fatality_rate = True break if have_fatality_rate and exposure_type == exposure_population: # add notes for fatality rate used fatality_note_dict = resolve_from_dictionary( extra_args, 'fatality_rates_note_format') # generate rate description fatality_rates_note_format = resolve_from_dictionary( extra_args, 'hazard_fatality_rates_note_format') fatality_rates_note = [] for hazard_class in hazard_classification['classes']: # we make a copy here because we don't want to # change the real value. copy_of_hazard_class = dict(hazard_class) if copy_of_hazard_class['fatality_rate'] is None or \ copy_of_hazard_class['fatality_rate'] <= 0: copy_of_hazard_class['fatality_rate'] = 0 else: # we want to show the rate as a scientific notation copy_of_hazard_class['fatality_rate'] = ( html_scientific_notation_rate( copy_of_hazard_class['fatality_rate'])) fatality_rates_note.append( fatality_rates_note_format.format(**copy_of_hazard_class)) rate_description = ', '.join(fatality_rates_note) for index, fatality_note in enumerate(fatality_note_dict['item_list']): fatality_note_dict['item_list'][index] = ( fatality_note.format(rate_description=rate_description) ) context['items'].append(fatality_note_dict) return context
[ "def", "notes_assumptions_extractor", "(", "impact_report", ",", "component_metadata", ")", ":", "context", "=", "{", "}", "provenance", "=", "impact_report", ".", "impact_function", ".", "provenance", "extra_args", "=", "component_metadata", ".", "extra_args", "hazard_keywords", "=", "provenance", "[", "'hazard_keywords'", "]", "exposure_keywords", "=", "provenance", "[", "'exposure_keywords'", "]", "exposure_type", "=", "definition", "(", "exposure_keywords", "[", "'exposure'", "]", ")", "analysis_note_dict", "=", "resolve_from_dictionary", "(", "extra_args", ",", "'analysis_notes'", ")", "context", "[", "'items'", "]", "=", "[", "analysis_note_dict", "]", "context", "[", "'component_key'", "]", "=", "component_metadata", ".", "key", "context", "[", "'header'", "]", "=", "resolve_from_dictionary", "(", "extra_args", ",", "'header'", ")", "context", "[", "'items'", "]", "+=", "provenance", "[", "'notes'", "]", "# Get hazard classification", "hazard_classification", "=", "definition", "(", "active_classification", "(", "hazard_keywords", ",", "exposure_keywords", "[", "'exposure'", "]", ")", ")", "# Check hazard affected class", "affected_classes", "=", "[", "]", "for", "hazard_class", "in", "hazard_classification", "[", "'classes'", "]", ":", "if", "exposure_keywords", "[", "'exposure'", "]", "==", "exposure_population", "[", "'key'", "]", ":", "# Taking from profile", "is_affected_class", "=", "is_affected", "(", "hazard", "=", "hazard_keywords", "[", "'hazard'", "]", ",", "classification", "=", "hazard_classification", "[", "'key'", "]", ",", "hazard_class", "=", "hazard_class", "[", "'key'", "]", ",", ")", "if", "is_affected_class", ":", "affected_classes", ".", "append", "(", "hazard_class", ")", "else", ":", "if", "hazard_class", ".", "get", "(", "'affected'", ",", "False", ")", ":", "affected_classes", ".", "append", "(", "hazard_class", ")", "if", "affected_classes", ":", "affected_note_dict", "=", "resolve_from_dictionary", "(", "extra_args", ",", "'affected_note_format'", ")", "# generate hazard classes", "hazard_classes", "=", "', '", ".", "join", "(", "[", "c", "[", "'name'", "]", "for", "c", "in", "affected_classes", "]", ")", "for", "index", ",", "affected_note", "in", "enumerate", "(", "affected_note_dict", "[", "'item_list'", "]", ")", ":", "affected_note_dict", "[", "'item_list'", "]", "[", "index", "]", "=", "(", "affected_note", ".", "format", "(", "hazard_classes", "=", "hazard_classes", ")", ")", "context", "[", "'items'", "]", ".", "append", "(", "affected_note_dict", ")", "# Check hazard have displacement rate", "for", "hazard_class", "in", "hazard_classification", "[", "'classes'", "]", ":", "if", "hazard_class", ".", "get", "(", "'displacement_rate'", ",", "0", ")", ">", "0", ":", "have_displacement_rate", "=", "True", "break", "else", ":", "have_displacement_rate", "=", "False", "# Only show displacement note if analysis about population exposure", "if", "have_displacement_rate", "and", "exposure_type", "==", "exposure_population", ":", "# add notes for displacement rate used", "displacement_note_dict", "=", "resolve_from_dictionary", "(", "extra_args", ",", "'displacement_rates_note_format'", ")", "# generate rate description", "displacement_rates_note_format", "=", "resolve_from_dictionary", "(", "extra_args", ",", "'hazard_displacement_rates_note_format'", ")", "displacement_rates_note", "=", "[", "]", "for", "hazard_class", "in", "hazard_classification", "[", "'classes'", "]", ":", "the_hazard_class", "=", "deepcopy", "(", "hazard_class", ")", 
"the_hazard_class", "[", "'displacement_rate'", "]", "=", "get_displacement_rate", "(", "hazard", "=", "hazard_keywords", "[", "'hazard'", "]", ",", "classification", "=", "hazard_classification", "[", "'key'", "]", ",", "hazard_class", "=", "the_hazard_class", "[", "'key'", "]", ")", "displacement_rates_note", ".", "append", "(", "displacement_rates_note_format", ".", "format", "(", "*", "*", "the_hazard_class", ")", ")", "rate_description", "=", "', '", ".", "join", "(", "displacement_rates_note", ")", "for", "index", ",", "displacement_note", "in", "enumerate", "(", "displacement_note_dict", "[", "'item_list'", "]", ")", ":", "displacement_note_dict", "[", "'item_list'", "]", "[", "index", "]", "=", "(", "displacement_note", ".", "format", "(", "rate_description", "=", "rate_description", ")", ")", "context", "[", "'items'", "]", ".", "append", "(", "displacement_note_dict", ")", "# Check hazard have displacement rate", "have_fatality_rate", "=", "False", "for", "hazard_class", "in", "hazard_classification", "[", "'classes'", "]", ":", "if", "hazard_class", ".", "get", "(", "'fatality_rate'", ",", "None", ")", "is", "not", "None", "and", "hazard_class", ".", "get", "(", "'fatality_rate'", ",", "0", ")", ">", "0", ":", "have_fatality_rate", "=", "True", "break", "if", "have_fatality_rate", "and", "exposure_type", "==", "exposure_population", ":", "# add notes for fatality rate used", "fatality_note_dict", "=", "resolve_from_dictionary", "(", "extra_args", ",", "'fatality_rates_note_format'", ")", "# generate rate description", "fatality_rates_note_format", "=", "resolve_from_dictionary", "(", "extra_args", ",", "'hazard_fatality_rates_note_format'", ")", "fatality_rates_note", "=", "[", "]", "for", "hazard_class", "in", "hazard_classification", "[", "'classes'", "]", ":", "# we make a copy here because we don't want to", "# change the real value.", "copy_of_hazard_class", "=", "dict", "(", "hazard_class", ")", "if", "copy_of_hazard_class", "[", "'fatality_rate'", "]", "is", "None", "or", "copy_of_hazard_class", "[", "'fatality_rate'", "]", "<=", "0", ":", "copy_of_hazard_class", "[", "'fatality_rate'", "]", "=", "0", "else", ":", "# we want to show the rate as a scientific notation", "copy_of_hazard_class", "[", "'fatality_rate'", "]", "=", "(", "html_scientific_notation_rate", "(", "copy_of_hazard_class", "[", "'fatality_rate'", "]", ")", ")", "fatality_rates_note", ".", "append", "(", "fatality_rates_note_format", ".", "format", "(", "*", "*", "copy_of_hazard_class", ")", ")", "rate_description", "=", "', '", ".", "join", "(", "fatality_rates_note", ")", "for", "index", ",", "fatality_note", "in", "enumerate", "(", "fatality_note_dict", "[", "'item_list'", "]", ")", ":", "fatality_note_dict", "[", "'item_list'", "]", "[", "index", "]", "=", "(", "fatality_note", ".", "format", "(", "rate_description", "=", "rate_description", ")", ")", "context", "[", "'items'", "]", ".", "append", "(", "fatality_note_dict", ")", "return", "context" ]
Extracting notes and assumptions of the exposure layer

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that the extractor needs
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
[ "Extracting", "notes", "and", "assumptions", "of", "the", "exposure", "layer" ]
python
train
39.858108
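A minimal, self-contained sketch of the rate-note formatting pattern used in the code above (copy each class, normalize the rate, format a note, join into one description). The dictionaries and the "%.1e" formatter below are plain-Python stand-ins for InaSAFE's hazard definitions and `html_scientific_notation_rate`:

    hazard_classes = [
        {"name": "high", "fatality_rate": 0.0004},
        {"name": "low", "fatality_rate": None},
    ]
    note_format = "{name}: fatality rate {fatality_rate}"

    notes = []
    for hazard_class in hazard_classes:
        cls = dict(hazard_class)  # copy so the source definition is not mutated
        if cls["fatality_rate"] is None or cls["fatality_rate"] <= 0:
            cls["fatality_rate"] = 0
        else:
            cls["fatality_rate"] = "%.1e" % cls["fatality_rate"]  # stand-in for the HTML formatter
        notes.append(note_format.format(**cls))

    rate_description = ", ".join(notes)
    print(rate_description)  # high: fatality rate 4.0e-04, low: fatality rate 0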
jbasko/configmanager
configmanager/sections.py
https://github.com/jbasko/configmanager/blob/1d7229ce367143c7210d8e5f0782de03945a1721/configmanager/sections.py#L536-L587
def load_values(self, dictionary, as_defaults=False, flat=False): """ Import config values from a dictionary. When ``as_defaults`` is set to ``True``, the values imported will be set as defaults. This can be used to declare the sections and items of configuration. Values of sections and items in ``dictionary`` can be dictionaries as well as instances of :class:`.Item` and :class:`.Config`. Args: dictionary: as_defaults: if ``True``, the imported values will be set as defaults. """ if flat: # Deflatten the dictionary and then pass on to the normal case. separator = self.settings.str_path_separator flat_dictionary = dictionary dictionary = collections.OrderedDict() for k, v in flat_dictionary.items(): k_parts = k.split(separator) c = dictionary for i, kp in enumerate(k_parts): if i >= len(k_parts) - 1: c[kp] = v else: if kp not in c: c[kp] = collections.OrderedDict() c = c[kp] for name, value in dictionary.items(): if name not in self: if as_defaults: if isinstance(value, dict): self[name] = self.create_section() self[name].load_values(value, as_defaults=as_defaults) else: self[name] = self.create_item(name, default=value) else: # Skip unknown names if not interpreting dictionary as defaults pass continue resolution = self._get_item_or_section(name, handle_not_found=False) if is_config_item(resolution): if as_defaults: resolution.default = value else: resolution.value = value else: resolution.load_values(value, as_defaults=as_defaults)
[ "def", "load_values", "(", "self", ",", "dictionary", ",", "as_defaults", "=", "False", ",", "flat", "=", "False", ")", ":", "if", "flat", ":", "# Deflatten the dictionary and then pass on to the normal case.", "separator", "=", "self", ".", "settings", ".", "str_path_separator", "flat_dictionary", "=", "dictionary", "dictionary", "=", "collections", ".", "OrderedDict", "(", ")", "for", "k", ",", "v", "in", "flat_dictionary", ".", "items", "(", ")", ":", "k_parts", "=", "k", ".", "split", "(", "separator", ")", "c", "=", "dictionary", "for", "i", ",", "kp", "in", "enumerate", "(", "k_parts", ")", ":", "if", "i", ">=", "len", "(", "k_parts", ")", "-", "1", ":", "c", "[", "kp", "]", "=", "v", "else", ":", "if", "kp", "not", "in", "c", ":", "c", "[", "kp", "]", "=", "collections", ".", "OrderedDict", "(", ")", "c", "=", "c", "[", "kp", "]", "for", "name", ",", "value", "in", "dictionary", ".", "items", "(", ")", ":", "if", "name", "not", "in", "self", ":", "if", "as_defaults", ":", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "self", "[", "name", "]", "=", "self", ".", "create_section", "(", ")", "self", "[", "name", "]", ".", "load_values", "(", "value", ",", "as_defaults", "=", "as_defaults", ")", "else", ":", "self", "[", "name", "]", "=", "self", ".", "create_item", "(", "name", ",", "default", "=", "value", ")", "else", ":", "# Skip unknown names if not interpreting dictionary as defaults", "pass", "continue", "resolution", "=", "self", ".", "_get_item_or_section", "(", "name", ",", "handle_not_found", "=", "False", ")", "if", "is_config_item", "(", "resolution", ")", ":", "if", "as_defaults", ":", "resolution", ".", "default", "=", "value", "else", ":", "resolution", ".", "value", "=", "value", "else", ":", "resolution", ".", "load_values", "(", "value", ",", "as_defaults", "=", "as_defaults", ")" ]
Import config values from a dictionary. When ``as_defaults`` is set to ``True``, the values imported will be set as defaults. This can be used to declare the sections and items of configuration. Values of sections and items in ``dictionary`` can be dictionaries as well as instances of :class:`.Item` and :class:`.Config`. Args: dictionary: as_defaults: if ``True``, the imported values will be set as defaults.
[ "Import", "config", "values", "from", "a", "dictionary", "." ]
python
train
40.653846
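A standalone sketch of the flat-key deflattening step in `load_values` above, assuming "." as the path separator (the real separator comes from `self.settings.str_path_separator`):

    import collections

    flat = {"db.host": "localhost", "db.port": 5432, "debug": True}
    nested = collections.OrderedDict()
    for key, value in flat.items():
        parts = key.split(".")
        node = nested
        for i, part in enumerate(parts):
            if i >= len(parts) - 1:
                node[part] = value  # leaf segment: assign the value
            else:
                node = node.setdefault(part, collections.OrderedDict())

    # nested == {"db": {"host": "localhost", "port": 5432}, "debug": True}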
fedora-python/pyp2rpm
pyp2rpm/archive.py
https://github.com/fedora-python/pyp2rpm/blob/853eb3d226689a5ccdcdb9358b1a3394fafbd2b5/pyp2rpm/archive.py#L27-L39
def flat_list(lst): """This function flattens a given nested list. Argument: nested list Returns: flat list """ if isinstance(lst, list): for item in lst: for i in flat_list(item): yield i else: yield lst
[ "def", "flat_list", "(", "lst", ")", ":", "if", "isinstance", "(", "lst", ",", "list", ")", ":", "for", "item", "in", "lst", ":", "for", "i", "in", "flat_list", "(", "item", ")", ":", "yield", "i", "else", ":", "yield", "lst" ]
This function flattens a given nested list. Argument: nested list Returns: flat list
[ "This", "function", "flattens", "a", "given", "nested", "list", ".", "Argument", ":", "nested", "list", "Returns", ":", "flat", "list" ]
python
train
21
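Usage sketch for `flat_list` above (the generator is redefined here so the snippet runs on its own):

    def flat_list(lst):
        # Yield the leaves of an arbitrarily nested list.
        if isinstance(lst, list):
            for item in lst:
                for i in flat_list(item):
                    yield i
        else:
            yield lst

    print(list(flat_list([1, [2, [3, 4]], 5])))  # [1, 2, 3, 4, 5]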
senaite/senaite.core
bika/lims/idserver.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/idserver.py#L370-L384
def get_seq_number_from_id(id, id_template, prefix, **kw): """Return the sequence number of the given ID """ separator = kw.get("separator", "-") postfix = id.replace(prefix, "").strip(separator) postfix_segments = postfix.split(separator) seq_number = 0 possible_seq_nums = filter(lambda n: n.isalnum(), postfix_segments) if possible_seq_nums: seq_number = possible_seq_nums[-1] # Check if this id has to be expressed as an alphanumeric number seq_number = get_alpha_or_number(seq_number, id_template) seq_number = to_int(seq_number) return seq_number
[ "def", "get_seq_number_from_id", "(", "id", ",", "id_template", ",", "prefix", ",", "*", "*", "kw", ")", ":", "separator", "=", "kw", ".", "get", "(", "\"separator\"", ",", "\"-\"", ")", "postfix", "=", "id", ".", "replace", "(", "prefix", ",", "\"\"", ")", ".", "strip", "(", "separator", ")", "postfix_segments", "=", "postfix", ".", "split", "(", "separator", ")", "seq_number", "=", "0", "possible_seq_nums", "=", "filter", "(", "lambda", "n", ":", "n", ".", "isalnum", "(", ")", ",", "postfix_segments", ")", "if", "possible_seq_nums", ":", "seq_number", "=", "possible_seq_nums", "[", "-", "1", "]", "# Check if this id has to be expressed as an alphanumeric number", "seq_number", "=", "get_alpha_or_number", "(", "seq_number", ",", "id_template", ")", "seq_number", "=", "to_int", "(", "seq_number", ")", "return", "seq_number" ]
Return the sequence number of the given ID
[ "Return", "the", "sequence", "number", "of", "the", "given", "ID" ]
python
train
39.666667
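A self-contained Python 3 sketch of the same ID-parsing idea; note the original relies on Python 2's filter() returning a list, and the `get_alpha_or_number`/`to_int` helpers (senaite-specific) are simplified away here:

    def seq_number_from_id(id_, prefix, separator="-"):
        # Strip the prefix, split the remaining postfix, keep alphanumeric segments.
        postfix = id_.replace(prefix, "").strip(separator)
        segments = postfix.split(separator)
        candidates = [s for s in segments if s.isalnum()]
        return int(candidates[-1]) if candidates else 0

    print(seq_number_from_id("WS-2023-0007", "WS"))  # 7  (hypothetical ID scheme)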
Qiskit/qiskit-terra
qiskit/transpiler/coupling.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/transpiler/coupling.py#L115-L124
def is_connected(self): """ Test if the graph is connected. Return True if connected, False otherwise """ try: return nx.is_weakly_connected(self.graph) except nx.exception.NetworkXException: return False
[ "def", "is_connected", "(", "self", ")", ":", "try", ":", "return", "nx", ".", "is_weakly_connected", "(", "self", ".", "graph", ")", "except", "nx", ".", "exception", ".", "NetworkXException", ":", "return", "False" ]
Test if the graph is connected. Return True if connected, False otherwise
[ "Test", "if", "the", "graph", "is", "connected", "." ]
python
test
26.8
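For context, weak connectivity treats the directed graph as undirected; a minimal standalone check (assuming networkx is installed):

    import networkx as nx

    g = nx.DiGraph()
    g.add_edges_from([(0, 1), (1, 2)])
    print(nx.is_weakly_connected(g))  # True: connected when edge directions are ignored

    g.add_node(3)  # an isolated node breaks weak connectivity
    print(nx.is_weakly_connected(g))  # False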
theislab/scvelo
scvelo/preprocessing/moments.py
https://github.com/theislab/scvelo/blob/c7a96d70edfe705e86bf364434a9527d4fd8df11/scvelo/preprocessing/moments.py#L10-L61
def moments(data, n_neighbors=30, n_pcs=30, mode='connectivities', method='umap', metric='euclidean', use_rep=None, recurse_neighbors=False, renormalize=False, copy=False): """Computes moments for velocity estimation. Arguments --------- data: :class:`~anndata.AnnData` Annotated data matrix. n_neighbors: `int` (default: 30) Number of neighbors to use. n_pcs: `int` (default: 30) Number of principal components to use. mode: `'connectivities'` or `'distances'` (default: `'connectivities'`) Distance metric to use for moment computation. renormalize: `bool` (default: `False`) Renormalize the moments by total counts per cell to its median. copy: `bool` (default: `False`) Return a copy instead of writing to adata. Returns ------- Returns or updates `adata` with the attributes Ms: `.layers` dense matrix with first order moments of spliced counts. Mu: `.layers` dense matrix with first order moments of unspliced counts. """ adata = data.copy() if copy else data if 'spliced' not in adata.layers.keys() or 'unspliced' not in adata.layers.keys(): raise ValueError('Could not find spliced / unspliced counts.') if any([not_yet_normalized(adata.layers[layer]) for layer in {'spliced', 'unspliced'}]): normalize_per_cell(adata) if 'neighbors' not in adata.uns.keys() or neighbors_to_be_recomputed(adata, n_neighbors=n_neighbors): if use_rep is None: use_rep = 'X_pca' neighbors(adata, n_neighbors=n_neighbors, use_rep=use_rep, n_pcs=n_pcs, method=method, metric=metric) if mode not in adata.uns['neighbors']: raise ValueError('mode can only be \'connectivities\' or \'distances\'') logg.info('computing moments based on ' + str(mode), r=True) connectivities = get_connectivities(adata, mode, n_neighbors=n_neighbors, recurse_neighbors=recurse_neighbors) adata.layers['Ms'] = csr_matrix.dot(connectivities, csr_matrix(adata.layers['spliced'])).astype(np.float32).A adata.layers['Mu'] = csr_matrix.dot(connectivities, csr_matrix(adata.layers['unspliced'])).astype(np.float32).A if renormalize: normalize_per_cell(adata, layers={'Ms', 'Mu'}, enforce=True) logg.info(' finished', time=True, end=' ' if settings.verbosity > 2 else '\n') logg.hint( 'added \n' ' \'Ms\' and \'Mu\', moments of spliced/unspliced abundances (adata.layers)') return adata if copy else None
[ "def", "moments", "(", "data", ",", "n_neighbors", "=", "30", ",", "n_pcs", "=", "30", ",", "mode", "=", "'connectivities'", ",", "method", "=", "'umap'", ",", "metric", "=", "'euclidean'", ",", "use_rep", "=", "None", ",", "recurse_neighbors", "=", "False", ",", "renormalize", "=", "False", ",", "copy", "=", "False", ")", ":", "adata", "=", "data", ".", "copy", "(", ")", "if", "copy", "else", "data", "if", "'spliced'", "not", "in", "adata", ".", "layers", ".", "keys", "(", ")", "or", "'unspliced'", "not", "in", "adata", ".", "layers", ".", "keys", "(", ")", ":", "raise", "ValueError", "(", "'Could not find spliced / unspliced counts.'", ")", "if", "any", "(", "[", "not_yet_normalized", "(", "adata", ".", "layers", "[", "layer", "]", ")", "for", "layer", "in", "{", "'spliced'", ",", "'unspliced'", "}", "]", ")", ":", "normalize_per_cell", "(", "adata", ")", "if", "'neighbors'", "not", "in", "adata", ".", "uns", ".", "keys", "(", ")", "or", "neighbors_to_be_recomputed", "(", "adata", ",", "n_neighbors", "=", "n_neighbors", ")", ":", "if", "use_rep", "is", "None", ":", "use_rep", "=", "'X_pca'", "neighbors", "(", "adata", ",", "n_neighbors", "=", "n_neighbors", ",", "use_rep", "=", "use_rep", ",", "n_pcs", "=", "n_pcs", ",", "method", "=", "method", ",", "metric", "=", "metric", ")", "if", "mode", "not", "in", "adata", ".", "uns", "[", "'neighbors'", "]", ":", "raise", "ValueError", "(", "'mode can only be \\'connectivities\\' or \\'distances\\''", ")", "logg", ".", "info", "(", "'computing moments based on '", "+", "str", "(", "mode", ")", ",", "r", "=", "True", ")", "connectivities", "=", "get_connectivities", "(", "adata", ",", "mode", ",", "n_neighbors", "=", "n_neighbors", ",", "recurse_neighbors", "=", "recurse_neighbors", ")", "adata", ".", "layers", "[", "'Ms'", "]", "=", "csr_matrix", ".", "dot", "(", "connectivities", ",", "csr_matrix", "(", "adata", ".", "layers", "[", "'spliced'", "]", ")", ")", ".", "astype", "(", "np", ".", "float32", ")", ".", "A", "adata", ".", "layers", "[", "'Mu'", "]", "=", "csr_matrix", ".", "dot", "(", "connectivities", ",", "csr_matrix", "(", "adata", ".", "layers", "[", "'unspliced'", "]", ")", ")", ".", "astype", "(", "np", ".", "float32", ")", ".", "A", "if", "renormalize", ":", "normalize_per_cell", "(", "adata", ",", "layers", "=", "{", "'Ms'", ",", "'Mu'", "}", ",", "enforce", "=", "True", ")", "logg", ".", "info", "(", "' finished'", ",", "time", "=", "True", ",", "end", "=", "' '", "if", "settings", ".", "verbosity", ">", "2", "else", "'\\n'", ")", "logg", ".", "hint", "(", "'added \\n'", "' \\'Ms\\' and \\'Mu\\', moments of spliced/unspliced abundances (adata.layers)'", ")", "return", "adata", "if", "copy", "else", "None" ]
Computes moments for velocity estimation. Arguments --------- data: :class:`~anndata.AnnData` Annotated data matrix. n_neighbors: `int` (default: 30) Number of neighbors to use. n_pcs: `int` (default: 30) Number of principal components to use. mode: `'connectivities'` or `'distances'` (default: `'connectivities'`) Distance metric to use for moment computation. renormalize: `bool` (default: `False`) Renormalize the moments by total counts per cell to its median. copy: `bool` (default: `False`) Return a copy instead of writing to adata. Returns ------- Returns or updates `adata` with the attributes Ms: `.layers` dense matrix with first order moments of spliced counts. Mu: `.layers` dense matrix with first order moments of unspliced counts.
[ "Computes", "moments", "for", "velocity", "estimation", "." ]
python
train
47.461538
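The core computation of `moments` reduces to a sparse product of the cell-cell connectivity matrix with the count layers; a rough numpy/scipy sketch with made-up data (real connectivities come from `get_connectivities` and are KNN-derived):

    import numpy as np
    from scipy.sparse import csr_matrix

    # Hypothetical 3-cell, 2-gene example; rows of `connectivities` are normalized weights.
    connectivities = csr_matrix(np.array([[0.5, 0.5, 0.0],
                                          [0.5, 0.0, 0.5],
                                          [0.0, 0.5, 0.5]]))
    spliced = csr_matrix(np.array([[2.0, 0.0],
                                   [4.0, 2.0],
                                   [0.0, 4.0]]))

    # First-order moments: neighborhood-smoothed expression, as stored in adata.layers['Ms'].
    Ms = csr_matrix.dot(connectivities, spliced).astype(np.float32).A
    print(Ms)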
Microsoft/nni
src/sdk/pynni/nni/networkmorphism_tuner/networkmorphism_tuner.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/networkmorphism_tuner/networkmorphism_tuner.py#L213-L228
def update(self, other_info, graph, metric_value, model_id): """ Update the controller with evaluation result of a neural architecture. Parameters ---------- other_info: any object In our case it is the father ID in the search tree. graph: Graph An instance of Graph. The trained neural architecture. metric_value: float The final evaluated metric value. model_id: int """ father_id = other_info self.bo.fit([graph.extract_descriptor()], [metric_value]) self.bo.add_child(father_id, model_id)
[ "def", "update", "(", "self", ",", "other_info", ",", "graph", ",", "metric_value", ",", "model_id", ")", ":", "father_id", "=", "other_info", "self", ".", "bo", ".", "fit", "(", "[", "graph", ".", "extract_descriptor", "(", ")", "]", ",", "[", "metric_value", "]", ")", "self", ".", "bo", ".", "add_child", "(", "father_id", ",", "model_id", ")" ]
Update the controller with evaluation result of a neural architecture. Parameters ---------- other_info: any object In our case it is the father ID in the search tree. graph: Graph An instance of Graph. The trained neural architecture. metric_value: float The final evaluated metric value. model_id: int
[ "Update", "the", "controller", "with", "evaluation", "result", "of", "a", "neural", "architecture", "." ]
python
train
37.625
LonamiWebs/Telethon
telethon/tl/custom/messagebutton.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/tl/custom/messagebutton.py#L57-L60
def url(self): """The url ``str`` for :tl:`KeyboardButtonUrl` objects.""" if isinstance(self.button, types.KeyboardButtonUrl): return self.button.url
[ "def", "url", "(", "self", ")", ":", "if", "isinstance", "(", "self", ".", "button", ",", "types", ".", "KeyboardButtonUrl", ")", ":", "return", "self", ".", "button", ".", "url" ]
The url ``str`` for :tl:`KeyboardButtonUrl` objects.
[ "The", "url", "str", "for", ":", "tl", ":", "KeyboardButtonUrl", "objects", "." ]
python
train
43.5
istresearch/scrapy-cluster
redis-monitor/plugins/stats_monitor.py
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/redis-monitor/plugins/stats_monitor.py#L109-L146
def _get_plugin_stats(self, name): ''' Used for getting stats for Plugin based stuff, like Kafka Monitor and Redis Monitor @param name: the main class stats name @return: A formatted dict of stats ''' the_dict = {} keys = self.redis_conn.keys('stats:{n}:*'.format(n=name)) for key in keys: # break down key elements = key.split(":") main = elements[2] end = elements[3] if main == 'total' or main == 'fail': if main not in the_dict: the_dict[main] = {} the_dict[main][end] = self._get_key_value(key, end == 'lifetime') elif main == 'self': if 'nodes' not in the_dict: # main is self, end is machine, true_tail is uuid the_dict['nodes'] = {} true_tail = elements[4] if end not in the_dict['nodes']: the_dict['nodes'][end] = [] the_dict['nodes'][end].append(true_tail) else: if 'plugins' not in the_dict: the_dict['plugins'] = {} if main not in the_dict['plugins']: the_dict['plugins'][main] = {} the_dict['plugins'][main][end] = self._get_key_value(key, end == 'lifetime') return the_dict
[ "def", "_get_plugin_stats", "(", "self", ",", "name", ")", ":", "the_dict", "=", "{", "}", "keys", "=", "self", ".", "redis_conn", ".", "keys", "(", "'stats:{n}:*'", ".", "format", "(", "n", "=", "name", ")", ")", "for", "key", "in", "keys", ":", "# break down key", "elements", "=", "key", ".", "split", "(", "\":\"", ")", "main", "=", "elements", "[", "2", "]", "end", "=", "elements", "[", "3", "]", "if", "main", "==", "'total'", "or", "main", "==", "'fail'", ":", "if", "main", "not", "in", "the_dict", ":", "the_dict", "[", "main", "]", "=", "{", "}", "the_dict", "[", "main", "]", "[", "end", "]", "=", "self", ".", "_get_key_value", "(", "key", ",", "end", "==", "'lifetime'", ")", "elif", "main", "==", "'self'", ":", "if", "'nodes'", "not", "in", "the_dict", ":", "# main is self, end is machine, true_tail is uuid", "the_dict", "[", "'nodes'", "]", "=", "{", "}", "true_tail", "=", "elements", "[", "4", "]", "if", "end", "not", "in", "the_dict", "[", "'nodes'", "]", ":", "the_dict", "[", "'nodes'", "]", "[", "end", "]", "=", "[", "]", "the_dict", "[", "'nodes'", "]", "[", "end", "]", ".", "append", "(", "true_tail", ")", "else", ":", "if", "'plugins'", "not", "in", "the_dict", ":", "the_dict", "[", "'plugins'", "]", "=", "{", "}", "if", "main", "not", "in", "the_dict", "[", "'plugins'", "]", ":", "the_dict", "[", "'plugins'", "]", "[", "main", "]", "=", "{", "}", "the_dict", "[", "'plugins'", "]", "[", "main", "]", "[", "end", "]", "=", "self", ".", "_get_key_value", "(", "key", ",", "end", "==", "'lifetime'", ")", "return", "the_dict" ]
Used for getting stats for Plugin based stuff, like Kafka Monitor and Redis Monitor @param name: the main class stats name @return: A formatted dict of stats
[ "Used", "for", "getting", "stats", "for", "Plugin", "based", "stuff", "like", "Kafka", "Monitor", "and", "Redis", "Monitor" ]
python
train
36.394737
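The key convention parsed above is 'stats:<name>:<main>:<end>[:<uuid>]'; it can be exercised without Redis using invented keys (the "<value>" strings stand in for `_get_key_value` lookups):

    keys = [
        "stats:kafka-monitor:total:3600",
        "stats:kafka-monitor:self:machine-a:1f2e3d",
        "stats:kafka-monitor:scraper_handler:lifetime",
    ]

    summary = {}
    for key in keys:
        elements = key.split(":")
        main, end = elements[2], elements[3]
        if main in ("total", "fail"):
            summary.setdefault(main, {})[end] = "<value>"
        elif main == "self":
            # end is the machine name, elements[4] the instance uuid
            summary.setdefault("nodes", {}).setdefault(end, []).append(elements[4])
        else:
            summary.setdefault("plugins", {}).setdefault(main, {})[end] = "<value>"

    print(summary)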
singularityhub/sregistry-cli
sregistry/main/__template__/push.py
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/main/__template__/push.py#L28-L52
def push(self, path, name, tag=None): '''push an image to Singularity Registry path: should correspond to an absolute image path (or derive it) name: should be the complete uri that the user has requested to push. tag: should correspond with an image tag. This is provided to mirror Docker ''' path = os.path.abspath(path) bot.debug("PUSH %s" % path) if not os.path.exists(path): bot.error('%s does not exist.' %path) sys.exit(1) # This returns a data structure with collection, container, based on uri names = parse_image_name(remove_uri(name),tag=tag) # use Singularity client, if exists, to inspect to extract metadata metadata = self.get_metadata(path, names=names) # If you want a spinner bot.spinner.start() # do your push request here. Generally you want to except a KeyboardInterrupt # and give the user a status from the response bot.spinner.stop()
[ "def", "push", "(", "self", ",", "path", ",", "name", ",", "tag", "=", "None", ")", ":", "path", "=", "os", ".", "path", ".", "abspath", "(", "path", ")", "bot", ".", "debug", "(", "\"PUSH %s\"", "%", "path", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "bot", ".", "error", "(", "'%s does not exist.'", "%", "path", ")", "sys", ".", "exit", "(", "1", ")", "# This returns a data structure with collection, container, based on uri", "names", "=", "parse_image_name", "(", "remove_uri", "(", "name", ")", ",", "tag", "=", "tag", ")", "# use Singularity client, if exists, to inspect to extract metadata", "metadata", "=", "self", ".", "get_metadata", "(", "path", ",", "names", "=", "names", ")", "# If you want a spinner", "bot", ".", "spinner", ".", "start", "(", ")", "# do your push request here. Generally you want to except a KeyboardInterrupt", "# and give the user a status from the response", "bot", ".", "spinner", ".", "stop", "(", ")" ]
push an image to Singularity Registry path: should correspond to an absolute image path (or derive it) name: should be the complete uri that the user has requested to push. tag: should correspond with an image tag. This is provided to mirror Docker
[ "push", "an", "image", "to", "Singularity", "Registry", "path", ":", "should", "correspond", "to", "an", "absolute", "image", "path", "(", "or", "derive", "it", ")", "name", ":", "should", "be", "the", "complete", "uri", "that", "the", "user", "has", "requested", "to", "push", ".", "tag", ":", "should", "correspond", "with", "an", "image", "tag", ".", "This", "is", "provided", "to", "mirror", "Docker" ]
python
test
37
python-diamond/Diamond
src/diamond/handler/graphite.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/diamond/handler/graphite.py#L126-L142
def _send_data(self, data): """ Try to send all data in buffer. """ try: self.socket.sendall(data) self._reset_errors() except: self._close() self._throttle_error("GraphiteHandler: Socket error, " "trying reconnect.") self._connect() try: self.socket.sendall(data) except: return self._reset_errors()
[ "def", "_send_data", "(", "self", ",", "data", ")", ":", "try", ":", "self", ".", "socket", ".", "sendall", "(", "data", ")", "self", ".", "_reset_errors", "(", ")", "except", ":", "self", ".", "_close", "(", ")", "self", ".", "_throttle_error", "(", "\"GraphiteHandler: Socket error, \"", "\"trying reconnect.\"", ")", "self", ".", "_connect", "(", ")", "try", ":", "self", ".", "socket", ".", "sendall", "(", "data", ")", "except", ":", "return", "self", ".", "_reset_errors", "(", ")" ]
Try to send all data in buffer.
[ "Try", "to", "send", "all", "data", "in", "buffer", "." ]
python
train
28.529412
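The retry shape above (send, reconnect on failure, resend once) as a standalone sketch; `connect` is a caller-supplied callable, and the original's bare `except:` is narrowed to `OSError` here, which covers socket errors in Python 3:

    def send_with_single_retry(holder, data, connect):
        # Send bytes; on a socket error, reconnect and retry exactly once.
        try:
            holder["sock"].sendall(data)
            return True
        except OSError:
            holder["sock"].close()
            holder["sock"] = connect()  # re-establish the connection
            try:
                holder["sock"].sendall(data)
                return True
            except OSError:
                return False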
markovmodel/msmtools
msmtools/estimation/sparse/prior.py
https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/estimation/sparse/prior.py#L86-L117
def prior_rev(C, alpha=-1.0): r"""Prior counts for sampling of reversible transition matrices. Prior is defined as b_ij = alpha if i <= j b_ij = 0 otherwise The reversible prior adds -1 to the upper triangular part of the given count matrix. This prior respects the fact that for a reversible transition matrix the degrees of freedom correspond essentially to the upper, respectively the lower triangular part of the matrix. Parameters ---------- C : (M, M) ndarray or scipy.sparse matrix Count matrix alpha : float (optional) Value of prior counts Returns ------- B : (M, M) ndarray Matrix of prior counts """ ind = np.triu_indices(C.shape[0]) B = np.zeros(C.shape) B[ind] = alpha return B
[ "def", "prior_rev", "(", "C", ",", "alpha", "=", "-", "1.0", ")", ":", "ind", "=", "np", ".", "triu_indices", "(", "C", ".", "shape", "[", "0", "]", ")", "B", "=", "np", ".", "zeros", "(", "C", ".", "shape", ")", "B", "[", "ind", "]", "=", "alpha", "return", "B" ]
r"""Prior counts for sampling of reversible transition matrices. Prior is defined as b_ij= alpha if i<=j b_ij=0 else The reversible prior adds -1 to the upper triagular part of the given count matrix. This prior respects the fact that for a reversible transition matrix the degrees of freedom correspond essentially to the upper, respectively the lower triangular part of the matrix. Parameters ---------- C : (M, M) ndarray or scipy.sparse matrix Count matrix alpha : float (optional) Value of prior counts Returns ------- B : (M, M) ndarray Matrix of prior counts
[ "r", "Prior", "counts", "for", "sampling", "of", "reversible", "transition", "matrices", "." ]
python
train
24.1875
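A quick worked example of the upper-triangular prior (assuming numpy):

    import numpy as np

    C = np.zeros((3, 3))               # count matrix; only its shape matters here
    ind = np.triu_indices(C.shape[0])  # indices with i <= j
    B = np.zeros(C.shape)
    B[ind] = -1.0                      # the default alpha
    print(B)
    # [[-1. -1. -1.]
    #  [ 0. -1. -1.]
    #  [ 0.  0. -1.]]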
GemHQ/round-py
round/__init__.py
https://github.com/GemHQ/round-py/blob/d0838f849cd260b1eb5df67ed3c6f2fe56c91c21/round/__init__.py#L65-L89
def authorizer(self, schemes, resource, action, request_args): """Construct the Authorization header for a request. Args: schemes (list of str): Authentication schemes supported for the requested action. resource (str): Object upon which an action is being performed. action (str): Action being performed. request_args (list of str): Arguments passed to the action call. Returns: (str, str) A tuple of the auth scheme satisfied, and the credential for the Authorization header or empty strings if none could be satisfied. """ if not schemes: return u'', u'' for scheme in schemes: if scheme in self.schemes and self.has_auth_params(scheme): cred = Context.format_auth_params(self.schemes[scheme][u'params']) if hasattr(self, 'mfa_token'): cred = '{}, mfa_token="{}"'.format(cred, self.mfa_token) return scheme, cred raise AuthenticationError(self, schemes)
[ "def", "authorizer", "(", "self", ",", "schemes", ",", "resource", ",", "action", ",", "request_args", ")", ":", "if", "not", "schemes", ":", "return", "u''", ",", "u''", "for", "scheme", "in", "schemes", ":", "if", "scheme", "in", "self", ".", "schemes", "and", "self", ".", "has_auth_params", "(", "scheme", ")", ":", "cred", "=", "Context", ".", "format_auth_params", "(", "self", ".", "schemes", "[", "scheme", "]", "[", "u'params'", "]", ")", "if", "hasattr", "(", "self", ",", "'mfa_token'", ")", ":", "cred", "=", "'{}, mfa_token=\"{}\"'", ".", "format", "(", "cred", ",", "self", ".", "mfa_token", ")", "return", "scheme", ",", "cred", "raise", "AuthenticationError", "(", "self", ",", "schemes", ")" ]
Construct the Authorization header for a request. Args: schemes (list of str): Authentication schemes supported for the requested action. resource (str): Object upon which an action is being performed. action (str): Action being performed. request_args (list of str): Arguments passed to the action call. Returns: (str, str) A tuple of the auth scheme satisfied, and the credential for the Authorization header or empty strings if none could be satisfied.
[ "Construct", "the", "Authorization", "header", "for", "a", "request", "." ]
python
train
42.76
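The selection logic reduces to "first offered scheme whose parameters we hold"; a standalone sketch with an invented scheme name and token (the real scheme names and `mfa_token` handling are round-specific):

    def pick_scheme(offered, supported):
        # Return (scheme, params) for the first offered scheme we can satisfy.
        for scheme in offered:
            if scheme in supported and supported[scheme].get("params"):
                return scheme, supported[scheme]["params"]
        raise RuntimeError("no authentication scheme could be satisfied: %r" % (offered,))

    supported = {"Gem-Identify": {"params": {"api_token": "abc123"}}}  # hypothetical
    print(pick_scheme(["Gem-Device", "Gem-Identify"], supported))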
thieman/dagobah
dagobah/daemon/daemon.py
https://github.com/thieman/dagobah/blob/e624180c2291034960302c9e0b818b65b5a7ee11/dagobah/daemon/daemon.py#L27-L42
def replace_nones(dict_or_list): """Update a dict or list in place to replace 'none' string values with Python None.""" def replace_none_in_value(value): if isinstance(value, basestring) and value.lower() == "none": return None return value items = dict_or_list.iteritems() if isinstance(dict_or_list, dict) else enumerate(dict_or_list) for accessor, value in items: if isinstance(value, (dict, list)): replace_nones(value) else: dict_or_list[accessor] = replace_none_in_value(value)
[ "def", "replace_nones", "(", "dict_or_list", ")", ":", "def", "replace_none_in_value", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "basestring", ")", "and", "value", ".", "lower", "(", ")", "==", "\"none\"", ":", "return", "None", "return", "value", "items", "=", "dict_or_list", ".", "iteritems", "(", ")", "if", "isinstance", "(", "dict_or_list", ",", "dict", ")", "else", "enumerate", "(", "dict_or_list", ")", "for", "accessor", ",", "value", "in", "items", ":", "if", "isinstance", "(", "value", ",", "(", "dict", ",", "list", ")", ")", ":", "replace_nones", "(", "value", ")", "else", ":", "dict_or_list", "[", "accessor", "]", "=", "replace_none_in_value", "(", "value", ")" ]
Update a dict or list in place to replace 'none' string values with Python None.
[ "Update", "a", "dict", "or", "list", "in", "place", "to", "replace", "none", "string", "values", "with", "Python", "None", "." ]
python
train
34.9375
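The function above is Python 2 (`basestring`, `iteritems`); a hedged Python 3 equivalent with a small demo:

    def replace_nones(dict_or_list):
        items = (dict_or_list.items() if isinstance(dict_or_list, dict)
                 else enumerate(dict_or_list))
        for accessor, value in items:
            if isinstance(value, (dict, list)):
                replace_nones(value)  # recurse into nested containers
            elif isinstance(value, str) and value.lower() == "none":
                dict_or_list[accessor] = None

    cfg = {"timeout": "None", "hosts": ["a", "none"], "retries": 3}
    replace_nones(cfg)
    print(cfg)  # {'timeout': None, 'hosts': ['a', None], 'retries': 3}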
allenai/allennlp
allennlp/semparse/type_declarations/wikitables_lambda_dcs.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/type_declarations/wikitables_lambda_dcs.py#L83-L123
def resolve(self, other: Type) -> Optional[Type]: """See ``PlaceholderType.resolve``""" if not isinstance(other, NltkComplexType): return None expected_second = ComplexType(NUMBER_TYPE, ComplexType(ANY_TYPE, ComplexType(ComplexType(ANY_TYPE, ANY_TYPE), ANY_TYPE))) resolved_second = other.second.resolve(expected_second) if resolved_second is None: return None # The lambda function that we use inside the argmax must take either a number or a date as # an argument. lambda_arg_type = other.second.second.second.first.first if lambda_arg_type.resolve(NUMBER_TYPE) is None and lambda_arg_type.resolve(DATE_TYPE) is None: return None try: # This is the first #1 in the type signature above. selector_function_type = resolved_second.second.first # This is the second #1 in the type signature above. quant_function_argument_type = resolved_second.second.second.first.second # This is the third #1 in the type signature above. return_type = resolved_second.second.second.second # All three placeholder (ph) types above should resolve against each other. resolved_first_ph = selector_function_type.resolve(quant_function_argument_type) resolved_first_ph.resolve(return_type) resolved_second_ph = quant_function_argument_type.resolve(resolved_first_ph) resolved_second_ph.resolve(return_type) resolved_third_ph = return_type.resolve(resolved_first_ph) resolved_third_ph = return_type.resolve(resolved_second_ph) if not resolved_first_ph or not resolved_second_ph or not resolved_third_ph: return None return ArgExtremeType(resolved_first_ph, lambda_arg_type) except AttributeError: return None
[ "def", "resolve", "(", "self", ",", "other", ":", "Type", ")", "->", "Optional", "[", "Type", "]", ":", "if", "not", "isinstance", "(", "other", ",", "NltkComplexType", ")", ":", "return", "None", "expected_second", "=", "ComplexType", "(", "NUMBER_TYPE", ",", "ComplexType", "(", "ANY_TYPE", ",", "ComplexType", "(", "ComplexType", "(", "ANY_TYPE", ",", "ANY_TYPE", ")", ",", "ANY_TYPE", ")", ")", ")", "resolved_second", "=", "other", ".", "second", ".", "resolve", "(", "expected_second", ")", "if", "resolved_second", "is", "None", ":", "return", "None", "# The lambda function that we use inside the argmax must take either a number or a date as", "# an argument.", "lambda_arg_type", "=", "other", ".", "second", ".", "second", ".", "second", ".", "first", ".", "first", "if", "lambda_arg_type", ".", "resolve", "(", "NUMBER_TYPE", ")", "is", "None", "and", "lambda_arg_type", ".", "resolve", "(", "DATE_TYPE", ")", "is", "None", ":", "return", "None", "try", ":", "# This is the first #1 in the type signature above.", "selector_function_type", "=", "resolved_second", ".", "second", ".", "first", "# This is the second #1 in the type signature above.", "quant_function_argument_type", "=", "resolved_second", ".", "second", ".", "second", ".", "first", ".", "second", "# This is the third #1 in the type signature above.", "return_type", "=", "resolved_second", ".", "second", ".", "second", ".", "second", "# All three placeholder (ph) types above should resolve against each other.", "resolved_first_ph", "=", "selector_function_type", ".", "resolve", "(", "quant_function_argument_type", ")", "resolved_first_ph", ".", "resolve", "(", "return_type", ")", "resolved_second_ph", "=", "quant_function_argument_type", ".", "resolve", "(", "resolved_first_ph", ")", "resolved_second_ph", ".", "resolve", "(", "return_type", ")", "resolved_third_ph", "=", "return_type", ".", "resolve", "(", "resolved_first_ph", ")", "resolved_third_ph", "=", "return_type", ".", "resolve", "(", "resolved_second_ph", ")", "if", "not", "resolved_first_ph", "or", "not", "resolved_second_ph", "or", "not", "resolved_third_ph", ":", "return", "None", "return", "ArgExtremeType", "(", "resolved_first_ph", ",", "lambda_arg_type", ")", "except", "AttributeError", ":", "return", "None" ]
See ``PlaceholderType.resolve``
[ "See", "PlaceholderType", ".", "resolve" ]
python
train
48.682927
evandempsey/fp-growth
pyfpgrowth/pyfpgrowth.py
https://github.com/evandempsey/fp-growth/blob/6bf4503024e86c5bbea8a05560594f2f7f061c15/pyfpgrowth/pyfpgrowth.py#L148-L155
def mine_patterns(self, threshold): """ Mine the constructed FP tree for frequent patterns. """ if self.tree_has_single_path(self.root): return self.generate_pattern_list() else: return self.zip_patterns(self.mine_sub_trees(threshold))
[ "def", "mine_patterns", "(", "self", ",", "threshold", ")", ":", "if", "self", ".", "tree_has_single_path", "(", "self", ".", "root", ")", ":", "return", "self", ".", "generate_pattern_list", "(", ")", "else", ":", "return", "self", ".", "zip_patterns", "(", "self", ".", "mine_sub_trees", "(", "threshold", ")", ")" ]
Mine the constructed FP tree for frequent patterns.
[ "Mine", "the", "constructed", "FP", "tree", "for", "frequent", "patterns", "." ]
python
train
36.5
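`mine_patterns` is internal to the FP-tree; the package's documented entry points wrap tree construction, mining, and rule generation (assuming pyfpgrowth is installed):

    import pyfpgrowth

    transactions = [[1, 2, 5], [2, 4], [2, 3], [1, 2, 4], [1, 3]]
    # Dict mapping frequent itemsets (tuples) to their support counts.
    patterns = pyfpgrowth.find_frequent_patterns(transactions, 2)
    # Association rules meeting a minimum confidence threshold.
    rules = pyfpgrowth.generate_association_rules(patterns, 0.7)
    print(patterns, rules)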
hazelcast/hazelcast-python-client
hazelcast/proxy/map.py
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/proxy/map.py#L544-L574
def put_if_absent(self, key, value, ttl=-1): """ Associates the specified key with the given value if it is not already associated. If ttl is provided, entry will expire and get evicted after the ttl. This is equivalent to: >>> if not map.contains_key(key): >>> return map.put(key,value) >>> else: >>> return map.get(key) except that the action is performed atomically. **Warning: This method returns a clone of the previous value, not the original (identically equal) value previously put into the map.** **Warning 2: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations of __hash__ and __eq__ defined in key's class.** :param key: (object), key of the entry. :param value: (object), value of the entry. :param ttl: (int), maximum time in seconds for this entry to stay in the map, if not provided, the value configured on server side configuration will be used (optional). :return: (object), old value of the entry. """ check_not_none(key, "key can't be None") check_not_none(value, "value can't be None") key_data = self._to_data(key) value_data = self._to_data(value) return self._put_if_absent_internal(key_data, value_data, ttl)
[ "def", "put_if_absent", "(", "self", ",", "key", ",", "value", ",", "ttl", "=", "-", "1", ")", ":", "check_not_none", "(", "key", ",", "\"key can't be None\"", ")", "check_not_none", "(", "value", ",", "\"value can't be None\"", ")", "key_data", "=", "self", ".", "_to_data", "(", "key", ")", "value_data", "=", "self", ".", "_to_data", "(", "value", ")", "return", "self", ".", "_put_if_absent_internal", "(", "key_data", ",", "value_data", ",", "ttl", ")" ]
Associates the specified key with the given value if it is not already associated. If ttl is provided, entry will expire and get evicted after the ttl. This is equivalent to: >>> if not map.contains_key(key): >>> return map.put(key,value) >>> else: >>> return map.get(key) except that the action is performed atomically. **Warning: This method returns a clone of the previous value, not the original (identically equal) value previously put into the map.** **Warning 2: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations of __hash__ and __eq__ defined in key's class.** :param key: (object), key of the entry. :param value: (object), value of the entry. :param ttl: (int), maximum time in seconds for this entry to stay in the map, if not provided, the value configured on server side configuration will be used (optional). :return: (object), old value of the entry.
[ "Associates", "the", "specified", "key", "with", "the", "given", "value", "if", "it", "is", "not", "already", "associated", ".", "If", "ttl", "is", "provided", "entry", "will", "expire", "and", "get", "evicted", "after", "the", "ttl", "." ]
python
train
44.774194
6809/MC6809
MC6809/components/mc6809_ops_logic.py
https://github.com/6809/MC6809/blob/6ba2f5106df46689017b5d0b6d84d43b7ee6a240/MC6809/components/mc6809_ops_logic.py#L292-L300
def instruction_ROL_memory(self, opcode, ea, m): """ Rotate memory left """ r = self.ROL(m) # log.debug("$%x ROL memory value $%x << 1 | Carry = $%x and write it to $%x \t| %s" % ( # self.program_counter, # m, r, ea, # self.cfg.mem_info.get_shortest(ea) # )) return ea, r & 0xff
[ "def", "instruction_ROL_memory", "(", "self", ",", "opcode", ",", "ea", ",", "m", ")", ":", "r", "=", "self", ".", "ROL", "(", "m", ")", "# log.debug(\"$%x ROL memory value $%x << 1 | Carry = $%x and write it to $%x \\t| %s\" % (", "# self.program_counter,", "# m, r, ea,", "# self.cfg.mem_info.get_shortest(ea)", "# ))", "return", "ea", ",", "r", "&", "0xff" ]
Rotate memory left
[ "Rotate", "memory", "left" ]
python
train
37.888889
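For reference, a 6809 ROL is a 9-bit rotate through the carry flag (old bit 7 becomes the new carry); a standalone sketch of just the bit manipulation, with the other condition-code updates omitted:

    def rol(value, carry_in):
        # Rotate an 8-bit value left through carry; return (result, carry_out).
        result = ((value << 1) | (1 if carry_in else 0)) & 0xFF
        carry_out = bool(value & 0x80)
        return result, carry_out

    print(rol(0x81, carry_in=False))  # (2, True): bit 7 rotated out into the carry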
JoeVirtual/KonFoo
konfoo/options.py
https://github.com/JoeVirtual/KonFoo/blob/0c62ef5c2bed4deaf908b34082e4de2544532fdc/konfoo/options.py#L72-L87
def verbose_option(default=False): """ Attaches the option ``verbose`` with its *default* value to the keyword arguments when the option does not exist. All positional arguments and keyword arguments are forwarded unchanged. """ def decorator(method): @wraps(method) def wrapper(*args, **kwargs): option = Option.verbose.value kwargs[option] = kwargs.get(option, bool(default)) return method(*args, **kwargs) return wrapper return decorator
[ "def", "verbose_option", "(", "default", "=", "False", ")", ":", "def", "decorator", "(", "method", ")", ":", "@", "wraps", "(", "method", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "option", "=", "Option", ".", "verbose", ".", "value", "kwargs", "[", "option", "]", "=", "kwargs", ".", "get", "(", "option", ",", "bool", "(", "default", ")", ")", "return", "method", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper", "return", "decorator" ]
Attaches the option ``verbose`` with its *default* value to the keyword arguments when the option does not exist. All positional arguments and keyword arguments are forwarded unchanged.
[ "Attaches", "the", "option", "verbose", "with", "its", "*", "default", "*", "value", "to", "the", "keyword", "arguments", "when", "the", "option", "does", "not", "exist", ".", "All", "positional", "arguments", "and", "keyword", "arguments", "are", "forwarded", "unchanged", "." ]
python
train
32
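Usage sketch for the decorator above, with the plain string "verbose" standing in for `Option.verbose.value`:

    from functools import wraps

    def verbose_option(default=False):
        def decorator(method):
            @wraps(method)
            def wrapper(*args, **kwargs):
                kwargs["verbose"] = kwargs.get("verbose", bool(default))
                return method(*args, **kwargs)
            return wrapper
        return decorator

    @verbose_option(default=True)
    def build(name, verbose=False):
        return name, verbose

    print(build("field"))                 # ('field', True)  <- default injected
    print(build("field", verbose=False))  # ('field', False) <- caller's value wins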
pantsbuild/pants
src/python/pants/init/extension_loader.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/init/extension_loader.py#L99-L112
def load_build_configuration_from_source(build_configuration, backends=None): """Installs pants backend packages to provide BUILD file symbols and cli goals. :param BuildConfiguration build_configuration: The BuildConfiguration (for adding aliases). :param backends: An optional list of additional packages to load backends from. :raises: :class:``pants.base.exceptions.BuildConfigurationError`` if there is a problem loading the build configuration. """ # pants.build_graph and pants.core_task must always be loaded, and before any other backends. # TODO: Consider replacing the "backend" nomenclature here. pants.build_graph and # pants.core_tasks aren't really backends. backend_packages = OrderedSet(['pants.build_graph', 'pants.core_tasks'] + (backends or [])) for backend_package in backend_packages: load_backend(build_configuration, backend_package)
[ "def", "load_build_configuration_from_source", "(", "build_configuration", ",", "backends", "=", "None", ")", ":", "# pants.build_graph and pants.core_task must always be loaded, and before any other backends.", "# TODO: Consider replacing the \"backend\" nomenclature here. pants.build_graph and", "# pants.core_tasks aren't really backends.", "backend_packages", "=", "OrderedSet", "(", "[", "'pants.build_graph'", ",", "'pants.core_tasks'", "]", "+", "(", "backends", "or", "[", "]", ")", ")", "for", "backend_package", "in", "backend_packages", ":", "load_backend", "(", "build_configuration", ",", "backend_package", ")" ]
Installs pants backend packages to provide BUILD file symbols and cli goals. :param BuildConfiguration build_configuration: The BuildConfiguration (for adding aliases). :param backends: An optional list of additional packages to load backends from. :raises: :class:``pants.base.exceptions.BuildConfigurationError`` if there is a problem loading the build configuration.
[ "Installs", "pants", "backend", "packages", "to", "provide", "BUILD", "file", "symbols", "and", "cli", "goals", "." ]
python
train
62.357143
cqparts/cqparts
src/cqparts_fasteners/solidtypes/threads/base.py
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts_fasteners/solidtypes/threads/base.py#L26-L204
def profile_to_cross_section(profile, lefthand=False, start_count=1, min_vertices=20): r""" Converts a thread profile to it's equivalent cross-section. **Profile:** The thread profile contains a single wire along the XZ plane (note: wire will be projected onto the XZ plane; Y-coords will be ignored). The profile is expected to be of 1 thread rotation, so it's height (along the Z-axis) is the thread's "pitch". If start_count > 1, then the profile will effectively be duplicated. The resulting cross-section is designed to be swept along a helical path with a pitch of the thread's "lead" (which is {the height of the given profile} * start_count) **Method:** Each edge of the profile is converted to a bezier spline, aproximating its polar plot equivalent. **Resolution:** (via `min_vertices` parameter) Increasing the number of vertices used to define the bezier will increase the resulting thread's accuracy, but cost more to render. min_vertices may also be expressed as a list to set the number of vertices to set for each wire. where: len(min_vertices) == number of edges in profile **Example** .. doctest:: import cadquery from cqparts_fasteners.solidtypes.threads.base import profile_to_cross_section from Helpers import show # doctest: +SKIP profile = cadquery.Workplane("XZ") \ .moveTo(1, 0) \ .lineTo(2, 1).lineTo(1, 2) \ .wire() cross_section = profile_to_cross_section(profile) show(profile) # doctest: +SKIP show(cross_section) # doctest: +SKIP Will result in: .. image:: /_static/img/solidtypes.threads.base.profile_to_cross_section.01.png :param profile: workplane containing wire of thread profile. :type profile: :class:`cadquery.Workplane` :param lefthand: if True, cross-section is made backwards. :type lefthand: :class:`bool` :param start_count: profile is duplicated this many times. :type start_count: :class:`int` :param min_vertices: int or tuple of the desired resolution. :type min_vertices: :class:`int` or :class:`tuple` :return: workplane with a face ready to be swept into a thread. :rtype: :class:`cadquery.Workplane` :raises TypeError: if a problem is found with the given parameters. :raises ValueError: if ``min_vertices`` is a list with elements not equal to the numbmer of wire edges. 
""" # verify parameter(s) if not isinstance(profile, cadquery.Workplane): raise TypeError("profile %r must be a %s instance" % (profile, cadquery.Workplane)) if not isinstance(min_vertices, (int, list, tuple)): raise TypeError("min_vertices %r must be an int, list, or tuple" % (min_vertices)) # get wire from Workplane wire = profile.val() # cadquery.Wire if not isinstance(wire, cadquery.Wire): raise TypeError("a valid profile Wire type could not be found in the given Workplane") profile_bb = wire.BoundingBox() pitch = profile_bb.zmax - profile_bb.zmin lead = pitch * start_count # determine vertices count per edge edges = wire.Edges() vertices_count = None if isinstance(min_vertices, int): # evenly spread vertices count along profile wire # (weighted by the edge's length) vertices_count = [ int(ceil(round(e.Length() / wire.Length(), 7) * min_vertices)) for e in edges ] # rounded for desired contrived results # (trade-off: an error of 1 is of no great consequence) else: # min_vertices is defined per edge (already what we want) if len(min_vertices) != len(edges): raise ValueError( "min_vertices list size does not match number of profile edges: " "len(%r) != %i" % (min_vertices, len(edges)) ) vertices_count = min_vertices # Utilities for building cross-section def get_xz(vertex): if isinstance(vertex, cadquery.Vector): vertex = vertex.wrapped # TODO: remove this, it's messy # where isinstance(vertex, FreeCAD.Base.Vector) return (vertex.x, vertex.z) def cart2polar(x, z, z_offset=0): """ Convert cartesian coordinates to polar coordinates. Uses thread's lead height to give full 360deg translation. """ radius = x angle = ((z + z_offset) / lead) * (2 * pi) # radians if not lefthand: angle = -angle return (radius, angle) def transform(vertex, z_offset=0): # where isinstance(vertex, FreeCAD.Base.Vector) """ Transform profile vertex on the XZ plane to it's equivalent on the cross-section's XY plane """ (radius, angle) = cart2polar(*get_xz(vertex), z_offset=z_offset) return (radius * cos(angle), radius * sin(angle)) # Conversion methods def apply_spline(wp, edge, vert_count, z_offset=0): """ Trace along edge and create a spline from the transformed verteces. """ curve = edge.wrapped.Curve # FreeCADPart.Geom* (depending on type) if edge.geomType() == 'CIRCLE': iter_dist = edge.wrapped.ParameterRange[1] / vert_count else: iter_dist = edge.Length() / vert_count points = [] for j in range(vert_count): dist = (j + 1) * iter_dist vert = curve.value(dist) points.append(transform(vert, z_offset)) return wp.spline(points) def apply_arc(wp, edge, z_offset=0): """ Create an arc using edge's midpoint and endpoint. Only intended for use for vertical lines on the given profile. 
""" return wp.threePointArc( point1=transform(edge.wrapped.valueAt(edge.Length() / 2), z_offset), point2=transform(edge.wrapped.valueAt(edge.Length()), z_offset), ) def apply_radial_line(wp, edge, z_offset=0): """ Create a straight radial line """ return wp.lineTo(*transform(edge.endPoint(), z_offset)) # Build cross-section start_v = edges[0].startPoint().wrapped cross_section = cadquery.Workplane("XY") \ .moveTo(*transform(start_v)) for i in range(start_count): z_offset = i * pitch for (j, edge) in enumerate(wire.Edges()): # where: isinstance(edge, cadquery.Edge) if (edge.geomType() == 'LINE') and (edge.startPoint().x == edge.endPoint().x): # edge is a vertical line, plot a circular arc cross_section = apply_arc(cross_section, edge, z_offset) elif (edge.geomType() == 'LINE') and (edge.startPoint().z == edge.endPoint().z): # edge is a horizontal line, plot a radial line cross_section = apply_radial_line(cross_section, edge, z_offset) else: # create bezier spline along transformed points (default) cross_section = apply_spline(cross_section, edge, vertices_count[j], z_offset) return cross_section.close()
[ "def", "profile_to_cross_section", "(", "profile", ",", "lefthand", "=", "False", ",", "start_count", "=", "1", ",", "min_vertices", "=", "20", ")", ":", "# verify parameter(s)", "if", "not", "isinstance", "(", "profile", ",", "cadquery", ".", "Workplane", ")", ":", "raise", "TypeError", "(", "\"profile %r must be a %s instance\"", "%", "(", "profile", ",", "cadquery", ".", "Workplane", ")", ")", "if", "not", "isinstance", "(", "min_vertices", ",", "(", "int", ",", "list", ",", "tuple", ")", ")", ":", "raise", "TypeError", "(", "\"min_vertices %r must be an int, list, or tuple\"", "%", "(", "min_vertices", ")", ")", "# get wire from Workplane", "wire", "=", "profile", ".", "val", "(", ")", "# cadquery.Wire", "if", "not", "isinstance", "(", "wire", ",", "cadquery", ".", "Wire", ")", ":", "raise", "TypeError", "(", "\"a valid profile Wire type could not be found in the given Workplane\"", ")", "profile_bb", "=", "wire", ".", "BoundingBox", "(", ")", "pitch", "=", "profile_bb", ".", "zmax", "-", "profile_bb", ".", "zmin", "lead", "=", "pitch", "*", "start_count", "# determine vertices count per edge", "edges", "=", "wire", ".", "Edges", "(", ")", "vertices_count", "=", "None", "if", "isinstance", "(", "min_vertices", ",", "int", ")", ":", "# evenly spread vertices count along profile wire", "# (weighted by the edge's length)", "vertices_count", "=", "[", "int", "(", "ceil", "(", "round", "(", "e", ".", "Length", "(", ")", "/", "wire", ".", "Length", "(", ")", ",", "7", ")", "*", "min_vertices", ")", ")", "for", "e", "in", "edges", "]", "# rounded for desired contrived results", "# (trade-off: an error of 1 is of no great consequence)", "else", ":", "# min_vertices is defined per edge (already what we want)", "if", "len", "(", "min_vertices", ")", "!=", "len", "(", "edges", ")", ":", "raise", "ValueError", "(", "\"min_vertices list size does not match number of profile edges: \"", "\"len(%r) != %i\"", "%", "(", "min_vertices", ",", "len", "(", "edges", ")", ")", ")", "vertices_count", "=", "min_vertices", "# Utilities for building cross-section", "def", "get_xz", "(", "vertex", ")", ":", "if", "isinstance", "(", "vertex", ",", "cadquery", ".", "Vector", ")", ":", "vertex", "=", "vertex", ".", "wrapped", "# TODO: remove this, it's messy", "# where isinstance(vertex, FreeCAD.Base.Vector)", "return", "(", "vertex", ".", "x", ",", "vertex", ".", "z", ")", "def", "cart2polar", "(", "x", ",", "z", ",", "z_offset", "=", "0", ")", ":", "\"\"\"\n Convert cartesian coordinates to polar coordinates.\n Uses thread's lead height to give full 360deg translation.\n \"\"\"", "radius", "=", "x", "angle", "=", "(", "(", "z", "+", "z_offset", ")", "/", "lead", ")", "*", "(", "2", "*", "pi", ")", "# radians", "if", "not", "lefthand", ":", "angle", "=", "-", "angle", "return", "(", "radius", ",", "angle", ")", "def", "transform", "(", "vertex", ",", "z_offset", "=", "0", ")", ":", "# where isinstance(vertex, FreeCAD.Base.Vector)", "\"\"\"\n Transform profile vertex on the XZ plane to it's equivalent on\n the cross-section's XY plane\n \"\"\"", "(", "radius", ",", "angle", ")", "=", "cart2polar", "(", "*", "get_xz", "(", "vertex", ")", ",", "z_offset", "=", "z_offset", ")", "return", "(", "radius", "*", "cos", "(", "angle", ")", ",", "radius", "*", "sin", "(", "angle", ")", ")", "# Conversion methods", "def", "apply_spline", "(", "wp", ",", "edge", ",", "vert_count", ",", "z_offset", "=", "0", ")", ":", "\"\"\"\n Trace along edge and create a spline from the transformed verteces.\n \"\"\"", "curve", "=", "edge", 
".", "wrapped", ".", "Curve", "# FreeCADPart.Geom* (depending on type)", "if", "edge", ".", "geomType", "(", ")", "==", "'CIRCLE'", ":", "iter_dist", "=", "edge", ".", "wrapped", ".", "ParameterRange", "[", "1", "]", "/", "vert_count", "else", ":", "iter_dist", "=", "edge", ".", "Length", "(", ")", "/", "vert_count", "points", "=", "[", "]", "for", "j", "in", "range", "(", "vert_count", ")", ":", "dist", "=", "(", "j", "+", "1", ")", "*", "iter_dist", "vert", "=", "curve", ".", "value", "(", "dist", ")", "points", ".", "append", "(", "transform", "(", "vert", ",", "z_offset", ")", ")", "return", "wp", ".", "spline", "(", "points", ")", "def", "apply_arc", "(", "wp", ",", "edge", ",", "z_offset", "=", "0", ")", ":", "\"\"\"\n Create an arc using edge's midpoint and endpoint.\n Only intended for use for vertical lines on the given profile.\n \"\"\"", "return", "wp", ".", "threePointArc", "(", "point1", "=", "transform", "(", "edge", ".", "wrapped", ".", "valueAt", "(", "edge", ".", "Length", "(", ")", "/", "2", ")", ",", "z_offset", ")", ",", "point2", "=", "transform", "(", "edge", ".", "wrapped", ".", "valueAt", "(", "edge", ".", "Length", "(", ")", ")", ",", "z_offset", ")", ",", ")", "def", "apply_radial_line", "(", "wp", ",", "edge", ",", "z_offset", "=", "0", ")", ":", "\"\"\"\n Create a straight radial line\n \"\"\"", "return", "wp", ".", "lineTo", "(", "*", "transform", "(", "edge", ".", "endPoint", "(", ")", ",", "z_offset", ")", ")", "# Build cross-section", "start_v", "=", "edges", "[", "0", "]", ".", "startPoint", "(", ")", ".", "wrapped", "cross_section", "=", "cadquery", ".", "Workplane", "(", "\"XY\"", ")", ".", "moveTo", "(", "*", "transform", "(", "start_v", ")", ")", "for", "i", "in", "range", "(", "start_count", ")", ":", "z_offset", "=", "i", "*", "pitch", "for", "(", "j", ",", "edge", ")", "in", "enumerate", "(", "wire", ".", "Edges", "(", ")", ")", ":", "# where: isinstance(edge, cadquery.Edge)", "if", "(", "edge", ".", "geomType", "(", ")", "==", "'LINE'", ")", "and", "(", "edge", ".", "startPoint", "(", ")", ".", "x", "==", "edge", ".", "endPoint", "(", ")", ".", "x", ")", ":", "# edge is a vertical line, plot a circular arc", "cross_section", "=", "apply_arc", "(", "cross_section", ",", "edge", ",", "z_offset", ")", "elif", "(", "edge", ".", "geomType", "(", ")", "==", "'LINE'", ")", "and", "(", "edge", ".", "startPoint", "(", ")", ".", "z", "==", "edge", ".", "endPoint", "(", ")", ".", "z", ")", ":", "# edge is a horizontal line, plot a radial line", "cross_section", "=", "apply_radial_line", "(", "cross_section", ",", "edge", ",", "z_offset", ")", "else", ":", "# create bezier spline along transformed points (default)", "cross_section", "=", "apply_spline", "(", "cross_section", ",", "edge", ",", "vertices_count", "[", "j", "]", ",", "z_offset", ")", "return", "cross_section", ".", "close", "(", ")" ]
r""" Converts a thread profile to it's equivalent cross-section. **Profile:** The thread profile contains a single wire along the XZ plane (note: wire will be projected onto the XZ plane; Y-coords will be ignored). The profile is expected to be of 1 thread rotation, so it's height (along the Z-axis) is the thread's "pitch". If start_count > 1, then the profile will effectively be duplicated. The resulting cross-section is designed to be swept along a helical path with a pitch of the thread's "lead" (which is {the height of the given profile} * start_count) **Method:** Each edge of the profile is converted to a bezier spline, aproximating its polar plot equivalent. **Resolution:** (via `min_vertices` parameter) Increasing the number of vertices used to define the bezier will increase the resulting thread's accuracy, but cost more to render. min_vertices may also be expressed as a list to set the number of vertices to set for each wire. where: len(min_vertices) == number of edges in profile **Example** .. doctest:: import cadquery from cqparts_fasteners.solidtypes.threads.base import profile_to_cross_section from Helpers import show # doctest: +SKIP profile = cadquery.Workplane("XZ") \ .moveTo(1, 0) \ .lineTo(2, 1).lineTo(1, 2) \ .wire() cross_section = profile_to_cross_section(profile) show(profile) # doctest: +SKIP show(cross_section) # doctest: +SKIP Will result in: .. image:: /_static/img/solidtypes.threads.base.profile_to_cross_section.01.png :param profile: workplane containing wire of thread profile. :type profile: :class:`cadquery.Workplane` :param lefthand: if True, cross-section is made backwards. :type lefthand: :class:`bool` :param start_count: profile is duplicated this many times. :type start_count: :class:`int` :param min_vertices: int or tuple of the desired resolution. :type min_vertices: :class:`int` or :class:`tuple` :return: workplane with a face ready to be swept into a thread. :rtype: :class:`cadquery.Workplane` :raises TypeError: if a problem is found with the given parameters. :raises ValueError: if ``min_vertices`` is a list with elements not equal to the numbmer of wire edges.
[ "r", "Converts", "a", "thread", "profile", "to", "it", "s", "equivalent", "cross", "-", "section", "." ]
python
train
39.083799
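Note: the conversion in the record above hinges on its `cart2polar` / `transform` helpers: a profile point's x coordinate becomes the radius, and its z height becomes a sweep angle proportional to z / lead, so one full lead of height maps to one full turn. A minimal standalone sketch of that mapping (plain Python, mirroring the helpers in the record's code_tokens; `lead`, `lefthand`, and `z_offset` are passed explicitly here instead of being closed over as in the original):

from math import cos, sin, pi

def cart2polar(x, z, lead, lefthand=False, z_offset=0.0):
    """Map a profile point on the XZ plane to (radius, angle) polar coordinates."""
    radius = x
    angle = ((z + z_offset) / lead) * (2 * pi)  # one lead of height == one full turn
    if not lefthand:
        angle = -angle
    return (radius, angle)

def transform(x, z, lead, lefthand=False, z_offset=0.0):
    """Express the polar point back in cartesian XY coordinates."""
    (radius, angle) = cart2polar(x, z, lead, lefthand, z_offset)
    return (radius * cos(angle), radius * sin(angle))

# a point halfway up a single-start profile of pitch 1.0 lands half a turn around:
print(transform(2.0, 0.5, lead=1.0))  # ~(-2.0, 0.0)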
bfarr/kombine
examples/kepler/correlated_likelihood.py
https://github.com/bfarr/kombine/blob/50c946dee5da33e7baab71d9bd6c265ff02ffb13/examples/kepler/correlated_likelihood.py#L8-L23
def generate_covariance(ts, sigma, tau):
    r"""Generates a covariance matrix according to a squared-exponential autocovariance

    .. math::

      \left\langle x_i x_j \right\rangle = \sigma_0^2 \delta_{ij} + \sigma^2 \exp\left[ - \frac{\left| t_i - t_j\right|^2}{2 \tau^2} \right]

    """
    ndim = ts.shape[0]

    tis = ts[:, np.newaxis]
    tjs = ts[np.newaxis, :]

    return sigma*sigma*np.exp(-np.square(tis-tjs)/(2.0*tau*tau))
[ "def", "generate_covariance", "(", "ts", ",", "sigma", ",", "tau", ")", ":", "ndim", "=", "ts", ".", "shape", "[", "0", "]", "tis", "=", "ts", "[", ":", ",", "np", ".", "newaxis", "]", "tjs", "=", "ts", "[", "np", ".", "newaxis", ",", ":", "]", "return", "sigma", "*", "sigma", "*", "np", ".", "exp", "(", "-", "np", ".", "square", "(", "tis", "-", "tjs", ")", "/", "(", "2.0", "*", "tau", "*", "tau", ")", ")" ]
r"""Generates a covariance matrix according to an squared-exponential autocovariance .. math:: \left\langle x_i x_j \right\rangle = \sigma_0^2 \delta_{ij} + \sigma^2 \exp\left[ - \frac{\left| t_i - t_j\right|^2}{2 \tau^2} \right]
[ "r", "Generates", "a", "covariance", "matrix", "according", "to", "an", "squared", "-", "exponential", "autocovariance", "..", "math", "::", "\\", "left", "\\", "langle", "x_i", "x_j", "\\", "right", "\\", "rangle", "=", "\\", "sigma_0^2", "\\", "delta_", "{", "ij", "}", "+", "\\", "sigma^2", "\\", "exp", "\\", "left", "[", "-", "\\", "frac", "{", "\\", "left|", "t_i", "-", "t_j", "\\", "right|^2", "}", "{", "2", "\\", "tau^2", "}", "\\", "right", "]" ]
python
train
27.4375
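Note: the function body in the record above implements only the squared-exponential term of the documented autocovariance; the \sigma_0^2 \delta_{ij} white-noise term from its docstring is not added inside the function (presumably left to the caller). A runnable sanity check of the record's function (NumPy only; the observation times below are made up for illustration):

import numpy as np

def generate_covariance(ts, sigma, tau):
    # squared-exponential kernel: sigma^2 * exp(-|t_i - t_j|^2 / (2 tau^2))
    tis = ts[:, np.newaxis]
    tjs = ts[np.newaxis, :]
    return sigma * sigma * np.exp(-np.square(tis - tjs) / (2.0 * tau * tau))

ts = np.linspace(0.0, 10.0, 5)
cov = generate_covariance(ts, sigma=1.5, tau=2.0)
assert cov.shape == (5, 5)
assert np.allclose(np.diag(cov), 1.5 ** 2)  # diagonal is sigma^2; no sigma_0^2 nugget
assert np.allclose(cov, cov.T)              # symmetric, as a covariance matrix must be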
binux/pyspider
pyspider/libs/response.py
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/libs/response.py#L129-L137
def json(self):
    """Returns the json-decoded content of the response, if any."""
    if hasattr(self, '_json'):
        return self._json
    try:
        self._json = json.loads(self.text or self.content)
    except ValueError:
        self._json = None
    return self._json
[ "def", "json", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'_json'", ")", ":", "return", "self", ".", "_json", "try", ":", "self", ".", "_json", "=", "json", ".", "loads", "(", "self", ".", "text", "or", "self", ".", "content", ")", "except", "ValueError", ":", "self", ".", "_json", "=", "None", "return", "self", ".", "_json" ]
Returns the json-decoded content of the response, if any.
[ "Returns", "the", "json", "-", "encoded", "content", "of", "the", "response", "if", "any", "." ]
python
train
33.666667
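Note: the pattern in the record above is decode-once-and-cache: the parsed value is memoized on the instance via a `_json` attribute, and invalid JSON yields None rather than raising. A self-contained imitation of the same pattern (a plain stand-in class, not pyspider's actual Response; wrapped as a property here for convenience, and without the record's `self.text or self.content` fallback):

import json

class LazyJson(object):
    """Minimal stand-in imitating the caching json accessor above."""

    def __init__(self, text):
        self.text = text

    @property
    def json(self):
        if hasattr(self, '_json'):
            return self._json  # decoded at most once per instance
        try:
            self._json = json.loads(self.text)
        except ValueError:
            self._json = None  # invalid JSON yields None, not an exception
        return self._json

assert LazyJson('{"ok": true}').json == {'ok': True}
assert LazyJson('not json').json is None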