Dataset columns:

    repo              string (lengths 7-54)
    path              string (lengths 4-192)
    url               string (lengths 87-284)
    code              string (lengths 78-104k)
    code_tokens       sequence
    docstring         string (lengths 1-46.9k)
    docstring_tokens  sequence
    language          string (1 class: python)
    partition         string (3 classes: train/valid/test)
zsimic/runez
src/runez/serialize.py
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/serialize.py#L131-L141
def save(self, path=None, fatal=True, logger=None, sort_keys=True, indent=2):
    """
    :param str|None path: Save this serializable to file with 'path' (default: self._path)
    :param bool|None fatal: Abort execution on failure if True
    :param callable|None logger: Logger to use
    :param bool sort_keys: Sort keys
    :param int indent: Indentation to use
    """
    path = path or getattr(self, "_path", None)
    if path:
        return save_json(self.to_dict(), path, fatal=fatal, logger=logger,
                         sort_keys=sort_keys, indent=indent)
python
train
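
A self-contained sketch of the save() contract above: an object exposing to_dict() and an optional _path attribute, persisted as key-sorted, indented JSON. Config and this save_json stub are illustrative stand-ins, not runez's real classes.

import json

def save_json(data, path, fatal=True, logger=None, sort_keys=True, indent=2):
    # Stand-in for runez's save_json, kept to the keyword contract used above.
    with open(path, "w") as f:
        json.dump(data, f, sort_keys=sort_keys, indent=indent)

class Config:
    _path = "config.json"  # default target when save() gets no explicit path

    def __init__(self, **values):
        self.values = values

    def to_dict(self):
        return self.values

    def save(self, path=None, fatal=True, logger=None, sort_keys=True, indent=2):
        path = path or getattr(self, "_path", None)
        if path:
            return save_json(self.to_dict(), path, fatal=fatal, logger=logger,
                             sort_keys=sort_keys, indent=indent)

Config(theme="dark", volume=7).save()  # writes sorted, 2-space-indented config.json
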
INM-6/hybridLFPy
examples/example_microcircuit_params.py
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/example_microcircuit_params.py#L123-L168
def get_T_yX(fname, y, y_in_Y, x_in_X, F_y):
    '''
    compute the cell type specificity, defined as::

        T_yX = K_yX / K_YX = F_y * k_yX / sum_y(F_y*k_yX)

    '''
    def _get_k_yX_mul_F_y(y, y_index, X_index):
        # Load data from json dictionary
        f = open(fname, 'r')
        data = json.load(f)
        f.close()

        # init variables
        k_yX = 0.
        for l in [str(key) for key in data['data'][y]['syn_dict'].keys()]:
            for x in x_in_X[X_index]:
                p_yxL = data['data'][y]['syn_dict'][l][x] / 100.
                k_yL = data['data'][y]['syn_dict'][l]['number of synapses per neuron']
                k_yX += p_yxL * k_yL

        return k_yX * F_y[y_index]

    # container
    T_yX = np.zeros((len(y), len(x_in_X)))

    # iterate over postsynaptic cell types
    for i, y_value in enumerate(y):
        # iterate over presynapse population inds
        for j in range(len(x_in_X)):
            k_yX_mul_F_y = 0
            for k, yy in enumerate(sum(y_in_Y, [])):
                if y_value in yy:
                    for yy_value in yy:
                        ii = np.where(np.array(y) == yy_value)[0][0]
                        k_yX_mul_F_y += _get_k_yX_mul_F_y(yy_value, ii, j)
            if k_yX_mul_F_y != 0:
                T_yX[i, j] = _get_k_yX_mul_F_y(y_value, i, j) / k_yX_mul_F_y

    return T_yX
python
train
OrangeTux/einder
einder/client.py
https://github.com/OrangeTux/einder/blob/deb2c5f79a69b684257fe939659c3bd751556fd5/einder/client.py#L80-L86
def send_key(self, key):
    """ Send a key to the Horizon box. """
    cmd = struct.pack(">BBBBBBH", 4, 1, 0, 0, 0, 0, key)
    self.con.send(cmd)

    cmd = struct.pack(">BBBBBBH", 4, 0, 0, 0, 0, 0, key)
    self.con.send(cmd)
python
train
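
Each packed command above is exactly 8 bytes: six unsigned bytes followed by a big-endian unsigned short carrying the key code. Reading the second byte as press (1) versus release (0) is an inference from the pair of calls. A standalone check of that layout, with a made-up key code:

import struct

KEY = 0xE001  # made-up key code; real codes depend on the Horizon box
press = struct.pack(">BBBBBBH", 4, 1, 0, 0, 0, 0, KEY)    # second byte 1: key down (inferred)
release = struct.pack(">BBBBBBH", 4, 0, 0, 0, 0, 0, KEY)  # second byte 0: key up (inferred)
assert len(press) == len(release) == 8
assert press[-2:] == b"\xe0\x01"  # key code travels big-endian in the last two bytes
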
awacha/sastool
sastool/io/twodim.py
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/twodim.py#L438-L478
def writebdfv2(filename, bdf, bdfext='.bdf', bhfext='.bhf'):
    """Write a version 2 Bessy Data File

    Inputs
    ------
    filename: string
        the name of the output file. One can give the complete header or
        datafile name or just the base name without the extensions.
    bdf: dict
        the BDF structure (in the same format as loaded by ``readbdfv2()``)
    bdfext: string, optional
        the extension of the data file
    bhfext: string, optional
        the extension of the header file

    Output
    ------
    None

    Notes
    -----
    BDFv2 header and scattering data are stored separately in the header
    and the data files. Given the file name both are saved.
    """
    if filename.endswith(bdfext):
        basename = filename[:-len(bdfext)]
    elif filename.endswith(bhfext):
        basename = filename[:-len(bhfext)]
    else:
        basename = filename
    header.writebhfv2(basename + '.bhf', bdf)
    f = open(basename + '.bdf', 'wb')
    keys = ['RAWDATA', 'RAWERROR', 'CORRDATA', 'CORRERROR', 'NANDATA']
    keys.extend([x for x in list(bdf.keys())
                 if isinstance(bdf[x], np.ndarray) and x not in keys])
    for k in keys:
        if k not in list(bdf.keys()):
            continue
        f.write('#%s[%d:%d]\n' % (k, bdf['xdim'], bdf['ydim']))
        f.write(np.rot90(bdf[k], 3).astype('float32').tostring(order='F'))
    f.close()
python
train
mfitzp/padua
padua/utils.py
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/utils.py#L8-L105
def qvalues(pv, m = None, verbose = False, lowmem = False, pi0 = None):
    """
    Copyright (c) 2012, Nicolo Fusi, University of Sheffield
    All rights reserved.

    Estimates q-values from p-values

    Args
    =====
    m: number of tests. If not specified m = pv.size
    verbose: print verbose messages? (default False)
    lowmem: use memory-efficient in-place algorithm
    pi0: if None, it's estimated as suggested in Storey and Tibshirani, 2003.
         For most GWAS this is not necessary, since pi0 is extremely likely
         to be 1

    :param pv:
    :param m:
    :param verbose:
    :param lowmem:
    :param pi0:
    :return:
    """
    assert(pv.min() >= 0 and pv.max() <= 1), "p-values should be between 0 and 1"

    original_shape = pv.shape
    pv = pv.ravel()  # flattens the array in place, more efficient than flatten()

    if m == None:
        m = float(len(pv))
    else:
        # the user has supplied an m
        m *= 1.0

    # if the number of hypotheses is small, just set pi0 to 1
    if len(pv) < 100 and pi0 == None:
        pi0 = 1.0
    elif pi0 != None:
        pi0 = pi0
    else:
        # evaluate pi0 for different lambdas
        pi0 = []
        lam = sp.arange(0, 0.90, 0.01)
        counts = sp.array([(pv > i).sum() for i in sp.arange(0, 0.9, 0.01)])

        for l in range(len(lam)):
            pi0.append(counts[l]/(m*(1-lam[l])))

        pi0 = sp.array(pi0)

        # fit natural cubic spline
        tck = sp.interpolate.splrep(lam, pi0, k = 3)
        pi0 = sp.interpolate.splev(lam[-1], tck)

        if pi0 > 1:
            if verbose:
                print("got pi0 > 1 (%.3f) while estimating qvalues, setting it to 1" % pi0)
            pi0 = 1.0

    assert(pi0 >= 0 and pi0 <= 1), "pi0 is not between 0 and 1: %f" % pi0

    if lowmem:
        # low memory version, only uses 1 pv and 1 qv matrices
        qv = sp.zeros((len(pv),))
        last_pv = pv.argmax()
        qv[last_pv] = (pi0*pv[last_pv]*m)/float(m)
        pv[last_pv] = -sp.inf
        prev_qv = last_pv
        for i in range(int(len(pv))-2, -1, -1):
            cur_max = pv.argmax()
            qv_i = (pi0*m*pv[cur_max]/float(i+1))
            pv[cur_max] = -sp.inf
            qv_i1 = prev_qv
            qv[cur_max] = min(qv_i, qv_i1)
            prev_qv = qv[cur_max]
    else:
        p_ordered = sp.argsort(pv)
        pv = pv[p_ordered]
        qv = pi0 * m/len(pv) * pv
        qv[-1] = min(qv[-1], 1.0)

        for i in range(len(pv)-2, -1, -1):
            qv[i] = min(pi0*m*pv[i]/(i+1.0), qv[i+1])

        # reorder qvalues
        qv_temp = qv.copy()
        qv = sp.zeros_like(qv)
        qv[p_ordered] = qv_temp

    # reshape qvalues
    qv = qv.reshape(original_shape)

    return qv
python
train
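
A small usage sketch for qvalues(). With fewer than 100 p-values and pi0 unset, pi0 is pinned to 1.0, so the result reduces to a Benjamini-Hochberg-style adjustment; this assumes the module's scipy-as-sp import still provides the numpy-style helpers used above.

import numpy as np
from padua.utils import qvalues  # import path per the row above

pvals = np.array([0.001, 0.008, 0.039, 0.041, 0.042, 0.060, 0.074, 0.205])
qvals = qvalues(pvals.copy())    # defensive copy: the lowmem branch writes -inf into pv
assert qvals.shape == pvals.shape
assert np.all(qvals >= pvals)    # with pi0 == 1 the adjustment only moves p-values up
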
mlperf/training
single_stage_detector/ssd/coco.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/single_stage_detector/ssd/coco.py#L200-L209
def loadAnns(self, ids=[]):
    """
    Load anns with the specified ids.
    :param ids (int array)       : integer ids specifying anns
    :return: anns (object array) : loaded ann objects
    """
    if _isArrayLike(ids):
        return [self.anns[id] for id in ids]
    elif type(ids) == int:
        return [self.anns[ids]]
python
train
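
A self-contained sketch of loadAnns' dispatch between array-like and plain-int ids. The _isArrayLike stub mirrors the helper the method depends on, and TinyCOCO plus the annotation ids and contents are invented for illustration.

def _isArrayLike(obj):
    # Mirrors the helper used above: anything iterable with a length.
    return hasattr(obj, '__iter__') and hasattr(obj, '__len__')

class TinyCOCO:
    def __init__(self, anns):
        self.anns = anns  # the real class fills this from an annotation file

    def loadAnns(self, ids=[]):
        if _isArrayLike(ids):
            return [self.anns[id] for id in ids]
        elif type(ids) == int:
            return [self.anns[ids]]

coco = TinyCOCO({17: {'id': 17}, 42: {'id': 42}})
assert coco.loadAnns(ids=[17, 42]) == [{'id': 17}, {'id': 42}]
assert coco.loadAnns(ids=17) == [{'id': 17}]  # a bare int is wrapped in a list
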
pyvisa/pyvisa
pyvisa/ctwrapper/functions.py
https://github.com/pyvisa/pyvisa/blob/b8b2d4371e1f00782856aa9176ff1ced6bcb3798/pyvisa/ctwrapper/functions.py#L1887-L1901
def write_from_file(library, session, filename, count):
    """Take data from a file and write it out synchronously.

    Corresponds to viWriteFromFile function of the VISA library.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param filename: Name of file from which data will be read.
    :param count: Number of bytes to be written.
    :return: Number of bytes actually transferred, return value of the library call.
    :rtype: int, :class:`pyvisa.constants.StatusCode`
    """
    return_count = ViUInt32()
    ret = library.viWriteFromFile(session, filename, count, return_count)
    return return_count, ret
python
train
wummel/patool
patoolib/__init__.py
https://github.com/wummel/patool/blob/d7e64d9fd60faaa4b3f824bd97c43ce59b185c40/patoolib/__init__.py#L529-L546
def _handle_archive(archive, command, verbosity=0, interactive=True,
                    program=None, format=None, compression=None):
    """Test and list archives."""
    if format is None:
        format, compression = get_archive_format(archive)
    check_archive_format(format, compression)
    if command not in ('list', 'test'):
        raise util.PatoolError("invalid archive command `%s'" % command)
    program = find_archive_program(format, command, program=program)
    check_program_compression(archive, command, program, compression)
    get_archive_cmdlist = get_archive_cmdlist_func(program, command, format)
    # prepare keyword arguments for command list
    cmdlist = get_archive_cmdlist(archive, compression, program, verbosity, interactive)
    if cmdlist:
        # an empty command list means the get_archive_cmdlist() function
        # already handled the command (eg. when it's a builtin Python
        # function)
        run_archive_cmdlist(cmdlist, verbosity=verbosity)
python
train
apache/incubator-heron
heron/instance/src/python/utils/metrics/metrics_helper.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/instance/src/python/utils/metrics/metrics_helper.py#L219-L226
def _init_multi_count_metrics(self, pplan_helper):
    """Initializes the default values for a necessary set of MultiCountMetrics"""
    to_init = [self.metrics[i] for i in self.to_multi_init
               if i in self.metrics and isinstance(self.metrics[i], MultiCountMetric)]
    for out_stream in pplan_helper.get_my_spout().outputs:
        stream_id = out_stream.stream.id
        for metric in to_init:
            metric.add_key(stream_id)
python
valid
saltstack/salt
salt/utils/network.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/network.py#L58-L64
def sanitize_host(host):
    '''
    Sanitize host string.
    https://tools.ietf.org/html/rfc1123#section-2.1
    '''
    RFC952_characters = ascii_letters + digits + ".-"
    return "".join([c for c in host[0:255] if c in RFC952_characters])
python
train
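
Given the whitelist above (ASCII letters, digits, '.' and '-', truncated to 255 characters), everything else is silently dropped. A self-contained check; the string import mirrors what the surrounding module is assumed to pull in:

from string import ascii_letters, digits

def _sanitize_host(host):
    # Same body as sanitize_host above, restated so the example runs standalone.
    RFC952_characters = ascii_letters + digits + ".-"
    return "".join([c for c in host[0:255] if c in RFC952_characters])

assert _sanitize_host("salt.example.com") == "salt.example.com"
assert _sanitize_host("bad_host!\u00e9.example.com") == "badhost.example.com"  # '_', '!', 'é' dropped
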
mixmastamyk/fr
fr/__init__.py
https://github.com/mixmastamyk/fr/blob/f96df8ed7210a033b9e711bbed768d4116213bfb/fr/__init__.py#L41-L59
def load_config(options):
    ''' Load options, platform, colors, and icons. '''
    global opts, pform
    opts = options
    pform = options.pform
    global_ns = globals()

    # get colors
    if pform.hicolor:
        global_ns['dim_templ'] = ansi.dim8t
        global_ns['swap_clr_templ'] = ansi.csi8_blk % ansi.blu8
    else:
        global_ns['dim_templ'] = ansi.dim4t
        global_ns['swap_clr_templ'] = ansi.fbblue

    # load icons into module namespace
    for varname in dir(pform):
        if varname.startswith('_') and varname.endswith('ico'):
            global_ns[varname] = getattr(pform, varname)
python
train
kevin1024/vcrpy
vcr/stubs/__init__.py
https://github.com/kevin1024/vcrpy/blob/114fcd29b43c55896aaa6a6613bc7766f2707c8b/vcr/stubs/__init__.py#L129-L135
def _port_postfix(self):
    """
    Returns empty string for the default port and ':port' otherwise
    """
    port = self.real_connection.port
    default_port = {'https': 443, 'http': 80}[self._protocol]
    return ':{}'.format(port) if port != default_port else ''
python
train
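
_port_postfix() only emits ':port' when the port differs from the scheme default; the same rule restated as a free function for illustration, with sample values:

def port_postfix(protocol, port):
    # Same rule as vcrpy's _port_postfix above, lifted out of the class.
    default_port = {'https': 443, 'http': 80}[protocol]
    return ':{}'.format(port) if port != default_port else ''

assert port_postfix('http', 80) == ''         # default port is omitted
assert port_postfix('https', 8443) == ':8443'  # non-default port is kept
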
jmgilman/Neolib
neolib/pyamf/remoting/amf0.py
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/remoting/amf0.py#L21-L37
def authenticateRequest(self, request, service_request, *args, **kwargs):
    """
    Authenticates the request against the service.

    @param request: The AMF request
    @type request: L{Request<pyamf.remoting.Request>}
    """
    username = password = None

    if 'Credentials' in request.headers:
        cred = request.headers['Credentials']

        username = cred['userid']
        password = cred['password']

    return self.gateway.authenticateRequest(service_request, username,
                                            password, *args, **kwargs)
python
train
O365/python-o365
O365/utils/utils.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/utils/utils.py#L1134-L1148
def order_by(self, attribute=None, *, ascending=True):
    """ Applies a order_by clause

    :param str attribute: attribute to apply on
    :param bool ascending: should it apply ascending order or descending
    :rtype: Query
    """
    attribute = self._get_mapping(attribute) or self._attribute
    if attribute:
        self._order_by[attribute] = None if ascending else 'desc'
    else:
        raise ValueError(
            'Attribute property needed. call on_attribute(attribute) '
            'or new(attribute)')
    return self
python
train
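
Because order_by() returns self, calls chain. A self-contained stand-in reproducing that contract; MiniQuery and its trivial _get_mapping are illustrative, not O365's real Query:

class MiniQuery:
    def __init__(self, attribute=None):
        self._attribute = attribute
        self._order_by = {}

    def _get_mapping(self, attribute):
        return attribute  # the real class maps friendly names to API names

    def order_by(self, attribute=None, *, ascending=True):
        attribute = self._get_mapping(attribute) or self._attribute
        if attribute:
            self._order_by[attribute] = None if ascending else 'desc'
        else:
            raise ValueError('Attribute property needed. call '
                             'on_attribute(attribute) or new(attribute)')
        return self

q = MiniQuery().order_by('subject').order_by('receivedDateTime', ascending=False)
assert q._order_by == {'subject': None, 'receivedDateTime': 'desc'}
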
christophertbrown/bioscripts
ctbBio/rRNA_insertions.py
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions.py#L212-L229
def seqs2bool(seqs):
    """
    convert orf and intron information to boolean
    # seqs[id] = [gene, model, [[i-gene_pos, i-model_pos, i-length, iseq, [orfs], [introns]], ...]]
    # seqs[id] = [gene, model, [[i-gene_pos, i-model_pos, i-length, iseq, [orfs], [introns], orfs?, introns?], ...]]
    """
    for seq in seqs:
        for i, ins in enumerate(seqs[seq][2]):
            if len(ins[4]) > 0:
                ins.append(True)
            else:
                ins.append(False)
            if len(ins[5]) > 0:
                ins.append(True)
            else:
                ins.append(False)
            seqs[seq][2][i] = ins
    return seqs
python
train
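
A tiny worked example of seqs2bool() on exactly the structure its docstring documents; the gene, model, and insertion values are fabricated:

from ctbBio.rRNA_insertions import seqs2bool  # import path per the row above

seqs = {"seq1": ["gene1", "model1",
                 [[10, 5, 300, "ACGT", ["orf1"], []]]]}  # one insertion: one orf, no introns
seqs2bool(seqs)  # mutates in place and also returns seqs
assert seqs["seq1"][2][0][6:] == [True, False]  # orfs? -> True, introns? -> False
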
bitesofcode/projexui
projexui/widgets/xorbquerywidget/xorbquickfilterwidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbquerywidget/xorbquickfilterwidget.py#L190-L208
def setQuery(self, query):
    """
    Sets the query information for this filter widget.

    :param      query | <orb.Query> || None
    """
    if query is None:
        return

    count = {}
    for widget in self.findChildren(QWidget):
        column = nativestring(widget.objectName())
        count.setdefault(column, 0)
        count[column] += 1
        success, value, _ = query.findValue(column, count[column])
        if success:
            projexui.setWidgetValue(widget, value)
python
train
jendrikseipp/vulture
vulture/core.py
https://github.com/jendrikseipp/vulture/blob/fed11fb7e7ed065058a9fb1acd10052ece37f984/vulture/core.py#L510-L519
def generic_visit(self, node):
    """Called if no explicit visitor function exists for a node."""
    for _, value in ast.iter_fields(node):
        if isinstance(value, list):
            self._handle_ast_list(value)
            for item in value:
                if isinstance(item, ast.AST):
                    self.visit(item)
        elif isinstance(value, ast.AST):
            self.visit(value)
python
train
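
The generic_visit() above mirrors the standard ast.NodeVisitor fallback traversal, plus vulture's extra _handle_ast_list hook for lists of child nodes. A minimal visitor built on the stdlib's own generic_visit shows the same dispatch pattern:

import ast

class NameCollector(ast.NodeVisitor):
    def __init__(self):
        self.names = []

    def visit_Name(self, node):
        self.names.append(node.id)
        self.generic_visit(node)  # keep walking into child nodes

tree = ast.parse("x = y + z")
collector = NameCollector()
collector.visit(tree)  # nodes without a visit_* handler fall back to generic_visit
assert sorted(collector.names) == ["x", "y", "z"]
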
scour-project/scour
scour/scour.py
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L857-L872
def removeUnreferencedIDs(referencedIDs, identifiedElements):
    """
    Removes the unreferenced ID attributes.

    Returns the number of ID attributes removed
    """
    global _num_ids_removed
    keepTags = ['font']
    num = 0
    for id in identifiedElements:
        node = identifiedElements[id]
        if id not in referencedIDs and node.nodeName not in keepTags:
            node.removeAttribute('id')
            _num_ids_removed += 1
            num += 1
    return num
python
train
PSPC-SPAC-buyandsell/von_agent
von_agent/agent/holder_prover.py
https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/agent/holder_prover.py#L539-L550
def rev_regs(self) -> list:
    """
    Return list of revocation registry identifiers for which HolderProver has tails files.

    :return: list of revocation registry identifiers for which HolderProver has tails files
    """

    LOGGER.debug('HolderProver.rev_regs >>>')

    rv = [basename(f) for f in Tails.links(self._dir_tails)]
    LOGGER.debug('HolderProver.rev_regs <<< %s', rv)
    return rv
python
train
ibelie/typy
typy/google/protobuf/internal/well_known_types.py
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/internal/well_known_types.py#L272-L299
def FromJsonString(self, value):
    """Converts a string to Duration.

    Args:
      value: A string to be converted. The string must end with 's'. Any
          fractional digits (or none) are accepted as long as they fit into
          precision. For example: "1s", "1.01s", "1.0000001s", "-3.100s"

    Raises:
      ParseError: On parsing problems.
    """
    if len(value) < 1 or value[-1] != 's':
        raise ParseError(
            'Duration must end with letter "s": {0}.'.format(value))
    try:
        pos = value.find('.')
        if pos == -1:
            self.seconds = int(value[:-1])
            self.nanos = 0
        else:
            self.seconds = int(value[:pos])
            if value[0] == '-':
                self.nanos = int(round(float('-0{0}'.format(value[pos: -1])) * 1e9))
            else:
                self.nanos = int(round(float('0{0}'.format(value[pos: -1])) * 1e9))
    except ValueError:
        raise ParseError(
            'Couldn\'t parse duration: {0}.'.format(value))
python
valid
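
Per the parsing above, seconds come from the integer part and nanos from the fraction, with the sign applied through the leading '-'. A quick check against the docstring's own examples; this assumes the standard protobuf package's Duration well-known type, which exposes the same FromJsonString:

from google.protobuf.duration_pb2 import Duration

d = Duration()
d.FromJsonString("1.5s")
assert (d.seconds, d.nanos) == (1, 500000000)   # 0.5 s -> 5e8 nanos

d.FromJsonString("-3.100s")
assert (d.seconds, d.nanos) == (-3, -100000000)  # sign carries into nanos
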
cloudsigma/cgroupspy
cgroupspy/nodes.py
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/nodes.py#L137-L149
def create_cgroup(self, name):
    """
    Create a cgroup by name and attach it under this node.
    """
    node = Node(name, parent=self)
    if node in self.children:
        raise RuntimeError('Node {} already exists under {}'.format(name, self.path))

    name = name.encode()
    fp = os.path.join(self.full_path, name)
    os.mkdir(fp)
    self.children.append(node)
    return node
python
train
rasbt/biopandas
biopandas/mol2/pandas_mol2.py
https://github.com/rasbt/biopandas/blob/615a7cf272692c12bbcfd9d1f217eab440120235/biopandas/mol2/pandas_mol2.py#L250-L271
def distance_df(df, xyz=(0.00, 0.00, 0.00)):
    """Computes Euclidean distance between atoms and a 3D point.

    Parameters
    ----------
    df : DataFrame
        DataFrame containing entries similar to the PandasMol2.df
        format for the distance computation to the `xyz` reference
        coordinates.

    xyz : tuple (0.00, 0.00, 0.00)
        X, Y, and Z coordinate of the reference center for the distance
        computation

    Returns
    ---------
    pandas.Series : Pandas Series object containing the Euclidean
        distance between the atoms in the atom section and `xyz`.
    """
    return np.sqrt(np.sum(df[['x', 'y', 'z']]
                          .subtract(xyz, axis=1)**2, axis=1))
python
train
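
A worked example for distance_df() on a hand-built frame: with the default origin reference, each row's distance is simply the vector norm of its coordinates. Calling it as PandasMol2.distance_df assumes it is exposed as a staticmethod on that class, per the row's module path:

import numpy as np
import pandas as pd
from biopandas.mol2 import PandasMol2  # assumed home of distance_df

df = pd.DataFrame({'x': [3.0, 0.0], 'y': [4.0, 0.0], 'z': [0.0, 5.0]})
d = PandasMol2.distance_df(df)             # default reference point (0, 0, 0)
assert np.allclose(d.values, [5.0, 5.0])   # sqrt(3**2 + 4**2) = 5, sqrt(5**2) = 5
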
boriel/zxbasic
asmlex.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/asmlex.py#L293-L297
def t_LP(self, t):
    r'[[(]'
    if t.value != '[' and OPTIONS.bracket.value:
        t.type = 'LPP'
    return t
python
train
Esri/ArcREST
src/arcrest/common/geometry.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/common/geometry.py#L239-L251
def __geomToPointList(self, geom):
    """ converts a geometry object to a common.Geometry object """
    if arcpyFound and isinstance(geom, arcpy.Multipoint):
        feature_geom = []
        fPart = []
        for part in geom:
            fPart = []
            for pnt in part:
                fPart.append(Point(coord=[pnt.X, pnt.Y],
                                   wkid=geom.spatialReference.factoryCode,
                                   z=pnt.Z, m=pnt.M))
            feature_geom.append(fPart)
        return feature_geom
python
train
bitcraft/pyscroll
pyscroll/data.py
https://github.com/bitcraft/pyscroll/blob/b41c1016dfefd0e2d83a14a2ce40d7ad298c5b0f/pyscroll/data.py#L45-L106
def process_animation_queue(self, tile_view):
    """ Given the time and the tile view, process tile changes and return them

    :param tile_view: rect representing tiles on the screen
    :type tile_view: pygame.Rect
    :rtype: list
    """
    # verify that there are tile substitutions ready
    self._update_time()
    try:
        if self._animation_queue[0].next > self._last_time:
            return

    # raised when the animation queue is empty (no animations at all)
    except IndexError:
        return

    new_tiles = list()
    new_tiles_append = new_tiles.append
    tile_layers = tuple(self.visible_tile_layers)
    get_tile_image = self.get_tile_image

    # test if the next scheduled tile change is ready
    while self._animation_queue[0].next <= self._last_time:

        # get the next tile/frame which is ready to be changed
        token = heappop(self._animation_queue)
        next_frame = token.advance(self._last_time)
        heappush(self._animation_queue, token)

        # following line for when all gid positions are known
        # for position in self._tracked_tiles & token.positions:
        for position in token.positions.copy():
            x, y, l = position

            # if this tile is on the buffer (checked by using the tile view)
            if tile_view.collidepoint(x, y):

                # record the location of this tile, in case of a screen wipe, or sprite cover
                self._animated_tile[position] = next_frame.image

                # redraw the entire column of tiles
                for layer in tile_layers:
                    if layer == l:
                        # queue the new animated tile
                        new_tiles_append((x, y, layer, next_frame.image))
                    else:
                        # queue the normal tile
                        image = get_tile_image(x, y, layer)
                        if image:
                            new_tiles_append((x, y, layer, image))

            # not on screen, but was previously. clear it.
            else:
                token.positions.remove(position)

    return new_tiles
python
train
brian-rose/climlab
climlab/process/process.py
https://github.com/brian-rose/climlab/blob/eae188a2ae9308229b8cbb8fe0b65f51b50ee1e6/climlab/process/process.py#L212-L282
def add_subprocess(self, name, proc):
    """Adds a single subprocess to this process.

    :param string name: name of the subprocess
    :param proc:        a Process object
    :type proc:         :class:`~climlab.process.process.Process`
    :raises: :exc:`ValueError` if ``proc`` is not a process

    :Example:

        Replacing an albedo subprocess through adding a subprocess with
        same name::

            >>> from climlab.model.ebm import EBM_seasonal
            >>> from climlab.surface.albedo import StepFunctionAlbedo

            >>> # creating EBM model
            >>> ebm_s = EBM_seasonal()
            >>> print ebm_s

        .. code-block:: none
            :emphasize-lines: 8

            climlab Process of type <class 'climlab.model.ebm.EBM_seasonal'>.
            State variables and domain shapes:
              Ts: (90, 1)
            The subprocess tree:
            top: <class 'climlab.model.ebm.EBM_seasonal'>
               diffusion: <class 'climlab.dynamics.diffusion.MeridionalDiffusion'>
               LW: <class 'climlab.radiation.AplusBT.AplusBT'>
               albedo: <class 'climlab.surface.albedo.P2Albedo'>
               insolation: <class 'climlab.radiation.insolation.DailyInsolation'>

        ::

            >>> # creating and adding albedo feedback subprocess
            >>> step_albedo = StepFunctionAlbedo(state=ebm_s.state, **ebm_s.param)
            >>> ebm_s.add_subprocess('albedo', step_albedo)
            >>>
            >>> print ebm_s

        .. code-block:: none
            :emphasize-lines: 8

            climlab Process of type <class 'climlab.model.ebm.EBM_seasonal'>.
            State variables and domain shapes:
              Ts: (90, 1)
            The subprocess tree:
            top: <class 'climlab.model.ebm.EBM_seasonal'>
               diffusion: <class 'climlab.dynamics.diffusion.MeridionalDiffusion'>
               LW: <class 'climlab.radiation.AplusBT.AplusBT'>
               albedo: <class 'climlab.surface.albedo.StepFunctionAlbedo'>
                  iceline: <class 'climlab.surface.albedo.Iceline'>
                  cold_albedo: <class 'climlab.surface.albedo.ConstantAlbedo'>
                  warm_albedo: <class 'climlab.surface.albedo.P2Albedo'>
               insolation: <class 'climlab.radiation.insolation.DailyInsolation'>

    """
    if isinstance(proc, Process):
        self.subprocess.update({name: proc})
        self.has_process_type_list = False
        # Add subprocess diagnostics to parent
        # (if there are no name conflicts)
        for diagname, value in proc.diagnostics.items():
            #if not (diagname in self.diagnostics or hasattr(self, diagname)):
            #    self.add_diagnostic(diagname, value)
            self.add_diagnostic(diagname, value)
    else:
        raise ValueError('subprocess must be Process object')
python
train
learningequality/ricecooker
ricecooker/classes/questions.py
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/classes/questions.py#L341-L370
def _recursive_url_find(self, item, image_list):
    """
    Recursively traverses a dictionary-like data structure for Khan Academy
    assessment items in order to search for image links in `url` data
    attributes, and if it finds any it adds them to `image_list` and
    rewrites `url` attribute.
    Use cases:
      - `backgroundImage.url` attributes for graphs and images

    Args:
        item (dict): KA assessment item; will be modified in place
        image_list (list): image files (File objects) found during the traversal

    Returns: None
    """
    recursive_fn = partial(self._recursive_url_find, image_list=image_list)
    if isinstance(item, list):
        list(map(recursive_fn, item))
    elif isinstance(item, dict):
        if 'url' in item:
            if item['url']:
                item['url'], image_file = self.set_image(item['url'])
                image_list += image_file
        for field, field_data in item.items():
            if isinstance(field_data, dict):
                self._recursive_url_find(field_data, image_list)
            elif isinstance(field_data, list):
                list(map(recursive_fn, field_data))
python
train
pyca/pyopenssl
src/OpenSSL/crypto.py
https://github.com/pyca/pyopenssl/blob/1fbe064c50fd030948141d7d630673761525b0d0/src/OpenSSL/crypto.py#L2160-L2173
def get_issuer(self):
    """
    Get the CRL's issuer.

    .. versionadded:: 16.1.0

    :rtype: X509Name
    """
    _issuer = _lib.X509_NAME_dup(_lib.X509_CRL_get_issuer(self._crl))
    _openssl_assert(_issuer != _ffi.NULL)
    _issuer = _ffi.gc(_issuer, _lib.X509_NAME_free)
    issuer = X509Name.__new__(X509Name)
    issuer._name = _issuer
    return issuer
python
test
jeffknupp/sandman
sandman/model/models.py
https://github.com/jeffknupp/sandman/blob/253ea4d15cbccd9f0016d66fedd7478614cc0b2f/sandman/model/models.py#L147-L159
def from_dict(self, dictionary):
    """Set a set of attributes which correspond to the
    :class:`sandman.model.Model`'s columns.

    :param dict dictionary: A dictionary of attributes to set on the
        instance whose keys are the column names of
        the :class:`sandman.model.Model`'s underlying database table.
    """
    for column in self.__table__.columns.keys():
        value = dictionary.get(column, None)
        if value:
            setattr(self, column, value)
[ "def", "from_dict", "(", "self", ",", "dictionary", ")", ":", "for", "column", "in", "self", ".", "__table__", ".", "columns", ".", "keys", "(", ")", ":", "value", "=", "dictionary", ".", "get", "(", "column", ",", "None", ")", "if", "value", ":", "setattr", "(", "self", ",", "column", ",", "value", ")" ]
Set a set of attributes which correspond to the :class:`sandman.model.Model`'s columns. :param dict dictionary: A dictionary of attributes to set on the instance whose keys are the column names of the :class:`sandman.model.Model`'s underlying database table.
[ "Set", "a", "set", "of", "attributes", "which", "correspond", "to", "the", ":", "class", ":", "sandman", ".", "model", ".", "Model", "s", "columns", "." ]
python
train
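Because of the `if value:` guard, falsy values (0, '', False) are silently skipped, not set. A self-contained sketch with faked `__table__` scaffolding makes that behavior concrete:

class _FakeColumns:
    def keys(self):
        return ['name', 'age']

class _FakeTable:
    columns = _FakeColumns()

class User:
    __table__ = _FakeTable()

    def from_dict(self, dictionary):
        # Same logic as the sandman method above.
        for column in self.__table__.columns.keys():
            value = dictionary.get(column, None)
            if value:
                setattr(self, column, value)

u = User()
u.from_dict({'name': 'Ada', 'age': 0, 'extra': 'ignored'})
print(getattr(u, 'name', None))   # 'Ada'
print(hasattr(u, 'age'))          # False -- 0 is falsy, so it was skipped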
sio2project/filetracker
filetracker/client/data_store.py
https://github.com/sio2project/filetracker/blob/359b474850622e3d0c25ee2596d7242c02f84efb/filetracker/client/data_store.py#L38-L47
def add_file(self, name, filename, compress_hint=True): """Saves the actual file in the store. ``compress_hint`` suggests whether the file should be compressed before transfer Works like :meth:`add_stream`, but ``filename`` is the name of an existing file in the filesystem. """ return self.add_stream(name, open(filename, 'rb'))
[ "def", "add_file", "(", "self", ",", "name", ",", "filename", ",", "compress_hint", "=", "True", ")", ":", "return", "self", ".", "add_stream", "(", "name", ",", "open", "(", "filename", ",", "'rb'", ")", ")" ]
Saves the actual file in the store. ``compress_hint`` suggests whether the file should be compressed before transfer Works like :meth:`add_stream`, but ``filename`` is the name of an existing file in the filesystem.
[ "Saves", "the", "actual", "file", "in", "the", "store", "." ]
python
train
cggh/scikit-allel
allel/chunked/storage_hdf5.py
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/chunked/storage_hdf5.py#L34-L50
def h5ftmp(**kwargs): """Create an HDF5 file backed by a temporary file.""" # create temporary file name suffix = kwargs.pop('suffix', '.h5') prefix = kwargs.pop('prefix', 'scikit_allel_') tempdir = kwargs.pop('dir', None) fn = tempfile.mktemp(suffix=suffix, prefix=prefix, dir=tempdir) atexit.register(os.remove, fn) # file creation args kwargs['mode'] = 'w' # open HDF5 file h5f = h5py.File(fn, **kwargs) return h5f
[ "def", "h5ftmp", "(", "*", "*", "kwargs", ")", ":", "# create temporary file name", "suffix", "=", "kwargs", ".", "pop", "(", "'suffix'", ",", "'.h5'", ")", "prefix", "=", "kwargs", ".", "pop", "(", "'prefix'", ",", "'scikit_allel_'", ")", "tempdir", "=", "kwargs", ".", "pop", "(", "'dir'", ",", "None", ")", "fn", "=", "tempfile", ".", "mktemp", "(", "suffix", "=", "suffix", ",", "prefix", "=", "prefix", ",", "dir", "=", "tempdir", ")", "atexit", ".", "register", "(", "os", ".", "remove", ",", "fn", ")", "# file creation args", "kwargs", "[", "'mode'", "]", "=", "'w'", "# open HDF5 file", "h5f", "=", "h5py", ".", "File", "(", "fn", ",", "*", "*", "kwargs", ")", "return", "h5f" ]
Create an HDF5 file backed by a temporary file.
[ "Create", "an", "HDF5", "file", "backed", "by", "a", "temporary", "file", "." ]
python
train
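A usage sketch for the temp-backed HDF5 file (import path inferred from the record's module path; the dataset name is arbitrary):

import numpy as np
from allel.chunked.storage_hdf5 import h5ftmp

h5f = h5ftmp(prefix='demo_')                       # opened with mode='w'
h5f.create_dataset('calldata', data=np.arange(10))
print(h5f.filename)                                # e.g. /tmp/demo_....h5
h5f.close()                                        # backing file is removed at interpreter exit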
NLeSC/noodles
examples/soba/soba.py
https://github.com/NLeSC/noodles/blob/3759e24e6e54a3a1a364431309dbb1061f617c04/examples/soba/soba.py#L128-L132
def run(wf, *, display, n_threads=1): """Run the workflow using the dynamic-exclusion worker.""" worker = dynamic_exclusion_worker(display, n_threads) return noodles.Scheduler(error_handler=display.error_handler)\ .run(worker, get_workflow(wf))
[ "def", "run", "(", "wf", ",", "*", ",", "display", ",", "n_threads", "=", "1", ")", ":", "worker", "=", "dynamic_exclusion_worker", "(", "display", ",", "n_threads", ")", "return", "noodles", ".", "Scheduler", "(", "error_handler", "=", "display", ".", "error_handler", ")", ".", "run", "(", "worker", ",", "get_workflow", "(", "wf", ")", ")" ]
Run the workflow using the dynamic-exclusion worker.
[ "Run", "the", "workflow", "using", "the", "dynamic", "-", "exclusion", "worker", "." ]
python
train
onicagroup/runway
runway/templates/stacker/tfstate_blueprints/tf_state.py
https://github.com/onicagroup/runway/blob/3f3549ec3bf6e39b9f27d9738a1847f3a4369e7f/runway/templates/stacker/tfstate_blueprints/tf_state.py#L29-L150
def create_template(self): """Create template (main function called by Stacker).""" template = self.template variables = self.get_variables() self.template.add_version('2010-09-09') self.template.add_description('Terraform State Resources') # Conditions for i in ['BucketName', 'TableName']: template.add_condition( "%sOmitted" % i, Or(Equals(variables[i].ref, ''), Equals(variables[i].ref, 'undefined')) ) # Resources terraformlocktable = template.add_resource( dynamodb.Table( 'TerraformStateTable', AttributeDefinitions=[ dynamodb.AttributeDefinition( AttributeName='LockID', AttributeType='S' ) ], KeySchema=[ dynamodb.KeySchema( AttributeName='LockID', KeyType='HASH' ) ], ProvisionedThroughput=dynamodb.ProvisionedThroughput( ReadCapacityUnits=2, WriteCapacityUnits=2 ), TableName=If( 'TableNameOmitted', NoValue, variables['TableName'].ref ) ) ) template.add_output(Output( '%sName' % terraformlocktable.title, Description='Name of DynamoDB table for Terraform state', Value=terraformlocktable.ref() )) terraformstatebucket = template.add_resource( s3.Bucket( 'TerraformStateBucket', AccessControl=s3.Private, BucketName=If( 'BucketNameOmitted', NoValue, variables['BucketName'].ref ), LifecycleConfiguration=s3.LifecycleConfiguration( Rules=[ s3.LifecycleRule( NoncurrentVersionExpirationInDays=90, Status='Enabled' ) ] ), VersioningConfiguration=s3.VersioningConfiguration( Status='Enabled' ) ) ) template.add_output(Output( '%sName' % terraformstatebucket.title, Description='Name of bucket storing Terraform state', Value=terraformstatebucket.ref() )) template.add_output(Output( '%sArn' % terraformstatebucket.title, Description='Arn of bucket storing Terraform state', Value=terraformstatebucket.get_att('Arn') )) managementpolicy = template.add_resource( iam.ManagedPolicy( 'ManagementPolicy', Description='Managed policy for Terraform state management.', Path='/', PolicyDocument=PolicyDocument( Version='2012-10-17', Statement=[ # https://www.terraform.io/docs/backends/types/s3.html#s3-bucket-permissions Statement( Action=[awacs.s3.ListBucket], Effect=Allow, Resource=[terraformstatebucket.get_att('Arn')] ), Statement( Action=[awacs.s3.GetObject, awacs.s3.PutObject], Effect=Allow, Resource=[ Join('', [terraformstatebucket.get_att('Arn'), '/*']) ] ), Statement( Action=[awacs.dynamodb.GetItem, awacs.dynamodb.PutItem, awacs.dynamodb.DeleteItem], Effect=Allow, Resource=[terraformlocktable.get_att('Arn')] ) ] ) ) ) template.add_output( Output( 'PolicyArn', Description='Managed policy Arn', Value=managementpolicy.ref() ) )
[ "def", "create_template", "(", "self", ")", ":", "template", "=", "self", ".", "template", "variables", "=", "self", ".", "get_variables", "(", ")", "self", ".", "template", ".", "add_version", "(", "'2010-09-09'", ")", "self", ".", "template", ".", "add_description", "(", "'Terraform State Resources'", ")", "# Conditions", "for", "i", "in", "[", "'BucketName'", ",", "'TableName'", "]", ":", "template", ".", "add_condition", "(", "\"%sOmitted\"", "%", "i", ",", "Or", "(", "Equals", "(", "variables", "[", "i", "]", ".", "ref", ",", "''", ")", ",", "Equals", "(", "variables", "[", "i", "]", ".", "ref", ",", "'undefined'", ")", ")", ")", "# Resources", "terraformlocktable", "=", "template", ".", "add_resource", "(", "dynamodb", ".", "Table", "(", "'TerraformStateTable'", ",", "AttributeDefinitions", "=", "[", "dynamodb", ".", "AttributeDefinition", "(", "AttributeName", "=", "'LockID'", ",", "AttributeType", "=", "'S'", ")", "]", ",", "KeySchema", "=", "[", "dynamodb", ".", "KeySchema", "(", "AttributeName", "=", "'LockID'", ",", "KeyType", "=", "'HASH'", ")", "]", ",", "ProvisionedThroughput", "=", "dynamodb", ".", "ProvisionedThroughput", "(", "ReadCapacityUnits", "=", "2", ",", "WriteCapacityUnits", "=", "2", ")", ",", "TableName", "=", "If", "(", "'TableNameOmitted'", ",", "NoValue", ",", "variables", "[", "'TableName'", "]", ".", "ref", ")", ")", ")", "template", ".", "add_output", "(", "Output", "(", "'%sName'", "%", "terraformlocktable", ".", "title", ",", "Description", "=", "'Name of DynamoDB table for Terraform state'", ",", "Value", "=", "terraformlocktable", ".", "ref", "(", ")", ")", ")", "terraformstatebucket", "=", "template", ".", "add_resource", "(", "s3", ".", "Bucket", "(", "'TerraformStateBucket'", ",", "AccessControl", "=", "s3", ".", "Private", ",", "BucketName", "=", "If", "(", "'BucketNameOmitted'", ",", "NoValue", ",", "variables", "[", "'BucketName'", "]", ".", "ref", ")", ",", "LifecycleConfiguration", "=", "s3", ".", "LifecycleConfiguration", "(", "Rules", "=", "[", "s3", ".", "LifecycleRule", "(", "NoncurrentVersionExpirationInDays", "=", "90", ",", "Status", "=", "'Enabled'", ")", "]", ")", ",", "VersioningConfiguration", "=", "s3", ".", "VersioningConfiguration", "(", "Status", "=", "'Enabled'", ")", ")", ")", "template", ".", "add_output", "(", "Output", "(", "'%sName'", "%", "terraformstatebucket", ".", "title", ",", "Description", "=", "'Name of bucket storing Terraform state'", ",", "Value", "=", "terraformstatebucket", ".", "ref", "(", ")", ")", ")", "template", ".", "add_output", "(", "Output", "(", "'%sArn'", "%", "terraformstatebucket", ".", "title", ",", "Description", "=", "'Arn of bucket storing Terraform state'", ",", "Value", "=", "terraformstatebucket", ".", "get_att", "(", "'Arn'", ")", ")", ")", "managementpolicy", "=", "template", ".", "add_resource", "(", "iam", ".", "ManagedPolicy", "(", "'ManagementPolicy'", ",", "Description", "=", "'Managed policy for Terraform state management.'", ",", "Path", "=", "'/'", ",", "PolicyDocument", "=", "PolicyDocument", "(", "Version", "=", "'2012-10-17'", ",", "Statement", "=", "[", "# https://www.terraform.io/docs/backends/types/s3.html#s3-bucket-permissions", "Statement", "(", "Action", "=", "[", "awacs", ".", "s3", ".", "ListBucket", "]", ",", "Effect", "=", "Allow", ",", "Resource", "=", "[", "terraformstatebucket", ".", "get_att", "(", "'Arn'", ")", "]", ")", ",", "Statement", "(", "Action", "=", "[", "awacs", ".", "s3", ".", "GetObject", ",", "awacs", ".", "s3", ".", "PutObject", "]", ",", "Effect", 
"=", "Allow", ",", "Resource", "=", "[", "Join", "(", "''", ",", "[", "terraformstatebucket", ".", "get_att", "(", "'Arn'", ")", ",", "'/*'", "]", ")", "]", ")", ",", "Statement", "(", "Action", "=", "[", "awacs", ".", "dynamodb", ".", "GetItem", ",", "awacs", ".", "dynamodb", ".", "PutItem", ",", "awacs", ".", "dynamodb", ".", "DeleteItem", "]", ",", "Effect", "=", "Allow", ",", "Resource", "=", "[", "terraformlocktable", ".", "get_att", "(", "'Arn'", ")", "]", ")", "]", ")", ")", ")", "template", ".", "add_output", "(", "Output", "(", "'PolicyArn'", ",", "Description", "=", "'Managed policy Arn'", ",", "Value", "=", "managementpolicy", ".", "ref", "(", ")", ")", ")" ]
Create template (main function called by Stacker).
[ "Create", "template", "(", "main", "function", "called", "by", "Stacker", ")", "." ]
python
train
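The core trick in the blueprint above is conditional omission: If('XOmitted', NoValue, ref) lets CloudFormation auto-name a resource when the parameter is empty. A minimal troposphere sketch of the same pattern (resource and parameter names are illustrative):

from troposphere import Template, Parameter, Ref, Equals, If, NoValue
from troposphere import s3

t = Template()
name = t.add_parameter(Parameter('BucketName', Type='String', Default=''))
t.add_condition('BucketNameOmitted', Equals(Ref(name), ''))
t.add_resource(s3.Bucket(
    'StateBucket',
    # When the parameter is empty, NoValue drops the property entirely
    # and CloudFormation generates a bucket name.
    BucketName=If('BucketNameOmitted', NoValue, Ref(name)),
))
print(t.to_json())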
sffjunkie/astral
src/astral.py
https://github.com/sffjunkie/astral/blob/b0aa63fce692357cd33c2bf36c69ed5b6582440c/src/astral.py#L1024-L1060
def daylight(self, date=None, local=True, use_elevation=True): """Calculates the daylight time (the time between sunrise and sunset) :param date: The date for which to calculate daylight. If no date is specified then the current date will be used. :type date: :class:`~datetime.date` :param local: True = Time to be returned in location's time zone; False = Time to be returned in UTC. If not specified then the time will be returned in local time :type local: bool :param use_elevation: True = Return times that allow for the location's elevation; False = Return times that don't use elevation. If not specified then times will take elevation into account. :type use_elevation: bool :returns: A tuple containing the start and end times :rtype: tuple(:class:`~datetime.datetime`, :class:`~datetime.datetime`) """ if local and self.timezone is None: raise ValueError("Local time requested but Location has no timezone set.") if self.astral is None: self.astral = Astral() if date is None: date = datetime.date.today() elevation = self.elevation if use_elevation else 0 start, end = self.astral.daylight_utc(date, self.latitude, self.longitude, observer_elevation=elevation) if local: return start.astimezone(self.tz), end.astimezone(self.tz) else: return start, end
[ "def", "daylight", "(", "self", ",", "date", "=", "None", ",", "local", "=", "True", ",", "use_elevation", "=", "True", ")", ":", "if", "local", "and", "self", ".", "timezone", "is", "None", ":", "raise", "ValueError", "(", "\"Local time requested but Location has no timezone set.\"", ")", "if", "self", ".", "astral", "is", "None", ":", "self", ".", "astral", "=", "Astral", "(", ")", "if", "date", "is", "None", ":", "date", "=", "datetime", ".", "date", ".", "today", "(", ")", "elevation", "=", "self", ".", "elevation", "if", "use_elevation", "else", "0", "start", ",", "end", "=", "self", ".", "astral", ".", "daylight_utc", "(", "date", ",", "self", ".", "latitude", ",", "self", ".", "longitude", ",", "observer_elevation", "=", "elevation", ")", "if", "local", ":", "return", "start", ".", "astimezone", "(", "self", ".", "tz", ")", ",", "end", ".", "astimezone", "(", "self", ".", "tz", ")", "else", ":", "return", "start", ",", "end" ]
Calculates the daylight time (the time between sunrise and sunset) :param date: The date for which to calculate daylight. If no date is specified then the current date will be used. :type date: :class:`~datetime.date` :param local: True = Time to be returned in location's time zone; False = Time to be returned in UTC. If not specified then the time will be returned in local time :type local: bool :param use_elevation: True = Return times that allow for the location's elevation; False = Return times that don't use elevation. If not specified then times will take elevation into account. :type use_elevation: bool :returns: A tuple containing the start and end times :rtype: tuple(:class:`~datetime.datetime`, :class:`~datetime.datetime`)
[ "Calculates", "the", "daylight", "time", "(", "the", "time", "between", "sunrise", "and", "sunset", ")" ]
python
train
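A quick sketch against the astral 1.x API implied by the record (the location tuple values are hypothetical; a timezone must be set for local=True):

import datetime
from astral import Location

loc = Location(('London', 'England', 51.5, -0.12, 'Europe/London', 24))
start, end = loc.daylight(date=datetime.date(2019, 6, 21))
print(end - start)        # day length as a timedelta, in local time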
fastai/fastai
old/fastai/transforms.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/old/fastai/transforms.py#L68-L92
def googlenet_resize(im, targ, min_area_frac, min_aspect_ratio, max_aspect_ratio, flip_hw_p, interpolation=cv2.INTER_AREA): """ Randomly crop an image with an aspect ratio and returns a squared resized image of size targ References: 1. https://arxiv.org/pdf/1409.4842.pdf 2. https://arxiv.org/pdf/1802.07888.pdf """ h,w,*_ = im.shape area = h*w for _ in range(10): targetArea = random.uniform(min_area_frac, 1.0) * area aspectR = random.uniform(min_aspect_ratio, max_aspect_ratio) ww = int(np.sqrt(targetArea * aspectR) + 0.5) hh = int(np.sqrt(targetArea / aspectR) + 0.5) if flip_hw_p: ww, hh = hh, ww if hh <= h and ww <= w: x1 = 0 if w == ww else random.randint(0, w - ww) y1 = 0 if h == hh else random.randint(0, h - hh) out = im[y1:y1 + hh, x1:x1 + ww] out = cv2.resize(out, (targ, targ), interpolation=interpolation) return out out = scale_min(im, targ, interpolation=interpolation) out = center_crop(out) return out
[ "def", "googlenet_resize", "(", "im", ",", "targ", ",", "min_area_frac", ",", "min_aspect_ratio", ",", "max_aspect_ratio", ",", "flip_hw_p", ",", "interpolation", "=", "cv2", ".", "INTER_AREA", ")", ":", "h", ",", "w", ",", "", "*", "_", "=", "im", ".", "shape", "area", "=", "h", "*", "w", "for", "_", "in", "range", "(", "10", ")", ":", "targetArea", "=", "random", ".", "uniform", "(", "min_area_frac", ",", "1.0", ")", "*", "area", "aspectR", "=", "random", ".", "uniform", "(", "min_aspect_ratio", ",", "max_aspect_ratio", ")", "ww", "=", "int", "(", "np", ".", "sqrt", "(", "targetArea", "*", "aspectR", ")", "+", "0.5", ")", "hh", "=", "int", "(", "np", ".", "sqrt", "(", "targetArea", "/", "aspectR", ")", "+", "0.5", ")", "if", "flip_hw_p", ":", "ww", ",", "hh", "=", "hh", ",", "ww", "if", "hh", "<=", "h", "and", "ww", "<=", "w", ":", "x1", "=", "0", "if", "w", "==", "ww", "else", "random", ".", "randint", "(", "0", ",", "w", "-", "ww", ")", "y1", "=", "0", "if", "h", "==", "hh", "else", "random", ".", "randint", "(", "0", ",", "h", "-", "hh", ")", "out", "=", "im", "[", "y1", ":", "y1", "+", "hh", ",", "x1", ":", "x1", "+", "ww", "]", "out", "=", "cv2", ".", "resize", "(", "out", ",", "(", "targ", ",", "targ", ")", ",", "interpolation", "=", "interpolation", ")", "return", "out", "out", "=", "scale_min", "(", "im", ",", "targ", ",", "interpolation", "=", "interpolation", ")", "out", "=", "center_crop", "(", "out", ")", "return", "out" ]
Randomly crops a region of the image with a random aspect ratio and returns a square resized image of size targ
References: 1. https://arxiv.org/pdf/1409.4842.pdf 2. https://arxiv.org/pdf/1802.07888.pdf
[ "Randomly", "crop", "an", "image", "with", "an", "aspect", "ratio", "and", "returns", "a", "squared", "resized", "image", "of", "size", "targ", "References", ":", "1", ".", "https", ":", "//", "arxiv", ".", "org", "/", "pdf", "/", "1409", ".", "4842", ".", "pdf", "2", ".", "https", ":", "//", "arxiv", ".", "org", "/", "pdf", "/", "1802", ".", "07888", ".", "pdf" ]
python
train
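This is the GoogLeNet-style augmentation: sample a target area fraction and aspect ratio, try up to 10 random crops that fit, and fall back to a center crop. A usage sketch, assuming the old fastai 0.7 module layout:

import numpy as np
from fastai.transforms import googlenet_resize    # fastai 0.7 path (assumption)

im = (np.random.rand(300, 400, 3) * 255).astype(np.uint8)
out = googlenet_resize(im, targ=224,
                       min_area_frac=0.08,         # common Inception-crop setting
                       min_aspect_ratio=0.75, max_aspect_ratio=1.333,
                       flip_hw_p=np.random.rand() < 0.5)
print(out.shape)                                   # (224, 224, 3)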
sosreport/sos
sos/policies/__init__.py
https://github.com/sosreport/sos/blob/2ebc04da53dc871c8dd5243567afa4f8592dca29/sos/policies/__init__.py#L70-L74
def is_enabled(self, name): """Check if given service name is enabled """ if self.services and name in self.services: return self.services[name]['config'] == 'enabled' return False
[ "def", "is_enabled", "(", "self", ",", "name", ")", ":", "if", "self", ".", "services", "and", "name", "in", "self", ".", "services", ":", "return", "self", ".", "services", "[", "name", "]", "[", "'config'", "]", "==", "'enabled'", "return", "False" ]
Check if given service name is enabled
[ "Check", "if", "given", "service", "name", "is", "enabled" ]
python
train
googleapis/google-cloud-python
logging/google/cloud/logging/handlers/app_engine.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/logging/google/cloud/logging/handlers/app_engine.py#L88-L103
def get_gae_labels(self): """Return the labels for GAE app. If the trace ID can be detected, it will be included as a label. Currently, no other labels are included. :rtype: dict :returns: Labels for GAE app. """ gae_labels = {} trace_id = get_trace_id() if trace_id is not None: gae_labels[_TRACE_ID_LABEL] = trace_id return gae_labels
[ "def", "get_gae_labels", "(", "self", ")", ":", "gae_labels", "=", "{", "}", "trace_id", "=", "get_trace_id", "(", ")", "if", "trace_id", "is", "not", "None", ":", "gae_labels", "[", "_TRACE_ID_LABEL", "]", "=", "trace_id", "return", "gae_labels" ]
Return the labels for GAE app. If the trace ID can be detected, it will be included as a label. Currently, no other labels are included. :rtype: dict :returns: Labels for GAE app.
[ "Return", "the", "labels", "for", "GAE", "app", "." ]
python
train
googleapis/google-cloud-python
core/google/cloud/client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/core/google/cloud/client.py#L48-L82
def from_service_account_json(cls, json_credentials_path, *args, **kwargs): """Factory to retrieve JSON credentials while creating client. :type json_credentials_path: str :param json_credentials_path: The path to a private key file (this file was given to you when you created the service account). This file must contain a JSON object with a private key and other credentials information (downloaded from the Google APIs console). :type args: tuple :param args: Remaining positional arguments to pass to constructor. :type kwargs: dict :param kwargs: Remaining keyword arguments to pass to constructor. :rtype: :class:`_ClientFactoryMixin` :returns: The client created with the retrieved JSON credentials. :raises TypeError: if there is a conflict with the kwargs and the credentials created by the factory. """ if "credentials" in kwargs: raise TypeError("credentials must not be in keyword arguments") with io.open(json_credentials_path, "r", encoding="utf-8") as json_fi: credentials_info = json.load(json_fi) credentials = service_account.Credentials.from_service_account_info( credentials_info ) if cls._SET_PROJECT: if "project" not in kwargs: kwargs["project"] = credentials_info.get("project_id") kwargs["credentials"] = credentials return cls(*args, **kwargs)
[ "def", "from_service_account_json", "(", "cls", ",", "json_credentials_path", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "\"credentials\"", "in", "kwargs", ":", "raise", "TypeError", "(", "\"credentials must not be in keyword arguments\"", ")", "with", "io", ".", "open", "(", "json_credentials_path", ",", "\"r\"", ",", "encoding", "=", "\"utf-8\"", ")", "as", "json_fi", ":", "credentials_info", "=", "json", ".", "load", "(", "json_fi", ")", "credentials", "=", "service_account", ".", "Credentials", ".", "from_service_account_info", "(", "credentials_info", ")", "if", "cls", ".", "_SET_PROJECT", ":", "if", "\"project\"", "not", "in", "kwargs", ":", "kwargs", "[", "\"project\"", "]", "=", "credentials_info", ".", "get", "(", "\"project_id\"", ")", "kwargs", "[", "\"credentials\"", "]", "=", "credentials", "return", "cls", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Factory to retrieve JSON credentials while creating client. :type json_credentials_path: str :param json_credentials_path: The path to a private key file (this file was given to you when you created the service account). This file must contain a JSON object with a private key and other credentials information (downloaded from the Google APIs console). :type args: tuple :param args: Remaining positional arguments to pass to constructor. :type kwargs: dict :param kwargs: Remaining keyword arguments to pass to constructor. :rtype: :class:`_ClientFactoryMixin` :returns: The client created with the retrieved JSON credentials. :raises TypeError: if there is a conflict with the kwargs and the credentials created by the factory.
[ "Factory", "to", "retrieve", "JSON", "credentials", "while", "creating", "client", "." ]
python
train
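Typical use goes through any concrete client class built on this mixin (the key-file path is hypothetical):

from google.cloud import storage

client = storage.Client.from_service_account_json('key.json')
# 'project' is read from the key file's project_id unless passed explicitly.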
BD2KGenomics/toil-lib
src/toil_lib/tools/mutation_callers.py
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/mutation_callers.py#L93-L129
def run_pindel(job, normal_bam, normal_bai, tumor_bam, tumor_bai, ref, fai): """ Calls Pindel to compute indels / deletions :param JobFunctionWrappingJob job: Passed automatically by Toil :param str normal_bam: Normal BAM FileStoreID :param str normal_bai: Normal BAM index FileStoreID :param str tumor_bam: Tumor BAM FileStoreID :param str tumor_bai: Tumor BAM Index FileStoreID :param str ref: Reference genome FileStoreID :param str fai: Reference index FileStoreID :return: Pindel output (tarball) FileStoreID :rtype: str """ work_dir = job.fileStore.getLocalTempDir() file_ids = [normal_bam, normal_bai, tumor_bam, tumor_bai, ref, fai] file_names = ['normal.bam', 'normal.bai', 'tumor.bam', 'tumor.bai', 'ref.fasta', 'ref.fasta.fai'] for file_store_id, name in zip(file_ids, file_names): job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name)) # Create Pindel config with open(os.path.join(work_dir, 'pindel-config.txt'), 'w') as f: for bam in ['normal', 'tumor']: f.write('/data/{} {} {}\n'.format(bam + '.bam', get_mean_insert_size(work_dir, bam + '.bam'), bam)) # Call: Pindel parameters = ['-f', '/data/ref.fasta', '-i', '/data/pindel-config.txt', '--number_of_threads', str(job.cores), '--minimum_support_for_event', '3', '--report_long_insertions', 'true', '--report_breakpoints', 'true', '-o', 'pindel'] dockerCall(job=job, tool='quay.io/ucsc_cgl/pindel:0.2.5b6--4e8d1b31d4028f464b3409c6558fb9dfcad73f88', workDir=work_dir, parameters=parameters) # Collect output files and write to file store output_files = glob(os.path.join(work_dir, 'pindel*')) tarball_files('pindel.tar.gz', file_paths=output_files, output_dir=work_dir) return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'pindel.tar.gz'))
[ "def", "run_pindel", "(", "job", ",", "normal_bam", ",", "normal_bai", ",", "tumor_bam", ",", "tumor_bai", ",", "ref", ",", "fai", ")", ":", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "file_ids", "=", "[", "normal_bam", ",", "normal_bai", ",", "tumor_bam", ",", "tumor_bai", ",", "ref", ",", "fai", "]", "file_names", "=", "[", "'normal.bam'", ",", "'normal.bai'", ",", "'tumor.bam'", ",", "'tumor.bai'", ",", "'ref.fasta'", ",", "'ref.fasta.fai'", "]", "for", "file_store_id", ",", "name", "in", "zip", "(", "file_ids", ",", "file_names", ")", ":", "job", ".", "fileStore", ".", "readGlobalFile", "(", "file_store_id", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "name", ")", ")", "# Create Pindel config", "with", "open", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'pindel-config.txt'", ")", ",", "'w'", ")", "as", "f", ":", "for", "bam", "in", "[", "'normal'", ",", "'tumor'", "]", ":", "f", ".", "write", "(", "'/data/{} {} {}\\n'", ".", "format", "(", "bam", "+", "'.bam'", ",", "get_mean_insert_size", "(", "work_dir", ",", "bam", "+", "'.bam'", ")", ",", "bam", ")", ")", "# Call: Pindel", "parameters", "=", "[", "'-f'", ",", "'/data/ref.fasta'", ",", "'-i'", ",", "'/data/pindel-config.txt'", ",", "'--number_of_threads'", ",", "str", "(", "job", ".", "cores", ")", ",", "'--minimum_support_for_event'", ",", "'3'", ",", "'--report_long_insertions'", ",", "'true'", ",", "'--report_breakpoints'", ",", "'true'", ",", "'-o'", ",", "'pindel'", "]", "dockerCall", "(", "job", "=", "job", ",", "tool", "=", "'quay.io/ucsc_cgl/pindel:0.2.5b6--4e8d1b31d4028f464b3409c6558fb9dfcad73f88'", ",", "workDir", "=", "work_dir", ",", "parameters", "=", "parameters", ")", "# Collect output files and write to file store", "output_files", "=", "glob", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'pindel*'", ")", ")", "tarball_files", "(", "'pindel.tar.gz'", ",", "file_paths", "=", "output_files", ",", "output_dir", "=", "work_dir", ")", "return", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'pindel.tar.gz'", ")", ")" ]
Calls Pindel to compute indels / deletions :param JobFunctionWrappingJob job: Passed automatically by Toil :param str normal_bam: Normal BAM FileStoreID :param str normal_bai: Normal BAM index FileStoreID :param str tumor_bam: Tumor BAM FileStoreID :param str tumor_bai: Tumor BAM Index FileStoreID :param str ref: Reference genome FileStoreID :param str fai: Reference index FileStoreID :return: Pindel output (tarball) FileStoreID :rtype: str
[ "Calls", "Pindel", "to", "compute", "indels", "/", "deletions" ]
python
test
summanlp/textrank
summa/preprocessing/snowball.py
https://github.com/summanlp/textrank/blob/6844bbe8c4b2b468020ae0dfd6574a743f9ad442/summa/preprocessing/snowball.py#L1875-L2017
def stem(self, word): """ Stem an Hungarian word and return the stemmed form. :param word: The word that is stemmed. :type word: str or unicode :return: The stemmed form. :rtype: unicode """ word = word.lower() r1 = self.__r1_hungarian(word, self.__vowels, self.__digraphs) # STEP 1: Remove instrumental case if r1.endswith(self.__step1_suffixes): for double_cons in self.__double_consonants: if word[-2-len(double_cons):-2] == double_cons: word = "".join((word[:-4], word[-3])) if r1[-2-len(double_cons):-2] == double_cons: r1 = "".join((r1[:-4], r1[-3])) break # STEP 2: Remove frequent cases for suffix in self.__step2_suffixes: if word.endswith(suffix): if r1.endswith(suffix): word = word[:-len(suffix)] r1 = r1[:-len(suffix)] if r1.endswith("\xE1"): word = "".join((word[:-1], "a")) r1 = "".join((r1[:-1], "a")) elif r1.endswith("\xE9"): word = "".join((word[:-1], "e")) r1 = "".join((r1[:-1], "e")) break # STEP 3: Remove special cases for suffix in self.__step3_suffixes: if r1.endswith(suffix): if suffix == "\xE9n": word = "".join((word[:-2], "e")) r1 = "".join((r1[:-2], "e")) else: word = "".join((word[:-len(suffix)], "a")) r1 = "".join((r1[:-len(suffix)], "a")) break # STEP 4: Remove other cases for suffix in self.__step4_suffixes: if r1.endswith(suffix): if suffix == "\xE1stul": word = "".join((word[:-5], "a")) r1 = "".join((r1[:-5], "a")) elif suffix == "\xE9st\xFCl": word = "".join((word[:-5], "e")) r1 = "".join((r1[:-5], "e")) else: word = word[:-len(suffix)] r1 = r1[:-len(suffix)] break # STEP 5: Remove factive case for suffix in self.__step5_suffixes: if r1.endswith(suffix): for double_cons in self.__double_consonants: if word[-1-len(double_cons):-1] == double_cons: word = "".join((word[:-3], word[-2])) if r1[-1-len(double_cons):-1] == double_cons: r1 = "".join((r1[:-3], r1[-2])) break # STEP 6: Remove owned for suffix in self.__step6_suffixes: if r1.endswith(suffix): if suffix in ("\xE1k\xE9", "\xE1\xE9i"): word = "".join((word[:-3], "a")) r1 = "".join((r1[:-3], "a")) elif suffix in ("\xE9k\xE9", "\xE9\xE9i", "\xE9\xE9"): word = "".join((word[:-len(suffix)], "e")) r1 = "".join((r1[:-len(suffix)], "e")) else: word = word[:-len(suffix)] r1 = r1[:-len(suffix)] break # STEP 7: Remove singular owner suffixes for suffix in self.__step7_suffixes: if word.endswith(suffix): if r1.endswith(suffix): if suffix in ("\xE1nk", "\xE1juk", "\xE1m", "\xE1d", "\xE1"): word = "".join((word[:-len(suffix)], "a")) r1 = "".join((r1[:-len(suffix)], "a")) elif suffix in ("\xE9nk", "\xE9j\xFCk", "\xE9m", "\xE9d", "\xE9"): word = "".join((word[:-len(suffix)], "e")) r1 = "".join((r1[:-len(suffix)], "e")) else: word = word[:-len(suffix)] r1 = r1[:-len(suffix)] break # STEP 8: Remove plural owner suffixes for suffix in self.__step8_suffixes: if word.endswith(suffix): if r1.endswith(suffix): if suffix in ("\xE1im", "\xE1id", "\xE1i", "\xE1ink", "\xE1itok", "\xE1ik"): word = "".join((word[:-len(suffix)], "a")) r1 = "".join((r1[:-len(suffix)], "a")) elif suffix in ("\xE9im", "\xE9id", "\xE9i", "\xE9ink", "\xE9itek", "\xE9ik"): word = "".join((word[:-len(suffix)], "e")) r1 = "".join((r1[:-len(suffix)], "e")) else: word = word[:-len(suffix)] r1 = r1[:-len(suffix)] break # STEP 9: Remove plural suffixes for suffix in self.__step9_suffixes: if word.endswith(suffix): if r1.endswith(suffix): if suffix == "\xE1k": word = "".join((word[:-2], "a")) elif suffix == "\xE9k": word = "".join((word[:-2], "e")) else: word = word[:-len(suffix)] break return word
[ "def", "stem", "(", "self", ",", "word", ")", ":", "word", "=", "word", ".", "lower", "(", ")", "r1", "=", "self", ".", "__r1_hungarian", "(", "word", ",", "self", ".", "__vowels", ",", "self", ".", "__digraphs", ")", "# STEP 1: Remove instrumental case", "if", "r1", ".", "endswith", "(", "self", ".", "__step1_suffixes", ")", ":", "for", "double_cons", "in", "self", ".", "__double_consonants", ":", "if", "word", "[", "-", "2", "-", "len", "(", "double_cons", ")", ":", "-", "2", "]", "==", "double_cons", ":", "word", "=", "\"\"", ".", "join", "(", "(", "word", "[", ":", "-", "4", "]", ",", "word", "[", "-", "3", "]", ")", ")", "if", "r1", "[", "-", "2", "-", "len", "(", "double_cons", ")", ":", "-", "2", "]", "==", "double_cons", ":", "r1", "=", "\"\"", ".", "join", "(", "(", "r1", "[", ":", "-", "4", "]", ",", "r1", "[", "-", "3", "]", ")", ")", "break", "# STEP 2: Remove frequent cases", "for", "suffix", "in", "self", ".", "__step2_suffixes", ":", "if", "word", ".", "endswith", "(", "suffix", ")", ":", "if", "r1", ".", "endswith", "(", "suffix", ")", ":", "word", "=", "word", "[", ":", "-", "len", "(", "suffix", ")", "]", "r1", "=", "r1", "[", ":", "-", "len", "(", "suffix", ")", "]", "if", "r1", ".", "endswith", "(", "\"\\xE1\"", ")", ":", "word", "=", "\"\"", ".", "join", "(", "(", "word", "[", ":", "-", "1", "]", ",", "\"a\"", ")", ")", "r1", "=", "\"\"", ".", "join", "(", "(", "r1", "[", ":", "-", "1", "]", ",", "\"a\"", ")", ")", "elif", "r1", ".", "endswith", "(", "\"\\xE9\"", ")", ":", "word", "=", "\"\"", ".", "join", "(", "(", "word", "[", ":", "-", "1", "]", ",", "\"e\"", ")", ")", "r1", "=", "\"\"", ".", "join", "(", "(", "r1", "[", ":", "-", "1", "]", ",", "\"e\"", ")", ")", "break", "# STEP 3: Remove special cases", "for", "suffix", "in", "self", ".", "__step3_suffixes", ":", "if", "r1", ".", "endswith", "(", "suffix", ")", ":", "if", "suffix", "==", "\"\\xE9n\"", ":", "word", "=", "\"\"", ".", "join", "(", "(", "word", "[", ":", "-", "2", "]", ",", "\"e\"", ")", ")", "r1", "=", "\"\"", ".", "join", "(", "(", "r1", "[", ":", "-", "2", "]", ",", "\"e\"", ")", ")", "else", ":", "word", "=", "\"\"", ".", "join", "(", "(", "word", "[", ":", "-", "len", "(", "suffix", ")", "]", ",", "\"a\"", ")", ")", "r1", "=", "\"\"", ".", "join", "(", "(", "r1", "[", ":", "-", "len", "(", "suffix", ")", "]", ",", "\"a\"", ")", ")", "break", "# STEP 4: Remove other cases", "for", "suffix", "in", "self", ".", "__step4_suffixes", ":", "if", "r1", ".", "endswith", "(", "suffix", ")", ":", "if", "suffix", "==", "\"\\xE1stul\"", ":", "word", "=", "\"\"", ".", "join", "(", "(", "word", "[", ":", "-", "5", "]", ",", "\"a\"", ")", ")", "r1", "=", "\"\"", ".", "join", "(", "(", "r1", "[", ":", "-", "5", "]", ",", "\"a\"", ")", ")", "elif", "suffix", "==", "\"\\xE9st\\xFCl\"", ":", "word", "=", "\"\"", ".", "join", "(", "(", "word", "[", ":", "-", "5", "]", ",", "\"e\"", ")", ")", "r1", "=", "\"\"", ".", "join", "(", "(", "r1", "[", ":", "-", "5", "]", ",", "\"e\"", ")", ")", "else", ":", "word", "=", "word", "[", ":", "-", "len", "(", "suffix", ")", "]", "r1", "=", "r1", "[", ":", "-", "len", "(", "suffix", ")", "]", "break", "# STEP 5: Remove factive case", "for", "suffix", "in", "self", ".", "__step5_suffixes", ":", "if", "r1", ".", "endswith", "(", "suffix", ")", ":", "for", "double_cons", "in", "self", ".", "__double_consonants", ":", "if", "word", "[", "-", "1", "-", "len", "(", "double_cons", ")", ":", "-", "1", "]", "==", "double_cons", ":", "word", "=", "\"\"", ".", "join", "(", "(", "word", 
"[", ":", "-", "3", "]", ",", "word", "[", "-", "2", "]", ")", ")", "if", "r1", "[", "-", "1", "-", "len", "(", "double_cons", ")", ":", "-", "1", "]", "==", "double_cons", ":", "r1", "=", "\"\"", ".", "join", "(", "(", "r1", "[", ":", "-", "3", "]", ",", "r1", "[", "-", "2", "]", ")", ")", "break", "# STEP 6: Remove owned", "for", "suffix", "in", "self", ".", "__step6_suffixes", ":", "if", "r1", ".", "endswith", "(", "suffix", ")", ":", "if", "suffix", "in", "(", "\"\\xE1k\\xE9\"", ",", "\"\\xE1\\xE9i\"", ")", ":", "word", "=", "\"\"", ".", "join", "(", "(", "word", "[", ":", "-", "3", "]", ",", "\"a\"", ")", ")", "r1", "=", "\"\"", ".", "join", "(", "(", "r1", "[", ":", "-", "3", "]", ",", "\"a\"", ")", ")", "elif", "suffix", "in", "(", "\"\\xE9k\\xE9\"", ",", "\"\\xE9\\xE9i\"", ",", "\"\\xE9\\xE9\"", ")", ":", "word", "=", "\"\"", ".", "join", "(", "(", "word", "[", ":", "-", "len", "(", "suffix", ")", "]", ",", "\"e\"", ")", ")", "r1", "=", "\"\"", ".", "join", "(", "(", "r1", "[", ":", "-", "len", "(", "suffix", ")", "]", ",", "\"e\"", ")", ")", "else", ":", "word", "=", "word", "[", ":", "-", "len", "(", "suffix", ")", "]", "r1", "=", "r1", "[", ":", "-", "len", "(", "suffix", ")", "]", "break", "# STEP 7: Remove singular owner suffixes", "for", "suffix", "in", "self", ".", "__step7_suffixes", ":", "if", "word", ".", "endswith", "(", "suffix", ")", ":", "if", "r1", ".", "endswith", "(", "suffix", ")", ":", "if", "suffix", "in", "(", "\"\\xE1nk\"", ",", "\"\\xE1juk\"", ",", "\"\\xE1m\"", ",", "\"\\xE1d\"", ",", "\"\\xE1\"", ")", ":", "word", "=", "\"\"", ".", "join", "(", "(", "word", "[", ":", "-", "len", "(", "suffix", ")", "]", ",", "\"a\"", ")", ")", "r1", "=", "\"\"", ".", "join", "(", "(", "r1", "[", ":", "-", "len", "(", "suffix", ")", "]", ",", "\"a\"", ")", ")", "elif", "suffix", "in", "(", "\"\\xE9nk\"", ",", "\"\\xE9j\\xFCk\"", ",", "\"\\xE9m\"", ",", "\"\\xE9d\"", ",", "\"\\xE9\"", ")", ":", "word", "=", "\"\"", ".", "join", "(", "(", "word", "[", ":", "-", "len", "(", "suffix", ")", "]", ",", "\"e\"", ")", ")", "r1", "=", "\"\"", ".", "join", "(", "(", "r1", "[", ":", "-", "len", "(", "suffix", ")", "]", ",", "\"e\"", ")", ")", "else", ":", "word", "=", "word", "[", ":", "-", "len", "(", "suffix", ")", "]", "r1", "=", "r1", "[", ":", "-", "len", "(", "suffix", ")", "]", "break", "# STEP 8: Remove plural owner suffixes", "for", "suffix", "in", "self", ".", "__step8_suffixes", ":", "if", "word", ".", "endswith", "(", "suffix", ")", ":", "if", "r1", ".", "endswith", "(", "suffix", ")", ":", "if", "suffix", "in", "(", "\"\\xE1im\"", ",", "\"\\xE1id\"", ",", "\"\\xE1i\"", ",", "\"\\xE1ink\"", ",", "\"\\xE1itok\"", ",", "\"\\xE1ik\"", ")", ":", "word", "=", "\"\"", ".", "join", "(", "(", "word", "[", ":", "-", "len", "(", "suffix", ")", "]", ",", "\"a\"", ")", ")", "r1", "=", "\"\"", ".", "join", "(", "(", "r1", "[", ":", "-", "len", "(", "suffix", ")", "]", ",", "\"a\"", ")", ")", "elif", "suffix", "in", "(", "\"\\xE9im\"", ",", "\"\\xE9id\"", ",", "\"\\xE9i\"", ",", "\"\\xE9ink\"", ",", "\"\\xE9itek\"", ",", "\"\\xE9ik\"", ")", ":", "word", "=", "\"\"", ".", "join", "(", "(", "word", "[", ":", "-", "len", "(", "suffix", ")", "]", ",", "\"e\"", ")", ")", "r1", "=", "\"\"", ".", "join", "(", "(", "r1", "[", ":", "-", "len", "(", "suffix", ")", "]", ",", "\"e\"", ")", ")", "else", ":", "word", "=", "word", "[", ":", "-", "len", "(", "suffix", ")", "]", "r1", "=", "r1", "[", ":", "-", "len", "(", "suffix", ")", "]", "break", "# STEP 9: Remove plural suffixes", "for", "suffix", 
"in", "self", ".", "__step9_suffixes", ":", "if", "word", ".", "endswith", "(", "suffix", ")", ":", "if", "r1", ".", "endswith", "(", "suffix", ")", ":", "if", "suffix", "==", "\"\\xE1k\"", ":", "word", "=", "\"\"", ".", "join", "(", "(", "word", "[", ":", "-", "2", "]", ",", "\"a\"", ")", ")", "elif", "suffix", "==", "\"\\xE9k\"", ":", "word", "=", "\"\"", ".", "join", "(", "(", "word", "[", ":", "-", "2", "]", ",", "\"e\"", ")", ")", "else", ":", "word", "=", "word", "[", ":", "-", "len", "(", "suffix", ")", "]", "break", "return", "word" ]
Stem an Hungarian word and return the stemmed form. :param word: The word that is stemmed. :type word: str or unicode :return: The stemmed form. :rtype: unicode
[ "Stem", "an", "Hungarian", "word", "and", "return", "the", "stemmed", "form", "." ]
python
train
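The record above appears to be summa's vendored copy of the NLTK Snowball stemmers; the standalone NLTK package exposes the same Hungarian algorithm, which gives a quick way to exercise these suffix-stripping steps:

from nltk.stem.snowball import SnowballStemmer

stemmer = SnowballStemmer('hungarian')
for word in ['babakocsi', 'babakocsit', 'babakocsival']:
    print(word, '->', stemmer.stem(word))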
gwastro/pycbc
pycbc/inference/io/base_mcmc.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/io/base_mcmc.py#L35-L47
def write_resume_point(self): """Keeps a list of the number of iterations that were in a file when a run was resumed from a checkpoint.""" try: resume_pts = self.attrs["resume_points"].tolist() except KeyError: resume_pts = [] try: niterations = self.niterations except KeyError: niterations = 0 resume_pts.append(niterations) self.attrs["resume_points"] = resume_pts
[ "def", "write_resume_point", "(", "self", ")", ":", "try", ":", "resume_pts", "=", "self", ".", "attrs", "[", "\"resume_points\"", "]", ".", "tolist", "(", ")", "except", "KeyError", ":", "resume_pts", "=", "[", "]", "try", ":", "niterations", "=", "self", ".", "niterations", "except", "KeyError", ":", "niterations", "=", "0", "resume_pts", ".", "append", "(", "niterations", ")", "self", ".", "attrs", "[", "\"resume_points\"", "]", "=", "resume_pts" ]
Keeps a list of the number of iterations that were in a file when a run was resumed from a checkpoint.
[ "Keeps", "a", "list", "of", "the", "number", "of", "iterations", "that", "were", "in", "a", "file", "when", "a", "run", "was", "resumed", "from", "a", "checkpoint", "." ]
python
train
IdentityPython/pysaml2
src/saml2/soap.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/soap.py#L253-L280
def soap_fault(message=None, actor=None, code=None, detail=None): """ Create a SOAP Fault message :param message: Human readable error message :param actor: Who discovered the error :param code: Error code :param detail: More specific error message :return: A SOAP Fault message as a string """ _string = _actor = _code = _detail = None if message: _string = soapenv.Fault_faultstring(text=message) if actor: _actor = soapenv.Fault_faultactor(text=actor) if code: _code = soapenv.Fault_faultcode(text=code) if detail: _detail = soapenv.Fault_detail(text=detail) fault = soapenv.Fault( faultcode=_code, faultstring=_string, faultactor=_actor, detail=_detail, ) return "%s" % fault
[ "def", "soap_fault", "(", "message", "=", "None", ",", "actor", "=", "None", ",", "code", "=", "None", ",", "detail", "=", "None", ")", ":", "_string", "=", "_actor", "=", "_code", "=", "_detail", "=", "None", "if", "message", ":", "_string", "=", "soapenv", ".", "Fault_faultstring", "(", "text", "=", "message", ")", "if", "actor", ":", "_actor", "=", "soapenv", ".", "Fault_faultactor", "(", "text", "=", "actor", ")", "if", "code", ":", "_code", "=", "soapenv", ".", "Fault_faultcode", "(", "text", "=", "code", ")", "if", "detail", ":", "_detail", "=", "soapenv", ".", "Fault_detail", "(", "text", "=", "detail", ")", "fault", "=", "soapenv", ".", "Fault", "(", "faultcode", "=", "_code", ",", "faultstring", "=", "_string", ",", "faultactor", "=", "_actor", ",", "detail", "=", "_detail", ",", ")", "return", "\"%s\"", "%", "fault" ]
Create a SOAP Fault message :param message: Human readable error message :param actor: Who discovered the error :param code: Error code :param detail: More specific error message :return: A SOAP Fault message as a string
[ "Create", "a", "SOAP", "Fault", "message" ]
python
train
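A small usage sketch (import path per the record; the actor URL is hypothetical):

from saml2.soap import soap_fault

xml = soap_fault(message='Signature verification failed',
                 actor='https://idp.example.com',
                 code='soap:Client')
print(xml)    # serialized <Fault> element with only the populated children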
Microsoft/nni
tools/nni_trial_tool/hdfsClientUtility.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/tools/nni_trial_tool/hdfsClientUtility.py#L48-L67
def copyHdfsFileToLocal(hdfsFilePath, localFilePath, hdfsClient, override=True): '''Copy file from HDFS to local''' if not hdfsClient.exists(hdfsFilePath): raise Exception('HDFS file {} does not exist!'.format(hdfsFilePath)) try: file_status = hdfsClient.get_file_status(hdfsFilePath) if file_status.type != 'FILE': raise Exception('HDFS file path {} is not a file'.format(hdfsFilePath)) except Exception as exception: nni_log(LogType.Error, 'Get hdfs file {0} status error: {1}'.format(hdfsFilePath, str(exception))) raise exception if os.path.exists(localFilePath) and override: os.remove(localFilePath) try: hdfsClient.copy_to_local(hdfsFilePath, localFilePath) except Exception as exception: nni_log(LogType.Error, 'Copy hdfs file {0} to {1} error: {2}'.format(hdfsFilePath, localFilePath, str(exception))) raise exception nni_log(LogType.Info, 'Successfully copied hdfs file {0} to {1}, {2} bytes'.format(hdfsFilePath, localFilePath, file_status.length))
[ "def", "copyHdfsFileToLocal", "(", "hdfsFilePath", ",", "localFilePath", ",", "hdfsClient", ",", "override", "=", "True", ")", ":", "if", "not", "hdfsClient", ".", "exists", "(", "hdfsFilePath", ")", ":", "raise", "Exception", "(", "'HDFS file {} does not exist!'", ".", "format", "(", "hdfsFilePath", ")", ")", "try", ":", "file_status", "=", "hdfsClient", ".", "get_file_status", "(", "hdfsFilePath", ")", "if", "file_status", ".", "type", "!=", "'FILE'", ":", "raise", "Exception", "(", "'HDFS file path {} is not a file'", ".", "format", "(", "hdfsFilePath", ")", ")", "except", "Exception", "as", "exception", ":", "nni_log", "(", "LogType", ".", "Error", ",", "'Get hdfs file {0} status error: {1}'", ".", "format", "(", "hdfsFilePath", ",", "str", "(", "exception", ")", ")", ")", "raise", "exception", "if", "os", ".", "path", ".", "exists", "(", "localFilePath", ")", "and", "override", ":", "os", ".", "remove", "(", "localFilePath", ")", "try", ":", "hdfsClient", ".", "copy_to_local", "(", "hdfsFilePath", ",", "localFilePath", ")", "except", "Exception", "as", "exception", ":", "nni_log", "(", "LogType", ".", "Error", ",", "'Copy hdfs file {0} to {1} error: {2}'", ".", "format", "(", "hdfsFilePath", ",", "localFilePath", ",", "str", "(", "exception", ")", ")", ")", "raise", "exception", "nni_log", "(", "LogType", ".", "Info", ",", "'Successfully copied hdfs file {0} to {1}, {2} bytes'", ".", "format", "(", "hdfsFilePath", ",", "localFilePath", ",", "file_status", ".", "length", ")", ")" ]
Copy file from HDFS to local
[ "Copy", "file", "from", "HDFS", "to", "local" ]
python
train
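The client argument is duck-typed; a pyhdfs-style client matches the three calls used above (exists, get_file_status, copy_to_local). A hedged sketch with a hypothetical namenode endpoint:

import pyhdfs   # assumption: any client exposing those three calls works

client = pyhdfs.HdfsClient(hosts='namenode.example.com:50070',
                           user_name='hadoop')
copyHdfsFileToLocal('/nni/experiments/exp1/trial.log',
                    '/tmp/trial.log', client)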
geophysics-ubonn/crtomo_tools
lib/crtomo/tdManager.py
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/tdManager.py#L1686-L1698
def register_forward_model(self, pid_mag, pid_pha): """Register parameter sets as the forward models for magnitude and phase Parameters ---------- pid_mag: int parameter id corresponding to the magnitude model pid_pha: int parameter id corresponding to the phase model """ self.register_magnitude_model(pid_mag) self.register_phase_model(pid_pha)
[ "def", "register_forward_model", "(", "self", ",", "pid_mag", ",", "pid_pha", ")", ":", "self", ".", "register_magnitude_model", "(", "pid_mag", ")", "self", ".", "register_phase_model", "(", "pid_pha", ")" ]
Register parameter sets as the forward models for magnitude and phase Parameters ---------- pid_mag: int parameter id corresponding to the magnitude model pid_pha: int parameter id corresponding to the phase model
[ "Register", "parameter", "sets", "as", "the", "forward", "models", "for", "magnitude", "and", "phase" ]
python
train
dmwm/DBS
Server/Python/src/dbs/business/DBSMigrate.py
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/business/DBSMigrate.py#L77-L120
def prepareDatasetMigrationList(self, conn, request): """ Prepare the ordered lists of blocks based on input DATASET (note Block is different) 1. Get list of blocks from source 2. Check and see if these blocks are already at DST 3. Check if dataset has parents 4. Check if parent blocks are already at DST """ ordered_dict = {} order_counter = 0 srcdataset = request["migration_input"] url = request["migration_url"] try: tmp_ordered_dict = self.processDatasetBlocks(url, conn, srcdataset, order_counter) if tmp_ordered_dict != {}: ordered_dict.update(tmp_ordered_dict) self.logger.debug("ordered_dict length at level %s" %order_counter) self.logger.debug(len(ordered_dict)) else: #return {} m = 'Requested dataset %s is already in destination' %srcdataset dbsExceptionHandler('dbsException-invalid-input2', message=m, serverError=m) # Now process the parent datasets parent_ordered_dict = self.getParentDatasetsOrderedList(url, conn, srcdataset, order_counter+1) if parent_ordered_dict != {}: ordered_dict.update(parent_ordered_dict) self.logger.debug("***** parent ordered_dict length at level %s ******" %(order_counter+1)) self.logger.debug(len(ordered_dict)) return remove_duplicated_items(ordered_dict) except dbsException: raise except Exception as ex: if 'urlopen error' in str(ex): message='Connection to source DBS server refused. Check your source url.' elif 'Bad Request' in str(ex): message='cannot get data from the source DBS server. Check your migration input.' else: message='Failed to make a dataset migration list.' dbsExceptionHandler('dbsException-invalid-input2', \ serverError="""DBSMigrate/prepareDatasetMigrationList failed to prepare ordered block list: %s""" %str(ex), message=message)
[ "def", "prepareDatasetMigrationList", "(", "self", ",", "conn", ",", "request", ")", ":", "ordered_dict", "=", "{", "}", "order_counter", "=", "0", "srcdataset", "=", "request", "[", "\"migration_input\"", "]", "url", "=", "request", "[", "\"migration_url\"", "]", "try", ":", "tmp_ordered_dict", "=", "self", ".", "processDatasetBlocks", "(", "url", ",", "conn", ",", "srcdataset", ",", "order_counter", ")", "if", "tmp_ordered_dict", "!=", "{", "}", ":", "ordered_dict", ".", "update", "(", "tmp_ordered_dict", ")", "self", ".", "logger", ".", "debug", "(", "\"ordered_dict length at level %s\"", "%", "order_counter", ")", "self", ".", "logger", ".", "debug", "(", "len", "(", "ordered_dict", ")", ")", "else", ":", "#return {}", "m", "=", "'Requested dataset %s is already in destination'", "%", "srcdataset", "dbsExceptionHandler", "(", "'dbsException-invalid-input2'", ",", "message", "=", "m", ",", "serverError", "=", "m", ")", "# Now process the parent datasets", "parent_ordered_dict", "=", "self", ".", "getParentDatasetsOrderedList", "(", "url", ",", "conn", ",", "srcdataset", ",", "order_counter", "+", "1", ")", "if", "parent_ordered_dict", "!=", "{", "}", ":", "ordered_dict", ".", "update", "(", "parent_ordered_dict", ")", "self", ".", "logger", ".", "debug", "(", "\"***** parent ordered_dict length at level %s ******\"", "%", "(", "order_counter", "+", "1", ")", ")", "self", ".", "logger", ".", "debug", "(", "len", "(", "ordered_dict", ")", ")", "return", "remove_duplicated_items", "(", "ordered_dict", ")", "except", "dbsException", ":", "raise", "except", "Exception", "as", "ex", ":", "if", "'urlopen error'", "in", "str", "(", "ex", ")", ":", "message", "=", "'Connection to source DBS server refused. Check your source url.'", "elif", "'Bad Request'", "in", "str", "(", "ex", ")", ":", "message", "=", "'cannot get data from the source DBS server. Check your migration input.'", "else", ":", "message", "=", "'Failed to make a dataset migration list.'", "dbsExceptionHandler", "(", "'dbsException-invalid-input2'", ",", "serverError", "=", "\"\"\"DBSMigrate/prepareDatasetMigrationList failed\n to prepare ordered block list: %s\"\"\"", "%", "str", "(", "ex", ")", ",", "message", "=", "message", ")" ]
Prepare the ordered lists of blocks based on an input DATASET (note: Block input is handled differently) 1. Get the list of blocks from the source 2. Check whether these blocks are already at the DST 3. Check if the dataset has parents 4. Check if the parent blocks are already at the DST
[ "Prepare", "the", "ordered", "lists", "of", "blocks", "based", "on", "input", "DATASET", "(", "note", "Block", "is", "different", ")", "1", ".", "Get", "list", "of", "blocks", "from", "source", "2", ".", "Check", "and", "see", "if", "these", "blocks", "are", "already", "at", "DST", "3", ".", "Check", "if", "dataset", "has", "parents", "4", ".", "Check", "if", "parent", "blocks", "are", "already", "at", "DST" ]
python
train
cds-astro/mocpy
mocpy/moc/moc.py
https://github.com/cds-astro/mocpy/blob/09472cabe537f6bfdb049eeea64d3ea57b391c21/mocpy/moc/moc.py#L646-L712
def plot(self, title='MOC', frame=None): """ Plot the MOC object using a mollweide projection. **Deprecated**: New `fill` and `border` methods produce more reliable results and allow you to specify additional matplotlib style parameters. Parameters ---------- title : str The title of the plot frame : `astropy.coordinates.BaseCoordinateFrame`, optional Describes the coordinate system the plot will be (ICRS, Galactic are the only coordinate systems supported). """ frame = ICRS() if frame is None else frame from matplotlib.colors import LinearSegmentedColormap import matplotlib.pyplot as plt plot_order = 8 if self.max_order > plot_order: plotted_moc = self.degrade_to_order(plot_order) else: plotted_moc = self num_pixels_map = 1024 delta = 2. * np.pi / num_pixels_map x = np.arange(-np.pi, np.pi, delta) y = np.arange(-np.pi/2, np.pi/2, delta) lon_rad, lat_rad = np.meshgrid(x, y) hp = HEALPix(nside=(1 << plotted_moc.max_order), order='nested') if frame and not isinstance(frame, BaseCoordinateFrame): raise ValueError("Only Galactic/ICRS coordinate systems are supported." "Please set `coord` to either 'C' or 'G'.") pix_map = hp.lonlat_to_healpix(lon_rad * u.rad, lat_rad * u.rad) m = np.zeros(nside2npix(1 << plotted_moc.max_order)) pix_id = plotted_moc._best_res_pixels() # change the HEALPix cells if the frame of the MOC is not the same as the one associated with the plot method. if isinstance(frame, Galactic): lon, lat = hp.boundaries_lonlat(pix_id, step=2) sky_crd = SkyCoord(lon, lat, unit='deg') pix_id = hp.lonlat_to_healpix(sky_crd.galactic.l, sky_crd.galactic.b) m[pix_id] = 1 z = np.flip(m[pix_map], axis=1) plt.figure(figsize=(10, 10)) ax = plt.subplot(111, projection="mollweide") ax.set_xticklabels(['150°', '120°', '90°', '60°', '30°', '0°', '330°', '300°', '270°', '240°', '210°', '180°']) color_map = LinearSegmentedColormap.from_list('w2r', ['#eeeeee', '#aa0000']) color_map.set_under('w') color_map.set_bad('gray') ax.pcolormesh(x, y, z, cmap=color_map, vmin=0, vmax=1) ax.tick_params(labelsize=14, labelcolor='#000000') plt.title(title) plt.grid(True, linestyle='--', linewidth=1, color='#555555') plt.show()
[ "def", "plot", "(", "self", ",", "title", "=", "'MOC'", ",", "frame", "=", "None", ")", ":", "frame", "=", "ICRS", "(", ")", "if", "frame", "is", "None", "else", "frame", "from", "matplotlib", ".", "colors", "import", "LinearSegmentedColormap", "import", "matplotlib", ".", "pyplot", "as", "plt", "plot_order", "=", "8", "if", "self", ".", "max_order", ">", "plot_order", ":", "plotted_moc", "=", "self", ".", "degrade_to_order", "(", "plot_order", ")", "else", ":", "plotted_moc", "=", "self", "num_pixels_map", "=", "1024", "delta", "=", "2.", "*", "np", ".", "pi", "/", "num_pixels_map", "x", "=", "np", ".", "arange", "(", "-", "np", ".", "pi", ",", "np", ".", "pi", ",", "delta", ")", "y", "=", "np", ".", "arange", "(", "-", "np", ".", "pi", "/", "2", ",", "np", ".", "pi", "/", "2", ",", "delta", ")", "lon_rad", ",", "lat_rad", "=", "np", ".", "meshgrid", "(", "x", ",", "y", ")", "hp", "=", "HEALPix", "(", "nside", "=", "(", "1", "<<", "plotted_moc", ".", "max_order", ")", ",", "order", "=", "'nested'", ")", "if", "frame", "and", "not", "isinstance", "(", "frame", ",", "BaseCoordinateFrame", ")", ":", "raise", "ValueError", "(", "\"Only Galactic/ICRS coordinate systems are supported.\"", "\"Please set `coord` to either 'C' or 'G'.\"", ")", "pix_map", "=", "hp", ".", "lonlat_to_healpix", "(", "lon_rad", "*", "u", ".", "rad", ",", "lat_rad", "*", "u", ".", "rad", ")", "m", "=", "np", ".", "zeros", "(", "nside2npix", "(", "1", "<<", "plotted_moc", ".", "max_order", ")", ")", "pix_id", "=", "plotted_moc", ".", "_best_res_pixels", "(", ")", "# change the HEALPix cells if the frame of the MOC is not the same as the one associated with the plot method.", "if", "isinstance", "(", "frame", ",", "Galactic", ")", ":", "lon", ",", "lat", "=", "hp", ".", "boundaries_lonlat", "(", "pix_id", ",", "step", "=", "2", ")", "sky_crd", "=", "SkyCoord", "(", "lon", ",", "lat", ",", "unit", "=", "'deg'", ")", "pix_id", "=", "hp", ".", "lonlat_to_healpix", "(", "sky_crd", ".", "galactic", ".", "l", ",", "sky_crd", ".", "galactic", ".", "b", ")", "m", "[", "pix_id", "]", "=", "1", "z", "=", "np", ".", "flip", "(", "m", "[", "pix_map", "]", ",", "axis", "=", "1", ")", "plt", ".", "figure", "(", "figsize", "=", "(", "10", ",", "10", ")", ")", "ax", "=", "plt", ".", "subplot", "(", "111", ",", "projection", "=", "\"mollweide\"", ")", "ax", ".", "set_xticklabels", "(", "[", "'150°',", " ", "120°', ", "'", "0°', '", "6", "°', '3", "0", "', '0°", "'", " '330", "°", ", '300°", "'", " '270°'", ",", "'240°',", " ", "210°', ", "'", "80°'])", "", "", "", "", "color_map", "=", "LinearSegmentedColormap", ".", "from_list", "(", "'w2r'", ",", "[", "'#eeeeee'", ",", "'#aa0000'", "]", ")", "color_map", ".", "set_under", "(", "'w'", ")", "color_map", ".", "set_bad", "(", "'gray'", ")", "ax", ".", "pcolormesh", "(", "x", ",", "y", ",", "z", ",", "cmap", "=", "color_map", ",", "vmin", "=", "0", ",", "vmax", "=", "1", ")", "ax", ".", "tick_params", "(", "labelsize", "=", "14", ",", "labelcolor", "=", "'#000000'", ")", "plt", ".", "title", "(", "title", ")", "plt", ".", "grid", "(", "True", ",", "linestyle", "=", "'--'", ",", "linewidth", "=", "1", ",", "color", "=", "'#555555'", ")", "plt", ".", "show", "(", ")" ]
Plot the MOC object using a mollweide projection. **Deprecated**: New `fill` and `border` methods produce more reliable results and allow you to specify additional matplotlib style parameters. Parameters ---------- title : str The title of the plot frame : `astropy.coordinates.BaseCoordinateFrame`, optional Describes the coordinate system the plot will be (ICRS, Galactic are the only coordinate systems supported).
[ "Plot", "the", "MOC", "object", "using", "a", "mollweide", "projection", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/core/states/container_state.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/container_state.py#L1390-L1400
def get_scoped_variable_from_name(self, name): """ Get the scoped variable for a unique name :param name: the unique name of the scoped variable :return: the id of the scoped variable specified by the name :raises exceptions.AttributeError: if the name is not in the scoped_variables dictionary """ for scoped_variable_id, scoped_variable in self.scoped_variables.items(): if scoped_variable.name == name: return scoped_variable_id raise AttributeError("Name %s is not in scoped_variables dictionary" % name)
[ "def", "get_scoped_variable_from_name", "(", "self", ",", "name", ")", ":", "for", "scoped_variable_id", ",", "scoped_variable", "in", "self", ".", "scoped_variables", ".", "items", "(", ")", ":", "if", "scoped_variable", ".", "name", "==", "name", ":", "return", "scoped_variable_id", "raise", "AttributeError", "(", "\"Name %s is not in scoped_variables dictionary\"", ",", "name", ")" ]
Get the scoped variable for a unique name

:param name: the unique name of the scoped variable
:return: the scoped variable specified by the name
:raises exceptions.AttributeError: if the name is not in the scoped_variables dictionary
[ "Get", "the", "scoped", "variable", "for", "a", "unique", "name" ]
python
train
JNRowe/jnrbase
jnrbase/httplib2_certs.py
https://github.com/JNRowe/jnrbase/blob/ae505ef69a9feb739b5f4e62c5a8e6533104d3ea/jnrbase/httplib2_certs.py#L41-L76
def find_certs() -> str: """Find suitable certificates for ``httplib2``. Warning: The default behaviour is to fall back to the bundled certificates when no system certificates can be found. If you're packaging ``jnrbase`` *please* set ``ALLOW_FALLBACK`` to ``False`` to disable this very much unwanted behaviour, but please maintain the option so that downstream users can inspect the configuration easily. See also: :pypi:`httplib2` Returns: Path to SSL certificates Raises: RuntimeError: When no suitable certificates are found """ bundle = path.realpath(path.dirname(httplib2.CA_CERTS)) # Some distros symlink the bundled path location to the system certs if not bundle.startswith(path.dirname(httplib2.__file__)): return bundle for platform, files in PLATFORM_FILES.items(): if sys.platform.startswith(platform): for cert_file in files: if path.exists(cert_file): return cert_file # An apparently common environment setting for macOS users to workaround # the lack of “standard” certs installation if path.exists(getenv('CURL_CA_BUNDLE', '')): return getenv('CURL_CA_BUNDLE') if ALLOW_FALLBACK: warnings.warn('No system certs detected, falling back to bundled', RuntimeWarning) return httplib2.CA_CERTS else: raise RuntimeError('No system certs detected!')
[ "def", "find_certs", "(", ")", "->", "str", ":", "bundle", "=", "path", ".", "realpath", "(", "path", ".", "dirname", "(", "httplib2", ".", "CA_CERTS", ")", ")", "# Some distros symlink the bundled path location to the system certs", "if", "not", "bundle", ".", "startswith", "(", "path", ".", "dirname", "(", "httplib2", ".", "__file__", ")", ")", ":", "return", "bundle", "for", "platform", ",", "files", "in", "PLATFORM_FILES", ".", "items", "(", ")", ":", "if", "sys", ".", "platform", ".", "startswith", "(", "platform", ")", ":", "for", "cert_file", "in", "files", ":", "if", "path", ".", "exists", "(", "cert_file", ")", ":", "return", "cert_file", "# An apparently common environment setting for macOS users to workaround", "# the lack of “standard” certs installation", "if", "path", ".", "exists", "(", "getenv", "(", "'CURL_CA_BUNDLE'", ",", "''", ")", ")", ":", "return", "getenv", "(", "'CURL_CA_BUNDLE'", ")", "if", "ALLOW_FALLBACK", ":", "warnings", ".", "warn", "(", "'No system certs detected, falling back to bundled'", ",", "RuntimeWarning", ")", "return", "httplib2", ".", "CA_CERTS", "else", ":", "raise", "RuntimeError", "(", "'No system certs detected!'", ")" ]
Find suitable certificates for ``httplib2``. Warning: The default behaviour is to fall back to the bundled certificates when no system certificates can be found. If you're packaging ``jnrbase`` *please* set ``ALLOW_FALLBACK`` to ``False`` to disable this very much unwanted behaviour, but please maintain the option so that downstream users can inspect the configuration easily. See also: :pypi:`httplib2` Returns: Path to SSL certificates Raises: RuntimeError: When no suitable certificates are found
[ "Find", "suitable", "certificates", "for", "httplib2", "." ]
python
train
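A short sketch of how find_certs might feed httplib2; the ca_certs keyword on httplib2.Http is standard, but treat the overall wiring as illustrative rather than part of jnrbase:

import httplib2

from jnrbase.httplib2_certs import find_certs

# Point httplib2 at whichever certificate bundle the helper located.
http = httplib2.Http(ca_certs=find_certs())
response, content = http.request('https://example.com/')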
openvax/mhcflurry
mhcflurry/class1_neural_network.py
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_neural_network.py#L226-L253
def network(self, borrow=False): """ Return the keras model associated with this predictor. Parameters ---------- borrow : bool Whether to return a cached model if possible. See borrow_cached_network for details Returns ------- keras.models.Model """ if self._network is None and self.network_json is not None: self.load_weights() if borrow: return self.borrow_cached_network( self.network_json, self.network_weights) else: import keras.models self._network = keras.models.model_from_json(self.network_json) if self.network_weights is not None: self._network.set_weights(self.network_weights) self.network_json = None self.network_weights = None return self._network
[ "def", "network", "(", "self", ",", "borrow", "=", "False", ")", ":", "if", "self", ".", "_network", "is", "None", "and", "self", ".", "network_json", "is", "not", "None", ":", "self", ".", "load_weights", "(", ")", "if", "borrow", ":", "return", "self", ".", "borrow_cached_network", "(", "self", ".", "network_json", ",", "self", ".", "network_weights", ")", "else", ":", "import", "keras", ".", "models", "self", ".", "_network", "=", "keras", ".", "models", ".", "model_from_json", "(", "self", ".", "network_json", ")", "if", "self", ".", "network_weights", "is", "not", "None", ":", "self", ".", "_network", ".", "set_weights", "(", "self", ".", "network_weights", ")", "self", ".", "network_json", "=", "None", "self", ".", "network_weights", "=", "None", "return", "self", ".", "_network" ]
Return the keras model associated with this predictor. Parameters ---------- borrow : bool Whether to return a cached model if possible. See borrow_cached_network for details Returns ------- keras.models.Model
[ "Return", "the", "keras", "model", "associated", "with", "this", "predictor", "." ]
python
train
cirruscluster/cirruscluster
cirruscluster/ext/ansible/inventory/__init__.py
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/inventory/__init__.py#L196-L208
def _hosts_in_unenumerated_pattern(self, pattern): """ Get all host names matching the pattern """ hosts = {} # ignore any negative checks here, this is handled elsewhere pattern = pattern.replace("!","").replace("&", "") groups = self.get_groups() for group in groups: for host in group.get_hosts(): if pattern == 'all' or self._match(group.name, pattern) or self._match(host.name, pattern): hosts[host.name] = host return sorted(hosts.values(), key=lambda x: x.name)
[ "def", "_hosts_in_unenumerated_pattern", "(", "self", ",", "pattern", ")", ":", "hosts", "=", "{", "}", "# ignore any negative checks here, this is handled elsewhere", "pattern", "=", "pattern", ".", "replace", "(", "\"!\"", ",", "\"\"", ")", ".", "replace", "(", "\"&\"", ",", "\"\"", ")", "groups", "=", "self", ".", "get_groups", "(", ")", "for", "group", "in", "groups", ":", "for", "host", "in", "group", ".", "get_hosts", "(", ")", ":", "if", "pattern", "==", "'all'", "or", "self", ".", "_match", "(", "group", ".", "name", ",", "pattern", ")", "or", "self", ".", "_match", "(", "host", ".", "name", ",", "pattern", ")", ":", "hosts", "[", "host", ".", "name", "]", "=", "host", "return", "sorted", "(", "hosts", ".", "values", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", ".", "name", ")" ]
Get all host names matching the pattern
[ "Get", "all", "host", "names", "matching", "the", "pattern" ]
python
train
deifyed/vault
libconman/target.py
https://github.com/deifyed/vault/blob/e3c37ade6c3e6b61a76ec6cd2ba98881c7401d97/libconman/target.py#L83-L90
def deploy(self): ''' Creates a link at the original path of this target ''' if not os.path.exists(self.path): makedirs(self.path) link(self.vault_path, self.real_path)
[ "def", "deploy", "(", "self", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "path", ")", ":", "makedirs", "(", "self", ".", "path", ")", "link", "(", "self", ".", "vault_path", ",", "self", ".", "real_path", ")" ]
Creates a link at the original path of this target
[ "Creates", "a", "link", "at", "the", "original", "path", "of", "this", "target" ]
python
train
mitsei/dlkit
dlkit/json_/assessment/searches.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/searches.py#L218-L230
def get_assessments(self): """Gets the assessment list resulting from the search. return: (osid.assessment.AssessmentList) - the assessment list raise: IllegalState - the assessment list has already been retrieved *compliance: mandatory -- This method must be implemented.* """ if self.retrieved: raise errors.IllegalState('List has already been retrieved.') self.retrieved = True return objects.AssessmentList(self._results, runtime=self._runtime)
[ "def", "get_assessments", "(", "self", ")", ":", "if", "self", ".", "retrieved", ":", "raise", "errors", ".", "IllegalState", "(", "'List has already been retrieved.'", ")", "self", ".", "retrieved", "=", "True", "return", "objects", ".", "AssessmentList", "(", "self", ".", "_results", ",", "runtime", "=", "self", ".", "_runtime", ")" ]
Gets the assessment list resulting from the search. return: (osid.assessment.AssessmentList) - the assessment list raise: IllegalState - the assessment list has already been retrieved *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "assessment", "list", "resulting", "from", "the", "search", "." ]
python
train
trivago/Protector
contrib/helpers/benchmark.py
https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/contrib/helpers/benchmark.py#L9-L17
def batches(iterable, n=1):
    """
    From http://stackoverflow.com/a/8290508/270334
    :param n: batch size
    :param iterable: iterable to split into batches of length n
    """
    l = len(iterable)
    for ndx in range(0, l, n):
        yield iterable[ndx:min(ndx + n, l)]
[ "def", "batches", "(", "iterable", ",", "n", "=", "1", ")", ":", "l", "=", "len", "(", "iterable", ")", "for", "ndx", "in", "range", "(", "0", ",", "l", ",", "n", ")", ":", "yield", "iterable", "[", "ndx", ":", "min", "(", "ndx", "+", "n", ",", "l", ")", "]" ]
From http://stackoverflow.com/a/8290508/270334
:param n: batch size
:param iterable: iterable to split into batches of length n
[ "From", "http", ":", "//", "stackoverflow", ".", "com", "/", "a", "/", "8290508", "/", "270334", ":", "param", "n", ":", ":", "param", "iterable", ":" ]
python
valid
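Because the generator above is self-contained, its behaviour is easy to verify; this sketch restates the function from the record and drives it with a small list:

def batches(iterable, n=1):
    # Yield successive slices of length n; the final batch may be shorter.
    l = len(iterable)
    for ndx in range(0, l, n):
        yield iterable[ndx:min(ndx + n, l)]

print(list(batches([1, 2, 3, 4, 5], n=2)))  # [[1, 2], [3, 4], [5]]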
viniciuschiele/flask-io
flask_io/tracing.py
https://github.com/viniciuschiele/flask-io/blob/4e559419b3d8e6859f83fa16557b00542d5f3aa7/flask_io/tracing.py#L71-L97
def __collect_trace_data(self, request, response, error, latency): """ Collects the tracing data from the given parameters. :param request: The Flask request. :param response: The flask response. :param error: The error occurred if any. :param latency: The time elapsed to process the request. :return: The tracing data. """ data = OrderedDict() data['latency'] = latency.elapsed data['request_method'] = request.environ['REQUEST_METHOD'] data['request_url'] = request.url data['request_headers'] = request.headers body = request.get_data(as_text=True) if body: data['request_body'] = body if response: data['response_status'] = response.status_code if error: data['error'] = str(error) return data
[ "def", "__collect_trace_data", "(", "self", ",", "request", ",", "response", ",", "error", ",", "latency", ")", ":", "data", "=", "OrderedDict", "(", ")", "data", "[", "'latency'", "]", "=", "latency", ".", "elapsed", "data", "[", "'request_method'", "]", "=", "request", ".", "environ", "[", "'REQUEST_METHOD'", "]", "data", "[", "'request_url'", "]", "=", "request", ".", "url", "data", "[", "'request_headers'", "]", "=", "request", ".", "headers", "body", "=", "request", ".", "get_data", "(", "as_text", "=", "True", ")", "if", "body", ":", "data", "[", "'request_body'", "]", "=", "body", "if", "response", ":", "data", "[", "'response_status'", "]", "=", "response", ".", "status_code", "if", "error", ":", "data", "[", "'error'", "]", "=", "str", "(", "error", ")", "return", "data" ]
Collects the tracing data from the given parameters. :param request: The Flask request. :param response: The flask response. :param error: The error occurred if any. :param latency: The time elapsed to process the request. :return: The tracing data.
[ "Collects", "the", "tracing", "data", "from", "the", "given", "parameters", ".", ":", "param", "request", ":", "The", "Flask", "request", ".", ":", "param", "response", ":", "The", "flask", "response", ".", ":", "param", "error", ":", "The", "error", "occurred", "if", "any", ".", ":", "param", "latency", ":", "The", "time", "elapsed", "to", "process", "the", "request", ".", ":", "return", ":", "The", "tracing", "data", "." ]
python
train
QualiSystems/vCenterShell
package/cloudshell/cp/vcenter/common/utilites/common_utils.py
https://github.com/QualiSystems/vCenterShell/blob/e2e24cd938a92a68f4a8e6a860810d3ef72aae6d/package/cloudshell/cp/vcenter/common/utilites/common_utils.py#L11-L22
def back_slash_to_front_converter(string): """ Replacing all \ in the str to / :param string: single string to modify :type string: str """ try: if not string or not isinstance(string, str): return string return string.replace('\\', '/') except Exception: return string
[ "def", "back_slash_to_front_converter", "(", "string", ")", ":", "try", ":", "if", "not", "string", "or", "not", "isinstance", "(", "string", ",", "str", ")", ":", "return", "string", "return", "string", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "except", "Exception", ":", "return", "string" ]
Replacing all \ in the str to / :param string: single string to modify :type string: str
[ "Replacing", "all", "\\", "in", "the", "str", "to", "/", ":", "param", "string", ":", "single", "string", "to", "modify", ":", "type", "string", ":", "str" ]
python
train
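A runnable restatement of the converter with the two interesting cases: a Windows-style path, and a non-string input that passes through untouched:

def back_slash_to_front_converter(string):
    # Replace every backslash with a forward slash; non-strings are returned as-is.
    try:
        if not string or not isinstance(string, str):
            return string
        return string.replace('\\', '/')
    except Exception:
        return string

print(back_slash_to_front_converter('datastore\\vm\\disk.vmdk'))  # datastore/vm/disk.vmdk
print(back_slash_to_front_converter(None))                        # None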
urinieto/msaf
msaf/algorithms/cnmf/segmenter.py
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/cnmf/segmenter.py#L78-L95
def filter_activation_matrix(G, R): """Filters the activation matrix G, and returns a flattened copy.""" #import pylab as plt #plt.imshow(G, interpolation="nearest", aspect="auto") #plt.show() idx = np.argmax(G, axis=1) max_idx = np.arange(G.shape[0]) max_idx = (max_idx, idx.flatten()) G[:, :] = 0 G[max_idx] = idx + 1 # TODO: Order matters? G = np.sum(G, axis=1) G = median_filter(G[:, np.newaxis], R) return G.flatten()
[ "def", "filter_activation_matrix", "(", "G", ",", "R", ")", ":", "#import pylab as plt", "#plt.imshow(G, interpolation=\"nearest\", aspect=\"auto\")", "#plt.show()", "idx", "=", "np", ".", "argmax", "(", "G", ",", "axis", "=", "1", ")", "max_idx", "=", "np", ".", "arange", "(", "G", ".", "shape", "[", "0", "]", ")", "max_idx", "=", "(", "max_idx", ",", "idx", ".", "flatten", "(", ")", ")", "G", "[", ":", ",", ":", "]", "=", "0", "G", "[", "max_idx", "]", "=", "idx", "+", "1", "# TODO: Order matters?", "G", "=", "np", ".", "sum", "(", "G", ",", "axis", "=", "1", ")", "G", "=", "median_filter", "(", "G", "[", ":", ",", "np", ".", "newaxis", "]", ",", "R", ")", "return", "G", ".", "flatten", "(", ")" ]
Filters the activation matrix G, and returns a flattened copy.
[ "Filters", "the", "activation", "matrix", "G", "and", "returns", "a", "flattened", "copy", "." ]
python
test
amelchio/pysonos
pysonos/events.py
https://github.com/amelchio/pysonos/blob/23527c445a00e198fbb94d44b92f7f99d139e325/pysonos/events.py#L586-L633
def unsubscribe(self): """Unsubscribe from the service's events. Once unsubscribed, a Subscription instance should not be reused """ # Trying to unsubscribe if already unsubscribed, or not yet # subscribed, fails silently if self._has_been_unsubscribed or not self.is_subscribed: return # Cancel any auto renew self._auto_renew_thread_flag.set() # Send an unsubscribe request like this: # UNSUBSCRIBE publisher path HTTP/1.1 # HOST: publisher host:publisher port # SID: uuid:subscription UUID headers = { 'SID': self.sid } response = None try: response = requests.request( 'UNSUBSCRIBE', self.service.base_url + self.service.event_subscription_url, headers=headers, timeout=3) except requests.exceptions.RequestException: pass self.is_subscribed = False self._timestamp = None log.info( "Unsubscribed from %s, sid: %s", self.service.base_url + self.service.event_subscription_url, self.sid) # remove queue from event queues and sid to service mappings with _subscriptions_lock: try: del _subscriptions[self.sid] except KeyError: pass self._has_been_unsubscribed = True # Ignore "412 Client Error: Precondition Failed for url:" # from rebooted speakers. if response and response.status_code != 412: response.raise_for_status()
[ "def", "unsubscribe", "(", "self", ")", ":", "# Trying to unsubscribe if already unsubscribed, or not yet", "# subscribed, fails silently", "if", "self", ".", "_has_been_unsubscribed", "or", "not", "self", ".", "is_subscribed", ":", "return", "# Cancel any auto renew", "self", ".", "_auto_renew_thread_flag", ".", "set", "(", ")", "# Send an unsubscribe request like this:", "# UNSUBSCRIBE publisher path HTTP/1.1", "# HOST: publisher host:publisher port", "# SID: uuid:subscription UUID", "headers", "=", "{", "'SID'", ":", "self", ".", "sid", "}", "response", "=", "None", "try", ":", "response", "=", "requests", ".", "request", "(", "'UNSUBSCRIBE'", ",", "self", ".", "service", ".", "base_url", "+", "self", ".", "service", ".", "event_subscription_url", ",", "headers", "=", "headers", ",", "timeout", "=", "3", ")", "except", "requests", ".", "exceptions", ".", "RequestException", ":", "pass", "self", ".", "is_subscribed", "=", "False", "self", ".", "_timestamp", "=", "None", "log", ".", "info", "(", "\"Unsubscribed from %s, sid: %s\"", ",", "self", ".", "service", ".", "base_url", "+", "self", ".", "service", ".", "event_subscription_url", ",", "self", ".", "sid", ")", "# remove queue from event queues and sid to service mappings", "with", "_subscriptions_lock", ":", "try", ":", "del", "_subscriptions", "[", "self", ".", "sid", "]", "except", "KeyError", ":", "pass", "self", ".", "_has_been_unsubscribed", "=", "True", "# Ignore \"412 Client Error: Precondition Failed for url:\"", "# from rebooted speakers.", "if", "response", "and", "response", ".", "status_code", "!=", "412", ":", "response", ".", "raise_for_status", "(", ")" ]
Unsubscribe from the service's events. Once unsubscribed, a Subscription instance should not be reused
[ "Unsubscribe", "from", "the", "service", "s", "events", "." ]
python
train
brocade/pynos
pynos/versions/base/yang/brocade_ras.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/yang/brocade_ras.py#L429-L439
def support_support_param_directory(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") support = ET.SubElement(config, "support", xmlns="urn:brocade.com:mgmt:brocade-ras") support_param = ET.SubElement(support, "support-param") directory = ET.SubElement(support_param, "directory") directory.text = kwargs.pop('directory') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "support_support_param_directory", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "support", "=", "ET", ".", "SubElement", "(", "config", ",", "\"support\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-ras\"", ")", "support_param", "=", "ET", ".", "SubElement", "(", "support", ",", "\"support-param\"", ")", "directory", "=", "ET", ".", "SubElement", "(", "support_param", ",", "\"directory\"", ")", "directory", ".", "text", "=", "kwargs", ".", "pop", "(", "'directory'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
ergo/ziggurat_foundations
ziggurat_foundations/models/services/resource_tree_postgres.py
https://github.com/ergo/ziggurat_foundations/blob/9eeec894d08e8d7defa60ddc04b63f69cd4cbeba/ziggurat_foundations/models/services/resource_tree_postgres.py#L318-L339
def set_position(cls, resource_id, to_position, db_session=None, *args, **kwargs):
        """
        Sets node position for new node in the tree

        :param resource_id: resource to move
        :param to_position: new position
        :param db_session:
        :return: True
        """
        db_session = get_db_session(db_session)
        # lets lock rows to prevent bad tree states
        resource = ResourceService.lock_resource_for_update(
            resource_id=resource_id, db_session=db_session
        )
        cls.check_node_position(
            resource.parent_id, to_position, on_same_branch=True, db_session=db_session
        )
        cls.shift_ordering_up(resource.parent_id, to_position, db_session=db_session)
        db_session.flush()
        db_session.expire(resource)
        resource.ordering = to_position
        return True
[ "def", "set_position", "(", "cls", ",", "resource_id", ",", "to_position", ",", "db_session", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "db_session", "=", "get_db_session", "(", "db_session", ")", "# lets lock rows to prevent bad tree states", "resource", "=", "ResourceService", ".", "lock_resource_for_update", "(", "resource_id", "=", "resource_id", ",", "db_session", "=", "db_session", ")", "cls", ".", "check_node_position", "(", "resource", ".", "parent_id", ",", "to_position", ",", "on_same_branch", "=", "True", ",", "db_session", "=", "db_session", ")", "cls", ".", "shift_ordering_up", "(", "resource", ".", "parent_id", ",", "to_position", ",", "db_session", "=", "db_session", ")", "db_session", ".", "flush", "(", ")", "db_session", ".", "expire", "(", "resource", ")", "resource", ".", "ordering", "=", "to_position", "return", "True" ]
Sets node position for new node in the tree

:param resource_id: resource to move
:param to_position: new position
:param db_session:
:return: True
[ "Sets", "node", "position", "for", "new", "node", "in", "the", "tree" ]
python
train
tensorflow/tensor2tensor
tensor2tensor/data_generators/translate.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/translate.py#L278-L284
def get_or_create_vocab(self, data_dir, tmp_dir, force_get=False): """Get vocab for distill problems.""" # We assume that vocab file is present in data_dir directory where the # data generated will be stored. vocab_filepath = os.path.join(data_dir, self.vocab_filename) encoder = text_encoder.SubwordTextEncoder(vocab_filepath) return encoder
[ "def", "get_or_create_vocab", "(", "self", ",", "data_dir", ",", "tmp_dir", ",", "force_get", "=", "False", ")", ":", "# We assume that vocab file is present in data_dir directory where the", "# data generated will be stored.", "vocab_filepath", "=", "os", ".", "path", ".", "join", "(", "data_dir", ",", "self", ".", "vocab_filename", ")", "encoder", "=", "text_encoder", ".", "SubwordTextEncoder", "(", "vocab_filepath", ")", "return", "encoder" ]
Get vocab for distill problems.
[ "Get", "vocab", "for", "distill", "problems", "." ]
python
train
scdoshi/django-bits
bits/templatetags/custom_utils.py
https://github.com/scdoshi/django-bits/blob/0a2f4fd9374d2a8acb8df9a7b83eebcf2782256f/bits/templatetags/custom_utils.py#L19-L37
def as_widget(self, widget=None, attrs=None, only_initial=False): """ Renders the field by rendering the passed widget, adding any HTML attributes passed as attrs. If no widget is specified, then the field's default widget will be used. """ if not widget: widget = self.field.widget attrs = attrs or {} auto_id = self.auto_id if auto_id and 'id' not in attrs and 'id' not in widget.attrs: if not only_initial: attrs['id'] = auto_id else: attrs['id'] = self.html_initial_id name = "" return widget.render(name, self.value(), attrs=attrs)
[ "def", "as_widget", "(", "self", ",", "widget", "=", "None", ",", "attrs", "=", "None", ",", "only_initial", "=", "False", ")", ":", "if", "not", "widget", ":", "widget", "=", "self", ".", "field", ".", "widget", "attrs", "=", "attrs", "or", "{", "}", "auto_id", "=", "self", ".", "auto_id", "if", "auto_id", "and", "'id'", "not", "in", "attrs", "and", "'id'", "not", "in", "widget", ".", "attrs", ":", "if", "not", "only_initial", ":", "attrs", "[", "'id'", "]", "=", "auto_id", "else", ":", "attrs", "[", "'id'", "]", "=", "self", ".", "html_initial_id", "name", "=", "\"\"", "return", "widget", ".", "render", "(", "name", ",", "self", ".", "value", "(", ")", ",", "attrs", "=", "attrs", ")" ]
Renders the field by rendering the passed widget, adding any HTML attributes passed as attrs. If no widget is specified, then the field's default widget will be used.
[ "Renders", "the", "field", "by", "rendering", "the", "passed", "widget", "adding", "any", "HTML", "attributes", "passed", "as", "attrs", ".", "If", "no", "widget", "is", "specified", "then", "the", "field", "s", "default", "widget", "will", "be", "used", "." ]
python
train
softlayer/softlayer-python
SoftLayer/CLI/vpn/ipsec/translation/update.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/vpn/ipsec/translation/update.py#L31-L46
def cli(env, context_id, translation_id, static_ip, remote_ip, note): """Update an address translation for an IPSEC tunnel context. A separate configuration request should be made to realize changes on network devices. """ manager = SoftLayer.IPSECManager(env.client) succeeded = manager.update_translation(context_id, translation_id, static_ip=static_ip, remote_ip=remote_ip, notes=note) if succeeded: env.out('Updated translation #{}'.format(translation_id)) else: raise CLIHalt('Failed to update translation #{}'.format(translation_id))
[ "def", "cli", "(", "env", ",", "context_id", ",", "translation_id", ",", "static_ip", ",", "remote_ip", ",", "note", ")", ":", "manager", "=", "SoftLayer", ".", "IPSECManager", "(", "env", ".", "client", ")", "succeeded", "=", "manager", ".", "update_translation", "(", "context_id", ",", "translation_id", ",", "static_ip", "=", "static_ip", ",", "remote_ip", "=", "remote_ip", ",", "notes", "=", "note", ")", "if", "succeeded", ":", "env", ".", "out", "(", "'Updated translation #{}'", ".", "format", "(", "translation_id", ")", ")", "else", ":", "raise", "CLIHalt", "(", "'Failed to update translation #{}'", ".", "format", "(", "translation_id", ")", ")" ]
Update an address translation for an IPSEC tunnel context. A separate configuration request should be made to realize changes on network devices.
[ "Update", "an", "address", "translation", "for", "an", "IPSEC", "tunnel", "context", "." ]
python
train
openwisp/netjsonconfig
netjsonconfig/utils.py
https://github.com/openwisp/netjsonconfig/blob/c23ce9732720856e2f6dc54060db71a8182c7d4b/netjsonconfig/utils.py#L114-L122
def get_copy(dict_, key, default=None): """ Looks for a key in a dictionary, if found returns a deepcopied value, otherwise returns default value """ value = dict_.get(key, default) if value: return deepcopy(value) return value
[ "def", "get_copy", "(", "dict_", ",", "key", ",", "default", "=", "None", ")", ":", "value", "=", "dict_", ".", "get", "(", "key", ",", "default", ")", "if", "value", ":", "return", "deepcopy", "(", "value", ")", "return", "value" ]
Looks for a key in a dictionary, if found returns a deepcopied value, otherwise returns default value
[ "Looks", "for", "a", "key", "in", "a", "dictionary", "if", "found", "returns", "a", "deepcopied", "value", "otherwise", "returns", "default", "value" ]
python
valid
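The point of the deepcopy is that callers can mutate the returned value without touching the source dict; a runnable sketch restating the helper (note that falsy values skip the copy):

from copy import deepcopy

def get_copy(dict_, key, default=None):
    # Deep-copy truthy values so the caller cannot mutate the source dict;
    # falsy values (None, {}, []) are returned as-is.
    value = dict_.get(key, default)
    if value:
        return deepcopy(value)
    return value

config = {'interface': {'name': 'eth0'}}
copied = get_copy(config, 'interface')
copied['name'] = 'eth1'
print(config['interface']['name'])  # eth0 -- the original is untouched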
sdss/tree
python/tree/tree.py
https://github.com/sdss/tree/blob/f61fe0876c138ccb61874912d4b8590dadfa835c/python/tree/tree.py#L185-L202
def get_paths(self, key): ''' Retrieve a set of environment paths from the config Parameters: key (str): The section name to grab from the environment Returns: self.environ[newkey] (OrderedDict): An ordered dict containing all of the paths from the specified section, as key:val = name:path ''' newkey = key if key in self.environ else key.upper() if key.upper() \ in self.environ else None if newkey: return self.environ[newkey] else: raise KeyError('Key {0} not found in tree environment'.format(key))
[ "def", "get_paths", "(", "self", ",", "key", ")", ":", "newkey", "=", "key", "if", "key", "in", "self", ".", "environ", "else", "key", ".", "upper", "(", ")", "if", "key", ".", "upper", "(", ")", "in", "self", ".", "environ", "else", "None", "if", "newkey", ":", "return", "self", ".", "environ", "[", "newkey", "]", "else", ":", "raise", "KeyError", "(", "'Key {0} not found in tree environment'", ".", "format", "(", "key", ")", ")" ]
Retrieve a set of environment paths from the config Parameters: key (str): The section name to grab from the environment Returns: self.environ[newkey] (OrderedDict): An ordered dict containing all of the paths from the specified section, as key:val = name:path
[ "Retrieve", "a", "set", "of", "environment", "paths", "from", "the", "config" ]
python
train
adewes/blitzdb
blitzdb/backends/file/queries.py
https://github.com/adewes/blitzdb/blob/4b459e0bcde9e1f6224dd4e3bea74194586864b0/blitzdb/backends/file/queries.py#L27-L48
def filter_query(key, expression): """Filter documents with a key that satisfies an expression.""" if (isinstance(expression, dict) and len(expression) == 1 and list(expression.keys())[0].startswith('$')): compiled_expression = compile_query(expression) elif callable(expression): def _filter(index, expression=expression): result = [store_key for value, store_keys in index.get_index().items() if expression(value) for store_key in store_keys] return result compiled_expression = _filter else: compiled_expression = expression def _get(query_function, key=key, expression=compiled_expression): """Get document key and check against expression.""" return query_function(key, expression) return _get
[ "def", "filter_query", "(", "key", ",", "expression", ")", ":", "if", "(", "isinstance", "(", "expression", ",", "dict", ")", "and", "len", "(", "expression", ")", "==", "1", "and", "list", "(", "expression", ".", "keys", "(", ")", ")", "[", "0", "]", ".", "startswith", "(", "'$'", ")", ")", ":", "compiled_expression", "=", "compile_query", "(", "expression", ")", "elif", "callable", "(", "expression", ")", ":", "def", "_filter", "(", "index", ",", "expression", "=", "expression", ")", ":", "result", "=", "[", "store_key", "for", "value", ",", "store_keys", "in", "index", ".", "get_index", "(", ")", ".", "items", "(", ")", "if", "expression", "(", "value", ")", "for", "store_key", "in", "store_keys", "]", "return", "result", "compiled_expression", "=", "_filter", "else", ":", "compiled_expression", "=", "expression", "def", "_get", "(", "query_function", ",", "key", "=", "key", ",", "expression", "=", "compiled_expression", ")", ":", "\"\"\"Get document key and check against expression.\"\"\"", "return", "query_function", "(", "key", ",", "expression", ")", "return", "_get" ]
Filter documents with a key that satisfies an expression.
[ "Filter", "documents", "with", "a", "key", "that", "satisfies", "an", "expression", "." ]
python
train
FutunnOpen/futuquant
futuquant/examples/TinyQuant/TinyQuantFrame.py
https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/examples/TinyQuant/TinyQuantFrame.py#L130-L153
def get_tiny_trade_order(self, order_id, acc_id=0):
        """Get the information of a trade order."""
        ret, data = self._trade_ctx.order_list_query(order_id=order_id, status_filter_list=[], code='',
                                                     start='', end='', trd_env=self._env_type, acc_id=acc_id)
        if ret != ft.RET_OK:
            return ret, data
        order = TinyTradeOrder()
        for ix, row in data.iterrows():
            if order_id != str(row['order_id']):
                continue
            order.symbol = row['code']
            order.order_id = order_id
            order.direction = row['trd_side']
            order.price = float(row['price'])
            order.total_volume = int(row['qty'])
            order.trade_volume = int(row['dealt_qty'])
            order.create_time = row['create_time']
            order.updated_time = row['updated_time']
            order.trade_avg_price = float(row['dealt_avg_price']) if row['dealt_avg_price'] else 0
            order.order_status = row['order_status']
            break
        return ret, order
[ "def", "get_tiny_trade_order", "(", "self", ",", "order_id", ",", "acc_id", "=", "0", ")", ":", "ret", ",", "data", "=", "self", ".", "_trade_ctx", ".", "order_list_query", "(", "order_id", "=", "order_id", ",", "status_filter_list", "=", "[", "]", ",", "code", "=", "''", ",", "start", "=", "''", ",", "end", "=", "''", ",", "trd_env", "=", "self", ".", "_env_type", ",", "acc_id", "=", "acc_id", ")", "if", "ret", "!=", "ft", ".", "RET_OK", ":", "return", "ret", ",", "data", "order", "=", "TinyTradeOrder", "(", ")", "for", "ix", ",", "row", "in", "data", ".", "iterrows", "(", ")", ":", "if", "order_id", "!=", "str", "(", "row", "[", "'order_id'", "]", ")", ":", "continue", "order", ".", "symbol", "=", "row", "[", "'code'", "]", "order", ".", "order_id", "=", "order_id", "order", ".", "direction", "=", "row", "[", "'trd_side'", "]", "order", ".", "price", "=", "float", "(", "row", "[", "'price'", "]", ")", "order", ".", "total_volume", "=", "int", "(", "row", "[", "'qty'", "]", ")", "order", ".", "trade_volume", "=", "int", "(", "row", "[", "'dealt_qty'", "]", ")", "order", ".", "create_time", "=", "row", "[", "'create_time'", "]", "order", ".", "updated_time", "=", "row", "[", "'updated_time'", "]", "order", ".", "trade_avg_price", "=", "float", "(", "row", "[", "'dealt_avg_price'", "]", ")", "if", "row", "[", "'dealt_avg_price'", "]", "else", "0", "order", ".", "order_status", "=", "row", "[", "'order_status'", "]", "break", "return", "ret", ",", "order" ]
Get the information of a trade order.
[ "得到订单信息" ]
python
train
horazont/aioxmpp
aioxmpp/service.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/service.py#L634-L646
def independent_from(self, other): """ Return whether the services are independent (neither depends on the other). :param other: Another service. :type other: :class:`aioxmpp.service.Service` .. versionadded:: 0.11 """ if self is other: return False return not self.orders_after(other) and not other.orders_after(self)
[ "def", "independent_from", "(", "self", ",", "other", ")", ":", "if", "self", "is", "other", ":", "return", "False", "return", "not", "self", ".", "orders_after", "(", "other", ")", "and", "not", "other", ".", "orders_after", "(", "self", ")" ]
Return whether the services are independent (neither depends on the other). :param other: Another service. :type other: :class:`aioxmpp.service.Service` .. versionadded:: 0.11
[ "Return", "whether", "the", "services", "are", "independent", "(", "neither", "depends", "on", "the", "other", ")", "." ]
python
train
sternoru/goscalecms
goscale/themes/site_middleware.py
https://github.com/sternoru/goscalecms/blob/7eee50357c47ebdfe3e573a8b4be3b67892d229e/goscale/themes/site_middleware.py#L7-L28
def make_tls_property(default=None): """Creates a class-wide instance property with a thread-specific value.""" class TLSProperty(object): def __init__(self): from threading import local self.local = local() def __get__(self, instance, cls): if not instance: return self return self.value def __set__(self, instance, value): self.value = value def _get_value(self): return getattr(self.local, 'value', default) def _set_value(self, value): self.local.value = value value = property(_get_value, _set_value) return TLSProperty()
[ "def", "make_tls_property", "(", "default", "=", "None", ")", ":", "class", "TLSProperty", "(", "object", ")", ":", "def", "__init__", "(", "self", ")", ":", "from", "threading", "import", "local", "self", ".", "local", "=", "local", "(", ")", "def", "__get__", "(", "self", ",", "instance", ",", "cls", ")", ":", "if", "not", "instance", ":", "return", "self", "return", "self", ".", "value", "def", "__set__", "(", "self", ",", "instance", ",", "value", ")", ":", "self", ".", "value", "=", "value", "def", "_get_value", "(", "self", ")", ":", "return", "getattr", "(", "self", ".", "local", ",", "'value'", ",", "default", ")", "def", "_set_value", "(", "self", ",", "value", ")", ":", "self", ".", "local", ".", "value", "=", "value", "value", "=", "property", "(", "_get_value", ",", "_set_value", ")", "return", "TLSProperty", "(", ")" ]
Creates a class-wide instance property with a thread-specific value.
[ "Creates", "a", "class", "-", "wide", "instance", "property", "with", "a", "thread", "-", "specific", "value", "." ]
python
train
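A compact, runnable restatement of the factory showing the thread-specific behaviour; the Settings class and the SITE_ID name are illustrative stand-ins, not part of the original module:

import threading

def make_tls_property(default=None):
    # Descriptor whose value lives in thread-local storage.
    class TLSProperty(object):
        def __init__(self):
            self.local = threading.local()

        def __get__(self, instance, cls):
            if not instance:
                return self
            return getattr(self.local, 'value', default)

        def __set__(self, instance, value):
            self.local.value = value
    return TLSProperty()

class Settings(object):
    SITE_ID = make_tls_property(default=1)

settings = Settings()

def worker():
    settings.SITE_ID = 42
    print('worker sees', settings.SITE_ID)       # 42

t = threading.Thread(target=worker)
t.start()
t.join()
print('main thread sees', settings.SITE_ID)      # 1 -- each thread keeps its own value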
edx/ease
ease/util_functions.py
https://github.com/edx/ease/blob/a7890ed403da94d03726b0639cd8ebda45af6bbb/ease/util_functions.py#L428-L442
def get_wordnet_syns(word): """ Utilize wordnet (installed with nltk) to get synonyms for words word is the input word returns a list of unique synonyms """ synonyms = [] regex = r"_" pat = re.compile(regex) synset = nltk.wordnet.wordnet.synsets(word) for ss in synset: for swords in ss.lemma_names: synonyms.append(pat.sub(" ", swords.lower())) synonyms = f7(synonyms) return synonyms
[ "def", "get_wordnet_syns", "(", "word", ")", ":", "synonyms", "=", "[", "]", "regex", "=", "r\"_\"", "pat", "=", "re", ".", "compile", "(", "regex", ")", "synset", "=", "nltk", ".", "wordnet", ".", "wordnet", ".", "synsets", "(", "word", ")", "for", "ss", "in", "synset", ":", "for", "swords", "in", "ss", ".", "lemma_names", ":", "synonyms", ".", "append", "(", "pat", ".", "sub", "(", "\" \"", ",", "swords", ".", "lower", "(", ")", ")", ")", "synonyms", "=", "f7", "(", "synonyms", ")", "return", "synonyms" ]
Utilize wordnet (installed with nltk) to get synonyms for words word is the input word returns a list of unique synonyms
[ "Utilize", "wordnet", "(", "installed", "with", "nltk", ")", "to", "get", "synonyms", "for", "words", "word", "is", "the", "input", "word", "returns", "a", "list", "of", "unique", "synonyms" ]
python
valid
jopohl/urh
src/urh/ainterpretation/AutoInterpretation.py
https://github.com/jopohl/urh/blob/2eb33b125c8407964cd1092843cde5010eb88aae/src/urh/ainterpretation/AutoInterpretation.py#L28-L47
def get_most_frequent_value(values: list): """ Return the most frequent value in list. If there is no unique one, return the maximum of the most frequent values :param values: :return: """ if len(values) == 0: return None most_common = Counter(values).most_common() result, max_count = most_common[0] for value, count in most_common: if count < max_count: return result else: result = value return result
[ "def", "get_most_frequent_value", "(", "values", ":", "list", ")", ":", "if", "len", "(", "values", ")", "==", "0", ":", "return", "None", "most_common", "=", "Counter", "(", "values", ")", ".", "most_common", "(", ")", "result", ",", "max_count", "=", "most_common", "[", "0", "]", "for", "value", ",", "count", "in", "most_common", ":", "if", "count", "<", "max_count", ":", "return", "result", "else", ":", "result", "=", "value", "return", "result" ]
Return the most frequent value in list. If there is no unique one, return the maximum of the most frequent values :param values: :return:
[ "Return", "the", "most", "frequent", "value", "in", "list", ".", "If", "there", "is", "no", "unique", "one", "return", "the", "maximum", "of", "the", "most", "frequent", "values" ]
python
train
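A runnable check of the record's function; note that on a tie the loop keeps overwriting result, so the value returned is the last of the equally frequent entries that Counter.most_common() reports:

from collections import Counter

def get_most_frequent_value(values: list):
    # Restated from the record above.
    if len(values) == 0:
        return None
    most_common = Counter(values).most_common()
    result, max_count = most_common[0]
    for value, count in most_common:
        if count < max_count:
            return result
        else:
            result = value
    return result

print(get_most_frequent_value([1, 2, 2, 3]))  # 2
print(get_most_frequent_value([]))            # None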
odlgroup/odl
odl/set/sets.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/set/sets.py#L267-L274
def element(self, inp=None): """Return an element from ``inp`` or from scratch.""" if inp is not None: s = str(inp)[:self.length] s += ' ' * (self.length - len(s)) return s else: return ' ' * self.length
[ "def", "element", "(", "self", ",", "inp", "=", "None", ")", ":", "if", "inp", "is", "not", "None", ":", "s", "=", "str", "(", "inp", ")", "[", ":", "self", ".", "length", "]", "s", "+=", "' '", "*", "(", "self", ".", "length", "-", "len", "(", "s", ")", ")", "return", "s", "else", ":", "return", "' '", "*", "self", ".", "length" ]
Return an element from ``inp`` or from scratch.
[ "Return", "an", "element", "from", "inp", "or", "from", "scratch", "." ]
python
train
DigitalGlobe/gbdxtools
gbdxtools/images/util/image.py
https://github.com/DigitalGlobe/gbdxtools/blob/def62f8f2d77b168aa2bd115290aaa0f9a08a4bb/gbdxtools/images/util/image.py#L65-L81
def can_acomp(cat_id):
    """ Checks to see if a CatalogID can be atmos. compensated or not.

    Args:
        cat_id (str): The catalog ID from the platform catalog.

    Returns:
        available (bool): Whether or not the image can be acomp'd
    """
    url = 'https://rda.geobigdata.io/v1/stripMetadata/{}/capabilities'.format(cat_id)
    auth = Auth()
    r = _req_with_retries(auth.gbdx_connection, url)
    try:
        data = r.json()
        return data['acompVersion'] is not None
    except Exception:
        return False
[ "def", "can_acomp", "(", "cat_id", ")", ":", "url", "=", "'https://rda.geobigdata.io/v1/stripMetadata/{}/capabilities'", ".", "format", "(", "cat_id", ")", "auth", "=", "Auth", "(", ")", "r", "=", "_req_with_retries", "(", "auth", ".", "gbdx_connection", ",", "url", ")", "try", ":", "data", "=", "r", ".", "json", "(", ")", "return", "data", "[", "'acompVersion'", "]", "is", "not", "None", "except", ":", "return", "False" ]
Checks to see if a CatalogID can be atmos. compensated or not.

Args:
    cat_id (str): The catalog ID from the platform catalog.

Returns:
    available (bool): Whether or not the image can be acomp'd
[ "Checks", "to", "see", "if", "a", "CatalogID", "can", "be", "atmos", ".", "compensated", "or", "not", "." ]
python
valid
napalm-automation-community/napalm-panos
napalm_panos/panos.py
https://github.com/napalm-automation-community/napalm-panos/blob/9210a81a7a4a47c724d169031414a0743de4b035/napalm_panos/panos.py#L302-L321
def commit_config(self):
        """
        Netmiko is being used to commit the configuration because it takes
        better care of results compared to pan-python.
        """
        if self.loaded:
            if self.ssh_connection is False:
                self._open_ssh()
            try:
                self.ssh_device.commit()
                time.sleep(3)
                self.loaded = False
                self.changed = True
            except:  # noqa
                if self.merge_config:
                    raise MergeConfigException('Error while committing config')
                else:
                    raise ReplaceConfigException('Error while committing config')
        else:
            raise ReplaceConfigException('No config loaded.')
[ "def", "commit_config", "(", "self", ")", ":", "if", "self", ".", "loaded", ":", "if", "self", ".", "ssh_connection", "is", "False", ":", "self", ".", "_open_ssh", "(", ")", "try", ":", "self", ".", "ssh_device", ".", "commit", "(", ")", "time", ".", "sleep", "(", "3", ")", "self", ".", "loaded", "=", "False", "self", ".", "changed", "=", "True", "except", ":", "# noqa", "if", "self", ".", "merge_config", ":", "raise", "MergeConfigException", "(", "'Error while commiting config'", ")", "else", ":", "raise", "ReplaceConfigException", "(", "'Error while commiting config'", ")", "else", ":", "raise", "ReplaceConfigException", "(", "'No config loaded.'", ")" ]
Netmiko is being used to commit the configuration because it takes
better care of results compared to pan-python.
[ "Netmiko", "is", "being", "used", "to", "commit", "the", "configuration", "because", "it", "takes", "a", "better", "care", "of", "results", "compared", "to", "pan", "-", "python", "." ]
python
train
jdrumgoole/pymongo_formatter
pymongo_formatter/formatter.py
https://github.com/jdrumgoole/pymongo_formatter/blob/313fef8f2ff5e7d4f1515ea59a99ec25f7999e7b/pymongo_formatter/formatter.py#L80-L99
def select_fields(doc, field_list): ''' Take 'doc' and create a new doc using only keys from the 'fields' list. Supports referencing fields using dotted notation "a.b.c" so we can parse nested fields the way MongoDB does. The nested field class is a hack. It should be a sub-class of dict. ''' if field_list is None or len(field_list) == 0: return doc newDoc = Nested_Dict({}) oldDoc = Nested_Dict(doc) for i in field_list: if oldDoc.has_key(i): # print( "doc: %s" % doc ) # print( "i: %s" %i ) newDoc.set_value(i, oldDoc.get_value(i)) return newDoc.dict_value()
[ "def", "select_fields", "(", "doc", ",", "field_list", ")", ":", "if", "field_list", "is", "None", "or", "len", "(", "field_list", ")", "==", "0", ":", "return", "doc", "newDoc", "=", "Nested_Dict", "(", "{", "}", ")", "oldDoc", "=", "Nested_Dict", "(", "doc", ")", "for", "i", "in", "field_list", ":", "if", "oldDoc", ".", "has_key", "(", "i", ")", ":", "# print( \"doc: %s\" % doc )", "# print( \"i: %s\" %i )", "newDoc", ".", "set_value", "(", "i", ",", "oldDoc", ".", "get_value", "(", "i", ")", ")", "return", "newDoc", ".", "dict_value", "(", ")" ]
Take 'doc' and create a new doc using only keys from the 'fields' list. Supports referencing fields using dotted notation "a.b.c" so we can parse nested fields the way MongoDB does. The nested field class is a hack. It should be a sub-class of dict.
[ "Take", "doc", "and", "create", "a", "new", "doc", "using", "only", "keys", "from", "the", "fields", "list", ".", "Supports", "referencing", "fields", "using", "dotted", "notation", "a", ".", "b", ".", "c", "so", "we", "can", "parse", "nested", "fields", "the", "way", "MongoDB", "does", ".", "The", "nested", "field", "class", "is", "a", "hack", ".", "It", "should", "be", "a", "sub", "-", "class", "of", "dict", "." ]
python
test
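The Nested_Dict helper the record depends on is not shown, but the dotted-notation lookup it performs can be sketched standalone; everything below is an illustrative assumption, independent of pymongo_formatter:

def get_dotted(doc, dotted_key):
    # Walk 'a.b.c' one segment at a time, the way MongoDB resolves field paths.
    current = doc
    for part in dotted_key.split('.'):
        if not isinstance(current, dict) or part not in current:
            return None
        current = current[part]
    return current

doc = {'name': 'ada', 'address': {'city': 'London', 'zip': 'N1'}}
print(get_dotted(doc, 'address.city'))    # London
print(get_dotted(doc, 'address.street'))  # None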
callowayproject/Transmogrify
transmogrify/processors.py
https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/processors.py#L30-L49
def parse_size(image, size): """ Parse a size string (i.e. "200", "200x100", "x200", etc.) into a (width, height) tuple. """ bits = size.split("x") if image.size[0] == 0 or image.size[1] == 0: ratio = 1.0 else: ratio = float(image.size[0]) / float(image.size[1]) if len(bits) == 1 or not bits[1]: width = int(bits[0]) height = int(1 / ratio * width) elif not bits[0]: height = int(bits[1]) width = int(height * ratio) else: width, height = map(int, bits) return width, height
[ "def", "parse_size", "(", "image", ",", "size", ")", ":", "bits", "=", "size", ".", "split", "(", "\"x\"", ")", "if", "image", ".", "size", "[", "0", "]", "==", "0", "or", "image", ".", "size", "[", "1", "]", "==", "0", ":", "ratio", "=", "1.0", "else", ":", "ratio", "=", "float", "(", "image", ".", "size", "[", "0", "]", ")", "/", "float", "(", "image", ".", "size", "[", "1", "]", ")", "if", "len", "(", "bits", ")", "==", "1", "or", "not", "bits", "[", "1", "]", ":", "width", "=", "int", "(", "bits", "[", "0", "]", ")", "height", "=", "int", "(", "1", "/", "ratio", "*", "width", ")", "elif", "not", "bits", "[", "0", "]", ":", "height", "=", "int", "(", "bits", "[", "1", "]", ")", "width", "=", "int", "(", "height", "*", "ratio", ")", "else", ":", "width", ",", "height", "=", "map", "(", "int", ",", "bits", ")", "return", "width", ",", "height" ]
Parse a size string (i.e. "200", "200x100", "x200", etc.) into a (width, height) tuple.
[ "Parse", "a", "size", "string", "(", "i", ".", "e", ".", "200", "200x100", "x200", "etc", ".", ")", "into", "a", "(", "width", "height", ")", "tuple", "." ]
python
train
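No real image is needed to exercise the parser, only an object with a .size attribute; a runnable sketch with a namedtuple standing in for a PIL image:

from collections import namedtuple

def parse_size(image, size):
    # Restated from the record: fill in the missing dimension from the
    # image's aspect ratio when the size string omits width or height.
    bits = size.split("x")
    if image.size[0] == 0 or image.size[1] == 0:
        ratio = 1.0
    else:
        ratio = float(image.size[0]) / float(image.size[1])
    if len(bits) == 1 or not bits[1]:
        width = int(bits[0])
        height = int(1 / ratio * width)
    elif not bits[0]:
        height = int(bits[1])
        width = int(height * ratio)
    else:
        width, height = map(int, bits)
    return width, height

Image = namedtuple('Image', 'size')  # stand-in for a PIL image
img = Image(size=(400, 200))         # 2:1 aspect ratio
print(parse_size(img, '200'))        # (200, 100)
print(parse_size(img, 'x100'))       # (200, 100)
print(parse_size(img, '300x50'))     # (300, 50)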
untwisted/quickirc
quickirc.py
https://github.com/untwisted/quickirc/blob/4e92ffd45eef03eee2ba0b659b19b4f40a2dbf99/quickirc.py#L184-L197
def extract_ctcp(self, spin, nick, user, host, target, msg):
        """ It is used to extract CTCP requests into pieces.
        """

        # The ctcp delimiter token.
        DELIM = '\001'

        if not msg.startswith(DELIM) or not msg.endswith(DELIM):
            return

        ctcp_args = msg.strip(DELIM).split(' ')

        spawn(spin, ctcp_args[0], (nick, user,
                         host, target, msg), *ctcp_args[1:])
[ "def", "extract_ctcp", "(", "self", ",", "spin", ",", "nick", ",", "user", ",", "host", ",", "target", ",", "msg", ")", ":", "# The ctcp delimiter token.", "DELIM", "=", "'\\001'", "if", "not", "msg", ".", "startswith", "(", "DELIM", ")", "or", "not", "msg", ".", "endswith", "(", "DELIM", ")", ":", "return", "ctcp_args", "=", "msg", ".", "strip", "(", "DELIM", ")", ".", "split", "(", "' '", ")", "spawn", "(", "spin", ",", "ctcp_args", "[", "0", "]", ",", "(", "nick", ",", "user", ",", "host", ",", "target", ",", "msg", ")", ",", "*", "ctcp_args", "[", "1", ":", "]", ")" ]
It is used to extract CTCP requests into pieces.
[ "it", "is", "used", "to", "extract", "ctcp", "requests", "into", "pieces", "." ]
python
train
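CTCP framing is just \001-delimited text inside a message; the same extraction, sketched outside the quickirc event machinery (split_ctcp is an illustrative name, not part of the module):

DELIM = '\001'

def split_ctcp(msg):
    # Return (command, args) for a CTCP-framed message, or None otherwise.
    if not msg.startswith(DELIM) or not msg.endswith(DELIM):
        return None
    parts = msg.strip(DELIM).split(' ')
    return parts[0], parts[1:]

print(split_ctcp('\001VERSION\001'))     # ('VERSION', [])
print(split_ctcp('\001PING 12345\001'))  # ('PING', ['12345'])
print(split_ctcp('plain message'))       # None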
johnbywater/eventsourcing
eventsourcing/contrib/suffixtrees/domain/model/suffixtree.py
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/suffixtrees/domain/model/suffixtree.py#L318-L325
def register_new_node(suffix_node_id=None): """Factory method, registers new node. """ node_id = uuid4() event = Node.Created(originator_id=node_id, suffix_node_id=suffix_node_id) entity = Node.mutate(event=event) publish(event) return entity
[ "def", "register_new_node", "(", "suffix_node_id", "=", "None", ")", ":", "node_id", "=", "uuid4", "(", ")", "event", "=", "Node", ".", "Created", "(", "originator_id", "=", "node_id", ",", "suffix_node_id", "=", "suffix_node_id", ")", "entity", "=", "Node", ".", "mutate", "(", "event", "=", "event", ")", "publish", "(", "event", ")", "return", "entity" ]
Factory method, registers new node.
[ "Factory", "method", "registers", "new", "node", "." ]
python
train
chrisjsewell/jsonextended
jsonextended/utils.py
https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/utils.py#L59-L79
def get_data_path(data, module, check_exists=True): """return a directory path to data within a module Parameters ---------- data : str or list[str] file name or list of sub-directories and file name (e.g. ['lammps','data.txt']) """ basepath = os.path.dirname(os.path.abspath(inspect.getfile(module))) if isinstance(data, basestring): data = [data] dirpath = os.path.join(basepath, *data) if check_exists: assert os.path.exists(dirpath), '{0} does not exist'.format(dirpath) return pathlib.Path(dirpath)
[ "def", "get_data_path", "(", "data", ",", "module", ",", "check_exists", "=", "True", ")", ":", "basepath", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "inspect", ".", "getfile", "(", "module", ")", ")", ")", "if", "isinstance", "(", "data", ",", "basestring", ")", ":", "data", "=", "[", "data", "]", "dirpath", "=", "os", ".", "path", ".", "join", "(", "basepath", ",", "*", "data", ")", "if", "check_exists", ":", "assert", "os", ".", "path", ".", "exists", "(", "dirpath", ")", ",", "'{0} does not exist'", ".", "format", "(", "dirpath", ")", "return", "pathlib", ".", "Path", "(", "dirpath", ")" ]
return a directory path to data within a module Parameters ---------- data : str or list[str] file name or list of sub-directories and file name (e.g. ['lammps','data.txt'])
[ "return", "a", "directory", "path", "to", "data", "within", "a", "module" ]
python
train
saltstack/salt
salt/returners/django_return.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/django_return.py#L70-L82
def save_load(jid, load, minions=None): ''' Save the load to the specified jid ''' signaled = dispatch.Signal( providing_args=['jid', 'load']).send( sender='save_load', jid=jid, load=load) for signal in signaled: log.debug( 'Django returner function \'save_load\' signaled %s ' 'which responded with %s', signal[0], signal[1] )
[ "def", "save_load", "(", "jid", ",", "load", ",", "minions", "=", "None", ")", ":", "signaled", "=", "dispatch", ".", "Signal", "(", "providing_args", "=", "[", "'jid'", ",", "'load'", "]", ")", ".", "send", "(", "sender", "=", "'save_load'", ",", "jid", "=", "jid", ",", "load", "=", "load", ")", "for", "signal", "in", "signaled", ":", "log", ".", "debug", "(", "'Django returner function \\'save_load\\' signaled %s '", "'which responded with %s'", ",", "signal", "[", "0", "]", ",", "signal", "[", "1", "]", ")" ]
Save the load to the specified jid
[ "Save", "the", "load", "to", "the", "specified", "jid" ]
python
train
spyder-ide/spyder-notebook
spyder_notebook/widgets/client.py
https://github.com/spyder-ide/spyder-notebook/blob/54e626b9d2a3fccd3e4625b0f97fe06e5bb1a6db/spyder_notebook/widgets/client.py#L216-L222
def get_short_name(self): """Get a short name for the notebook.""" sname = osp.splitext(osp.basename(self.filename))[0] if len(sname) > 20: fm = QFontMetrics(QFont()) sname = fm.elidedText(sname, Qt.ElideRight, 110) return sname
[ "def", "get_short_name", "(", "self", ")", ":", "sname", "=", "osp", ".", "splitext", "(", "osp", ".", "basename", "(", "self", ".", "filename", ")", ")", "[", "0", "]", "if", "len", "(", "sname", ")", ">", "20", ":", "fm", "=", "QFontMetrics", "(", "QFont", "(", ")", ")", "sname", "=", "fm", ".", "elidedText", "(", "sname", ",", "Qt", ".", "ElideRight", ",", "110", ")", "return", "sname" ]
Get a short name for the notebook.
[ "Get", "a", "short", "name", "for", "the", "notebook", "." ]
python
train
ray-project/ray
python/ray/rllib/optimizers/segment_tree.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/rllib/optimizers/segment_tree.py#L59-L83
def reduce(self, start=0, end=None): """Returns result of applying `self.operation` to a contiguous subsequence of the array. self.operation( arr[start], operation(arr[start+1], operation(... arr[end]))) Parameters ---------- start: int beginning of the subsequence end: int end of the subsequences Returns ------- reduced: obj result of reducing self.operation over the specified range of array elements. """ if end is None: end = self._capacity - 1 if end < 0: end += self._capacity return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
[ "def", "reduce", "(", "self", ",", "start", "=", "0", ",", "end", "=", "None", ")", ":", "if", "end", "is", "None", ":", "end", "=", "self", ".", "_capacity", "-", "1", "if", "end", "<", "0", ":", "end", "+=", "self", ".", "_capacity", "return", "self", ".", "_reduce_helper", "(", "start", ",", "end", ",", "1", ",", "0", ",", "self", ".", "_capacity", "-", "1", ")" ]
Returns result of applying `self.operation` to a contiguous subsequence of the array. self.operation( arr[start], operation(arr[start+1], operation(... arr[end]))) Parameters ---------- start: int beginning of the subsequence end: int end of the subsequences Returns ------- reduced: obj result of reducing self.operation over the specified range of array elements.
[ "Returns", "result", "of", "applying", "self", ".", "operation", "to", "a", "contiguous", "subsequence", "of", "the", "array", "." ]
python
train
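reduce is the workhorse behind the concrete subclasses in the same module; the sketch below assumes the conventional SumSegmentTree interface from that file (power-of-two capacity, item assignment, and a sum method delegating to reduce with operator.add). Note that in this version end is inclusive:

from ray.rllib.optimizers.segment_tree import SumSegmentTree

tree = SumSegmentTree(capacity=4)  # capacity must be a power of two
tree[0] = 1.0
tree[1] = 2.0
tree[2] = 3.0
print(tree.sum())  # 6.0 -- operator.add reduced over the whole array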
BD2KGenomics/protect
src/protect/pipeline/ProTECT.py
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L860-L901
def get_patient_bams(job, patient_dict, sample_type, univ_options, bwa_options, mutect_options): """ Convenience function to return the bam and its index in the correct format for a sample type. :param dict patient_dict: dict of patient info :param str sample_type: 'tumor_rna', 'tumor_dna', 'normal_dna' :param dict univ_options: Dict of universal options used by almost all tools :param dict bwa_options: Options specific to bwa :param dict bwa_options: Options specific to mutect :return: formatted dict of bam and bai :rtype: dict """ output_dict = {} if 'dna' in sample_type: sample_info = 'fix_pg_sorted' prefix = sample_type + '_' + sample_info else: sample_info = 'genome_sorted' prefix = 'rna_' + sample_info if sample_type + '_bam' in patient_dict['gdc_inputs']: output_dict[prefix + '.bam'] = patient_dict[sample_type + '_bam'][0] output_dict[prefix + '.bam.bai'] = patient_dict[sample_type + '_bam'][1] elif sample_type + '_bai' in patient_dict: output_dict[prefix + '.bam'] = patient_dict[sample_type + '_bam'] output_dict[prefix + '.bam.bai'] = patient_dict[sample_type + '_bai'] else: from protect.alignment.dna import index_bamfile, index_disk output_job = job.wrapJobFn(index_bamfile, patient_dict[sample_type + '_bam'], 'rna' if sample_type == 'tumor_rna' else sample_type, univ_options, bwa_options['samtools'], sample_info=sample_info, export=False, disk=PromisedRequirement(index_disk, patient_dict[sample_type + '_bam'])) job.addChild(output_job) output_dict = output_job.rv() if sample_type == 'tumor_rna': if 'tumor_rna_transcriptome_bam' not in patient_dict: patient_dict['tumor_rna_transcriptome_bam'] = None return{'rna_genome': output_dict, 'rna_transcriptome.bam': patient_dict['tumor_rna_transcriptome_bam']} else: return output_dict
[ "def", "get_patient_bams", "(", "job", ",", "patient_dict", ",", "sample_type", ",", "univ_options", ",", "bwa_options", ",", "mutect_options", ")", ":", "output_dict", "=", "{", "}", "if", "'dna'", "in", "sample_type", ":", "sample_info", "=", "'fix_pg_sorted'", "prefix", "=", "sample_type", "+", "'_'", "+", "sample_info", "else", ":", "sample_info", "=", "'genome_sorted'", "prefix", "=", "'rna_'", "+", "sample_info", "if", "sample_type", "+", "'_bam'", "in", "patient_dict", "[", "'gdc_inputs'", "]", ":", "output_dict", "[", "prefix", "+", "'.bam'", "]", "=", "patient_dict", "[", "sample_type", "+", "'_bam'", "]", "[", "0", "]", "output_dict", "[", "prefix", "+", "'.bam.bai'", "]", "=", "patient_dict", "[", "sample_type", "+", "'_bam'", "]", "[", "1", "]", "elif", "sample_type", "+", "'_bai'", "in", "patient_dict", ":", "output_dict", "[", "prefix", "+", "'.bam'", "]", "=", "patient_dict", "[", "sample_type", "+", "'_bam'", "]", "output_dict", "[", "prefix", "+", "'.bam.bai'", "]", "=", "patient_dict", "[", "sample_type", "+", "'_bai'", "]", "else", ":", "from", "protect", ".", "alignment", ".", "dna", "import", "index_bamfile", ",", "index_disk", "output_job", "=", "job", ".", "wrapJobFn", "(", "index_bamfile", ",", "patient_dict", "[", "sample_type", "+", "'_bam'", "]", ",", "'rna'", "if", "sample_type", "==", "'tumor_rna'", "else", "sample_type", ",", "univ_options", ",", "bwa_options", "[", "'samtools'", "]", ",", "sample_info", "=", "sample_info", ",", "export", "=", "False", ",", "disk", "=", "PromisedRequirement", "(", "index_disk", ",", "patient_dict", "[", "sample_type", "+", "'_bam'", "]", ")", ")", "job", ".", "addChild", "(", "output_job", ")", "output_dict", "=", "output_job", ".", "rv", "(", ")", "if", "sample_type", "==", "'tumor_rna'", ":", "if", "'tumor_rna_transcriptome_bam'", "not", "in", "patient_dict", ":", "patient_dict", "[", "'tumor_rna_transcriptome_bam'", "]", "=", "None", "return", "{", "'rna_genome'", ":", "output_dict", ",", "'rna_transcriptome.bam'", ":", "patient_dict", "[", "'tumor_rna_transcriptome_bam'", "]", "}", "else", ":", "return", "output_dict" ]
Convenience function to return the bam and its index in the correct format for a sample type.

:param dict patient_dict: dict of patient info
:param str sample_type: 'tumor_rna', 'tumor_dna', 'normal_dna'
:param dict univ_options: Dict of universal options used by almost all tools
:param dict bwa_options: Options specific to bwa
:param dict mutect_options: Options specific to mutect
:return: formatted dict of bam and bai
:rtype: dict
[ "Convenience", "function", "to", "return", "the", "bam", "and", "its", "index", "in", "the", "correct", "format", "for", "a", "sample", "type", "." ]
python
train
Alignak-monitoring/alignak
alignak/http/generic_interface.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/http/generic_interface.py#L367-L380
def _results(self, scheduler_instance_id):
    """Get the results of the executed actions for the scheduler whose instance id is provided

    Calling this method for daemons that are not configured as passive does not make sense.
    Indeed, this service should only be exposed on poller and reactionner daemons.

    :param scheduler_instance_id: instance id of the scheduler
    :type scheduler_instance_id: string
    :return: serialized list
    :rtype: str
    """
    with self.app.lock:
        res = self.app.get_results_from_passive(scheduler_instance_id)

    return serialize(res, True)
[ "def", "_results", "(", "self", ",", "scheduler_instance_id", ")", ":", "with", "self", ".", "app", ".", "lock", ":", "res", "=", "self", ".", "app", ".", "get_results_from_passive", "(", "scheduler_instance_id", ")", "return", "serialize", "(", "res", ",", "True", ")" ]
Get the results of the executed actions for the scheduler whose instance id is provided

Calling this method for daemons that are not configured as passive does not make sense.
Indeed, this service should only be exposed on poller and reactionner daemons.

:param scheduler_instance_id: instance id of the scheduler
:type scheduler_instance_id: string
:return: serialized list
:rtype: str
[ "Get", "the", "results", "of", "the", "executed", "actions", "for", "the", "scheduler", "which", "instance", "id", "is", "provided" ]
python
train
keon/algorithms
algorithms/sort/merge_sort.py
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/sort/merge_sort.py#L16-L38
def merge(left, right, merged):
    """ Merge helper
        Complexity: O(n)
    """
    left_cursor, right_cursor = 0, 0
    while left_cursor < len(left) and right_cursor < len(right):
        # Compare the heads and place the smaller one into the result
        if left[left_cursor] <= right[right_cursor]:
            merged[left_cursor + right_cursor] = left[left_cursor]
            left_cursor += 1
        else:
            merged[left_cursor + right_cursor] = right[right_cursor]
            right_cursor += 1
    # Add any leftovers from the left list to the result
    for left_cursor in range(left_cursor, len(left)):
        merged[left_cursor + right_cursor] = left[left_cursor]
    # Add any leftovers from the right list to the result
    for right_cursor in range(right_cursor, len(right)):
        merged[left_cursor + right_cursor] = right[right_cursor]
    # Return result
    return merged
[ "def", "merge", "(", "left", ",", "right", ",", "merged", ")", ":", "left_cursor", ",", "right_cursor", "=", "0", ",", "0", "while", "left_cursor", "<", "len", "(", "left", ")", "and", "right_cursor", "<", "len", "(", "right", ")", ":", "# Sort each one and place into the result", "if", "left", "[", "left_cursor", "]", "<=", "right", "[", "right_cursor", "]", ":", "merged", "[", "left_cursor", "+", "right_cursor", "]", "=", "left", "[", "left_cursor", "]", "left_cursor", "+=", "1", "else", ":", "merged", "[", "left_cursor", "+", "right_cursor", "]", "=", "right", "[", "right_cursor", "]", "right_cursor", "+=", "1", "# Add the left overs if there's any left to the result", "for", "left_cursor", "in", "range", "(", "left_cursor", ",", "len", "(", "left", ")", ")", ":", "merged", "[", "left_cursor", "+", "right_cursor", "]", "=", "left", "[", "left_cursor", "]", "# Add the left overs if there's any left to the result", "for", "right_cursor", "in", "range", "(", "right_cursor", ",", "len", "(", "right", ")", ")", ":", "merged", "[", "left_cursor", "+", "right_cursor", "]", "=", "right", "[", "right_cursor", "]", "# Return result", "return", "merged" ]
Merge helper Complexity: O(n)
[ "Merge", "helper", "Complexity", ":", "O", "(", "n", ")" ]
python
train
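A quick usage sketch for the merge helper above. The import path is assumed from the record's path field (algorithms/sort/merge_sort.py) and may differ in the installed package; note that the caller pre-allocates the merged buffer, which is how merge_sort itself invokes the helper.

# Hedged usage sketch; the import path is assumed from the record's path field.
from algorithms.sort.merge_sort import merge

left = [1, 4, 7]
right = [2, 3, 9]
merged = [0] * (len(left) + len(right))  # caller pre-allocates the output buffer
print(merge(left, right, merged))        # [1, 2, 3, 4, 7, 9]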
aio-libs/aiohttp
aiohttp/multipart.py
https://github.com/aio-libs/aiohttp/blob/9504fe2affaaff673fa4f3754c1c44221f8ba47d/aiohttp/multipart.py#L435-L443
def decode(self, data: bytes) -> bytes:
    """Decodes data according to the specified Content-Encoding
    or Content-Transfer-Encoding headers value.
    """
    if CONTENT_TRANSFER_ENCODING in self.headers:
        data = self._decode_content_transfer(data)
    if CONTENT_ENCODING in self.headers:
        return self._decode_content(data)
    return data
[ "def", "decode", "(", "self", ",", "data", ":", "bytes", ")", "->", "bytes", ":", "if", "CONTENT_TRANSFER_ENCODING", "in", "self", ".", "headers", ":", "data", "=", "self", ".", "_decode_content_transfer", "(", "data", ")", "if", "CONTENT_ENCODING", "in", "self", ".", "headers", ":", "return", "self", ".", "_decode_content", "(", "data", ")", "return", "data" ]
Decodes data according to the specified Content-Encoding or Content-Transfer-Encoding headers value.
[ "Decodes", "data", "according", "the", "specified", "Content", "-", "Encoding", "or", "Content", "-", "Transfer", "-", "Encoding", "headers", "value", "." ]
python
train
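The decode method above applies two decoders in a fixed order: the Content-Transfer-Encoding is undone first, then the Content-Encoding. A standalone sketch of that order using only stdlib codecs; aiohttp's internal helpers are not reproduced here.

import base64
import gzip

payload = base64.b64encode(gzip.compress(b"hello"))  # gzipped, then base64-encoded
step1 = base64.b64decode(payload)  # undo Content-Transfer-Encoding: base64
step2 = gzip.decompress(step1)     # undo Content-Encoding: gzip
print(step2)                       # b'hello'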
Alignak-monitoring/alignak
alignak/external_command.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L2885-L2898
def enable_performance_data(self): """Enable performance data processing (globally) Format of the line that triggers function call:: ENABLE_PERFORMANCE_DATA :return: None """ if not self.my_conf.process_performance_data: self.my_conf.modified_attributes |= \ DICT_MODATTR["MODATTR_PERFORMANCE_DATA_ENABLED"].value self.my_conf.process_performance_data = True self.my_conf.explode_global_conf() self.daemon.update_program_status()
[ "def", "enable_performance_data", "(", "self", ")", ":", "if", "not", "self", ".", "my_conf", ".", "process_performance_data", ":", "self", ".", "my_conf", ".", "modified_attributes", "|=", "DICT_MODATTR", "[", "\"MODATTR_PERFORMANCE_DATA_ENABLED\"", "]", ".", "value", "self", ".", "my_conf", ".", "process_performance_data", "=", "True", "self", ".", "my_conf", ".", "explode_global_conf", "(", ")", "self", ".", "daemon", ".", "update_program_status", "(", ")" ]
Enable performance data processing (globally) Format of the line that triggers function call:: ENABLE_PERFORMANCE_DATA :return: None
[ "Enable", "performance", "data", "processing", "(", "globally", ")", "Format", "of", "the", "line", "that", "triggers", "function", "call", "::" ]
python
train
mwgielen/jackal
jackal/core.py
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/core.py#L100-L105
def argument_count(self): """ Uses the command line arguments to fill the count function and call it. """ arguments, _ = self.argparser.parse_known_args() return self.count(**vars(arguments))
[ "def", "argument_count", "(", "self", ")", ":", "arguments", ",", "_", "=", "self", ".", "argparser", ".", "parse_known_args", "(", ")", "return", "self", ".", "count", "(", "*", "*", "vars", "(", "arguments", ")", ")" ]
Uses the command line arguments to fill the count function and call it.
[ "Uses", "the", "command", "line", "arguments", "to", "fill", "the", "count", "function", "and", "call", "it", "." ]
python
valid
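argument_count above relies on the parse_known_args-plus-vars() pattern to forward command line flags as keyword arguments. A minimal standalone sketch of that pattern; the flags and the count stand-in below are hypothetical, not jackal's actual options.

import argparse

def count(tag=None, up=False):  # hypothetical stand-in for self.count
    print("counting", tag, up)

argparser = argparse.ArgumentParser()
argparser.add_argument("--tag")
argparser.add_argument("--up", action="store_true")

arguments, _ = argparser.parse_known_args(["--tag", "hosts"])
count(**vars(arguments))  # counting hosts False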
jstitch/MambuPy
MambuPy/mambuconfig.py
https://github.com/jstitch/MambuPy/blob/2af98cc12e7ed5ec183b3e97644e880e70b79ee8/MambuPy/mambuconfig.py#L119-L152
def get_conf(conf, sect, opt):
    """ Gets a config 'opt' from 'conf' file, under section 'sect'.

    If no 'opt' exists under 'sect', it looks for the option in the
    default_configs dictionary.

    If there exists an environment variable named MAMBUPY_{upper_case_opt},
    it overrides whatever the conf files or default_configs dict says. But
    if you send a command line argument named mambupy_{lower_case_opt}, it
    overrides anything else.

    Args:
     conf (ConfigParser): ConfigParser that reads from certain config file (INI format)
     sect (string): section under the config file
     opt (string): option to read

    Returns:
     string: configuration option. If not found on conf, returns a value
             from default_configs dict. If an environment variable named
             MAMBUPY_{upper_case_opt} exists, it overrides the conf files
             and defaults
    """
    argu = getattr(args, "mambupy_"+opt.lower())
    if not argu:
        envir = os.environ.get("MAMBUPY_"+opt.upper())
        if not envir:
            try:
                return conf.get(sect,opt)
            except NoSectionError:
                return default_configs[opt]
        return envir
    return argu
[ "def", "get_conf", "(", "conf", ",", "sect", ",", "opt", ")", ":", "argu", "=", "getattr", "(", "args", ",", "\"mambupy_\"", "+", "opt", ".", "lower", "(", ")", ")", "if", "not", "argu", ":", "envir", "=", "os", ".", "environ", ".", "get", "(", "\"MAMBUPY_\"", "+", "opt", ".", "upper", "(", ")", ")", "if", "not", "envir", ":", "try", ":", "return", "conf", ".", "get", "(", "sect", ",", "opt", ")", "except", "NoSectionError", ":", "return", "default_configs", "[", "opt", "]", "return", "envir", "return", "argu" ]
Gets a config 'opt' from 'conf' file, under section 'sect'.

If no 'opt' exists under 'sect', it looks for the option in the default_configs dictionary.

If there exists an environment variable named MAMBUPY_{upper_case_opt}, it overrides whatever the conf files or default_configs dict says. But if you send a command line argument named mambupy_{lower_case_opt}, it overrides anything else.

Args:
 conf (ConfigParser): ConfigParser that reads from certain config file (INI format)
 sect (string): section under the config file
 opt (string): option to read

Returns:
 string: configuration option. If not found on conf, returns a value from default_configs dict. If an environment variable named MAMBUPY_{upper_case_opt} exists, it overrides the conf files and defaults
[ "Gets", "a", "config", "opt", "from", "conf", "file", "under", "section", "sect", "." ]
python
train
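A self-contained sketch of the precedence get_conf implements: command line argument, then MAMBUPY_* environment variable, then the config file, then default_configs. The option name and values are hypothetical, and the file and CLI lookups are stubbed so the snippet runs on its own.

import os

default_configs = {"apiurl": "domain.mambu.com"}

def lookup(conf_value, opt, cli_args):
    argu = cli_args.get("mambupy_" + opt.lower())
    if argu:
        return argu
    envir = os.environ.get("MAMBUPY_" + opt.upper())
    if envir:
        return envir
    # conf_value stands in for conf.get(sect, opt); None means "no such section"
    return conf_value if conf_value is not None else default_configs[opt]

os.environ["MAMBUPY_APIURL"] = "env.mambu.com"
print(lookup("file.mambu.com", "apiurl", {}))                         # env.mambu.com
print(lookup("file.mambu.com", "apiurl", {"mambupy_apiurl": "cli"}))  # cli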
sassoftware/saspy
saspy/sasdata.py
https://github.com/sassoftware/saspy/blob/e433f71990f249d3a6c3db323ceb11cb2d462cf9/saspy/sasdata.py#L520-L570
def info(self): """ Display the column info on a SAS data object :return: Pandas data frame """ if self.results.casefold() != 'pandas': print("The info method only works with Pandas results") return None info_code = """ data work._statsInfo ; do rows=0 by 1 while( not last ) ; set {0}.{1}{2} end=last; array chrs _character_ ; array nums _numeric_ ; array ccounts(999) _temporary_ ; array ncounts(999) _temporary_ ; do over chrs; ccounts(_i_) + missing(chrs) ; end; do over nums; ncounts(_i_) + missing(nums); end; end ; length Variable $32 type $8. ; Do over chrs; Type = 'char'; Variable = vname(chrs) ; N = rows; Nmiss = ccounts(_i_) ; Output ; end ; Do over nums; Type = 'numeric'; Variable = vname(nums) ; N = rows; Nmiss = ncounts(_i_) ; if variable ^= 'rows' then output; end ; stop; keep Variable N NMISS Type ; run; """ if self.sas.nosub: print(info_code.format(self.libref, self.table, self._dsopts())) return None info_pd = self._returnPD(info_code.format(self.libref, self.table, self._dsopts()), '_statsInfo') info_pd = info_pd.iloc[:, :] info_pd.index.name = None info_pd.name = None return info_pd
[ "def", "info", "(", "self", ")", ":", "if", "self", ".", "results", ".", "casefold", "(", ")", "!=", "'pandas'", ":", "print", "(", "\"The info method only works with Pandas results\"", ")", "return", "None", "info_code", "=", "\"\"\"\n data work._statsInfo ;\n do rows=0 by 1 while( not last ) ;\n set {0}.{1}{2} end=last;\n array chrs _character_ ;\n array nums _numeric_ ;\n array ccounts(999) _temporary_ ;\n array ncounts(999) _temporary_ ;\n do over chrs;\n ccounts(_i_) + missing(chrs) ;\n end;\n do over nums;\n ncounts(_i_) + missing(nums);\n end; \n end ;\n length Variable $32 type $8. ;\n Do over chrs;\n Type = 'char';\n Variable = vname(chrs) ;\n N = rows;\n Nmiss = ccounts(_i_) ;\n Output ;\n end ;\n Do over nums;\n Type = 'numeric';\n Variable = vname(nums) ;\n N = rows;\n Nmiss = ncounts(_i_) ;\n if variable ^= 'rows' then output;\n end ;\n stop;\n keep Variable N NMISS Type ;\n run;\n \"\"\"", "if", "self", ".", "sas", ".", "nosub", ":", "print", "(", "info_code", ".", "format", "(", "self", ".", "libref", ",", "self", ".", "table", ",", "self", ".", "_dsopts", "(", ")", ")", ")", "return", "None", "info_pd", "=", "self", ".", "_returnPD", "(", "info_code", ".", "format", "(", "self", ".", "libref", ",", "self", ".", "table", ",", "self", ".", "_dsopts", "(", ")", ")", ",", "'_statsInfo'", ")", "info_pd", "=", "info_pd", ".", "iloc", "[", ":", ",", ":", "]", "info_pd", ".", "index", ".", "name", "=", "None", "info_pd", ".", "name", "=", "None", "return", "info_pd" ]
Display the column info on a SAS data object :return: Pandas data frame
[ "Display", "the", "column", "info", "on", "a", "SAS", "data", "object" ]
python
train
aliyun/aliyun-odps-python-sdk
odps/df/expr/expressions.py
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/df/expr/expressions.py#L207-L255
def persist(self, name, partitions=None, partition=None, lifecycle=None, project=None, **kwargs):
    """
    Persist the execution into a new table. If `partitions` not specified, will create a new table without partitions if the table does not exist, and insert the SQL result into it. If `partitions` are specified, they will be the partition fields of the new table. If `partition` is specified, the data will be inserted into the exact partition of the table.

    :param name: table name
    :param partitions: list of string, the partition fields
    :type partitions: list
    :param partition: persist to a specified partition
    :type partition: string or PartitionSpec
    :param lifecycle: table lifecycle. If absent, `options.lifecycle` will be used.
    :type lifecycle: int
    :param project: project name, if not provided, will be the default project
    :param hints: settings for SQL, e.g. `odps.sql.mapper.split.size`
    :type hints: dict
    :param priority: instance priority, 9 as default
    :type priority: int
    :param running_cluster: cluster to run this instance
    :param overwrite: overwrite the table, True as default
    :type overwrite: bool
    :param drop_table: drop table if exists, False as default
    :type drop_table: bool
    :param create_table: create table first if not exists, True as default
    :type create_table: bool
    :param drop_partition: drop partition if exists, False as default
    :type drop_partition: bool
    :param create_partition: create partition if not exists, None as default
    :type create_partition: bool
    :param cast: cast all columns' types as in the existing table, False as default
    :type cast: bool
    :return: :class:`odps.df.DataFrame`

    :Example:

    >>> df = df['name', 'id', 'ds']
    >>> df.persist('odps_new_table')
    >>> df.persist('odps_new_table', partition='pt=test')
    >>> df.persist('odps_new_table', partitions=['ds'])
    """
    if lifecycle is None and options.lifecycle is not None:
        lifecycle = \
            options.lifecycle if not name.startswith(TEMP_TABLE_PREFIX) \
            else options.temp_lifecycle

    return self._handle_delay_call('persist', self, name, partitions=partitions, partition=partition,
                                   lifecycle=lifecycle, project=project, **kwargs)
[ "def", "persist", "(", "self", ",", "name", ",", "partitions", "=", "None", ",", "partition", "=", "None", ",", "lifecycle", "=", "None", ",", "project", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "lifecycle", "is", "None", "and", "options", ".", "lifecycle", "is", "not", "None", ":", "lifecycle", "=", "options", ".", "lifecycle", "if", "not", "name", ".", "startswith", "(", "TEMP_TABLE_PREFIX", ")", "else", "options", ".", "temp_lifecycle", "return", "self", ".", "_handle_delay_call", "(", "'persist'", ",", "self", ",", "name", ",", "partitions", "=", "partitions", ",", "partition", "=", "partition", ",", "lifecycle", "=", "lifecycle", ",", "project", "=", "project", ",", "*", "*", "kwargs", ")" ]
Persist the execution into a new table. If `partitions` not specified, will create a new table without partitions if the table does not exist, and insert the SQL result into it. If `partitions` are specified, they will be the partition fields of the new table. If `partition` is specified, the data will be inserted into the exact partition of the table.

:param name: table name
:param partitions: list of string, the partition fields
:type partitions: list
:param partition: persist to a specified partition
:type partition: string or PartitionSpec
:param lifecycle: table lifecycle. If absent, `options.lifecycle` will be used.
:type lifecycle: int
:param project: project name, if not provided, will be the default project
:param hints: settings for SQL, e.g. `odps.sql.mapper.split.size`
:type hints: dict
:param priority: instance priority, 9 as default
:type priority: int
:param running_cluster: cluster to run this instance
:param overwrite: overwrite the table, True as default
:type overwrite: bool
:param drop_table: drop table if exists, False as default
:type drop_table: bool
:param create_table: create table first if not exists, True as default
:type create_table: bool
:param drop_partition: drop partition if exists, False as default
:type drop_partition: bool
:param create_partition: create partition if not exists, None as default
:type create_partition: bool
:param cast: cast all columns' types as in the existing table, False as default
:type cast: bool
:return: :class:`odps.df.DataFrame`

:Example:

>>> df = df['name', 'id', 'ds']
>>> df.persist('odps_new_table')
>>> df.persist('odps_new_table', partition='pt=test')
>>> df.persist('odps_new_table', partitions=['ds'])
[ "Persist", "the", "execution", "into", "a", "new", "table", ".", "If", "partitions", "not", "specified", "will", "create", "a", "new", "table", "without", "partitions", "if", "the", "table", "does", "not", "exist", "and", "insert", "the", "SQL", "result", "into", "it", ".", "If", "partitions", "are", "specified", "they", "will", "be", "the", "partition", "fields", "of", "the", "new", "table", ".", "If", "partition", "is", "specified", "the", "data", "will", "be", "inserted", "into", "the", "exact", "partition", "of", "the", "table", "." ]
python
train
boriel/zxbasic
arch/zx48k/backend/__8bit.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/backend/__8bit.py#L894-L945
def _shru8(ins):
    """ Shift 8bit unsigned integer to the right. The result is pushed onto the stack.

    Optimizations:
      * If 1st or 2nd op is 0 then do nothing
      * If 2nd op is < 4 then unroll loop
    """
    op1, op2 = tuple(ins.quad[2:])
    if is_int(op2):
        op2 = int8(op2)
        output = _8bit_oper(op1)
        if op2 == 0:
            output.append('push af')
            return output

        if op2 < 4:
            output.extend(['srl a'] * op2)
            output.append('push af')
            return output

        label = tmp_label()
        output.append('ld b, %i' % int8(op2))
        output.append('%s:' % label)
        output.append('srl a')
        output.append('djnz %s' % label)
        output.append('push af')
        return output

    if is_int(op1) and int(op1) == 0:
        output = _8bit_oper(op2)
        output.append('xor a')
        output.append('push af')
        return output

    output = _8bit_oper(op1, op2, True)
    label = tmp_label()
    label2 = tmp_label()
    output.append('or a')
    output.append('ld b, a')
    output.append('ld a, h')
    output.append('jr z, %s' % label2)
    output.append('%s:' % label)
    output.append('srl a')
    output.append('djnz %s' % label)
    output.append('%s:' % label2)
    output.append('push af')
    return output
[ "def", "_shru8", "(", "ins", ")", ":", "op1", ",", "op2", "=", "tuple", "(", "ins", ".", "quad", "[", "2", ":", "]", ")", "if", "is_int", "(", "op2", ")", ":", "op2", "=", "int8", "(", "op2", ")", "output", "=", "_8bit_oper", "(", "op1", ")", "if", "op2", "==", "0", ":", "output", ".", "append", "(", "'push af'", ")", "return", "output", "if", "op2", "<", "4", ":", "output", ".", "extend", "(", "[", "'srl a'", "]", "*", "op2", ")", "output", ".", "append", "(", "'push af'", ")", "return", "output", "label", "=", "tmp_label", "(", ")", "output", ".", "append", "(", "'ld b, %i'", "%", "int8", "(", "op2", ")", ")", "output", ".", "append", "(", "'%s:'", "%", "label", ")", "output", ".", "append", "(", "'srl a'", ")", "output", ".", "append", "(", "'djnz %s'", "%", "label", ")", "output", ".", "append", "(", "'push af'", ")", "return", "output", "if", "is_int", "(", "op1", ")", "and", "int", "(", "op1", ")", "==", "0", ":", "output", "=", "_8bit_oper", "(", "op2", ")", "output", ".", "append", "(", "'xor a'", ")", "output", ".", "append", "(", "'push af'", ")", "return", "output", "output", "=", "_8bit_oper", "(", "op1", ",", "op2", ",", "True", ")", "label", "=", "tmp_label", "(", ")", "label2", "=", "tmp_label", "(", ")", "output", ".", "append", "(", "'or a'", ")", "output", ".", "append", "(", "'ld b, a'", ")", "output", ".", "append", "(", "'ld a, h'", ")", "output", ".", "append", "(", "'jr z, %s'", "%", "label2", ")", "output", ".", "append", "(", "'%s:'", "%", "label", ")", "output", ".", "append", "(", "'srl a'", ")", "output", ".", "append", "(", "'djnz %s'", "%", "label", ")", "output", ".", "append", "(", "'%s:'", "%", "label2", ")", "output", ".", "append", "(", "'push af'", ")", "return", "output" ]
Shift 8bit unsigned integer to the right. The result is pushed onto the stack.

Optimizations:
  * If 1st or 2nd op is 0 then do nothing
  * If 2nd op is < 4 then unroll loop
[ "Shift", "8bit", "unsigned", "integer", "to", "the", "right", ".", "The", "result", "is", "pushed", "onto", "the", "stack", "." ]
python
train
globus/globus-cli
globus_cli/parsing/shared_options.py
https://github.com/globus/globus-cli/blob/336675ff24da64c5ee487243f39ae39fc49a7e14/globus_cli/parsing/shared_options.py#L403-L428
def task_id_arg(*args, **kwargs):
    """
    This is the `TASK_ID` argument consumed by many Transfer Task
    operations. It accepts a toggle on whether or not it is required

    Usage:

    >>> @task_id_arg
    >>> def command_func(task_id):
    >>>     ...

    or

    >>> @task_id_arg(required=False)
    >>> def command_func(task_id):
    >>>     ...

    By default, the task ID is made required; pass `required=False` to the
    decorator arguments to make it optional.
    """

    def inner_decorator(f, required=True):
        f = click.argument("TASK_ID", required=required)(f)
        return f

    return detect_and_decorate(inner_decorator, args, kwargs)
[ "def", "task_id_arg", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "inner_decorator", "(", "f", ",", "required", "=", "True", ")", ":", "f", "=", "click", ".", "argument", "(", "\"TASK_ID\"", ",", "required", "=", "required", ")", "(", "f", ")", "return", "f", "return", "detect_and_decorate", "(", "inner_decorator", ",", "args", ",", "kwargs", ")" ]
This is the `TASK_ID` argument consumed by many Transfer Task operations. It accepts a toggle on whether or not it is required

Usage:

>>> @task_id_arg
>>> def command_func(task_id):
>>>     ...

or

>>> @task_id_arg(required=False)
>>> def command_func(task_id):
>>>     ...

By default, the task ID is made required; pass `required=False` to the decorator arguments to make it optional.
[ "This", "is", "the", "TASK_ID", "argument", "consumed", "by", "many", "Transfer", "Task", "operations", ".", "It", "accept", "a", "toggle", "on", "whether", "or", "not", "it", "is", "required" ]
python
train
CleanCut/green
green/output.py
https://github.com/CleanCut/green/blob/6434515302472363b7d10135be76ed8cd3934d80/green/output.py#L22-L27
def debug(message, level=1): """ So we can tune how much debug output we get when we turn it on. """ if level <= debug_level: logging.debug(' ' * (level - 1) * 2 + str(message))
[ "def", "debug", "(", "message", ",", "level", "=", "1", ")", ":", "if", "level", "<=", "debug_level", ":", "logging", ".", "debug", "(", "' '", "*", "(", "level", "-", "1", ")", "*", "2", "+", "str", "(", "message", ")", ")" ]
So we can tune how much debug output we get when we turn it on.
[ "So", "we", "can", "tune", "how", "much", "debug", "output", "we", "get", "when", "we", "turn", "it", "on", "." ]
python
train
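A runnable sketch of how the level gate and indentation behave; debug_level is the module global the function compares against, redefined here so the snippet is self-contained.

import logging

logging.basicConfig(level=logging.DEBUG)
debug_level = 2  # module global in green.output; redefined here for the sketch

def debug(message, level=1):
    if level <= debug_level:
        logging.debug(' ' * (level - 1) * 2 + str(message))

debug("top-level")         # logged, no indent
debug("nested detail", 2)  # logged, two-space indent
debug("too deep", 3)       # suppressed: 3 > debug_level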
OLC-Bioinformatics/sipprverse
cgecore/cmdline.py
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/cgecore/cmdline.py#L93-L120
def print_timers(self): ''' PRINT EXECUTION TIMES FOR THE LIST OF PROGRAMS ''' self.timer += time() total_time = self.timer tmp = '* %s *' debug.log( '', '* '*29, tmp%(' '*51), tmp%('%s %s %s'%('Program Name'.ljust(20), 'Status'.ljust(7), 'Execute Time (H:M:S)')), tmp%('='*51) ) for name in self.list: if self.exists(name): timer = getattr(self, name).get_time() status = getattr(self, name).get_status() self.timer -= timer debug.log(tmp%(self.return_timer(name, status, timer))) else: debug.log(tmp%("%s %s -- : -- : --"%(name[:20].ljust(20),' '*8))) debug.log( tmp%(self.return_timer('Wrapper', '', self.timer)), tmp%('='*51), tmp%(self.return_timer('Total', '', total_time)), tmp%(' '*51), '* '*29, '' )
[ "def", "print_timers", "(", "self", ")", ":", "self", ".", "timer", "+=", "time", "(", ")", "total_time", "=", "self", ".", "timer", "tmp", "=", "'* %s *'", "debug", ".", "log", "(", "''", ",", "'* '", "*", "29", ",", "tmp", "%", "(", "' '", "*", "51", ")", ",", "tmp", "%", "(", "'%s %s %s'", "%", "(", "'Program Name'", ".", "ljust", "(", "20", ")", ",", "'Status'", ".", "ljust", "(", "7", ")", ",", "'Execute Time (H:M:S)'", ")", ")", ",", "tmp", "%", "(", "'='", "*", "51", ")", ")", "for", "name", "in", "self", ".", "list", ":", "if", "self", ".", "exists", "(", "name", ")", ":", "timer", "=", "getattr", "(", "self", ",", "name", ")", ".", "get_time", "(", ")", "status", "=", "getattr", "(", "self", ",", "name", ")", ".", "get_status", "(", ")", "self", ".", "timer", "-=", "timer", "debug", ".", "log", "(", "tmp", "%", "(", "self", ".", "return_timer", "(", "name", ",", "status", ",", "timer", ")", ")", ")", "else", ":", "debug", ".", "log", "(", "tmp", "%", "(", "\"%s %s -- : -- : --\"", "%", "(", "name", "[", ":", "20", "]", ".", "ljust", "(", "20", ")", ",", "' '", "*", "8", ")", ")", ")", "debug", ".", "log", "(", "tmp", "%", "(", "self", ".", "return_timer", "(", "'Wrapper'", ",", "''", ",", "self", ".", "timer", ")", ")", ",", "tmp", "%", "(", "'='", "*", "51", ")", ",", "tmp", "%", "(", "self", ".", "return_timer", "(", "'Total'", ",", "''", ",", "total_time", ")", ")", ",", "tmp", "%", "(", "' '", "*", "51", ")", ",", "'* '", "*", "29", ",", "''", ")" ]
PRINT EXECUTION TIMES FOR THE LIST OF PROGRAMS
[ "PRINT", "EXECUTION", "TIMES", "FOR", "THE", "LIST", "OF", "PROGRAMS" ]
python
train
mottosso/be
be/vendor/requests/utils.py
https://github.com/mottosso/be/blob/0f3d4f3597c71223f616d78c6d9b2c8dffcd8a71/be/vendor/requests/utils.py#L487-L530
def should_bypass_proxies(url): """ Returns whether we should bypass proxies or not. """ get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper()) # First check whether no_proxy is defined. If it is, check that the URL # we're getting isn't in the no_proxy list. no_proxy = get_proxy('no_proxy') netloc = urlparse(url).netloc if no_proxy: # We need to check whether we match here. We need to see if we match # the end of the netloc, both with and without the port. no_proxy = no_proxy.replace(' ', '').split(',') ip = netloc.split(':')[0] if is_ipv4_address(ip): for proxy_ip in no_proxy: if is_valid_cidr(proxy_ip): if address_in_network(ip, proxy_ip): return True else: for host in no_proxy: if netloc.endswith(host) or netloc.split(':')[0].endswith(host): # The URL does match something in no_proxy, so we don't want # to apply the proxies on this URL. return True # If the system proxy settings indicate that this URL should be bypassed, # don't proxy. # The proxy_bypass function is incredibly buggy on OS X in early versions # of Python 2.6, so allow this call to fail. Only catch the specific # exceptions we've seen, though: this call failing in other ways can reveal # legitimate problems. try: bypass = proxy_bypass(netloc) except (TypeError, socket.gaierror): bypass = False if bypass: return True return False
[ "def", "should_bypass_proxies", "(", "url", ")", ":", "get_proxy", "=", "lambda", "k", ":", "os", ".", "environ", ".", "get", "(", "k", ")", "or", "os", ".", "environ", ".", "get", "(", "k", ".", "upper", "(", ")", ")", "# First check whether no_proxy is defined. If it is, check that the URL", "# we're getting isn't in the no_proxy list.", "no_proxy", "=", "get_proxy", "(", "'no_proxy'", ")", "netloc", "=", "urlparse", "(", "url", ")", ".", "netloc", "if", "no_proxy", ":", "# We need to check whether we match here. We need to see if we match", "# the end of the netloc, both with and without the port.", "no_proxy", "=", "no_proxy", ".", "replace", "(", "' '", ",", "''", ")", ".", "split", "(", "','", ")", "ip", "=", "netloc", ".", "split", "(", "':'", ")", "[", "0", "]", "if", "is_ipv4_address", "(", "ip", ")", ":", "for", "proxy_ip", "in", "no_proxy", ":", "if", "is_valid_cidr", "(", "proxy_ip", ")", ":", "if", "address_in_network", "(", "ip", ",", "proxy_ip", ")", ":", "return", "True", "else", ":", "for", "host", "in", "no_proxy", ":", "if", "netloc", ".", "endswith", "(", "host", ")", "or", "netloc", ".", "split", "(", "':'", ")", "[", "0", "]", ".", "endswith", "(", "host", ")", ":", "# The URL does match something in no_proxy, so we don't want", "# to apply the proxies on this URL.", "return", "True", "# If the system proxy settings indicate that this URL should be bypassed,", "# don't proxy.", "# The proxy_bypass function is incredibly buggy on OS X in early versions", "# of Python 2.6, so allow this call to fail. Only catch the specific", "# exceptions we've seen, though: this call failing in other ways can reveal", "# legitimate problems.", "try", ":", "bypass", "=", "proxy_bypass", "(", "netloc", ")", "except", "(", "TypeError", ",", "socket", ".", "gaierror", ")", ":", "bypass", "=", "False", "if", "bypass", ":", "return", "True", "return", "False" ]
Returns whether we should bypass proxies or not.
[ "Returns", "whether", "we", "should", "bypass", "proxies", "or", "not", "." ]
python
train
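A standalone sketch of the hostname half of the check above: a URL bypasses the proxy when its netloc, with or without the port, ends with an entry from no_proxy. The IPv4/CIDR branch and the system proxy_bypass fallback are omitted; this is not requests' actual helper.

from urllib.parse import urlparse

def matches_no_proxy(url, no_proxy):
    netloc = urlparse(url).netloc
    hosts = no_proxy.replace(' ', '').split(',')
    return any(netloc.endswith(h) or netloc.split(':')[0].endswith(h)
               for h in hosts)

print(matches_no_proxy("http://internal.example.com:8080", "example.com"))  # True
print(matches_no_proxy("http://pypi.org", "example.com"))                   # False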
google/pyringe
pyringe/inferior.py
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/inferior.py#L458-L466
def needsattached(func): """Decorator to prevent commands from being used when not attached.""" @functools.wraps(func) def wrap(self, *args, **kwargs): if not self.attached: raise PositionError('Not attached to any process.') return func(self, *args, **kwargs) return wrap
[ "def", "needsattached", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrap", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "attached", ":", "raise", "PositionError", "(", "'Not attached to any process.'", ")", "return", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrap" ]
Decorator to prevent commands from being used when not attached.
[ "Decorator", "to", "prevent", "commands", "from", "being", "used", "when", "not", "attached", "." ]
python
train
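The decorator above is a plain guard pattern. A minimal self-contained sketch; the Debugger class is a hypothetical consumer, and PositionError is redefined locally so the snippet runs without pyringe.

import functools

class PositionError(Exception):  # local stand-in for pyringe's exception
    pass

def needsattached(func):
    @functools.wraps(func)
    def wrap(self, *args, **kwargs):
        if not self.attached:
            raise PositionError('Not attached to any process.')
        return func(self, *args, **kwargs)
    return wrap

class Debugger(object):  # hypothetical consumer of the decorator
    attached = False

    @needsattached
    def backtrace(self):
        return 'bt'

d = Debugger()
try:
    d.backtrace()
except PositionError as exc:
    print(exc)            # Not attached to any process.
d.attached = True
print(d.backtrace())      # bt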
riga/tfdeploy
tfdeploy.py
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1619-L1627
def MatrixDiagPart(a): """ Batched diag op that returns only the diagonal elements. """ r = np.zeros(a.shape[:-2] + (min(a.shape[-2:]),)) for coord in np.ndindex(a.shape[:-2]): pos = coord + (Ellipsis,) r[pos] = np.diagonal(a[pos]) return r,
[ "def", "MatrixDiagPart", "(", "a", ")", ":", "r", "=", "np", ".", "zeros", "(", "a", ".", "shape", "[", ":", "-", "2", "]", "+", "(", "min", "(", "a", ".", "shape", "[", "-", "2", ":", "]", ")", ",", ")", ")", "for", "coord", "in", "np", ".", "ndindex", "(", "a", ".", "shape", "[", ":", "-", "2", "]", ")", ":", "pos", "=", "coord", "+", "(", "Ellipsis", ",", ")", "r", "[", "pos", "]", "=", "np", ".", "diagonal", "(", "a", "[", "pos", "]", ")", "return", "r", "," ]
Batched diag op that returns only the diagonal elements.
[ "Batched", "diag", "op", "that", "returns", "only", "the", "diagonal", "elements", "." ]
python
train
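A quick check of the batched behaviour; the function body is copied from the record above so the snippet runs standalone. Note the op returns a 1-tuple, matching tfdeploy's convention.

import numpy as np

def MatrixDiagPart(a):
    r = np.zeros(a.shape[:-2] + (min(a.shape[-2:]),))
    for coord in np.ndindex(a.shape[:-2]):
        pos = coord + (Ellipsis,)
        r[pos] = np.diagonal(a[pos])
    return r,

batch = np.arange(2 * 3 * 3).reshape(2, 3, 3)
(diags,) = MatrixDiagPart(batch)  # unpack the 1-tuple
print(diags)  # [[ 0.  4.  8.] [ 9. 13. 17.]]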
mitsei/dlkit
dlkit/json_/grading/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/grading/objects.py#L730-L742
def clear_numeric_score_increment(self): """Clears the numeric score increment. raise: NoAccess - ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.grading.GradeSystemForm.clear_lowest_numeric_score if (self.get_numeric_score_increment_metadata().is_read_only() or self.get_numeric_score_increment_metadata().is_required()): raise errors.NoAccess() self._my_map['numericScoreIncrement'] = self._numeric_score_increment_default
[ "def", "clear_numeric_score_increment", "(", "self", ")", ":", "# Implemented from template for osid.grading.GradeSystemForm.clear_lowest_numeric_score", "if", "(", "self", ".", "get_numeric_score_increment_metadata", "(", ")", ".", "is_read_only", "(", ")", "or", "self", ".", "get_numeric_score_increment_metadata", "(", ")", ".", "is_required", "(", ")", ")", ":", "raise", "errors", ".", "NoAccess", "(", ")", "self", ".", "_my_map", "[", "'numericScoreIncrement'", "]", "=", "self", ".", "_numeric_score_increment_default" ]
Clears the numeric score increment. raise: NoAccess - ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*
[ "Clears", "the", "numeric", "score", "increment", "." ]
python
train
serge-sans-paille/pythran
pythran/toolchain.py
https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/toolchain.py#L79-L84
def _write_temp(content, suffix):
    '''write `content` to a temporary XXX`suffix` file and return the filename.
       It is the user's responsibility to delete it when done.'''
    with NamedTemporaryFile(mode='w', suffix=suffix, delete=False) as out:
        out.write(content)
    return out.name
[ "def", "_write_temp", "(", "content", ",", "suffix", ")", ":", "with", "NamedTemporaryFile", "(", "mode", "=", "'w'", ",", "suffix", "=", "suffix", ",", "delete", "=", "False", ")", "as", "out", ":", "out", ".", "write", "(", "content", ")", "return", "out", ".", "name" ]
write `content` to a temporary XXX`suffix` file and return the filename. It is user's responsibility to delete when done.
[ "write", "content", "to", "a", "temporary", "XXX", "suffix", "file", "and", "return", "the", "filename", ".", "It", "is", "user", "s", "responsibility", "to", "delete", "when", "done", "." ]
python
train
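A usage sketch for _write_temp; the body is copied from the record above so the snippet is self-contained, and the caller deletes the file afterwards, as the docstring requires.

import os
from tempfile import NamedTemporaryFile

def _write_temp(content, suffix):
    with NamedTemporaryFile(mode='w', suffix=suffix, delete=False) as out:
        out.write(content)
    return out.name

fname = _write_temp('print("hi")', '.py')
print(fname.endswith('.py'))  # True
os.unlink(fname)              # caller's responsibility to delete when done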
pandas-dev/pandas
pandas/core/indexes/base.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4925-L4943
def insert(self, loc, item): """ Make new Index inserting new item at location. Follows Python list.append semantics for negative values. Parameters ---------- loc : int item : object Returns ------- new_index : Index """ _self = np.asarray(self) item = self._coerce_scalar_to_index(item)._ndarray_values idx = np.concatenate((_self[:loc], item, _self[loc:])) return self._shallow_copy_with_infer(idx)
[ "def", "insert", "(", "self", ",", "loc", ",", "item", ")", ":", "_self", "=", "np", ".", "asarray", "(", "self", ")", "item", "=", "self", ".", "_coerce_scalar_to_index", "(", "item", ")", ".", "_ndarray_values", "idx", "=", "np", ".", "concatenate", "(", "(", "_self", "[", ":", "loc", "]", ",", "item", ",", "_self", "[", "loc", ":", "]", ")", ")", "return", "self", ".", "_shallow_copy_with_infer", "(", "idx", ")" ]
Make new Index inserting new item at location. Follows Python list.append semantics for negative values. Parameters ---------- loc : int item : object Returns ------- new_index : Index
[ "Make", "new", "Index", "inserting", "new", "item", "at", "location", "." ]
python
train
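A quick demonstration of Index.insert, including the list-insert-style negative location the docstring mentions; the exact repr of the result varies slightly across pandas versions.

import pandas as pd

idx = pd.Index([1, 2, 4])
print(idx.insert(2, 3))    # Index containing [1, 2, 3, 4]
print(idx.insert(-1, 99))  # Index containing [1, 2, 99, 4]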