Dataset columns:

    column            type            range / classes
    repo              stringlengths   7 - 55
    path              stringlengths   4 - 223
    url               stringlengths   87 - 315
    code              stringlengths   75 - 104k
    code_tokens       list
    docstring         stringlengths   1 - 46.9k
    docstring_tokens  list
    language          stringclasses   1 value
    partition         stringclasses   3 values
    avg_line_len      float64         7.91 - 980
pyamg/pyamg
pyamg/blackbox.py
https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/blackbox.py#L55-L155
def solver_configuration(A, B=None, verb=True):
    """Generate a dictionary of SA parameters for an arbitrary matrix A.

    Parameters
    ----------
    A : array, matrix, csr_matrix, bsr_matrix
        (n x n) matrix to invert, CSR or BSR format preferred for efficiency
    B : None, array
        Near null-space modes used to construct the smoothed aggregation solver
        If None, the constant vector is used
        If (n x m) array, then B is passed to smoothed_aggregation_solver
    verb : bool
        If True, print verbose output during runtime

    Returns
    -------
    config : dict
        A dictionary of solver configuration parameters that one uses to
        generate a smoothed aggregation solver

    Notes
    -----
    The config dictionary contains the following parameter entries: symmetry,
    smooth, presmoother, postsmoother, B, strength, max_levels, max_coarse,
    coarse_solver, aggregate, keep. See smoothed_aggregation_solver for each
    parameter's description.

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg import solver_configuration
    >>> A = poisson((40,40),format='csr')
    >>> solver_config = solver_configuration(A,verb=False)
    """
    # Ensure acceptable format of A
    A = make_csr(A)
    config = {}

    # Detect symmetry
    if ishermitian(A, fast_check=True):
        config['symmetry'] = 'hermitian'
        if verb:
            print("  Detected a Hermitian matrix")
    else:
        config['symmetry'] = 'nonsymmetric'
        if verb:
            print("  Detected a non-Hermitian matrix")

    # Symmetry dependent parameters
    if config['symmetry'] == 'hermitian':
        config['smooth'] = ('energy', {'krylov': 'cg', 'maxiter': 3,
                                       'degree': 2, 'weighting': 'local'})
        config['presmoother'] = ('block_gauss_seidel',
                                 {'sweep': 'symmetric', 'iterations': 1})
        config['postsmoother'] = ('block_gauss_seidel',
                                  {'sweep': 'symmetric', 'iterations': 1})
    else:
        config['smooth'] = ('energy', {'krylov': 'gmres', 'maxiter': 3,
                                       'degree': 2, 'weighting': 'local'})
        config['presmoother'] = ('gauss_seidel_nr',
                                 {'sweep': 'symmetric', 'iterations': 2})
        config['postsmoother'] = ('gauss_seidel_nr',
                                  {'sweep': 'symmetric', 'iterations': 2})

    # Determine near null-space modes B
    if B is None:
        # B is the constant for each variable in a node
        if isspmatrix_bsr(A) and A.blocksize[0] > 1:
            bsize = A.blocksize[0]
            config['B'] = np.kron(np.ones((int(A.shape[0] / bsize), 1),
                                          dtype=A.dtype), np.eye(bsize))
        else:
            config['B'] = np.ones((A.shape[0], 1), dtype=A.dtype)
    elif (isinstance(B, type(np.zeros((1,)))) or
          isinstance(B, type(sp.mat(np.zeros((1,)))))):
        if len(B.shape) == 1:
            B = B.reshape(-1, 1)
        if (B.shape[0] != A.shape[0]) or (B.shape[1] == 0):
            raise TypeError('Invalid dimensions of B, B.shape[0] must equal '
                            'A.shape[0]')
        else:
            config['B'] = np.array(B, dtype=A.dtype)
    else:
        raise TypeError('Invalid B')

    if config['symmetry'] == 'hermitian':
        config['BH'] = None
    else:
        config['BH'] = config['B'].copy()

    # Set non-symmetry related parameters
    config['strength'] = ('evolution', {'k': 2, 'proj_type': 'l2',
                                        'epsilon': 3.0})
    config['max_levels'] = 15
    config['max_coarse'] = 500
    config['coarse_solver'] = 'pinv'
    config['aggregate'] = 'standard'
    config['keep'] = False

    return config
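As a usage sketch: pyamg's blackbox module also exposes a `solver` factory that consumes this config dict (a minimal sketch, assuming pyamg and scipy are installed):

    # Minimal usage sketch (assumes pyamg/scipy installed; solver() is the
    # companion factory in pyamg.blackbox that consumes this config dict).
    import numpy as np
    from pyamg.gallery import poisson
    from pyamg.blackbox import solver, solver_configuration

    A = poisson((40, 40), format='csr')
    config = solver_configuration(A, verb=False)
    ml = solver(A, config)          # build the SA hierarchy from the config
    b = np.ones(A.shape[0])
    x = ml.solve(b, tol=1e-8)       # approximate solution of A x = b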
[ "def", "solver_configuration", "(", "A", ",", "B", "=", "None", ",", "verb", "=", "True", ")", ":", "# Ensure acceptable format of A", "A", "=", "make_csr", "(", "A", ")", "config", "=", "{", "}", "# Detect symmetry", "if", "ishermitian", "(", "A", ",", "fast_check", "=", "True", ")", ":", "config", "[", "'symmetry'", "]", "=", "'hermitian'", "if", "verb", ":", "print", "(", "\" Detected a Hermitian matrix\"", ")", "else", ":", "config", "[", "'symmetry'", "]", "=", "'nonsymmetric'", "if", "verb", ":", "print", "(", "\" Detected a non-Hermitian matrix\"", ")", "# Symmetry dependent parameters", "if", "config", "[", "'symmetry'", "]", "==", "'hermitian'", ":", "config", "[", "'smooth'", "]", "=", "(", "'energy'", ",", "{", "'krylov'", ":", "'cg'", ",", "'maxiter'", ":", "3", ",", "'degree'", ":", "2", ",", "'weighting'", ":", "'local'", "}", ")", "config", "[", "'presmoother'", "]", "=", "(", "'block_gauss_seidel'", ",", "{", "'sweep'", ":", "'symmetric'", ",", "'iterations'", ":", "1", "}", ")", "config", "[", "'postsmoother'", "]", "=", "(", "'block_gauss_seidel'", ",", "{", "'sweep'", ":", "'symmetric'", ",", "'iterations'", ":", "1", "}", ")", "else", ":", "config", "[", "'smooth'", "]", "=", "(", "'energy'", ",", "{", "'krylov'", ":", "'gmres'", ",", "'maxiter'", ":", "3", ",", "'degree'", ":", "2", ",", "'weighting'", ":", "'local'", "}", ")", "config", "[", "'presmoother'", "]", "=", "(", "'gauss_seidel_nr'", ",", "{", "'sweep'", ":", "'symmetric'", ",", "'iterations'", ":", "2", "}", ")", "config", "[", "'postsmoother'", "]", "=", "(", "'gauss_seidel_nr'", ",", "{", "'sweep'", ":", "'symmetric'", ",", "'iterations'", ":", "2", "}", ")", "# Determine near null-space modes B", "if", "B", "is", "None", ":", "# B is the constant for each variable in a node", "if", "isspmatrix_bsr", "(", "A", ")", "and", "A", ".", "blocksize", "[", "0", "]", ">", "1", ":", "bsize", "=", "A", ".", "blocksize", "[", "0", "]", "config", "[", "'B'", "]", "=", "np", ".", "kron", "(", "np", ".", "ones", "(", "(", "int", "(", "A", ".", "shape", "[", "0", "]", "/", "bsize", ")", ",", "1", ")", ",", "dtype", "=", "A", ".", "dtype", ")", ",", "np", ".", "eye", "(", "bsize", ")", ")", "else", ":", "config", "[", "'B'", "]", "=", "np", ".", "ones", "(", "(", "A", ".", "shape", "[", "0", "]", ",", "1", ")", ",", "dtype", "=", "A", ".", "dtype", ")", "elif", "(", "isinstance", "(", "B", ",", "type", "(", "np", ".", "zeros", "(", "(", "1", ",", ")", ")", ")", ")", "or", "isinstance", "(", "B", ",", "type", "(", "sp", ".", "mat", "(", "np", ".", "zeros", "(", "(", "1", ",", ")", ")", ")", ")", ")", ")", ":", "if", "len", "(", "B", ".", "shape", ")", "==", "1", ":", "B", "=", "B", ".", "reshape", "(", "-", "1", ",", "1", ")", "if", "(", "B", ".", "shape", "[", "0", "]", "!=", "A", ".", "shape", "[", "0", "]", ")", "or", "(", "B", ".", "shape", "[", "1", "]", "==", "0", ")", ":", "raise", "TypeError", "(", "'Invalid dimensions of B, B.shape[0] must equal \\\n A.shape[0]'", ")", "else", ":", "config", "[", "'B'", "]", "=", "np", ".", "array", "(", "B", ",", "dtype", "=", "A", ".", "dtype", ")", "else", ":", "raise", "TypeError", "(", "'Invalid B'", ")", "if", "config", "[", "'symmetry'", "]", "==", "'hermitian'", ":", "config", "[", "'BH'", "]", "=", "None", "else", ":", "config", "[", "'BH'", "]", "=", "config", "[", "'B'", "]", ".", "copy", "(", ")", "# Set non-symmetry related parameters", "config", "[", "'strength'", "]", "=", "(", "'evolution'", ",", "{", "'k'", ":", "2", ",", "'proj_type'", ":", "'l2'", ",", 
"'epsilon'", ":", "3.0", "}", ")", "config", "[", "'max_levels'", "]", "=", "15", "config", "[", "'max_coarse'", "]", "=", "500", "config", "[", "'coarse_solver'", "]", "=", "'pinv'", "config", "[", "'aggregate'", "]", "=", "'standard'", "config", "[", "'keep'", "]", "=", "False", "return", "config" ]
Generate a dictionary of SA parameters for an arbitray matrix A. Parameters ---------- A : array, matrix, csr_matrix, bsr_matrix (n x n) matrix to invert, CSR or BSR format preferred for efficiency B : None, array Near null-space modes used to construct the smoothed aggregation solver If None, the constant vector is used If (n x m) array, then B is passed to smoothed_aggregation_solver verb : bool If True, print verbose output during runtime Returns ------- config : dict A dictionary of solver configuration parameters that one uses to generate a smoothed aggregation solver Notes ----- The config dictionary contains the following parameter entries: symmetry, smooth, presmoother, postsmoother, B, strength, max_levels, max_coarse, coarse_solver, aggregate, keep. See smoothed_aggregtion_solver for each parameter's description. Examples -------- >>> from pyamg.gallery import poisson >>> from pyamg import solver_configuration >>> A = poisson((40,40),format='csr') >>> solver_config = solver_configuration(A,verb=False)
[ "Generate", "a", "dictionary", "of", "SA", "parameters", "for", "an", "arbitray", "matrix", "A", "." ]
language: python | partition: train | avg_line_len: 37.207921
underworldcode/stripy
stripy-src/stripy/cartesian.py
https://github.com/underworldcode/stripy/blob/d4c3480c3e58c88489ded695eadbe7cd5bf94b48/stripy-src/stripy/cartesian.py#L889-L905
def edge_lengths(self):
    """
    Compute the edge-lengths of each triangle in the triangulation.
    """
    simplex = self.simplices.T

    # simplex is vectors a, b, c defining the corners
    a = self.points[simplex[0]]
    b = self.points[simplex[1]]
    c = self.points[simplex[2]]

    # norm to calculate length of each edge
    ab = np.linalg.norm(b - a, axis=1)
    bc = np.linalg.norm(c - b, axis=1)
    ac = np.linalg.norm(a - c, axis=1)

    return ab, bc, ac
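The same vectorized pattern works outside stripy; a minimal standalone check with plain NumPy on a single right triangle (not stripy's API):

    import numpy as np

    # Standalone sketch of the vectorized edge-length pattern (not stripy's API).
    points = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
    simplices = np.array([[0, 1, 2]])            # one triangle
    a, b, c = (points[simplices.T[k]] for k in range(3))

    ab = np.linalg.norm(b - a, axis=1)           # [1.0]
    bc = np.linalg.norm(c - b, axis=1)           # [sqrt(2)]
    ac = np.linalg.norm(a - c, axis=1)           # [1.0]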
[ "def", "edge_lengths", "(", "self", ")", ":", "simplex", "=", "self", ".", "simplices", ".", "T", "# simplex is vectors a, b, c defining the corners", "a", "=", "self", ".", "points", "[", "simplex", "[", "0", "]", "]", "b", "=", "self", ".", "points", "[", "simplex", "[", "1", "]", "]", "c", "=", "self", ".", "points", "[", "simplex", "[", "2", "]", "]", "# norm to calculate length", "ab", "=", "np", ".", "linalg", ".", "norm", "(", "b", "-", "a", ",", "axis", "=", "1", ")", "bc", "=", "np", ".", "linalg", ".", "norm", "(", "c", "-", "a", ",", "axis", "=", "1", ")", "ac", "=", "np", ".", "linalg", ".", "norm", "(", "a", "-", "c", ",", "axis", "=", "1", ")", "return", "ab", ",", "bc", ",", "ac" ]
Compute the edge-lengths of each triangle in the triangulation.
[ "Compute", "the", "edge", "-", "lengths", "of", "each", "triangle", "in", "the", "triangulation", "." ]
language: python | partition: train | avg_line_len: 29.235294
StackStorm/pybind
pybind/slxos/v17s_1_02/routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/ospf/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/ospf/__init__.py#L197-L218
def _set_ospf_level1(self, v, load=False):
    """
    Setter method for ospf_level1, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/ospf/ospf_level1 (empty)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_ospf_level1 is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ospf_level1() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=YANGBool, is_leaf=True,
                         yang_name="ospf-level1", rest_name="level-1",
                         parent=self,
                         choice=(u'ch-ospf-levels', u'ca-ospf-level1'),
                         path_helper=self._path_helper,
                         extmethods=self._extmethods, register_paths=True,
                         extensions={u'tailf-common': {
                             u'cli-full-command': None,
                             u'info': u'IS-IS Level-1 routes only',
                             u'alt-name': u'level-1',
                             u'cli-full-no': None}},
                         namespace='urn:brocade.com:mgmt:brocade-isis',
                         defining_module='brocade-isis', yang_type='empty',
                         is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """ospf_level1 must be of a type compatible with empty""",
            'defined-type': "empty",
            'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="ospf-level1", rest_name="level-1", parent=self, choice=(u'ch-ospf-levels', u'ca-ospf-level1'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'IS-IS Level-1 routes only', u'alt-name': u'level-1', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='empty', is_config=True)""",
        })

    self.__ospf_level1 = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_ospf_level1", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "YANGBool", ",", "is_leaf", "=", "True", ",", "yang_name", "=", "\"ospf-level1\"", ",", "rest_name", "=", "\"level-1\"", ",", "parent", "=", "self", ",", "choice", "=", "(", "u'ch-ospf-levels'", ",", "u'ca-ospf-level1'", ")", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'cli-full-command'", ":", "None", ",", "u'info'", ":", "u'IS-IS Level-1 routes only'", ",", "u'alt-name'", ":", "u'level-1'", ",", "u'cli-full-no'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-isis'", ",", "defining_module", "=", "'brocade-isis'", ",", "yang_type", "=", "'empty'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"ospf_level1 must be of a type compatible with empty\"\"\"", ",", "'defined-type'", ":", "\"empty\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"ospf-level1\", rest_name=\"level-1\", parent=self, choice=(u'ch-ospf-levels', u'ca-ospf-level1'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'IS-IS Level-1 routes only', u'alt-name': u'level-1', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='empty', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__ospf_level1", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for ospf_level1, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/ospf/ospf_level1 (empty) If this variable is read-only (config: false) in the source YANG file, then _set_ospf_level1 is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ospf_level1() directly.
[ "Setter", "method", "for", "ospf_level1", "mapped", "from", "YANG", "variable", "/", "routing_system", "/", "router", "/", "isis", "/", "router_isis_cmds_holder", "/", "address_family", "/", "ipv6", "/", "af_ipv6_unicast", "/", "af_ipv6_attributes", "/", "af_common_attributes", "/", "redistribute", "/", "ospf", "/", "ospf_level1", "(", "empty", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_ospf_level1", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_ospf_level1", "()", "directly", "." ]
language: python | partition: train | avg_line_len: 84.727273
oscarlazoarjona/fast
fast/atomic_structure.py
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/atomic_structure.py#L1894-L2127
def calculate_r_matrices(fine_states, reduced_matrix_elements, q=None,
                         numeric=True, convention=1):
    ur"""Calculate the matrix elements of the electric dipole (in the
    helicity basis).

    We calculate all matrix elements for the D2 line in Rb 87.

    >>> from sympy import symbols, pprint
    >>> red = symbols("r", positive=True)
    >>> reduced_matrix_elements = [[0, -red], [red, 0]]
    >>> g = State("Rb", 87, 5, 0, 1/Integer(2))
    >>> e = State("Rb", 87, 5, 1, 3/Integer(2))
    >>> fine_levels = [g, e]
    >>> r = calculate_r_matrices(fine_levels, reduced_matrix_elements,
    ...                          numeric=False)
    >>> pprint(r[0][8:, :8])
    [16x8 pretty-printed block of the p = -1 component: sparse signed
    multiples of r such as sqrt(3)*r/6 and -sqrt(15)*r/12; unicode matrix
    output omitted]
    >>> pprint(r[1][8:, :8])
    [16x8 pretty-printed block of the p = 0 component; output omitted]
    >>> pprint(r[2][8:, :8])
    [16x8 pretty-printed block of the p = +1 component; output omitted]
    """
    magnetic_states = make_list_of_states(fine_states, 'magnetic', verbose=0)
    aux = calculate_boundaries(fine_states, magnetic_states)
    index_list_fine, index_list_hyperfine = aux

    Ne = len(magnetic_states)
    r = [[[0 for j in range(Ne)] for i in range(Ne)] for p in range(3)]

    II = fine_states[0].i
    for p in [-1, 0, 1]:
        for i in range(Ne):
            ei = magnetic_states[i]
            ii = fine_index(i, index_list_fine)
            for j in range(Ne):
                ej = magnetic_states[j]
                jj = fine_index(j, index_list_fine)

                reduced_matrix_elementij = reduced_matrix_elements[ii][jj]
                if reduced_matrix_elementij != 0:
                    ji = ei.j
                    jj = ej.j
                    fi = ei.f
                    fj = ej.f
                    mi = ei.m
                    mj = ej.m

                    rpij = matrix_element(ji, fi, mi, jj, fj, mj, II,
                                          reduced_matrix_elementij, p,
                                          numeric=numeric,
                                          convention=convention)

                    if q == 1:
                        r[p + 1][i][j] = rpij * delta_lesser(i, j)
                    elif q == -1:
                        r[p + 1][i][j] = rpij * delta_greater(i, j)
                    else:
                        r[p + 1][i][j] = rpij

    if not numeric:
        r = [Matrix(ri) for ri in r]
    return r
[ "def", "calculate_r_matrices", "(", "fine_states", ",", "reduced_matrix_elements", ",", "q", "=", "None", ",", "numeric", "=", "True", ",", "convention", "=", "1", ")", ":", "magnetic_states", "=", "make_list_of_states", "(", "fine_states", ",", "'magnetic'", ",", "verbose", "=", "0", ")", "aux", "=", "calculate_boundaries", "(", "fine_states", ",", "magnetic_states", ")", "index_list_fine", ",", "index_list_hyperfine", "=", "aux", "Ne", "=", "len", "(", "magnetic_states", ")", "r", "=", "[", "[", "[", "0", "for", "j", "in", "range", "(", "Ne", ")", "]", "for", "i", "in", "range", "(", "Ne", ")", "]", "for", "p", "in", "range", "(", "3", ")", "]", "II", "=", "fine_states", "[", "0", "]", ".", "i", "for", "p", "in", "[", "-", "1", ",", "0", ",", "1", "]", ":", "for", "i", "in", "range", "(", "Ne", ")", ":", "ei", "=", "magnetic_states", "[", "i", "]", "ii", "=", "fine_index", "(", "i", ",", "index_list_fine", ")", "for", "j", "in", "range", "(", "Ne", ")", ":", "ej", "=", "magnetic_states", "[", "j", "]", "jj", "=", "fine_index", "(", "j", ",", "index_list_fine", ")", "reduced_matrix_elementij", "=", "reduced_matrix_elements", "[", "ii", "]", "[", "jj", "]", "if", "reduced_matrix_elementij", "!=", "0", ":", "ji", "=", "ei", ".", "j", "jj", "=", "ej", ".", "j", "fi", "=", "ei", ".", "f", "fj", "=", "ej", ".", "f", "mi", "=", "ei", ".", "m", "mj", "=", "ej", ".", "m", "rpij", "=", "matrix_element", "(", "ji", ",", "fi", ",", "mi", ",", "jj", ",", "fj", ",", "mj", ",", "II", ",", "reduced_matrix_elementij", ",", "p", ",", "numeric", "=", "numeric", ",", "convention", "=", "convention", ")", "if", "q", "==", "1", ":", "r", "[", "p", "+", "1", "]", "[", "i", "]", "[", "j", "]", "=", "rpij", "*", "delta_lesser", "(", "i", ",", "j", ")", "elif", "q", "==", "-", "1", ":", "r", "[", "p", "+", "1", "]", "[", "i", "]", "[", "j", "]", "=", "rpij", "*", "delta_greater", "(", "i", ",", "j", ")", "else", ":", "r", "[", "p", "+", "1", "]", "[", "i", "]", "[", "j", "]", "=", "rpij", "if", "not", "numeric", ":", "r", "=", "[", "Matrix", "(", "ri", ")", "for", "ri", "in", "r", "]", "return", "r" ]
ur"""Calculate the matrix elements of the electric dipole (in the helicity basis). We calculate all matrix elements for the D2 line in Rb 87. >>> from sympy import symbols, pprint >>> red = symbols("r", positive=True) >>> reduced_matrix_elements = [[0, -red], [red, 0]] >>> g = State("Rb", 87, 5, 0, 1/Integer(2)) >>> e = State("Rb", 87, 5, 1, 3/Integer(2)) >>> fine_levels = [g, e] >>> r = calculate_r_matrices(fine_levels, reduced_matrix_elements, ... numeric=False) >>> pprint(r[0][8:,:8]) ⎑ √3β‹…r ⎀ ⎒ 0 0 ──── 0 0 0 0 0 βŽ₯ ⎒ 6 βŽ₯ ⎒ βŽ₯ ⎒ -√15β‹…r √15β‹…r βŽ₯ ⎒ 0 ─────── 0 0 0 ───── 0 0 βŽ₯ ⎒ 12 60 βŽ₯ ⎒ βŽ₯ ⎒ -√15β‹…r √5β‹…r βŽ₯ ⎒ 0 0 ─────── 0 0 0 ──── 0 βŽ₯ ⎒ 12 20 βŽ₯ ⎒ βŽ₯ ⎒ √10β‹…r βŽ₯ ⎒ 0 0 0 0 0 0 0 ───── βŽ₯ ⎒ 20 βŽ₯ ⎒ βŽ₯ ⎒√2β‹…r -√6β‹…r βŽ₯ βŽ’β”€β”€β”€β”€ 0 0 0 ────── 0 0 0 βŽ₯ ⎒ 4 12 βŽ₯ ⎒ βŽ₯ ⎒ r -r βŽ₯ ⎒ 0 ─ 0 0 0 ─── 0 0 βŽ₯ ⎒ 4 4 βŽ₯ ⎒ βŽ₯ ⎒ √3β‹…r -r βŽ₯ ⎒ 0 0 ──── 0 0 0 ─── 0 βŽ₯ ⎒ 12 4 βŽ₯ ⎒ βŽ₯ ⎒ -√6β‹…r βŽ₯ ⎒ 0 0 0 0 0 0 0 ──────βŽ₯ ⎒ 12 βŽ₯ ⎒ βŽ₯ ⎒ 0 0 0 0 0 0 0 0 βŽ₯ ⎒ βŽ₯ ⎒ r βŽ₯ ⎒ 0 0 0 ─ 0 0 0 0 βŽ₯ ⎒ 2 βŽ₯ ⎒ βŽ₯ ⎒ √6β‹…r βŽ₯ ⎒ 0 0 0 0 ──── 0 0 0 βŽ₯ ⎒ 6 βŽ₯ ⎒ βŽ₯ ⎒ √10β‹…r βŽ₯ ⎒ 0 0 0 0 0 ───── 0 0 βŽ₯ ⎒ 10 βŽ₯ ⎒ βŽ₯ ⎒ √5β‹…r βŽ₯ ⎒ 0 0 0 0 0 0 ──── 0 βŽ₯ ⎒ 10 βŽ₯ ⎒ βŽ₯ ⎒ √15β‹…r βŽ₯ ⎒ 0 0 0 0 0 0 0 ───── βŽ₯ ⎒ 30 βŽ₯ ⎒ βŽ₯ ⎒ 0 0 0 0 0 0 0 0 βŽ₯ ⎒ βŽ₯ ⎣ 0 0 0 0 0 0 0 0 ⎦ >>> pprint(r[1][8:,:8]) ⎑ -√3β‹…r ⎀ ⎒ 0 ────── 0 0 0 0 0 0 βŽ₯ ⎒ 6 βŽ₯ ⎒ βŽ₯ ⎒√15β‹…r -√5β‹…r βŽ₯ βŽ’β”€β”€β”€β”€β”€ 0 0 0 ────── 0 0 0 βŽ₯ ⎒ 12 20 βŽ₯ ⎒ βŽ₯ ⎒ -√15β‹…r βŽ₯ ⎒ 0 0 0 0 0 ─────── 0 0 βŽ₯ ⎒ 30 βŽ₯ ⎒ βŽ₯ ⎒ -√15β‹…r -√5β‹…r βŽ₯ ⎒ 0 0 ─────── 0 0 0 ────── 0 βŽ₯ ⎒ 12 20 βŽ₯ ⎒ βŽ₯ ⎒ √3β‹…r βŽ₯ ⎒ 0 0 0 ──── 0 0 0 0 βŽ₯ ⎒ 6 βŽ₯ ⎒ βŽ₯ ⎒ r √3β‹…r βŽ₯ ⎒ ─ 0 0 0 ──── 0 0 0 βŽ₯ ⎒ 4 12 βŽ₯ ⎒ βŽ₯ ⎒ √3β‹…r βŽ₯ ⎒ 0 ──── 0 0 0 0 0 0 βŽ₯ ⎒ 6 βŽ₯ ⎒ βŽ₯ ⎒ r -√3β‹…r βŽ₯ ⎒ 0 0 ─ 0 0 0 ────── 0 βŽ₯ ⎒ 4 12 βŽ₯ ⎒ βŽ₯ ⎒ -√3β‹…r βŽ₯ ⎒ 0 0 0 0 0 0 0 ──────βŽ₯ ⎒ 6 βŽ₯ ⎒ βŽ₯ ⎒ 0 0 0 0 0 0 0 0 βŽ₯ ⎒ βŽ₯ ⎒ √3β‹…r βŽ₯ ⎒ 0 0 0 ──── 0 0 0 0 βŽ₯ ⎒ 6 βŽ₯ ⎒ βŽ₯ ⎒ √30β‹…r βŽ₯ ⎒ 0 0 0 0 ───── 0 0 0 βŽ₯ ⎒ 15 βŽ₯ ⎒ βŽ₯ ⎒ √15β‹…r βŽ₯ ⎒ 0 0 0 0 0 ───── 0 0 βŽ₯ ⎒ 10 βŽ₯ ⎒ βŽ₯ ⎒ √30β‹…r βŽ₯ ⎒ 0 0 0 0 0 0 ───── 0 βŽ₯ ⎒ 15 βŽ₯ ⎒ βŽ₯ ⎒ √3β‹…r βŽ₯ ⎒ 0 0 0 0 0 0 0 ──── βŽ₯ ⎒ 6 βŽ₯ ⎒ βŽ₯ ⎣ 0 0 0 0 0 0 0 0 ⎦ >>> pprint(r[2][8:,:8]) ⎑√3β‹…r ⎀ βŽ’β”€β”€β”€β”€ 0 0 0 0 0 0 0βŽ₯ ⎒ 6 βŽ₯ ⎒ βŽ₯ ⎒ √10β‹…r βŽ₯ ⎒ 0 0 0 ───── 0 0 0 0βŽ₯ ⎒ 20 βŽ₯ ⎒ βŽ₯ ⎒√15β‹…r √5β‹…r βŽ₯ βŽ’β”€β”€β”€β”€β”€ 0 0 0 ──── 0 0 0βŽ₯ ⎒ 12 20 βŽ₯ ⎒ βŽ₯ ⎒ √15β‹…r √15β‹…r βŽ₯ ⎒ 0 ───── 0 0 0 ───── 0 0βŽ₯ ⎒ 12 60 βŽ₯ ⎒ βŽ₯ ⎒ 0 0 0 0 0 0 0 0βŽ₯ ⎒ βŽ₯ ⎒ √6β‹…r βŽ₯ ⎒ 0 0 0 ──── 0 0 0 0βŽ₯ ⎒ 12 βŽ₯ ⎒ βŽ₯ ⎒√3β‹…r r βŽ₯ βŽ’β”€β”€β”€β”€ 0 0 0 ─ 0 0 0βŽ₯ ⎒ 12 4 βŽ₯ ⎒ βŽ₯ ⎒ r r βŽ₯ ⎒ 0 ─ 0 0 0 ─ 0 0βŽ₯ ⎒ 4 4 βŽ₯ ⎒ βŽ₯ ⎒ √2β‹…r √6β‹…r βŽ₯ ⎒ 0 0 ──── 0 0 0 ──── 0βŽ₯ ⎒ 4 12 βŽ₯ ⎒ βŽ₯ ⎒ 0 0 0 0 0 0 0 0βŽ₯ ⎒ βŽ₯ ⎒ 0 0 0 0 0 0 0 0βŽ₯ ⎒ βŽ₯ ⎒ √15β‹…r βŽ₯ ⎒ 0 0 0 ───── 0 0 0 0βŽ₯ ⎒ 30 βŽ₯ ⎒ βŽ₯ ⎒ √5β‹…r βŽ₯ ⎒ 0 0 0 0 ──── 0 0 0βŽ₯ ⎒ 10 βŽ₯ ⎒ βŽ₯ ⎒ √10β‹…r βŽ₯ ⎒ 0 0 0 0 0 ───── 0 0βŽ₯ ⎒ 10 βŽ₯ ⎒ βŽ₯ ⎒ √6β‹…r βŽ₯ ⎒ 0 0 0 0 0 0 ──── 0βŽ₯ ⎒ 6 βŽ₯ ⎒ βŽ₯ ⎒ rβŽ₯ ⎒ 0 0 0 0 0 0 0 ─βŽ₯ ⎣ 2⎦
[ "ur", "Calculate", "the", "matrix", "elements", "of", "the", "electric", "dipole", "(", "in", "the", "helicity", "basis", ")", "." ]
language: python | partition: train | avg_line_len: 53.423077
thisfred/val
val/_val.py
https://github.com/thisfred/val/blob/ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c/val/_val.py#L358-L363
def _validated(self, data):
    """Convert data or die trying."""
    try:
        return self.convert(data)
    except (TypeError, ValueError) as ex:
        raise NotValid(*ex.args)
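A self-contained sketch of the same wrap-and-reraise pattern (names here are hypothetical, not val's API):

    # Hypothetical sketch of the wrap-and-reraise pattern used above.
    class NotValid(Exception):
        """Raised when data cannot be converted to the expected type."""

    def validated(convert, data):
        try:
            return convert(data)
        except (TypeError, ValueError) as ex:
            raise NotValid(*ex.args)   # keep the original error arguments

    validated(int, "42")               # -> 42
    # validated(int, "nope")           # raises NotValid(...)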
[ "def", "_validated", "(", "self", ",", "data", ")", ":", "try", ":", "return", "self", ".", "convert", "(", "data", ")", "except", "(", "TypeError", ",", "ValueError", ")", "as", "ex", ":", "raise", "NotValid", "(", "*", "ex", ".", "args", ")" ]
Convert data or die trying.
[ "Convert", "data", "or", "die", "trying", "." ]
language: python | partition: train | avg_line_len: 33
ga4gh/ga4gh-server
ga4gh/server/exceptions.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/exceptions.py#L17-L26
def getExceptionClass(errorCode):
    """
    Converts the specified error code into the corresponding class
    object. Raises a KeyError if the errorCode is not found.
    """
    classMap = {}
    for name, class_ in inspect.getmembers(sys.modules[__name__]):
        if inspect.isclass(class_) and issubclass(class_, BaseServerException):
            classMap[class_.getErrorCode()] = class_
    return classMap[errorCode]
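A self-contained sketch of the module-introspection registry this builds (the classes below are hypothetical stand-ins for the server's exception hierarchy):

    import inspect
    import sys

    # Hypothetical classes standing in for the server's exceptions.
    class BaseServerError(Exception):
        code = -1
        @classmethod
        def getErrorCode(cls):
            return cls.code

    class NotFoundError(BaseServerError):
        code = 404

    def get_exception_class(error_code):
        # Scan this module for subclasses and index them by error code.
        class_map = {}
        for _, class_ in inspect.getmembers(sys.modules[__name__],
                                            inspect.isclass):
            if issubclass(class_, BaseServerError):
                class_map[class_.getErrorCode()] = class_
        return class_map[error_code]

    assert get_exception_class(404) is NotFoundError

Like the original, the map is rebuilt on every call; caching it once at module import would avoid the repeated scan.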
[ "def", "getExceptionClass", "(", "errorCode", ")", ":", "classMap", "=", "{", "}", "for", "name", ",", "class_", "in", "inspect", ".", "getmembers", "(", "sys", ".", "modules", "[", "__name__", "]", ")", ":", "if", "inspect", ".", "isclass", "(", "class_", ")", "and", "issubclass", "(", "class_", ",", "BaseServerException", ")", ":", "classMap", "[", "class_", ".", "getErrorCode", "(", ")", "]", "=", "class_", "return", "classMap", "[", "errorCode", "]" ]
Converts the specified error code into the corresponding class object. Raises a KeyError if the errorCode is not found.
[ "Converts", "the", "specified", "error", "code", "into", "the", "corresponding", "class", "object", ".", "Raises", "a", "KeyError", "if", "the", "errorCode", "is", "not", "found", "." ]
language: python | partition: train | avg_line_len: 41.7
DLR-RM/RAFCON
source/rafcon/gui/mygaphas/utils/cache/value_cache.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/mygaphas/utils/cache/value_cache.py#L42-L57
def store_value(self, name, value, parameters=None):
    """Stores the value of a certain variable

    The value of a variable with name 'name' is stored together with the
    parameters that were used for the calculation.

    :param str name: The name of the variable
    :param value: The value to be cached
    :param dict parameters: The parameters on which the value depends
    """
    if not isinstance(parameters, dict):
        raise TypeError("parameters must be a dict")
    hash = self._parameter_hash(parameters)
    if name not in self._cache:
        self._cache[name] = {}
    self._cache[name][hash.hexdigest()] = value
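`_parameter_hash` is not shown in this row; a plausible sketch hashes a canonical, order-independent representation of the dict (an assumption, not RAFCON's actual helper):

    import hashlib

    # Hypothetical stand-in for self._parameter_hash(parameters).
    def parameter_hash(parameters):
        canonical = repr(sorted(parameters.items()))
        return hashlib.sha256(canonical.encode('utf-8'))

    cache = {}
    h = parameter_hash({'width': 10, 'zoom': 1.5})
    cache.setdefault('size', {})[h.hexdigest()] = 42.0   # like store_value('size', 42.0, ...)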
[ "def", "store_value", "(", "self", ",", "name", ",", "value", ",", "parameters", "=", "None", ")", ":", "if", "not", "isinstance", "(", "parameters", ",", "dict", ")", ":", "raise", "TypeError", "(", "\"parameters must be a dict\"", ")", "hash", "=", "self", ".", "_parameter_hash", "(", "parameters", ")", "if", "name", "not", "in", "self", ".", "_cache", ":", "self", ".", "_cache", "[", "name", "]", "=", "{", "}", "self", ".", "_cache", "[", "name", "]", "[", "hash", ".", "hexdigest", "(", ")", "]", "=", "value" ]
Stores the value of a certain variable The value of a variable with name 'name' is stored together with the parameters that were used for the calculation. :param str name: The name of the variable :param value: The value to be cached :param dict parameters: The parameters on which the value depends
[ "Stores", "the", "value", "of", "a", "certain", "variable" ]
language: python | partition: train | avg_line_len: 42.1875
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L2307-L2323
def _extract_blocks(x, block_h, block_w):
    """Helper function for local 2d attention.

    Args:
      x: a [batch, height, width, depth] tensor
      block_h: An integer. block height
      block_w: An integer. block width

    Returns:
      a [batch, height/block_h, width/block_w, block_h, block_w, depth] tensor
    """
    (_, height, width, depth) = common_layers.shape_list(x)
    assert height % block_h == 0
    assert width % block_w == 0
    x = tf.reshape(x, [-1, height // block_h, block_h,
                       width // block_w, block_w, depth])
    return tf.transpose(x, [0, 1, 3, 2, 4, 5])
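The same reshape/transpose trick in plain NumPy, so the resulting block layout can be checked eagerly:

    import numpy as np

    # NumPy equivalent of the reshape/transpose block extraction above.
    def extract_blocks(x, block_h, block_w):
        batch, height, width, depth = x.shape
        assert height % block_h == 0 and width % block_w == 0
        x = x.reshape(batch, height // block_h, block_h,
                      width // block_w, block_w, depth)
        return x.transpose(0, 1, 3, 2, 4, 5)

    x = np.arange(2 * 4 * 6 * 3).reshape(2, 4, 6, 3)
    print(extract_blocks(x, 2, 3).shape)   # (2, 2, 2, 2, 3, 3)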
[ "def", "_extract_blocks", "(", "x", ",", "block_h", ",", "block_w", ")", ":", "(", "_", ",", "height", ",", "width", ",", "depth", ")", "=", "common_layers", ".", "shape_list", "(", "x", ")", "assert", "height", "%", "block_h", "==", "0", "assert", "width", "%", "block_w", "==", "0", "x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "-", "1", ",", "height", "//", "block_h", ",", "block_h", ",", "width", "//", "block_w", ",", "block_w", ",", "depth", "]", ")", "return", "tf", ".", "transpose", "(", "x", ",", "[", "0", ",", "1", ",", "3", ",", "2", ",", "4", ",", "5", "]", ")" ]
Helper function for local 2d attention. Args: x: a [batch, height, width, depth] tensor block_h: An integer. block height block_w: An inteter. block width returns: a [batch, num_heads, height/block_h, width/block_w, depth] tensor
[ "Helper", "function", "for", "local", "2d", "attention", "." ]
language: python | partition: train | avg_line_len: 32.764706
praekelt/django-analytics
analytics/geckoboard_views.py
https://github.com/praekelt/django-analytics/blob/29c22d03374ccc0ec451650e2c2886d324f6e5c6/analytics/geckoboard_views.py#L43-L54
def get_next_colour():
    """
    Gets the next colour in the Geckoboard colour list.
    """
    colour = settings.GECKOBOARD_COLOURS[get_next_colour.cur_colour]
    get_next_colour.cur_colour += 1
    if get_next_colour.cur_colour >= len(settings.GECKOBOARD_COLOURS):
        get_next_colour.cur_colour = 0
    return colour
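The counter lives as an attribute on the function object and must be initialized before the first call; a minimal self-contained sketch (COLOURS stands in for settings.GECKOBOARD_COLOURS):

    # Function-attribute counter pattern; COLOURS stands in for
    # settings.GECKOBOARD_COLOURS.
    COLOURS = ['#ff0000', '#00ff00', '#0000ff']

    def next_colour():
        colour = COLOURS[next_colour.cur_colour]
        next_colour.cur_colour = (next_colour.cur_colour + 1) % len(COLOURS)
        return colour

    next_colour.cur_colour = 0   # must be set before the first call
    print([next_colour() for _ in range(4)])   # wraps back to '#ff0000'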
[ "def", "get_next_colour", "(", ")", ":", "colour", "=", "settings", ".", "GECKOBOARD_COLOURS", "[", "get_next_colour", ".", "cur_colour", "]", "get_next_colour", ".", "cur_colour", "+=", "1", "if", "get_next_colour", ".", "cur_colour", ">=", "len", "(", "settings", ".", "GECKOBOARD_COLOURS", ")", ":", "get_next_colour", ".", "cur_colour", "=", "0", "return", "colour" ]
Gets the next colour in the Geckoboard colour list.
[ "Gets", "the", "next", "colour", "in", "the", "Geckoboard", "colour", "list", "." ]
language: python | partition: test | avg_line_len: 26.583333
insightindustry/validator-collection
validator_collection/validators.py
https://github.com/insightindustry/validator-collection/blob/8c8047a0fa36cc88a021771279898278c4cc98e3/validator_collection/validators.py#L2517-L2552
def ipv6(value, allow_empty = False, **kwargs):
    """Validate that ``value`` is a valid IP address version 6.

    :param value: The value to validate.

    :param allow_empty: If ``True``, returns :obj:`None <python:None>` if
      ``value`` is empty. If ``False``, raises a
      :class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
      if ``value`` is empty. Defaults to ``False``.
    :type allow_empty: :class:`bool <python:bool>`

    :returns: ``value`` / :obj:`None <python:None>`

    :raises EmptyValueError: if ``value`` is empty and ``allow_empty``
      is ``False``
    :raises InvalidIPAddressError: if ``value`` is not a valid IP version 6
      address or is empty when ``allow_empty`` is not set to ``True``
    """
    if not value and allow_empty is False:
        raise errors.EmptyValueError('value (%s) was empty' % value)
    elif not value:
        return None

    if not isinstance(value, str):
        raise errors.InvalidIPAddressError('value (%s) is not a valid ipv6' % value)

    value = value.lower().strip()

    is_valid = IPV6_REGEX.match(value)

    if not is_valid:
        raise errors.InvalidIPAddressError('value (%s) is not a valid ipv6' % value)

    return value
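As an aside, the standard library can validate IPv6 without a regex; a hedged alternative sketch (it raises stdlib ValueError subclasses rather than validator_collection's errors):

    import ipaddress

    # Alternative sketch using the stdlib instead of IPV6_REGEX.
    def is_ipv6(value):
        try:
            ipaddress.IPv6Address(str(value).strip())
            return True
        except ValueError:
            return False

    print(is_ipv6('::1'))         # True
    print(is_ipv6('not-an-ip'))   # False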
[ "def", "ipv6", "(", "value", ",", "allow_empty", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "not", "value", "and", "allow_empty", "is", "False", ":", "raise", "errors", ".", "EmptyValueError", "(", "'value (%s) was empty'", "%", "value", ")", "elif", "not", "value", ":", "return", "None", "if", "not", "isinstance", "(", "value", ",", "str", ")", ":", "raise", "errors", ".", "InvalidIPAddressError", "(", "'value (%s) is not a valid ipv6'", "%", "value", ")", "value", "=", "value", ".", "lower", "(", ")", ".", "strip", "(", ")", "is_valid", "=", "IPV6_REGEX", ".", "match", "(", "value", ")", "if", "not", "is_valid", ":", "raise", "errors", ".", "InvalidIPAddressError", "(", "'value (%s) is not a valid ipv6'", "%", "value", ")", "return", "value" ]
Validate that ``value`` is a valid IP address version 6. :param value: The value to validate. :param allow_empty: If ``True``, returns :obj:`None <python:None>` if ``value`` is empty. If ``False``, raises a :class:`EmptyValueError <validator_collection.errors.EmptyValueError>` if ``value`` is empty. Defaults to ``False``. :type allow_empty: :class:`bool <python:bool>` :returns: ``value`` / :obj:`None <python:None>` :raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False`` :raises InvalidIPAddressError: if ``value`` is not a valid IP version 6 address or empty with ``allow_empty`` is not set to ``True``
[ "Validate", "that", "value", "is", "a", "valid", "IP", "address", "version", "6", "." ]
language: python | partition: train | avg_line_len: 33.416667
mabuchilab/QNET
src/qnet/algebra/core/algebraic_properties.py
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/algebra/core/algebraic_properties.py#L93-L104
def idem(cls, ops, kwargs):
    """Remove duplicate arguments and order them via the cls's order_key key
    object/function. E.g.::

        >>> class Set(Operation):
        ...     order_key = lambda val: val
        ...     simplifications = [idem, ]
        >>> Set.create(1,2,3,1,3)
        Set(1, 2, 3)
    """
    return sorted(set(ops), key=cls.order_key), kwargs
[ "def", "idem", "(", "cls", ",", "ops", ",", "kwargs", ")", ":", "return", "sorted", "(", "set", "(", "ops", ")", ",", "key", "=", "cls", ".", "order_key", ")", ",", "kwargs" ]
Remove duplicate arguments and order them via the cls's order_key key object/function. E.g.:: >>> class Set(Operation): ... order_key = lambda val: val ... simplifications = [idem, ] >>> Set.create(1,2,3,1,3) Set(1, 2, 3)
[ "Remove", "duplicate", "arguments", "and", "order", "them", "via", "the", "cls", "s", "order_key", "key", "object", "/", "function", ".", "E", ".", "g", ".", "::" ]
language: python | partition: train | avg_line_len: 30.416667
UDST/pandana
pandana/loaders/osm.py
https://github.com/UDST/pandana/blob/961a7ef8d3b0144b190cb60bbd61845fca6fb314/pandana/loaders/osm.py#L160-L189
def node_query(lat_min, lng_min, lat_max, lng_max, tags=None):
    """
    Search for OSM nodes within a bounding box that match given tags.

    Parameters
    ----------
    lat_min, lng_min, lat_max, lng_max : float
    tags : str or list of str, optional
        Node tags that will be used to filter the search.
        See http://wiki.openstreetmap.org/wiki/Overpass_API/Language_Guide
        for information about OSM Overpass queries and
        http://wiki.openstreetmap.org/wiki/Map_Features
        for a list of tags.

    Returns
    -------
    nodes : pandas.DataFrame
        Will have 'lat' and 'lon' columns, plus other columns for the
        tags associated with the node (these will vary based on the query).
        Index will be the OSM node IDs.

    """
    node_data = make_osm_query(build_node_query(
        lat_min, lng_min, lat_max, lng_max, tags=tags))

    if len(node_data['elements']) == 0:
        raise RuntimeError('OSM query results contain no data.')

    nodes = [process_node(n) for n in node_data['elements']]
    return pd.DataFrame.from_records(nodes, index='id')
[ "def", "node_query", "(", "lat_min", ",", "lng_min", ",", "lat_max", ",", "lng_max", ",", "tags", "=", "None", ")", ":", "node_data", "=", "make_osm_query", "(", "build_node_query", "(", "lat_min", ",", "lng_min", ",", "lat_max", ",", "lng_max", ",", "tags", "=", "tags", ")", ")", "if", "len", "(", "node_data", "[", "'elements'", "]", ")", "==", "0", ":", "raise", "RuntimeError", "(", "'OSM query results contain no data.'", ")", "nodes", "=", "[", "process_node", "(", "n", ")", "for", "n", "in", "node_data", "[", "'elements'", "]", "]", "return", "pd", ".", "DataFrame", ".", "from_records", "(", "nodes", ",", "index", "=", "'id'", ")" ]
Search for OSM nodes within a bounding box that match given tags. Parameters ---------- lat_min, lng_min, lat_max, lng_max : float tags : str or list of str, optional Node tags that will be used to filter the search. See http://wiki.openstreetmap.org/wiki/Overpass_API/Language_Guide for information about OSM Overpass queries and http://wiki.openstreetmap.org/wiki/Map_Features for a list of tags. Returns ------- nodes : pandas.DataFrame Will have 'lat' and 'lon' columns, plus other columns for the tags associated with the node (these will vary based on the query). Index will be the OSM node IDs.
[ "Search", "for", "OSM", "nodes", "within", "a", "bounding", "box", "that", "match", "given", "tags", "." ]
language: python | partition: test | avg_line_len: 35.966667
ValvePython/steam
steam/client/builtins/web.py
https://github.com/ValvePython/steam/blob/2de1364c47598410b572114e6129eab8fff71d5b/steam/client/builtins/web.py#L22-L54
def get_web_session_cookies(self):
    """Get web authentication cookies via WebAPI's ``AuthenticateUser``

    .. note::
        The cookies are valid only while :class:`.SteamClient` instance is
        logged on.

    :return: dict with authentication cookies
    :rtype: :class:`dict`, :class:`None`
    """
    if not self.logged_on:
        return None

    resp = self.send_job_and_wait(
        MsgProto(EMsg.ClientRequestWebAPIAuthenticateUserNonce), timeout=7)

    if resp is None:
        return None

    skey, ekey = generate_session_key()

    data = {
        'steamid': self.steam_id,
        'sessionkey': ekey,
        'encrypted_loginkey': symmetric_encrypt(
            resp.webapi_authenticate_user_nonce.encode('ascii'), skey),
    }

    try:
        resp = webapi.post('ISteamUserAuth', 'AuthenticateUser', 1, params=data)
    except Exception as exp:
        self._LOG.debug("get_web_session_cookies error: %s" % str(exp))
        return None

    return {
        'steamLogin': resp['authenticateuser']['token'],
        'steamLoginSecure': resp['authenticateuser']['tokensecure'],
    }
[ "def", "get_web_session_cookies", "(", "self", ")", ":", "if", "not", "self", ".", "logged_on", ":", "return", "None", "resp", "=", "self", ".", "send_job_and_wait", "(", "MsgProto", "(", "EMsg", ".", "ClientRequestWebAPIAuthenticateUserNonce", ")", ",", "timeout", "=", "7", ")", "if", "resp", "is", "None", ":", "return", "None", "skey", ",", "ekey", "=", "generate_session_key", "(", ")", "data", "=", "{", "'steamid'", ":", "self", ".", "steam_id", ",", "'sessionkey'", ":", "ekey", ",", "'encrypted_loginkey'", ":", "symmetric_encrypt", "(", "resp", ".", "webapi_authenticate_user_nonce", ".", "encode", "(", "'ascii'", ")", ",", "skey", ")", ",", "}", "try", ":", "resp", "=", "webapi", ".", "post", "(", "'ISteamUserAuth'", ",", "'AuthenticateUser'", ",", "1", ",", "params", "=", "data", ")", "except", "Exception", "as", "exp", ":", "self", ".", "_LOG", ".", "debug", "(", "\"get_web_session_cookies error: %s\"", "%", "str", "(", "exp", ")", ")", "return", "None", "return", "{", "'steamLogin'", ":", "resp", "[", "'authenticateuser'", "]", "[", "'token'", "]", ",", "'steamLoginSecure'", ":", "resp", "[", "'authenticateuser'", "]", "[", "'tokensecure'", "]", ",", "}" ]
Get web authentication cookies via WebAPI's ``AuthenticateUser`` .. note:: The cookies are valid only while :class:`.SteamClient` instance is logged on. :return: dict with authentication cookies :rtype: :class:`dict`, :class:`None`
[ "Get", "web", "authentication", "cookies", "via", "WebAPI", "s", "AuthenticateUser" ]
language: python | partition: train | avg_line_len: 34.30303
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/gridfs/__init__.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/gridfs/__init__.py#L239-L264
def find_one(self, filter=None, *args, **kwargs):
    """Get a single file from gridfs.

    All arguments to :meth:`find` are also valid arguments for
    :meth:`find_one`, although any `limit` argument will be
    ignored. Returns a single :class:`~gridfs.grid_file.GridOut`,
    or ``None`` if no matching file is found. For example::

        file = fs.find_one({"filename": "lisa.txt"})

    :Parameters:
      - `filter` (optional): a dictionary specifying
        the query to be performed OR any other type to be used as
        the value for a query for ``"_id"`` in the file collection.
      - `*args` (optional): any additional positional arguments are
        the same as the arguments to :meth:`find`.
      - `**kwargs` (optional): any additional keyword arguments
        are the same as the arguments to :meth:`find`.
    """
    if filter is not None and not isinstance(filter, Mapping):
        filter = {"_id": filter}

    for f in self.find(filter, *args, **kwargs):
        return f

    return None
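The key move is coercing any non-mapping filter into an ``"_id"`` equality query; a tiny standalone sketch:

    from collections.abc import Mapping

    # Scalar-to-query coercion used by find_one above.
    def normalize_filter(filter):
        if filter is not None and not isinstance(filter, Mapping):
            return {"_id": filter}
        return filter

    print(normalize_filter("lisa.txt"))             # {'_id': 'lisa.txt'}
    print(normalize_filter({"filename": "a.txt"}))  # passed through unchanged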
[ "def", "find_one", "(", "self", ",", "filter", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "filter", "is", "not", "None", "and", "not", "isinstance", "(", "filter", ",", "Mapping", ")", ":", "filter", "=", "{", "\"_id\"", ":", "filter", "}", "for", "f", "in", "self", ".", "find", "(", "filter", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "f", "return", "None" ]
Get a single file from gridfs. All arguments to :meth:`find` are also valid arguments for :meth:`find_one`, although any `limit` argument will be ignored. Returns a single :class:`~gridfs.grid_file.GridOut`, or ``None`` if no matching file is found. For example:: file = fs.find_one({"filename": "lisa.txt"}) :Parameters: - `filter` (optional): a dictionary specifying the query to be performing OR any other type to be used as the value for a query for ``"_id"`` in the file collection. - `*args` (optional): any additional positional arguments are the same as the arguments to :meth:`find`. - `**kwargs` (optional): any additional keyword arguments are the same as the arguments to :meth:`find`.
[ "Get", "a", "single", "file", "from", "gridfs", "." ]
language: python | partition: train | avg_line_len: 41.461538
sentinel-hub/sentinelhub-py
sentinelhub/aws.py
https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/aws.py#L570-L577
def tile_is_valid(self):
    """ Checks if tile has tile info and valid timestamp

    :return: `True` if tile is valid and `False` otherwise
    :rtype: bool
    """
    return self.tile_info is not None \
        and (self.datetime == self.date
             or self.datetime == self.parse_datetime(self.tile_info['timestamp']))
[ "def", "tile_is_valid", "(", "self", ")", ":", "return", "self", ".", "tile_info", "is", "not", "None", "and", "(", "self", ".", "datetime", "==", "self", ".", "date", "or", "self", ".", "datetime", "==", "self", ".", "parse_datetime", "(", "self", ".", "tile_info", "[", "'timestamp'", "]", ")", ")" ]
Checks if tile has tile info and valid timestamp :return: `True` if tile is valid and `False` otherwise :rtype: bool
[ "Checks", "if", "tile", "has", "tile", "info", "and", "valid", "timestamp" ]
language: python | partition: train | avg_line_len: 41.625
nerdvegas/rez
src/rez/suite.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/suite.py#L219-L223
def bump_context(self, name):
    """Causes the context's tools to take priority over all others."""
    data = self._context(name)
    data["priority"] = self._next_priority
    self._flush_tools()
[ "def", "bump_context", "(", "self", ",", "name", ")", ":", "data", "=", "self", ".", "_context", "(", "name", ")", "data", "[", "\"priority\"", "]", "=", "self", ".", "_next_priority", "self", ".", "_flush_tools", "(", ")" ]
Causes the context's tools to take priority over all others.
[ "Causes", "the", "context", "s", "tools", "to", "take", "priority", "over", "all", "others", "." ]
language: python | partition: train | avg_line_len: 42
Bogdanp/anom-py
anom/conditions.py
https://github.com/Bogdanp/anom-py/blob/519078b6d1570fa63c5f17cf98817c7bb5588136/anom/conditions.py#L21-L23
def is_none(entity, prop, name):
    "bool: True if the value of a property is None."
    return is_not_empty(entity, prop, name) and getattr(entity, name) is None
[ "def", "is_none", "(", "entity", ",", "prop", ",", "name", ")", ":", "return", "is_not_empty", "(", "entity", ",", "prop", ",", "name", ")", "and", "getattr", "(", "entity", ",", "name", ")", "is", "None" ]
bool: True if the value of a property is None.
[ "bool", ":", "True", "if", "the", "value", "of", "a", "property", "is", "None", "." ]
language: python | partition: train | avg_line_len: 53.666667
aparo/pyes
pyes/facets.py
https://github.com/aparo/pyes/blob/712eb6095961755067b2b5baa262008ade6584b3/pyes/facets.py#L20-L22
def add_geo_facet(self, *args, **kwargs):
    """Add a geo factory facet"""
    self.facets.append(GeoDistanceFacet(*args, **kwargs))
[ "def", "add_geo_facet", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "facets", ".", "append", "(", "GeoDistanceFacet", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")" ]
Add a geo factory facet
[ "Add", "a", "geo", "factory", "facet" ]
language: python | partition: train | avg_line_len: 46.333333
olitheolix/qtmacs
qtmacs/qtmacsmain_macros.py
https://github.com/olitheolix/qtmacs/blob/36253b082b82590f183fe154b053eb3a1e741be2/qtmacs/qtmacsmain_macros.py#L483-L491
def abort(self, msgObj):
    """
    Disconnect all signals and turn macro processing in the event
    handler back on.
    """
    self.qteMain.qtesigKeyparsed.disconnect(self.qteKeyPress)
    self.qteMain.qtesigAbort.disconnect(self.abort)
    self.qteActive = False
    self.qteMain.qteEnableMacroProcessing()
[ "def", "abort", "(", "self", ",", "msgObj", ")", ":", "self", ".", "qteMain", ".", "qtesigKeyparsed", ".", "disconnect", "(", "self", ".", "qteKeyPress", ")", "self", ".", "qteMain", ".", "qtesigAbort", ".", "disconnect", "(", "self", ".", "abort", ")", "self", ".", "qteActive", "=", "False", "self", ".", "qteMain", ".", "qteEnableMacroProcessing", "(", ")" ]
Disconnect all signals and turn macro processing in the event handler back on.
[ "Disconnect", "all", "signals", "and", "turn", "macro", "processing", "in", "the", "event", "handler", "back", "on", "." ]
language: python | partition: train | avg_line_len: 37.333333
moonlitesolutions/SolrClient
SolrClient/indexq.py
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/indexq.py#L240-L279
def complete(self, filepath):
    '''
    Marks the item as complete by moving it to the done directory and
    optionally gzipping it.
    '''
    if not os.path.exists(filepath):
        raise FileNotFoundError("Can't Complete {}, it doesn't exist".format(filepath))

    if self._devel:
        self.logger.debug("Completing - {} ".format(filepath))

    if self.rotate_complete:
        try:
            complete_dir = str(self.rotate_complete())
        except Exception as e:
            self.logger.error("rotate_complete function failed with the following exception.")
            self.logger.exception(e)
            raise

        newdir = os.path.join(self._done_dir, complete_dir)
        newpath = os.path.join(newdir, os.path.split(filepath)[-1])

        if not os.path.isdir(newdir):
            self.logger.debug("Making new directory: {}".format(newdir))
            os.makedirs(newdir)
    else:
        newpath = os.path.join(self._done_dir, os.path.split(filepath)[-1])

    try:
        if self._compress_complete:
            if not filepath.endswith('.gz'):
                # Compressing complete, but existing file not compressed
                # Compress and move it and kick out
                newpath += '.gz'
                self._compress_and_move(filepath, newpath)
                return newpath
            # else the file is already compressed and can just be moved
        # if not compressing completed file, just move it
        shutil.move(filepath, newpath)
        self.logger.info(" Completed - {}".format(filepath))
    except Exception as e:
        self.logger.error("Couldn't Complete {}".format(filepath))
        self.logger.exception(e)
        raise

    return newpath
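`_compress_and_move` is referenced but not shown in this row; a plausible gzip + shutil sketch (an assumption, not SolrClient's actual helper):

    import gzip
    import os
    import shutil

    # Hypothetical stand-in for self._compress_and_move(src, dest).
    def compress_and_move(src, dest):
        with open(src, 'rb') as f_in, gzip.open(dest, 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)   # stream-compress to dest
        os.remove(src)                        # complete the "move"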
[ "def", "complete", "(", "self", ",", "filepath", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "filepath", ")", ":", "raise", "FileNotFoundError", "(", "\"Can't Complete {}, it doesn't exist\"", ".", "format", "(", "filepath", ")", ")", "if", "self", ".", "_devel", ":", "self", ".", "logger", ".", "debug", "(", "\"Completing - {} \"", ".", "format", "(", "filepath", ")", ")", "if", "self", ".", "rotate_complete", ":", "try", ":", "complete_dir", "=", "str", "(", "self", ".", "rotate_complete", "(", ")", ")", "except", "Exception", "as", "e", ":", "self", ".", "logger", ".", "error", "(", "\"rotate_complete function failed with the following exception.\"", ")", "self", ".", "logger", ".", "exception", "(", "e", ")", "raise", "newdir", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_done_dir", ",", "complete_dir", ")", "newpath", "=", "os", ".", "path", ".", "join", "(", "newdir", ",", "os", ".", "path", ".", "split", "(", "filepath", ")", "[", "-", "1", "]", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "newdir", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Making new directory: {}\"", ".", "format", "(", "newdir", ")", ")", "os", ".", "makedirs", "(", "newdir", ")", "else", ":", "newpath", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_done_dir", ",", "os", ".", "path", ".", "split", "(", "filepath", ")", "[", "-", "1", "]", ")", "try", ":", "if", "self", ".", "_compress_complete", ":", "if", "not", "filepath", ".", "endswith", "(", "'.gz'", ")", ":", "# Compressing complete, but existing file not compressed", "# Compress and move it and kick out", "newpath", "+=", "'.gz'", "self", ".", "_compress_and_move", "(", "filepath", ",", "newpath", ")", "return", "newpath", "# else the file is already compressed and can just be moved", "#if not compressing completed file, just move it", "shutil", ".", "move", "(", "filepath", ",", "newpath", ")", "self", ".", "logger", ".", "info", "(", "\" Completed - {}\"", ".", "format", "(", "filepath", ")", ")", "except", "Exception", "as", "e", ":", "self", ".", "logger", ".", "error", "(", "\"Couldn't Complete {}\"", ".", "format", "(", "filepath", ")", ")", "self", ".", "logger", ".", "exception", "(", "e", ")", "raise", "return", "newpath" ]
Marks the item as complete by moving it to the done directory and optionally gzipping it.
[ "Marks", "the", "item", "as", "complete", "by", "moving", "it", "to", "the", "done", "directory", "and", "optionally", "gzipping", "it", "." ]
language: python | partition: train | avg_line_len: 45.4
cjdrake/pyeda
pyeda/boolalg/bdd.py
https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/boolalg/bdd.py#L198-L208
def _bddnode(root, lo, hi):
    """Return a unique BDD node."""
    if lo is hi:
        node = lo
    else:
        key = (root, lo, hi)
        try:
            node = _NODES[key]
        except KeyError:
            node = _NODES[key] = BDDNode(*key)
    return node
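The `_NODES` dict is a unique table (hash consing): structurally equal nodes become the same object, so identity checks like `lo is hi` are sound. A quick standalone demonstration:

    # Unique-table (hash-consing) idea in miniature: equal keys map to one object.
    _NODES = {}

    def make_node(root, lo, hi):
        key = (root, lo, hi)
        if key not in _NODES:
            _NODES[key] = ('node',) + key   # stand-in for BDDNode(*key)
        return _NODES[key]

    n1 = make_node(1, None, None)
    n2 = make_node(1, None, None)
    assert n1 is n2   # same object, not merely equal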
[ "def", "_bddnode", "(", "root", ",", "lo", ",", "hi", ")", ":", "if", "lo", "is", "hi", ":", "node", "=", "lo", "else", ":", "key", "=", "(", "root", ",", "lo", ",", "hi", ")", "try", ":", "node", "=", "_NODES", "[", "key", "]", "except", "KeyError", ":", "node", "=", "_NODES", "[", "key", "]", "=", "BDDNode", "(", "*", "key", ")", "return", "node" ]
Return a unique BDD node.
[ "Return", "a", "unique", "BDD", "node", "." ]
language: python | partition: train | avg_line_len: 23.545455
securestate/termineter
lib/termineter/interface.py
https://github.com/securestate/termineter/blob/d657d25d97c7739e650b951c396404e857e56625/lib/termineter/interface.py#L428-L440
def do_reload(self, args):
    """Reload a module into the framework"""
    if args.module is not None:
        if args.module not in self.frmwk.modules:
            self.print_error('Invalid Module Selected.')
            return
        module = self.frmwk.modules[args.module]
    elif self.frmwk.current_module:
        module = self.frmwk.current_module
    else:
        self.print_error('Must \'use\' module first')
        return
    self.reload_module(module)
[ "def", "do_reload", "(", "self", ",", "args", ")", ":", "if", "args", ".", "module", "is", "not", "None", ":", "if", "args", ".", "module", "not", "in", "self", ".", "frmwk", ".", "modules", ":", "self", ".", "print_error", "(", "'Invalid Module Selected.'", ")", "return", "module", "=", "self", ".", "frmwk", ".", "modules", "[", "args", ".", "module", "]", "elif", "self", ".", "frmwk", ".", "current_module", ":", "module", "=", "self", ".", "frmwk", ".", "current_module", "else", ":", "self", ".", "print_error", "(", "'Must \\'use\\' module first'", ")", "return", "self", ".", "reload_module", "(", "module", ")" ]
Reload a module in to the framework
[ "Reload", "a", "module", "in", "to", "the", "framework" ]
language: python | partition: train | avg_line_len: 31.153846
waqasbhatti/astrobase
astrobase/lcproc/checkplotproc.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcproc/checkplotproc.py#L754-L948
def parallel_update_objectinfo_cplist(
        cplist,
        liststartindex=None,
        maxobjects=None,
        nworkers=NCPUS,
        fast_mode=False,
        findercmap='gray_r',
        finderconvolve=None,
        deredden_object=True,
        custom_bandpasses=None,
        gaia_submit_timeout=10.0,
        gaia_submit_tries=3,
        gaia_max_timeout=180.0,
        gaia_mirror=None,
        complete_query_later=True,
        lclistpkl=None,
        nbrradiusarcsec=60.0,
        maxnumneighbors=5,
        plotdpi=100,
        findercachedir='~/.astrobase/stamp-cache',
        verbose=True
):
    '''This updates objectinfo for a list of checkplots.

    Useful in cases where a previous round of GAIA/finderchart/external
    catalog acquisition failed. This will preserve the following keys in the
    checkplots if they exist:

    comments
    varinfo
    objectinfo.objecttags

    Parameters
    ----------

    cplist : list of str
        A list of checkplot pickle file names to update.

    liststartindex : int
        The index of the input list to start working at.

    maxobjects : int
        The maximum number of objects to process in this run. Use this with
        `liststartindex` to effectively distribute working on a large list of
        input checkplot pickles over several sessions or machines.

    nworkers : int
        The number of parallel workers that will work on the checkplot
        update process.

    fast_mode : bool or float
        This runs the external catalog operations in a "fast" mode, with
        short timeouts and not trying to hit external catalogs that take a
        long time to respond. See the docstring for
        `checkplot.pkl_utils._pkl_finder_objectinfo` for details on how this
        works. If this is True, will run in "fast" mode with default timeouts
        (5 seconds in most cases). If this is a float, will run in "fast"
        mode with the provided timeout value in seconds.

    findercmap : str or matplotlib.cm.Colormap object
        The Colormap object to use for the finder chart image.

    finderconvolve : astropy.convolution.Kernel object or None
        If not None, the Kernel object to use for convolving the finder
        image.

    deredden_object : bool
        If this is True, will use the 2MASS DUST service to get extinction
        coefficients in various bands, and then try to deredden the
        magnitudes and colors of the object already present in the
        checkplot's objectinfo dict.

    custom_bandpasses : dict
        This is a dict used to provide custom bandpass definitions for any
        magnitude measurements in the objectinfo dict that are not
        automatically recognized by the
        `varclass.starfeatures.color_features` function. See its docstring
        for details on the required format.

    gaia_submit_timeout : float
        Sets the timeout in seconds to use when submitting a request to look
        up the object's information to the GAIA service. Note that if
        `fast_mode` is set, this is ignored.

    gaia_submit_tries : int
        Sets the maximum number of times the GAIA services will be contacted
        to obtain this object's information. If `fast_mode` is set, this is
        ignored, and the services will be contacted only once (meaning that a
        failure to respond will be silently ignored and no GAIA data will be
        added to the checkplot's objectinfo dict).

    gaia_max_timeout : float
        Sets the timeout in seconds to use when waiting for the GAIA service
        to respond to our request for the object's information. Note that if
        `fast_mode` is set, this is ignored.

    gaia_mirror : str
        This sets the GAIA mirror to use. This is a key in the
        `services.gaia.GAIA_URLS` dict which defines the URLs to hit for each
        mirror.

    complete_query_later : bool
        If this is True, saves the state of GAIA queries that are not yet
        complete when `gaia_max_timeout` is reached while waiting for the
        GAIA service to respond to our request. A later call for GAIA info on
        the same object will attempt to pick up the results from the existing
        query if it's completed. If `fast_mode` is True, this is ignored.

    lclistpkl : dict or str
        If this is provided, must be a dict resulting from reading a catalog
        produced by the `lcproc.catalogs.make_lclist` function or a str path
        pointing to the pickle file produced by that function. This catalog
        is used to find neighbors of the current object in the current light
        curve collection. Looking at neighbors of the object within the
        radius specified by `nbrradiusarcsec` is useful for light curves
        produced by instruments that have a large pixel scale, so are
        susceptible to blending of variability and potential confusion of
        neighbor variability with that of the actual object being looked at.
        If this is None, no neighbor lookups will be performed.

    nbrradiusarcsec : float
        The radius in arcseconds to use for a search conducted around the
        coordinates of this object to look for any potential confusion and
        blending of variability amplitude caused by their proximity.

    maxnumneighbors : int
        The maximum number of neighbors that will have their light curves and
        magnitudes noted in this checkplot as potential blends with the
        target object.

    plotdpi : int
        The resolution in DPI of the plots to generate in this function
        (e.g. the finder chart, etc.)

    findercachedir : str
        The path to the astrobase cache directory for finder chart downloads
        from the NASA SkyView service.

    verbose : bool
        If True, will indicate progress and warn about potential problems.

    Returns
    -------

    list of str
        Paths to the updated checkplot pickle files.

    '''

    # work around the Darwin segfault after fork if no network activity in
    # main thread bug: https://bugs.python.org/issue30385#msg293958
    if sys.platform == 'darwin':
        import requests
        requests.get('http://captive.apple.com/hotspot-detect.html')

    # handle the start and end indices
    if (liststartindex is not None) and (maxobjects is None):
        cplist = cplist[liststartindex:]

    elif (liststartindex is None) and (maxobjects is not None):
        cplist = cplist[:maxobjects]

    elif (liststartindex is not None) and (maxobjects is not None):
        cplist = (
            cplist[liststartindex:liststartindex+maxobjects]
        )

    tasks = [(x, {'fast_mode': fast_mode,
                  'findercmap': findercmap,
                  'finderconvolve': finderconvolve,
                  'deredden_object': deredden_object,
                  'custom_bandpasses': custom_bandpasses,
                  'gaia_submit_timeout': gaia_submit_timeout,
                  'gaia_submit_tries': gaia_submit_tries,
                  'gaia_max_timeout': gaia_max_timeout,
                  'gaia_mirror': gaia_mirror,
                  'complete_query_later': complete_query_later,
                  'lclistpkl': lclistpkl,
                  'nbrradiusarcsec': nbrradiusarcsec,
                  'maxnumneighbors': maxnumneighbors,
                  'plotdpi': plotdpi,
                  'findercachedir': findercachedir,
                  'verbose': verbose}) for x in cplist]

    resultfutures = []
    results = []

    with ProcessPoolExecutor(max_workers=nworkers) as executor:
        resultfutures = executor.map(cp_objectinfo_worker, tasks)
        results = [x for x in resultfutures]
        executor.shutdown()

    return results
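The `liststartindex`/`maxobjects` slicing is what lets one large work list be split across sessions or machines; a tiny sketch of the same arithmetic in isolation:

    # Same start-index / max-objects slicing in isolation.
    def chunk(items, start=None, maxn=None):
        if start is not None and maxn is None:
            return items[start:]
        if start is None and maxn is not None:
            return items[:maxn]
        if start is not None and maxn is not None:
            return items[start:start + maxn]
        return items

    work = list(range(10))
    print(chunk(work, start=4, maxn=3))   # [4, 5, 6]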
[ "def", "parallel_update_objectinfo_cplist", "(", "cplist", ",", "liststartindex", "=", "None", ",", "maxobjects", "=", "None", ",", "nworkers", "=", "NCPUS", ",", "fast_mode", "=", "False", ",", "findercmap", "=", "'gray_r'", ",", "finderconvolve", "=", "None", ",", "deredden_object", "=", "True", ",", "custom_bandpasses", "=", "None", ",", "gaia_submit_timeout", "=", "10.0", ",", "gaia_submit_tries", "=", "3", ",", "gaia_max_timeout", "=", "180.0", ",", "gaia_mirror", "=", "None", ",", "complete_query_later", "=", "True", ",", "lclistpkl", "=", "None", ",", "nbrradiusarcsec", "=", "60.0", ",", "maxnumneighbors", "=", "5", ",", "plotdpi", "=", "100", ",", "findercachedir", "=", "'~/.astrobase/stamp-cache'", ",", "verbose", "=", "True", ")", ":", "# work around the Darwin segfault after fork if no network activity in", "# main thread bug: https://bugs.python.org/issue30385#msg293958", "if", "sys", ".", "platform", "==", "'darwin'", ":", "import", "requests", "requests", ".", "get", "(", "'http://captive.apple.com/hotspot-detect.html'", ")", "# handle the start and end indices", "if", "(", "liststartindex", "is", "not", "None", ")", "and", "(", "maxobjects", "is", "None", ")", ":", "cplist", "=", "cplist", "[", "liststartindex", ":", "]", "elif", "(", "liststartindex", "is", "None", ")", "and", "(", "maxobjects", "is", "not", "None", ")", ":", "cplist", "=", "cplist", "[", ":", "maxobjects", "]", "elif", "(", "liststartindex", "is", "not", "None", ")", "and", "(", "maxobjects", "is", "not", "None", ")", ":", "cplist", "=", "(", "cplist", "[", "liststartindex", ":", "liststartindex", "+", "maxobjects", "]", ")", "tasks", "=", "[", "(", "x", ",", "{", "'fast_mode'", ":", "fast_mode", ",", "'findercmap'", ":", "findercmap", ",", "'finderconvolve'", ":", "finderconvolve", ",", "'deredden_object'", ":", "deredden_object", ",", "'custom_bandpasses'", ":", "custom_bandpasses", ",", "'gaia_submit_timeout'", ":", "gaia_submit_timeout", ",", "'gaia_submit_tries'", ":", "gaia_submit_tries", ",", "'gaia_max_timeout'", ":", "gaia_max_timeout", ",", "'gaia_mirror'", ":", "gaia_mirror", ",", "'complete_query_later'", ":", "complete_query_later", ",", "'lclistpkl'", ":", "lclistpkl", ",", "'nbrradiusarcsec'", ":", "nbrradiusarcsec", ",", "'maxnumneighbors'", ":", "maxnumneighbors", ",", "'plotdpi'", ":", "plotdpi", ",", "'findercachedir'", ":", "findercachedir", ",", "'verbose'", ":", "verbose", "}", ")", "for", "x", "in", "cplist", "]", "resultfutures", "=", "[", "]", "results", "=", "[", "]", "with", "ProcessPoolExecutor", "(", "max_workers", "=", "nworkers", ")", "as", "executor", ":", "resultfutures", "=", "executor", ".", "map", "(", "cp_objectinfo_worker", ",", "tasks", ")", "results", "=", "[", "x", "for", "x", "in", "resultfutures", "]", "executor", ".", "shutdown", "(", ")", "return", "results" ]
This updates objectinfo for a list of checkplots. Useful in cases where a previous round of GAIA/finderchart/external catalog acquisition failed. This will preserve the following keys in the checkplots if they exist: comments varinfo objectinfo.objecttags Parameters ---------- cplist : list of str A list of checkplot pickle file names to update. liststartindex : int The index of the input list to start working at. maxobjects : int The maximum number of objects to process in this run. Use this with `liststartindex` to effectively distribute working on a large list of input checkplot pickles over several sessions or machines. nworkers : int The number of parallel workers that will work on the checkplot update process. fast_mode : bool or float This runs the external catalog operations in a "fast" mode, with short timeouts and not trying to hit external catalogs that take a long time to respond. See the docstring for `checkplot.pkl_utils._pkl_finder_objectinfo` for details on how this works. If this is True, will run in "fast" mode with default timeouts (5 seconds in most cases). If this is a float, will run in "fast" mode with the provided timeout value in seconds. findercmap : str or matplotlib.cm.Colormap object The Colormap object to use for the finder chart image. finderconvolve : astropy.convolution.Kernel object or None If not None, the Kernel object to use for convolving the finder image. deredden_object : bool If this is True, will use the 2MASS DUST service to get extinction coefficients in various bands, and then try to deredden the magnitudes and colors of the object already present in the checkplot's objectinfo dict. custom_bandpasses : dict This is a dict used to provide custom bandpass definitions for any magnitude measurements in the objectinfo dict that are not automatically recognized by the `varclass.starfeatures.color_features` function. See its docstring for details on the required format. gaia_submit_timeout : float Sets the timeout in seconds to use when submitting a request to look up the object's information to the GAIA service. Note that if `fast_mode` is set, this is ignored. gaia_submit_tries : int Sets the maximum number of times the GAIA services will be contacted to obtain this object's information. If `fast_mode` is set, this is ignored, and the services will be contacted only once (meaning that a failure to respond will be silently ignored and no GAIA data will be added to the checkplot's objectinfo dict). gaia_max_timeout : float Sets the timeout in seconds to use when waiting for the GAIA service to respond to our request for the object's information. Note that if `fast_mode` is set, this is ignored. gaia_mirror : str This sets the GAIA mirror to use. This is a key in the `services.gaia.GAIA_URLS` dict which defines the URLs to hit for each mirror. complete_query_later : bool If this is True, saves the state of GAIA queries that are not yet complete when `gaia_max_timeout` is reached while waiting for the GAIA service to respond to our request. A later call for GAIA info on the same object will attempt to pick up the results from the existing query if it's completed. If `fast_mode` is True, this is ignored. lclistpkl : dict or str If this is provided, must be a dict resulting from reading a catalog produced by the `lcproc.catalogs.make_lclist` function or a str path pointing to the pickle file produced by that function.
This catalog is used to find neighbors of the current object in the current light curve collection. Looking at neighbors of the object within the radius specified by `nbrradiusarcsec` is useful for light curves produced by instruments that have a large pixel scale, so are susceptible to blending of variability and potential confusion of neighbor variability with that of the actual object being looked at. If this is None, no neighbor lookups will be performed. nbrradiusarcsec : float The radius in arcseconds to use for a search conducted around the coordinates of this object to look for any potential confusion and blending of variability amplitude caused by their proximity. maxnumneighbors : int The maximum number of neighbors that will have their light curves and magnitudes noted in this checkplot as potential blends with the target object. plotdpi : int The resolution in DPI of the plots to generate in this function (e.g. the finder chart, etc.) findercachedir : str The path to the astrobase cache directory for finder chart downloads from the NASA SkyView service. verbose : bool If True, will indicate progress and warn about potential problems. Returns ------- list of str Paths to the updated checkplot pickle files.
[ "This", "updates", "objectinfo", "for", "a", "list", "of", "checkplots", "." ]
python
valid
38.774359
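The `liststartindex`/`maxobjects` pair above is what makes the update distributable across sessions or machines. A minimal sketch of the chunk selection the function performs internally (the helper name `select_chunk` is ours, not astrobase's):

def select_chunk(cplist, liststartindex=None, maxobjects=None):
    # mirrors the start/end index handling in parallel_update_objectinfo_cplist
    if liststartindex is not None and maxobjects is None:
        return cplist[liststartindex:]
    if liststartindex is None and maxobjects is not None:
        return cplist[:maxobjects]
    if liststartindex is not None and maxobjects is not None:
        return cplist[liststartindex:liststartindex + maxobjects]
    return cplist

cplist = ['checkplot-%03i.pkl' % i for i in range(10)]
print(select_chunk(cplist, liststartindex=0, maxobjects=5))  # machine 1
print(select_chunk(cplist, liststartindex=5, maxobjects=5))  # machine 2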
solvebio/solvebio-python
solvebio/utils/tabulate.py
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/utils/tabulate.py#L470-L489
def _build_row(cells, padding, begin, sep, end): "Return a string which represents a row of data cells." pad = " " * padding padded_cells = [pad + cell + pad for cell in cells] # SolveBio: we're only displaying Key-Value tuples (dimension of 2). # enforce that we don't wrap lines by setting a max # limit on row width which is equal to TTY_COLS (see printing) rendered_cells = (begin + sep.join(padded_cells) + end).rstrip() if len(rendered_cells) > TTY_COLS: if not cells[-1].endswith(" ") and not cells[-1].endswith("-"): terminating_str = " ... " else: terminating_str = "" rendered_cells = "{0}{1}{2}".format( rendered_cells[:TTY_COLS - len(terminating_str) - 1], terminating_str, end) return rendered_cells
[ "def", "_build_row", "(", "cells", ",", "padding", ",", "begin", ",", "sep", ",", "end", ")", ":", "pad", "=", "\" \"", "*", "padding", "padded_cells", "=", "[", "pad", "+", "cell", "+", "pad", "for", "cell", "in", "cells", "]", "# SolveBio: we're only displaying Key-Value tuples (dimension of 2).", "# enforce that we don't wrap lines by setting a max", "# limit on row width which is equal to TTY_COLS (see printing)", "rendered_cells", "=", "(", "begin", "+", "sep", ".", "join", "(", "padded_cells", ")", "+", "end", ")", ".", "rstrip", "(", ")", "if", "len", "(", "rendered_cells", ")", ">", "TTY_COLS", ":", "if", "not", "cells", "[", "-", "1", "]", ".", "endswith", "(", "\" \"", ")", "and", "not", "cells", "[", "-", "1", "]", ".", "endswith", "(", "\"-\"", ")", ":", "terminating_str", "=", "\" ... \"", "else", ":", "terminating_str", "=", "\"\"", "rendered_cells", "=", "\"{0}{1}{2}\"", ".", "format", "(", "rendered_cells", "[", ":", "TTY_COLS", "-", "len", "(", "terminating_str", ")", "-", "1", "]", ",", "terminating_str", ",", "end", ")", "return", "rendered_cells" ]
Return a string which represents a row of data cells.
[ "Return", "a", "string", "which", "represents", "a", "row", "of", "data", "cells", "." ]
python
test
40.3
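A standalone sketch of the truncation rule in `_build_row`; `TTY_COLS` here stands in for the module-level terminal width (assumed 80):

TTY_COLS = 80  # assumed terminal width; the real module defines its own value

def build_row(cells, padding=1, begin='|', sep='|', end='|'):
    pad = ' ' * padding
    row = (begin + sep.join(pad + c + pad for c in cells) + end).rstrip()
    if len(row) > TTY_COLS:
        # append ' ... ' unless the last cell already ends in a space or dash
        term = '' if cells[-1].endswith((' ', '-')) else ' ... '
        row = row[:TTY_COLS - len(term) - 1] + term + end
    return row

row = build_row(['key', 'x' * 200])
print(row)                    # clipped, ending in ' ... |'
print(len(row) <= TTY_COLS)   # True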
awslabs/serverless-application-model
samtranslator/model/sam_resources.py
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/model/sam_resources.py#L644-L688
def _construct_lambda_layer(self, intrinsics_resolver): """Constructs and returns the Lambda layer. :returns: the Lambda layer version resource :rtype: LambdaLayerVersion """ # Resolve intrinsics if applicable: self.LayerName = self._resolve_string_parameter(intrinsics_resolver, self.LayerName, 'LayerName') self.LicenseInfo = self._resolve_string_parameter(intrinsics_resolver, self.LicenseInfo, 'LicenseInfo') self.Description = self._resolve_string_parameter(intrinsics_resolver, self.Description, 'Description') self.RetentionPolicy = self._resolve_string_parameter(intrinsics_resolver, self.RetentionPolicy, 'RetentionPolicy') retention_policy_value = self._get_retention_policy_value() attributes = self.get_passthrough_resource_attributes() if attributes is None: attributes = {} attributes['DeletionPolicy'] = retention_policy_value old_logical_id = self.logical_id new_logical_id = logical_id_generator.LogicalIdGenerator(old_logical_id, self.to_dict()).gen() self.logical_id = new_logical_id lambda_layer = LambdaLayerVersion(self.logical_id, depends_on=self.depends_on, attributes=attributes) # Changing the LayerName property: when a layer is published, it is given an Arn # example: arn:aws:lambda:us-west-2:123456789012:layer:MyLayer:1 # where MyLayer is the LayerName property if it exists; otherwise, it is the # LogicalId of this resource. Since a LayerVersion is an immutable resource, when # CloudFormation updates this resource, it will ALWAYS create a new version then # delete the old version if the logical ids match. What this does is change the # logical id of every layer (so a `DeletionPolicy: Retain` can work) and set the # LayerName property of the layer so that the Arn will still always be the same # with the exception of an incrementing version number. if not self.LayerName: self.LayerName = old_logical_id lambda_layer.LayerName = self.LayerName lambda_layer.Description = self.Description lambda_layer.Content = construct_s3_location_object(self.ContentUri, self.logical_id, 'ContentUri') lambda_layer.CompatibleRuntimes = self.CompatibleRuntimes lambda_layer.LicenseInfo = self.LicenseInfo return lambda_layer
[ "def", "_construct_lambda_layer", "(", "self", ",", "intrinsics_resolver", ")", ":", "# Resolve intrinsics if applicable:", "self", ".", "LayerName", "=", "self", ".", "_resolve_string_parameter", "(", "intrinsics_resolver", ",", "self", ".", "LayerName", ",", "'LayerName'", ")", "self", ".", "LicenseInfo", "=", "self", ".", "_resolve_string_parameter", "(", "intrinsics_resolver", ",", "self", ".", "LicenseInfo", ",", "'LicenseInfo'", ")", "self", ".", "Description", "=", "self", ".", "_resolve_string_parameter", "(", "intrinsics_resolver", ",", "self", ".", "Description", ",", "'Description'", ")", "self", ".", "RetentionPolicy", "=", "self", ".", "_resolve_string_parameter", "(", "intrinsics_resolver", ",", "self", ".", "RetentionPolicy", ",", "'RetentionPolicy'", ")", "retention_policy_value", "=", "self", ".", "_get_retention_policy_value", "(", ")", "attributes", "=", "self", ".", "get_passthrough_resource_attributes", "(", ")", "if", "attributes", "is", "None", ":", "attributes", "=", "{", "}", "attributes", "[", "'DeletionPolicy'", "]", "=", "retention_policy_value", "old_logical_id", "=", "self", ".", "logical_id", "new_logical_id", "=", "logical_id_generator", ".", "LogicalIdGenerator", "(", "old_logical_id", ",", "self", ".", "to_dict", "(", ")", ")", ".", "gen", "(", ")", "self", ".", "logical_id", "=", "new_logical_id", "lambda_layer", "=", "LambdaLayerVersion", "(", "self", ".", "logical_id", ",", "depends_on", "=", "self", ".", "depends_on", ",", "attributes", "=", "attributes", ")", "# Changing the LayerName property: when a layer is published, it is given an Arn", "# example: arn:aws:lambda:us-west-2:123456789012:layer:MyLayer:1", "# where MyLayer is the LayerName property if it exists; otherwise, it is the", "# LogicalId of this resource. Since a LayerVersion is an immutable resource, when", "# CloudFormation updates this resource, it will ALWAYS create a new version then", "# delete the old version if the logical ids match. What this does is change the", "# logical id of every layer (so a `DeletionPolicy: Retain` can work) and set the", "# LayerName property of the layer so that the Arn will still always be the same", "# with the exception of an incrementing version number.", "if", "not", "self", ".", "LayerName", ":", "self", ".", "LayerName", "=", "old_logical_id", "lambda_layer", ".", "LayerName", "=", "self", ".", "LayerName", "lambda_layer", ".", "Description", "=", "self", ".", "Description", "lambda_layer", ".", "Content", "=", "construct_s3_location_object", "(", "self", ".", "ContentUri", ",", "self", ".", "logical_id", ",", "'ContentUri'", ")", "lambda_layer", ".", "CompatibleRuntimes", "=", "self", ".", "CompatibleRuntimes", "lambda_layer", ".", "LicenseInfo", "=", "self", ".", "LicenseInfo", "return", "lambda_layer" ]
Constructs and returns the Lambda layer. :returns: the Lambda layer version resource :rtype: LambdaLayerVersion
[ "Constructs", "and", "returns", "the", "Lambda", "function", "." ]
python
train
55.2
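The logical-id regeneration is the interesting part: a `LambdaLayerVersion` is immutable, so SAM derives a fresh logical id from the resource content, which makes CloudFormation create a new version on any change while `DeletionPolicy` decides the old version's fate. A sketch of the idea (the hashing below is illustrative, not SAM's exact `LogicalIdGenerator` algorithm):

import hashlib
import json

def hashed_logical_id(old_logical_id, resource_dict):
    # equal content -> equal id; changed content -> new id -> new LayerVersion
    digest = hashlib.sha1(
        json.dumps(resource_dict, sort_keys=True).encode('utf-8')
    ).hexdigest()
    return old_logical_id + digest[:10]

v1 = hashed_logical_id('MyLayer', {'ContentUri': 's3://bucket/layer-v1.zip'})
v2 = hashed_logical_id('MyLayer', {'ContentUri': 's3://bucket/layer-v2.zip'})
print(v1, v2, v1 != v2)  # two distinct logical ids for different content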
watson-developer-cloud/python-sdk
ibm_watson/assistant_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/assistant_v1.py#L3047-L3056
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'text') and self.text is not None: _dict['text'] = self.text if hasattr(self, 'created') and self.created is not None: _dict['created'] = datetime_to_string(self.created) if hasattr(self, 'updated') and self.updated is not None: _dict['updated'] = datetime_to_string(self.updated) return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'text'", ")", "and", "self", ".", "text", "is", "not", "None", ":", "_dict", "[", "'text'", "]", "=", "self", ".", "text", "if", "hasattr", "(", "self", ",", "'created'", ")", "and", "self", ".", "created", "is", "not", "None", ":", "_dict", "[", "'created'", "]", "=", "datetime_to_string", "(", "self", ".", "created", ")", "if", "hasattr", "(", "self", ",", "'updated'", ")", "and", "self", ".", "updated", "is", "not", "None", ":", "_dict", "[", "'updated'", "]", "=", "datetime_to_string", "(", "self", ".", "updated", ")", "return", "_dict" ]
Return a json dictionary representing this model.
[ "Return", "a", "json", "dictionary", "representing", "this", "model", "." ]
python
train
47.2
honeynet/beeswarm
beeswarm/drones/client/baits/ftp.py
https://github.com/honeynet/beeswarm/blob/db51ea0bc29f631c3e3b5312b479ac9d5e31079a/beeswarm/drones/client/baits/ftp.py#L128-L140
def decide(self): """ Decides the next command to be launched based on the current state. :return: Tuple containing the next command name and its parameters. """ next_command_name = random.choice(self.COMMAND_MAP[self.state['last_command']]) param = '' if next_command_name == 'retrieve': param = random.choice(self.state['file_list']) elif next_command_name == 'cwd': param = random.choice(self.state['dir_list']) return next_command_name, param
[ "def", "decide", "(", "self", ")", ":", "next_command_name", "=", "random", ".", "choice", "(", "self", ".", "COMMAND_MAP", "[", "self", ".", "state", "[", "'last_command'", "]", "]", ")", "param", "=", "''", "if", "next_command_name", "==", "'retrieve'", ":", "param", "=", "random", ".", "choice", "(", "self", ".", "state", "[", "'file_list'", "]", ")", "elif", "next_command_name", "==", "'cwd'", ":", "param", "=", "random", ".", "choice", "(", "self", ".", "state", "[", "'dir_list'", "]", ")", "return", "next_command_name", ",", "param" ]
Decides the next command to be launched based on the current state. :return: Tuple containing the next command name and its parameters.
[ "Decides", "the", "next", "command", "to", "be", "launched", "based", "on", "the", "current", "state", "." ]
python
train
41.307692
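`decide` is a small Markov-style walk over `COMMAND_MAP`. A self-contained sketch with illustrative stand-ins for the bait's real command map and state:

import random

COMMAND_MAP = {
    'list': ['retrieve', 'cwd', 'list'],
    'retrieve': ['list', 'cwd'],
    'cwd': ['list', 'retrieve'],
}
state = {
    'last_command': 'list',
    'file_list': ['readme.txt', 'data.bin'],
    'dir_list': ['pub', 'uploads'],
}

for _ in range(5):
    name = random.choice(COMMAND_MAP[state['last_command']])
    param = ''
    if name == 'retrieve':
        param = random.choice(state['file_list'])
    elif name == 'cwd':
        param = random.choice(state['dir_list'])
    state['last_command'] = name
    print(name, param)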
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/frontend/qt/console/frontend_widget.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/qt/console/frontend_widget.py#L69-L73
def setFormat(self, start, count, format): """ Reimplemented to highlight selectively. """ start += self._current_offset super(FrontendHighlighter, self).setFormat(start, count, format)
[ "def", "setFormat", "(", "self", ",", "start", ",", "count", ",", "format", ")", ":", "start", "+=", "self", ".", "_current_offset", "super", "(", "FrontendHighlighter", ",", "self", ")", ".", "setFormat", "(", "start", ",", "count", ",", "format", ")" ]
Reimplemented to highlight selectively.
[ "Reimplemented", "to", "highlight", "selectively", "." ]
python
test
42.6
dsoprea/PySecure
pysecure/adapters/ssha.py
https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/ssha.py#L249-L260
def _ssh_forward_accept(ssh_session, timeout_ms): """Waiting for an incoming connection from a reverse forwarded port. Note that this results in a kernel block until a connection is received. """ ssh_channel = c_ssh_forward_accept(c_void_p(ssh_session), c_int(timeout_ms)) if ssh_channel is None: raise SshTimeoutException() return ssh_channel
[ "def", "_ssh_forward_accept", "(", "ssh_session", ",", "timeout_ms", ")", ":", "ssh_channel", "=", "c_ssh_forward_accept", "(", "c_void_p", "(", "ssh_session", ")", ",", "c_int", "(", "timeout_ms", ")", ")", "if", "ssh_channel", "is", "None", ":", "raise", "SshTimeoutException", "(", ")", "return", "ssh_channel" ]
Waiting for an incoming connection from a reverse forwarded port. Note that this results in a kernel block until a connection is received.
[ "Waiting", "for", "an", "incoming", "connection", "from", "a", "reverse", "forwarded", "port", ".", "Note", "that", "this", "results", "in", "a", "kernel", "block", "until", "a", "connection", "is", "received", "." ]
python
train
33.916667
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L3901-L3909
def xpathNextSelf(self, ctxt): """Traversal function for the "self" direction The self axis contains just the context node itself """ if ctxt is None: ctxt__o = None else: ctxt__o = ctxt._o ret = libxml2mod.xmlXPathNextSelf(ctxt__o, self._o) if ret is None: raise xpathError('xmlXPathNextSelf() failed') __tmp = xmlNode(_obj=ret) return __tmp
[ "def", "xpathNextSelf", "(", "self", ",", "ctxt", ")", ":", "if", "ctxt", "is", "None", ":", "ctxt__o", "=", "None", "else", ":", "ctxt__o", "=", "ctxt", ".", "_o", "ret", "=", "libxml2mod", ".", "xmlXPathNextSelf", "(", "ctxt__o", ",", "self", ".", "_o", ")", "if", "ret", "is", "None", ":", "raise", "xpathError", "(", "'xmlXPathNextSelf() failed'", ")", "__tmp", "=", "xmlNode", "(", "_obj", "=", "ret", ")", "return", "__tmp" ]
Traversal function for the "self" direction The self axis contains just the context node itself
[ "Traversal", "function", "for", "the", "self", "direction", "The", "self", "axis", "contains", "just", "the", "context", "node", "itself" ]
python
train
44.444444
edx/edx-enterprise
enterprise/admin/forms.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/admin/forms.py#L227-L231
def clean_notify(self): """ Clean the notify_on_enrollment field. """ return self.cleaned_data.get(self.Fields.NOTIFY, self.NotificationTypes.DEFAULT)
[ "def", "clean_notify", "(", "self", ")", ":", "return", "self", ".", "cleaned_data", ".", "get", "(", "self", ".", "Fields", ".", "NOTIFY", ",", "self", ".", "NotificationTypes", ".", "DEFAULT", ")" ]
Clean the notify_on_enrollment field.
[ "Clean", "the", "notify_on_enrollment", "field", "." ]
python
valid
35.6
crocs-muni/roca
roca/detect.py
https://github.com/crocs-muni/roca/blob/74ad6ce63c428d83dcffce9c5e26ef7b9e30faa5/roca/detect.py#L1027-L1066
def process_inputs(self): """ Processes input data. :return: list of results, one per processed input """ ret = [] files = self.args.files if files is None: return ret for fname in files: if fname == '-': if self.args.base64stdin: for line in sys.stdin: data = base64.b64decode(line) ret.append(self.process_file(data, fname)) continue else: fh = sys.stdin elif fname.endswith('.tar') or fname.endswith('.tar.gz'): sub = self.process_tar(fname) ret.append(sub) continue elif not os.path.isfile(fname): sub = self.process_dir(fname) ret.append(sub) continue else: fh = open(fname, 'rb') with fh: data = fh.read() sub = self.process_file(data, fname) ret.append(sub) return ret
[ "def", "process_inputs", "(", "self", ")", ":", "ret", "=", "[", "]", "files", "=", "self", ".", "args", ".", "files", "if", "files", "is", "None", ":", "return", "ret", "for", "fname", "in", "files", ":", "if", "fname", "==", "'-'", ":", "if", "self", ".", "args", ".", "base64stdin", ":", "for", "line", "in", "sys", ".", "stdin", ":", "data", "=", "base64", ".", "b64decode", "(", "line", ")", "ret", ".", "append", "(", "self", ".", "process_file", "(", "data", ",", "fname", ")", ")", "continue", "else", ":", "fh", "=", "sys", ".", "stdin", "elif", "fname", ".", "endswith", "(", "'.tar'", ")", "or", "fname", ".", "endswith", "(", "'.tar.gz'", ")", ":", "sub", "=", "self", ".", "process_tar", "(", "fname", ")", "ret", ".", "append", "(", "sub", ")", "continue", "elif", "not", "os", ".", "path", ".", "isfile", "(", "fname", ")", ":", "sub", "=", "self", ".", "process_dir", "(", "fname", ")", "ret", ".", "append", "(", "sub", ")", "continue", "else", ":", "fh", "=", "open", "(", "fname", ",", "'rb'", ")", "with", "fh", ":", "data", "=", "fh", ".", "read", "(", ")", "sub", "=", "self", ".", "process_file", "(", "data", ",", "fname", ")", "ret", ".", "append", "(", "sub", ")", "return", "ret" ]
Processes input data. :return: list of results, one per processed input
[ "Processes", "input", "data", ":", "return", ":" ]
python
train
26.125
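The dispatch rule in `process_inputs`, shown in isolation: '-' means stdin (optionally base64 per line), `.tar`/`.tar.gz` archives and directories get their own handlers, and anything else is read whole as a binary file:

import os

def classify(fname):
    # how process_inputs would route a given input name
    if fname == '-':
        return 'stdin'
    if fname.endswith(('.tar', '.tar.gz')):
        return 'tar'
    if not os.path.isfile(fname):
        return 'dir'
    return 'file'

for name in ('-', 'keys.tar.gz', '/etc', __file__):
    print(name, '->', classify(name))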
mongodb/mongo-python-driver
pymongo/message.py
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/message.py#L796-L803
def _delete(collection_name, spec, opts, flags): """Get an OP_DELETE message.""" encoded = _dict_to_bson(spec, False, opts) # Uses extensions. return b"".join([ _ZERO_32, _make_c_string(collection_name), _pack_int(flags), encoded]), len(encoded)
[ "def", "_delete", "(", "collection_name", ",", "spec", ",", "opts", ",", "flags", ")", ":", "encoded", "=", "_dict_to_bson", "(", "spec", ",", "False", ",", "opts", ")", "# Uses extensions.", "return", "b\"\"", ".", "join", "(", "[", "_ZERO_32", ",", "_make_c_string", "(", "collection_name", ")", ",", "_pack_int", "(", "flags", ")", ",", "encoded", "]", ")", ",", "len", "(", "encoded", ")" ]
Get an OP_DELETE message.
[ "Get", "an", "OP_DELETE", "message", "." ]
python
train
35.375
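The OP_DELETE body layout being assembled is: four reserved zero bytes, the collection namespace as a C string, a little-endian int32 of flags, then the BSON selector. A sketch using the public `bson` package in place of pymongo's private helpers:

import struct

import bson  # ships with the pymongo distribution

def op_delete_body(collection_name, spec, flags=0):
    encoded = bytes(bson.BSON.encode(spec))
    return b''.join([
        b'\x00\x00\x00\x00',                        # ZERO (reserved int32)
        collection_name.encode('utf-8') + b'\x00',  # cstring namespace
        struct.pack('<i', flags),                   # delete flags
        encoded,                                    # selector document
    ])

body = op_delete_body('db.coll', {'x': 1})
print(len(body))  # 4 + len('db.coll') + 1 + 4 + length of the BSON doc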
Esri/ArcREST
src/arcrest/enrichment/_geoenrichment.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/enrichment/_geoenrichment.py#L382-L443
def dataCollections(self, countryName=None, addDerivativeVariables=None, outFields=None, suppressNullValues=False): """ The GeoEnrichment service uses the concept of a data collection to define the data attributes returned by the enrichment service. Each data collection has a unique name that acts as an ID that is passed in the dataCollections parameter of the GeoEnrichment service. Some data collections (such as default) can be used in all supported countries. Other data collections may only be available in one or a collection of countries. Data collections may only be available in a subset of countries because of differences in the demographic data that is available for each country. A list of data collections for all available countries can be generated with the data collection discover method. For full help please go here: http://resources.arcgis.com/en/help/arcgis-rest-api/#/Data_collections/02r30000021t000000/ Inputs: countryName - lets the user supply an optional name of a country in order to get information about the data collections in that given country. addDerivativeVariables - Optional parameter to specify a list of field names that include variables for the derivative statistics. outFields - Optional parameter to specify a list of output fields in the response. suppressNullValues - Optional parameter to return only values that are not NULL in the output response. Adding the optional suppressNullValues parameter to any data collections discovery method will reduce the size of the output that is returned """ if addDerivativeVariables is None: addDerivativeVariables = ["*"] if outFields is None: outFields = ["*"] if countryName is None: url = self._base_url + self._url_data_collection else: url = self._base_url + self._url_data_collection + "/%s" % countryName params = { "f" : "token" } _addDerivVals = ["percent","index","average","all","*"] if addDerivativeVariables in _addDerivVals: params['addDerivativeVariables'] = addDerivativeVariables if not outFields is None: params['outFields'] = outFields if not suppressNullValues is None and \ isinstance(suppressNullValues, bool): if suppressNullValues: params['suppressNullValues'] = "true" else: params['suppressNullValues'] = "false" return self._post(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
[ "def", "dataCollections", "(", "self", ",", "countryName", "=", "None", ",", "addDerivativeVariables", "=", "None", ",", "outFields", "=", "None", ",", "suppressNullValues", "=", "False", ")", ":", "if", "addDerivativeVariables", "is", "None", ":", "addDerivativeVariables", "=", "[", "\"*\"", "]", "if", "outFields", "is", "None", ":", "outFields", "=", "[", "\"*\"", "]", "if", "countryName", "is", "None", ":", "url", "=", "self", ".", "_base_url", "+", "self", ".", "_url_data_collection", "else", ":", "url", "=", "self", ".", "_base_url", "+", "self", ".", "_url_data_collection", "+", "\"/%s\"", "%", "countryName", "params", "=", "{", "\"f\"", ":", "\"token\"", "}", "_addDerivVals", "=", "[", "\"percent\"", ",", "\"index\"", ",", "\"average\"", ",", "\"all\"", ",", "\"*\"", "]", "if", "addDerivativeVariables", "in", "_addDerivVals", ":", "params", "[", "'addDerivativeVariables'", "]", "=", "addDerivativeVariables", "if", "not", "outFields", "is", "None", ":", "params", "[", "'outFields'", "]", "=", "outFields", "if", "not", "suppressNullValues", "is", "None", "and", "isinstance", "(", "suppressNullValues", ",", "bool", ")", ":", "if", "suppressNullValues", ":", "params", "[", "'suppressNullValues'", "]", "=", "\"true\"", "else", ":", "params", "[", "'suppressNullValues'", "]", "=", "\"false\"", "return", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")" ]
The GeoEnrichment service uses the concept of a data collection to define the data attributes returned by the enrichment service. Each data collection has a unique name that acts as an ID that is passed in the dataCollections parameter of the GeoEnrichment service. Some data collections (such as default) can be used in all supported countries. Other data collections may only be available in one or a collection of countries. Data collections may only be available in a subset of countries because of differences in the demographic data that is available for each country. A list of data collections for all available countries can be generated with the data collection discover method. For full help please go here: http://resources.arcgis.com/en/help/arcgis-rest-api/#/Data_collections/02r30000021t000000/ Inputs: countryName - lets the user supply an optional name of a country in order to get information about the data collections in that given country. addDerivativeVariables - Optional parameter to specify a list of field names that include variables for the derivative statistics. outFields - Optional parameter to specify a list of output fields in the response. suppressNullValues - Optional parameter to return only values that are not NULL in the output response. Adding the optional suppressNullValues parameter to any data collections discovery method will reduce the size of the output that is returned
[ "The", "GeoEnrichment", "service", "uses", "the", "concept", "of", "a", "data", "collection", "to", "define", "the", "data", "attributes", "returned", "by", "the", "enrichment", "service", ".", "Each", "data", "collection", "has", "a", "unique", "name", "that", "acts", "as", "an", "ID", "that", "is", "passed", "in", "the", "dataCollections", "parameter", "of", "the", "GeoEnrichment", "service", ".", "Some", "data", "collections", "(", "such", "as", "default", ")", "can", "be", "used", "in", "all", "supported", "countries", ".", "Other", "data", "collections", "may", "only", "be", "available", "in", "one", "or", "a", "collection", "of", "countries", ".", "Data", "collections", "may", "only", "be", "available", "in", "a", "subset", "of", "countries", "because", "of", "differences", "in", "the", "demographic", "data", "that", "is", "available", "for", "each", "country", ".", "A", "list", "of", "data", "collections", "for", "all", "available", "countries", "can", "be", "generated", "with", "the", "data", "collection", "discover", "method", "." ]
python
train
48.983871
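One subtlety worth noting in `dataCollections`: the default `addDerivativeVariables` is the list `["*"]`, and the membership test `addDerivativeVariables in _addDerivVals` compares that whole list against individual strings, so the parameter is only forwarded when the caller passes a bare string such as `'all'`:

_addDerivVals = ["percent", "index", "average", "all", "*"]
print(["*"] in _addDerivVals)   # False: the default list never matches
print("all" in _addDerivVals)   # True: a bare string is forwarded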
saltstack/salt
salt/cloud/clouds/ec2.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/ec2.py#L3583-L3621
def _extract_instance_info(instances): ''' Given an instance query, return a dict of all instance data ''' ret = {} for instance in instances: # items could be type dict or list (for stopped EC2 instances) if isinstance(instance['instancesSet']['item'], list): for item in instance['instancesSet']['item']: name = _extract_name_tag(item) ret[name] = item ret[name]['name'] = name ret[name].update( dict( id=item['instanceId'], image=item['imageId'], size=item['instanceType'], state=item['instanceState']['name'], private_ips=item.get('privateIpAddress', []), public_ips=item.get('ipAddress', []) ) ) else: item = instance['instancesSet']['item'] name = _extract_name_tag(item) ret[name] = item ret[name]['name'] = name ret[name].update( dict( id=item['instanceId'], image=item['imageId'], size=item['instanceType'], state=item['instanceState']['name'], private_ips=item.get('privateIpAddress', []), public_ips=item.get('ipAddress', []) ) ) return ret
[ "def", "_extract_instance_info", "(", "instances", ")", ":", "ret", "=", "{", "}", "for", "instance", "in", "instances", ":", "# items could be type dict or list (for stopped EC2 instances)", "if", "isinstance", "(", "instance", "[", "'instancesSet'", "]", "[", "'item'", "]", ",", "list", ")", ":", "for", "item", "in", "instance", "[", "'instancesSet'", "]", "[", "'item'", "]", ":", "name", "=", "_extract_name_tag", "(", "item", ")", "ret", "[", "name", "]", "=", "item", "ret", "[", "name", "]", "[", "'name'", "]", "=", "name", "ret", "[", "name", "]", ".", "update", "(", "dict", "(", "id", "=", "item", "[", "'instanceId'", "]", ",", "image", "=", "item", "[", "'imageId'", "]", ",", "size", "=", "item", "[", "'instanceType'", "]", ",", "state", "=", "item", "[", "'instanceState'", "]", "[", "'name'", "]", ",", "private_ips", "=", "item", ".", "get", "(", "'privateIpAddress'", ",", "[", "]", ")", ",", "public_ips", "=", "item", ".", "get", "(", "'ipAddress'", ",", "[", "]", ")", ")", ")", "else", ":", "item", "=", "instance", "[", "'instancesSet'", "]", "[", "'item'", "]", "name", "=", "_extract_name_tag", "(", "item", ")", "ret", "[", "name", "]", "=", "item", "ret", "[", "name", "]", "[", "'name'", "]", "=", "name", "ret", "[", "name", "]", ".", "update", "(", "dict", "(", "id", "=", "item", "[", "'instanceId'", "]", ",", "image", "=", "item", "[", "'imageId'", "]", ",", "size", "=", "item", "[", "'instanceType'", "]", ",", "state", "=", "item", "[", "'instanceState'", "]", "[", "'name'", "]", ",", "private_ips", "=", "item", ".", "get", "(", "'privateIpAddress'", ",", "[", "]", ")", ",", "public_ips", "=", "item", ".", "get", "(", "'ipAddress'", ",", "[", "]", ")", ")", ")", "return", "ret" ]
Given an instance query, return a dict of all instance data
[ "Given", "an", "instance", "query", "return", "a", "dict", "of", "all", "instance", "data" ]
python
train
37.487179
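For reference, a fabricated DescribeInstances-style payload in the shape `_extract_instance_info` expects (values are illustrative, and `_extract_name_tag` is assumed to read the Name tag):

sample = [{
    'instancesSet': {'item': {
        'instanceId': 'i-0123456789abcdef0',
        'imageId': 'ami-12345678',
        'instanceType': 't2.micro',
        'instanceState': {'name': 'running'},
        'privateIpAddress': '10.0.0.5',
        'ipAddress': '203.0.113.7',
        'tagSet': {'item': {'key': 'Name', 'value': 'web-1'}},
    }},
}]
# _extract_instance_info(sample) would return a dict keyed by the Name tag:
# {'web-1': {'name': 'web-1', 'id': 'i-0123456789abcdef0',
#            'image': 'ami-12345678', 'size': 't2.micro', 'state': 'running',
#            'private_ips': '10.0.0.5', 'public_ips': '203.0.113.7', ...}}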
contentful-labs/contentful.py
contentful/cda/serialization.py
https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/serialization.py#L118-L134
def create_content_type(json): """Create :class:`.resource.ContentType` from JSON. :param json: JSON dict. :return: ContentType instance. """ result = ContentType(json['sys']) for field in json['fields']: field_id = field['id'] del field['id'] result.fields[field_id] = field result.name = json['name'] result.display_field = json.get('displayField') return result
[ "def", "create_content_type", "(", "json", ")", ":", "result", "=", "ContentType", "(", "json", "[", "'sys'", "]", ")", "for", "field", "in", "json", "[", "'fields'", "]", ":", "field_id", "=", "field", "[", "'id'", "]", "del", "field", "[", "'id'", "]", "result", ".", "fields", "[", "field_id", "]", "=", "field", "result", ".", "name", "=", "json", "[", "'name'", "]", "result", ".", "display_field", "=", "json", ".", "get", "(", "'displayField'", ")", "return", "result" ]
Create :class:`.resource.ContentType` from JSON. :param json: JSON dict. :return: ContentType instance.
[ "Create", ":", "class", ":", ".", "resource", ".", "ContentType", "from", "JSON", "." ]
python
train
27.058824
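An example input in the shape `create_content_type` expects, following Contentful's content-type JSON (`sys`, `fields`, `name`, `displayField`):

from contentful.cda.serialization import create_content_type

ct_json = {
    'sys': {'id': 'cat', 'type': 'ContentType'},
    'name': 'Cat',
    'displayField': 'name',
    'fields': [
        {'id': 'name', 'type': 'Text', 'name': 'Name'},
        {'id': 'lives', 'type': 'Integer', 'name': 'Lives'},
    ],
}
ct = create_content_type(ct_json)
# ct.fields == {'name': {'type': 'Text', 'name': 'Name'},
#               'lives': {'type': 'Integer', 'name': 'Lives'}}
# i.e. keyed by field id, with 'id' popped from each field dict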
saltstack/salt
salt/modules/aliases.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aliases.py#L64-L106
def __write_aliases_file(lines): ''' Write a new copy of the aliases file. Lines is a list of lines as returned by __parse_aliases. ''' afn = __get_aliases_filename() adir = os.path.dirname(afn) out = tempfile.NamedTemporaryFile(dir=adir, delete=False) if not __opts__.get('integration.test', False): if os.path.isfile(afn): afn_st = os.stat(afn) os.chmod(out.name, stat.S_IMODE(afn_st.st_mode)) os.chown(out.name, afn_st.st_uid, afn_st.st_gid) else: os.chmod(out.name, 0o644) os.chown(out.name, 0, 0) for (line_alias, line_target, line_comment) in lines: if isinstance(line_target, list): line_target = ', '.join(line_target) if not line_comment: line_comment = '' if line_alias and line_target: write_line = '{0}: {1}{2}\n'.format( line_alias, line_target, line_comment ) else: write_line = '{0}\n'.format(line_comment) if six.PY3: write_line = write_line.encode(__salt_system_encoding__) out.write(write_line) out.close() os.rename(out.name, afn) # Search $PATH for the newaliases command newaliases = salt.utils.path.which('newaliases') if newaliases is not None: __salt__['cmd.run'](newaliases) return True
[ "def", "__write_aliases_file", "(", "lines", ")", ":", "afn", "=", "__get_aliases_filename", "(", ")", "adir", "=", "os", ".", "path", ".", "dirname", "(", "afn", ")", "out", "=", "tempfile", ".", "NamedTemporaryFile", "(", "dir", "=", "adir", ",", "delete", "=", "False", ")", "if", "not", "__opts__", ".", "get", "(", "'integration.test'", ",", "False", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "afn", ")", ":", "afn_st", "=", "os", ".", "stat", "(", "afn", ")", "os", ".", "chmod", "(", "out", ".", "name", ",", "stat", ".", "S_IMODE", "(", "afn_st", ".", "st_mode", ")", ")", "os", ".", "chown", "(", "out", ".", "name", ",", "afn_st", ".", "st_uid", ",", "afn_st", ".", "st_gid", ")", "else", ":", "os", ".", "chmod", "(", "out", ".", "name", ",", "0o644", ")", "os", ".", "chown", "(", "out", ".", "name", ",", "0", ",", "0", ")", "for", "(", "line_alias", ",", "line_target", ",", "line_comment", ")", "in", "lines", ":", "if", "isinstance", "(", "line_target", ",", "list", ")", ":", "line_target", "=", "', '", ".", "join", "(", "line_target", ")", "if", "not", "line_comment", ":", "line_comment", "=", "''", "if", "line_alias", "and", "line_target", ":", "write_line", "=", "'{0}: {1}{2}\\n'", ".", "format", "(", "line_alias", ",", "line_target", ",", "line_comment", ")", "else", ":", "write_line", "=", "'{0}\\n'", ".", "format", "(", "line_comment", ")", "if", "six", ".", "PY3", ":", "write_line", "=", "write_line", ".", "encode", "(", "__salt_system_encoding__", ")", "out", ".", "write", "(", "write_line", ")", "out", ".", "close", "(", ")", "os", ".", "rename", "(", "out", ".", "name", ",", "afn", ")", "# Search $PATH for the newalises command", "newaliases", "=", "salt", ".", "utils", ".", "path", ".", "which", "(", "'newaliases'", ")", "if", "newaliases", "is", "not", "None", ":", "__salt__", "[", "'cmd.run'", "]", "(", "newaliases", ")", "return", "True" ]
Write a new copy of the aliases file. Lines is a list of lines as returned by __parse_aliases.
[ "Write", "a", "new", "copy", "of", "the", "aliases", "file", ".", "Lines", "is", "a", "list", "of", "lines", "as", "returned", "by", "__parse_aliases", "." ]
python
train
31.511628
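The `lines` argument is a list of `(alias, target, comment)` triples as returned by `__parse_aliases`; the target may be a string or a list (joined with ', '), and comment-only lines have falsy alias/target. An illustrative input:

lines = [
    ('root', 'admin@example.com', ''),                # simple alias
    ('ops', ['a@example.com', 'b@example.com'], ''),  # list target
    ('', '', '# local-only aliases'),                 # comment-only line
]
# __write_aliases_file(lines) would render these as:
#   root: admin@example.com
#   ops: a@example.com, b@example.com
#   # local-only aliases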
VIVelev/PyDojoML
dojo/cluster/kmeans.py
https://github.com/VIVelev/PyDojoML/blob/773fdce6866aa6decd306a5a85f94129fed816eb/dojo/cluster/kmeans.py#L77-L102
def fit(self, X): """The K-Means itself """ self._X = super().cluster(X) candidates = [] for _ in range(self.n_runs): self._init_random_centroids() while True: prev_clusters = self.clusters self._assign_clusters() self._move_centroids() if np.all(prev_clusters == self.clusters): break self._calc_distortion() candidates.append((self.distortion, self.centroids, self.clusters)) candidates.sort(key=lambda x: x[0]) self.distortion = candidates[0][0] self.centroids = candidates[0][1] self.clusters = candidates[0][2] return self
[ "def", "fit", "(", "self", ",", "X", ")", ":", "self", ".", "_X", "=", "super", "(", ")", ".", "cluster", "(", "X", ")", "candidates", "=", "[", "]", "for", "_", "in", "range", "(", "self", ".", "n_runs", ")", ":", "self", ".", "_init_random_centroids", "(", ")", "while", "True", ":", "prev_clusters", "=", "self", ".", "clusters", "self", ".", "_assign_clusters", "(", ")", "self", ".", "_move_centroids", "(", ")", "if", "np", ".", "all", "(", "prev_clusters", "==", "self", ".", "clusters", ")", ":", "break", "self", ".", "_calc_distortion", "(", ")", "candidates", ".", "append", "(", "(", "self", ".", "distortion", ",", "self", ".", "centroids", ",", "self", ".", "clusters", ")", ")", "candidates", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ")", "self", ".", "distortion", "=", "candidates", "[", "0", "]", "[", "0", "]", "self", ".", "centroids", "=", "candidates", "[", "0", "]", "[", "1", "]", "self", ".", "clusters", "=", "candidates", "[", "0", "]", "[", "2", "]", "return", "self" ]
The K-Means itself
[ "The", "K", "-", "Means", "itself" ]
python
train
28
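A hedged usage sketch of the clusterer; the constructor arguments are an assumption inferred from the attributes `fit` uses (`n_runs`, `centroids`, etc.), not confirmed from the class definition:

import numpy as np
from dojo.cluster import KMeans  # per the repo path above

# two well-separated blobs
X = np.vstack([np.random.randn(50, 2),
               np.random.randn(50, 2) + 5.0])
model = KMeans(2).fit(X)    # assumed: first argument is the cluster count
print(model.distortion)     # best distortion over the n_runs restarts
print(model.clusters[:5])   # per-sample cluster assignments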
ionelmc/python-cogen
cogen/core/proactors/epoll_impl.py
https://github.com/ionelmc/python-cogen/blob/83b0edb88425eba6e5bfda9f1dcd34642517e2a8/cogen/core/proactors/epoll_impl.py#L49-L83
def run(self, timeout = 0): """ Run a proactor loop and return new socket events. Timeout is a timedelta object, 0 if there are active coros, or None. The epoll timeout param is an integer number of milliseconds (1/1000 of a second). """ ptimeout = int(timeout.microseconds/1000+timeout.seconds*1000 if timeout else (self.m_resolution if timeout is None else 0)) if self.tokens: epoll_fd = self.epoll_fd events = epoll_wait(epoll_fd, 1024, ptimeout) len_events = len(events)-1 for nr, (ev, fd) in enumerate(events): act = self.shadow.pop(fd) if ev & EPOLLHUP: epoll_ctl(self.epoll_fd, EPOLL_CTL_DEL, fd, 0) self.handle_error_event(act, 'Hang up.', ConnectionClosed) elif ev & EPOLLERR: epoll_ctl(self.epoll_fd, EPOLL_CTL_DEL, fd, 0) self.handle_error_event(act, 'Unknown error.') else: if nr == len_events: ret = self.yield_event(act) if not ret: epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, ev | EPOLLONESHOT) self.shadow[fd] = act return ret else: if not self.handle_event(act): epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, ev | EPOLLONESHOT) self.shadow[fd] = act else: sleep(timeout)
[ "def", "run", "(", "self", ",", "timeout", "=", "0", ")", ":", "ptimeout", "=", "int", "(", "timeout", ".", "microseconds", "/", "1000", "+", "timeout", ".", "seconds", "*", "1000", "if", "timeout", "else", "(", "self", ".", "m_resolution", "if", "timeout", "is", "None", "else", "0", ")", ")", "if", "self", ".", "tokens", ":", "epoll_fd", "=", "self", ".", "epoll_fd", "events", "=", "epoll_wait", "(", "epoll_fd", ",", "1024", ",", "ptimeout", ")", "len_events", "=", "len", "(", "events", ")", "-", "1", "for", "nr", ",", "(", "ev", ",", "fd", ")", "in", "enumerate", "(", "events", ")", ":", "act", "=", "self", ".", "shadow", ".", "pop", "(", "fd", ")", "if", "ev", "&", "EPOLLHUP", ":", "epoll_ctl", "(", "self", ".", "epoll_fd", ",", "EPOLL_CTL_DEL", ",", "fd", ",", "0", ")", "self", ".", "handle_error_event", "(", "act", ",", "'Hang up.'", ",", "ConnectionClosed", ")", "elif", "ev", "&", "EPOLLERR", ":", "epoll_ctl", "(", "self", ".", "epoll_fd", ",", "EPOLL_CTL_DEL", ",", "fd", ",", "0", ")", "self", ".", "handle_error_event", "(", "act", ",", "'Unknown error.'", ")", "else", ":", "if", "nr", "==", "len_events", ":", "ret", "=", "self", ".", "yield_event", "(", "act", ")", "if", "not", "ret", ":", "epoll_ctl", "(", "epoll_fd", ",", "EPOLL_CTL_MOD", ",", "fd", ",", "ev", "|", "EPOLLONESHOT", ")", "self", ".", "shadow", "[", "fd", "]", "=", "act", "return", "ret", "else", ":", "if", "not", "self", ".", "handle_event", "(", "act", ")", ":", "epoll_ctl", "(", "epoll_fd", ",", "EPOLL_CTL_MOD", ",", "fd", ",", "ev", "|", "EPOLLONESHOT", ")", "self", ".", "shadow", "[", "fd", "]", "=", "act", "else", ":", "sleep", "(", "timeout", ")" ]
Run a proactor loop and return new socket events. Timeout is a timedelta object, 0 if there are active coros, or None. The epoll timeout param is an integer number of milliseconds (1/1000 of a second).
[ "Run", "a", "proactor", "loop", "and", "return", "new", "socket", "events", ".", "Timeout", "is", "a", "timedelta", "object", "0", "if", "active", "coros", "or", "None", ".", "epoll", "timeout", "param", "is", "a", "integer", "number", "of", "miliseconds", "(", "seconds", "/", "1000", ")", "." ]
python
train
45.314286
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L2534-L2537
def organization_membership_create(self, data, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/organization_memberships#create-membership" api_path = "/api/v2/organization_memberships.json" return self.call(api_path, method="POST", data=data, **kwargs)
[ "def", "organization_membership_create", "(", "self", ",", "data", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/organization_memberships.json\"", "return", "self", ".", "call", "(", "api_path", ",", "method", "=", "\"POST\"", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/core/organization_memberships#create-membership
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "core", "/", "organization_memberships#create", "-", "membership" ]
python
train
71.5
graphistry/pygraphistry
graphistry/plotter.py
https://github.com/graphistry/pygraphistry/blob/3dfc50e60232c6f5fedd6e5fa9d3048b606944b8/graphistry/plotter.py#L350-L381
def pandas2igraph(self, edges, directed=True): """Convert a pandas edge dataframe to an IGraph graph. Uses current bindings. Defaults to treating edges as directed. **Example** :: import graphistry g = graphistry.bind() es = pandas.DataFrame({'src': [0,1,2], 'dst': [1,2,0]}) g = g.bind(source='src', destination='dst') ig = g.pandas2igraph(es) ig.vs['community'] = ig.community_infomap().membership g.bind(point_color='community').plot(ig) """ import igraph self._check_mandatory_bindings(False) self._check_bound_attribs(edges, ['source', 'destination'], 'Edge') self._node = self._node or Plotter._defaultNodeId eattribs = edges.columns.values.tolist() eattribs.remove(self._source) eattribs.remove(self._destination) cols = [self._source, self._destination] + eattribs etuples = [tuple(x) for x in edges[cols].values] return igraph.Graph.TupleList(etuples, directed=directed, edge_attrs=eattribs, vertex_name_attr=self._node)
[ "def", "pandas2igraph", "(", "self", ",", "edges", ",", "directed", "=", "True", ")", ":", "import", "igraph", "self", ".", "_check_mandatory_bindings", "(", "False", ")", "self", ".", "_check_bound_attribs", "(", "edges", ",", "[", "'source'", ",", "'destination'", "]", ",", "'Edge'", ")", "self", ".", "_node", "=", "self", ".", "_node", "or", "Plotter", ".", "_defaultNodeId", "eattribs", "=", "edges", ".", "columns", ".", "values", ".", "tolist", "(", ")", "eattribs", ".", "remove", "(", "self", ".", "_source", ")", "eattribs", ".", "remove", "(", "self", ".", "_destination", ")", "cols", "=", "[", "self", ".", "_source", ",", "self", ".", "_destination", "]", "+", "eattribs", "etuples", "=", "[", "tuple", "(", "x", ")", "for", "x", "in", "edges", "[", "cols", "]", ".", "values", "]", "return", "igraph", ".", "Graph", ".", "TupleList", "(", "etuples", ",", "directed", "=", "directed", ",", "edge_attrs", "=", "eattribs", ",", "vertex_name_attr", "=", "self", ".", "_node", ")" ]
Convert a pandas edge dataframe to an IGraph graph. Uses current bindings. Defaults to treating edges as directed. **Example** :: import graphistry g = graphistry.bind() es = pandas.DataFrame({'src': [0,1,2], 'dst': [1,2,0]}) g = g.bind(source='src', destination='dst') ig = g.pandas2igraph(es) ig.vs['community'] = ig.community_infomap().membership g.bind(point_color='community').plot(ig)
[ "Convert", "a", "pandas", "edge", "dataframe", "to", "an", "IGraph", "graph", "." ]
python
train
37.125
tensorflow/tensor2tensor
tensor2tensor/models/image_transformer_2d.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/image_transformer_2d.py#L896-L907
def img2img_transformer_tiny(): """Tiny params.""" hparams = img2img_transformer2d_base() hparams.num_hidden_layers = 2 hparams.hidden_size = 128 hparams.batch_size = 4 hparams.max_length = 128 hparams.attention_key_channels = hparams.attention_value_channels = 0 hparams.filter_size = 128 hparams.num_heads = 1 hparams.pos = "timing" return hparams
[ "def", "img2img_transformer_tiny", "(", ")", ":", "hparams", "=", "img2img_transformer2d_base", "(", ")", "hparams", ".", "num_hidden_layers", "=", "2", "hparams", ".", "hidden_size", "=", "128", "hparams", ".", "batch_size", "=", "4", "hparams", ".", "max_length", "=", "128", "hparams", ".", "attention_key_channels", "=", "hparams", ".", "attention_value_channels", "=", "0", "hparams", ".", "filter_size", "=", "128", "hparams", ".", "num_heads", "=", "1", "hparams", ".", "pos", "=", "\"timing\"", "return", "hparams" ]
Tiny params.
[ "Tiny", "params", "." ]
python
train
30
jedie/DragonPy
boot_dragonpy.py
https://github.com/jedie/DragonPy/blob/6659e5b5133aab26979a498ee7453495773a4f6c/boot_dragonpy.py#L913-L956
def create_environment(home_dir, site_packages=False, clear=False, unzip_setuptools=False, prompt=None, search_dirs=None, download=False, no_setuptools=False, no_pip=False, no_wheel=False, symlink=True): """ Creates a new environment in ``home_dir``. If ``site_packages`` is true, then the global ``site-packages/`` directory will be on the path. If ``clear`` is true (default False) then the environment will first be cleared. """ home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir) py_executable = os.path.abspath(install_python( home_dir, lib_dir, inc_dir, bin_dir, site_packages=site_packages, clear=clear, symlink=symlink)) install_distutils(home_dir) to_install = [] if not no_setuptools: to_install.append('setuptools') if not no_pip: to_install.append('pip') if not no_wheel: to_install.append('wheel') if to_install: install_wheel( to_install, py_executable, search_dirs, download=download, ) install_activate(home_dir, bin_dir, prompt) install_python_config(home_dir, bin_dir, prompt)
[ "def", "create_environment", "(", "home_dir", ",", "site_packages", "=", "False", ",", "clear", "=", "False", ",", "unzip_setuptools", "=", "False", ",", "prompt", "=", "None", ",", "search_dirs", "=", "None", ",", "download", "=", "False", ",", "no_setuptools", "=", "False", ",", "no_pip", "=", "False", ",", "no_wheel", "=", "False", ",", "symlink", "=", "True", ")", ":", "home_dir", ",", "lib_dir", ",", "inc_dir", ",", "bin_dir", "=", "path_locations", "(", "home_dir", ")", "py_executable", "=", "os", ".", "path", ".", "abspath", "(", "install_python", "(", "home_dir", ",", "lib_dir", ",", "inc_dir", ",", "bin_dir", ",", "site_packages", "=", "site_packages", ",", "clear", "=", "clear", ",", "symlink", "=", "symlink", ")", ")", "install_distutils", "(", "home_dir", ")", "to_install", "=", "[", "]", "if", "not", "no_setuptools", ":", "to_install", ".", "append", "(", "'setuptools'", ")", "if", "not", "no_pip", ":", "to_install", ".", "append", "(", "'pip'", ")", "if", "not", "no_wheel", ":", "to_install", ".", "append", "(", "'wheel'", ")", "if", "to_install", ":", "install_wheel", "(", "to_install", ",", "py_executable", ",", "search_dirs", ",", "download", "=", "download", ",", ")", "install_activate", "(", "home_dir", ",", "bin_dir", ",", "prompt", ")", "install_python_config", "(", "home_dir", ",", "bin_dir", ",", "prompt", ")" ]
Creates a new environment in ``home_dir``. If ``site_packages`` is true, then the global ``site-packages/`` directory will be on the path. If ``clear`` is true (default False) then the environment will first be cleared.
[ "Creates", "a", "new", "environment", "in", "home_dir", "." ]
python
train
28.113636
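A minimal usage sketch of this bundled virtualenv bootstrap (the import path is an assumption; the module is normally run as a script):

from boot_dragonpy import create_environment

create_environment(
    'env',                 # home_dir: target directory for the environment
    site_packages=False,   # isolate from the global site-packages
    clear=True,            # wipe any existing environment first
    prompt='(dragonpy) ',  # shown by the generated activate script
)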
Azure/blobxfer
blobxfer/operations/resume.py
https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/operations/resume.py#L151-L189
def add_or_update_record( self, final_path, ase, chunk_size, next_integrity_chunk, completed, md5): # type: (DownloadResumeManager, pathlib.Path, # blobxfer.models.azure.StorageEntity, int, int, bool, # str) -> None """Add or update a resume record :param DownloadResumeManager self: this :param pathlib.Path final_path: final path :param blobxfer.models.azure.StorageEntity ase: Storage Entity :param int chunk_size: chunk size in bytes :param int next_integrity_chunk: next integrity chunk :param bool completed: if completed :param str md5: md5 hex digest """ key = blobxfer.operations.resume._BaseResumeManager.\ generate_record_key(ase) with self.datalock(): dl = self.get_record(ase, key=key, lock=False) if dl is None: dl = blobxfer.models.resume.Download( final_path=str(final_path), length=ase._size, chunk_size=chunk_size, next_integrity_chunk=next_integrity_chunk, completed=completed, md5=md5, ) else: if (dl.completed or next_integrity_chunk < dl.next_integrity_chunk): return if completed: dl.completed = completed else: dl.next_integrity_chunk = next_integrity_chunk dl.md5hexdigest = md5 self._data[key] = dl self._data.sync()
[ "def", "add_or_update_record", "(", "self", ",", "final_path", ",", "ase", ",", "chunk_size", ",", "next_integrity_chunk", ",", "completed", ",", "md5", ")", ":", "# type: (DownloadResumeManager, pathlib.Path,", "# blobxfer.models.azure.StorageEntity, int, int, bool,", "# str) -> None", "key", "=", "blobxfer", ".", "operations", ".", "resume", ".", "_BaseResumeManager", ".", "generate_record_key", "(", "ase", ")", "with", "self", ".", "datalock", "(", ")", ":", "dl", "=", "self", ".", "get_record", "(", "ase", ",", "key", "=", "key", ",", "lock", "=", "False", ")", "if", "dl", "is", "None", ":", "dl", "=", "blobxfer", ".", "models", ".", "resume", ".", "Download", "(", "final_path", "=", "str", "(", "final_path", ")", ",", "length", "=", "ase", ".", "_size", ",", "chunk_size", "=", "chunk_size", ",", "next_integrity_chunk", "=", "next_integrity_chunk", ",", "completed", "=", "completed", ",", "md5", "=", "md5", ",", ")", "else", ":", "if", "(", "dl", ".", "completed", "or", "next_integrity_chunk", "<", "dl", ".", "next_integrity_chunk", ")", ":", "return", "if", "completed", ":", "dl", ".", "completed", "=", "completed", "else", ":", "dl", ".", "next_integrity_chunk", "=", "next_integrity_chunk", "dl", ".", "md5hexdigest", "=", "md5", "self", ".", "_data", "[", "key", "]", "=", "dl", "self", ".", "_data", ".", "sync", "(", ")" ]
Add or update a resume record :param DownloadResumeManager self: this :param pathlib.Path final_path: final path :param blobxfer.models.azure.StorageEntity ase: Storage Entity :param int chunk_size: chunk size in bytes :param int next_integrity_chunk: next integrity chunk :param bool completed: if completed :param str md5: md5 hex digest
[ "Add", "or", "update", "a", "resume", "record", ":", "param", "DownloadResumeManager", "self", ":", "this", ":", "param", "pathlib", ".", "Path", "final_path", ":", "final", "path", ":", "param", "blobxfer", ".", "models", ".", "azure", ".", "StorageEntity", "ase", ":", "Storage", "Entity", ":", "param", "int", "chunk_size", ":", "chunk", "size", "in", "bytes", ":", "param", "int", "next_integrity_chunk", ":", "next", "integrity", "chunk", ":", "param", "bool", "completed", ":", "if", "completed", ":", "param", "str", "md5", ":", "md5", "hex", "digest" ]
python
train
41.794872
odlgroup/odl
odl/space/base_tensors.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/base_tensors.py#L907-L1006
def show(self, title=None, method='', indices=None, force_show=False, fig=None, **kwargs): """Display the function graphically. Parameters ---------- title : string, optional Set the title of the figure method : string, optional 1d methods: ``'plot'`` : graph plot ``'scatter'`` : scattered 2d points (2nd axis <-> value) 2d methods: ``'imshow'`` : image plot with coloring according to value, including a colorbar. ``'scatter'`` : cloud of scattered 3d points (3rd axis <-> value) indices : index expression, optional Display a slice of the array instead of the full array. The index expression is most easily created with the `numpy.s_` constructor, i.e. supply ``np.s_[:, 1, :]`` to display the first slice along the second axis. For data with 3 or more dimensions, the 2d slice in the first two axes at the "middle" along the remaining axes is shown (semantically ``[:, :, shape[2:] // 2]``). This option is mutually exclusive to ``coords``. force_show : bool, optional Whether the plot should be forced to be shown now or deferred until later. Note that some backends always display the plot, regardless of this value. fig : `matplotlib.figure.Figure`, optional The figure to show in. Expected to be of the same "style" as the figure given by this function. The most common use case is that ``fig`` is the return value of an earlier call to this function. kwargs : {'figsize', 'saveto', 'clim', ...}, optional Extra keyword arguments passed on to the display method. See the Matplotlib functions for documentation of extra options. Returns ------- fig : `matplotlib.figure.Figure` The resulting figure. It is also shown to the user. See Also -------- odl.util.graphics.show_discrete_data : Underlying implementation """ from odl.discr import uniform_grid from odl.util.graphics import show_discrete_data # Default to showing x-y slice "in the middle" if indices is None and self.ndim >= 3: indices = tuple( [slice(None)] * 2 + [n // 2 for n in self.space.shape[2:]] ) if isinstance(indices, (Integral, slice)): indices = (indices,) elif indices is None or indices == Ellipsis: indices = (slice(None),) * self.ndim else: indices = tuple(indices) # Replace None by slice(None) indices = tuple(slice(None) if idx is None else idx for idx in indices) if Ellipsis in indices: # Replace Ellipsis with the correct number of [:] expressions pos = indices.index(Ellipsis) indices = (indices[:pos] + (np.s_[:], ) * (self.ndim - len(indices) + 1) + indices[pos + 1:]) if len(indices) < self.ndim: raise ValueError('too few axes ({} < {})'.format(len(indices), self.ndim)) if len(indices) > self.ndim: raise ValueError('too many axes ({} > {})'.format(len(indices), self.ndim)) # Squeeze grid and values according to the index expression full_grid = uniform_grid([0] * self.ndim, np.array(self.shape) - 1, self.shape) grid = full_grid[indices].squeeze() values = self.asarray()[indices].squeeze() return show_discrete_data(values, grid, title=title, method=method, force_show=force_show, fig=fig, **kwargs)
[ "def", "show", "(", "self", ",", "title", "=", "None", ",", "method", "=", "''", ",", "indices", "=", "None", ",", "force_show", "=", "False", ",", "fig", "=", "None", ",", "*", "*", "kwargs", ")", ":", "from", "odl", ".", "discr", "import", "uniform_grid", "from", "odl", ".", "util", ".", "graphics", "import", "show_discrete_data", "# Default to showing x-y slice \"in the middle\"", "if", "indices", "is", "None", "and", "self", ".", "ndim", ">=", "3", ":", "indices", "=", "tuple", "(", "[", "slice", "(", "None", ")", "]", "*", "2", "+", "[", "n", "//", "2", "for", "n", "in", "self", ".", "space", ".", "shape", "[", "2", ":", "]", "]", ")", "if", "isinstance", "(", "indices", ",", "(", "Integral", ",", "slice", ")", ")", ":", "indices", "=", "(", "indices", ",", ")", "elif", "indices", "is", "None", "or", "indices", "==", "Ellipsis", ":", "indices", "=", "(", "slice", "(", "None", ")", ",", ")", "*", "self", ".", "ndim", "else", ":", "indices", "=", "tuple", "(", "indices", ")", "# Replace None by slice(None)", "indices", "=", "tuple", "(", "slice", "(", "None", ")", "if", "idx", "is", "None", "else", "idx", "for", "idx", "in", "indices", ")", "if", "Ellipsis", "in", "indices", ":", "# Replace Ellipsis with the correct number of [:] expressions", "pos", "=", "indices", ".", "index", "(", "Ellipsis", ")", "indices", "=", "(", "indices", "[", ":", "pos", "]", "+", "(", "np", ".", "s_", "[", ":", "]", ",", ")", "*", "(", "self", ".", "ndim", "-", "len", "(", "indices", ")", "+", "1", ")", "+", "indices", "[", "pos", "+", "1", ":", "]", ")", "if", "len", "(", "indices", ")", "<", "self", ".", "ndim", ":", "raise", "ValueError", "(", "'too few axes ({} < {})'", ".", "format", "(", "len", "(", "indices", ")", ",", "self", ".", "ndim", ")", ")", "if", "len", "(", "indices", ")", ">", "self", ".", "ndim", ":", "raise", "ValueError", "(", "'too many axes ({} > {})'", ".", "format", "(", "len", "(", "indices", ")", ",", "self", ".", "ndim", ")", ")", "# Squeeze grid and values according to the index expression", "full_grid", "=", "uniform_grid", "(", "[", "0", "]", "*", "self", ".", "ndim", ",", "np", ".", "array", "(", "self", ".", "shape", ")", "-", "1", ",", "self", ".", "shape", ")", "grid", "=", "full_grid", "[", "indices", "]", ".", "squeeze", "(", ")", "values", "=", "self", ".", "asarray", "(", ")", "[", "indices", "]", ".", "squeeze", "(", ")", "return", "show_discrete_data", "(", "values", ",", "grid", ",", "title", "=", "title", ",", "method", "=", "method", ",", "force_show", "=", "force_show", ",", "fig", "=", "fig", ",", "*", "*", "kwargs", ")" ]
Display the function graphically. Parameters ---------- title : string, optional Set the title of the figure method : string, optional 1d methods: ``'plot'`` : graph plot ``'scatter'`` : scattered 2d points (2nd axis <-> value) 2d methods: ``'imshow'`` : image plot with coloring according to value, including a colorbar. ``'scatter'`` : cloud of scattered 3d points (3rd axis <-> value) indices : index expression, optional Display a slice of the array instead of the full array. The index expression is most easily created with the `numpy.s_` constructor, i.e. supply ``np.s_[:, 1, :]`` to display the first slice along the second axis. For data with 3 or more dimensions, the 2d slice in the first two axes at the "middle" along the remaining axes is shown (semantically ``[:, :, shape[2:] // 2]``). This option is mutually exclusive to ``coords``. force_show : bool, optional Whether the plot should be forced to be shown now or deferred until later. Note that some backends always displays the plot, regardless of this value. fig : `matplotlib.figure.Figure`, optional The figure to show in. Expected to be of same "style", as the figure given by this function. The most common use case is that ``fig`` is the return value of an earlier call to this function. kwargs : {'figsize', 'saveto', 'clim', ...}, optional Extra keyword arguments passed on to the display method. See the Matplotlib functions for documentation of extra options. Returns ------- fig : `matplotlib.figure.Figure` The resulting figure. It is also shown to the user. See Also -------- odl.util.graphics.show_discrete_data : Underlying implementation
[ "Display", "the", "function", "graphically", "." ]
python
train
39.27
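The index-expression normalization inside show() is the subtle part; the standalone sketch below reproduces just that step with the odl grid and plotting calls left out (the function name and the error message are mine, not odl's):

import numpy as np
from numbers import Integral

def normalize_indices(indices, ndim):
    # Scalars and slices become 1-tuples; None or a bare Ellipsis selects
    # everything; embedded None entries become slice(None); a single
    # Ellipsis expands to the right number of [:] expressions.
    if isinstance(indices, (Integral, slice)):
        indices = (indices,)
    elif indices is None or indices == Ellipsis:
        indices = (slice(None),) * ndim
    else:
        indices = tuple(indices)
    indices = tuple(slice(None) if idx is None else idx for idx in indices)
    if Ellipsis in indices:
        pos = indices.index(Ellipsis)
        indices = (indices[:pos]
                   + (np.s_[:],) * (ndim - len(indices) + 1)
                   + indices[pos + 1:])
    if len(indices) != ndim:
        raise ValueError('expected {} axes, got {}'.format(ndim, len(indices)))
    return indices

print(normalize_indices(np.s_[:, 1, ...], ndim=4))
# (slice(None, None, None), 1, slice(None, None, None), slice(None, None, None))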
edx/bok-choy
bok_choy/browser.py
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/browser.py#L79-L104
def save_source(driver, name):
    """
    Save the rendered HTML of the browser.

    The location of the source can be configured
    by the environment variable `SAVED_SOURCE_DIR`.  If not set,
    this defaults to the current working directory.

    Args:
        driver (selenium.webdriver): The Selenium-controlled browser.
        name (str): A name to use in the output file name.
            Note that ".html" is appended automatically

    Returns:
        None
    """
    source = driver.page_source
    # Default to the current working directory when SAVED_SOURCE_DIR is unset,
    # matching the documented behavior (os.path.join(None, ...) would raise).
    file_name = os.path.join(os.environ.get('SAVED_SOURCE_DIR', ''),
                             '{name}.html'.format(name=name))
    try:
        with open(file_name, 'wb') as output_file:
            output_file.write(source.encode('utf-8'))
    except Exception:  # pylint: disable=broad-except
        msg = u"Could not save the browser page source to {}.".format(file_name)
        LOGGER.warning(msg)
[ "def", "save_source", "(", "driver", ",", "name", ")", ":", "source", "=", "driver", ".", "page_source", "file_name", "=", "os", ".", "path", ".", "join", "(", "os", ".", "environ", ".", "get", "(", "'SAVED_SOURCE_DIR'", ")", ",", "'{name}.html'", ".", "format", "(", "name", "=", "name", ")", ")", "try", ":", "with", "open", "(", "file_name", ",", "'wb'", ")", "as", "output_file", ":", "output_file", ".", "write", "(", "source", ".", "encode", "(", "'utf-8'", ")", ")", "except", "Exception", ":", "# pylint: disable=broad-except", "msg", "=", "u\"Could not save the browser page source to {}.\"", ".", "format", "(", "file_name", ")", "LOGGER", ".", "warning", "(", "msg", ")" ]
Save the rendered HTML of the browser. The location of the source can be configured by the environment variable `SAVED_SOURCE_DIR`. If not set, this defaults to the current working directory. Args: driver (selenium.webdriver): The Selenium-controlled browser. name (str): A name to use in the output file name. Note that ".html" is appended automatically Returns: None
[ "Save", "the", "rendered", "HTML", "of", "the", "browser", "." ]
python
train
34.269231
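A hypothetical end-to-end use of save_source (the directory, URL, and browser choice are illustrative; a matching webdriver binary is assumed to be on PATH):

import os
from selenium import webdriver
from bok_choy.browser import save_source

os.makedirs('/tmp/test_artifacts', exist_ok=True)
os.environ['SAVED_SOURCE_DIR'] = '/tmp/test_artifacts'

driver = webdriver.Firefox()
driver.get('https://example.com')
save_source(driver, 'example_home')  # writes /tmp/test_artifacts/example_home.html
driver.quit()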
jkenlooper/chill
src/chill/operate.py
https://github.com/jkenlooper/chill/blob/35360c17c2a3b769ecb5406c6dabcf4cc70bd76f/src/chill/operate.py#L391-L481
def operate_menu(): "Select between these operations on the database" selection = True while selection: print globals()['operate_menu'].__doc__ selection = select([ 'chill.database functions', 'execute sql file', 'render_node', 'New collection', 'Manage collection', 'Add document for node', 'help', ]) if selection == 'chill.database functions': mode_database_functions() elif selection == 'execute sql file': print "View the sql file and show a fill in the blanks interface with raw_input" sqlfile = choose_query_file() if not sqlfile: # return to the menu choices if not file picked selection = True else: sql_named_placeholders_re = re.compile(r":(\w+)") sql = fetch_query_string(sqlfile) placeholders = set(sql_named_placeholders_re.findall(sql)) print sql data = {} for placeholder in placeholders: value = raw_input(placeholder + ': ') data[placeholder] = value result = [] try: result = db.execute(text(sql), data) except DatabaseError as err: current_app.logger.error("DatabaseError: %s", err) if result and result.returns_rows: result = result.fetchall() print result if not result: print 'No results.' else: kw = result[0] if 'node_id' in kw: print 'render node %s' % kw['node_id'] value = render_node(kw['node_id'], **kw) print safe_dump(value, default_flow_style=False) else: #print safe_dump(rowify(result, [(x, None) for x in result[0].keys()]), default_flow_style=False) print safe_dump([dict(zip(x.keys(), x.values())) for x in result], default_flow_style=False) elif selection == 'render_node': print globals()['render_node'].__doc__ node_id = existing_node_input() value = render_value_for_node(node_id) print safe_dump(value, default_flow_style=False) elif selection == 'New collection': mode_new_collection() elif selection == 'Manage collection': mode_collection() elif selection == 'Add document for node': folder = current_app.config.get('DOCUMENT_FOLDER') if not folder: print "No DOCUMENT_FOLDER configured for the application." else: choices = map(os.path.basename, glob(os.path.join(folder, '*')) ) choices.sort() if len(choices) == 0: print "No files found in DOCUMENT_FOLDER." else: filename = select(choices) if filename: defaultname = os.path.splitext(filename)[0] nodename = raw_input("Enter name for node [{0}]: ".format(defaultname)) or defaultname node = insert_node(name=nodename, value=filename) print "Added document '%s' to node '%s' with id: %s" % (filename, nodename, node) elif selection == 'help': print "------" print __doc__ print "------" else: print 'Done'
[ "def", "operate_menu", "(", ")", ":", "selection", "=", "True", "while", "selection", ":", "print", "globals", "(", ")", "[", "'operate_menu'", "]", ".", "__doc__", "selection", "=", "select", "(", "[", "'chill.database functions'", ",", "'execute sql file'", ",", "'render_node'", ",", "'New collection'", ",", "'Manage collection'", ",", "'Add document for node'", ",", "'help'", ",", "]", ")", "if", "selection", "==", "'chill.database functions'", ":", "mode_database_functions", "(", ")", "elif", "selection", "==", "'execute sql file'", ":", "print", "\"View the sql file and show a fill in the blanks interface with raw_input\"", "sqlfile", "=", "choose_query_file", "(", ")", "if", "not", "sqlfile", ":", "# return to the menu choices if not file picked", "selection", "=", "True", "else", ":", "sql_named_placeholders_re", "=", "re", ".", "compile", "(", "r\":(\\w+)\"", ")", "sql", "=", "fetch_query_string", "(", "sqlfile", ")", "placeholders", "=", "set", "(", "sql_named_placeholders_re", ".", "findall", "(", "sql", ")", ")", "print", "sql", "data", "=", "{", "}", "for", "placeholder", "in", "placeholders", ":", "value", "=", "raw_input", "(", "placeholder", "+", "': '", ")", "data", "[", "placeholder", "]", "=", "value", "result", "=", "[", "]", "try", ":", "result", "=", "db", ".", "execute", "(", "text", "(", "sql", ")", ",", "data", ")", "except", "DatabaseError", "as", "err", ":", "current_app", ".", "logger", ".", "error", "(", "\"DatabaseError: %s\"", ",", "err", ")", "if", "result", "and", "result", ".", "returns_rows", ":", "result", "=", "result", ".", "fetchall", "(", ")", "print", "result", "if", "not", "result", ":", "print", "'No results.'", "else", ":", "kw", "=", "result", "[", "0", "]", "if", "'node_id'", "in", "kw", ":", "print", "'render node %s'", "%", "kw", "[", "'node_id'", "]", "value", "=", "render_node", "(", "kw", "[", "'node_id'", "]", ",", "*", "*", "kw", ")", "print", "safe_dump", "(", "value", ",", "default_flow_style", "=", "False", ")", "else", ":", "#print safe_dump(rowify(result, [(x, None) for x in result[0].keys()]), default_flow_style=False)", "print", "safe_dump", "(", "[", "dict", "(", "zip", "(", "x", ".", "keys", "(", ")", ",", "x", ".", "values", "(", ")", ")", ")", "for", "x", "in", "result", "]", ",", "default_flow_style", "=", "False", ")", "elif", "selection", "==", "'render_node'", ":", "print", "globals", "(", ")", "[", "'render_node'", "]", ".", "__doc__", "node_id", "=", "existing_node_input", "(", ")", "value", "=", "render_value_for_node", "(", "node_id", ")", "print", "safe_dump", "(", "value", ",", "default_flow_style", "=", "False", ")", "elif", "selection", "==", "'New collection'", ":", "mode_new_collection", "(", ")", "elif", "selection", "==", "'Manage collection'", ":", "mode_collection", "(", ")", "elif", "selection", "==", "'Add document for node'", ":", "folder", "=", "current_app", ".", "config", ".", "get", "(", "'DOCUMENT_FOLDER'", ")", "if", "not", "folder", ":", "print", "\"No DOCUMENT_FOLDER configured for the application.\"", "else", ":", "choices", "=", "map", "(", "os", ".", "path", ".", "basename", ",", "glob", "(", "os", ".", "path", ".", "join", "(", "folder", ",", "'*'", ")", ")", ")", "choices", ".", "sort", "(", ")", "if", "len", "(", "choices", ")", "==", "0", ":", "print", "\"No files found in DOCUMENT_FOLDER.\"", "else", ":", "filename", "=", "select", "(", "choices", ")", "if", "filename", ":", "defaultname", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "0", "]", "nodename", "=", 
"raw_input", "(", "\"Enter name for node [{0}]: \"", ".", "format", "(", "defaultname", ")", ")", "or", "defaultname", "node", "=", "insert_node", "(", "name", "=", "nodename", ",", "value", "=", "filename", ")", "print", "\"Added document '%s' to node '%s' with id: %s\"", "%", "(", "filename", ",", "nodename", ",", "node", ")", "elif", "selection", "==", "'help'", ":", "print", "\"------\"", "print", "__doc__", "print", "\"------\"", "else", ":", "print", "'Done'" ]
Select between these operations on the database
[ "Select", "between", "these", "operations", "on", "the", "database" ]
python
train
40.725275
rackerlabs/simpl
simpl/utils/cli.py
https://github.com/rackerlabs/simpl/blob/60ed3336a931cd6a7a7246e60f26165d9dc7c99c/simpl/utils/cli.py#L36-L49
def error(self, message, print_help=False): """Provide a more helpful message if there are too few arguments.""" if 'too few arguments' in message.lower(): target = sys.argv.pop(0) sys.argv.insert( 0, os.path.basename(target) or os.path.relpath(target)) message = ("%s. Try getting help with `%s --help`" % (message, " ".join(sys.argv))) if print_help: self.print_help() else: self.print_usage() sys.stderr.write('\nerror: %s\n' % message) sys.exit(2)
[ "def", "error", "(", "self", ",", "message", ",", "print_help", "=", "False", ")", ":", "if", "'too few arguments'", "in", "message", ".", "lower", "(", ")", ":", "target", "=", "sys", ".", "argv", ".", "pop", "(", "0", ")", "sys", ".", "argv", ".", "insert", "(", "0", ",", "os", ".", "path", ".", "basename", "(", "target", ")", "or", "os", ".", "path", ".", "relpath", "(", "target", ")", ")", "message", "=", "(", "\"%s. Try getting help with `%s --help`\"", "%", "(", "message", ",", "\" \"", ".", "join", "(", "sys", ".", "argv", ")", ")", ")", "if", "print_help", ":", "self", ".", "print_help", "(", ")", "else", ":", "self", ".", "print_usage", "(", ")", "sys", ".", "stderr", ".", "write", "(", "'\\nerror: %s\\n'", "%", "message", ")", "sys", ".", "exit", "(", "2", ")" ]
Provide a more helpful message if there are too few arguments.
[ "Provide", "a", "more", "helpful", "message", "if", "there", "are", "too", "few", "arguments", "." ]
python
train
41.785714
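A simplified sketch of wiring this override into an argparse parser; the class name HelpfulParser and the dropped sys.argv rewriting are my simplifications, not simpl's:

import sys
import argparse

class HelpfulParser(argparse.ArgumentParser):
    def error(self, message, print_help=False):
        # Same idea as above: point the user at `<prog> --help` on bad input.
        message = '%s. Try getting help with `%s --help`' % (message, self.prog)
        if print_help:
            self.print_help()
        else:
            self.print_usage()
        sys.stderr.write('\nerror: %s\n' % message)
        sys.exit(2)

parser = HelpfulParser(prog='simpl-demo')
parser.add_argument('target')
parser.parse_args([])  # prints usage plus "... Try getting help with `simpl-demo --help`" and exits 2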
Ouranosinc/xclim
xclim/generic.py
https://github.com/Ouranosinc/xclim/blob/2080d139188bd8de2aeca097a025c2d89d6e0e09/xclim/generic.py#L247-L256
def default_freq(**indexer):
    """Return the default frequency."""
    freq = 'AS-JAN'
    if indexer:
        if 'DJF' in indexer.values():
            freq = 'AS-DEC'
        # Compare against a list: in Python 3, dict_values never compares
        # equal to a list, so the original comparison was always True.
        if 'month' in indexer and sorted(indexer.values()) != list(indexer.values()):
            raise NotImplementedError
    return freq
[ "def", "default_freq", "(", "*", "*", "indexer", ")", ":", "freq", "=", "'AS-JAN'", "if", "indexer", ":", "if", "'DJF'", "in", "indexer", ".", "values", "(", ")", ":", "freq", "=", "'AS-DEC'", "if", "'month'", "in", "indexer", "and", "sorted", "(", "indexer", ".", "values", "(", ")", ")", "!=", "indexer", ".", "values", "(", ")", ":", "raise", "(", "NotImplementedError", ")", "return", "freq" ]
Return the default frequency.
[ "Return", "the", "default", "frequency", "." ]
python
train
29.8
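A quick demonstration of the branches above, assuming the xclim version shown here is importable (the indexer keyword names follow xclim's season/month convention):

from xclim.generic import default_freq

print(default_freq())                 # 'AS-JAN' -- annual frequency anchored in January
print(default_freq(season='DJF'))     # 'AS-DEC' -- DJF seasons span the year boundary
print(default_freq(month=[6, 7, 8]))  # 'AS-JAN' -- months in order, no special anchoring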
NuGrid/NuGridPy
nugridpy/ppn.py
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/ppn.py#L813-L850
def _getattr(self, attri, fname=None, numtype='cycNum'): ''' Private method for getting an attribute, called from get.''' if str(fname.__class__)=="<type 'list'>": isList=True else: isList=False data=[] if fname==None: fname=self.files numtype='file' isList=True if isList: for i in range(len(fname)): if attri in self.cattrs: data.append(self.getCycleData(attri,fname[i],numtype)) elif attri in self.dcols: data.append(self.getColData(attri,fname[i],numtype)) elif attri in self.get('ISOTP',fname,numtype): data.append(self.getElement(attri,fname[i],numtype)) else: print('Attribute '+attri+ ' does not exist') print('Returning none') return None else: if attri in self.cattrs: return self.getCycleData(attri,fname,numtype) elif attri in self.dcols: return self.getColData(attri,fname,numtype) elif attri in self.get('ISOTP',fname,numtype): return self.getElement(attri,fname,numtype) else: print('Attribute '+attri+ ' does not exist') print('Returning none') return None return data
[ "def", "_getattr", "(", "self", ",", "attri", ",", "fname", "=", "None", ",", "numtype", "=", "'cycNum'", ")", ":", "if", "str", "(", "fname", ".", "__class__", ")", "==", "\"<type 'list'>\"", ":", "isList", "=", "True", "else", ":", "isList", "=", "False", "data", "=", "[", "]", "if", "fname", "==", "None", ":", "fname", "=", "self", ".", "files", "numtype", "=", "'file'", "isList", "=", "True", "if", "isList", ":", "for", "i", "in", "range", "(", "len", "(", "fname", ")", ")", ":", "if", "attri", "in", "self", ".", "cattrs", ":", "data", ".", "append", "(", "self", ".", "getCycleData", "(", "attri", ",", "fname", "[", "i", "]", ",", "numtype", ")", ")", "elif", "attri", "in", "self", ".", "dcols", ":", "data", ".", "append", "(", "self", ".", "getColData", "(", "attri", ",", "fname", "[", "i", "]", ",", "numtype", ")", ")", "elif", "attri", "in", "self", ".", "get", "(", "'ISOTP'", ",", "fname", ",", "numtype", ")", ":", "data", ".", "append", "(", "self", ".", "getElement", "(", "attri", ",", "fname", "[", "i", "]", ",", "numtype", ")", ")", "else", ":", "print", "(", "'Attribute '", "+", "attri", "+", "' does not exist'", ")", "print", "(", "'Returning none'", ")", "return", "None", "else", ":", "if", "attri", "in", "self", ".", "cattrs", ":", "return", "self", ".", "getCycleData", "(", "attri", ",", "fname", ",", "numtype", ")", "elif", "attri", "in", "self", ".", "dcols", ":", "return", "self", ".", "getColData", "(", "attri", ",", "fname", ",", "numtype", ")", "elif", "attri", "in", "self", ".", "get", "(", "'ISOTP'", ",", "fname", ",", "numtype", ")", ":", "return", "self", ".", "getElement", "(", "attri", ",", "fname", ",", "numtype", ")", "else", ":", "print", "(", "'Attribute '", "+", "attri", "+", "' does not exist'", ")", "print", "(", "'Returning none'", ")", "return", "None", "return", "data" ]
Private method for getting an attribute, called from get.
[ "Private", "method", "for", "getting", "an", "attribute", "called", "from", "get", "." ]
python
train
37.210526
gwastro/pycbc
pycbc/workflow/core.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/core.py#L54-L67
def check_output_error_and_retcode(*popenargs, **kwargs):
    """
    This function is used to obtain the stdout of a command. It is only used
    internally; we recommend using the make_external_call function if you want
    to call external executables.
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                               *popenargs, **kwargs)
    output, error = process.communicate()
    retcode = process.poll()
    return output, error, retcode
[ "def", "check_output_error_and_retcode", "(", "*", "popenargs", ",", "*", "*", "kwargs", ")", ":", "if", "'stdout'", "in", "kwargs", ":", "raise", "ValueError", "(", "'stdout argument not allowed, it will be overridden.'", ")", "process", "=", "subprocess", ".", "Popen", "(", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "*", "popenargs", ",", "*", "*", "kwargs", ")", "output", ",", "error", "=", "process", ".", "communicate", "(", ")", "retcode", "=", "process", ".", "poll", "(", ")", "return", "output", ",", "error", ",", "retcode" ]
This function is used to obtain the stdout of a command. It is only used internally; we recommend using the make_external_call function if you want to call external executables.
[ "This", "function", "is", "used", "to", "obtain", "the", "stdout", "of", "a", "command", ".", "It", "is", "only", "used", "internally", "recommend", "using", "the", "make_external_call", "command", "if", "you", "want", "to", "call", "external", "executables", "." ]
python
train
44.357143
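Assuming the module above is importable, the return contract is a plain (stdout, stderr, returncode) triple:

from pycbc.workflow.core import check_output_error_and_retcode

out, err, code = check_output_error_and_retcode(['echo', 'hello'])
print(out)   # b'hello\n'
print(err)   # b''
print(code)  # 0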
mikeboers/Flask-ACL
flask_acl/extension.py
https://github.com/mikeboers/Flask-ACL/blob/7339b89f96ad8686d1526e25c138244ad912e12d/flask_acl/extension.py#L116-L144
def route_acl(self, *acl, **options): """Decorator to attach an ACL to a route. E.g:: @app.route('/url/to/view') @authz.route_acl(''' ALLOW WHEEL ALL DENY ANY ALL ''') def my_admin_function(): pass """ def _route_acl(func): func.__acl__ = acl @functools.wraps(func) def wrapped(*args, **kwargs): permission = 'http.' + request.method.lower() local_opts = options.copy() local_opts.setdefault('default', current_app.config['ACL_ROUTE_DEFAULT_STATE']) self.assert_can(permission, func, **local_opts) return func(*args, **kwargs) return wrapped return _route_acl
[ "def", "route_acl", "(", "self", ",", "*", "acl", ",", "*", "*", "options", ")", ":", "def", "_route_acl", "(", "func", ")", ":", "func", ".", "__acl__", "=", "acl", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "permission", "=", "'http.'", "+", "request", ".", "method", ".", "lower", "(", ")", "local_opts", "=", "options", ".", "copy", "(", ")", "local_opts", ".", "setdefault", "(", "'default'", ",", "current_app", ".", "config", "[", "'ACL_ROUTE_DEFAULT_STATE'", "]", ")", "self", ".", "assert_can", "(", "permission", ",", "func", ",", "*", "*", "local_opts", ")", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapped", "return", "_route_acl" ]
Decorator to attach an ACL to a route. E.g:: @app.route('/url/to/view') @authz.route_acl(''' ALLOW WHEEL ALL DENY ANY ALL ''') def my_admin_function(): pass
[ "Decorator", "to", "attach", "an", "ACL", "to", "a", "route", "." ]
python
train
28.034483
moluwole/Bast
bast/migration.py
https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/migration.py#L102-L125
def check_packages(db_name): """ Check if the driver for the user defined host is available. If it is not available, download it using PIP :param db_name: :return: """ print('Checking for required Database Driver') reqs = subprocess.check_output([sys.executable, '-m', 'pip', 'freeze']) installed_packages = [r.decode().split('==')[0] for r in reqs.split()] # print(installed_packages) if db_name.lower() == 'mysql': if 'PyMySQL' not in installed_packages: print('Installing required Database Driver') os.system('pip install pymysql') if db_name.lower() == 'postgresql': if 'psycopg2-binary' not in installed_packages: print('Installing required Database Driver') os.system('pip install psycopg2-binary') return True
[ "def", "check_packages", "(", "db_name", ")", ":", "print", "(", "'Checking for required Database Driver'", ")", "reqs", "=", "subprocess", ".", "check_output", "(", "[", "sys", ".", "executable", ",", "'-m'", ",", "'pip'", ",", "'freeze'", "]", ")", "installed_packages", "=", "[", "r", ".", "decode", "(", ")", ".", "split", "(", "'=='", ")", "[", "0", "]", "for", "r", "in", "reqs", ".", "split", "(", ")", "]", "# print(installed_packages)", "if", "db_name", ".", "lower", "(", ")", "==", "'mysql'", ":", "if", "'PyMySQL'", "not", "in", "installed_packages", ":", "print", "(", "'Installing required Database Driver'", ")", "os", ".", "system", "(", "'pip install pymysql'", ")", "if", "db_name", ".", "lower", "(", ")", "==", "'postgresql'", ":", "if", "'psycopg2-binary'", "not", "in", "installed_packages", ":", "print", "(", "'Installing required Database Driver'", ")", "os", ".", "system", "(", "'pip install psycopg2-binary'", ")", "return", "True" ]
Check if the driver for the user defined host is available. If it is not available, download it using PIP :param db_name: :return:
[ "Check", "if", "the", "driver", "for", "the", "user", "defined", "host", "is", "available", ".", "If", "it", "is", "not", "available", "download", "it", "using", "PIP", ":", "param", "db_name", ":", ":", "return", ":" ]
python
train
36.75
miguelgrinberg/python-socketio
socketio/server.py
https://github.com/miguelgrinberg/python-socketio/blob/c0c1bf8d21e3597389b18938550a0724dd9676b7/socketio/server.py#L377-L391
def get_session(self, sid, namespace=None): """Return the user session for a client. :param sid: The session id of the client. :param namespace: The Socket.IO namespace. If this argument is omitted the default namespace is used. The return value is a dictionary. Modifications made to this dictionary are not guaranteed to be preserved unless ``save_session()`` is called, or when the ``session`` context manager is used. """ namespace = namespace or '/' eio_session = self.eio.get_session(sid) return eio_session.setdefault(namespace, {})
[ "def", "get_session", "(", "self", ",", "sid", ",", "namespace", "=", "None", ")", ":", "namespace", "=", "namespace", "or", "'/'", "eio_session", "=", "self", ".", "eio", ".", "get_session", "(", "sid", ")", "return", "eio_session", ".", "setdefault", "(", "namespace", ",", "{", "}", ")" ]
Return the user session for a client. :param sid: The session id of the client. :param namespace: The Socket.IO namespace. If this argument is omitted the default namespace is used. The return value is a dictionary. Modifications made to this dictionary are not guaranteed to be preserved unless ``save_session()`` is called, or when the ``session`` context manager is used.
[ "Return", "the", "user", "session", "for", "a", "client", "." ]
python
train
42.733333
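A sketch of the two ways to persist session changes that the docstring describes (the event names and payload shape are illustrative):

import socketio

sio = socketio.Server()

@sio.on('login')
def login(sid, data):
    session = sio.get_session(sid)      # plain dict; mutations are not saved...
    session['username'] = data['username']
    sio.save_session(sid, session)      # ...unless written back explicitly

@sio.on('whoami')
def whoami(sid):
    with sio.session(sid) as session:   # context manager saves on exit
        return session.get('username')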
bluedynamics/cone.ugm
src/cone/ugm/model/localmanager.py
https://github.com/bluedynamics/cone.ugm/blob/3c197075f3f6e94781289311c5637bb9c8e5597c/src/cone/ugm/model/localmanager.py#L118-L125
def local_manager_rule(self): """Return rule for local manager. """ adm_gid = self.local_manager_gid if not adm_gid: return None config = self.root['settings']['ugm_localmanager'].attrs return config[adm_gid]
[ "def", "local_manager_rule", "(", "self", ")", ":", "adm_gid", "=", "self", ".", "local_manager_gid", "if", "not", "adm_gid", ":", "return", "None", "config", "=", "self", ".", "root", "[", "'settings'", "]", "[", "'ugm_localmanager'", "]", ".", "attrs", "return", "config", "[", "adm_gid", "]" ]
Return rule for local manager.
[ "Return", "rule", "for", "local", "manager", "." ]
python
train
32.625
broadinstitute/fiss
firecloud/fiss.py
https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L1163-L1167
def health(args): """ Health FireCloud Server """ r = fapi.health() fapi._check_response_code(r, 200) return r.content
[ "def", "health", "(", "args", ")", ":", "r", "=", "fapi", ".", "health", "(", ")", "fapi", ".", "_check_response_code", "(", "r", ",", "200", ")", "return", "r", ".", "content" ]
Health FireCloud Server
[ "Health", "FireCloud", "Server" ]
python
train
26
earlzo/hfut
examples/curriculum_calendar.py
https://github.com/earlzo/hfut/blob/09270a9647fba79f26fd1a8a3c53c0678b5257a1/examples/curriculum_calendar.py#L14-L47
def schedule2calendar(schedule, name='课葨', using_todo=True):
    """
    Convert a class schedule into an icalendar

    :param schedule: the class schedule
    :param name: calendar name
    :param using_todo: use ``icalendar.Todo`` instead of ``icalendar.Event`` as the component class
    :return: icalendar.Calendar()
    """
    # https://zh.wikipedia.org/wiki/ICalendar
    # http://icalendar.readthedocs.io/en/latest
    # https://tools.ietf.org/html/rfc5545
    cal = icalendar.Calendar()
    cal.add('X-WR-TIMEZONE', 'Asia/Shanghai')
    cal.add('X-WR-CALNAME', name)
    cls = icalendar.Todo if using_todo else icalendar.Event
    for week, start, end, data in schedule:
        # The "event" component is more widely supported; Google Calendar does not support "todo" components
        item = cls(
            SUMMARY='第{:02d}周-{}'.format(week, data),
            DTSTART=icalendar.vDatetime(start),
            DTEND=icalendar.vDatetime(end),
            DESCRIPTION='θ΅·ε§‹δΊŽ {}, η»“ζŸδΊŽ {}'.format(start.strftime('%H:%M'), end.strftime('%H:%M'))
        )
        now = datetime.now()
        # The STATUS property is not defined for "event" components; it is useful for todo-list applications
        # https://tools.ietf.org/html/rfc5545#section-3.2.12
        if using_todo:
            if start < now < end:
                item.add('STATUS', 'IN-PROCESS')
            elif now > end:
                item.add('STATUS', 'COMPLETED')
        cal.add_component(item)
    return cal
[ "def", "schedule2calendar", "(", "schedule", ",", "name", "=", "'课葨', us", "i", "g_todo=Tru", "e", "):", "", "", "# https://zh.wikipedia.org/wiki/ICalendar", "# http://icalendar.readthedocs.io/en/latest", "# https://tools.ietf.org/html/rfc5545", "cal", "=", "icalendar", ".", "Calendar", "(", ")", "cal", ".", "add", "(", "'X-WR-TIMEZONE'", ",", "'Asia/Shanghai'", ")", "cal", ".", "add", "(", "'X-WR-CALNAME'", ",", "name", ")", "cls", "=", "icalendar", ".", "Todo", "if", "using_todo", "else", "icalendar", ".", "Event", "for", "week", ",", "start", ",", "end", ",", "data", "in", "schedule", ":", "# \"δΊ‹δ»Ά\"η»„δ»Άζ›΄ε…·ι€šη”¨ζ€§, Google ζ—₯εŽ†δΈζ”―ζŒ\"εΎ…εŠž\"η»„δ»Ά", "item", "=", "cls", "(", "SUMMARY", "=", "'第{:02d}周-{}'.for", "m", "at(wee", "k", ", da", "t", "),", "", "", "DTSTART", "=", "icalendar", ".", "vDatetime", "(", "start", ")", ",", "DTEND", "=", "icalendar", ".", "vDatetime", "(", "end", ")", ",", "DESCRIPTION", "=", "'θ΅·ε§‹δΊŽ {}, η»“ζŸδΊŽ {}'.format(star", "t", ".strft", "i", "me('%", "H", ":%M'), e", "n", "d.strft", "i", "m", "('%", "H", ":%M'))", "", "", "", "", ")", "now", "=", "datetime", ".", "now", "(", ")", "# θΏ™δΈͺηŠΆζ€\"δΊ‹δ»Ά\"η»„δ»Άζ˜―ζ²‘ζœ‰ηš„, ε―ΉδΊŽεΎ…εŠžεˆ—θ‘¨η±»εΊ”η”¨ζœ‰δ½œη”¨", "# https://tools.ietf.org/html/rfc5545#section-3.2.12", "if", "using_todo", ":", "if", "start", "<", "now", "<", "end", ":", "item", ".", "add", "(", "'STATUS'", ",", "'IN-PROCESS'", ")", "elif", "now", ">", "end", ":", "item", ".", "add", "(", "'STATUS'", ",", "'COMPLETED'", ")", "cal", ".", "add_component", "(", "item", ")", "return", "cal" ]
ε°†δΈŠθ―Ύζ—Άι—΄θ‘¨θ½¬ζ’δΈΊ icalendar :param schedule: δΈŠθ―Ύζ—Άι—΄θ‘¨ :param name: ζ—₯εŽ†εη§° :param using_todo: 使用 ``icalendar.Todo`` θ€ŒδΈζ˜― ``icalendar.Event`` 作为活动类 :return: icalendar.Calendar()
[ "ε°†δΈŠθ―Ύζ—Άι—΄θ‘¨θ½¬ζ’δΈΊ", "icalendar" ]
python
train
36.735294
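A hypothetical driver for the converter above; the schedule tuples follow the (week, start, end, data) shape the loop expects:

from datetime import datetime

schedule = [
    (1, datetime(2017, 9, 4, 8, 0), datetime(2017, 9, 4, 9, 40), 'Calculus @ A101'),
    (1, datetime(2017, 9, 5, 10, 0), datetime(2017, 9, 5, 11, 40), 'Physics @ B203'),
]
cal = schedule2calendar(schedule, name='Fall 2017', using_todo=False)
with open('fall2017.ics', 'wb') as f:
    f.write(cal.to_ical())  # standard icalendar serialization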
polysquare/polysquare-generic-file-linter
polysquarelinter/spelling.py
https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/spelling.py#L61-L68
def clear_caches(): # suppress(unused-function) """Clear all caches.""" for _, reader in _spellchecker_cache.values(): reader.close() _spellchecker_cache.clear() _valid_words_cache.clear() _user_dictionary_cache.clear()
[ "def", "clear_caches", "(", ")", ":", "# suppress(unused-function)", "for", "_", ",", "reader", "in", "_spellchecker_cache", ".", "values", "(", ")", ":", "reader", ".", "close", "(", ")", "_spellchecker_cache", ".", "clear", "(", ")", "_valid_words_cache", ".", "clear", "(", ")", "_user_dictionary_cache", ".", "clear", "(", ")" ]
Clear all caches.
[ "Clear", "all", "caches", "." ]
python
train
30.25
rm-hull/luma.core
luma/core/legacy/__init__.py
https://github.com/rm-hull/luma.core/blob/034b628fb304a01e77732a299c0b42e94d6443db/luma/core/legacy/__init__.py#L33-L58
def text(draw, xy, txt, fill=None, font=None): """ Draw a legacy font starting at :py:attr:`x`, :py:attr:`y` using the prescribed fill and font. :param draw: A valid canvas to draw the text onto. :type draw: PIL.ImageDraw :param txt: The text string to display (must be ASCII only). :type txt: str :param xy: An ``(x, y)`` tuple denoting the top-left corner to draw the text. :type xy: tuple :param fill: The fill color to use (standard Pillow color name or RGB tuple). :param font: The font (from :py:mod:`luma.core.legacy.font`) to use. """ font = font or DEFAULT_FONT x, y = xy for ch in txt: for byte in font[ord(ch)]: for j in range(8): if byte & 0x01 > 0: draw.point((x, y + j), fill=fill) byte >>= 1 x += 1
[ "def", "text", "(", "draw", ",", "xy", ",", "txt", ",", "fill", "=", "None", ",", "font", "=", "None", ")", ":", "font", "=", "font", "or", "DEFAULT_FONT", "x", ",", "y", "=", "xy", "for", "ch", "in", "txt", ":", "for", "byte", "in", "font", "[", "ord", "(", "ch", ")", "]", ":", "for", "j", "in", "range", "(", "8", ")", ":", "if", "byte", "&", "0x01", ">", "0", ":", "draw", ".", "point", "(", "(", "x", ",", "y", "+", "j", ")", ",", "fill", "=", "fill", ")", "byte", ">>=", "1", "x", "+=", "1" ]
Draw a legacy font starting at :py:attr:`x`, :py:attr:`y` using the prescribed fill and font. :param draw: A valid canvas to draw the text onto. :type draw: PIL.ImageDraw :param txt: The text string to display (must be ASCII only). :type txt: str :param xy: An ``(x, y)`` tuple denoting the top-left corner to draw the text. :type xy: tuple :param fill: The fill color to use (standard Pillow color name or RGB tuple). :param font: The font (from :py:mod:`luma.core.legacy.font`) to use.
[ "Draw", "a", "legacy", "font", "starting", "at", ":", "py", ":", "attr", ":", "x", ":", "py", ":", "attr", ":", "y", "using", "the", "prescribed", "fill", "and", "font", "." ]
python
train
32.692308
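A minimal sketch of the helper in use; the in-memory dummy device is an assumption for running without hardware (swap in a real luma device otherwise):

from luma.core.device import dummy
from luma.core.render import canvas
from luma.core.legacy import text
from luma.core.legacy.font import CP437_FONT

device = dummy(width=128, height=64)
with canvas(device) as draw:
    text(draw, (0, 0), "HELLO", fill="white", font=CP437_FONT)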
CI-WATER/gsshapy
gsshapy/orm/evt.py
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/evt.py#L115-L120
def as_yml(self): """ Return yml compatible version of self """ return YmlFileEvent(name=str(self.name), subfolder=str(self.subfolder))
[ "def", "as_yml", "(", "self", ")", ":", "return", "YmlFileEvent", "(", "name", "=", "str", "(", "self", ".", "name", ")", ",", "subfolder", "=", "str", "(", "self", ".", "subfolder", ")", ")" ]
Return yml compatible version of self
[ "Return", "yml", "compatible", "version", "of", "self" ]
python
train
31.666667
eaton-lab/toytree
toytree/etemini.py
https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/toytree/etemini.py#L241-L274
def add_child(self, child=None, name=None, dist=None, support=None):
    """
    Adds a new child to this node. If child node is not supplied
    as an argument, a new node instance will be created.

    Parameters
    ----------
    child:
        the node instance to be added as a child.
    name:
        the name that will be given to the child.
    dist:
        the distance from the node to the child.
    support:
        the support value of child partition.

    Returns:
    --------
        The child node instance

    """
    if child is None:
        child = self.__class__()
    if name is not None:
        child.name = name
    if dist is not None:
        child.dist = dist
    if support is not None:
        child.support = support
    self.children.append(child)
    child.up = self
    return child
[ "def", "add_child", "(", "self", ",", "child", "=", "None", ",", "name", "=", "None", ",", "dist", "=", "None", ",", "support", "=", "None", ")", ":", "if", "child", "is", "None", ":", "child", "=", "self", ".", "__class__", "(", ")", "if", "name", "is", "not", "None", ":", "child", ".", "name", "=", "name", "if", "dist", "is", "not", "None", ":", "child", ".", "dist", "=", "dist", "if", "support", "is", "not", "None", ":", "child", ".", "support", "=", "support", "self", ".", "children", ".", "append", "(", "child", ")", "child", ".", "up", "=", "self", "return", "child" ]
Adds a new child to this node. If child node is not supplied as an argument, a new node instance will be created. Parameters ---------- child: the node instance to be added as a child. name: the name that will be given to the child. dist: the distance from the node to the child. support: the support value of child partition. Returns: -------- The child node instance
[ "Adds", "a", "new", "child", "to", "this", "node", ".", "If", "child", "node", "is", "not", "suplied", "as", "an", "argument", "a", "new", "node", "instance", "will", "be", "created", ".", "Parameters", "----------" ]
python
train
26.911765
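A self-contained stand-in showing the contract; Node here is a minimal class of mine, not toytree's:

class Node:
    def __init__(self):
        self.name, self.dist, self.support = None, 1.0, None
        self.children, self.up = [], None

    def add_child(self, child=None, name=None, dist=None, support=None):
        # Same behavior as above: create a node when none is given,
        # apply the optional attributes, and link both directions.
        if child is None:
            child = self.__class__()
        if name is not None:
            child.name = name
        if dist is not None:
            child.dist = dist
        if support is not None:
            child.support = support
        self.children.append(child)
        child.up = self
        return child

root = Node()
clade = root.add_child(name="BC", dist=0.5, support=0.99)
clade.add_child(name="B", dist=0.5)
clade.add_child(name="C", dist=0.5)
root.add_child(name="A", dist=1.0)
print([c.name for c in root.children])  # ['BC', 'A']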
zsimic/runez
src/runez/click.py
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/click.py#L70-L76
def version(*args, **attrs): """Show the version and exit.""" if hasattr(sys, "_getframe"): package = attrs.pop("package", sys._getframe(1).f_globals.get("__package__")) if package: attrs.setdefault("version", get_version(package)) return click.version_option(*args, **attrs)
[ "def", "version", "(", "*", "args", ",", "*", "*", "attrs", ")", ":", "if", "hasattr", "(", "sys", ",", "\"_getframe\"", ")", ":", "package", "=", "attrs", ".", "pop", "(", "\"package\"", ",", "sys", ".", "_getframe", "(", "1", ")", ".", "f_globals", ".", "get", "(", "\"__package__\"", ")", ")", "if", "package", ":", "attrs", ".", "setdefault", "(", "\"version\"", ",", "get_version", "(", "package", ")", ")", "return", "click", ".", "version_option", "(", "*", "args", ",", "*", "*", "attrs", ")" ]
Show the version and exit.
[ "Show", "the", "version", "and", "exit", "." ]
python
train
44.142857
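A sketch of attaching the decorator to a click command; passing version explicitly sidesteps the package lookup, which otherwise falls back to the caller's __package__:

import click
from runez.click import version

@click.command()
@version(version="1.2.3")  # explicit value; setdefault keeps it as-is
def main():
    click.echo("running")

if __name__ == "__main__":
    main()  # `--version` now prints "main, version 1.2.3"-style output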
gwastro/pycbc
pycbc/inference/models/base.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/models/base.py#L577-L607
def prior_rvs(self, size=1, prior=None): """Returns random variates drawn from the prior. If the ``sampling_params`` are different from the ``variable_params``, the variates are transformed to the `sampling_params` parameter space before being returned. Parameters ---------- size : int, optional Number of random values to return for each parameter. Default is 1. prior : JointDistribution, optional Use the given prior to draw values rather than the saved prior. Returns ------- FieldArray A field array of the random values. """ # draw values from the prior if prior is None: prior = self.prior_distribution p0 = prior.rvs(size=size) # transform if necessary if self.sampling_transforms is not None: ptrans = self.sampling_transforms.apply(p0) # pull out the sampling args p0 = FieldArray.from_arrays([ptrans[arg] for arg in self.sampling_params], names=self.sampling_params) return p0
[ "def", "prior_rvs", "(", "self", ",", "size", "=", "1", ",", "prior", "=", "None", ")", ":", "# draw values from the prior", "if", "prior", "is", "None", ":", "prior", "=", "self", ".", "prior_distribution", "p0", "=", "prior", ".", "rvs", "(", "size", "=", "size", ")", "# transform if necessary", "if", "self", ".", "sampling_transforms", "is", "not", "None", ":", "ptrans", "=", "self", ".", "sampling_transforms", ".", "apply", "(", "p0", ")", "# pull out the sampling args", "p0", "=", "FieldArray", ".", "from_arrays", "(", "[", "ptrans", "[", "arg", "]", "for", "arg", "in", "self", ".", "sampling_params", "]", ",", "names", "=", "self", ".", "sampling_params", ")", "return", "p0" ]
Returns random variates drawn from the prior. If the ``sampling_params`` are different from the ``variable_params``, the variates are transformed to the `sampling_params` parameter space before being returned. Parameters ---------- size : int, optional Number of random values to return for each parameter. Default is 1. prior : JointDistribution, optional Use the given prior to draw values rather than the saved prior. Returns ------- FieldArray A field array of the random values.
[ "Returns", "random", "variates", "drawn", "from", "the", "prior", "." ]
python
train
37.741935
opendatateam/udata
udata/models/__init__.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/models/__init__.py#L60-L79
def resolve_model(self, model):
    '''
    Resolve a model given a name or dict with `class` entry.

    :raises ValueError: model specification is wrong or does not exist
    '''
    if not model:
        raise ValueError('Unsupported model specifications')
    if isinstance(model, basestring):
        classname = model
    elif isinstance(model, dict) and 'class' in model:
        classname = model['class']
    else:
        raise ValueError('Unsupported model specifications')
    try:
        return get_document(classname)
    except self.NotRegistered:
        message = 'Model "{0}" does not exist'.format(classname)
        raise ValueError(message)
[ "def", "resolve_model", "(", "self", ",", "model", ")", ":", "if", "not", "model", ":", "raise", "ValueError", "(", "'Unsupported model specifications'", ")", "if", "isinstance", "(", "model", ",", "basestring", ")", ":", "classname", "=", "model", "elif", "isinstance", "(", "model", ",", "dict", ")", "and", "'class'", "in", "model", ":", "classname", "=", "model", "[", "'class'", "]", "else", ":", "raise", "ValueError", "(", "'Unsupported model specifications'", ")", "try", ":", "return", "get_document", "(", "classname", ")", "except", "self", ".", "NotRegistered", ":", "message", "=", "'Model \"{0}\" does not exist'", ".", "format", "(", "classname", ")", "raise", "ValueError", "(", "message", ")" ]
Resolve a model given a name or dict with `class` entry. :raises ValueError: model specification is wrong or does not exist
[ "Resolve", "a", "model", "given", "a", "name", "or", "dict", "with", "class", "entry", "." ]
python
train
35.65
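The same resolution logic in isolation, with a toy registry standing in for mongoengine's get_document (class names here are mine):

class ModelResolver:
    NotRegistered = KeyError
    registry = {}

    def resolve_model(self, model):
        if not model:
            raise ValueError('Unsupported model specifications')
        if isinstance(model, str):
            classname = model
        elif isinstance(model, dict) and 'class' in model:
            classname = model['class']
        else:
            raise ValueError('Unsupported model specifications')
        try:
            return self.registry[classname]
        except self.NotRegistered:
            raise ValueError('Model "{0}" does not exist'.format(classname))

class Dataset(object):
    pass

resolver = ModelResolver()
resolver.registry['Dataset'] = Dataset
print(resolver.resolve_model('Dataset'))             # <class '__main__.Dataset'>
print(resolver.resolve_model({'class': 'Dataset'}))  # dict form resolves identically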
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/runsnakerun/listviews.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/listviews.py#L227-L236
def reorder(self, single_column=False): """Force a reorder of the displayed items""" if single_column: columns = self.sortOrder[:1] else: columns = self.sortOrder for ascending,column in columns[::-1]: # Python 2.2+ guarantees stable sort, so sort by each column in reverse # order will order by the assigned columns self.sorted.sort( key=column.get, reverse=(not ascending))
[ "def", "reorder", "(", "self", ",", "single_column", "=", "False", ")", ":", "if", "single_column", ":", "columns", "=", "self", ".", "sortOrder", "[", ":", "1", "]", "else", ":", "columns", "=", "self", ".", "sortOrder", "for", "ascending", ",", "column", "in", "columns", "[", ":", ":", "-", "1", "]", ":", "# Python 2.2+ guarantees stable sort, so sort by each column in reverse ", "# order will order by the assigned columns ", "self", ".", "sorted", ".", "sort", "(", "key", "=", "column", ".", "get", ",", "reverse", "=", "(", "not", "ascending", ")", ")" ]
Force a reorder of the displayed items
[ "Force", "a", "reorder", "of", "the", "displayed", "items" ]
python
train
46
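The stable-sort trick above in isolation: sorting repeatedly by each key, least-significant first, produces a multi-column order (the row data and key getters are illustrative):

rows = [
    {'name': 'b', 'calls': 10},
    {'name': 'a', 'calls': 10},
    {'name': 'c', 'calls': 5},
]
# Desired order: calls descending, then name ascending as a tie-breaker.
sort_order = [(False, lambda r: r['calls']), (True, lambda r: r['name'])]
for ascending, key in sort_order[::-1]:
    rows.sort(key=key, reverse=not ascending)  # stable, so earlier passes survive ties
print([r['name'] for r in rows])  # ['a', 'b', 'c']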
brocade/pynos
pynos/versions/base/yang/brocade_ras.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/yang/brocade_ras.py#L293-L304
def bna_config_cmd_output_status_string(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") bna_config_cmd = ET.Element("bna_config_cmd") config = bna_config_cmd output = ET.SubElement(bna_config_cmd, "output") status_string = ET.SubElement(output, "status-string") status_string.text = kwargs.pop('status_string') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "bna_config_cmd_output_status_string", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "bna_config_cmd", "=", "ET", ".", "Element", "(", "\"bna_config_cmd\"", ")", "config", "=", "bna_config_cmd", "output", "=", "ET", ".", "SubElement", "(", "bna_config_cmd", ",", "\"output\"", ")", "status_string", "=", "ET", ".", "SubElement", "(", "output", ",", "\"status-string\"", ")", "status_string", ".", "text", "=", "kwargs", ".", "pop", "(", "'status_string'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
40
BerkeleyAutomation/autolab_core
autolab_core/learning_analysis.py
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/learning_analysis.py#L515-L541
def accuracy_curve(self, delta_tau=0.01): """ Computes the relationship between probability threshold and classification accuracy. """ # compute thresholds based on the sorted probabilities orig_thresh = self.threshold sorted_labels, sorted_probs = self.sorted_values scores = [] taus = [] tau = 0 for k in range(len(sorted_labels)): # compute new accuracy self.threshold = tau scores.append(self.accuracy) taus.append(tau) # update threshold tau = sorted_probs[k] # add last datapoint tau = 1.0 self.threshold = tau scores.append(self.accuracy) taus.append(tau) self.threshold = orig_thresh return scores, taus
[ "def", "accuracy_curve", "(", "self", ",", "delta_tau", "=", "0.01", ")", ":", "# compute thresholds based on the sorted probabilities", "orig_thresh", "=", "self", ".", "threshold", "sorted_labels", ",", "sorted_probs", "=", "self", ".", "sorted_values", "scores", "=", "[", "]", "taus", "=", "[", "]", "tau", "=", "0", "for", "k", "in", "range", "(", "len", "(", "sorted_labels", ")", ")", ":", "# compute new accuracy", "self", ".", "threshold", "=", "tau", "scores", ".", "append", "(", "self", ".", "accuracy", ")", "taus", ".", "append", "(", "tau", ")", "# update threshold", "tau", "=", "sorted_probs", "[", "k", "]", "# add last datapoint", "tau", "=", "1.0", "self", ".", "threshold", "=", "tau", "scores", ".", "append", "(", "self", ".", "accuracy", ")", "taus", ".", "append", "(", "tau", ")", "self", ".", "threshold", "=", "orig_thresh", "return", "scores", ",", "taus" ]
Computes the relationship between probability threshold and classification accuracy.
[ "Computes", "the", "relationship", "between", "probability", "threshold", "and", "classification", "accuracy", "." ]
python
train
29.222222
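A standalone equivalent of the sweep, assuming a prediction is positive when its probability meets the threshold (the method above mutates self.threshold instead):

import numpy as np

def accuracy_curve(labels, probs):
    order = np.argsort(probs)
    labels = np.asarray(labels)[order]
    probs = np.asarray(probs)[order]
    taus = np.concatenate(([0.0], probs, [1.0]))
    # Accuracy of "predict 1 iff p >= tau" at each candidate threshold.
    scores = [float(np.mean((probs >= tau) == labels)) for tau in taus]
    return scores, taus

scores, taus = accuracy_curve([0, 0, 1, 1], [0.1, 0.4, 0.35, 0.8])
print(max(scores))  # 0.75 -- best accuracy over all thresholds for this toy data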
saltstack/salt
salt/runners/net.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/net.py#L183-L213
def _find_interfaces_ip(mac): ''' Helper to search the interfaces IPs using the MAC address. ''' try: mac = napalm_helpers.convert(napalm_helpers.mac, mac) except AddrFormatError: return ('', '', []) all_interfaces = _get_mine('net.interfaces') all_ipaddrs = _get_mine('net.ipaddrs') for device, device_interfaces in six.iteritems(all_interfaces): if not device_interfaces.get('result', False): continue for interface, interface_details in six.iteritems(device_interfaces.get('out', {})): try: interface_mac = napalm_helpers.convert(napalm_helpers.mac, interface_details.get('mac_address')) except AddrFormatError: continue if mac != interface_mac: continue interface_ipaddrs = all_ipaddrs.get(device, {}).get('out', {}).get(interface, {}) ip_addresses = interface_ipaddrs.get('ipv4', {}) ip_addresses.update(interface_ipaddrs.get('ipv6', {})) interface_ips = ['{0}/{1}'.format(ip_addr, addr_details.get('prefix_length', '32')) for ip_addr, addr_details in six.iteritems(ip_addresses)] return device, interface, interface_ips return ('', '', [])
[ "def", "_find_interfaces_ip", "(", "mac", ")", ":", "try", ":", "mac", "=", "napalm_helpers", ".", "convert", "(", "napalm_helpers", ".", "mac", ",", "mac", ")", "except", "AddrFormatError", ":", "return", "(", "''", ",", "''", ",", "[", "]", ")", "all_interfaces", "=", "_get_mine", "(", "'net.interfaces'", ")", "all_ipaddrs", "=", "_get_mine", "(", "'net.ipaddrs'", ")", "for", "device", ",", "device_interfaces", "in", "six", ".", "iteritems", "(", "all_interfaces", ")", ":", "if", "not", "device_interfaces", ".", "get", "(", "'result'", ",", "False", ")", ":", "continue", "for", "interface", ",", "interface_details", "in", "six", ".", "iteritems", "(", "device_interfaces", ".", "get", "(", "'out'", ",", "{", "}", ")", ")", ":", "try", ":", "interface_mac", "=", "napalm_helpers", ".", "convert", "(", "napalm_helpers", ".", "mac", ",", "interface_details", ".", "get", "(", "'mac_address'", ")", ")", "except", "AddrFormatError", ":", "continue", "if", "mac", "!=", "interface_mac", ":", "continue", "interface_ipaddrs", "=", "all_ipaddrs", ".", "get", "(", "device", ",", "{", "}", ")", ".", "get", "(", "'out'", ",", "{", "}", ")", ".", "get", "(", "interface", ",", "{", "}", ")", "ip_addresses", "=", "interface_ipaddrs", ".", "get", "(", "'ipv4'", ",", "{", "}", ")", "ip_addresses", ".", "update", "(", "interface_ipaddrs", ".", "get", "(", "'ipv6'", ",", "{", "}", ")", ")", "interface_ips", "=", "[", "'{0}/{1}'", ".", "format", "(", "ip_addr", ",", "addr_details", ".", "get", "(", "'prefix_length'", ",", "'32'", ")", ")", "for", "ip_addr", ",", "addr_details", "in", "six", ".", "iteritems", "(", "ip_addresses", ")", "]", "return", "device", ",", "interface", ",", "interface_ips", "return", "(", "''", ",", "''", ",", "[", "]", ")" ]
Helper to search the interfaces IPs using the MAC address.
[ "Helper", "to", "search", "the", "interfaces", "IPs", "using", "the", "MAC", "address", "." ]
python
train
42.483871
saltstack/salt
salt/states/onyx.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/onyx.py#L153-L197
def user_absent(name): ''' Ensure a user is not present name username to remove if it exists Examples: .. code-block:: yaml delete: onyx.user_absent: - name: daniel ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} old_user = __salt__['onyx.cmd']('get_user', username=name) if not old_user: ret['result'] = True ret['comment'] = 'User does not exist' return ret if __opts__['test'] is True and old_user: ret['result'] = None ret['comment'] = 'User will be removed' ret['changes']['old'] = old_user ret['changes']['new'] = '' return ret __salt__['onyx.cmd']('remove_user', username=name) if __salt__['onyx.cmd']('get_user', username=name): ret['comment'] = 'Failed to remove user' else: ret['result'] = True ret['comment'] = 'User removed' ret['changes']['old'] = old_user ret['changes']['new'] = '' return ret
[ "def", "user_absent", "(", "name", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "False", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", "}", "old_user", "=", "__salt__", "[", "'onyx.cmd'", "]", "(", "'get_user'", ",", "username", "=", "name", ")", "if", "not", "old_user", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'User does not exist'", "return", "ret", "if", "__opts__", "[", "'test'", "]", "is", "True", "and", "old_user", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "'User will be removed'", "ret", "[", "'changes'", "]", "[", "'old'", "]", "=", "old_user", "ret", "[", "'changes'", "]", "[", "'new'", "]", "=", "''", "return", "ret", "__salt__", "[", "'onyx.cmd'", "]", "(", "'remove_user'", ",", "username", "=", "name", ")", "if", "__salt__", "[", "'onyx.cmd'", "]", "(", "'get_user'", ",", "username", "=", "name", ")", ":", "ret", "[", "'comment'", "]", "=", "'Failed to remove user'", "else", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'User removed'", "ret", "[", "'changes'", "]", "[", "'old'", "]", "=", "old_user", "ret", "[", "'changes'", "]", "[", "'new'", "]", "=", "''", "return", "ret" ]
Ensure a user is not present name username to remove if it exists Examples: .. code-block:: yaml delete: onyx.user_absent: - name: daniel
[ "Ensure", "a", "user", "is", "not", "present" ]
python
train
22.844444
stuaxo/vext
setup.py
https://github.com/stuaxo/vext/blob/fa98a21ecfbbc1c3d1b84085d69ec42defdd2f69/setup.py#L151-L197
def package_info(self):
    """
    :return: list of package info on installed packages
    """
    import subprocess
    # create a commandline like pip show Pillow show
    package_names = self.installed_packages()
    if not package_names:
        # No installed packages yet, so nothing to do here...
        return []

    cmdline = [sys.executable, "-mpip"]
    for name in package_names:
        cmdline.extend(["show", name])

    output = subprocess.check_output(cmdline)
    # Python 3 fix
    if not isinstance(output, str):
        # Some package info is encoded in Latin-1 or something other than
        # UTF8. Replace non-UTF characters with '?' instead of crashing.
        output = str(output, encoding='UTF-8', errors='replace')

    # parse output that looks like this example
    """
    ---
    Name: Pillow
    Version: 2.8.1
    Location: /mnt/data/home/stu/.virtualenvs/shoebot-setup/lib/python2.7/site-packages/Pillow-2.8.1-py2.7-linux-x86_64.egg
    Requires:
    ---
    Name: vext.gi
    Version: 0.5.6.25
    Location: /mnt/data/home/stu/.virtualenvs/shoebot-setup/lib/python2.7/site-packages/vext.gi-0.5.6.25-py2.7.egg
    Requires: vext

    """
    results = []
    for info in output[3:].split("---"):
        d = {}
        for line in info[1:].splitlines():
            arg, _, value = line.partition(': ')
            arg = arg.lower()
            if arg == 'requires':
                value = value.split(', ')
            d[arg] = value
        results.append(d)

    return results
[ "def", "package_info", "(", "self", ")", ":", "import", "subprocess", "# create a commandline like pip show Pillow show", "package_names", "=", "self", ".", "installed_packages", "(", ")", "if", "not", "package_names", ":", "# No installed packages yet, so nothign to do here...", "return", "[", "]", "cmdline", "=", "[", "sys", ".", "executable", ",", "\"-mpip\"", "]", "for", "name", "in", "package_names", ":", "cmdline", ".", "extend", "(", "[", "\"show\"", ",", "name", "]", ")", "output", "=", "subprocess", ".", "check_output", "(", "cmdline", ")", "# Python 3 fix", "if", "not", "isinstance", "(", "output", ",", "str", ")", ":", "# Some package info is encoded in Latin-1 or something other than", "# UTF8. Replace non-UTF characters with '?' instead of crashing.", "output", "=", "str", "(", "output", ",", "encoding", "=", "'UTF-8'", ",", "errors", "=", "'replace'", ")", "# parse output that looks like this example", "\"\"\"\n ---\n Name: Pillow\n Version: 2.8.1\n Location: /mnt/data/home/stu/.virtualenvs/shoebot-setup/lib/python2.7/site-packages/Pillow-2.8.1-py2.7-linux-x86_64.egg\n Requires:\n ---\n Name: vext.gi\n Version: 0.5.6.25\n Location: /mnt/data/home/stu/.virtualenvs/shoebot-setup/lib/python2.7/site-packages/vext.gi-0.5.6.25-py2.7.egg\n Requires: vext\n\n \"\"\"", "results", "=", "[", "]", "for", "info", "in", "output", "[", "3", ":", "]", ".", "split", "(", "\"---\"", ")", ":", "d", "=", "{", "}", "for", "line", "in", "info", "[", "1", ":", "]", ".", "splitlines", "(", ")", ":", "arg", ",", "_", ",", "value", "=", "line", ".", "partition", "(", "': '", ")", "arg", "=", "arg", ".", "lower", "(", ")", "if", "arg", "==", "'requires'", ":", "value", "=", "value", ".", "split", "(", "', '", ")", "d", "[", "arg", "]", "=", "value", "results", ".", "append", "(", "d", ")", "return", "results" ]
:return: list of package info on installed packages
[ ":", "return", ":", "list", "of", "package", "info", "on", "installed", "packages" ]
python
train
34.978723
spyder-ide/spyder
spyder/plugins/variableexplorer/widgets/namespacebrowser.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/variableexplorer/widgets/namespacebrowser.py#L153-L171
def setup_toolbar(self): """Setup toolbar""" load_button = create_toolbutton(self, text=_('Import data'), icon=ima.icon('fileimport'), triggered=lambda: self.import_data()) self.save_button = create_toolbutton(self, text=_("Save data"), icon=ima.icon('filesave'), triggered=lambda: self.save_data(self.filename)) self.save_button.setEnabled(False) save_as_button = create_toolbutton(self, text=_("Save data as..."), icon=ima.icon('filesaveas'), triggered=self.save_data) reset_namespace_button = create_toolbutton( self, text=_("Remove all variables"), icon=ima.icon('editdelete'), triggered=self.reset_namespace) return [load_button, self.save_button, save_as_button, reset_namespace_button]
[ "def", "setup_toolbar", "(", "self", ")", ":", "load_button", "=", "create_toolbutton", "(", "self", ",", "text", "=", "_", "(", "'Import data'", ")", ",", "icon", "=", "ima", ".", "icon", "(", "'fileimport'", ")", ",", "triggered", "=", "lambda", ":", "self", ".", "import_data", "(", ")", ")", "self", ".", "save_button", "=", "create_toolbutton", "(", "self", ",", "text", "=", "_", "(", "\"Save data\"", ")", ",", "icon", "=", "ima", ".", "icon", "(", "'filesave'", ")", ",", "triggered", "=", "lambda", ":", "self", ".", "save_data", "(", "self", ".", "filename", ")", ")", "self", ".", "save_button", ".", "setEnabled", "(", "False", ")", "save_as_button", "=", "create_toolbutton", "(", "self", ",", "text", "=", "_", "(", "\"Save data as...\"", ")", ",", "icon", "=", "ima", ".", "icon", "(", "'filesaveas'", ")", ",", "triggered", "=", "self", ".", "save_data", ")", "reset_namespace_button", "=", "create_toolbutton", "(", "self", ",", "text", "=", "_", "(", "\"Remove all variables\"", ")", ",", "icon", "=", "ima", ".", "icon", "(", "'editdelete'", ")", ",", "triggered", "=", "self", ".", "reset_namespace", ")", "return", "[", "load_button", ",", "self", ".", "save_button", ",", "save_as_button", ",", "reset_namespace_button", "]" ]
Setup toolbar
[ "Setup", "toolbar" ]
python
train
55.894737
olitheolix/qtmacs
qtmacs/qtmacsmain_macros.py
https://github.com/olitheolix/qtmacs/blob/36253b082b82590f183fe154b053eb3a1e741be2/qtmacs/qtmacsmain_macros.py#L254-L277
def qteStartRecordingHook(self, msgObj): """ Commence macro recording. Macros are recorded by connecting to the 'keypressed' signal it emits. If the recording has already commenced, or if this method was called during a macro replay, then return immediately. """ if self.qteRecording: self.qteMain.qteStatus('Macro recording already enabled') return # Update status flag. self.qteRecording = True # Reset the variables. self.qteMain.qteStatus('Macro recording started') self.recorded_keysequence = QtmacsKeysequence() # Connect the 'keypressed' and 'abort' signals. self.qteMain.qtesigKeyparsed.connect(self.qteKeyPress) self.qteMain.qtesigAbort.connect(self.qteStopRecordingHook)
[ "def", "qteStartRecordingHook", "(", "self", ",", "msgObj", ")", ":", "if", "self", ".", "qteRecording", ":", "self", ".", "qteMain", ".", "qteStatus", "(", "'Macro recording already enabled'", ")", "return", "# Update status flag.", "self", ".", "qteRecording", "=", "True", "# Reset the variables.", "self", ".", "qteMain", ".", "qteStatus", "(", "'Macro recording started'", ")", "self", ".", "recorded_keysequence", "=", "QtmacsKeysequence", "(", ")", "# Connect the 'keypressed' and 'abort' signals.", "self", ".", "qteMain", ".", "qtesigKeyparsed", ".", "connect", "(", "self", ".", "qteKeyPress", ")", "self", ".", "qteMain", ".", "qtesigAbort", ".", "connect", "(", "self", ".", "qteStopRecordingHook", ")" ]
Commence macro recording. Macros are recorded by connecting to the 'keypressed' signal it emits. If the recording has already commenced, or if this method was called during a macro replay, then return immediately.
[ "Commence", "macro", "recording", "." ]
python
train
33.916667
collectiveacuity/labPack
labpack/storage/google/drive.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/google/drive.py#L376-L392
def _walk(self, root_path='', root_id=''): ''' a generator method which walks the file structure of the Google Drive collection ''' title = '%s._walk' % self.__class__.__name__ if root_id: pass elif root_path: root_id, root_parent = self._get_id(root_path) for file_id, name, mimetype in self._list_directory(root_id): file_path = os.path.join(root_path, name) if mimetype == 'application/vnd.google-apps.folder': for path, id in self._walk(root_path=file_path, root_id=file_id): yield path, id else: yield file_path, file_id
[ "def", "_walk", "(", "self", ",", "root_path", "=", "''", ",", "root_id", "=", "''", ")", ":", "title", "=", "'%s._walk'", "%", "self", ".", "__class__", ".", "__name__", "if", "root_id", ":", "pass", "elif", "root_path", ":", "root_id", ",", "root_parent", "=", "self", ".", "_get_id", "(", "root_path", ")", "for", "file_id", ",", "name", ",", "mimetype", "in", "self", ".", "_list_directory", "(", "root_id", ")", ":", "file_path", "=", "os", ".", "path", ".", "join", "(", "root_path", ",", "name", ")", "if", "mimetype", "==", "'application/vnd.google-apps.folder'", ":", "for", "path", ",", "id", "in", "self", ".", "_walk", "(", "root_path", "=", "file_path", ",", "root_id", "=", "file_id", ")", ":", "yield", "path", ",", "id", "else", ":", "yield", "file_path", ",", "file_id" ]
a generator method which walks the file structure of the Google Drive collection
[ "a", "generator", "method", "which", "walks", "the", "file", "structure", "of", "the", "dropbox", "collection" ]
python
train
40.176471
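The recursive-generator pattern in _walk (re-yielding every pair produced by the nested call) can be exercised without Drive credentials; the TREE dict below is a made-up stand-in for _list_directory results:

import posixpath

TREE = {  # hypothetical folder id -> [(child id, name, is_folder)]
    'root': [('1', 'docs', True), ('2', 'a.txt', False)],
    '1': [('3', 'b.txt', False)],
}

def walk(root_path='', root_id='root'):
    for file_id, name, is_folder in TREE.get(root_id, []):
        file_path = posixpath.join(root_path, name)
        if is_folder:
            for path, id_ in walk(file_path, file_id):  # 'yield from' also works
                yield path, id_
        else:
            yield file_path, file_id

print(list(walk()))  # [('docs/b.txt', '3'), ('a.txt', '2')]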
pandas-dev/pandas
pandas/io/html.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/html.py#L375-L420
def _parse_thead_tbody_tfoot(self, table_html): """ Given a table, return parsed header, body, and foot. Parameters ---------- table_html : node-like Returns ------- tuple of (header, body, footer), each a list of list-of-text rows. Notes ----- Header and body are lists-of-lists. Top level list is a list of rows. Each row is a list of str text. Logic: Use <thead>, <tbody>, <tfoot> elements to identify header, body, and footer, otherwise: - Put all rows into body - Move rows from top of body to header only if all elements inside row are <th> - Move rows from bottom of body to footer only if all elements inside row are <th> """ header_rows = self._parse_thead_tr(table_html) body_rows = self._parse_tbody_tr(table_html) footer_rows = self._parse_tfoot_tr(table_html) def row_is_all_th(row): return all(self._equals_tag(t, 'th') for t in self._parse_td(row)) if not header_rows: # The table has no <thead>. Move the top all-<th> rows from # body_rows to header_rows. (This is a common case because many # tables in the wild have no <thead> or <tfoot> while body_rows and row_is_all_th(body_rows[0]): header_rows.append(body_rows.pop(0)) header = self._expand_colspan_rowspan(header_rows) body = self._expand_colspan_rowspan(body_rows) footer = self._expand_colspan_rowspan(footer_rows) return header, body, footer
[ "def", "_parse_thead_tbody_tfoot", "(", "self", ",", "table_html", ")", ":", "header_rows", "=", "self", ".", "_parse_thead_tr", "(", "table_html", ")", "body_rows", "=", "self", ".", "_parse_tbody_tr", "(", "table_html", ")", "footer_rows", "=", "self", ".", "_parse_tfoot_tr", "(", "table_html", ")", "def", "row_is_all_th", "(", "row", ")", ":", "return", "all", "(", "self", ".", "_equals_tag", "(", "t", ",", "'th'", ")", "for", "t", "in", "self", ".", "_parse_td", "(", "row", ")", ")", "if", "not", "header_rows", ":", "# The table has no <thead>. Move the top all-<th> rows from", "# body_rows to header_rows. (This is a common case because many", "# tables in the wild have no <thead> or <tfoot>", "while", "body_rows", "and", "row_is_all_th", "(", "body_rows", "[", "0", "]", ")", ":", "header_rows", ".", "append", "(", "body_rows", ".", "pop", "(", "0", ")", ")", "header", "=", "self", ".", "_expand_colspan_rowspan", "(", "header_rows", ")", "body", "=", "self", ".", "_expand_colspan_rowspan", "(", "body_rows", ")", "footer", "=", "self", ".", "_expand_colspan_rowspan", "(", "footer_rows", ")", "return", "header", ",", "body", ",", "footer" ]
Given a table, return parsed header, body, and foot. Parameters ---------- table_html : node-like Returns ------- tuple of (header, body, footer), each a list of list-of-text rows. Notes ----- Header and body are lists-of-lists. Top level list is a list of rows. Each row is a list of str text. Logic: Use <thead>, <tbody>, <tfoot> elements to identify header, body, and footer, otherwise: - Put all rows into body - Move rows from top of body to header only if all elements inside row are <th> - Move rows from bottom of body to footer only if all elements inside row are <th>
[ "Given", "a", "table", "return", "parsed", "header", "body", "and", "foot", "." ]
python
train
36.065217
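The fallback header-promotion logic is easy to check in isolation; rows here are lists of (tag, text) pairs standing in for parsed <tr> elements:

def split_header(body_rows):
    # Promote leading all-<th> rows from body to header, as the parser
    # above does when the table has no <thead>.
    def row_is_all_th(row):
        return all(tag == 'th' for tag, _ in row)
    header_rows = []
    while body_rows and row_is_all_th(body_rows[0]):
        header_rows.append(body_rows.pop(0))
    return header_rows, body_rows

rows = [[('th', 'name'), ('th', 'age')],
        [('td', 'ada'), ('td', '36')]]
header, body = split_header(rows)
print(header)  # [[('th', 'name'), ('th', 'age')]]
print(body)    # [[('td', 'ada'), ('td', '36')]]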
frascoweb/frasco
frasco/actions/loaders.py
https://github.com/frascoweb/frasco/blob/ea519d69dd5ca6deaf3650175692ee4a1a02518f/frasco/actions/loaders.py#L70-L84
def create_action_from_dict(name, spec, base_class=ActionsAction, metaclass=type, pop_keys=False): """Creates an action class based on a dict loaded using load_grouped_actions() """ actions = load_grouped_actions(spec, pop_keys=pop_keys) attrs = {"actions": actions, "name": name} if "as" in spec: attrs["as_"] = spec["as"] if pop_keys: del spec["as"] for k in ("requires", "methods", "defaults", "default_option"): if k in spec: attrs[k] = spec[k] if pop_keys: del spec[k] return metaclass(name, (base_class,), attrs)
[ "def", "create_action_from_dict", "(", "name", ",", "spec", ",", "base_class", "=", "ActionsAction", ",", "metaclass", "=", "type", ",", "pop_keys", "=", "False", ")", ":", "actions", "=", "load_grouped_actions", "(", "spec", ",", "pop_keys", "=", "pop_keys", ")", "attrs", "=", "{", "\"actions\"", ":", "actions", ",", "\"name\"", ":", "name", "}", "if", "\"as\"", "in", "spec", ":", "attrs", "[", "\"as_\"", "]", "=", "spec", "[", "\"as\"", "]", "if", "pop_keys", ":", "del", "spec", "[", "\"as\"", "]", "for", "k", "in", "(", "\"requires\"", ",", "\"methods\"", ",", "\"defaults\"", ",", "\"default_option\"", ")", ":", "if", "k", "in", "spec", ":", "attrs", "[", "k", "]", "=", "spec", "[", "k", "]", "if", "pop_keys", ":", "del", "spec", "[", "k", "]", "return", "metaclass", "(", "name", ",", "(", "base_class", ",", ")", ",", "attrs", ")" ]
Creates an action class based on a dict loaded using load_grouped_actions()
[ "Creates", "an", "action", "class", "based", "on", "a", "dict", "loaded", "using", "load_grouped_actions", "()" ]
python
train
40.533333
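The final line is the interesting one: calling a metaclass (plain type by default) with a name, a bases tuple, and an attrs dict builds a class at runtime. A pared-down sketch, where BaseAction and the spec keys are stand-ins rather than frasco's real ones:

class BaseAction:
    def run(self):
        return self.name

def action_from_dict(name, spec):
    attrs = {'name': name}
    for k in ('requires', 'methods'):
        if k in spec:
            attrs[k] = spec[k]
    return type(name, (BaseAction,), attrs)  # type(name, bases, attrs)

Greet = action_from_dict('Greet', {'methods': ['GET']})
print(Greet().run(), Greet.methods)  # Greet ['GET']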
hosford42/xcs
xcs/algorithms/xcs.py
https://github.com/hosford42/xcs/blob/183bdd0dd339e19ded3be202f86e1b38bdb9f1e5/xcs/algorithms/xcs.py#L779-L813
def _action_set_subsumption(self, action_set): """Perform action set subsumption.""" # Select a condition with maximum bit count among those having # sufficient experience and sufficiently low error. selected_rule = None selected_bit_count = None for rule in action_set: if not (rule.experience > self.subsumption_threshold and rule.error < self.error_threshold): continue bit_count = rule.condition.count() if (selected_rule is None or bit_count > selected_bit_count or (bit_count == selected_bit_count and random.randrange(2))): selected_rule = rule selected_bit_count = bit_count # If no rule was found satisfying the requirements, return # early. if selected_rule is None: return # Subsume each rule which the selected rule generalizes. When a # rule is subsumed, all instances of the subsumed rule are replaced # with instances of the more general one in the population. to_remove = [] for rule in action_set: if (selected_rule is not rule and selected_rule.condition(rule.condition)): selected_rule.numerosity += rule.numerosity action_set.model.discard(rule, rule.numerosity) to_remove.append(rule) for rule in to_remove: action_set.remove(rule)
[ "def", "_action_set_subsumption", "(", "self", ",", "action_set", ")", ":", "# Select a condition with maximum bit count among those having", "# sufficient experience and sufficiently low error.", "selected_rule", "=", "None", "selected_bit_count", "=", "None", "for", "rule", "in", "action_set", ":", "if", "not", "(", "rule", ".", "experience", ">", "self", ".", "subsumption_threshold", "and", "rule", ".", "error", "<", "self", ".", "error_threshold", ")", ":", "continue", "bit_count", "=", "rule", ".", "condition", ".", "count", "(", ")", "if", "(", "selected_rule", "is", "None", "or", "bit_count", ">", "selected_bit_count", "or", "(", "bit_count", "==", "selected_bit_count", "and", "random", ".", "randrange", "(", "2", ")", ")", ")", ":", "selected_rule", "=", "rule", "selected_bit_count", "=", "bit_count", "# If no rule was found satisfying the requirements, return", "# early.", "if", "selected_rule", "is", "None", ":", "return", "# Subsume each rule which the selected rule generalizes. When a", "# rule is subsumed, all instances of the subsumed rule are replaced", "# with instances of the more general one in the population.", "to_remove", "=", "[", "]", "for", "rule", "in", "action_set", ":", "if", "(", "selected_rule", "is", "not", "rule", "and", "selected_rule", ".", "condition", "(", "rule", ".", "condition", ")", ")", ":", "selected_rule", ".", "numerosity", "+=", "rule", ".", "numerosity", "action_set", ".", "model", ".", "discard", "(", "rule", ",", "rule", ".", "numerosity", ")", "to_remove", ".", "append", "(", "rule", ")", "for", "rule", "in", "to_remove", ":", "action_set", ".", "remove", "(", "rule", ")" ]
Perform action set subsumption.
[ "Perform", "action", "set", "subsumption", "." ]
python
train
43.114286
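The selection loop above uses a classic scan pattern: keep the running best, and on an exact tie flip a coin with random.randrange(2) so ties do not systematically favor the earliest rule. Isolated, with (bit_count, rule) pairs:

import random

def pick_most_general(rules):
    best, best_bits = None, None
    for bits, rule in rules:
        # Replace on strictly better, or on a tie with probability 1/2.
        if (best is None or bits > best_bits
                or (bits == best_bits and random.randrange(2))):
            best, best_bits = rule, bits
    return best

print(pick_most_general([(3, 'a'), (5, 'b'), (5, 'c')]))  # 'b' or 'c'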
Microsoft/knack
knack/deprecation.py
https://github.com/Microsoft/knack/blob/5f1a480a33f103e2688c46eef59fb2d9eaf2baad/knack/deprecation.py#L127-L131
def _version_less_than_or_equal_to(self, v1, v2): """ Returns true if v1 <= v2. """ # pylint: disable=no-name-in-module, import-error from distutils.version import LooseVersion return LooseVersion(v1) <= LooseVersion(v2)
[ "def", "_version_less_than_or_equal_to", "(", "self", ",", "v1", ",", "v2", ")", ":", "# pylint: disable=no-name-in-module, import-error", "from", "distutils", ".", "version", "import", "LooseVersion", "return", "LooseVersion", "(", "v1", ")", "<=", "LooseVersion", "(", "v2", ")" ]
Returns true if v1 <= v2.
[ "Returns", "true", "if", "v1", "<", "=", "v2", "." ]
python
train
49.6
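Why delegate to LooseVersion rather than comparing strings: numeric components compare as integers, so '1.10' correctly sorts after '1.9'. (distutils is deprecated and removed in Python 3.12+, so this runs only on interpreters that still ship it.)

from distutils.version import LooseVersion

print('1.9' <= '1.10')                              # False: lexicographic
print(LooseVersion('1.9') <= LooseVersion('1.10'))  # True: numeric parts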
noahbenson/pimms
pimms/util.py
https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/util.py#L282-L289
def is_str(arg): ''' is_str(x) yields True if x is a string object or a 0-dim numpy array of a string and yields False otherwise. ''' return (isinstance(arg, six.string_types) or is_npscalar(arg, 'string') or is_npvalue(arg, 'string'))
[ "def", "is_str", "(", "arg", ")", ":", "return", "(", "isinstance", "(", "arg", ",", "six", ".", "string_types", ")", "or", "is_npscalar", "(", "arg", ",", "'string'", ")", "or", "is_npvalue", "(", "arg", ",", "'string'", ")", ")" ]
is_str(x) yields True if x is a string object or a 0-dim numpy array of a string and yields False otherwise.
[ "is_str", "(", "x", ")", "yields", "True", "if", "x", "is", "a", "string", "object", "or", "a", "0", "-", "dim", "numpy", "array", "of", "a", "string", "and", "yields", "False", "otherwise", "." ]
python
train
34.375
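The extra checks matter because a 0-dim numpy array wrapping a string is not an instance of the plain string types. is_npscalar and is_npvalue are pimms internals, so the sketch below substitutes direct numpy attribute checks for them:

import numpy as np

for x in ('abc', np.array('abc'), 3):
    plain = isinstance(x, str)
    zero_dim_str = (isinstance(x, np.ndarray) and x.ndim == 0
                    and x.dtype.kind in 'US')  # unicode or bytes dtype
    print(repr(x), plain or zero_dim_str)
# 'abc' True / array('abc', dtype='<U3') True / 3 False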
hotdoc/hotdoc
hotdoc/core/tree.py
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/tree.py#L577-L595
def resolve_symbols(self, database, link_resolver, page=None): """Will call resolve_symbols on all the stale subpages of the tree. Args: page: hotdoc.core.tree.Page, the page to resolve symbols in, will recurse on potential subpages. """ page = page or self.root if page.ast is None and not page.generated: with io.open(page.source_file, 'r', encoding='utf-8') as _: page.ast = cmark.hotdoc_to_ast(_.read(), self) page.resolve_symbols(self, database, link_resolver) self.__update_dep_map(page, page.symbols) for pagename in page.subpages: cpage = self.__all_pages[pagename] self.resolve_symbols(database, link_resolver, page=cpage)
[ "def", "resolve_symbols", "(", "self", ",", "database", ",", "link_resolver", ",", "page", "=", "None", ")", ":", "page", "=", "page", "or", "self", ".", "root", "if", "page", ".", "ast", "is", "None", "and", "not", "page", ".", "generated", ":", "with", "io", ".", "open", "(", "page", ".", "source_file", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "as", "_", ":", "page", ".", "ast", "=", "cmark", ".", "hotdoc_to_ast", "(", "_", ".", "read", "(", ")", ",", "self", ")", "page", ".", "resolve_symbols", "(", "self", ",", "database", ",", "link_resolver", ")", "self", ".", "__update_dep_map", "(", "page", ",", "page", ".", "symbols", ")", "for", "pagename", "in", "page", ".", "subpages", ":", "cpage", "=", "self", ".", "__all_pages", "[", "pagename", "]", "self", ".", "resolve_symbols", "(", "database", ",", "link_resolver", ",", "page", "=", "cpage", ")" ]
Will call resolve_symbols on all the stale subpages of the tree. Args: page: hotdoc.core.tree.Page, the page to resolve symbols in, will recurse on potential subpages.
[ "Will", "call", "resolve_symbols", "on", "all", "the", "stale", "subpages", "of", "the", "tree", ".", "Args", ":", "page", ":", "hotdoc", ".", "core", ".", "tree", ".", "Page", "the", "page", "to", "resolve", "symbols", "in", "will", "recurse", "on", "potential", "subpages", "." ]
python
train
39.631579
romana/vpc-router
vpcrouter/currentstate/__init__.py
https://github.com/romana/vpc-router/blob/d696c2e023f1111ceb61f9c6fbabfafed8e14040/vpcrouter/currentstate/__init__.py#L87-L124
def get_state_repr(self, path): """ Returns the current state, or sub-state, depending on the path. """ if path == "ips": return { "failed_ips" : self.failed_ips, "questionable_ips" : self.questionable_ips, "working_set" : self.working_set, } if path == "route_info": return { "route_spec" : self.route_spec, "routes" : self.routes, "ignore_routes" : self.ignore_routes } if path == "plugins": return self.get_plugins_info() if path == "vpc": return self.vpc_state if path == "": return { "SERVER" : { "version" : self.versions, "start_time" : self.starttime.isoformat(), "current_time" : datetime.datetime.now().isoformat() }, "params" : self.render_main_params(), "plugins" : {"_href" : "/plugins"}, "ips" : {"_href" : "/ips"}, "route_info" : {"_href" : "/route_info"}, "vpc" : {"_href" : "/vpc"} }
[ "def", "get_state_repr", "(", "self", ",", "path", ")", ":", "if", "path", "==", "\"ips\"", ":", "return", "{", "\"failed_ips\"", ":", "self", ".", "failed_ips", ",", "\"questionable_ips\"", ":", "self", ".", "questionable_ips", ",", "\"working_set\"", ":", "self", ".", "working_set", ",", "}", "if", "path", "==", "\"route_info\"", ":", "return", "{", "\"route_spec\"", ":", "self", ".", "route_spec", ",", "\"routes\"", ":", "self", ".", "routes", ",", "\"ignore_routes\"", ":", "self", ".", "ignore_routes", "}", "if", "path", "==", "\"plugins\"", ":", "return", "self", ".", "get_plugins_info", "(", ")", "if", "path", "==", "\"vpc\"", ":", "return", "self", ".", "vpc_state", "if", "path", "==", "\"\"", ":", "return", "{", "\"SERVER\"", ":", "{", "\"version\"", ":", "self", ".", "versions", ",", "\"start_time\"", ":", "self", ".", "starttime", ".", "isoformat", "(", ")", ",", "\"current_time\"", ":", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "isoformat", "(", ")", "}", ",", "\"params\"", ":", "self", ".", "render_main_params", "(", ")", ",", "\"plugins\"", ":", "{", "\"_href\"", ":", "\"/plugins\"", "}", ",", "\"ips\"", ":", "{", "\"_href\"", ":", "\"/ips\"", "}", ",", "\"route_info\"", ":", "{", "\"_href\"", ":", "\"/route_info\"", "}", ",", "\"vpc\"", ":", "{", "\"_href\"", ":", "\"/vpc\"", "}", "}" ]
Returns the current state, or sub-state, depending on the path.
[ "Returns", "the", "current", "state", "or", "sub", "-", "state", "depending", "on", "the", "path", "." ]
python
train
33.026316
gabrielfalcao/dominic
dominic/xpath/expr.py
https://github.com/gabrielfalcao/dominic/blob/a42f418fc288f3b70cb95847b405eaf7b83bb3a0/dominic/xpath/expr.py#L681-L704
def merge_into_nodeset(target, source): """Place all the nodes from the source node-set into the target node-set, preserving document order. Both node-sets must be in document order to begin with. """ if len(target) == 0: target.extend(source) return source = [n for n in source if n not in target] if len(source) == 0: return # If the last node in the target set comes before the first node in the # source set, then we can just concatenate the sets. Otherwise, we # will need to sort. (We could also check to see if the last node in # the source set comes before the first node in the target set, but this # situation is very unlikely in practice.) if document_order(target[-1]) < document_order(source[0]): target.extend(source) else: target.extend(source) target.sort(key=document_order)
[ "def", "merge_into_nodeset", "(", "target", ",", "source", ")", ":", "if", "len", "(", "target", ")", "==", "0", ":", "target", ".", "extend", "(", "source", ")", "return", "source", "=", "[", "n", "for", "n", "in", "source", "if", "n", "not", "in", "target", "]", "if", "len", "(", "source", ")", "==", "0", ":", "return", "# If the last node in the target set comes before the first node in the", "# source set, then we can just concatenate the sets. Otherwise, we", "# will need to sort. (We could also check to see if the last node in", "# the source set comes before the first node in the target set, but this", "# situation is very unlikely in practice.)", "if", "document_order", "(", "target", "[", "-", "1", "]", ")", "<", "document_order", "(", "source", "[", "0", "]", ")", ":", "target", ".", "extend", "(", "source", ")", "else", ":", "target", ".", "extend", "(", "source", ")", "target", ".", "sort", "(", "key", "=", "document_order", ")" ]
Place all the nodes from the source node-set into the target node-set, preserving document order. Both node-sets must be in document order to begin with.
[ "Place", "all", "the", "nodes", "from", "the", "source", "node", "-", "set", "into", "the", "target", "node", "-", "set", "preserving", "document", "order", ".", "Both", "node", "-", "sets", "must", "be", "in", "document", "order", "to", "begin", "with", "." ]
python
train
36.583333
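With integers standing in for document_order, the fast path (plain concatenation when the node-sets do not interleave) and the slow path (sort) of merge_into_nodeset look like this:

def merge(target, source, order=lambda n: n):
    if len(target) == 0:
        target.extend(source)
        return
    source = [n for n in source if n not in target]
    if len(source) == 0:
        return
    if order(target[-1]) < order(source[0]):
        target.extend(source)      # already in document order
    else:
        target.extend(source)
        target.sort(key=order)     # interleaved, so a sort is needed

a = [1, 3, 5]
merge(a, [2, 4, 6])
print(a)  # [1, 2, 3, 4, 5, 6]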
apple/turicreate
src/unity/python/turicreate/toolkits/clustering/dbscan.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/clustering/dbscan.py#L24-L322
def create(dataset, features=None, distance=None, radius=1., min_core_neighbors=10, verbose=True): """ Create a DBSCAN clustering model. The DBSCAN method partitions the input dataset into three types of points, based on the estimated probability density at each point. - **Core** points have a large number of points within a given neighborhood. Specifically, `min_core_neighbors` must be within distance `radius` of a point for it to be considered a core point. - **Boundary** points are within distance `radius` of a core point, but don't have sufficient neighbors of their own to be considered core. - **Noise** points comprise the remainder of the data. These points have too few neighbors to be considered core points, and are further than distance `radius` from all core points. Clusters are formed by connecting core points that are neighbors of each other, then assigning boundary points to their nearest core neighbor's cluster. Parameters ---------- dataset : SFrame Training data, with each row corresponding to an observation. Must include all features specified in the `features` parameter, but may have additional columns as well. features : list[str], optional Name of the columns with features to use in comparing records. 'None' (the default) indicates that all columns of the input `dataset` should be used to train the model. All features must be numeric, i.e. integer or float types. distance : str or list[list], optional Function to measure the distance between any two input data rows. This may be one of two types: - *String*: the name of a standard distance function. One of 'euclidean', 'squared_euclidean', 'manhattan', 'levenshtein', 'jaccard', 'weighted_jaccard', 'cosine', 'dot_product' (deprecated), or 'transformed_dot_product'. - *Composite distance*: the weighted sum of several standard distance functions applied to various features. This is specified as a list of distance components, each of which is itself a list containing three items: 1. list or tuple of feature names (str) 2. standard distance name (str) 3. scaling factor (int or float) For more information about Turi Create distance functions, please see the :py:mod:`~turicreate.toolkits.distances` module. For sparse vectors, missing keys are assumed to have value 0.0. If 'distance' is left unspecified, a composite distance is constructed automatically based on feature types. radius : int or float, optional Size of each point's neighborhood, with respect to the specified distance function. min_core_neighbors : int, optional Number of neighbors that must be within distance `radius` of a point in order for that point to be considered a "core point" of a cluster. verbose : bool, optional If True, print progress updates and model details during model creation. Returns ------- out : DBSCANModel A model containing a cluster label for each row in the input `dataset`. Also contains the indices of the core points, cluster boundary points, and noise points. See Also -------- DBSCANModel, turicreate.toolkits.distances Notes ----- - Our implementation of DBSCAN first computes the similarity graph on the input dataset, which can be a computationally intensive process. In the current implementation, some distances are substantially faster than others; in particular "euclidean", "squared_euclidean", "cosine", and "transformed_dot_product" are quite fast, while composite distances can be slow. 
- Any distance function in the GL Create library may be used with DBSCAN but the results may be poor for distances that violate the standard metric properties, i.e. symmetry, non-negativity, triangle inequality, and identity of indiscernibles. In particular, the DBSCAN algorithm is based on the concept of connecting high-density points that are *close* to each other into a single cluster, but the notion of *close* may be very counterintuitive if the chosen distance function is not a valid metric. The distances "euclidean", "manhattan", "jaccard", and "levenshtein" will likely yield the best results. References ---------- - Ester, M., et al. (1996) `A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise <https://www.aaai.org/Papers/KDD/1996/KDD96-037.pdf>`_. In Proceedings of the Second International Conference on Knowledge Discovery and Data Mining. pp. 226-231. - `Wikipedia - DBSCAN <https://en.wikipedia.org/wiki/DBSCAN>`_ - `Visualizing DBSCAN Clustering <http://www.naftaliharris.com/blog/visualizing-dbscan-clustering/>`_ Examples -------- >>> sf = turicreate.SFrame({ ... 'x1': [0.6777, -9.391, 7.0385, 2.2657, 7.7864, -10.16, -8.162, ... 8.8817, -9.525, -9.153, 2.0860, 7.6619, 6.5511, 2.7020], ... 'x2': [5.6110, 8.5139, 5.3913, 5.4743, 8.3606, 7.8843, 2.7305, ... 5.1679, 6.7231, 3.7051, 1.7682, 7.4608, 3.1270, 6.5624]}) ... >>> model = turicreate.dbscan.create(sf, radius=4.25, min_core_neighbors=3) >>> model.cluster_id.print_rows(15) +--------+------------+----------+ | row_id | cluster_id | type | +--------+------------+----------+ | 8 | 0 | core | | 7 | 2 | core | | 0 | 1 | core | | 2 | 2 | core | | 3 | 1 | core | | 11 | 2 | core | | 4 | 2 | core | | 1 | 0 | boundary | | 6 | 0 | boundary | | 5 | 0 | boundary | | 9 | 0 | boundary | | 12 | 2 | boundary | | 10 | 1 | boundary | | 13 | 1 | boundary | +--------+------------+----------+ [14 rows x 3 columns] """ ## Start the training time clock and instantiate an empty model logger = _logging.getLogger(__name__) start_time = _time.time() ## Validate the input dataset _tkutl._raise_error_if_not_sframe(dataset, "dataset") _tkutl._raise_error_if_sframe_empty(dataset, "dataset") ## Validate neighborhood parameters if not isinstance(min_core_neighbors, int) or min_core_neighbors < 0: raise ValueError("Input 'min_core_neighbors' must be a non-negative " + "integer.") if not isinstance(radius, (int, float)) or radius < 0: raise ValueError("Input 'radius' must be a non-negative integer " + "or float.") ## Compute all-point nearest neighbors within `radius` and count # neighborhood sizes knn_model = _tc.nearest_neighbors.create(dataset, features=features, distance=distance, method='brute_force', verbose=verbose) knn = knn_model.similarity_graph(k=None, radius=radius, include_self_edges=False, output_type='SFrame', verbose=verbose) neighbor_counts = knn.groupby('query_label', _agg.COUNT) ### NOTE: points with NO neighbors are already dropped here! ## Identify core points and boundary candidate points. Not all of the # boundary candidates will be boundary points - some are in small isolated # clusters. 
if verbose: logger.info("Identifying noise points and core points.") boundary_mask = neighbor_counts['Count'] < min_core_neighbors core_mask = 1 - boundary_mask # this includes too small clusters boundary_idx = neighbor_counts[boundary_mask]['query_label'] core_idx = neighbor_counts[core_mask]['query_label'] ## Build a similarity graph on the core points ## NOTE: careful with singleton core points - the second filter removes them # from the edge set so they have to be added separately as vertices. if verbose: logger.info("Constructing the core point similarity graph.") core_vertices = knn.filter_by(core_idx, 'query_label') core_edges = core_vertices.filter_by(core_idx, 'reference_label') core_graph = _tc.SGraph() core_graph = core_graph.add_vertices(core_vertices[['query_label']], vid_field='query_label') core_graph = core_graph.add_edges(core_edges, src_field='query_label', dst_field='reference_label') ## Compute core point connected components and relabel to be consecutive # integers cc = _tc.connected_components.create(core_graph, verbose=verbose) cc_labels = cc.component_size.add_row_number('__label') core_assignments = cc.component_id.join(cc_labels, on='component_id', how='left')[['__id', '__label']] core_assignments['type'] = 'core' ## Join potential boundary points to core cluster labels (points that aren't # really on a boundary are implicitly dropped) if verbose: logger.info("Processing boundary points.") boundary_edges = knn.filter_by(boundary_idx, 'query_label') # separate real boundary points from points in small isolated clusters boundary_core_edges = boundary_edges.filter_by(core_idx, 'reference_label') # join a boundary point to its single closest core point. boundary_assignments = boundary_core_edges.groupby('query_label', {'reference_label': _agg.ARGMIN('rank', 'reference_label')}) boundary_assignments = boundary_assignments.join(core_assignments, on={'reference_label': '__id'}) boundary_assignments = boundary_assignments.rename({'query_label': '__id'}, inplace=True) boundary_assignments = boundary_assignments.remove_column('reference_label', inplace=True) boundary_assignments['type'] = 'boundary' ## Identify boundary candidates that turned out to be in small clusters but # not on real cluster boundaries small_cluster_idx = set(boundary_idx).difference( boundary_assignments['__id']) ## Identify individual noise points by the fact that they have no neighbors. noise_idx = set(range(dataset.num_rows())).difference( neighbor_counts['query_label']) noise_idx = noise_idx.union(small_cluster_idx) noise_assignments = _tc.SFrame({'row_id': _tc.SArray(list(noise_idx), int)}) noise_assignments['cluster_id'] = None noise_assignments['cluster_id'] = noise_assignments['cluster_id'].astype(int) noise_assignments['type'] = 'noise' ## Append core, boundary, and noise results to each other. 
master_assignments = _tc.SFrame() num_clusters = 0 if core_assignments.num_rows() > 0: core_assignments = core_assignments.rename({'__id': 'row_id', '__label': 'cluster_id'}, inplace=True) master_assignments = master_assignments.append(core_assignments) num_clusters = len(core_assignments['cluster_id'].unique()) if boundary_assignments.num_rows() > 0: boundary_assignments = boundary_assignments.rename({'__id': 'row_id', '__label': 'cluster_id'}, inplace=True) master_assignments = master_assignments.append(boundary_assignments) if noise_assignments.num_rows() > 0: master_assignments = master_assignments.append(noise_assignments) ## Post-processing and formatting state = {'verbose': verbose, 'radius': radius, 'min_core_neighbors': min_core_neighbors, 'distance': knn_model.distance, 'num_distance_components': knn_model.num_distance_components, 'num_examples': dataset.num_rows(), 'features': knn_model.features, 'num_features': knn_model.num_features, 'unpacked_features': knn_model.unpacked_features, 'num_unpacked_features': knn_model.num_unpacked_features, 'cluster_id': master_assignments, 'num_clusters': num_clusters, 'training_time': _time.time() - start_time} return DBSCANModel(state)
[ "def", "create", "(", "dataset", ",", "features", "=", "None", ",", "distance", "=", "None", ",", "radius", "=", "1.", ",", "min_core_neighbors", "=", "10", ",", "verbose", "=", "True", ")", ":", "## Start the training time clock and instantiate an empty model", "logger", "=", "_logging", ".", "getLogger", "(", "__name__", ")", "start_time", "=", "_time", ".", "time", "(", ")", "## Validate the input dataset", "_tkutl", ".", "_raise_error_if_not_sframe", "(", "dataset", ",", "\"dataset\"", ")", "_tkutl", ".", "_raise_error_if_sframe_empty", "(", "dataset", ",", "\"dataset\"", ")", "## Validate neighborhood parameters", "if", "not", "isinstance", "(", "min_core_neighbors", ",", "int", ")", "or", "min_core_neighbors", "<", "0", ":", "raise", "ValueError", "(", "\"Input 'min_core_neighbors' must be a non-negative \"", "+", "\"integer.\"", ")", "if", "not", "isinstance", "(", "radius", ",", "(", "int", ",", "float", ")", ")", "or", "radius", "<", "0", ":", "raise", "ValueError", "(", "\"Input 'radius' must be a non-negative integer \"", "+", "\"or float.\"", ")", "## Compute all-point nearest neighbors within `radius` and count", "# neighborhood sizes", "knn_model", "=", "_tc", ".", "nearest_neighbors", ".", "create", "(", "dataset", ",", "features", "=", "features", ",", "distance", "=", "distance", ",", "method", "=", "'brute_force'", ",", "verbose", "=", "verbose", ")", "knn", "=", "knn_model", ".", "similarity_graph", "(", "k", "=", "None", ",", "radius", "=", "radius", ",", "include_self_edges", "=", "False", ",", "output_type", "=", "'SFrame'", ",", "verbose", "=", "verbose", ")", "neighbor_counts", "=", "knn", ".", "groupby", "(", "'query_label'", ",", "_agg", ".", "COUNT", ")", "### NOTE: points with NO neighbors are already dropped here!", "## Identify core points and boundary candidate points. 
Not all of the", "# boundary candidates will be boundary points - some are in small isolated", "# clusters.", "if", "verbose", ":", "logger", ".", "info", "(", "\"Identifying noise points and core points.\"", ")", "boundary_mask", "=", "neighbor_counts", "[", "'Count'", "]", "<", "min_core_neighbors", "core_mask", "=", "1", "-", "boundary_mask", "# this includes too small clusters", "boundary_idx", "=", "neighbor_counts", "[", "boundary_mask", "]", "[", "'query_label'", "]", "core_idx", "=", "neighbor_counts", "[", "core_mask", "]", "[", "'query_label'", "]", "## Build a similarity graph on the core points", "## NOTE: careful with singleton core points - the second filter removes them", "# from the edge set so they have to be added separately as vertices.", "if", "verbose", ":", "logger", ".", "info", "(", "\"Constructing the core point similarity graph.\"", ")", "core_vertices", "=", "knn", ".", "filter_by", "(", "core_idx", ",", "'query_label'", ")", "core_edges", "=", "core_vertices", ".", "filter_by", "(", "core_idx", ",", "'reference_label'", ")", "core_graph", "=", "_tc", ".", "SGraph", "(", ")", "core_graph", "=", "core_graph", ".", "add_vertices", "(", "core_vertices", "[", "[", "'query_label'", "]", "]", ",", "vid_field", "=", "'query_label'", ")", "core_graph", "=", "core_graph", ".", "add_edges", "(", "core_edges", ",", "src_field", "=", "'query_label'", ",", "dst_field", "=", "'reference_label'", ")", "## Compute core point connected components and relabel to be consecutive", "# integers", "cc", "=", "_tc", ".", "connected_components", ".", "create", "(", "core_graph", ",", "verbose", "=", "verbose", ")", "cc_labels", "=", "cc", ".", "component_size", ".", "add_row_number", "(", "'__label'", ")", "core_assignments", "=", "cc", ".", "component_id", ".", "join", "(", "cc_labels", ",", "on", "=", "'component_id'", ",", "how", "=", "'left'", ")", "[", "[", "'__id'", ",", "'__label'", "]", "]", "core_assignments", "[", "'type'", "]", "=", "'core'", "## Join potential boundary points to core cluster labels (points that aren't", "# really on a boundary are implicitly dropped)", "if", "verbose", ":", "logger", ".", "info", "(", "\"Processing boundary points.\"", ")", "boundary_edges", "=", "knn", ".", "filter_by", "(", "boundary_idx", ",", "'query_label'", ")", "# separate real boundary points from points in small isolated clusters", "boundary_core_edges", "=", "boundary_edges", ".", "filter_by", "(", "core_idx", ",", "'reference_label'", ")", "# join a boundary point to its single closest core point.", "boundary_assignments", "=", "boundary_core_edges", ".", "groupby", "(", "'query_label'", ",", "{", "'reference_label'", ":", "_agg", ".", "ARGMIN", "(", "'rank'", ",", "'reference_label'", ")", "}", ")", "boundary_assignments", "=", "boundary_assignments", ".", "join", "(", "core_assignments", ",", "on", "=", "{", "'reference_label'", ":", "'__id'", "}", ")", "boundary_assignments", "=", "boundary_assignments", ".", "rename", "(", "{", "'query_label'", ":", "'__id'", "}", ",", "inplace", "=", "True", ")", "boundary_assignments", "=", "boundary_assignments", ".", "remove_column", "(", "'reference_label'", ",", "inplace", "=", "True", ")", "boundary_assignments", "[", "'type'", "]", "=", "'boundary'", "## Identify boundary candidates that turned out to be in small clusters but", "# not on real cluster boundaries", "small_cluster_idx", "=", "set", "(", "boundary_idx", ")", ".", "difference", "(", "boundary_assignments", "[", "'__id'", "]", ")", "## Identify individual noise points by the 
fact that they have no neighbors.", "noise_idx", "=", "set", "(", "range", "(", "dataset", ".", "num_rows", "(", ")", ")", ")", ".", "difference", "(", "neighbor_counts", "[", "'query_label'", "]", ")", "noise_idx", "=", "noise_idx", ".", "union", "(", "small_cluster_idx", ")", "noise_assignments", "=", "_tc", ".", "SFrame", "(", "{", "'row_id'", ":", "_tc", ".", "SArray", "(", "list", "(", "noise_idx", ")", ",", "int", ")", "}", ")", "noise_assignments", "[", "'cluster_id'", "]", "=", "None", "noise_assignments", "[", "'cluster_id'", "]", "=", "noise_assignments", "[", "'cluster_id'", "]", ".", "astype", "(", "int", ")", "noise_assignments", "[", "'type'", "]", "=", "'noise'", "## Append core, boundary, and noise results to each other.", "master_assignments", "=", "_tc", ".", "SFrame", "(", ")", "num_clusters", "=", "0", "if", "core_assignments", ".", "num_rows", "(", ")", ">", "0", ":", "core_assignments", "=", "core_assignments", ".", "rename", "(", "{", "'__id'", ":", "'row_id'", ",", "'__label'", ":", "'cluster_id'", "}", ",", "inplace", "=", "True", ")", "master_assignments", "=", "master_assignments", ".", "append", "(", "core_assignments", ")", "num_clusters", "=", "len", "(", "core_assignments", "[", "'cluster_id'", "]", ".", "unique", "(", ")", ")", "if", "boundary_assignments", ".", "num_rows", "(", ")", ">", "0", ":", "boundary_assignments", "=", "boundary_assignments", ".", "rename", "(", "{", "'__id'", ":", "'row_id'", ",", "'__label'", ":", "'cluster_id'", "}", ",", "inplace", "=", "True", ")", "master_assignments", "=", "master_assignments", ".", "append", "(", "boundary_assignments", ")", "if", "noise_assignments", ".", "num_rows", "(", ")", ">", "0", ":", "master_assignments", "=", "master_assignments", ".", "append", "(", "noise_assignments", ")", "## Post-processing and formatting", "state", "=", "{", "'verbose'", ":", "verbose", ",", "'radius'", ":", "radius", ",", "'min_core_neighbors'", ":", "min_core_neighbors", ",", "'distance'", ":", "knn_model", ".", "distance", ",", "'num_distance_components'", ":", "knn_model", ".", "num_distance_components", ",", "'num_examples'", ":", "dataset", ".", "num_rows", "(", ")", ",", "'features'", ":", "knn_model", ".", "features", ",", "'num_features'", ":", "knn_model", ".", "num_features", ",", "'unpacked_features'", ":", "knn_model", ".", "unpacked_features", ",", "'num_unpacked_features'", ":", "knn_model", ".", "num_unpacked_features", ",", "'cluster_id'", ":", "master_assignments", ",", "'num_clusters'", ":", "num_clusters", ",", "'training_time'", ":", "_time", ".", "time", "(", ")", "-", "start_time", "}", "return", "DBSCANModel", "(", "state", ")" ]
Create a DBSCAN clustering model. The DBSCAN method partitions the input dataset into three types of points, based on the estimated probability density at each point. - **Core** points have a large number of points within a given neighborhood. Specifically, `min_core_neighbors` must be within distance `radius` of a point for it to be considered a core point. - **Boundary** points are within distance `radius` of a core point, but don't have sufficient neighbors of their own to be considered core. - **Noise** points comprise the remainder of the data. These points have too few neighbors to be considered core points, and are further than distance `radius` from all core points. Clusters are formed by connecting core points that are neighbors of each other, then assigning boundary points to their nearest core neighbor's cluster. Parameters ---------- dataset : SFrame Training data, with each row corresponding to an observation. Must include all features specified in the `features` parameter, but may have additional columns as well. features : list[str], optional Name of the columns with features to use in comparing records. 'None' (the default) indicates that all columns of the input `dataset` should be used to train the model. All features must be numeric, i.e. integer or float types. distance : str or list[list], optional Function to measure the distance between any two input data rows. This may be one of two types: - *String*: the name of a standard distance function. One of 'euclidean', 'squared_euclidean', 'manhattan', 'levenshtein', 'jaccard', 'weighted_jaccard', 'cosine', 'dot_product' (deprecated), or 'transformed_dot_product'. - *Composite distance*: the weighted sum of several standard distance functions applied to various features. This is specified as a list of distance components, each of which is itself a list containing three items: 1. list or tuple of feature names (str) 2. standard distance name (str) 3. scaling factor (int or float) For more information about Turi Create distance functions, please see the :py:mod:`~turicreate.toolkits.distances` module. For sparse vectors, missing keys are assumed to have value 0.0. If 'distance' is left unspecified, a composite distance is constructed automatically based on feature types. radius : int or float, optional Size of each point's neighborhood, with respect to the specified distance function. min_core_neighbors : int, optional Number of neighbors that must be within distance `radius` of a point in order for that point to be considered a "core point" of a cluster. verbose : bool, optional If True, print progress updates and model details during model creation. Returns ------- out : DBSCANModel A model containing a cluster label for each row in the input `dataset`. Also contains the indices of the core points, cluster boundary points, and noise points. See Also -------- DBSCANModel, turicreate.toolkits.distances Notes ----- - Our implementation of DBSCAN first computes the similarity graph on the input dataset, which can be a computationally intensive process. In the current implementation, some distances are substantially faster than others; in particular "euclidean", "squared_euclidean", "cosine", and "transformed_dot_product" are quite fast, while composite distances can be slow. - Any distance function in the GL Create library may be used with DBSCAN but the results may be poor for distances that violate the standard metric properties, i.e. symmetry, non-negativity, triangle inequality, and identity of indiscernibles. 
In particular, the DBSCAN algorithm is based on the concept of connecting high-density points that are *close* to each other into a single cluster, but the notion of *close* may be very counterintuitive if the chosen distance function is not a valid metric. The distances "euclidean", "manhattan", "jaccard", and "levenshtein" will likely yield the best results. References ---------- - Ester, M., et al. (1996) `A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise <https://www.aaai.org/Papers/KDD/1996/KDD96-037.pdf>`_. In Proceedings of the Second International Conference on Knowledge Discovery and Data Mining. pp. 226-231. - `Wikipedia - DBSCAN <https://en.wikipedia.org/wiki/DBSCAN>`_ - `Visualizing DBSCAN Clustering <http://www.naftaliharris.com/blog/visualizing-dbscan-clustering/>`_ Examples -------- >>> sf = turicreate.SFrame({ ... 'x1': [0.6777, -9.391, 7.0385, 2.2657, 7.7864, -10.16, -8.162, ... 8.8817, -9.525, -9.153, 2.0860, 7.6619, 6.5511, 2.7020], ... 'x2': [5.6110, 8.5139, 5.3913, 5.4743, 8.3606, 7.8843, 2.7305, ... 5.1679, 6.7231, 3.7051, 1.7682, 7.4608, 3.1270, 6.5624]}) ... >>> model = turicreate.dbscan.create(sf, radius=4.25, min_core_neighbors=3) >>> model.cluster_id.print_rows(15) +--------+------------+----------+ | row_id | cluster_id | type | +--------+------------+----------+ | 8 | 0 | core | | 7 | 2 | core | | 0 | 1 | core | | 2 | 2 | core | | 3 | 1 | core | | 11 | 2 | core | | 4 | 2 | core | | 1 | 0 | boundary | | 6 | 0 | boundary | | 5 | 0 | boundary | | 9 | 0 | boundary | | 12 | 2 | boundary | | 10 | 1 | boundary | | 13 | 1 | boundary | +--------+------------+----------+ [14 rows x 3 columns]
[ "Create", "a", "DBSCAN", "clustering", "model", ".", "The", "DBSCAN", "method", "partitions", "the", "input", "dataset", "into", "three", "types", "of", "points", "based", "on", "the", "estimated", "probability", "density", "at", "each", "point", "." ]
python
train
41.839465
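A dependency-free sketch of the core/boundary/noise split the docstring describes, using brute-force Euclidean neighborhoods on a toy 2-D point set; this mirrors the definitions, not turicreate's actual graph-based implementation:

def classify(points, radius, min_core_neighbors):
    def neighbors(i):
        xi, yi = points[i]
        return [j for j, (x, y) in enumerate(points)
                if j != i and (x - xi) ** 2 + (y - yi) ** 2 <= radius ** 2]
    nbrs = {i: neighbors(i) for i in range(len(points))}
    core = {i for i, n in nbrs.items() if len(n) >= min_core_neighbors}
    boundary = {i for i in nbrs if i not in core
                and any(j in core for j in nbrs[i])}
    noise = set(nbrs) - core - boundary
    return core, boundary, noise

pts = [(0, 0), (0, 1), (1, 0), (1, 1), (10, 10)]
print(classify(pts, radius=1.5, min_core_neighbors=3))
# ({0, 1, 2, 3}, set(), {4})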
zhanglab/psamm
psamm/fastcore.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/fastcore.py#L147-L153
def flip(self, reactions): """Flip the specified reactions.""" for reaction in reactions: if reaction in self._flipped: self._flipped.remove(reaction) else: self._flipped.add(reaction)
[ "def", "flip", "(", "self", ",", "reactions", ")", ":", "for", "reaction", "in", "reactions", ":", "if", "reaction", "in", "self", ".", "_flipped", ":", "self", ".", "_flipped", ".", "remove", "(", "reaction", ")", "else", ":", "self", ".", "_flipped", ".", "add", "(", "reaction", ")" ]
Flip the specified reactions.
[ "Flip", "the", "specified", "reactions", "." ]
python
train
35.714286
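The flip loop is a per-element membership toggle; assuming the input holds no duplicates, set.symmetric_difference_update does the same batch toggle in one call:

flipped = {'r1', 'r2'}
flipped.symmetric_difference_update({'r2', 'r3'})  # toggle r2 out, r3 in
print(sorted(flipped))  # ['r1', 'r3']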
saltstack/salt
salt/modules/openbsdrcctl_service.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/openbsdrcctl_service.py#L54-L67
def available(name): ''' Return True if the named service is available. CLI Example: .. code-block:: bash salt '*' service.available sshd ''' cmd = '{0} get {1}'.format(_cmd(), name) if __salt__['cmd.retcode'](cmd) == 2: return False return True
[ "def", "available", "(", "name", ")", ":", "cmd", "=", "'{0} get {1}'", ".", "format", "(", "_cmd", "(", ")", ",", "name", ")", "if", "__salt__", "[", "'cmd.retcode'", "]", "(", "cmd", ")", "==", "2", ":", "return", "False", "return", "True" ]
Return True if the named service is available. CLI Example: .. code-block:: bash salt '*' service.available sshd
[ "Return", "True", "if", "the", "named", "service", "is", "available", "." ]
python
train
20.214286
jepegit/cellpy
cellpy/readers/cellreader.py
https://github.com/jepegit/cellpy/blob/9f4a84cdd11f72cfa02cda8c2d7b5174abbb7370/cellpy/readers/cellreader.py#L342-L365
def set_raw_datadir(self, directory=None): """Set the directory containing .res-files. Used for setting directory for looking for res-files. A valid directory name is required. Args: directory (str): path to res-directory Example: >>> d = CellpyData() >>> directory = "MyData/Arbindata" >>> d.set_raw_datadir(directory) """ if directory is None: self.logger.info("no directory name given") return if not os.path.isdir(directory): self.logger.info(directory) self.logger.info("directory does not exist") return self.raw_datadir = directory
[ "def", "set_raw_datadir", "(", "self", ",", "directory", "=", "None", ")", ":", "if", "directory", "is", "None", ":", "self", ".", "logger", ".", "info", "(", "\"no directory name given\"", ")", "return", "if", "not", "os", ".", "path", ".", "isdir", "(", "directory", ")", ":", "self", ".", "logger", ".", "info", "(", "directory", ")", "self", ".", "logger", ".", "info", "(", "\"directory does not exist\"", ")", "return", "self", ".", "raw_datadir", "=", "directory" ]
Set the directory containing .res-files. Used for setting directory for looking for res-files. A valid directory name is required. Args: directory (str): path to res-directory Example: >>> d = CellpyData() >>> directory = "MyData/Arbindata" >>> d.set_raw_datadir(directory)
[ "Set", "the", "directory", "containing", ".", "res", "-", "files", "." ]
python
train
29.208333
alephdata/memorious
memorious/operations/fetch.py
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/operations/fetch.py#L44-L71
def dav_index(context, data): """List files in a WebDAV directory.""" # This is made to work with ownCloud/nextCloud, but some rumor has # it they are "standards compliant" and it should thus work for # other DAV servers. url = data.get('url') result = context.http.request('PROPFIND', url) for resp in result.xml.findall('./{DAV:}response'): href = resp.findtext('./{DAV:}href') if href is None: continue rurl = urljoin(url, href) rdata = data.copy() rdata['url'] = rurl rdata['foreign_id'] = rurl if rdata['url'] == url: continue if resp.find('.//{DAV:}collection') is not None: rdata['parent_foreign_id'] = rurl context.log.info("Fetching contents of folder: %s" % rurl) context.recurse(data=rdata) else: rdata['parent_foreign_id'] = url # Do GET requests on the urls fetch(context, rdata)
[ "def", "dav_index", "(", "context", ",", "data", ")", ":", "# This is made to work with ownCloud/nextCloud, but some rumor has", "# it they are \"standards compliant\" and it should thus work for", "# other DAV servers.", "url", "=", "data", ".", "get", "(", "'url'", ")", "result", "=", "context", ".", "http", ".", "request", "(", "'PROPFIND'", ",", "url", ")", "for", "resp", "in", "result", ".", "xml", ".", "findall", "(", "'./{DAV:}response'", ")", ":", "href", "=", "resp", ".", "findtext", "(", "'./{DAV:}href'", ")", "if", "href", "is", "None", ":", "continue", "rurl", "=", "urljoin", "(", "url", ",", "href", ")", "rdata", "=", "data", ".", "copy", "(", ")", "rdata", "[", "'url'", "]", "=", "rurl", "rdata", "[", "'foreign_id'", "]", "=", "rurl", "if", "rdata", "[", "'url'", "]", "==", "url", ":", "continue", "if", "resp", ".", "find", "(", "'.//{DAV:}collection'", ")", "is", "not", "None", ":", "rdata", "[", "'parent_foreign_id'", "]", "=", "rurl", "context", ".", "log", ".", "info", "(", "\"Fetching contents of folder: %s\"", "%", "rurl", ")", "context", ".", "recurse", "(", "data", "=", "rdata", ")", "else", ":", "rdata", "[", "'parent_foreign_id'", "]", "=", "url", "# Do GET requests on the urls", "fetch", "(", "context", ",", "rdata", ")" ]
List files in a WebDAV directory.
[ "List", "files", "in", "a", "WebDAV", "directory", "." ]
python
train
34.142857
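The {DAV:} lookups above are standard ElementTree Clark notation (WebDAV's namespace URI is literally the string 'DAV:'). A self-contained sketch over a trimmed, hypothetical PROPFIND reply:

import xml.etree.ElementTree as ET
from urllib.parse import urljoin

XML = b"""<d:multistatus xmlns:d="DAV:">
  <d:response><d:href>/dav/docs/</d:href>
    <d:propstat><d:prop><d:resourcetype><d:collection/></d:resourcetype>
    </d:prop></d:propstat></d:response>
  <d:response><d:href>/dav/a.txt</d:href></d:response>
</d:multistatus>"""

base = 'http://example.com/dav/'
for resp in ET.fromstring(XML).findall('./{DAV:}response'):
    href = resp.findtext('./{DAV:}href')
    is_dir = resp.find('.//{DAV:}collection') is not None
    print(urljoin(base, href), 'folder' if is_dir else 'file')
# http://example.com/dav/docs/ folder
# http://example.com/dav/a.txt file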
bxlab/bx-python
lib/bx/wiggle.py
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/wiggle.py#L14-L63
def IntervalReader( f ): """ Iterator yielding chrom, start, end, strand, value. Values are zero-based, half-open. Regions which lack a score are ignored. """ current_chrom = None current_pos = None current_step = None # always for wiggle data strand = '+' mode = "bed" for line in f: if line.isspace() or line.startswith( "track" ) or line.startswith( "#" ) or line.startswith( "browser" ): continue elif line.startswith( "variableStep" ): header = parse_header( line ) current_chrom = header['chrom'] current_pos = None current_step = None if 'span' in header: current_span = int( header['span'] ) else: current_span = 1 mode = "variableStep" elif line.startswith( "fixedStep" ): header = parse_header( line ) current_chrom = header['chrom'] current_pos = int( header['start'] ) - 1 current_step = int( header['step'] ) if 'span' in header: current_span = int( header['span'] ) else: current_span = 1 mode = "fixedStep" elif mode == "bed": fields = line.split() if len( fields ) > 3: if len( fields ) > 5: yield fields[0], int( fields[1] ), int( fields[2] ), fields[5], float( fields[3] ) else: yield fields[0], int( fields[1] ), int( fields[2] ), strand, float( fields[3] ) elif mode == "variableStep": fields = line.split() pos = int( fields[0] ) - 1 yield current_chrom, pos, pos + current_span, strand, float( fields[1] ) elif mode == "fixedStep": yield current_chrom, current_pos, current_pos + current_span, strand, float( line.split()[0] ) current_pos += current_step else: raise ValueError("Unexpected input line: %s" % line.strip())
[ "def", "IntervalReader", "(", "f", ")", ":", "current_chrom", "=", "None", "current_pos", "=", "None", "current_step", "=", "None", "# always for wiggle data", "strand", "=", "'+'", "mode", "=", "\"bed\"", "for", "line", "in", "f", ":", "if", "line", ".", "isspace", "(", ")", "or", "line", ".", "startswith", "(", "\"track\"", ")", "or", "line", ".", "startswith", "(", "\"#\"", ")", "or", "line", ".", "startswith", "(", "\"browser\"", ")", ":", "continue", "elif", "line", ".", "startswith", "(", "\"variableStep\"", ")", ":", "header", "=", "parse_header", "(", "line", ")", "current_chrom", "=", "header", "[", "'chrom'", "]", "current_pos", "=", "None", "current_step", "=", "None", "if", "'span'", "in", "header", ":", "current_span", "=", "int", "(", "header", "[", "'span'", "]", ")", "else", ":", "current_span", "=", "1", "mode", "=", "\"variableStep\"", "elif", "line", ".", "startswith", "(", "\"fixedStep\"", ")", ":", "header", "=", "parse_header", "(", "line", ")", "current_chrom", "=", "header", "[", "'chrom'", "]", "current_pos", "=", "int", "(", "header", "[", "'start'", "]", ")", "-", "1", "current_step", "=", "int", "(", "header", "[", "'step'", "]", ")", "if", "'span'", "in", "header", ":", "current_span", "=", "int", "(", "header", "[", "'span'", "]", ")", "else", ":", "current_span", "=", "1", "mode", "=", "\"fixedStep\"", "elif", "mode", "==", "\"bed\"", ":", "fields", "=", "line", ".", "split", "(", ")", "if", "len", "(", "fields", ")", ">", "3", ":", "if", "len", "(", "fields", ")", ">", "5", ":", "yield", "fields", "[", "0", "]", ",", "int", "(", "fields", "[", "1", "]", ")", ",", "int", "(", "fields", "[", "2", "]", ")", ",", "fields", "[", "5", "]", ",", "float", "(", "fields", "[", "3", "]", ")", "else", ":", "yield", "fields", "[", "0", "]", ",", "int", "(", "fields", "[", "1", "]", ")", ",", "int", "(", "fields", "[", "2", "]", ")", ",", "strand", ",", "float", "(", "fields", "[", "3", "]", ")", "elif", "mode", "==", "\"variableStep\"", ":", "fields", "=", "line", ".", "split", "(", ")", "pos", "=", "int", "(", "fields", "[", "0", "]", ")", "-", "1", "yield", "current_chrom", ",", "pos", ",", "pos", "+", "current_span", ",", "strand", ",", "float", "(", "fields", "[", "1", "]", ")", "elif", "mode", "==", "\"fixedStep\"", ":", "yield", "current_chrom", ",", "current_pos", ",", "current_pos", "+", "current_span", ",", "strand", ",", "float", "(", "line", ".", "split", "(", ")", "[", "0", "]", ")", "current_pos", "+=", "current_step", "else", ":", "raise", "ValueError", "(", "\"Unexpected input line: %s\"", "%", "line", ".", "strip", "(", ")", ")" ]
Iterator yielding chrom, start, end, strand, value. Values are zero-based, half-open. Regions which lack a score are ignored.
[ "Iterator", "yielding", "chrom", "start", "end", "strand", "value", ".", "Values", "are", "zero", "-", "based", "half", "-", "open", ".", "Regions", "which", "lack", "a", "score", "are", "ignored", "." ]
python
train
38.98
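The trickiest part of the reader is the fixedStep coordinate arithmetic: wiggle 'start' values are 1-based, the yielded intervals are zero-based half-open windows of width span, and the cursor advances by step. In isolation:

header = {'chrom': 'chr1', 'start': 11, 'step': 5, 'span': 2}
pos = header['start'] - 1          # 1-based -> 0-based
for value in (1.0, 2.0):
    print(header['chrom'], pos, pos + header['span'], '+', value)
    pos += header['step']
# chr1 10 12 + 1.0
# chr1 15 17 + 2.0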
Unity-Technologies/ml-agents
ml-agents/mlagents/trainers/trainer_metrics.py
https://github.com/Unity-Technologies/ml-agents/blob/37d139af636e4a2351751fbf0f2fca5a9ed7457f/ml-agents/mlagents/trainers/trainer_metrics.py#L51-L58
def add_delta_step(self, delta: float): """ Inform Metrics class about time to step in environment. """ if self.delta_last_experience_collection: self.delta_last_experience_collection += delta else: self.delta_last_experience_collection = delta
[ "def", "add_delta_step", "(", "self", ",", "delta", ":", "float", ")", ":", "if", "self", ".", "delta_last_experience_collection", ":", "self", ".", "delta_last_experience_collection", "+=", "delta", "else", ":", "self", ".", "delta_last_experience_collection", "=", "delta" ]
Inform Metrics class about time to step in environment.
[ "Inform", "Metrics", "class", "about", "time", "to", "step", "in", "environment", "." ]
python
train
37.625
angr/angr
angr/state_plugins/abstract_memory.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/state_plugins/abstract_memory.py#L68-L87
def get_abstract_locations(self, addr, size): """ Get a list of abstract locations that is within the range of [addr, addr + size] This implementation is pretty slow. But since this method won't be called frequently, we can live with the bad implementation for now. :param addr: Starting address of the memory region. :param size: Size of the memory region, in bytes. :return: A list of covered AbstractLocation objects, or an empty list if there is none. """ ret = [ ] for aloc in self._alocs.values(): for seg in aloc.segments: if seg.offset >= addr and seg.offset < addr + size: ret.append(aloc) break return ret
[ "def", "get_abstract_locations", "(", "self", ",", "addr", ",", "size", ")", ":", "ret", "=", "[", "]", "for", "aloc", "in", "self", ".", "_alocs", ".", "values", "(", ")", ":", "for", "seg", "in", "aloc", ".", "segments", ":", "if", "seg", ".", "offset", ">=", "addr", "and", "seg", ".", "offset", "<", "addr", "+", "size", ":", "ret", ".", "append", "(", "aloc", ")", "break", "return", "ret" ]
Get a list of abstract locations that is within the range of [addr, addr + size] This implementation is pretty slow. But since this method won't be called frequently, we can live with the bad implementation for now. :param addr: Starting address of the memory region. :param size: Size of the memory region, in bytes. :return: A list of covered AbstractLocation objects, or an empty list if there is none.
[ "Get", "a", "list", "of", "abstract", "locations", "that", "is", "within", "the", "range", "of", "[", "addr", "addr", "+", "size", "]" ]
python
train
38.45
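A self-contained sketch of the containment test the loop above performs, using stand-in objects (the real AbstractLocation and segment classes live in angr; these namedtuples are hypothetical):

from collections import namedtuple

Seg = namedtuple('Seg', 'offset')
Aloc = namedtuple('Aloc', 'segments')

alocs = {1: Aloc([Seg(0x10)]), 2: Aloc([Seg(0x40)])}

def covered(alocs, addr, size):
    # An aloc is kept if any of its segments starts inside [addr, addr + size).
    return [a for a in alocs.values()
            if any(addr <= s.offset < addr + size for s in a.segments)]

print(covered(alocs, 0x00, 0x20))  # only the aloc whose segment starts at 0x10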
eumis/pyviews
pyviews/binding/implementations.py
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/binding/implementations.py#L37-L42
def destroy(self): '''Unsubscribes callback from observable''' self._observable.release(self._key, self._callback) self._observable = None self._key = None self._callback = None
[ "def", "destroy", "(", "self", ")", ":", "self", ".", "_observable", ".", "release", "(", "self", ".", "_key", ",", "self", ".", "_callback", ")", "self", ".", "_observable", "=", "None", "self", ".", "_key", "=", "None", "self", ".", "_callback", "=", "None" ]
Unsubscribes callback from observable
[ "Unsubscribes", "callback", "from", "observable" ]
python
train
35.333333
novopl/peltak
src/peltak/core/versioning.py
https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/core/versioning.py#L235-L248
def write(self, version): # type: (str) -> None """ Write the project version to the .py file. This will regex-search the file for a ``__version__ = VERSION_STRING`` statement and substitute in the new version string. """ with open(self.version_file) as fp: content = fp.read() ver_statement = "__version__ = '{}'".format(version) new_content = RE_PY_VERSION.sub(ver_statement, content) fs.write_file(self.version_file, new_content)
[ "def", "write", "(", "self", ",", "version", ")", ":", "# type: (str) -> None", "with", "open", "(", "self", ".", "version_file", ")", "as", "fp", ":", "content", "=", "fp", ".", "read", "(", ")", "ver_statement", "=", "\"__version__ = '{}'\"", ".", "format", "(", "version", ")", "new_content", "=", "RE_PY_VERSION", ".", "sub", "(", "ver_statement", ",", "content", ")", "fs", ".", "write_file", "(", "self", ".", "version_file", ",", "new_content", ")" ]
Write the project version to the .py file. This will regex-search the file for a ``__version__ = VERSION_STRING`` statement and substitute in the new version string.
[ "Write", "the", "project", "version", "to", ".", "py", "file", "." ]
python
train
36.785714
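The code references an `RE_PY_VERSION` pattern defined elsewhere in the module; a plausible stand-in (an assumption, not peltak's actual pattern) and the substitution it performs:

import re

# Hypothetical stand-in for RE_PY_VERSION: matches a __version__ assignment.
RE_PY_VERSION = re.compile(r"__version__\s*=\s*['\"][^'\"]*['\"]")

content = "__version__ = '0.9.0'\n"
print(RE_PY_VERSION.sub("__version__ = '1.0.0'", content), end='')
# __version__ = '1.0.0'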
Chilipp/psyplot
psyplot/plotter.py
https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/plotter.py#L1588-L1592
def draw(self): """Draw the figures and those that are shared and have been changed""" for fig in self.figs2draw: fig.canvas.draw() self._figs2draw.clear()
[ "def", "draw", "(", "self", ")", ":", "for", "fig", "in", "self", ".", "figs2draw", ":", "fig", ".", "canvas", ".", "draw", "(", ")", "self", ".", "_figs2draw", ".", "clear", "(", ")" ]
Draw the figures and those that are shared and have been changed
[ "Draw", "the", "figures", "and", "those", "that", "are", "shared", "and", "have", "been", "changed" ]
python
train
37.4
vstconsulting/vstutils
vstutils/api/doc_generator.py
https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/api/doc_generator.py#L167-L193
def get_status_code_and_schema_rst(self, responses): ''' Prepare information about responses, with examples; only responses with status codes from `101` to `299` are handled. :param responses: dictionary that contains responses, with status code as key :type responses: dict :return: ''' for status_code, response_schema in responses.items(): status_code = int(status_code) schema = response_schema.get('schema', None) status = HTTP_STATUS_CODES.get(status_code, None) if status is None or not (100 < status_code < 300): continue self.write('**Example Response**', 1) self.write('') self.write('.. code-block:: http', 1) self.write('') self.write('HTTP/1.1 {} {}'.format(status_code, status), 2) self.write('Vary: {}'.format(response_schema['description']), 2) self.write('Content-Type: application/json', 2) self.write('') if schema: self.schema_handler(schema) else: self.write('{}', self.indent_depth)
[ "def", "get_status_code_and_schema_rst", "(", "self", ",", "responses", ")", ":", "for", "status_code", ",", "response_schema", "in", "responses", ".", "items", "(", ")", ":", "status_code", "=", "int", "(", "status_code", ")", "schema", "=", "response_schema", ".", "get", "(", "'schema'", ",", "None", ")", "status", "=", "HTTP_STATUS_CODES", ".", "get", "(", "status_code", ",", "None", ")", "if", "status", "is", "None", "or", "not", "(", "100", "<", "status_code", "<", "300", ")", ":", "continue", "self", ".", "write", "(", "'**Example Response**'", ",", "1", ")", "self", ".", "write", "(", "''", ")", "self", ".", "write", "(", "'.. code-block:: http'", ",", "1", ")", "self", ".", "write", "(", "''", ")", "self", ".", "write", "(", "'HTTP/1.1 {} {}'", ".", "format", "(", "status_code", ",", "status", ")", ",", "2", ")", "self", ".", "write", "(", "'Vary: {}'", ".", "format", "(", "response_schema", "[", "'description'", "]", ")", ",", "2", ")", "self", ".", "write", "(", "'Content-Type: application/json'", ",", "2", ")", "self", ".", "write", "(", "''", ")", "if", "schema", ":", "self", ".", "schema_handler", "(", "schema", ")", "else", ":", "self", ".", "write", "(", "'{}'", ",", "self", ".", "indent_depth", ")" ]
Prepare information about responses, with examples; only responses with status codes from `101` to `299` are handled. :param responses: dictionary that contains responses, with status code as key :type responses: dict :return:
[ "Function", "for", "prepare", "information", "about", "responses", "with", "example", "prepare", "only", "responses", "with", "status", "code", "from", "101", "to", "299", ":", "param", "responses", ":", "--", "dictionary", "that", "contains", "responses", "with", "status", "code", "as", "key", ":", "type", "responses", ":", "dict", ":", "return", ":" ]
python
train
43.222222
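For illustration, a hypothetical `responses` mapping as an OpenAPI/Swagger document would supply it; only the 2xx entry survives the `100 < status_code < 300` filter above, and a schema-less entry falls through to the bare `{}` placeholder:

responses = {
    '200': {'description': 'Successful operation',
            'schema': {'type': 'object'}},  # rendered via schema_handler
    '404': {'description': 'Not found'},    # skipped: outside (100, 300)
}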
taskcluster/taskcluster-client.py
taskcluster/utils.py
https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/utils.py#L322-L348
def optionsFromEnvironment(defaults=None): """Fetch root URL and credentials from the standard TASKCLUSTER_… environment variables and return them in a format suitable for passing to a client constructor.""" options = defaults or {} credentials = options.get('credentials', {}) rootUrl = os.environ.get('TASKCLUSTER_ROOT_URL') if rootUrl: options['rootUrl'] = rootUrl clientId = os.environ.get('TASKCLUSTER_CLIENT_ID') if clientId: credentials['clientId'] = clientId accessToken = os.environ.get('TASKCLUSTER_ACCESS_TOKEN') if accessToken: credentials['accessToken'] = accessToken certificate = os.environ.get('TASKCLUSTER_CERTIFICATE') if certificate: credentials['certificate'] = certificate if credentials: options['credentials'] = credentials return options
[ "def", "optionsFromEnvironment", "(", "defaults", "=", "None", ")", ":", "options", "=", "defaults", "or", "{", "}", "credentials", "=", "options", ".", "get", "(", "'credentials'", ",", "{", "}", ")", "rootUrl", "=", "os", ".", "environ", ".", "get", "(", "'TASKCLUSTER_ROOT_URL'", ")", "if", "rootUrl", ":", "options", "[", "'rootUrl'", "]", "=", "rootUrl", "clientId", "=", "os", ".", "environ", ".", "get", "(", "'TASKCLUSTER_CLIENT_ID'", ")", "if", "clientId", ":", "credentials", "[", "'clientId'", "]", "=", "clientId", "accessToken", "=", "os", ".", "environ", ".", "get", "(", "'TASKCLUSTER_ACCESS_TOKEN'", ")", "if", "accessToken", ":", "credentials", "[", "'accessToken'", "]", "=", "accessToken", "certificate", "=", "os", ".", "environ", ".", "get", "(", "'TASKCLUSTER_CERTIFICATE'", ")", "if", "certificate", ":", "credentials", "[", "'certificate'", "]", "=", "certificate", "if", "credentials", ":", "options", "[", "'credentials'", "]", "=", "credentials", "return", "options" ]
Fetch root URL and credentials from the standard TASKCLUSTER_… environment variables and return them in a format suitable for passing to a client constructor.
[ "Fetch", "root", "URL", "and", "credentials", "from", "the", "standard", "TASKCLUSTER_…", "environment", "variables", "and", "return", "them", "in", "a", "format", "suitable", "for", "passing", "to", "a", "client", "constructor", "." ]
python
train
31.148148
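A usage sketch, importing from the `taskcluster.utils` path given in the record (the environment values here are hypothetical):

import os
from taskcluster.utils import optionsFromEnvironment

os.environ['TASKCLUSTER_ROOT_URL'] = 'https://tc.example.com'
os.environ['TASKCLUSTER_CLIENT_ID'] = 'my-client'

print(optionsFromEnvironment())
# {'rootUrl': 'https://tc.example.com', 'credentials': {'clientId': 'my-client'}}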
CodeReclaimers/neat-python
neat/genome.py
https://github.com/CodeReclaimers/neat-python/blob/e3dbe77c0d776eae41d598e6439e6ac02ab90b18/neat/genome.py#L547-L557
def connect_partial_nodirect(self, config): """ Create a partially-connected genome with no direct input-output connections (unless there are no hidden nodes).""" assert 0 <= config.connection_fraction <= 1 all_connections = self.compute_full_connections(config, False) shuffle(all_connections) num_to_add = int(round(len(all_connections) * config.connection_fraction)) for input_id, output_id in all_connections[:num_to_add]: connection = self.create_connection(config, input_id, output_id) self.connections[connection.key] = connection
[ "def", "connect_partial_nodirect", "(", "self", ",", "config", ")", ":", "assert", "0", "<=", "config", ".", "connection_fraction", "<=", "1", "all_connections", "=", "self", ".", "compute_full_connections", "(", "config", ",", "False", ")", "shuffle", "(", "all_connections", ")", "num_to_add", "=", "int", "(", "round", "(", "len", "(", "all_connections", ")", "*", "config", ".", "connection_fraction", ")", ")", "for", "input_id", ",", "output_id", "in", "all_connections", "[", ":", "num_to_add", "]", ":", "connection", "=", "self", ".", "create_connection", "(", "config", ",", "input_id", ",", "output_id", ")", "self", ".", "connections", "[", "connection", ".", "key", "]", "=", "connection" ]
Create a partially-connected genome with no direct input-output connections (unless there are no hidden nodes).
[ "Create", "a", "partially", "-", "connected", "genome", "with", "(", "unless", "no", "hidden", "nodes", ")", "no", "direct", "input", "-", "output", "connections", "." ]
python
train
55.090909
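One subtlety worth a worked check: `num_to_add` uses `int(round(...))`, and Python 3's `round` is half-to-even, so adjacent candidate counts at the same fraction can round in different directions:

print(int(round(7 * 0.5)))  # 4 -- round(3.5) rounds half to even
print(int(round(5 * 0.5)))  # 2 -- round(2.5) also rounds half to even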
molmod/molmod
molmod/graphs.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L1423-L1427
def iter_initial_relations(self, subject_graph): """Iterate over all valid initial relations for a match""" vertex0 = 0 for vertex1 in range(subject_graph.num_vertices): yield vertex0, vertex1
[ "def", "iter_initial_relations", "(", "self", ",", "subject_graph", ")", ":", "vertex0", "=", "0", "for", "vertex1", "in", "range", "(", "subject_graph", ".", "num_vertices", ")", ":", "yield", "vertex0", ",", "vertex1" ]
Iterate over all valid initial relations for a match
[ "Iterate", "over", "all", "valid", "initial", "relations", "for", "a", "match" ]
python
train
44.8
vtkiorg/vtki
vtki/utilities.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/utilities.py#L53-L106
def get_scalar(mesh, name, preference='cell', info=False, err=False): """ Searches both point and cell data for an array Parameters ---------- name : str The name of the array to get the range. preference : str, optional When scalars is specified, this is the preferred scalar type to search for in the dataset. Must be either ``'point'`` or ``'cell'`` info : bool Return info about the scalar rather than the array itself. err : bool Boolean to control whether to throw an error if array is not present. """ parr = point_scalar(mesh, name) carr = cell_scalar(mesh, name) if isinstance(preference, str): if preference in ['cell', 'c', 'cells']: preference = CELL_DATA_FIELD elif preference in ['point', 'p', 'points']: preference = POINT_DATA_FIELD else: raise RuntimeError('Data field ({}) not supported.'.format(preference)) if all([parr is not None, carr is not None]): if preference == CELL_DATA_FIELD: if info: return carr, CELL_DATA_FIELD else: return carr elif preference == POINT_DATA_FIELD: if info: return parr, POINT_DATA_FIELD else: return parr else: raise RuntimeError('Data field ({}) not supported.'.format(preference)) arr = None field = None if parr is not None: arr = parr field = 0 elif carr is not None: arr = carr field = 1 elif err: raise KeyError('Data scalar ({}) not present in this dataset.'.format(name)) if info: return arr, field return arr
[ "def", "get_scalar", "(", "mesh", ",", "name", ",", "preference", "=", "'cell'", ",", "info", "=", "False", ",", "err", "=", "False", ")", ":", "parr", "=", "point_scalar", "(", "mesh", ",", "name", ")", "carr", "=", "cell_scalar", "(", "mesh", ",", "name", ")", "if", "isinstance", "(", "preference", ",", "str", ")", ":", "if", "preference", "in", "[", "'cell'", ",", "'c'", ",", "'cells'", "]", ":", "preference", "=", "CELL_DATA_FIELD", "elif", "preference", "in", "[", "'point'", ",", "'p'", ",", "'points'", "]", ":", "preference", "=", "POINT_DATA_FIELD", "else", ":", "raise", "RuntimeError", "(", "'Data field ({}) not supported.'", ".", "format", "(", "preference", ")", ")", "if", "all", "(", "[", "parr", "is", "not", "None", ",", "carr", "is", "not", "None", "]", ")", ":", "if", "preference", "==", "CELL_DATA_FIELD", ":", "if", "info", ":", "return", "carr", ",", "CELL_DATA_FIELD", "else", ":", "return", "carr", "elif", "preference", "==", "POINT_DATA_FIELD", ":", "if", "info", ":", "return", "parr", ",", "POINT_DATA_FIELD", "else", ":", "return", "parr", "else", ":", "raise", "RuntimeError", "(", "'Data field ({}) not supported.'", ".", "format", "(", "preference", ")", ")", "arr", "=", "None", "field", "=", "None", "if", "parr", "is", "not", "None", ":", "arr", "=", "parr", "field", "=", "0", "elif", "carr", "is", "not", "None", ":", "arr", "=", "carr", "field", "=", "1", "elif", "err", ":", "raise", "KeyError", "(", "'Data scalar ({}) not present in this dataset.'", ".", "format", "(", "name", ")", ")", "if", "info", ":", "return", "arr", ",", "field", "return", "arr" ]
Searches both point and cell data for an array Parameters ---------- name : str The name of the array to get the range. preference : str, optional When scalars is specified, this is the preferred scalar type to search for in the dataset. Must be either ``'point'`` or ``'cell'`` info : bool Return info about the scalar rather than the array itself. err : bool Boolean to control whether to throw an error if array is not present.
[ "Searches", "both", "point", "and", "cell", "data", "for", "an", "array" ]
python
train
31.351852
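A hedged usage sketch; the `vtki.Sphere` constructor and the `cell_arrays`/`n_cells` attributes are assumptions about the vtki API of that era, not confirmed by the record:

import numpy as np
import vtki

mesh = vtki.Sphere()                                  # assumed constructor
mesh.cell_arrays['values'] = np.arange(mesh.n_cells)  # assumed attribute names

arr, field = get_scalar(mesh, 'values', preference='cell', info=True)
# field == 1 here: the array was resolved from cell data (0 would mean point data)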
codeforamerica/epa_python
epa/gics/gics.py
https://github.com/codeforamerica/epa_python/blob/62a53da62936bea8daa487a01a52b973e9062b2c/epa/gics/gics.py#L60-L66
def eligible_cost(self, column=None, value=None, **kwargs): """ The assistance dollar amounts by eligible cost category. >>> GICS().eligible_cost('amount', 100000) """ return self._resolve_call('GIC_ELIGIBLE_COST', column, value, **kwargs)
[ "def", "eligible_cost", "(", "self", ",", "column", "=", "None", ",", "value", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_resolve_call", "(", "'GIC_ELIGIBLE_COST'", ",", "column", ",", "value", ",", "*", "*", "kwargs", ")" ]
The assistance dollar amounts by eligible cost category. >>> GICS().eligible_cost('amount', 100000)
[ "The", "assistance", "dollar", "amounts", "by", "eligible", "cost", "category", "." ]
python
train
39.142857
mathandy/svgpathtools
svgpathtools/path.py
https://github.com/mathandy/svgpathtools/blob/fd7348a1dfd88b65ea61da02325c6605aedf8c4f/svgpathtools/path.py#L147-L160
def bez2poly(bez, numpy_ordering=True, return_poly1d=False): """Converts a Bezier object or tuple of Bezier control points to a tuple of coefficients of the expanded polynomial. return_poly1d : returns a numpy.poly1d object. This makes computations of derivatives/anti-derivatives and many other operations quite quick. numpy_ordering : By default (to accommodate numpy) the coefficients will be output in reverse standard order. Note: This function is redundant thanks to the .poly() method included with all bezier segment classes.""" if is_bezier_segment(bez): bez = bez.bpoints() return bezier2polynomial(bez, numpy_ordering=numpy_ordering, return_poly1d=return_poly1d)
[ "def", "bez2poly", "(", "bez", ",", "numpy_ordering", "=", "True", ",", "return_poly1d", "=", "False", ")", ":", "if", "is_bezier_segment", "(", "bez", ")", ":", "bez", "=", "bez", ".", "bpoints", "(", ")", "return", "bezier2polynomial", "(", "bez", ",", "numpy_ordering", "=", "numpy_ordering", ",", "return_poly1d", "=", "return_poly1d", ")" ]
Converts a Bezier object or tuple of Bezier control points to a tuple of coefficients of the expanded polynomial. return_poly1d : returns a numpy.poly1d object. This makes computations of derivatives/anti-derivatives and many other operations quite quick. numpy_ordering : By default (to accommodate numpy) the coefficients will be output in reverse standard order. Note: This function is redundant thanks to the .poly() method included with all bezier segment classes.
[ "Converts", "a", "Bezier", "object", "or", "tuple", "of", "Bezier", "control", "points", "to", "a", "tuple", "of", "coefficients", "of", "the", "expanded", "polynomial", ".", "return_poly1d", ":", "returns", "a", "numpy", ".", "poly1d", "object", ".", "This", "makes", "computations", "of", "derivatives", "/", "anti", "-", "derivatives", "and", "many", "other", "operations", "quite", "quick", ".", "numpy_ordering", ":", "By", "default", "(", "to", "accommodate", "numpy", ")", "the", "coefficients", "will", "be", "output", "in", "reverse", "standard", "order", ".", "Note", ":", "This", "function", "is", "redundant", "thanks", "to", "the", ".", "poly", "()", "method", "included", "with", "all", "bezier", "segment", "classes", "." ]
python
train
54.857143
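A quick check of the conversion, using svgpathtools' convention of complex control points; a Bezier curve interpolates its endpoints, so the resulting polynomial must too:

from svgpathtools.path import bez2poly

cubic = (0 + 0j, 1 + 2j, 3 + 1j, 4 + 0j)  # cubic Bezier control points
p = bez2poly(cubic, return_poly1d=True)
print(p(0), p(1))  # 0j (4+0j): the curve's endpoints at t=0 and t=1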
kivy/python-for-android
pythonforandroid/recipe.py
https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/recipe.py#L580-L612
def get_recipe(cls, name, ctx): '''Returns the Recipe with the given name, if it exists.''' name = name.lower() if not hasattr(cls, "recipes"): cls.recipes = {} if name in cls.recipes: return cls.recipes[name] recipe_file = None for recipes_dir in cls.recipe_dirs(ctx): if not exists(recipes_dir): continue # Find matching folder (may differ in case): for subfolder in listdir(recipes_dir): if subfolder.lower() == name: recipe_file = join(recipes_dir, subfolder, '__init__.py') if exists(recipe_file): name = subfolder # adapt to actual spelling break recipe_file = None if recipe_file is not None: break if not recipe_file: raise ValueError('Recipe does not exist: {}'.format(name)) mod = import_recipe('pythonforandroid.recipes.{}'.format(name), recipe_file) if len(logger.handlers) > 1: logger.removeHandler(logger.handlers[1]) recipe = mod.recipe recipe.ctx = ctx cls.recipes[name.lower()] = recipe return recipe
[ "def", "get_recipe", "(", "cls", ",", "name", ",", "ctx", ")", ":", "name", "=", "name", ".", "lower", "(", ")", "if", "not", "hasattr", "(", "cls", ",", "\"recipes\"", ")", ":", "cls", ".", "recipes", "=", "{", "}", "if", "name", "in", "cls", ".", "recipes", ":", "return", "cls", ".", "recipes", "[", "name", "]", "recipe_file", "=", "None", "for", "recipes_dir", "in", "cls", ".", "recipe_dirs", "(", "ctx", ")", ":", "if", "not", "exists", "(", "recipes_dir", ")", ":", "continue", "# Find matching folder (may differ in case):", "for", "subfolder", "in", "listdir", "(", "recipes_dir", ")", ":", "if", "subfolder", ".", "lower", "(", ")", "==", "name", ":", "recipe_file", "=", "join", "(", "recipes_dir", ",", "subfolder", ",", "'__init__.py'", ")", "if", "exists", "(", "recipe_file", ")", ":", "name", "=", "subfolder", "# adapt to actual spelling", "break", "recipe_file", "=", "None", "if", "recipe_file", "is", "not", "None", ":", "break", "if", "not", "recipe_file", ":", "raise", "ValueError", "(", "'Recipe does not exist: {}'", ".", "format", "(", "name", ")", ")", "mod", "=", "import_recipe", "(", "'pythonforandroid.recipes.{}'", ".", "format", "(", "name", ")", ",", "recipe_file", ")", "if", "len", "(", "logger", ".", "handlers", ")", ">", "1", ":", "logger", ".", "removeHandler", "(", "logger", ".", "handlers", "[", "1", "]", ")", "recipe", "=", "mod", ".", "recipe", "recipe", ".", "ctx", "=", "ctx", "cls", ".", "recipes", "[", "name", ".", "lower", "(", ")", "]", "=", "recipe", "return", "recipe" ]
Returns the Recipe with the given name, if it exists.
[ "Returns", "the", "Recipe", "with", "the", "given", "name", "if", "it", "exists", "." ]
python
train
37.727273
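The case-insensitive folder lookup in the middle of the method, isolated as a self-contained sketch (the function name and structure here are illustrative, not part of python-for-android):

import os

def match_recipe_folder(recipes_dir, name):
    # Mirror the loop above: compare folder names case-insensitively and
    # require an __init__.py before accepting the match.
    for subfolder in os.listdir(recipes_dir):
        if subfolder.lower() == name.lower():
            candidate = os.path.join(recipes_dir, subfolder, '__init__.py')
            if os.path.exists(candidate):
                return subfolder  # the actual on-disk spelling
    return None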