Dataset schema (one record per function sample; observed value ranges shown):

  repo              string   7 to 55 chars
  path              string   4 to 223 chars
  url               string   87 to 315 chars
  code              string   75 to 104k chars
  code_tokens       list of strings (tokenized code)
  docstring         string   1 to 46.9k chars
  docstring_tokens  list of strings (tokenized docstring)
  language          string   1 distinct value ("python")
  partition         string   3 distinct values (includes "train" and "test")
  avg_line_len      float64  7.91 to 980
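A minimal sketch of iterating a corpus with this schema using the `datasets` library; the dataset path below is a hypothetical placeholder, not taken from this page, and the exact field names depend on the actual hub release:

import datasets

# Hypothetical hub path -- substitute the real one for this corpus.
ds = datasets.load_dataset("code_search_net", "python", split="train")

for row in ds.select(range(3)):
    # Each row mirrors the schema above: repo, path, url, code, ...
    print(row["repo"], row["path"], len(row["code"]))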
sassoo/goldman
goldman/deserializers/jsonapi.py
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/deserializers/jsonapi.py#L85-L121
def normalize(self, body):
    """ Invoke the JSON API normalizer

    Perform the following:

        * add the type as a rtype property
        * flatten the payload
        * add the id as a rid property ONLY if present

    We don't need to vet the inputs much because the Parser
    has already done all the work.

    :param body: the already vetted & parsed payload
    :return: normalized dict
    """
    resource = body['data']
    data = {'rtype': resource['type']}

    if 'attributes' in resource:
        attributes = resource['attributes']
        attributes = self._normalize_attributes(attributes)
        data.update(attributes)

    if 'relationships' in resource:
        relationships = resource['relationships']
        relationships = self._normalize_relationships(relationships)
        data.update(relationships)

    if resource.get('id'):
        data['rid'] = resource['id']

    return data
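A quick illustration of what `normalize` produces for a typical JSON API payload. The body below is made up, and we assume `_normalize_attributes` is a pass-through for a flat attributes dict:

body = {
    'data': {
        'type': 'articles',
        'id': '42',
        'attributes': {'title': 'Hello'},
    }
}
# normalize(body) would then yield something like:
# {'rtype': 'articles', 'title': 'Hello', 'rid': '42'}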
[ "def", "normalize", "(", "self", ",", "body", ")", ":", "resource", "=", "body", "[", "'data'", "]", "data", "=", "{", "'rtype'", ":", "resource", "[", "'type'", "]", "}", "if", "'attributes'", "in", "resource", ":", "attributes", "=", "resource", "[", "'attributes'", "]", "attributes", "=", "self", ".", "_normalize_attributes", "(", "attributes", ")", "data", ".", "update", "(", "attributes", ")", "if", "'relationships'", "in", "resource", ":", "relationships", "=", "resource", "[", "'relationships'", "]", "relationships", "=", "self", ".", "_normalize_relationships", "(", "relationships", ")", "data", ".", "update", "(", "relationships", ")", "if", "resource", ".", "get", "(", "'id'", ")", ":", "data", "[", "'rid'", "]", "=", "resource", "[", "'id'", "]", "return", "data" ]
Invoke the JSON API normalizer Perform the following: * add the type as a rtype property * flatten the payload * add the id as a rid property ONLY if present We don't need to vet the inputs much because the Parser has already done all the work. :param body: the already vetted & parsed payload :return: normalized dict
[ "Invoke", "the", "JSON", "API", "normalizer" ]
python
train
27.081081
garenchan/policy
policy/enforcer.py
https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/enforcer.py#L115-L131
def load_rules(self, force_reload=False, overwrite=True):
    """Load rules from policy file or cache."""
    # double-checked locking
    if self.load_once and self._policy_loaded:
        return

    with self._load_lock:
        if self.load_once and self._policy_loaded:
            return

        reloaded, data = _cache.read_file(
            self.policy_file, force_reload=force_reload)
        self._policy_loaded = True
        if reloaded or not self.rules:
            rules = Rules.load_json(data, self.default_rule, self.raise_error)
            self._set_rules(rules, overwrite=overwrite)
            LOG.debug('Reload policy file: %s', self.policy_file)
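The body above is an instance of double-checked locking: the loaded flag is tested once without the lock (cheap fast path) and again under the lock (to close the race between the first check and lock acquisition). A minimal generic sketch of the same pattern:

import threading

_lock = threading.Lock()
_loaded = False

def load_once(loader):
    global _loaded
    if _loaded:          # fast path, no lock taken
        return
    with _lock:
        if _loaded:      # re-check: another thread may have won the race
            return
        loader()
        _loaded = True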
[ "def", "load_rules", "(", "self", ",", "force_reload", "=", "False", ",", "overwrite", "=", "True", ")", ":", "# double-checked locking", "if", "self", ".", "load_once", "and", "self", ".", "_policy_loaded", ":", "return", "with", "self", ".", "_load_lock", ":", "if", "self", ".", "load_once", "and", "self", ".", "_policy_loaded", ":", "return", "reloaded", ",", "data", "=", "_cache", ".", "read_file", "(", "self", ".", "policy_file", ",", "force_reload", "=", "force_reload", ")", "self", ".", "_policy_loaded", "=", "True", "if", "reloaded", "or", "not", "self", ".", "rules", ":", "rules", "=", "Rules", ".", "load_json", "(", "data", ",", "self", ".", "default_rule", ",", "self", ".", "raise_error", ")", "self", ".", "_set_rules", "(", "rules", ",", "overwrite", "=", "overwrite", ")", "LOG", ".", "debug", "(", "'Reload policy file: %s'", ",", "self", ".", "policy_file", ")" ]
Load rules from policy file or cache.
[ "Load", "rules", "from", "policy", "file", "or", "cache", "." ]
python
train
41.705882
bitesofcode/projexui
projexui/xwidgetvalue.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/xwidgetvalue.py#L92-L107
def getComboValue(combo):
    """
    Checks to see if there is a dataType custom property set to determine
    whether to return an integer or a string.

    :param      combo | <QComboBox>

    :return     <int> || <str>
    """
    dataType = unwrapVariant(combo.property('dataType'))
    if dataType == 'string':
        return combo.currentText()
    elif dataType == 'data':
        return unwrapVariant(combo.itemData(combo.currentIndex()))
    return combo.currentIndex()
[ "def", "getComboValue", "(", "combo", ")", ":", "dataType", "=", "unwrapVariant", "(", "combo", ".", "property", "(", "'dataType'", ")", ")", "if", "dataType", "==", "'string'", ":", "return", "combo", ".", "currentText", "(", ")", "elif", "dataType", "==", "'data'", ":", "return", "unwrapVariant", "(", "combo", ".", "itemData", "(", "combo", ".", "currentIndex", "(", ")", ")", ")", "return", "combo", ".", "currentIndex", "(", ")" ]
Checks to see if there is a dataType custom property set to determine whether to return an integer or a string. :param combo | <QComboBox> :return <int> || <str>
[ "Checks", "to", "see", "if", "there", "is", "a", "dataType", "custom", "property", "set", "to", "determine", "whether", "to", "return", "an", "integer", "or", "a", "string", ".", ":", "param", "combo", "|", "<QComboBox", ">", ":", "return", "<int", ">", "||", "<str", ">" ]
python
train
30.8125
OSSOS/MOP
src/jjk/preproc/ephemSearch.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/jjk/preproc/ephemSearch.py#L264-L274
def predict(abg, date, obs=568):
    """Run GB's predict using an ABG file as input."""
    import orbfit
    import RO.StringUtil

    (ra, dec, a, b, ang) = orbfit.predict(abg, date, obs)
    obj = {}  # missing in the original: 'obj' was used without ever being defined
    obj['RA'] = ra
    obj['DEC'] = dec
    obj['dRA'] = a
    obj['dDEC'] = b
    obj['dANG'] = ang
    return obj
[ "def", "predict", "(", "abg", ",", "date", ",", "obs", "=", "568", ")", ":", "import", "orbfit", "import", "RO", ".", "StringUtil", "(", "ra", ",", "dec", ",", "a", ",", "b", ",", "ang", ")", "=", "orbfit", ".", "predict", "(", "abg", ",", "date", ",", "obs", ")", "obj", "[", "'RA'", "]", "=", "ra", "obj", "[", "'DEC'", "]", "=", "dec", "obj", "[", "'dRA'", "]", "=", "a", "obj", "[", "'dDEC'", "]", "=", "b", "obj", "[", "'dANG'", "]", "=", "ang", "return", "obj" ]
Run GB's predict using an ABG file as input.
[ "Run", "GB", "s", "predict", "using", "an", "ABG", "file", "as", "input", "." ]
python
train
25.090909
MartinThoma/hwrt
hwrt/utils.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/utils.py#L855-L882
def get_objectlist(description, config_key, module):
    """
    Take a description and return a list of classes.

    Parameters
    ----------
    description : list of dictionaries
        Each dictionary has only one entry. The key is the name of a class.
        The value of that entry is a list of dictionaries again. Those
        dictionaries are parameters.

    Returns
    -------
    List of objects.
    """
    object_list = []
    for feature in description:
        for feat, params in feature.items():
            feat = get_class(feat, config_key, module)
            if params is None:
                object_list.append(feat())
            else:
                parameters = {}
                for dicts in params:
                    for param_name, param_value in dicts.items():
                        parameters[param_name] = param_value
                object_list.append(feat(**parameters))  # pylint: disable=W0142
    return object_list
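A made-up example of the `description` shape the function expects; the class names and parameters here are hypothetical, for illustration only:

description = [
    {'ConstantPointCoordinates': [{'points_per_stroke': 20},
                                  {'fill_empty_with': 0}]},
    {'FirstNPoints': None},  # no parameters: the class is called with no args
]
# get_objectlist(description, config_key, module) would then instantiate
# ConstantPointCoordinates(points_per_stroke=20, fill_empty_with=0)
# and FirstNPoints().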
[ "def", "get_objectlist", "(", "description", ",", "config_key", ",", "module", ")", ":", "object_list", "=", "[", "]", "for", "feature", "in", "description", ":", "for", "feat", ",", "params", "in", "feature", ".", "items", "(", ")", ":", "feat", "=", "get_class", "(", "feat", ",", "config_key", ",", "module", ")", "if", "params", "is", "None", ":", "object_list", ".", "append", "(", "feat", "(", ")", ")", "else", ":", "parameters", "=", "{", "}", "for", "dicts", "in", "params", ":", "for", "param_name", ",", "param_value", "in", "dicts", ".", "items", "(", ")", ":", "parameters", "[", "param_name", "]", "=", "param_value", "object_list", ".", "append", "(", "feat", "(", "*", "*", "parameters", ")", ")", "# pylint: disable=W0142", "return", "object_list" ]
Take a description and return a list of classes. Parameters ---------- description : list of dictionaries Each dictionary has only one entry. The key is the name of a class. The value of that entry is a list of dictionaries again. Those dictionaries are paramters. Returns ------- List of objects.
[ "Take", "a", "description", "and", "return", "a", "list", "of", "classes", "." ]
python
train
33.464286
heitzmann/gdspy
gdspy/__init__.py
https://github.com/heitzmann/gdspy/blob/2c8d1313248c544e2066d19095b7ad7158c79bc9/gdspy/__init__.py#L2379-L2395
def get_datatypes(self):
    """
    Returns a set of datatypes in this cell.

    Returns
    -------
    out : set
        Set of the datatypes used in this cell.
    """
    datatypes = set()
    for element in self.elements:
        if isinstance(element, PolygonSet):
            datatypes.update(element.datatypes)
        elif isinstance(element, (CellReference, CellArray)):
            datatypes.update(element.ref_cell.get_datatypes())
    return datatypes
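A small usage sketch, assuming the classic gdspy 1.x API where cells are built from `Rectangle` polygons:

import gdspy

cell = gdspy.Cell('TOP')
cell.add(gdspy.Rectangle((0, 0), (1, 1), layer=1, datatype=2))
cell.add(gdspy.Rectangle((2, 0), (3, 1), layer=1, datatype=5))
print(cell.get_datatypes())  # expected: {2, 5}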
[ "def", "get_datatypes", "(", "self", ")", ":", "datatypes", "=", "set", "(", ")", "for", "element", "in", "self", ".", "elements", ":", "if", "isinstance", "(", "element", ",", "PolygonSet", ")", ":", "datatypes", ".", "update", "(", "element", ".", "datatypes", ")", "elif", "isinstance", "(", "element", ",", "CellReference", ")", "or", "isinstance", "(", "element", ",", "CellArray", ")", ":", "datatypes", ".", "update", "(", "element", ".", "ref_cell", ".", "get_datatypes", "(", ")", ")", "return", "datatypes" ]
Returns a set of datatypes in this cell. Returns ------- out : set Set of the datatypes used in this cell.
[ "Returns", "a", "set", "of", "datatypes", "in", "this", "cell", "." ]
python
train
32.235294
Unity-Technologies/ml-agents
ml-agents-envs/mlagents/envs/rpc_communicator.py
https://github.com/Unity-Technologies/ml-agents/blob/37d139af636e4a2351751fbf0f2fca5a9ed7457f/ml-agents-envs/mlagents/envs/rpc_communicator.py#L65-L75
def check_port(self, port):
    """
    Attempts to bind to the requested communicator port, checking if it is
    already in use.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind(("localhost", port))
    except socket.error:
        raise UnityWorkerInUseException(self.worker_id)
    finally:
        s.close()
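The same availability check as a standalone stdlib helper. Note that any such check is inherently racy: another process can grab the port between the probe and the real bind, and binding to "localhost" only tests the loopback interface:

import socket

def port_is_free(port, host="localhost"):
    """Return True if we could bind the TCP port right now."""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind((host, port))
        return True
    except socket.error:
        return False
    finally:
        s.close()

print(port_is_free(5005))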
[ "def", "check_port", "(", "self", ",", "port", ")", ":", "s", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "try", ":", "s", ".", "bind", "(", "(", "\"localhost\"", ",", "port", ")", ")", "except", "socket", ".", "error", ":", "raise", "UnityWorkerInUseException", "(", "self", ".", "worker_id", ")", "finally", ":", "s", ".", "close", "(", ")" ]
Attempts to bind to the requested communicator port, checking if it is already in use.
[ "Attempts", "to", "bind", "to", "the", "requested", "communicator", "port", "checking", "if", "it", "is", "already", "in", "use", "." ]
python
train
34.454545
CalebBell/fluids
fluids/friction.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/friction.py#L3647-L3733
def roughness_Farshad(ID=None, D=None, coeffs=None):
    r'''Calculates or retrieves the roughness of a pipe based on the work of
    [1]_. This function will return an average value for pipes of a given
    material, or if diameter is provided, will calculate one specifically for
    the pipe inner diameter according to the following expression with
    constants `A` and `B`:

    .. math::
        \epsilon = A\cdot D^{B+1}

    Please note that `A` has units of inches, and `B` requires `D` to be in
    inches as well.

    The list of supported materials is as follows:

        * 'Plastic coated'
        * 'Carbon steel, honed bare'
        * 'Cr13, electropolished bare'
        * 'Cement lining'
        * 'Carbon steel, bare'
        * 'Fiberglass lining'
        * 'Cr13, bare'

    If `coeffs` and `D` are given, the custom coefficients for the equation as
    given by the user will be used and `ID` is not required.

    Parameters
    ----------
    ID : str, optional
        Name of pipe material from above list
    D : float, optional
        Actual inner diameter of pipe, [m]
    coeffs : tuple, optional
        (A, B) Coefficients to use directly, instead of looking them up;
        they are actually dimensional, in the forms (inch^-B, -) but only
        coefficients with those dimensions are available [-]

    Returns
    -------
    epsilon : float
        Roughness of pipe [m]

    Notes
    -----
    The diameter-dependent form provides lower roughness values for larger
    diameters.

    The measurements were based on DIN 4768/1 (1987), using both a "Dektak ST
    Surface Profiler" and a "Hommel Tester T1000". Both instruments were found
    to be in agreement. A series of flow tests, in which pressure drop was
    directly measured, were performed as well, with nitrogen gas as an
    operating fluid. The accuracy of the data from these tests is claimed to
    be within 1%.

    Using those results, the authors back-calculated what relative roughness
    values would be necessary to produce the observed pressure drops. The
    average difference between this back-calculated roughness and the measured
    roughness was 6.75%.

    For microchannels, this model will predict roughness much larger than the
    actual channel diameter.

    Examples
    --------
    >>> roughness_Farshad('Cr13, bare', 0.05)
    5.3141677781137006e-05

    References
    ----------
    .. [1] Farshad, Fred F., and Herman H. Rieke. "Surface Roughness Design
       Values for Modern Pipes." SPE Drilling & Completion 21, no. 3
       (September 1, 2006): 212-215. doi:10.2118/89040-PA.
    '''
    # Case 1, coeffs given; only run if ID is not given.
    if ID is None and coeffs:
        A, B = coeffs
        return A*(D/inch)**(B+1)*inch

    # Case 2, lookup parameters
    try:
        dat = _Farshad_roughness[ID]
    except KeyError:  # was a bare 'except' in the original
        raise KeyError('ID was not in _Farshad_roughness.')

    if D is None:
        return dat[0]
    else:
        A, B = dat[1], dat[2]
        return A*(D/inch)**(B+1)*inch
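To make the `coeffs` path concrete, a worked evaluation of the formula with invented coefficients; A and B below are illustrative placeholders, not Farshad's published values, and we assume the module-level `inch` constant is 0.0254 m:

inch = 0.0254               # m, assumed value of the module-level constant

A, B = 0.003, -0.2          # hypothetical (inch^-B, -) coefficients
D = 0.05                    # pipe inner diameter, m
epsilon = A * (D / inch) ** (B + 1) * inch
print(epsilon)              # roughness in m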
[ "def", "roughness_Farshad", "(", "ID", "=", "None", ",", "D", "=", "None", ",", "coeffs", "=", "None", ")", ":", "# Case 1, coeffs given; only run if ID is not given.", "if", "ID", "is", "None", "and", "coeffs", ":", "A", ",", "B", "=", "coeffs", "return", "A", "*", "(", "D", "/", "inch", ")", "**", "(", "B", "+", "1", ")", "*", "inch", "# Case 2, lookup parameters", "try", ":", "dat", "=", "_Farshad_roughness", "[", "ID", "]", "except", ":", "raise", "KeyError", "(", "'ID was not in _Farshad_roughness.'", ")", "if", "D", "is", "None", ":", "return", "dat", "[", "0", "]", "else", ":", "A", ",", "B", "=", "dat", "[", "1", "]", ",", "dat", "[", "2", "]", "return", "A", "*", "(", "D", "/", "inch", ")", "**", "(", "B", "+", "1", ")", "*", "inch" ]
r'''Calculates of retrieves the roughness of a pipe based on the work of [1]_. This function will return an average value for pipes of a given material, or if diameter is provided, will calculate one specifically for the pipe inner diameter according to the following expression with constants `A` and `B`: .. math:: \epsilon = A\cdot D^{B+1} Please not that `A` has units of inches, and `B` requires `D` to be in inches as well. The list of supported materials is as follows: * 'Plastic coated' * 'Carbon steel, honed bare' * 'Cr13, electropolished bare' * 'Cement lining' * 'Carbon steel, bare' * 'Fiberglass lining' * 'Cr13, bare' If `coeffs` and `D` are given, the custom coefficients for the equation as given by the user will be used and `ID` is not required. Parameters ---------- ID : str, optional Name of pipe material from above list D : float, optional Actual inner diameter of pipe, [m] coeffs : tuple, optional (A, B) Coefficients to use directly, instead of looking them up; they are actually dimensional, in the forms (inch^-B, -) but only coefficients with those dimensions are available [-] Returns ------- epsilon : float Roughness of pipe [m] Notes ----- The diameter-dependent form provides lower roughness values for larger diameters. The measurements were based on DIN 4768/1 (1987), using both a "Dektak ST Surface Profiler" and a "Hommel Tester T1000". Both instruments were found to be in agreement. A series of flow tests, in which pressure drop directly measured, were performed as well, with nitrogen gas as an operating fluid. The accuracy of the data from these tests is claimed to be within 1%. Using those results, the authors back-calculated what relative roughness values would be necessary to produce the observed pressure drops. The average difference between this back-calculated roughness and the measured roughness was 6.75%. For microchannels, this model will predict roughness much larger than the actual channel diameter. Examples -------- >>> roughness_Farshad('Cr13, bare', 0.05) 5.3141677781137006e-05 References ---------- .. [1] Farshad, Fred F., and Herman H. Rieke. "Surface Roughness Design Values for Modern Pipes." SPE Drilling & Completion 21, no. 3 (September 1, 2006): 212-215. doi:10.2118/89040-PA.
[ "r", "Calculates", "of", "retrieves", "the", "roughness", "of", "a", "pipe", "based", "on", "the", "work", "of", "[", "1", "]", "_", ".", "This", "function", "will", "return", "an", "average", "value", "for", "pipes", "of", "a", "given", "material", "or", "if", "diameter", "is", "provided", "will", "calculate", "one", "specifically", "for", "the", "pipe", "inner", "diameter", "according", "to", "the", "following", "expression", "with", "constants", "A", "and", "B", ":", "..", "math", "::", "\\", "epsilon", "=", "A", "\\", "cdot", "D^", "{", "B", "+", "1", "}", "Please", "not", "that", "A", "has", "units", "of", "inches", "and", "B", "requires", "D", "to", "be", "in", "inches", "as", "well", ".", "The", "list", "of", "supported", "materials", "is", "as", "follows", ":" ]
python
train
34.402299
materialsproject/pymatgen
pymatgen/io/abinit/abiinspect.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/abiinspect.py#L639-L654
def all_docs_with_tag(self, doc_tag):
    """
    Returns all the documents with the specified tag.
    """
    docs = []

    while True:
        try:
            doc = self.next_doc_with(doc_tag)
            docs.append(doc)
        except StopIteration:
            break

    self.seek(0)
    return docs
[ "def", "all_docs_with_tag", "(", "self", ",", "doc_tag", ")", ":", "docs", "=", "[", "]", "while", "True", ":", "try", ":", "doc", "=", "self", ".", "next_doc_with", "(", "doc_tag", ")", "docs", ".", "append", "(", "doc", ")", "except", "StopIteration", ":", "break", "self", ".", "seek", "(", "0", ")", "return", "docs" ]
Returns all the documents with the specified tag.
[ "Returns", "all", "the", "documents", "with", "the", "specified", "tag", "." ]
python
train
21.375
tadashi-aikawa/owlmixin
owlmixin/owlcollections.py
https://github.com/tadashi-aikawa/owlmixin/blob/7c4a042c3008abddc56a8e8e55ae930d276071f5/owlmixin/owlcollections.py#L190-L206
def group_by(self, to_key):
    """
    :param to_key:
    :type to_key: T -> unicode
    :rtype: TDict[TList[T]]

    Usage:

        >>> TList([1, 2, 3, 4, 5]).group_by(lambda x: x % 2).to_json()
        '{"0": [2,4],"1": [1,3,5]}'
    """
    ret = TDict()
    for v in self:
        k = to_key(v)
        ret.setdefault(k, TList())
        ret[k].append(v)
    return ret
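For comparison, the same grouping pattern with only the standard library, returning plain dict/list instead of TDict/TList:

from collections import defaultdict

def group_by(items, to_key):
    grouped = defaultdict(list)
    for item in items:
        grouped[to_key(item)].append(item)
    return dict(grouped)

print(group_by([1, 2, 3, 4, 5], lambda x: x % 2))
# {1: [1, 3, 5], 0: [2, 4]}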
[ "def", "group_by", "(", "self", ",", "to_key", ")", ":", "ret", "=", "TDict", "(", ")", "for", "v", "in", "self", ":", "k", "=", "to_key", "(", "v", ")", "ret", ".", "setdefault", "(", "k", ",", "TList", "(", ")", ")", "ret", "[", "k", "]", ".", "append", "(", "v", ")", "return", "ret" ]
:param to_key: :type to_key: T -> unicode :rtype: TDict[TList[T]] Usage: >>> TList([1, 2, 3, 4, 5]).group_by(lambda x: x % 2).to_json() '{"0": [2,4],"1": [1,3,5]}'
[ ":", "param", "to_key", ":", ":", "type", "to_key", ":", "T", "-", ">", "unicode", ":", "rtype", ":", "TDict", "[", "TList", "[", "T", "]]" ]
python
train
24.411765
maximtrp/scikit-posthocs
scikit_posthocs/_posthocs.py
https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L1845-L1897
def posthoc_tukey_hsd(x, g, alpha=0.05):
    '''Pairwise comparisons with TukeyHSD confidence intervals. This is a
    convenience function to make statsmodels `pairwise_tukeyhsd` method more
    applicable for further use.

    Parameters
    ----------
    x : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing
        dependent variable values (test or response variable). Values should
        have a non-nominal scale. NaN values will cause an error (please
        handle manually).

    g : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing
        independent variable values (grouping or predictor variable). Values
        should have a nominal scale (categorical).

    alpha : float, optional
        Significance level for the test. Default is 0.05.

    Returns
    -------
    result : pandas DataFrame
        DataFrame with 0, 1, and -1 values, where 0 is False (not
        significant), 1 is True (significant), and -1 is for diagonal
        elements.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
    >>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
    '''
    # Pass the caller's alpha through (the original hardcoded alpha=0.05
    # here, silently ignoring the argument).
    result = pairwise_tukeyhsd(x, g, alpha=alpha)
    # np.str/np.int were removed in NumPy 1.24; the builtins behave the same.
    groups = np.array(result.groupsunique, dtype=str)
    groups_len = len(groups)

    vs = np.zeros((groups_len, groups_len), dtype=int)

    for a in result.summary()[1:]:
        a0 = str(a[0])
        a1 = str(a[1])
        a0i = np.where(groups == a0)[0][0]
        a1i = np.where(groups == a1)[0][0]
        vs[a0i, a1i] = 1 if str(a[5]) == 'True' else 0

    vs = np.triu(vs)
    np.fill_diagonal(vs, -1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[tri_lower] = vs.T[tri_lower]

    return DataFrame(vs, index=groups, columns=groups)
[ "def", "posthoc_tukey_hsd", "(", "x", ",", "g", ",", "alpha", "=", "0.05", ")", ":", "result", "=", "pairwise_tukeyhsd", "(", "x", ",", "g", ",", "alpha", "=", "0.05", ")", "groups", "=", "np", ".", "array", "(", "result", ".", "groupsunique", ",", "dtype", "=", "np", ".", "str", ")", "groups_len", "=", "len", "(", "groups", ")", "vs", "=", "np", ".", "zeros", "(", "(", "groups_len", ",", "groups_len", ")", ",", "dtype", "=", "np", ".", "int", ")", "for", "a", "in", "result", ".", "summary", "(", ")", "[", "1", ":", "]", ":", "a0", "=", "str", "(", "a", "[", "0", "]", ")", "a1", "=", "str", "(", "a", "[", "1", "]", ")", "a0i", "=", "np", ".", "where", "(", "groups", "==", "a0", ")", "[", "0", "]", "[", "0", "]", "a1i", "=", "np", ".", "where", "(", "groups", "==", "a1", ")", "[", "0", "]", "[", "0", "]", "vs", "[", "a0i", ",", "a1i", "]", "=", "1", "if", "str", "(", "a", "[", "5", "]", ")", "==", "'True'", "else", "0", "vs", "=", "np", ".", "triu", "(", "vs", ")", "np", ".", "fill_diagonal", "(", "vs", ",", "-", "1", ")", "tri_lower", "=", "np", ".", "tril_indices", "(", "vs", ".", "shape", "[", "0", "]", ",", "-", "1", ")", "vs", "[", "tri_lower", "]", "=", "vs", ".", "T", "[", "tri_lower", "]", "return", "DataFrame", "(", "vs", ",", "index", "=", "groups", ",", "columns", "=", "groups", ")" ]
Pairwise comparisons with TukeyHSD confidence intervals. This is a convenience function to make statsmodels `pairwise_tukeyhsd` method more applicable for further use. Parameters ---------- x : array_like or pandas Series object, 1d An array, any object exposing the array interface, containing dependent variable values (test or response variable). Values should have a non-nominal scale. NaN values will cause an error (please handle manually). g : array_like or pandas Series object, 1d An array, any object exposing the array interface, containing independent variable values (grouping or predictor variable). Values should have a nominal scale (categorical). alpha : float, optional Significance level for the test. Default is 0.05. Returns ------- result : pandas DataFrame DataFrame with 0, 1, and -1 values, where 0 is False (not significant), 1 is True (significant), and -1 is for diagonal elements. Examples -------- >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]] >>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5] >>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
[ "Pairwise", "comparisons", "with", "TukeyHSD", "confidence", "intervals", ".", "This", "is", "a", "convenience", "function", "to", "make", "statsmodels", "pairwise_tukeyhsd", "method", "more", "applicable", "for", "further", "use", "." ]
python
train
34.811321
GPflow/GPflow
gpflow/models/model.py
https://github.com/GPflow/GPflow/blob/549394f0b1b0696c7b521a065e49bdae6e7acf27/gpflow/models/model.py#L201-L206
def predict_y(self, Xnew):
    """
    Compute the mean and variance of held-out data at the points Xnew
    """
    pred_f_mean, pred_f_var = self._build_predict(Xnew)
    return self.likelihood.predict_mean_and_var(pred_f_mean, pred_f_var)
[ "def", "predict_y", "(", "self", ",", "Xnew", ")", ":", "pred_f_mean", ",", "pred_f_var", "=", "self", ".", "_build_predict", "(", "Xnew", ")", "return", "self", ".", "likelihood", ".", "predict_mean_and_var", "(", "pred_f_mean", ",", "pred_f_var", ")" ]
Compute the mean and variance of held-out data at the points Xnew
[ "Compute", "the", "mean", "and", "variance", "of", "held", "-", "out", "data", "at", "the", "points", "Xnew" ]
python
train
42.666667
sebdah/dynamic-dynamodb
dynamic_dynamodb/aws/sns.py
https://github.com/sebdah/dynamic-dynamodb/blob/bfd0ca806b1c3301e724696de90ef0f973410493/dynamic_dynamodb/aws/sns.py#L71-L89
def __publish(topic, message, subject=None):
    """ Publish a message to a SNS topic

    :type topic: str
    :param topic: SNS topic to publish the message to
    :type message: str
    :param message: Message to send via SNS
    :type subject: str
    :param subject: Subject to use for e-mail notifications
    :returns: None
    """
    try:
        SNS_CONNECTION.publish(topic=topic, message=message, subject=subject)
        logger.info('Sent SNS notification to {0}'.format(topic))
    except BotoServerError as error:
        logger.error('Problem sending SNS notification: {0}'.format(
            error.message))
    return
[ "def", "__publish", "(", "topic", ",", "message", ",", "subject", "=", "None", ")", ":", "try", ":", "SNS_CONNECTION", ".", "publish", "(", "topic", "=", "topic", ",", "message", "=", "message", ",", "subject", "=", "subject", ")", "logger", ".", "info", "(", "'Sent SNS notification to {0}'", ".", "format", "(", "topic", ")", ")", "except", "BotoServerError", "as", "error", ":", "logger", ".", "error", "(", "'Problem sending SNS notification: {0}'", ".", "format", "(", "error", ".", "message", ")", ")", "return" ]
Publish a message to a SNS topic :type topic: str :param topic: SNS topic to publish the message to :type message: str :param message: Message to send via SNS :type subject: str :param subject: Subject to use for e-mail notifications :returns: None
[ "Publish", "a", "message", "to", "a", "SNS", "topic" ]
python
train
32.578947
square/pylink
pylink/jlink.py
https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/pylink/jlink.py#L3717-L3821
def watchpoint_set(self,
                   addr,
                   addr_mask=0x0,
                   data=0x0,
                   data_mask=0x0,
                   access_size=None,
                   read=False,
                   write=False,
                   privileged=False):
    """Sets a watchpoint at the given address.

    This method allows for a watchpoint to be set on a given address or
    range of addresses.  The watchpoint can then be triggered if the data
    at the given address matches the specified ``data`` or range of data as
    determined by ``data_mask``, on specific access size events, reads,
    writes, or privileged accesses.

    Both ``addr_mask`` and ``data_mask`` are used to specify ranges.  Bits
    set to ``1`` are masked out and not taken into consideration when
    comparison against an address or data value.  E.g. an ``addr_mask``
    with a value of ``0x1`` and ``addr`` with value ``0xdeadbeef`` means
    that the watchpoint will be set on addresses ``0xdeadbeef`` and
    ``0xdeadbeee``.  If the ``data`` was ``0x11223340`` and the given
    ``data_mask`` has a value of ``0x0000000F``, then the watchpoint would
    trigger for data matching ``0x11223340 - 0x1122334F``.

    Note:
      If both ``read`` and ``write`` are specified, then the watchpoint
      will trigger on both read and write events to the given address.

    Args:
      self (JLink): the ``JLink`` instance
      addr_mask (int): optional mask to use for determining which address
        the watchpoint should be set on
      data (int): optional data to set the watchpoint on in order to have
        the watchpoint triggered when the value at the specified address
        matches the given ``data``
      data_mask (int): optional mask to use for determining the range of
        data on which the watchpoint should be triggered
      access_size (int): if specified, this must be one of ``{8, 16, 32}``
        and determines the access size for which the watchpoint should
        trigger
      read (bool): if ``True``, triggers the watchpoint on read events
      write (bool): if ``True``, triggers the watchpoint on write events
      privileged (bool): if ``True``, triggers the watchpoint on privileged
        accesses

    Returns:
      The handle of the created watchpoint.

    Raises:
      ValueError: if an invalid access size is given.
      JLinkException: if the watchpoint fails to be set.
    """
    access_flags = 0x0
    access_mask_flags = 0x0

    # If an access size is not specified, we must specify that the size of
    # the access does not matter by specifying the access mask flags.
    if access_size is None:
        access_mask_flags = access_mask_flags | enums.JLinkAccessMaskFlags.SIZE
    elif access_size == 8:
        access_flags = access_flags | enums.JLinkAccessFlags.SIZE_8BIT
    elif access_size == 16:
        access_flags = access_flags | enums.JLinkAccessFlags.SIZE_16BIT
    elif access_size == 32:
        access_flags = access_flags | enums.JLinkAccessFlags.SIZE_32BIT
    else:
        raise ValueError('Invalid access size given: %d' % access_size)

    # The read and write access flags cannot be specified together, so if
    # the user specifies that they want read and write access, then the
    # access mask flag must be set.
    if read and write:
        access_mask_flags = access_mask_flags | enums.JLinkAccessMaskFlags.DIR
    elif read:
        access_flags = access_flags | enums.JLinkAccessFlags.READ
    elif write:
        access_flags = access_flags | enums.JLinkAccessFlags.WRITE

    # If privileged is not specified, then there is no specification level
    # on which kinds of writes should be accessed, in which case we must
    # specify that flag.
    if privileged:
        access_flags = access_flags | enums.JLinkAccessFlags.PRIV
    else:
        access_mask_flags = access_mask_flags | enums.JLinkAccessMaskFlags.PRIV

    # Populate the Data event to configure how the watchpoint is triggered.
    wp = structs.JLinkDataEvent()
    wp.Addr = addr
    wp.AddrMask = addr_mask
    wp.Data = data
    wp.DataMask = data_mask
    wp.Access = access_flags
    wp.AccessMask = access_mask_flags

    # Return value of the function is <= 0 in the event of an error,
    # otherwise the watchpoint was set successfully.
    handle = ctypes.c_uint32()
    res = self._dll.JLINKARM_SetDataEvent(ctypes.pointer(wp), ctypes.pointer(handle))
    if res < 0:
        raise errors.JLinkDataException(res)

    return handle.value
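The flag composition above is plain bitwise OR over enum constants. A self-contained sketch of the same idea with `enum.IntFlag`; the names and values below are invented for illustration, not SEGGER's actual constants:

import enum

class AccessFlags(enum.IntFlag):
    READ  = 0x1
    WRITE = 0x2
    PRIV  = 0x4

flags = AccessFlags(0)
flags |= AccessFlags.READ
flags |= AccessFlags.PRIV
print(flags)  # AccessFlags.PRIV|READ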
[ "def", "watchpoint_set", "(", "self", ",", "addr", ",", "addr_mask", "=", "0x0", ",", "data", "=", "0x0", ",", "data_mask", "=", "0x0", ",", "access_size", "=", "None", ",", "read", "=", "False", ",", "write", "=", "False", ",", "privileged", "=", "False", ")", ":", "access_flags", "=", "0x0", "access_mask_flags", "=", "0x0", "# If an access size is not specified, we must specify that the size of", "# the access does not matter by specifying the access mask flags.", "if", "access_size", "is", "None", ":", "access_mask_flags", "=", "access_mask_flags", "|", "enums", ".", "JLinkAccessMaskFlags", ".", "SIZE", "elif", "access_size", "==", "8", ":", "access_flags", "=", "access_flags", "|", "enums", ".", "JLinkAccessFlags", ".", "SIZE_8BIT", "elif", "access_size", "==", "16", ":", "access_flags", "=", "access_flags", "|", "enums", ".", "JLinkAccessFlags", ".", "SIZE_16BIT", "elif", "access_size", "==", "32", ":", "access_flags", "=", "access_flags", "|", "enums", ".", "JLinkAccessFlags", ".", "SIZE_32BIT", "else", ":", "raise", "ValueError", "(", "'Invalid access size given: %d'", "%", "access_size", ")", "# The read and write access flags cannot be specified together, so if", "# the user specifies that they want read and write access, then the", "# access mask flag must be set.", "if", "read", "and", "write", ":", "access_mask_flags", "=", "access_mask_flags", "|", "enums", ".", "JLinkAccessMaskFlags", ".", "DIR", "elif", "read", ":", "access_flags", "=", "access_flags", "|", "enums", ".", "JLinkAccessFlags", ".", "READ", "elif", "write", ":", "access_flags", "=", "access_flags", "|", "enums", ".", "JLinkAccessFlags", ".", "WRITE", "# If privileged is not specified, then there is no specification level", "# on which kinds of writes should be accessed, in which case we must", "# specify that flag.", "if", "privileged", ":", "access_flags", "=", "access_flags", "|", "enums", ".", "JLinkAccessFlags", ".", "PRIV", "else", ":", "access_mask_flags", "=", "access_mask_flags", "|", "enums", ".", "JLinkAccessMaskFlags", ".", "PRIV", "# Populate the Data event to configure how the watchpoint is triggered.", "wp", "=", "structs", ".", "JLinkDataEvent", "(", ")", "wp", ".", "Addr", "=", "addr", "wp", ".", "AddrMask", "=", "addr_mask", "wp", ".", "Data", "=", "data", "wp", ".", "DataMask", "=", "data_mask", "wp", ".", "Access", "=", "access_flags", "wp", ".", "AccessMask", "=", "access_mask_flags", "# Return value of the function is <= 0 in the event of an error,", "# otherwise the watchpoint was set successfully.", "handle", "=", "ctypes", ".", "c_uint32", "(", ")", "res", "=", "self", ".", "_dll", ".", "JLINKARM_SetDataEvent", "(", "ctypes", ".", "pointer", "(", "wp", ")", ",", "ctypes", ".", "pointer", "(", "handle", ")", ")", "if", "res", "<", "0", ":", "raise", "errors", ".", "JLinkDataException", "(", "res", ")", "return", "handle", ".", "value" ]
Sets a watchpoint at the given address. This method allows for a watchpoint to be set on an given address or range of addresses. The watchpoint can then be triggered if the data at the given address matches the specified ``data`` or range of data as determined by ``data_mask``, on specific access size events, reads, writes, or privileged accesses. Both ``addr_mask`` and ``data_mask`` are used to specify ranges. Bits set to ``1`` are masked out and not taken into consideration when comparison against an address or data value. E.g. an ``addr_mask`` with a value of ``0x1`` and ``addr`` with value ``0xdeadbeef`` means that the watchpoint will be set on addresses ``0xdeadbeef`` and ``0xdeadbeee``. If the ``data`` was ``0x11223340`` and the given ``data_mask`` has a value of ``0x0000000F``, then the watchpoint would trigger for data matching ``0x11223340 - 0x1122334F``. Note: If both ``read`` and ``write`` are specified, then the watchpoint will trigger on both read and write events to the given address. Args: self (JLink): the ``JLink`` instance addr_mask (int): optional mask to use for determining which address the watchpoint should be set on data (int): optional data to set the watchpoint on in order to have the watchpoint triggered when the value at the specified address matches the given ``data`` data_mask (int): optional mask to use for determining the range of data on which the watchpoint should be triggered access_size (int): if specified, this must be one of ``{8, 16, 32}`` and determines the access size for which the watchpoint should trigger read (bool): if ``True``, triggers the watchpoint on read events write (bool): if ``True``, triggers the watchpoint on write events privileged (bool): if ``True``, triggers the watchpoint on privileged accesses Returns: The handle of the created watchpoint. Raises: ValueError: if an invalid access size is given. JLinkException: if the watchpoint fails to be set.
[ "Sets", "a", "watchpoint", "at", "the", "given", "address", "." ]
python
train
45.657143
data61/clkhash
clkhash/field_formats.py
https://github.com/data61/clkhash/blob/ec6398d6708a063de83f7c3d6286587bff8e7121/clkhash/field_formats.py#L157-L188
def fhp_from_json_dict(
        json_dict  # type: Dict[str, Any]
    ):
    # type: (...) -> FieldHashingProperties
    """ Make a :class:`FieldHashingProperties` object from a dictionary.

    :param dict json_dict:
        The dictionary must have an 'ngram' key and one of k or num_bits.
        It may have a 'positional' key; if missing a default is used.
        The encoding is always set to the default value.
    :return: A :class:`FieldHashingProperties` instance.
    """
    h = json_dict.get('hash', {'type': 'blakeHash'})
    num_bits = json_dict.get('numBits')
    k = json_dict.get('k')
    if not num_bits and not k:
        num_bits = 200  # default for v2 schema
    return FieldHashingProperties(
        ngram=json_dict['ngram'],
        positional=json_dict.get(
            'positional', FieldHashingProperties._DEFAULT_POSITIONAL),
        hash_type=h['type'],
        prevent_singularity=h.get('prevent_singularity'),
        num_bits=num_bits,
        k=k,
        missing_value=MissingValueSpec.from_json_dict(
            json_dict['missingValue']) if 'missingValue' in json_dict else None
    )
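A sketch of the dictionary shape the parser accepts, with invented values; the expected result is read off the function body above:

json_dict = {
    'ngram': 2,
    'positional': True,
    'k': 30,
    'hash': {'type': 'doubleHash', 'prevent_singularity': True},
}
# fhp_from_json_dict(json_dict) would construct FieldHashingProperties(
#     ngram=2, positional=True, hash_type='doubleHash',
#     prevent_singularity=True, num_bits=None, k=30, missing_value=None)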
[ "def", "fhp_from_json_dict", "(", "json_dict", "# type: Dict[str, Any]", ")", ":", "# type: (...) -> FieldHashingProperties", "h", "=", "json_dict", ".", "get", "(", "'hash'", ",", "{", "'type'", ":", "'blakeHash'", "}", ")", "num_bits", "=", "json_dict", ".", "get", "(", "'numBits'", ")", "k", "=", "json_dict", ".", "get", "(", "'k'", ")", "if", "not", "num_bits", "and", "not", "k", ":", "num_bits", "=", "200", "# default for v2 schema", "return", "FieldHashingProperties", "(", "ngram", "=", "json_dict", "[", "'ngram'", "]", ",", "positional", "=", "json_dict", ".", "get", "(", "'positional'", ",", "FieldHashingProperties", ".", "_DEFAULT_POSITIONAL", ")", ",", "hash_type", "=", "h", "[", "'type'", "]", ",", "prevent_singularity", "=", "h", ".", "get", "(", "'prevent_singularity'", ")", ",", "num_bits", "=", "num_bits", ",", "k", "=", "k", ",", "missing_value", "=", "MissingValueSpec", ".", "from_json_dict", "(", "json_dict", "[", "'missingValue'", "]", ")", "if", "'missingValue'", "in", "json_dict", "else", "None", ")" ]
Make a :class:`FieldHashingProperties` object from a dictionary. :param dict json_dict: The dictionary must have have an 'ngram' key and one of k or num_bits. It may have 'positional' key; if missing a default is used. The encoding is always set to the default value. :return: A :class:`FieldHashingProperties` instance.
[ "Make", "a", ":", "class", ":", "FieldHashingProperties", "object", "from", "a", "dictionary", "." ]
python
train
36.25
Azure/azure-cli-extensions
src/express-route/azext_express_route/vendored_sdks/network_management_client.py
https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/express-route/azext_express_route/vendored_sdks/network_management_client.py#L861-L871
def express_route_ports(self):
    """Instance depends on the API version:

    * 2018-08-01: :class:`ExpressRoutePortsOperations<azure.mgmt.network.v2018_08_01.operations.ExpressRoutePortsOperations>`
    """
    api_version = self._get_api_version('express_route_ports')
    if api_version == '2018-08-01':
        from .v2018_08_01.operations import ExpressRoutePortsOperations as OperationClass
    else:
        raise NotImplementedError("APIVersion {} is not available".format(api_version))
    return OperationClass(self._client, self.config,
                          Serializer(self._models_dict(api_version)),
                          Deserializer(self._models_dict(api_version)))
[ "def", "express_route_ports", "(", "self", ")", ":", "api_version", "=", "self", ".", "_get_api_version", "(", "'express_route_ports'", ")", "if", "api_version", "==", "'2018-08-01'", ":", "from", ".", "v2018_08_01", ".", "operations", "import", "ExpressRoutePortsOperations", "as", "OperationClass", "else", ":", "raise", "NotImplementedError", "(", "\"APIVersion {} is not available\"", ".", "format", "(", "api_version", ")", ")", "return", "OperationClass", "(", "self", ".", "_client", ",", "self", ".", "config", ",", "Serializer", "(", "self", ".", "_models_dict", "(", "api_version", ")", ")", ",", "Deserializer", "(", "self", ".", "_models_dict", "(", "api_version", ")", ")", ")" ]
Instance depends on the API version: * 2018-08-01: :class:`ExpressRoutePortsOperations<azure.mgmt.network.v2018_08_01.operations.ExpressRoutePortsOperations>`
[ "Instance", "depends", "on", "the", "API", "version", ":" ]
python
train
60.727273
getsentry/sentry-python
sentry_sdk/integrations/wsgi.py
https://github.com/getsentry/sentry-python/blob/a1d77722bdce0b94660ebf50b5c4a4645916d084/sentry_sdk/integrations/wsgi.py#L35-L55
def get_host(environ):
    # type: (Dict[str, str]) -> str
    """Return the host for the given WSGI environment. Yanked from Werkzeug."""
    if environ.get("HTTP_HOST"):
        rv = environ["HTTP_HOST"]
        if environ["wsgi.url_scheme"] == "http" and rv.endswith(":80"):
            rv = rv[:-3]
        elif environ["wsgi.url_scheme"] == "https" and rv.endswith(":443"):
            rv = rv[:-4]
    elif environ.get("SERVER_NAME"):
        rv = environ["SERVER_NAME"]
        if (environ["wsgi.url_scheme"], environ["SERVER_PORT"]) not in (
            ("https", "443"),
            ("http", "80"),
        ):
            rv += ":" + environ["SERVER_PORT"]
    else:
        # In spite of the WSGI spec, SERVER_NAME might not be present.
        rv = "unknown"

    return rv
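Because the function only touches a handful of CGI keys, it is easy to exercise with a literal environ dict:

environ = {
    "wsgi.url_scheme": "https",
    "HTTP_HOST": "example.com:443",
}
print(get_host(environ))  # "example.com" -- default port stripped

environ = {
    "wsgi.url_scheme": "http",
    "SERVER_NAME": "internal",
    "SERVER_PORT": "8080",
}
print(get_host(environ))  # "internal:8080" -- non-default port kept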
[ "def", "get_host", "(", "environ", ")", ":", "# type: (Dict[str, str]) -> str", "if", "environ", ".", "get", "(", "\"HTTP_HOST\"", ")", ":", "rv", "=", "environ", "[", "\"HTTP_HOST\"", "]", "if", "environ", "[", "\"wsgi.url_scheme\"", "]", "==", "\"http\"", "and", "rv", ".", "endswith", "(", "\":80\"", ")", ":", "rv", "=", "rv", "[", ":", "-", "3", "]", "elif", "environ", "[", "\"wsgi.url_scheme\"", "]", "==", "\"https\"", "and", "rv", ".", "endswith", "(", "\":443\"", ")", ":", "rv", "=", "rv", "[", ":", "-", "4", "]", "elif", "environ", ".", "get", "(", "\"SERVER_NAME\"", ")", ":", "rv", "=", "environ", "[", "\"SERVER_NAME\"", "]", "if", "(", "environ", "[", "\"wsgi.url_scheme\"", "]", ",", "environ", "[", "\"SERVER_PORT\"", "]", ")", "not", "in", "(", "(", "\"https\"", ",", "\"443\"", ")", ",", "(", "\"http\"", ",", "\"80\"", ")", ",", ")", ":", "rv", "+=", "\":\"", "+", "environ", "[", "\"SERVER_PORT\"", "]", "else", ":", "# In spite of the WSGI spec, SERVER_NAME might not be present.", "rv", "=", "\"unknown\"", "return", "rv" ]
Return the host for the given WSGI environment. Yanked from Werkzeug.
[ "Return", "the", "host", "for", "the", "given", "WSGI", "environment", ".", "Yanked", "from", "Werkzeug", "." ]
python
train
36.380952
AshleySetter/optoanalysis
optoanalysis/optoanalysis/optoanalysis.py
https://github.com/AshleySetter/optoanalysis/blob/9d390acc834d70024d47b574aea14189a5a5714e/optoanalysis/optoanalysis/optoanalysis.py#L1473-L1512
def multi_load_data(Channel, RunNos, RepeatNos, directoryPath='.',
                    calcPSD=True, NPerSegmentPSD=1000000):
    """
    Lets you load multiple datasets at once assuming they have a
    filename which contains a pattern of the form:
    CH<ChannelNo>_RUN00...<RunNo>_REPEAT00...<RepeatNo>

    Parameters
    ----------
    Channel : int
        The channel you want to load
    RunNos : sequence
        Sequence of run numbers you want to load
    RepeatNos : sequence
        Sequence of repeat numbers you want to load
    directoryPath : string, optional
        The path to the directory housing the data
        The default is the current directory

    Returns
    -------
    Data : list
        A list containing the DataObjects that were loaded.
    """
    matching_files = search_data_std(Channel=Channel, RunNos=RunNos,
                                     RepeatNos=RepeatNos,
                                     directoryPath=directoryPath)
    #data = []
    #for filepath in matching_files_:
    #    data.append(load_data(filepath, calcPSD=calcPSD, NPerSegmentPSD=NPerSegmentPSD))

    cpu_count = _cpu_count()
    workerPool = _Pool(cpu_count)
    load_data_partial = _partial(load_data, calcPSD=calcPSD,
                                 NPerSegmentPSD=NPerSegmentPSD)
    data = workerPool.map(load_data_partial, matching_files)
    workerPool.close()
    workerPool.terminate()
    workerPool.join()

    #with _Pool(cpu_count) as workerPool:
    #    load_data_partial = _partial(load_data, calcPSD=calcPSD, NPerSegmentPSD=NPerSegmentPSD)
    #    data = workerPool.map(load_data_partial, files_CorrectRepeatNo)
    return data
[ "def", "multi_load_data", "(", "Channel", ",", "RunNos", ",", "RepeatNos", ",", "directoryPath", "=", "'.'", ",", "calcPSD", "=", "True", ",", "NPerSegmentPSD", "=", "1000000", ")", ":", "matching_files", "=", "search_data_std", "(", "Channel", "=", "Channel", ",", "RunNos", "=", "RunNos", ",", "RepeatNos", "=", "RepeatNos", ",", "directoryPath", "=", "directoryPath", ")", "#data = []", "#for filepath in matching_files_:", "# data.append(load_data(filepath, calcPSD=calcPSD, NPerSegmentPSD=NPerSegmentPSD))", "cpu_count", "=", "_cpu_count", "(", ")", "workerPool", "=", "_Pool", "(", "cpu_count", ")", "load_data_partial", "=", "_partial", "(", "load_data", ",", "calcPSD", "=", "calcPSD", ",", "NPerSegmentPSD", "=", "NPerSegmentPSD", ")", "data", "=", "workerPool", ".", "map", "(", "load_data_partial", ",", "matching_files", ")", "workerPool", ".", "close", "(", ")", "workerPool", ".", "terminate", "(", ")", "workerPool", ".", "join", "(", ")", "#with _Pool(cpu_count) as workerPool:", "#load_data_partial = _partial(load_data, calcPSD=calcPSD, NPerSegmentPSD=NPerSegmentPSD)", "#data = workerPool.map(load_data_partial, files_CorrectRepeatNo)", "return", "data" ]
Lets you load multiple datasets at once assuming they have a filename which contains a pattern of the form: CH<ChannelNo>_RUN00...<RunNo>_REPEAT00...<RepeatNo> Parameters ---------- Channel : int The channel you want to load RunNos : sequence Sequence of run numbers you want to load RepeatNos : sequence Sequence of repeat numbers you want to load directoryPath : string, optional The path to the directory housing the data The default is the current directory Returns ------- Data : list A list containing the DataObjects that were loaded.
[ "Lets", "you", "load", "multiple", "datasets", "at", "once", "assuming", "they", "have", "a", "filename", "which", "contains", "a", "pattern", "of", "the", "form", ":", "CH<ChannelNo", ">", "_RUN00", "...", "<RunNo", ">", "_REPEAT00", "...", "<RepeatNo", ">" ]
python
train
37.875
xmikos/soapy_power
soapypower/__main__.py
https://github.com/xmikos/soapy_power/blob/46e12659b8d08af764dc09a1f31b0e85a68f808f/soapypower/__main__.py#L36-L45
def specific_gains(string):
    """Convert string with gains of individual amplification elements to dict"""
    if not string:
        return {}

    gains = {}
    for gain in string.split(','):
        amp_name, value = gain.split('=')
        gains[amp_name.strip()] = float(value.strip())
    return gains
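For example, with amplifier names as they would appear on a SoapySDR command line:

print(specific_gains('LNA=28, VGA=12, AMP=0'))
# {'LNA': 28.0, 'VGA': 12.0, 'AMP': 0.0}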
[ "def", "specific_gains", "(", "string", ")", ":", "if", "not", "string", ":", "return", "{", "}", "gains", "=", "{", "}", "for", "gain", "in", "string", ".", "split", "(", "','", ")", ":", "amp_name", ",", "value", "=", "gain", ".", "split", "(", "'='", ")", "gains", "[", "amp_name", ".", "strip", "(", ")", "]", "=", "float", "(", "value", ".", "strip", "(", ")", ")", "return", "gains" ]
Convert string with gains of individual amplification elements to dict
[ "Convert", "string", "with", "gains", "of", "individual", "amplification", "elements", "to", "dict" ]
python
test
30.1
sloria/aiohttp_utils
aiohttp_utils/runner.py
https://github.com/sloria/aiohttp_utils/blob/e5b41452f8077e7d749715606b1560f4b50e3d71/aiohttp_utils/runner.py#L99-L117
def run(app: web.Application, **kwargs):
    """Run an `aiohttp.web.Application` using gunicorn.

    :param app: The app to run.
    :param str app_uri: Import path to `app`. Takes the form
        ``$(MODULE_NAME):$(VARIABLE_NAME)``. The module name can be a full
        dotted path. The variable name refers to the `aiohttp.web.Application`
        instance. This argument is required if ``reload=True``.
    :param str host: Hostname to listen on.
    :param int port: Port of the server.
    :param bool reload: Whether to reload the server on a code change.
        If not set, will take the same value as ``app.debug``.
        **EXPERIMENTAL**.
    :param \*\*kwargs: Extra configuration options to set on the
        ``GunicornApp's`` config object.
    """
    runner = Runner(app, **kwargs)
    runner.run()
[ "def", "run", "(", "app", ":", "web", ".", "Application", ",", "*", "*", "kwargs", ")", ":", "runner", "=", "Runner", "(", "app", ",", "*", "*", "kwargs", ")", "runner", ".", "run", "(", ")" ]
Run an `aiohttp.web.Application` using gunicorn. :param app: The app to run. :param str app_uri: Import path to `app`. Takes the form ``$(MODULE_NAME):$(VARIABLE_NAME)``. The module name can be a full dotted path. The variable name refers to the `aiohttp.web.Application` instance. This argument is required if ``reload=True``. :param str host: Hostname to listen on. :param int port: Port of the server. :param bool reload: Whether to reload the server on a code change. If not set, will take the same value as ``app.debug``. **EXPERIMENTAL**. :param \*\*kwargs: Extra configuration options to set on the ``GunicornApp's`` config object.
[ "Run", "an", "aiohttp", ".", "web", ".", "Application", "using", "gunicorn", "." ]
python
test
42.578947
gbowerman/azurerm
azurerm/keyvault.py
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/keyvault.py#L99-L115
def list_keyvaults(access_token, subscription_id, rgname):
    '''Lists key vaults in the named resource group.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        rgname (str): Azure resource group name.

    Returns:
        HTTP response. 200 OK.
    '''
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/resourcegroups/', rgname,
                        '/providers/Microsoft.KeyVault/vaults',
                        '?api-version=', KEYVAULT_API])
    return do_get_next(endpoint, access_token)
[ "def", "list_keyvaults", "(", "access_token", ",", "subscription_id", ",", "rgname", ")", ":", "endpoint", "=", "''", ".", "join", "(", "[", "get_rm_endpoint", "(", ")", ",", "'/subscriptions/'", ",", "subscription_id", ",", "'/resourcegroups/'", ",", "rgname", ",", "'/providers/Microsoft.KeyVault/vaults'", ",", "'?api-version='", ",", "KEYVAULT_API", "]", ")", "return", "do_get_next", "(", "endpoint", ",", "access_token", ")" ]
Lists key vaults in the named resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. Returns: HTTP response. 200 OK.
[ "Lists", "key", "vaults", "in", "the", "named", "resource", "group", "." ]
python
train
38.117647
materialsproject/pymatgen
pymatgen/io/abinit/tasks.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/tasks.py#L2626-L2650
def setup(self):
    """
    Abinit has the very *bad* habit of changing the file extension by
    appending the characters in [A,B ..., Z] to the output file, and this
    breaks a lot of code that relies on the use of a unique file extension.
    Here we fix this issue by renaming run.abo to run.abo_[number] if the
    output file "run.abo" already exists. A few lines of code in python,
    a lot of problems if you try to implement this trick in Fortran90.
    """
    def rename_file(afile):
        """Helper function to rename :class:`File` objects. Return string for logging purpose."""
        # Find the index of the last file (if any).
        # TODO: Maybe it's better to use run.abo --> run(1).abo
        fnames = [f for f in os.listdir(self.workdir) if f.startswith(afile.basename)]
        nums = [int(f) for f in [f.split("_")[-1] for f in fnames] if f.isdigit()]
        last = max(nums) if nums else 0
        new_path = afile.path + "_" + str(last+1)

        os.rename(afile.path, new_path)
        return "Will rename %s to %s" % (afile.path, new_path)

    logs = []
    if self.output_file.exists:
        logs.append(rename_file(self.output_file))
    if self.log_file.exists:
        logs.append(rename_file(self.log_file))

    if logs:
        self.history.info("\n".join(logs))
[ "def", "setup", "(", "self", ")", ":", "def", "rename_file", "(", "afile", ")", ":", "\"\"\"Helper function to rename :class:`File` objects. Return string for logging purpose.\"\"\"", "# Find the index of the last file (if any).", "# TODO: Maybe it's better to use run.abo --> run(1).abo", "fnames", "=", "[", "f", "for", "f", "in", "os", ".", "listdir", "(", "self", ".", "workdir", ")", "if", "f", ".", "startswith", "(", "afile", ".", "basename", ")", "]", "nums", "=", "[", "int", "(", "f", ")", "for", "f", "in", "[", "f", ".", "split", "(", "\"_\"", ")", "[", "-", "1", "]", "for", "f", "in", "fnames", "]", "if", "f", ".", "isdigit", "(", ")", "]", "last", "=", "max", "(", "nums", ")", "if", "nums", "else", "0", "new_path", "=", "afile", ".", "path", "+", "\"_\"", "+", "str", "(", "last", "+", "1", ")", "os", ".", "rename", "(", "afile", ".", "path", ",", "new_path", ")", "return", "\"Will rename %s to %s\"", "%", "(", "afile", ".", "path", ",", "new_path", ")", "logs", "=", "[", "]", "if", "self", ".", "output_file", ".", "exists", ":", "logs", ".", "append", "(", "rename_file", "(", "self", ".", "output_file", ")", ")", "if", "self", ".", "log_file", ".", "exists", ":", "logs", ".", "append", "(", "rename_file", "(", "self", ".", "log_file", ")", ")", "if", "logs", ":", "self", ".", "history", ".", "info", "(", "\"\\n\"", ".", "join", "(", "logs", ")", ")" ]
Abinit has the very *bad* habit of changing the file extension by appending the characters in [A,B ..., Z] to the output file, and this breaks a lot of code that relies of the use of a unique file extension. Here we fix this issue by renaming run.abo to run.abo_[number] if the output file "run.abo" already exists. A few lines of code in python, a lot of problems if you try to implement this trick in Fortran90.
[ "Abinit", "has", "the", "very", "*", "bad", "*", "habit", "of", "changing", "the", "file", "extension", "by", "appending", "the", "characters", "in", "[", "A", "B", "...", "Z", "]", "to", "the", "output", "file", "and", "this", "breaks", "a", "lot", "of", "code", "that", "relies", "of", "the", "use", "of", "a", "unique", "file", "extension", ".", "Here", "we", "fix", "this", "issue", "by", "renaming", "run", ".", "abo", "to", "run", ".", "abo_", "[", "number", "]", "if", "the", "output", "file", "run", ".", "abo", "already", "exists", ".", "A", "few", "lines", "of", "code", "in", "python", "a", "lot", "of", "problems", "if", "you", "try", "to", "implement", "this", "trick", "in", "Fortran90", "." ]
python
train
53.76
edublancas/sklearn-evaluation
sklearn_evaluation/util.py
https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/util.py#L80-L89
def _product(k, v):
    """
    Perform the product between two objects
    even if they don't support iteration
    """
    if not _can_iterate(k):
        k = [k]
    if not _can_iterate(v):
        v = [v]
    return list(product(k, v))
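Assuming `_can_iterate` returns True for lists and False for scalars, the effect is simply `itertools.product` with scalars promoted to one-element lists:

from itertools import product

print(list(product([1], [2, 3])))  # [(1, 2), (1, 3)] -- what _product(1, [2, 3]) returns
# _product([1, 2], 3)  -> [(1, 3), (2, 3)]
# _product(1, 2)       -> [(1, 2)]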
[ "def", "_product", "(", "k", ",", "v", ")", ":", "if", "not", "_can_iterate", "(", "k", ")", ":", "k", "=", "[", "k", "]", "if", "not", "_can_iterate", "(", "v", ")", ":", "v", "=", "[", "v", "]", "return", "list", "(", "product", "(", "k", ",", "v", ")", ")" ]
Perform the product between two objects even if they don't support iteration
[ "Perform", "the", "product", "between", "two", "objects", "even", "if", "they", "don", "t", "support", "iteration" ]
python
train
23.8
sublee/etc
etc/helpers.py
https://github.com/sublee/etc/blob/f2be64604da5af0d7739cfacf36f55712f0fc5cb/etc/helpers.py#L37-L52
def gen_repr(cls, template, *args, **kwargs):
    """Generates a string for :func:`repr`."""
    buf = io.StringIO()
    buf.write(u'<')
    buf.write(cls.__module__.decode() if kwargs.pop('full', False) else u'etc')
    buf.write(u'.')
    buf.write(cls.__name__.decode())
    if not kwargs.pop('dense', False):
        buf.write(u' ')
        buf.write(template.format(*args, **kwargs))
    options = kwargs.pop('options', [])
    for attr, value in options:
        if value is not None:
            buf.write(u' %s=%s' % (attr, value))
    buf.write(u'>')
    return buf.getvalue()
[ "def", "gen_repr", "(", "cls", ",", "template", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "buf", "=", "io", ".", "StringIO", "(", ")", "buf", ".", "write", "(", "u'<'", ")", "buf", ".", "write", "(", "cls", ".", "__module__", ".", "decode", "(", ")", "if", "kwargs", ".", "pop", "(", "'full'", ",", "False", ")", "else", "u'etc'", ")", "buf", ".", "write", "(", "u'.'", ")", "buf", ".", "write", "(", "cls", ".", "__name__", ".", "decode", "(", ")", ")", "if", "not", "kwargs", ".", "pop", "(", "'dense'", ",", "False", ")", ":", "buf", ".", "write", "(", "u' '", ")", "buf", ".", "write", "(", "template", ".", "format", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")", "options", "=", "kwargs", ".", "pop", "(", "'options'", ",", "[", "]", ")", "for", "attr", ",", "value", "in", "options", ":", "if", "value", "is", "not", "None", ":", "buf", ".", "write", "(", "u' %s=%s'", "%", "(", "attr", ",", "value", ")", ")", "buf", ".", "write", "(", "u'>'", ")", "return", "buf", ".", "getvalue", "(", ")" ]
Generates a string for :func:`repr`.
[ "Generates", "a", "string", "for", ":", "func", ":", "repr", "." ]
python
train
35.375
tradenity/python-sdk
tradenity/resources/discount_promotion.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/discount_promotion.py#L682-L704
def list_all_discount_promotions(cls, **kwargs):
    """List DiscountPromotions

    Return a list of DiscountPromotions

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.list_all_discount_promotions(async=True)
    >>> result = thread.get()

    :param async bool
    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[DiscountPromotion]
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._list_all_discount_promotions_with_http_info(**kwargs)
    else:
        (data) = cls._list_all_discount_promotions_with_http_info(**kwargs)
        return data
[ "def", "list_all_discount_promotions", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_list_all_discount_promotions_with_http_info", "(", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_list_all_discount_promotions_with_http_info", "(", "*", "*", "kwargs", ")", "return", "data" ]
List DiscountPromotions Return a list of DiscountPromotions This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_discount_promotions(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[DiscountPromotion] If the method is called asynchronously, returns the request thread.
[ "List", "DiscountPromotions" ]
python
train
39.391304
sosy-lab/benchexec
benchexec/resources.py
https://github.com/sosy-lab/benchexec/blob/44428f67f41384c03aea13e7e25f884764653617/benchexec/resources.py#L213-L241
def get_memory_banks_per_run(coreAssignment, cgroups): """Get an assignment of memory banks to runs that fits to the given coreAssignment, i.e., no run is allowed to use memory that is not local (on the same NUMA node) to one of its CPU cores.""" try: # read list of available memory banks allMems = set(cgroups.read_allowed_memory_banks()) result = [] for cores in coreAssignment: mems = set() for core in cores: coreDir = '/sys/devices/system/cpu/cpu{0}/'.format(core) mems.update(_get_memory_banks_listed_in_dir(coreDir)) allowedMems = sorted(mems.intersection(allMems)) logging.debug("Memory banks for cores %s are %s, of which we can use %s.", cores, list(mems), allowedMems) result.append(allowedMems) assert len(result) == len(coreAssignment) if any(result) and os.path.isdir('/sys/devices/system/node/'): return result else: # All runs get the empty list of memory regions # because this system has no NUMA support return None except ValueError as e: sys.exit("Could not read memory information from kernel: {0}".format(e))
[ "def", "get_memory_banks_per_run", "(", "coreAssignment", ",", "cgroups", ")", ":", "try", ":", "# read list of available memory banks", "allMems", "=", "set", "(", "cgroups", ".", "read_allowed_memory_banks", "(", ")", ")", "result", "=", "[", "]", "for", "cores", "in", "coreAssignment", ":", "mems", "=", "set", "(", ")", "for", "core", "in", "cores", ":", "coreDir", "=", "'/sys/devices/system/cpu/cpu{0}/'", ".", "format", "(", "core", ")", "mems", ".", "update", "(", "_get_memory_banks_listed_in_dir", "(", "coreDir", ")", ")", "allowedMems", "=", "sorted", "(", "mems", ".", "intersection", "(", "allMems", ")", ")", "logging", ".", "debug", "(", "\"Memory banks for cores %s are %s, of which we can use %s.\"", ",", "cores", ",", "list", "(", "mems", ")", ",", "allowedMems", ")", "result", ".", "append", "(", "allowedMems", ")", "assert", "len", "(", "result", ")", "==", "len", "(", "coreAssignment", ")", "if", "any", "(", "result", ")", "and", "os", ".", "path", ".", "isdir", "(", "'/sys/devices/system/node/'", ")", ":", "return", "result", "else", ":", "# All runs get the empty list of memory regions", "# because this system has no NUMA support", "return", "None", "except", "ValueError", "as", "e", ":", "sys", ".", "exit", "(", "\"Could not read memory information from kernel: {0}\"", ".", "format", "(", "e", ")", ")" ]
Get an assignment of memory banks to runs that fits to the given coreAssignment, i.e., no run is allowed to use memory that is not local (on the same NUMA node) to one of its CPU cores.
[ "Get", "an", "assignment", "of", "memory", "banks", "to", "runs", "that", "fits", "to", "the", "given", "coreAssignment", "i", ".", "e", ".", "no", "run", "is", "allowed", "to", "use", "memory", "that", "is", "not", "local", "(", "on", "the", "same", "NUMA", "node", ")", "to", "one", "of", "its", "CPU", "cores", "." ]
python
train
42.448276
MacHu-GWU/sqlalchemy_mate-project
sqlalchemy_mate/credential.py
https://github.com/MacHu-GWU/sqlalchemy_mate-project/blob/946754744c8870f083fd7b4339fca15d1d6128b2/sqlalchemy_mate/credential.py#L191-L241
def from_env(cls, prefix, kms_decrypt=False, aws_profile=None):
    """
    Load database credential from env variable.

    - host: ENV.{PREFIX}_HOST
    - port: ENV.{PREFIX}_PORT
    - database: ENV.{PREFIX}_DATABASE
    - username: ENV.{PREFIX}_USERNAME
    - password: ENV.{PREFIX}_PASSWORD

    :param prefix: str
    :param kms_decrypt: bool
    :param aws_profile: str
    """
    if len(prefix) < 1:
        raise ValueError("prefix can't be empty")

    if len(set(prefix).difference(set(string.ascii_uppercase + "_"))):
        raise ValueError("prefix can only use [A-Z] and '_'!")

    if not prefix.endswith("_"):
        prefix = prefix + "_"

    data = dict(
        host=os.getenv(prefix + "HOST"),
        port=os.getenv(prefix + "PORT"),
        database=os.getenv(prefix + "DATABASE"),
        username=os.getenv(prefix + "USERNAME"),
        password=os.getenv(prefix + "PASSWORD"),
    )
    if kms_decrypt is True:  # pragma: no cover
        import boto3
        from base64 import b64decode

        if aws_profile is not None:
            ses = boto3.Session(profile_name=aws_profile)
            kms = ses.client("kms")
        else:
            kms = boto3.client("kms")

        def decrypt(kms, text):
            return kms.decrypt(
                CiphertextBlob=b64decode(text.encode("utf-8"))
            )["Plaintext"].decode("utf-8")

        data = {
            key: value if value is None else decrypt(kms, str(value))
            for key, value in data.items()
        }

    return cls(**data)
[ "def", "from_env", "(", "cls", ",", "prefix", ",", "kms_decrypt", "=", "False", ",", "aws_profile", "=", "None", ")", ":", "if", "len", "(", "prefix", ")", "<", "1", ":", "raise", "ValueError", "(", "\"prefix can't be empty\"", ")", "if", "len", "(", "set", "(", "prefix", ")", ".", "difference", "(", "set", "(", "string", ".", "ascii_uppercase", "+", "\"_\"", ")", ")", ")", ":", "raise", "ValueError", "(", "\"prefix can only use [A-Z] and '_'!\"", ")", "if", "not", "prefix", ".", "endswith", "(", "\"_\"", ")", ":", "prefix", "=", "prefix", "+", "\"_\"", "data", "=", "dict", "(", "host", "=", "os", ".", "getenv", "(", "prefix", "+", "\"HOST\"", ")", ",", "port", "=", "os", ".", "getenv", "(", "prefix", "+", "\"PORT\"", ")", ",", "database", "=", "os", ".", "getenv", "(", "prefix", "+", "\"DATABASE\"", ")", ",", "username", "=", "os", ".", "getenv", "(", "prefix", "+", "\"USERNAME\"", ")", ",", "password", "=", "os", ".", "getenv", "(", "prefix", "+", "\"PASSWORD\"", ")", ",", ")", "if", "kms_decrypt", "is", "True", ":", "# pragma: no cover", "import", "boto3", "from", "base64", "import", "b64decode", "if", "aws_profile", "is", "not", "None", ":", "kms", "=", "boto3", ".", "client", "(", "\"kms\"", ")", "else", ":", "ses", "=", "boto3", ".", "Session", "(", "profile_name", "=", "aws_profile", ")", "kms", "=", "ses", ".", "client", "(", "\"kms\"", ")", "def", "decrypt", "(", "kms", ",", "text", ")", ":", "return", "kms", ".", "decrypt", "(", "CiphertextBlob", "=", "b64decode", "(", "text", ".", "encode", "(", "\"utf-8\"", ")", ")", ")", "[", "\"Plaintext\"", "]", ".", "decode", "(", "\"utf-8\"", ")", "data", "=", "{", "key", ":", "value", "if", "value", "is", "None", "else", "decrypt", "(", "kms", ",", "str", "(", "value", ")", ")", "for", "key", ",", "value", "in", "data", ".", "items", "(", ")", "}", "return", "cls", "(", "*", "*", "data", ")" ]
Load database credential from env variable. - host: ENV.{PREFIX}_HOST - port: ENV.{PREFIX}_PORT - database: ENV.{PREFIX}_DATABASE - username: ENV.{PREFIX}_USERNAME - password: ENV.{PREFIX}_PASSWORD :param prefix: str :param kms_decrypt: bool :param aws_profile: str
[ "Load", "database", "credential", "from", "env", "variable", "." ]
python
train
32.313725
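A hypothetical usage sketch; the class name Credential and the DB_ prefix are assumptions, not confirmed by the snippet:

.. code:: python

    import os

    os.environ.update({
        'DB_HOST': 'localhost', 'DB_PORT': '5432', 'DB_DATABASE': 'test',
        'DB_USERNAME': 'admin', 'DB_PASSWORD': 'secret',
    })
    # assumes the classmethod above lives on a Credential class
    cred = Credential.from_env('DB')   # the trailing '_' is appended automatically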
redodo/formats
formats/helpers.py
https://github.com/redodo/formats/blob/5bc7a79a2c93ef895534edbbf83f1efe2f62e081/formats/helpers.py#L28-L44
def discover_yaml(bank=None, **meta): """Discovers the YAML format and registers it if available. Install YAML support via PIP:: pip install PyYAML :param bank: The format bank to register the format in :param meta: Extra information associated with the format """ try: import yaml if bank is None: bank = default_bank bank.register('yaml', yaml.load, yaml.dump, **meta) except ImportError: pass
[ "def", "discover_yaml", "(", "bank", "=", "None", ",", "*", "*", "meta", ")", ":", "try", ":", "import", "yaml", "if", "bank", "is", "None", ":", "bank", "=", "default_bank", "bank", ".", "register", "(", "'yaml'", ",", "yaml", ".", "load", ",", "yaml", ".", "dump", ",", "*", "*", "meta", ")", "except", "ImportError", ":", "pass" ]
Discovers the YAML format and registers it if available. Install YAML support via PIP:: pip install PyYAML :param bank: The format bank to register the format in :param meta: Extra information associated with the format
[ "Discovers", "the", "YAML", "format", "and", "registers", "it", "if", "available", "." ]
python
train
27.176471
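The same optional-dependency pattern generalizes to other formats; a sketch for JSON that reuses the default_bank and bank.register API visible above (json is stdlib, so the ImportError guard is kept only to mirror discover_yaml):

.. code:: python

    def discover_json(bank=None, **meta):
        try:
            import json
            if bank is None:
                bank = default_bank   # assumed importable, as in discover_yaml
            bank.register('json', json.loads, json.dumps, **meta)
        except ImportError:
            pass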
gem/oq-engine
openquake/commonlib/logictree.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commonlib/logictree.py#L873-L884
def _parse_planar_geometry_surface(self, node): """ Parses a planar geometry surface """ nodes = [] for key in ["topLeft", "topRight", "bottomRight", "bottomLeft"]: nodes.append(geo.Point(getattr(node, key)["lon"], getattr(node, key)["lat"], getattr(node, key)["depth"])) top_left, top_right, bottom_right, bottom_left = tuple(nodes) return geo.PlanarSurface.from_corner_points( top_left, top_right, bottom_right, bottom_left)
[ "def", "_parse_planar_geometry_surface", "(", "self", ",", "node", ")", ":", "nodes", "=", "[", "]", "for", "key", "in", "[", "\"topLeft\"", ",", "\"topRight\"", ",", "\"bottomRight\"", ",", "\"bottomLeft\"", "]", ":", "nodes", ".", "append", "(", "geo", ".", "Point", "(", "getattr", "(", "node", ",", "key", ")", "[", "\"lon\"", "]", ",", "getattr", "(", "node", ",", "key", ")", "[", "\"lat\"", "]", ",", "getattr", "(", "node", ",", "key", ")", "[", "\"depth\"", "]", ")", ")", "top_left", ",", "top_right", ",", "bottom_right", ",", "bottom_left", "=", "tuple", "(", "nodes", ")", "return", "geo", ".", "PlanarSurface", ".", "from_corner_points", "(", "top_left", ",", "top_right", ",", "bottom_right", ",", "bottom_left", ")" ]
Parses a planar geometry surface
[ "Parses", "a", "planar", "geometry", "surface" ]
python
train
47.083333
tomprince/txgithub
txgithub/api.py
https://github.com/tomprince/txgithub/blob/3bd5eebb25db013e2193e6a102a91049f356710d/txgithub/api.py#L336-L347
def create(self, repo_user, repo_name, issue_number, body):
    """
    POST /repos/:owner/:repo/issues/:number/comments

    :param issue_number: The issue's (or pull request's) number
    :param body: The body of this comment
    """
    return self.api.makeRequest(
        ['repos', repo_user, repo_name,
         'issues', issue_number, 'comments'],
        method='POST',
        post=dict(body=body))
[ "def", "create", "(", "self", ",", "repo_user", ",", "repo_name", ",", "issue_number", ",", "body", ")", ":", "return", "self", ".", "api", ".", "makeRequest", "(", "[", "'repos'", ",", "repo_user", ",", "repo_name", ",", "'issues'", ",", "issue_number", ",", "'comments'", "]", ",", "method", "=", "'POST'", ",", "post", "=", "dict", "(", "body", "=", "body", ")", ")" ]
POST /repos/:owner/:repo/issues/:number/comments

:param issue_number: The issue's (or pull request's) number
:param body: The body of this comment
[ "PATCH", "/", "repos", "/", ":", "owner", "/", ":", "repo", "/", "issues", "/", ":", "number", "/", "comments" ]
python
train
36.416667
kmn/coincheck
coincheck/order.py
https://github.com/kmn/coincheck/blob/4e1062f1e564cddceec2f6fb4d70fe3a3ab645bc/coincheck/order.py#L59-L66
def cancel(self,order_id): ''' cancel the specified order :param order_id: order_id to be canceled ''' url= 'https://coincheck.com/api/exchange/orders/' + order_id headers = make_header(url,access_key=self.access_key,secret_key=self.secret_key) r = requests.delete(url,headers=headers) return json.loads(r.text)
[ "def", "cancel", "(", "self", ",", "order_id", ")", ":", "url", "=", "'https://coincheck.com/api/exchange/orders/'", "+", "order_id", "headers", "=", "make_header", "(", "url", ",", "access_key", "=", "self", ".", "access_key", ",", "secret_key", "=", "self", ".", "secret_key", ")", "r", "=", "requests", ".", "delete", "(", "url", ",", "headers", "=", "headers", ")", "return", "json", ".", "loads", "(", "r", ".", "text", ")" ]
cancel the specified order :param order_id: order_id to be canceled
[ "cancel", "the", "specified", "order", ":", "param", "order_id", ":", "order_id", "to", "be", "canceled" ]
python
train
45
apache/airflow
airflow/contrib/hooks/azure_container_instance_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/azure_container_instance_hook.py#L149-L158
def delete(self, resource_group, name): """ Delete a container group :param resource_group: the name of the resource group :type resource_group: str :param name: the name of the container group :type name: str """ self.connection.container_groups.delete(resource_group, name)
[ "def", "delete", "(", "self", ",", "resource_group", ",", "name", ")", ":", "self", ".", "connection", ".", "container_groups", ".", "delete", "(", "resource_group", ",", "name", ")" ]
Delete a container group :param resource_group: the name of the resource group :type resource_group: str :param name: the name of the container group :type name: str
[ "Delete", "a", "container", "group" ]
python
test
33.1
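A hypothetical usage sketch; the conn_id default of 'azure_default' is an assumption about the hook's constructor:

.. code:: python

    from airflow.contrib.hooks.azure_container_instance_hook import (
        AzureContainerInstanceHook,
    )

    hook = AzureContainerInstanceHook(conn_id='azure_default')
    hook.delete('my-resource-group', 'my-container-group')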
widdowquinn/pyani
bin/genbank_get_genomes_by_taxon.py
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/genbank_get_genomes_by_taxon.py#L263-L275
def extract_filestem(data): """Extract filestem from Entrez eSummary data. Function expects esummary['DocumentSummarySet']['DocumentSummary'][0] Some illegal characters may occur in AssemblyName - for these, a more robust regex replace/escape may be required. Sadly, NCBI don't just use standard percent escapes, but instead replace certain characters with underscores: white space, slash, comma, hash, brackets. """ escapes = re.compile(r"[\s/,#\(\)]") escname = re.sub(escapes, '_', data['AssemblyName']) return '_'.join([data['AssemblyAccession'], escname])
[ "def", "extract_filestem", "(", "data", ")", ":", "escapes", "=", "re", ".", "compile", "(", "r\"[\\s/,#\\(\\)]\"", ")", "escname", "=", "re", ".", "sub", "(", "escapes", ",", "'_'", ",", "data", "[", "'AssemblyName'", "]", ")", "return", "'_'", ".", "join", "(", "[", "data", "[", "'AssemblyAccession'", "]", ",", "escname", "]", ")" ]
Extract filestem from Entrez eSummary data. Function expects esummary['DocumentSummarySet']['DocumentSummary'][0] Some illegal characters may occur in AssemblyName - for these, a more robust regex replace/escape may be required. Sadly, NCBI don't just use standard percent escapes, but instead replace certain characters with underscores: white space, slash, comma, hash, brackets.
[ "Extract", "filestem", "from", "Entrez", "eSummary", "data", "." ]
python
train
45.384615
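A worked example of the escape rule, with made-up accession and assembly values:

.. code:: python

    import re

    escapes = re.compile(r"[\s/,#\(\)]")
    data = {'AssemblyAccession': 'GCF_000011545.1',
            'AssemblyName': 'ASM1154v1 (test)'}
    escname = re.sub(escapes, '_', data['AssemblyName'])
    print('_'.join([data['AssemblyAccession'], escname]))
    # GCF_000011545.1_ASM1154v1__test_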
BD2KOnFHIR/i2b2model
i2b2model/data/i2b2observationfact.py
https://github.com/BD2KOnFHIR/i2b2model/blob/9d49bb53b0733dd83ab5b716014865e270a3c903/i2b2model/data/i2b2observationfact.py#L113-L120
def _date_val(self, dt: datetime) -> None: """ Add a date value :param dt: datetime to add """ self._tval_char = dt.strftime('%Y-%m-%d %H:%M') self._nval_num = (dt.year * 10000) + (dt.month * 100) + dt.day + \ (((dt.hour / 100.0) + (dt.minute / 10000.0)) if isinstance(dt, datetime) else 0)
[ "def", "_date_val", "(", "self", ",", "dt", ":", "datetime", ")", "->", "None", ":", "self", ".", "_tval_char", "=", "dt", ".", "strftime", "(", "'%Y-%m-%d %H:%M'", ")", "self", ".", "_nval_num", "=", "(", "dt", ".", "year", "*", "10000", ")", "+", "(", "dt", ".", "month", "*", "100", ")", "+", "dt", ".", "day", "+", "(", "(", "(", "dt", ".", "hour", "/", "100.0", ")", "+", "(", "dt", ".", "minute", "/", "10000.0", ")", ")", "if", "isinstance", "(", "dt", ",", "datetime", ")", "else", "0", ")" ]
Add a date value :param dt: datetime to add
[ "Add", "a", "date", "value", ":", "param", "dt", ":", "datetime", "to", "add" ]
python
train
44.5
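The packed numeric encoding is easiest to see with a concrete timestamp; this self-contained mirror of the expression above uses an arbitrary example date:

.. code:: python

    from datetime import datetime

    dt = datetime(2019, 3, 5, 14, 30)
    tval = dt.strftime('%Y-%m-%d %H:%M')   # '2019-03-05 14:30'
    nval = (dt.year * 10000) + (dt.month * 100) + dt.day + \
        ((dt.hour / 100.0) + (dt.minute / 10000.0))
    print(tval, nval)   # 2019-03-05 14:30  ~20190305.143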
project-rig/rig
rig/machine_control/struct_file.py
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/struct_file.py#L101-L106
def num(value): """Convert a value from one of several bases to an int.""" if re_hex_num.match(value): return int(value, base=16) else: return int(value)
[ "def", "num", "(", "value", ")", ":", "if", "re_hex_num", ".", "match", "(", "value", ")", ":", "return", "int", "(", "value", ",", "base", "=", "16", ")", "else", ":", "return", "int", "(", "value", ")" ]
Convert a value from one of several bases to an int.
[ "Convert", "a", "value", "from", "one", "of", "several", "bases", "to", "an", "int", "." ]
python
train
29.333333
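A self-contained sketch; re_hex_num is defined elsewhere in the module, so the pattern below is an assumption:

.. code:: python

    import re

    re_hex_num = re.compile(r'0x[0-9a-fA-F]+$')   # assumed pattern

    def num(value):
        if re_hex_num.match(value):
            return int(value, base=16)
        else:
            return int(value)

    print(num('0x1f'), num('42'))   # 31 42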
mkoura/dump2polarion
dump2polarion/properties.py
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/properties.py#L137-L153
def remove_response_property(xml_root):
    """Removes response properties if they exist."""
    if xml_root.tag == "testsuites":
        properties = xml_root.find("properties")
        resp_properties = []
        for prop in properties:
            prop_name = prop.get("name", "")
            if "polarion-response-" in prop_name:
                resp_properties.append(prop)
        for resp_property in resp_properties:
            properties.remove(resp_property)
    elif xml_root.tag in ("testcases", "requirements"):
        resp_properties = xml_root.find("response-properties")
        if resp_properties is not None:
            xml_root.remove(resp_properties)
    else:
        raise Dump2PolarionException(_NOT_EXPECTED_FORMAT_MSG)
[ "def", "remove_response_property", "(", "xml_root", ")", ":", "if", "xml_root", ".", "tag", "==", "\"testsuites\"", ":", "properties", "=", "xml_root", ".", "find", "(", "\"properties\"", ")", "resp_properties", "=", "[", "]", "for", "prop", "in", "properties", ":", "prop_name", "=", "prop", ".", "get", "(", "\"name\"", ",", "\"\"", ")", "if", "\"polarion-response-\"", "in", "prop_name", ":", "resp_properties", ".", "append", "(", "prop", ")", "for", "resp_property", "in", "resp_properties", ":", "properties", ".", "remove", "(", "resp_property", ")", "elif", "xml_root", ".", "tag", "in", "(", "\"testcases\"", ",", "\"requirements\"", ")", ":", "resp_properties", "=", "xml_root", ".", "find", "(", "\"response-properties\"", ")", "if", "resp_properties", "is", "not", "None", ":", "xml_root", ".", "remove", "(", "resp_properties", ")", "else", ":", "raise", "Dump2PolarionException", "(", "_NOT_EXPECTED_FORMAT_MSG", ")" ]
Removes response properties if they exist.
[ "Removes", "response", "properties", "if", "exist", "." ]
python
train
42.705882
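A runnable sketch using the stdlib ElementTree, assuming the function above is in scope (dump2polarion's real element type may differ):

.. code:: python

    import xml.etree.ElementTree as ET

    root = ET.fromstring(
        '<testsuites><properties>'
        '<property name="polarion-response-test" value="x"/>'
        '<property name="polarion-testrun-id" value="tr_1"/>'
        '</properties></testsuites>')
    remove_response_property(root)
    print(ET.tostring(root).decode())   # only the testrun-id property remains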
kensho-technologies/graphql-compiler
graphql_compiler/compiler/workarounds/orientdb_eval_scheduling.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/workarounds/orientdb_eval_scheduling.py#L101-L109
def _create_tautological_expression_for_location(query_metadata_table, location): """For a given location, create a BinaryComposition that always evaluates to 'true'.""" location_type = query_metadata_table.get_location_info(location).type location_exists = BinaryComposition( u'!=', ContextField(location, location_type), NullLiteral) location_does_not_exist = BinaryComposition( u'=', ContextField(location, location_type), NullLiteral) return BinaryComposition(u'||', location_exists, location_does_not_exist)
[ "def", "_create_tautological_expression_for_location", "(", "query_metadata_table", ",", "location", ")", ":", "location_type", "=", "query_metadata_table", ".", "get_location_info", "(", "location", ")", ".", "type", "location_exists", "=", "BinaryComposition", "(", "u'!='", ",", "ContextField", "(", "location", ",", "location_type", ")", ",", "NullLiteral", ")", "location_does_not_exist", "=", "BinaryComposition", "(", "u'='", ",", "ContextField", "(", "location", ",", "location_type", ")", ",", "NullLiteral", ")", "return", "BinaryComposition", "(", "u'||'", ",", "location_exists", ",", "location_does_not_exist", ")" ]
For a given location, create a BinaryComposition that always evaluates to 'true'.
[ "For", "a", "given", "location", "create", "a", "BinaryComposition", "that", "always", "evaluates", "to", "true", "." ]
python
train
60.111111
saltstack/salt
salt/cloud/clouds/profitbricks.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/profitbricks.py#L1227-L1251
def _get_firewall_rules(firewall_rules): ''' Construct a list of optional firewall rules from the cloud profile. ''' ret = [] for key, value in six.iteritems(firewall_rules): # Verify the required 'protocol' property is present in the cloud # profile config if 'protocol' not in firewall_rules[key].keys(): raise SaltCloudConfigError( 'The firewall rule \'{0}\' is missing \'protocol\''.format(key) ) ret.append(FirewallRule( name=key, protocol=firewall_rules[key].get('protocol', None), source_mac=firewall_rules[key].get('source_mac', None), source_ip=firewall_rules[key].get('source_ip', None), target_ip=firewall_rules[key].get('target_ip', None), port_range_start=firewall_rules[key].get('port_range_start', None), port_range_end=firewall_rules[key].get('port_range_end', None), icmp_type=firewall_rules[key].get('icmp_type', None), icmp_code=firewall_rules[key].get('icmp_code', None) )) return ret
[ "def", "_get_firewall_rules", "(", "firewall_rules", ")", ":", "ret", "=", "[", "]", "for", "key", ",", "value", "in", "six", ".", "iteritems", "(", "firewall_rules", ")", ":", "# Verify the required 'protocol' property is present in the cloud", "# profile config", "if", "'protocol'", "not", "in", "firewall_rules", "[", "key", "]", ".", "keys", "(", ")", ":", "raise", "SaltCloudConfigError", "(", "'The firewall rule \\'{0}\\' is missing \\'protocol\\''", ".", "format", "(", "key", ")", ")", "ret", ".", "append", "(", "FirewallRule", "(", "name", "=", "key", ",", "protocol", "=", "firewall_rules", "[", "key", "]", ".", "get", "(", "'protocol'", ",", "None", ")", ",", "source_mac", "=", "firewall_rules", "[", "key", "]", ".", "get", "(", "'source_mac'", ",", "None", ")", ",", "source_ip", "=", "firewall_rules", "[", "key", "]", ".", "get", "(", "'source_ip'", ",", "None", ")", ",", "target_ip", "=", "firewall_rules", "[", "key", "]", ".", "get", "(", "'target_ip'", ",", "None", ")", ",", "port_range_start", "=", "firewall_rules", "[", "key", "]", ".", "get", "(", "'port_range_start'", ",", "None", ")", ",", "port_range_end", "=", "firewall_rules", "[", "key", "]", ".", "get", "(", "'port_range_end'", ",", "None", ")", ",", "icmp_type", "=", "firewall_rules", "[", "key", "]", ".", "get", "(", "'icmp_type'", ",", "None", ")", ",", "icmp_code", "=", "firewall_rules", "[", "key", "]", ".", "get", "(", "'icmp_code'", ",", "None", ")", ")", ")", "return", "ret" ]
Construct a list of optional firewall rules from the cloud profile.
[ "Construct", "a", "list", "of", "optional", "firewall", "rules", "from", "the", "cloud", "profile", "." ]
python
train
43.72
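The expected shape of the cloud-profile input, as a hypothetical example; omitting 'protocol' in any rule raises SaltCloudConfigError:

.. code:: python

    firewall_rules = {
        'allow-ssh':  {'protocol': 'TCP', 'port_range_start': 22,
                       'port_range_end': 22},
        'allow-ping': {'protocol': 'ICMP', 'icmp_type': 8, 'icmp_code': 0},
    }
    rules = _get_firewall_rules(firewall_rules)   # assumes the helper above is in scope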
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/message.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/message.py#L603-L607
def _succeed(self, request_id, reply, duration): """Publish a CommandSucceededEvent.""" self.listeners.publish_command_success( duration, reply, self.name, request_id, self.sock_info.address, self.op_id)
[ "def", "_succeed", "(", "self", ",", "request_id", ",", "reply", ",", "duration", ")", ":", "self", ".", "listeners", ".", "publish_command_success", "(", "duration", ",", "reply", ",", "self", ".", "name", ",", "request_id", ",", "self", ".", "sock_info", ".", "address", ",", "self", ".", "op_id", ")" ]
Publish a CommandSucceededEvent.
[ "Publish", "a", "CommandSucceededEvent", "." ]
python
train
47.8
CybOXProject/mixbox
mixbox/entities.py
https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/entities.py#L777-L794
def _fix_example_namespace(self): """Attempts to resolve issues where our samples use 'http://example.com/' for our example namespace but python-stix uses 'http://example.com' by removing the former. """ example_prefix = 'example' # Example ns prefix idgen_prefix = idgen.get_id_namespace_prefix() # If the ID namespace alias doesn't match the example alias, return. if idgen_prefix != example_prefix: return # If the example namespace prefix isn't in the parsed namespace # prefixes, return. if example_prefix not in self._input_namespaces: return self._input_namespaces[example_prefix] = idgen.EXAMPLE_NAMESPACE.name
[ "def", "_fix_example_namespace", "(", "self", ")", ":", "example_prefix", "=", "'example'", "# Example ns prefix", "idgen_prefix", "=", "idgen", ".", "get_id_namespace_prefix", "(", ")", "# If the ID namespace alias doesn't match the example alias, return.", "if", "idgen_prefix", "!=", "example_prefix", ":", "return", "# If the example namespace prefix isn't in the parsed namespace", "# prefixes, return.", "if", "example_prefix", "not", "in", "self", ".", "_input_namespaces", ":", "return", "self", ".", "_input_namespaces", "[", "example_prefix", "]", "=", "idgen", ".", "EXAMPLE_NAMESPACE", ".", "name" ]
Attempts to resolve issues where our samples use 'http://example.com/' for our example namespace but python-stix uses 'http://example.com' by removing the former.
[ "Attempts", "to", "resolve", "issues", "where", "our", "samples", "use", "http", ":", "//", "example", ".", "com", "/", "for", "our", "example", "namespace", "but", "python", "-", "stix", "uses", "http", ":", "//", "example", ".", "com", "by", "removing", "the", "former", "." ]
python
train
40.277778
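A minimal stand-in showing the little-endian truncation; only the Data attribute the method reads is modeled:

.. code:: python

    class UIntStub:
        def __init__(self, data):
            self.Data = data

        def GetHashCode(self):
            slice_length = 4 if len(self.Data) >= 4 else len(self.Data)
            return int.from_bytes(self.Data[:slice_length], 'little')

    print(UIntStub(b'\x01\x00\x00\x00\xff').GetHashCode())   # 1 (extra bytes ignored)
    print(UIntStub(b'\x02\x01').GetHashCode())               # 258 (short buffers allowed)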
CityOfZion/neo-python-core
neocore/UIntBase.py
https://github.com/CityOfZion/neo-python-core/blob/786c02cc2f41712d70b1f064ae3d67f86167107f/neocore/UIntBase.py#L33-L36
def GetHashCode(self): """uint32 identifier""" slice_length = 4 if len(self.Data) >= 4 else len(self.Data) return int.from_bytes(self.Data[:slice_length], 'little')
[ "def", "GetHashCode", "(", "self", ")", ":", "slice_length", "=", "4", "if", "len", "(", "self", ".", "Data", ")", ">=", "4", "else", "len", "(", "self", ".", "Data", ")", "return", "int", ".", "from_bytes", "(", "self", ".", "Data", "[", ":", "slice_length", "]", ",", "'little'", ")" ]
uint32 identifier
[ "uint32", "identifier" ]
python
train
46.25
CellProfiler/centrosome
centrosome/cpmorphology.py
https://github.com/CellProfiler/centrosome/blob/7bd9350a2d4ae1b215b81eabcecfe560bbb1f32a/centrosome/cpmorphology.py#L3378-L3402
def spur(image, mask=None, iterations=1):
    '''Remove spur pixels from an image

    0 0 0    0 0 0
    0 1 0 -> 0 0 0
    0 0 1    0 0 ?
    '''
    global spur_table_1,spur_table_2
    if mask is None:
        masked_image = image
    else:
        masked_image = image.astype(bool).copy()
        masked_image[~mask] = False
    index_i, index_j, masked_image = prepare_for_index_lookup(masked_image, False)
    if iterations is None:
        iterations = len(index_i)
    for i in range(iterations):
        for table in (spur_table_1, spur_table_2):
            index_i, index_j = index_lookup(index_i, index_j,
                                            masked_image, table, 1)
    masked_image = extract_from_image_lookup(image, index_i, index_j)
    if not mask is None:
        masked_image[~mask] = image[~mask]
    return masked_image
[ "def", "spur", "(", "image", ",", "mask", "=", "None", ",", "iterations", "=", "1", ")", ":", "global", "spur_table_1", ",", "spur_table_2", "if", "mask", "is", "None", ":", "masked_image", "=", "image", "else", ":", "masked_image", "=", "image", ".", "astype", "(", "bool", ")", ".", "copy", "(", ")", "masked_image", "[", "~", "mask", "]", "=", "False", "index_i", ",", "index_j", ",", "masked_image", "=", "prepare_for_index_lookup", "(", "masked_image", ",", "False", ")", "if", "iterations", "is", "None", ":", "iterations", "=", "len", "(", "index_i", ")", "for", "i", "in", "range", "(", "iterations", ")", ":", "for", "table", "in", "(", "spur_table_1", ",", "spur_table_2", ")", ":", "index_i", ",", "index_j", "=", "index_lookup", "(", "index_i", ",", "index_j", ",", "masked_image", ",", "table", ",", "1", ")", "masked_image", "=", "extract_from_image_lookup", "(", "image", ",", "index_i", ",", "index_j", ")", "if", "not", "mask", "is", "None", ":", "masked_image", "[", "~", "mask", "]", "=", "image", "[", "~", "mask", "]", "return", "masked_image" ]
Remove spur pixels from an image

0 0 0    0 0 0
0 1 0 -> 0 0 0
0 0 1    0 0 ?
[ "Remove", "spur", "pixels", "from", "an", "image", "0", "0", "0", "0", "0", "0", "0", "1", "0", "-", ">", "0", "0", "0", "0", "0", "1", "0", "0", "?" ]
python
train
35.68
pandas-dev/pandas
pandas/core/nanops.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/nanops.py#L276-L304
def _wrap_results(result, dtype, fill_value=None): """ wrap our results if needed """ if is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype): if fill_value is None: # GH#24293 fill_value = iNaT if not isinstance(result, np.ndarray): tz = getattr(dtype, 'tz', None) assert not isna(fill_value), "Expected non-null fill_value" if result == fill_value: result = np.nan result = tslibs.Timestamp(result, tz=tz) else: result = result.view(dtype) elif is_timedelta64_dtype(dtype): if not isinstance(result, np.ndarray): if result == fill_value: result = np.nan # raise if we have a timedelta64[ns] which is too large if np.fabs(result) > _int64_max: raise ValueError("overflow in timedelta operation") result = tslibs.Timedelta(result, unit='ns') else: result = result.astype('i8').view(dtype) return result
[ "def", "_wrap_results", "(", "result", ",", "dtype", ",", "fill_value", "=", "None", ")", ":", "if", "is_datetime64_dtype", "(", "dtype", ")", "or", "is_datetime64tz_dtype", "(", "dtype", ")", ":", "if", "fill_value", "is", "None", ":", "# GH#24293", "fill_value", "=", "iNaT", "if", "not", "isinstance", "(", "result", ",", "np", ".", "ndarray", ")", ":", "tz", "=", "getattr", "(", "dtype", ",", "'tz'", ",", "None", ")", "assert", "not", "isna", "(", "fill_value", ")", ",", "\"Expected non-null fill_value\"", "if", "result", "==", "fill_value", ":", "result", "=", "np", ".", "nan", "result", "=", "tslibs", ".", "Timestamp", "(", "result", ",", "tz", "=", "tz", ")", "else", ":", "result", "=", "result", ".", "view", "(", "dtype", ")", "elif", "is_timedelta64_dtype", "(", "dtype", ")", ":", "if", "not", "isinstance", "(", "result", ",", "np", ".", "ndarray", ")", ":", "if", "result", "==", "fill_value", ":", "result", "=", "np", ".", "nan", "# raise if we have a timedelta64[ns] which is too large", "if", "np", ".", "fabs", "(", "result", ")", ">", "_int64_max", ":", "raise", "ValueError", "(", "\"overflow in timedelta operation\"", ")", "result", "=", "tslibs", ".", "Timedelta", "(", "result", ",", "unit", "=", "'ns'", ")", "else", ":", "result", "=", "result", ".", "astype", "(", "'i8'", ")", ".", "view", "(", "dtype", ")", "return", "result" ]
wrap our results if needed
[ "wrap", "our", "results", "if", "needed" ]
python
train
35.586207
materialsproject/pymatgen
pymatgen/io/qchem_deprecated.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/qchem_deprecated.py#L226-L233
def set_velocities(self, velocities): """ :param velocities (au): list of list of atom velocities :return: """ assert len(velocities) == len(self.mol) self.params["velocity"] = velocities
[ "def", "set_velocities", "(", "self", ",", "velocities", ")", ":", "assert", "len", "(", "velocities", ")", "==", "len", "(", "self", ".", "mol", ")", "self", ".", "params", "[", "\"velocity\"", "]", "=", "velocities" ]
:param velocities (au): list of list of atom velocities :return:
[ ":", "param", "velocities", "(", "au", ")", ":", "list", "of", "list", "of", "atom", "velocities", ":", "return", ":" ]
python
train
29.875
demurgos/py-pathmatch
tools/lint_string_prefix.py
https://github.com/demurgos/py-pathmatch/blob/70b3aa99ee34da8b80b6ec6340862b760159d2a1/tools/lint_string_prefix.py#L45-L53
def process_tokens(self, tokens):
    u"""
    Iterate over tokens to find strings and ensure that they are prefixed.
    :param tokens:
    :return:
    """
    for (tok_type, token, (start_row, _), _, _) in tokens:
        if tok_type == tokenize.STRING:
            self._check_string(token, start_row)
[ "def", "process_tokens", "(", "self", ",", "tokens", ")", ":", "for", "(", "tok_type", ",", "token", ",", "(", "start_row", ",", "_", ")", ",", "_", ",", "_", ")", "in", "tokens", ":", "if", "tok_type", "==", "tokenize", ".", "STRING", ":", "self", ".", "_check_string", "(", "token", ",", "start_row", ")" ]
u""" Iterate other tokens to find strings and ensure that they are prefixed. :param tokens: :return:
[ "u", "Iterate", "other", "tokens", "to", "find", "strings", "and", "ensure", "that", "they", "are", "prefixed", ".", ":", "param", "tokens", ":", ":", "return", ":" ]
python
train
36.666667
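The 5-tuples unpacked above are the shape produced by the stdlib tokenize module; a standalone sketch of the same string scan:

.. code:: python

    import io
    import tokenize

    src = "x = u'hello'\ny = 'world'\n"
    for tok_type, token, (start_row, _), _, _ in tokenize.generate_tokens(
            io.StringIO(src).readline):
        if tok_type == tokenize.STRING:
            print(start_row, token)   # 1 u'hello'  /  2 'world'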
tensorflow/tensor2tensor
tensor2tensor/models/video/nfg_interpolate.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/nfg_interpolate.py#L153-L164
def get_summaries_log_dir(decode_hp, output_dir, dataset_split): """Get nested summaries_log_dir based on decode_hp.""" child_dir = decode_hp.summaries_log_dir level_dir = "".join([str(level) for level in decode_hp.level_interp]) if decode_hp.channel_interp == "all": rank_dir = "all" else: rank_dir = "rank_%d" % decode_hp.rank_interp child_dir = "%s/%s_%s" % (child_dir, level_dir, rank_dir) if dataset_split is not None: child_dir += "_{}".format(dataset_split) return os.path.join(output_dir, child_dir)
[ "def", "get_summaries_log_dir", "(", "decode_hp", ",", "output_dir", ",", "dataset_split", ")", ":", "child_dir", "=", "decode_hp", ".", "summaries_log_dir", "level_dir", "=", "\"\"", ".", "join", "(", "[", "str", "(", "level", ")", "for", "level", "in", "decode_hp", ".", "level_interp", "]", ")", "if", "decode_hp", ".", "channel_interp", "==", "\"all\"", ":", "rank_dir", "=", "\"all\"", "else", ":", "rank_dir", "=", "\"rank_%d\"", "%", "decode_hp", ".", "rank_interp", "child_dir", "=", "\"%s/%s_%s\"", "%", "(", "child_dir", ",", "level_dir", ",", "rank_dir", ")", "if", "dataset_split", "is", "not", "None", ":", "child_dir", "+=", "\"_{}\"", ".", "format", "(", "dataset_split", ")", "return", "os", ".", "path", ".", "join", "(", "output_dir", ",", "child_dir", ")" ]
Get nested summaries_log_dir based on decode_hp.
[ "Get", "nested", "summaries_log_dir", "based", "on", "decode_hp", "." ]
python
train
43.666667
nickmasster/xsmtplib
xsmtplib/xsmtplib.py
https://github.com/nickmasster/xsmtplib/blob/0207f5c72f2fec03f3ebdb3acb3a56401805f32f/xsmtplib/xsmtplib.py#L132-L183
def connect_proxy(self, proxy_host='localhost', proxy_port=0, proxy_type=socks.HTTP, host='localhost', port=0): """Connect to a host on a given port via proxy server If the hostname ends with a colon (`:') followed by a number, and there is no port specified, that suffix will be stripped off and the number interpreted as the port number to use. Note: This method is automatically invoked by __init__, if a host and proxy server are specified during instantiation. :param proxy_host: Hostname of proxy server :type proxy_host: string :param proxy_port: Port of proxy server, by default port for specified proxy type is used :type proxy_port: int :param proxy_type: Proxy type to use (see socks.PROXY_TYPES for details) :type proxy_type: int :param host: Hostname of SMTP server :type host: string :param port: Port of SMTP server, by default smtplib.SMTP_PORT is used :type port: int :return: Tuple of (code, msg) :rtype: tuple """ if proxy_type not in socks.DEFAULT_PORTS.keys(): raise NotSupportedProxyType (proxy_host, proxy_port) = self._parse_host(host=proxy_host, port=proxy_port) if not proxy_port: proxy_port = socks.DEFAULT_PORTS[proxy_type] (host, port) = self._parse_host(host=host, port=port) if self.debuglevel > 0: self._print_debug('connect: via proxy', proxy_host, proxy_port) s = socks.socksocket() s.set_proxy(proxy_type=proxy_type, addr=proxy_host, port=proxy_port) s.settimeout(self.timeout) if self.source_address is not None: s.bind(self.source_address) s.connect((host, port)) # todo # Send CRLF in order to get first response from destination server. # Probably it's needed only for HTTP proxies. Further investigation required. s.sendall(bCRLF) self.sock = s (code, msg) = self.getreply() if self.debuglevel > 0: self._print_debug('connect:', repr(msg)) return code, msg
[ "def", "connect_proxy", "(", "self", ",", "proxy_host", "=", "'localhost'", ",", "proxy_port", "=", "0", ",", "proxy_type", "=", "socks", ".", "HTTP", ",", "host", "=", "'localhost'", ",", "port", "=", "0", ")", ":", "if", "proxy_type", "not", "in", "socks", ".", "DEFAULT_PORTS", ".", "keys", "(", ")", ":", "raise", "NotSupportedProxyType", "(", "proxy_host", ",", "proxy_port", ")", "=", "self", ".", "_parse_host", "(", "host", "=", "proxy_host", ",", "port", "=", "proxy_port", ")", "if", "not", "proxy_port", ":", "proxy_port", "=", "socks", ".", "DEFAULT_PORTS", "[", "proxy_type", "]", "(", "host", ",", "port", ")", "=", "self", ".", "_parse_host", "(", "host", "=", "host", ",", "port", "=", "port", ")", "if", "self", ".", "debuglevel", ">", "0", ":", "self", ".", "_print_debug", "(", "'connect: via proxy'", ",", "proxy_host", ",", "proxy_port", ")", "s", "=", "socks", ".", "socksocket", "(", ")", "s", ".", "set_proxy", "(", "proxy_type", "=", "proxy_type", ",", "addr", "=", "proxy_host", ",", "port", "=", "proxy_port", ")", "s", ".", "settimeout", "(", "self", ".", "timeout", ")", "if", "self", ".", "source_address", "is", "not", "None", ":", "s", ".", "bind", "(", "self", ".", "source_address", ")", "s", ".", "connect", "(", "(", "host", ",", "port", ")", ")", "# todo", "# Send CRLF in order to get first response from destination server.", "# Probably it's needed only for HTTP proxies. Further investigation required.", "s", ".", "sendall", "(", "bCRLF", ")", "self", ".", "sock", "=", "s", "(", "code", ",", "msg", ")", "=", "self", ".", "getreply", "(", ")", "if", "self", ".", "debuglevel", ">", "0", ":", "self", ".", "_print_debug", "(", "'connect:'", ",", "repr", "(", "msg", ")", ")", "return", "code", ",", "msg" ]
Connect to a host on a given port via proxy server If the hostname ends with a colon (`:') followed by a number, and there is no port specified, that suffix will be stripped off and the number interpreted as the port number to use. Note: This method is automatically invoked by __init__, if a host and proxy server are specified during instantiation. :param proxy_host: Hostname of proxy server :type proxy_host: string :param proxy_port: Port of proxy server, by default port for specified proxy type is used :type proxy_port: int :param proxy_type: Proxy type to use (see socks.PROXY_TYPES for details) :type proxy_type: int :param host: Hostname of SMTP server :type host: string :param port: Port of SMTP server, by default smtplib.SMTP_PORT is used :type port: int :return: Tuple of (code, msg) :rtype: tuple
[ "Connect", "to", "a", "host", "on", "a", "given", "port", "via", "proxy", "server" ]
python
train
41
carta/ldap_tools
src/ldap_tools/group.py
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/group.py#L180-L190
def delete(config, group, force): """Delete an LDAP group.""" if not force: if not click.confirm( 'Confirm that you want to delete group {}'.format(group)): sys.exit("Deletion of {} aborted".format(group)) client = Client() client.prepare_connection() group_api = API(client) group_api.delete(group)
[ "def", "delete", "(", "config", ",", "group", ",", "force", ")", ":", "if", "not", "force", ":", "if", "not", "click", ".", "confirm", "(", "'Confirm that you want to delete group {}'", ".", "format", "(", "group", ")", ")", ":", "sys", ".", "exit", "(", "\"Deletion of {} aborted\"", ".", "format", "(", "group", ")", ")", "client", "=", "Client", "(", ")", "client", ".", "prepare_connection", "(", ")", "group_api", "=", "API", "(", "client", ")", "group_api", ".", "delete", "(", "group", ")" ]
Delete an LDAP group.
[ "Delete", "an", "LDAP", "group", "." ]
python
train
35.090909
scikit-learn-contrib/hdbscan
hdbscan/plots.py
https://github.com/scikit-learn-contrib/hdbscan/blob/e40ccef139e56e38adf7bd6912cd63efd97598f9/hdbscan/plots.py#L457-L481
def to_pandas(self): """Return a pandas dataframe representation of the condensed tree. Each row of the dataframe corresponds to an edge in the tree. The columns of the dataframe are `parent`, `child`, `lambda_val` and `child_size`. The `parent` and `child` are the ids of the parent and child nodes in the tree. Node ids less than the number of points in the original dataset represent individual points, while ids greater than the number of points are clusters. The `lambda_val` value is the value (1/distance) at which the `child` node leaves the cluster. The `child_size` is the number of points in the `child` node. """ try: from pandas import DataFrame, Series except ImportError: raise ImportError('You must have pandas installed to export pandas DataFrames') result = DataFrame(self._raw_tree) return result
[ "def", "to_pandas", "(", "self", ")", ":", "try", ":", "from", "pandas", "import", "DataFrame", ",", "Series", "except", "ImportError", ":", "raise", "ImportError", "(", "'You must have pandas installed to export pandas DataFrames'", ")", "result", "=", "DataFrame", "(", "self", ".", "_raw_tree", ")", "return", "result" ]
Return a pandas dataframe representation of the condensed tree. Each row of the dataframe corresponds to an edge in the tree. The columns of the dataframe are `parent`, `child`, `lambda_val` and `child_size`. The `parent` and `child` are the ids of the parent and child nodes in the tree. Node ids less than the number of points in the original dataset represent individual points, while ids greater than the number of points are clusters. The `lambda_val` value is the value (1/distance) at which the `child` node leaves the cluster. The `child_size` is the number of points in the `child` node.
[ "Return", "a", "pandas", "dataframe", "representation", "of", "the", "condensed", "tree", "." ]
python
train
37.96
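Typical usage via a fitted clusterer's condensed_tree_ attribute:

.. code:: python

    import hdbscan
    from sklearn.datasets import make_blobs

    X, _ = make_blobs(n_samples=60, random_state=0)
    clusterer = hdbscan.HDBSCAN().fit(X)
    df = clusterer.condensed_tree_.to_pandas()
    print(df.columns.tolist())   # ['parent', 'child', 'lambda_val', 'child_size']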
mila-iqia/fuel
fuel/downloaders/youtube_audio.py
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/downloaders/youtube_audio.py#L41-L57
def fill_subparser(subparser): """Sets up a subparser to download audio of YouTube videos. Adds the compulsory `--youtube-id` flag. Parameters ---------- subparser : :class:`argparse.ArgumentParser` Subparser handling the `youtube_audio` command. """ subparser.add_argument( '--youtube-id', type=str, required=True, help=("The YouTube ID of the video from which to extract audio, " "usually an 11-character string.") ) return download
[ "def", "fill_subparser", "(", "subparser", ")", ":", "subparser", ".", "add_argument", "(", "'--youtube-id'", ",", "type", "=", "str", ",", "required", "=", "True", ",", "help", "=", "(", "\"The YouTube ID of the video from which to extract audio, \"", "\"usually an 11-character string.\"", ")", ")", "return", "download" ]
Sets up a subparser to download audio of YouTube videos. Adds the compulsory `--youtube-id` flag. Parameters ---------- subparser : :class:`argparse.ArgumentParser` Subparser handling the `youtube_audio` command.
[ "Sets", "up", "a", "subparser", "to", "download", "audio", "of", "YouTube", "videos", "." ]
python
train
29.117647
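The same flag wiring as a self-contained argparse sketch, without Fuel's downloader machinery; the video id is a placeholder:

.. code:: python

    import argparse

    parser = argparse.ArgumentParser()
    sub = parser.add_subparsers().add_parser('youtube_audio')
    sub.add_argument('--youtube-id', type=str, required=True)

    args = parser.parse_args(['youtube_audio', '--youtube-id', 'abcdefghijk'])
    print(args.youtube_id)   # abcdefghijk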
TheGhouls/oct
oct/results/output.py
https://github.com/TheGhouls/oct/blob/7e9bddeb3b8495a26442b1c86744e9fb187fe88f/oct/results/output.py#L12-L21
def generate_graphs(data, name, results_dir):
    """Generate all reports from original dataframe

    :param dict data: dict containing raw and compiled results dataframes
    :param str name: name for prefixing graphs output
    :param str results_dir: results output directory
    """
    graphs.resp_graph_raw(data['raw'], name + '_response_times.svg', results_dir)
    graphs.resp_graph(data['compiled'], name + '_response_times_intervals.svg', results_dir)
    graphs.tp_graph(data['compiled'], name + '_throughput.svg', results_dir)
[ "def", "generate_graphs", "(", "data", ",", "name", ",", "results_dir", ")", ":", "graphs", ".", "resp_graph_raw", "(", "data", "[", "'raw'", "]", ",", "name", "+", "'_response_times.svg'", ",", "results_dir", ")", "graphs", ".", "resp_graph", "(", "data", "[", "'compiled'", "]", ",", "name", "+", "'_response_times_intervals.svg'", ",", "results_dir", ")", "graphs", ".", "tp_graph", "(", "data", "[", "'compiled'", "]", ",", "name", "+", "'_throughput.svg'", ",", "results_dir", ")" ]
Generate all reports from original dataframe

:param dict data: dict containing raw and compiled results dataframes
:param str name: name for prefixing graphs output
:param str results_dir: results output directory
[ "Generate", "all", "reports", "from", "original", "dataframe" ]
python
train
52.9
laike9m/pdir2
pdir/api.py
https://github.com/laike9m/pdir2/blob/c4550523fe9b54bf9b755ffa28900a5e9f493d02/pdir/api.py#L74-L94
def search(self, term: str, case_sensitive: bool = False) -> 'PrettyDir': """Searches for names that match some pattern. Args: term: String used to match names. A name is returned if it matches the whole search term. case_sensitive: Boolean to match case or not, default is False (case insensitive). Return: A PrettyDir object with matched names. """ if case_sensitive: return PrettyDir( self.obj, [pattr for pattr in self.pattrs if term in pattr.name] ) else: term = term.lower() return PrettyDir( self.obj, [pattr for pattr in self.pattrs if term in pattr.name.lower()] )
[ "def", "search", "(", "self", ",", "term", ":", "str", ",", "case_sensitive", ":", "bool", "=", "False", ")", "->", "'PrettyDir'", ":", "if", "case_sensitive", ":", "return", "PrettyDir", "(", "self", ".", "obj", ",", "[", "pattr", "for", "pattr", "in", "self", ".", "pattrs", "if", "term", "in", "pattr", ".", "name", "]", ")", "else", ":", "term", "=", "term", ".", "lower", "(", ")", "return", "PrettyDir", "(", "self", ".", "obj", ",", "[", "pattr", "for", "pattr", "in", "self", ".", "pattrs", "if", "term", "in", "pattr", ".", "name", ".", "lower", "(", ")", "]", ")" ]
Searches for names that match some pattern. Args: term: String used to match names. A name is returned if it matches the whole search term. case_sensitive: Boolean to match case or not, default is False (case insensitive). Return: A PrettyDir object with matched names.
[ "Searches", "for", "names", "that", "match", "some", "pattern", "." ]
python
train
36.142857
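Typical usage against any object's attribute listing:

.. code:: python

    import pdir

    result = pdir([]).search('app')   # matches list.append, case-insensitively
    print(result)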
dropbox/stone
stone/frontend/ir_generator.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/frontend/ir_generator.py#L579-L597
def _merge_patches(self): """Injects object patches into their original object definitions.""" for patched_item, patched_namespace in self._patch_data_by_canonical_name.values(): patched_item_base_name = self._get_base_name(patched_item.name, patched_namespace.name) if patched_item_base_name not in self._item_by_canonical_name: raise InvalidSpec('Patch {} must correspond to a pre-existing data_type.'.format( quote(patched_item.name)), patched_item.lineno, patched_item.path) existing_item = self._item_by_canonical_name[patched_item_base_name] self._check_patch_type_mismatch(patched_item, existing_item) if isinstance(patched_item, (AstStructPatch, AstUnionPatch)): self._check_field_names_unique(existing_item, patched_item) existing_item.fields += patched_item.fields self._inject_patched_examples(existing_item, patched_item) else: raise AssertionError('Unknown Patch Object Type {}'.format( patched_item.__class__.__name__))
[ "def", "_merge_patches", "(", "self", ")", ":", "for", "patched_item", ",", "patched_namespace", "in", "self", ".", "_patch_data_by_canonical_name", ".", "values", "(", ")", ":", "patched_item_base_name", "=", "self", ".", "_get_base_name", "(", "patched_item", ".", "name", ",", "patched_namespace", ".", "name", ")", "if", "patched_item_base_name", "not", "in", "self", ".", "_item_by_canonical_name", ":", "raise", "InvalidSpec", "(", "'Patch {} must correspond to a pre-existing data_type.'", ".", "format", "(", "quote", "(", "patched_item", ".", "name", ")", ")", ",", "patched_item", ".", "lineno", ",", "patched_item", ".", "path", ")", "existing_item", "=", "self", ".", "_item_by_canonical_name", "[", "patched_item_base_name", "]", "self", ".", "_check_patch_type_mismatch", "(", "patched_item", ",", "existing_item", ")", "if", "isinstance", "(", "patched_item", ",", "(", "AstStructPatch", ",", "AstUnionPatch", ")", ")", ":", "self", ".", "_check_field_names_unique", "(", "existing_item", ",", "patched_item", ")", "existing_item", ".", "fields", "+=", "patched_item", ".", "fields", "self", ".", "_inject_patched_examples", "(", "existing_item", ",", "patched_item", ")", "else", ":", "raise", "AssertionError", "(", "'Unknown Patch Object Type {}'", ".", "format", "(", "patched_item", ".", "__class__", ".", "__name__", ")", ")" ]
Injects object patches into their original object definitions.
[ "Injects", "object", "patches", "into", "their", "original", "object", "definitions", "." ]
python
train
59.263158
HacKanCuBa/passphrase-py
passphrase/calc.py
https://github.com/HacKanCuBa/passphrase-py/blob/219d6374338ed9a1475b4f09b0d85212376f11e0/passphrase/calc.py#L103-L136
def words_amount_needed(entropybits: Union[int, float],
                        entropy_w: Union[int, float],
                        entropy_n: Union[int, float],
                        amount_n: int) -> int:
    """Calculate words needed for a passphrase based on entropy."""
    # Thanks to @julianor for this tip to calculate default amount of
    # entropy: minbitlen/log2(len(wordlist)).
    # I set the minimum entropy bits and calculate the amount of words
    # needed, considering the entropy of the wordlist.
    # Then: entropy_w * amount_w + entropy_n * amount_n >= ENTROPY_BITS_MIN
    if not isinstance(entropybits, (int, float)):
        raise TypeError('entropybits can only be int or float')
    if not isinstance(entropy_w, (int, float)):
        raise TypeError('entropy_w can only be int or float')
    if not isinstance(entropy_n, (int, float)):
        raise TypeError('entropy_n can only be int or float')
    if not isinstance(amount_n, int):
        raise TypeError('amount_n can only be int')
    if entropybits < 0:
        raise ValueError('entropybits should be greater than 0')
    if entropy_w <= 0:
        raise ValueError('entropy_w should be greater than 0')
    if entropy_n < 0:
        raise ValueError('entropy_n should be greater than 0')
    if amount_n < 0:
        raise ValueError('amount_n should be greater than 0')

    amount_w = (entropybits - entropy_n * amount_n) / entropy_w
    if amount_w > -1.0:
        return ceil(fabs(amount_w))
    return 0
[ "def", "words_amount_needed", "(", "entropybits", ":", "Union", "[", "int", ",", "float", "]", ",", "entropy_w", ":", "Union", "[", "int", ",", "float", "]", ",", "entropy_n", ":", "Union", "[", "int", ",", "float", "]", ",", "amount_n", ":", "int", ")", "->", "int", ":", "# Thanks to @julianor for this tip to calculate default amount of", "# entropy: minbitlen/log2(len(wordlist)).", "# I set the minimum entropy bits and calculate the amount of words", "# needed, cosidering the entropy of the wordlist.", "# Then: entropy_w * amount_w + entropy_n * amount_n >= ENTROPY_BITS_MIN", "if", "not", "isinstance", "(", "entropybits", ",", "(", "int", ",", "float", ")", ")", ":", "raise", "TypeError", "(", "'entropybits can only be int or float'", ")", "if", "not", "isinstance", "(", "entropy_w", ",", "(", "int", ",", "float", ")", ")", ":", "raise", "TypeError", "(", "'entropy_w can only be int or float'", ")", "if", "not", "isinstance", "(", "entropy_n", ",", "(", "int", ",", "float", ")", ")", ":", "raise", "TypeError", "(", "'entropy_n can only be int or float'", ")", "if", "not", "isinstance", "(", "amount_n", ",", "int", ")", ":", "raise", "TypeError", "(", "'amount_n can only be int'", ")", "if", "entropybits", "<", "0", ":", "raise", "ValueError", "(", "'entropybits should be greater than 0'", ")", "if", "entropy_w", "<=", "0", ":", "raise", "ValueError", "(", "'entropy_w should be greater than 0'", ")", "if", "entropy_n", "<", "0", ":", "raise", "ValueError", "(", "'entropy_n should be greater than 0'", ")", "if", "amount_n", "<", "0", ":", "raise", "ValueError", "(", "'amount_n should be greater than 0'", ")", "amount_w", "=", "(", "entropybits", "-", "entropy_n", "*", "amount_n", ")", "/", "entropy_w", "if", "amount_w", ">", "-", "1.0", ":", "return", "ceil", "(", "fabs", "(", "amount_w", ")", ")", "return", "0" ]
Calculate words needed for a passphrase based on entropy.
[ "Calculate", "words", "needed", "for", "a", "passphrase", "based", "on", "entropy", "." ]
python
train
43.235294
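A worked instance of the underlying formula, ceil((entropybits - entropy_n * amount_n) / entropy_w), assuming a 7776-word list and decimal digits:

.. code:: python

    from math import ceil, log2

    entropy_w = log2(7776)   # ~12.925 bits per word
    entropy_n = log2(10)     # ~3.322 bits per digit
    # 77-bit target with 4 digits appended:
    print(ceil((77 - entropy_n * 4) / entropy_w))   # 5 words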
ethpm/py-ethpm
ethpm/utils/manifest_validation.py
https://github.com/ethpm/py-ethpm/blob/81ed58d7c636fe00c6770edeb0401812b1a5e8fc/ethpm/utils/manifest_validation.py#L21-L42
def validate_meta_object(meta: Dict[str, Any], allow_extra_meta_fields: bool) -> None: """ Validates that every key is one of `META_FIELDS` and has a value of the expected type. """ for key, value in meta.items(): if key in META_FIELDS: if type(value) is not META_FIELDS[key]: raise ValidationError( f"Values for {key} are expected to have the type {META_FIELDS[key]}, " f"instead got {type(value)}." ) elif allow_extra_meta_fields: if key[:2] != "x-": raise ValidationError( "Undefined meta fields need to begin with 'x-', " f"{key} is not a valid undefined meta field." ) else: raise ValidationError( f"{key} is not a permitted meta field. To allow undefined fields, " "set `allow_extra_meta_fields` to True." )
[ "def", "validate_meta_object", "(", "meta", ":", "Dict", "[", "str", ",", "Any", "]", ",", "allow_extra_meta_fields", ":", "bool", ")", "->", "None", ":", "for", "key", ",", "value", "in", "meta", ".", "items", "(", ")", ":", "if", "key", "in", "META_FIELDS", ":", "if", "type", "(", "value", ")", "is", "not", "META_FIELDS", "[", "key", "]", ":", "raise", "ValidationError", "(", "f\"Values for {key} are expected to have the type {META_FIELDS[key]}, \"", "f\"instead got {type(value)}.\"", ")", "elif", "allow_extra_meta_fields", ":", "if", "key", "[", ":", "2", "]", "!=", "\"x-\"", ":", "raise", "ValidationError", "(", "\"Undefined meta fields need to begin with 'x-', \"", "f\"{key} is not a valid undefined meta field.\"", ")", "else", ":", "raise", "ValidationError", "(", "f\"{key} is not a permitted meta field. To allow undefined fields, \"", "\"set `allow_extra_meta_fields` to True.\"", ")" ]
Validates that every key is one of `META_FIELDS` and has a value of the expected type.
[ "Validates", "that", "every", "key", "is", "one", "of", "META_FIELDS", "and", "has", "a", "value", "of", "the", "expected", "type", "." ]
python
train
43.454545
raphaelgyory/django-rest-messaging
rest_messaging/models.py
https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/models.py#L191-L194
def return_daily_messages_count(self, sender): """ Returns the number of messages sent in the last 24 hours so we can ensure the user does not exceed his messaging limits """ h24 = now() - timedelta(days=1) return Message.objects.filter(sender=sender, sent_at__gte=h24).count()
[ "def", "return_daily_messages_count", "(", "self", ",", "sender", ")", ":", "h24", "=", "now", "(", ")", "-", "timedelta", "(", "days", "=", "1", ")", "return", "Message", ".", "objects", ".", "filter", "(", "sender", "=", "sender", ",", "sent_at__gte", "=", "h24", ")", ".", "count", "(", ")" ]
Returns the number of messages sent in the last 24 hours so we can ensure the user does not exceed his messaging limits
[ "Returns", "the", "number", "of", "messages", "sent", "in", "the", "last", "24", "hours", "so", "we", "can", "ensure", "the", "user", "does", "not", "exceed", "his", "messaging", "limits" ]
python
train
74.5
sammchardy/python-kucoin
kucoin/client.py
https://github.com/sammchardy/python-kucoin/blob/a4cacde413804784bd313f27a0ad37234888be29/kucoin/client.py#L1324-L1404
def get_fills(self, order_id=None, symbol=None, side=None, order_type=None, start=None, end=None, page=None, limit=None): """Get a list of recent fills. https://docs.kucoin.com/#list-fills :param order_id: (optional) generated order id :type order_id: string :param symbol: (optional) Name of symbol e.g. KCS-BTC :type symbol: string :param side: (optional) buy or sell :type side: string :param order_type: (optional) limit, market, limit_stop or market_stop :type order_type: string :param start: Start time as unix timestamp (optional) :type start: string :param end: End time as unix timestamp (optional) :type end: string :param page: optional - Page to fetch :type page: int :param limit: optional - Number of orders :type limit: int .. code:: python fills = client.get_fills() :returns: ApiResponse .. code:: python { "currentPage":1, "pageSize":1, "totalNum":251915, "totalPage":251915, "items":[ { "symbol":"BTC-USDT", "tradeId":"5c35c02709e4f67d5266954e", "orderId":"5c35c02703aa673ceec2a168", "counterOrderId":"5c1ab46003aa676e487fa8e3", "side":"buy", "liquidity":"taker", "forceTaker":true, "price":"0.083", "size":"0.8424304", "funds":"0.0699217232", "fee":"0", "feeRate":"0", "feeCurrency":"USDT", "stop":"", "type":"limit", "createdAt":1547026472000 } ] } :raises: KucoinResponseException, KucoinAPIException """ data = {} if order_id: data['orderId'] = order_id if symbol: data['symbol'] = symbol if side: data['side'] = side if order_type: data['type'] = order_type if start: data['startAt'] = start if end: data['endAt'] = end if page: data['page'] = page if limit: data['pageSize'] = limit return self._get('fills', True, data=data)
[ "def", "get_fills", "(", "self", ",", "order_id", "=", "None", ",", "symbol", "=", "None", ",", "side", "=", "None", ",", "order_type", "=", "None", ",", "start", "=", "None", ",", "end", "=", "None", ",", "page", "=", "None", ",", "limit", "=", "None", ")", ":", "data", "=", "{", "}", "if", "order_id", ":", "data", "[", "'orderId'", "]", "=", "order_id", "if", "symbol", ":", "data", "[", "'symbol'", "]", "=", "symbol", "if", "side", ":", "data", "[", "'side'", "]", "=", "side", "if", "order_type", ":", "data", "[", "'type'", "]", "=", "order_type", "if", "start", ":", "data", "[", "'startAt'", "]", "=", "start", "if", "end", ":", "data", "[", "'endAt'", "]", "=", "end", "if", "page", ":", "data", "[", "'page'", "]", "=", "page", "if", "limit", ":", "data", "[", "'pageSize'", "]", "=", "limit", "return", "self", ".", "_get", "(", "'fills'", ",", "True", ",", "data", "=", "data", ")" ]
Get a list of recent fills. https://docs.kucoin.com/#list-fills :param order_id: (optional) generated order id :type order_id: string :param symbol: (optional) Name of symbol e.g. KCS-BTC :type symbol: string :param side: (optional) buy or sell :type side: string :param order_type: (optional) limit, market, limit_stop or market_stop :type order_type: string :param start: Start time as unix timestamp (optional) :type start: string :param end: End time as unix timestamp (optional) :type end: string :param page: optional - Page to fetch :type page: int :param limit: optional - Number of orders :type limit: int .. code:: python fills = client.get_fills() :returns: ApiResponse .. code:: python { "currentPage":1, "pageSize":1, "totalNum":251915, "totalPage":251915, "items":[ { "symbol":"BTC-USDT", "tradeId":"5c35c02709e4f67d5266954e", "orderId":"5c35c02703aa673ceec2a168", "counterOrderId":"5c1ab46003aa676e487fa8e3", "side":"buy", "liquidity":"taker", "forceTaker":true, "price":"0.083", "size":"0.8424304", "funds":"0.0699217232", "fee":"0", "feeRate":"0", "feeCurrency":"USDT", "stop":"", "type":"limit", "createdAt":1547026472000 } ] } :raises: KucoinResponseException, KucoinAPIException
[ "Get", "a", "list", "of", "recent", "fills", "." ]
python
train
31.061728
astropy/astropy-healpix
astropy_healpix/core.py
https://github.com/astropy/astropy-healpix/blob/c7fbe36305aadda9946dd37969d5dcb9ff6b1440/astropy_healpix/core.py#L345-L395
def healpix_to_lonlat(healpix_index, nside, dx=None, dy=None, order='ring'): """ Convert HEALPix indices (optionally with offsets) to longitudes/latitudes. If no offsets (``dx`` and ``dy``) are provided, the coordinates will default to those at the center of the HEALPix pixels. Parameters ---------- healpix_index : int or `~numpy.ndarray` HEALPix indices (as a scalar or array) nside : int or `~numpy.ndarray` Number of pixels along the side of each of the 12 top-level HEALPix tiles dx, dy : float or `~numpy.ndarray`, optional Offsets inside the HEALPix pixel, which must be in the range [0:1], where 0.5 is the center of the HEALPix pixels (as scalars or arrays) order : { 'nested' | 'ring' }, optional Order of HEALPix pixels Returns ------- lon : :class:`~astropy.coordinates.Longitude` The longitude values lat : :class:`~astropy.coordinates.Latitude` The latitude values """ _validate_nside(nside) if _validate_order(order) == 'ring': func = _core.healpix_ring_to_lonlat else: # _validate_order(order) == 'nested' func = _core.healpix_nested_to_lonlat if dx is None: dx = 0.5 else: _validate_offset('x', dx) if dy is None: dy = 0.5 else: _validate_offset('y', dy) nside = np.asarray(nside, dtype=np.intc) lon, lat = func(healpix_index, nside, dx, dy) lon = Longitude(lon, unit=u.rad, copy=False) lat = Latitude(lat, unit=u.rad, copy=False) return lon, lat
[ "def", "healpix_to_lonlat", "(", "healpix_index", ",", "nside", ",", "dx", "=", "None", ",", "dy", "=", "None", ",", "order", "=", "'ring'", ")", ":", "_validate_nside", "(", "nside", ")", "if", "_validate_order", "(", "order", ")", "==", "'ring'", ":", "func", "=", "_core", ".", "healpix_ring_to_lonlat", "else", ":", "# _validate_order(order) == 'nested'", "func", "=", "_core", ".", "healpix_nested_to_lonlat", "if", "dx", "is", "None", ":", "dx", "=", "0.5", "else", ":", "_validate_offset", "(", "'x'", ",", "dx", ")", "if", "dy", "is", "None", ":", "dy", "=", "0.5", "else", ":", "_validate_offset", "(", "'y'", ",", "dy", ")", "nside", "=", "np", ".", "asarray", "(", "nside", ",", "dtype", "=", "np", ".", "intc", ")", "lon", ",", "lat", "=", "func", "(", "healpix_index", ",", "nside", ",", "dx", ",", "dy", ")", "lon", "=", "Longitude", "(", "lon", ",", "unit", "=", "u", ".", "rad", ",", "copy", "=", "False", ")", "lat", "=", "Latitude", "(", "lat", ",", "unit", "=", "u", ".", "rad", ",", "copy", "=", "False", ")", "return", "lon", ",", "lat" ]
Convert HEALPix indices (optionally with offsets) to longitudes/latitudes. If no offsets (``dx`` and ``dy``) are provided, the coordinates will default to those at the center of the HEALPix pixels. Parameters ---------- healpix_index : int or `~numpy.ndarray` HEALPix indices (as a scalar or array) nside : int or `~numpy.ndarray` Number of pixels along the side of each of the 12 top-level HEALPix tiles dx, dy : float or `~numpy.ndarray`, optional Offsets inside the HEALPix pixel, which must be in the range [0:1], where 0.5 is the center of the HEALPix pixels (as scalars or arrays) order : { 'nested' | 'ring' }, optional Order of HEALPix pixels Returns ------- lon : :class:`~astropy.coordinates.Longitude` The longitude values lat : :class:`~astropy.coordinates.Latitude` The latitude values
[ "Convert", "HEALPix", "indices", "(", "optionally", "with", "offsets", ")", "to", "longitudes", "/", "latitudes", "." ]
python
train
30.137255
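A quick usage sketch for the record above, assuming the function is exposed at the package's top level as its documentation suggests; nside must be a power of two to pass validation:

import numpy as np
from astropy_healpix import healpix_to_lonlat  # top-level import (assumed)

# Centers of the first two RING-ordered pixels at nside=16.
lon, lat = healpix_to_lonlat(np.array([0, 1]), nside=16, order='ring')
print(lon.deg, lat.deg)  # Longitude/Latitude objects, shown here in degrees

# Offsets move the sample point inside the pixel; dx=dy=0.5 is the center.
lon_c, lat_c = healpix_to_lonlat(0, nside=16, dx=0.5, dy=0.5)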
kytos/python-openflow
pyof/foundation/base.py
https://github.com/kytos/python-openflow/blob/4f2d0d08ab28e102ed88fe57a4ee17729f1e1bb7/pyof/foundation/base.py#L784-L812
def pack(self, value=None): """Pack the message into binary data. One of the basic operations on a Message is the pack operation. During the packing process, we convert all message attributes to binary format. Since this is usually used before sending the message to a switch, here we also call :meth:`update_header_length`. .. seealso:: This method calls its parent's :meth:`GenericStruct.pack` after :meth:`update_header_length`. Returns: bytes: Binary data that represents the Message. Raises: Exception: If there are validation errors. """ if value is None: self.update_header_length() return super().pack() elif isinstance(value, type(self)): return value.pack() else: msg = "{} is not an instance of {}".format(value, type(self).__name__) raise PackException(msg)
[ "def", "pack", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "None", ":", "self", ".", "update_header_length", "(", ")", "return", "super", "(", ")", ".", "pack", "(", ")", "elif", "isinstance", "(", "value", ",", "type", "(", "self", ")", ")", ":", "return", "value", ".", "pack", "(", ")", "else", ":", "msg", "=", "\"{} is not an instance of {}\"", ".", "format", "(", "value", ",", "type", "(", "self", ")", ".", "__name__", ")", "raise", "PackException", "(", "msg", ")" ]
Pack the message into binary data. One of the basic operations on a Message is the pack operation. During the packing process, we convert all message attributes to binary format. Since this is usually used before sending the message to a switch, here we also call :meth:`update_header_length`. .. seealso:: This method calls its parent's :meth:`GenericStruct.pack` after :meth:`update_header_length`. Returns: bytes: Binary data that represents the Message. Raises: Exception: If there are validation errors.
[ "Pack", "the", "message", "into", "binary", "data", "." ]
python
train
34.862069
pyca/pyopenssl
src/OpenSSL/SSL.py
https://github.com/pyca/pyopenssl/blob/1fbe064c50fd030948141d7d630673761525b0d0/src/OpenSSL/SSL.py#L1389-L1402
def set_tlsext_use_srtp(self, profiles): """ Enable support for negotiating SRTP keying material. :param bytes profiles: A colon delimited list of protection profile names, like ``b'SRTP_AES128_CM_SHA1_80:SRTP_AES128_CM_SHA1_32'``. :return: None """ if not isinstance(profiles, bytes): raise TypeError("profiles must be a byte string.") _openssl_assert( _lib.SSL_CTX_set_tlsext_use_srtp(self._context, profiles) == 0 )
[ "def", "set_tlsext_use_srtp", "(", "self", ",", "profiles", ")", ":", "if", "not", "isinstance", "(", "profiles", ",", "bytes", ")", ":", "raise", "TypeError", "(", "\"profiles must be a byte string.\"", ")", "_openssl_assert", "(", "_lib", ".", "SSL_CTX_set_tlsext_use_srtp", "(", "self", ".", "_context", ",", "profiles", ")", "==", "0", ")" ]
Enable support for negotiating SRTP keying material. :param bytes profiles: A colon delimited list of protection profile names, like ``b'SRTP_AES128_CM_SHA1_80:SRTP_AES128_CM_SHA1_32'``. :return: None
[ "Enable", "support", "for", "negotiating", "SRTP", "keying", "material", "." ]
python
test
36.214286
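A short usage sketch for the record above, assuming an OpenSSL build with SRTP support (otherwise the internal assertion fails):

from OpenSSL import SSL

ctx = SSL.Context(SSL.TLSv1_2_METHOD)
# Profiles must be a byte string; a colon separates multiple profile names.
ctx.set_tlsext_use_srtp(b'SRTP_AES128_CM_SHA1_80:SRTP_AES128_CM_SHA1_32')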
DallasMorningNews/django-datafreezer
datafreezer/views.py
https://github.com/DallasMorningNews/django-datafreezer/blob/982dcf2015c80a280f1a093e32977cb71d4ea7aa/datafreezer/views.py#L690-L735
def get(self, request): """View for HTTP GET method. Returns template and context from generate_page_title and generate_sections to populate template. """ sections = self.generate_sections() if self.paginated: p = Paginator(sections, 25) page = request.GET.get('page') try: sections = p.page(page) except PageNotAnInteger: # If page is not an integer, deliver first page. sections = p.page(1) except EmptyPage: # If page is out of range (e.g. 9999), return last page # of results. sections = p.page(p.num_pages) pageUpper = int(p.num_pages) / 2 try: pageLower = int(page) / 2 except TypeError: pageLower = -999 else: pageUpper = None pageLower = None context = { 'sections': sections, 'page_title': self.generate_page_title(), 'browse_type': self.browse_type, 'pageUpper': pageUpper, 'pageLower': pageLower } return render( request, self.template_path, context )
[ "def", "get", "(", "self", ",", "request", ")", ":", "sections", "=", "self", ".", "generate_sections", "(", ")", "if", "self", ".", "paginated", ":", "p", "=", "Paginator", "(", "sections", ",", "25", ")", "page", "=", "request", ".", "GET", ".", "get", "(", "'page'", ")", "try", ":", "sections", "=", "p", ".", "page", "(", "page", ")", "except", "PageNotAnInteger", ":", "# If page is not an integer, deliver first page.", "sections", "=", "p", ".", "page", "(", "1", ")", "except", "EmptyPage", ":", "# If page is out of range (e.g. 9999), return last page", "# of results.", "sections", "=", "p", ".", "page", "(", "p", ".", "num_pages", ")", "pageUpper", "=", "int", "(", "p", ".", "num_pages", ")", "/", "2", "try", ":", "pageLower", "=", "int", "(", "page", ")", "/", "2", "except", "TypeError", ":", "pageLower", "=", "-", "999", "else", ":", "pageUpper", "=", "None", "pageLower", "=", "None", "context", "=", "{", "'sections'", ":", "sections", ",", "'page_title'", ":", "self", ".", "generate_page_title", "(", ")", ",", "'browse_type'", ":", "self", ".", "browse_type", ",", "'pageUpper'", ":", "pageUpper", ",", "'pageLower'", ":", "pageLower", "}", "return", "render", "(", "request", ",", "self", ".", "template_path", ",", "context", ")" ]
View for HTTP GET method. Returns template and context from generate_page_title and generate_sections to populate template.
[ "View", "for", "HTTP", "GET", "method", "." ]
python
train
27.326087
PythonCharmers/python-future
src/future/backports/urllib/parse.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/urllib/parse.py#L930-L949
def splitnport(host, defport=-1): """Split host and port, returning numeric port. Return given default port if no ':' found; defaults to -1. Return numerical port if a valid number is found after ':'. Return None if ':' but not a valid number.""" global _nportprog if _nportprog is None: import re _nportprog = re.compile('^(.*):(.*)$') match = _nportprog.match(host) if match: host, port = match.group(1, 2) try: if not port: raise ValueError("no digits") nport = int(port) except ValueError: nport = None return host, nport return host, defport
[ "def", "splitnport", "(", "host", ",", "defport", "=", "-", "1", ")", ":", "global", "_nportprog", "if", "_nportprog", "is", "None", ":", "import", "re", "_nportprog", "=", "re", ".", "compile", "(", "'^(.*):(.*)$'", ")", "match", "=", "_nportprog", ".", "match", "(", "host", ")", "if", "match", ":", "host", ",", "port", "=", "match", ".", "group", "(", "1", ",", "2", ")", "try", ":", "if", "not", "port", ":", "raise", "ValueError", "(", "\"no digits\"", ")", "nport", "=", "int", "(", "port", ")", "except", "ValueError", ":", "nport", "=", "None", "return", "host", ",", "nport", "return", "host", ",", "defport" ]
Split host and port, returning numeric port. Return given default port if no ':' found; defaults to -1. Return numerical port if a valid number is found after ':'. Return None if ':' but not a valid number.
[ "Split", "host", "and", "port", "returning", "numeric", "port", ".", "Return", "given", "default", "port", "if", "no", ":", "found", ";", "defaults", "to", "-", "1", ".", "Return", "numerical", "port", "if", "a", "valid", "number", "is", "found", "after", ":", ".", "Return", "None", "if", ":", "but", "not", "a", "valid", "number", "." ]
python
train
32.45
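The three return cases in the record above are easiest to see side by side; a minimal demo, importing from the path the record gives:

from future.backports.urllib.parse import splitnport

print(splitnport('example.com:8080'))  # ('example.com', 8080)
print(splitnport('example.com'))       # ('example.com', -1)   default port
print(splitnport('example.com', 443))  # ('example.com', 443)  custom default
print(splitnport('example.com:abc'))   # ('example.com', None) invalid digits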
mikedh/trimesh
trimesh/path/util.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/util.py#L4-L28
def is_ccw(points): """ Check if connected planar points are counterclockwise. Parameters ----------- points: (n,2) float, connected points on a plane Returns ---------- ccw: bool, True if points are counterclockwise """ points = np.asanyarray(points, dtype=np.float64) if (len(points.shape) != 2 or points.shape[1] != 2): raise ValueError('CCW is only defined for 2D') xd = np.diff(points[:, 0]) yd = np.column_stack(( points[:, 1], points[:, 1])).reshape(-1)[1:-1].reshape((-1, 2)).sum(axis=1) area = np.sum(xd * yd) * .5 ccw = area < 0 return ccw
[ "def", "is_ccw", "(", "points", ")", ":", "points", "=", "np", ".", "asanyarray", "(", "points", ",", "dtype", "=", "np", ".", "float64", ")", "if", "(", "len", "(", "points", ".", "shape", ")", "!=", "2", "or", "points", ".", "shape", "[", "1", "]", "!=", "2", ")", ":", "raise", "ValueError", "(", "'CCW is only defined for 2D'", ")", "xd", "=", "np", ".", "diff", "(", "points", "[", ":", ",", "0", "]", ")", "yd", "=", "np", ".", "column_stack", "(", "(", "points", "[", ":", ",", "1", "]", ",", "points", "[", ":", ",", "1", "]", ")", ")", ".", "reshape", "(", "-", "1", ")", "[", "1", ":", "-", "1", "]", ".", "reshape", "(", "(", "-", "1", ",", "2", ")", ")", ".", "sum", "(", "axis", "=", "1", ")", "area", "=", "np", ".", "sum", "(", "xd", "*", "yd", ")", "*", ".5", "ccw", "=", "area", "<", "0", "return", "ccw" ]
Check if connected planar points are counterclockwise. Parameters ----------- points: (n,2) float, connected points on a plane Returns ---------- ccw: bool, True if points are counterclockwise
[ "Check", "if", "connected", "planar", "points", "are", "counterclockwise", "." ]
python
train
25.16
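A two-line sanity check of the orientation test above, using the module path shown in the record; the trapezoid-sum area is negative for counterclockwise winding:

import numpy as np
from trimesh.path.util import is_ccw

square = np.array([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]], dtype=np.float64)
print(is_ccw(square))        # True: vertices wind counterclockwise
print(is_ccw(square[::-1]))  # False: reversed order winds clockwise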
gamechanger/schemer
schemer/__init__.py
https://github.com/gamechanger/schemer/blob/1d1dd7da433d3b84ce5a80ded5a84ab4a65825ee/schemer/__init__.py#L183-L215
def _validate_instance(self, instance, errors, path_prefix=''): """Validates that the given instance of a document conforms to the given schema's structure and validations. Any validation errors are added to the given errors collection. The caller should assume the instance is considered valid if the errors collection is empty when this method returns.""" if not isinstance(instance, dict): errors[path_prefix] = "Expected instance of dict to validate against schema." return # validate against the schema level validators self._apply_validations(errors, path_prefix, self._validates, instance) # Loop over each field in the schema and check the instance value conforms # to its spec for field, spec in self.doc_spec.iteritems(): path = self._append_path(path_prefix, field) # If the field is present, validate its value. if field in instance: self._validate_value(instance[field], spec, path, errors) else: # If not, add an error if it was a required key. if spec.get('required', False): errors[path] = "{} is required.".format(path) # Now loop over each field in the given instance and make sure we don't # have any fields not declared in the schema, unless strict mode has been # explicitly disabled. if self._strict: for field in instance: if field not in self.doc_spec: errors[self._append_path(path_prefix, field)] = "Unexpected document field not present in schema"
[ "def", "_validate_instance", "(", "self", ",", "instance", ",", "errors", ",", "path_prefix", "=", "''", ")", ":", "if", "not", "isinstance", "(", "instance", ",", "dict", ")", ":", "errors", "[", "path_prefix", "]", "=", "\"Expected instance of dict to validate against schema.\"", "return", "# validate against the schema level validators", "self", ".", "_apply_validations", "(", "errors", ",", "path_prefix", ",", "self", ".", "_validates", ",", "instance", ")", "# Loop over each field in the schema and check the instance value conforms", "# to its spec", "for", "field", ",", "spec", "in", "self", ".", "doc_spec", ".", "iteritems", "(", ")", ":", "path", "=", "self", ".", "_append_path", "(", "path_prefix", ",", "field", ")", "# If the field is present, validate its value.", "if", "field", "in", "instance", ":", "self", ".", "_validate_value", "(", "instance", "[", "field", "]", ",", "spec", ",", "path", ",", "errors", ")", "else", ":", "# If not, add an error if it was a required key.", "if", "spec", ".", "get", "(", "'required'", ",", "False", ")", ":", "errors", "[", "path", "]", "=", "\"{} is required.\"", ".", "format", "(", "path", ")", "# Now loop over each field in the given instance and make sure we don't", "# have any fields not declared in the schema, unless strict mode has been", "# explicitly disabled.", "if", "self", ".", "_strict", ":", "for", "field", "in", "instance", ":", "if", "field", "not", "in", "self", ".", "doc_spec", ":", "errors", "[", "self", ".", "_append_path", "(", "path_prefix", ",", "field", ")", "]", "=", "\"Unexpected document field not present in schema\"" ]
Validates that the given instance of a document conforms to the given schema's structure and validations. Any validation errors are added to the given errors collection. The caller should assume the instance is considered valid if the errors collection is empty when this method returns.
[ "Validates", "that", "the", "given", "instance", "of", "a", "document", "conforms", "to", "the", "given", "schema", "s", "structure", "and", "validations", ".", "Any", "validation", "errors", "are", "added", "to", "the", "given", "errors", "collection", ".", "The", "caller", "should", "assume", "the", "instance", "is", "considered", "valid", "if", "the", "errors", "collection", "is", "empty", "when", "this", "method", "returns", "." ]
python
train
49.939394
apache/airflow
airflow/hooks/presto_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/presto_hook.py#L129-L140
def insert_rows(self, table, rows, target_fields=None): """ A generic way to insert a set of tuples into a table. :param table: Name of the target table :type table: str :param rows: The rows to insert into the table :type rows: iterable of tuples :param target_fields: The names of the columns to fill in the table :type target_fields: iterable of strings """ super().insert_rows(table, rows, target_fields, 0)
[ "def", "insert_rows", "(", "self", ",", "table", ",", "rows", ",", "target_fields", "=", "None", ")", ":", "super", "(", ")", ".", "insert_rows", "(", "table", ",", "rows", ",", "target_fields", ",", "0", ")" ]
A generic way to insert a set of tuples into a table. :param table: Name of the target table :type table: str :param rows: The rows to insert into the table :type rows: iterable of tuples :param target_fields: The names of the columns to fill in the table :type target_fields: iterable of strings
[ "A", "generic", "way", "to", "insert", "a", "set", "of", "tuples", "into", "a", "table", "." ]
python
test
40.083333
ethpm/py-ethpm
ethpm/tools/builder.py
https://github.com/ethpm/py-ethpm/blob/81ed58d7c636fe00c6770edeb0401812b1a5e8fc/ethpm/tools/builder.py#L553-L580
def deployment( *, block_uri: URI, contract_instance: str, contract_type: str, address: HexStr, transaction: HexStr = None, block: HexStr = None, deployment_bytecode: Dict[str, Any] = None, runtime_bytecode: Dict[str, Any] = None, compiler: Dict[str, Any] = None, ) -> Manifest: """ Returns a manifest with the newly included deployment. Requires a valid blockchain URI; however, no validation is provided that this URI is unique amongst the other deployment URIs, so the user must take care that each blockchain URI represents a unique blockchain. """ return _deployment( contract_instance, contract_type, deployment_bytecode, runtime_bytecode, compiler, block_uri, address, transaction, block, )
[ "def", "deployment", "(", "*", ",", "block_uri", ":", "URI", ",", "contract_instance", ":", "str", ",", "contract_type", ":", "str", ",", "address", ":", "HexStr", ",", "transaction", ":", "HexStr", "=", "None", ",", "block", ":", "HexStr", "=", "None", ",", "deployment_bytecode", ":", "Dict", "[", "str", ",", "Any", "]", "=", "None", ",", "runtime_bytecode", ":", "Dict", "[", "str", ",", "Any", "]", "=", "None", ",", "compiler", ":", "Dict", "[", "str", ",", "Any", "]", "=", "None", ",", ")", "->", "Manifest", ":", "return", "_deployment", "(", "contract_instance", ",", "contract_type", ",", "deployment_bytecode", ",", "runtime_bytecode", ",", "compiler", ",", "block_uri", ",", "address", ",", "transaction", ",", "block", ",", ")" ]
Returns a manifest with the newly included deployment. Requires a valid blockchain URI; however, no validation is provided that this URI is unique amongst the other deployment URIs, so the user must take care that each blockchain URI represents a unique blockchain.
[ "Returns", "a", "manifest", "with", "the", "newly", "included", "deployment", ".", "Requires", "a", "valid", "blockchain", "URI", "however", "no", "validation", "is", "provided", "that", "this", "URI", "is", "unique", "amongst", "the", "other", "deployment", "URIs", "so", "the", "user", "must", "take", "care", "that", "each", "blockchain", "URI", "represents", "a", "unique", "blockchain", "." ]
python
train
28.928571
cytoscape/py2cytoscape
py2cytoscape/cyrest/cybrowser.py
https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/cybrowser.py#L29-L40
def hide(self, wid, verbose=False): """ Hide an HTML browser in the Results Panel. :param wid: Window ID :param verbose: print more """ PARAMS={"id":wid} response=api(url=self.__url+"/hide?",PARAMS=PARAMS, method="GET", verbose=verbose) return response
[ "def", "hide", "(", "self", ",", "wid", ",", "verbose", "=", "False", ")", ":", "PARAMS", "=", "{", "\"id\"", ":", "wid", "}", "response", "=", "api", "(", "url", "=", "self", ".", "__url", "+", "\"/hide?\"", ",", "PARAMS", "=", "PARAMS", ",", "method", "=", "\"GET\"", ",", "verbose", "=", "verbose", ")", "return", "response" ]
Hide an HTML browser in the Results Panel. :param wid: Window ID :param verbose: print more
[ "Hide", "an", "HTML", "browser", "in", "the", "Results", "Panel", "." ]
python
train
25.75
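A usage sketch for the record above against a running Cytoscape instance with CyREST enabled; the client construction and the show() call follow the package's usual pattern and are assumptions here:

from py2cytoscape import cyrest

cytoscape = cyrest.cyclient()          # assumes Cytoscape listening on localhost
cytoscape.cybrowser.show(wid='demo', text='<h1>hello</h1>', title='Demo')
cytoscape.cybrowser.hide(wid='demo')   # removes the panel shown above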
sassoo/goldman
goldman/deserializers/jsonapi.py
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/deserializers/jsonapi.py#L221-L238
def _parse_top_level(self, body): """ Ensure compliance with the spec's top-level section """ link = 'jsonapi.org/format/#document-top-level' try: if not isinstance(body['data'], dict): raise TypeError except (KeyError, TypeError): self.fail('JSON API payloads MUST be a hash at the most ' 'top-level; rooted at a key named `data` where the ' 'value must be a hash. Currently, we only support ' 'JSON API payloads that comply with the single ' 'Resource Object section.', link) if 'errors' in body: self.fail('JSON API payloads MUST not have both `data` & ' '`errors` top-level keys.', link)
[ "def", "_parse_top_level", "(", "self", ",", "body", ")", ":", "link", "=", "'jsonapi.org/format/#document-top-level'", "try", ":", "if", "not", "isinstance", "(", "body", "[", "'data'", "]", ",", "dict", ")", ":", "raise", "TypeError", "except", "(", "KeyError", ",", "TypeError", ")", ":", "self", ".", "fail", "(", "'JSON API payloads MUST be a hash at the most '", "'top-level; rooted at a key named `data` where the '", "'value must be a hash. Currently, we only support '", "'JSON API payloads that comply with the single '", "'Resource Object section.'", ",", "link", ")", "if", "'errors'", "in", "body", ":", "self", ".", "fail", "(", "'JSON API payloads MUST not have both `data` & '", "'`errors` top-level keys.'", ",", "link", ")" ]
Ensure compliance with the spec's top-level section
[ "Ensure", "compliance", "with", "the", "spec", "s", "top", "-", "level", "section" ]
python
train
43.277778
SheffieldML/GPy
GPy/util/multioutput.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/util/multioutput.py#L43-L59
def ICM(input_dim, num_outputs, kernel, W_rank=1,W=None,kappa=None,name='ICM'): """ Builds a kernel for an Intrinsic Coregionalization Model :input_dim: Input dimensionality (does not include dimension of indices) :num_outputs: Number of outputs :param kernel: kernel that will be multiplied by the coregionalize kernel (matrix B). :type kernel: a GPy kernel :param W_rank: rank of the coregionalization matrix 'W' :type W_rank: integer """ if kernel.input_dim != input_dim: kernel.input_dim = input_dim warnings.warn("kernel's input dimension overwritten to fit input_dim parameter.") K = kernel.prod(GPy.kern.Coregionalize(1, num_outputs, active_dims=[input_dim], rank=W_rank,W=W,kappa=kappa,name='B'),name=name) return K
[ "def", "ICM", "(", "input_dim", ",", "num_outputs", ",", "kernel", ",", "W_rank", "=", "1", ",", "W", "=", "None", ",", "kappa", "=", "None", ",", "name", "=", "'ICM'", ")", ":", "if", "kernel", ".", "input_dim", "!=", "input_dim", ":", "kernel", ".", "input_dim", "=", "input_dim", "warnings", ".", "warn", "(", "\"kernel's input dimension overwritten to fit input_dim parameter.\"", ")", "K", "=", "kernel", ".", "prod", "(", "GPy", ".", "kern", ".", "Coregionalize", "(", "1", ",", "num_outputs", ",", "active_dims", "=", "[", "input_dim", "]", ",", "rank", "=", "W_rank", ",", "W", "=", "W", ",", "kappa", "=", "kappa", ",", "name", "=", "'B'", ")", ",", "name", "=", "name", ")", "return", "K" ]
Builds a kernel for an Intrinsic Coregionalization Model :input_dim: Input dimensionality (does not include dimension of indices) :num_outputs: Number of outputs :param kernel: kernel that will be multiplied by the coregionalize kernel (matrix B). :type kernel: a GPy kernel :param W_rank: rank of the coregionalization matrix 'W' :type W_rank: integer
[ "Builds", "a", "kernel", "for", "an", "Intrinsic", "Coregionalization", "Model" ]
python
train
46.294118
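A small usage sketch for the record above: build the intrinsic coregionalization kernel over one input dimension and two outputs; the extra active dimension carries the output index:

import GPy

# The ICM helper appends a Coregionalize part acting on an index column
# at position input_dim, multiplied onto the base kernel.
k = GPy.util.multioutput.ICM(input_dim=1, num_outputs=2,
                             kernel=GPy.kern.RBF(1), W_rank=1)
print(k)  # product kernel: RBF x B (coregionalization matrix)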
delph-in/pydelphin
delphin/commands.py
https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/commands.py#L625-L678
def compare(testsuite, gold, select='i-id i-input mrs'): """ Compare two [incr tsdb()] profiles. Args: testsuite (str, TestSuite): path to the test [incr tsdb()] testsuite or a :class:`TestSuite` object gold (str, TestSuite): path to the gold [incr tsdb()] testsuite or a :class:`TestSuite` object select: TSQL query to select (id, input, mrs) triples (default: `i-id i-input mrs`) Yields: dict: Comparison results as:: {"id": "item identifier", "input": "input sentence", "test": number_of_unique_results_in_test, "shared": number_of_shared_results, "gold": number_of_unique_results_in_gold} """ from delphin.mrs import simplemrs, compare as mrs_compare if not isinstance(testsuite, itsdb.TestSuite): if isinstance(testsuite, itsdb.ItsdbProfile): testsuite = testsuite.root testsuite = itsdb.TestSuite(testsuite) if not isinstance(gold, itsdb.TestSuite): if isinstance(gold, itsdb.ItsdbProfile): gold = gold.root gold = itsdb.TestSuite(gold) queryobj = tsql.inspect_query('select ' + select) if len(queryobj['projection']) != 3: raise ValueError('select does not return 3 fields: ' + select) input_select = '{} {}'.format(queryobj['projection'][0], queryobj['projection'][1]) i_inputs = dict(tsql.select(input_select, testsuite)) matched_rows = itsdb.match_rows( tsql.select(select, testsuite), tsql.select(select, gold), 0) for (key, testrows, goldrows) in matched_rows: (test_unique, shared, gold_unique) = mrs_compare.compare_bags( [simplemrs.loads_one(row[2]) for row in testrows], [simplemrs.loads_one(row[2]) for row in goldrows]) yield {'id': key, 'input': i_inputs[key], 'test': test_unique, 'shared': shared, 'gold': gold_unique}
[ "def", "compare", "(", "testsuite", ",", "gold", ",", "select", "=", "'i-id i-input mrs'", ")", ":", "from", "delphin", ".", "mrs", "import", "simplemrs", ",", "compare", "as", "mrs_compare", "if", "not", "isinstance", "(", "testsuite", ",", "itsdb", ".", "TestSuite", ")", ":", "if", "isinstance", "(", "testsuite", ",", "itsdb", ".", "ItsdbProfile", ")", ":", "testsuite", "=", "testsuite", ".", "root", "testsuite", "=", "itsdb", ".", "TestSuite", "(", "testsuite", ")", "if", "not", "isinstance", "(", "gold", ",", "itsdb", ".", "TestSuite", ")", ":", "if", "isinstance", "(", "gold", ",", "itsdb", ".", "ItsdbProfile", ")", ":", "gold", "=", "gold", ".", "root", "gold", "=", "itsdb", ".", "TestSuite", "(", "gold", ")", "queryobj", "=", "tsql", ".", "inspect_query", "(", "'select '", "+", "select", ")", "if", "len", "(", "queryobj", "[", "'projection'", "]", ")", "!=", "3", ":", "raise", "ValueError", "(", "'select does not return 3 fields: '", "+", "select", ")", "input_select", "=", "'{} {}'", ".", "format", "(", "queryobj", "[", "'projection'", "]", "[", "0", "]", ",", "queryobj", "[", "'projection'", "]", "[", "1", "]", ")", "i_inputs", "=", "dict", "(", "tsql", ".", "select", "(", "input_select", ",", "testsuite", ")", ")", "matched_rows", "=", "itsdb", ".", "match_rows", "(", "tsql", ".", "select", "(", "select", ",", "testsuite", ")", ",", "tsql", ".", "select", "(", "select", ",", "gold", ")", ",", "0", ")", "for", "(", "key", ",", "testrows", ",", "goldrows", ")", "in", "matched_rows", ":", "(", "test_unique", ",", "shared", ",", "gold_unique", ")", "=", "mrs_compare", ".", "compare_bags", "(", "[", "simplemrs", ".", "loads_one", "(", "row", "[", "2", "]", ")", "for", "row", "in", "testrows", "]", ",", "[", "simplemrs", ".", "loads_one", "(", "row", "[", "2", "]", ")", "for", "row", "in", "goldrows", "]", ")", "yield", "{", "'id'", ":", "key", ",", "'input'", ":", "i_inputs", "[", "key", "]", ",", "'test'", ":", "test_unique", ",", "'shared'", ":", "shared", ",", "'gold'", ":", "gold_unique", "}" ]
Compare two [incr tsdb()] profiles. Args: testsuite (str, TestSuite): path to the test [incr tsdb()] testsuite or a :class:`TestSuite` object gold (str, TestSuite): path to the gold [incr tsdb()] testsuite or a :class:`TestSuite` object select: TSQL query to select (id, input, mrs) triples (default: `i-id i-input mrs`) Yields: dict: Comparison results as:: {"id": "item identifier", "input": "input sentence", "test": number_of_unique_results_in_test, "shared": number_of_shared_results, "gold": number_of_unique_results_in_gold}
[ "Compare", "two", "[", "incr", "tsdb", "()", "]", "profiles", "." ]
python
train
37.111111
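Typical use of the generator above iterates over the yielded dicts; the profile paths below are placeholders:

from delphin.commands import compare

# 'test-profile' and 'gold-profile' are placeholder [incr tsdb()] paths.
for result in compare('test-profile', 'gold-profile'):
    print('{id}\t<{test},{shared},{gold}>'.format(**result))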
pywbem/pywbem
attic/twisted_client.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/attic/twisted_client.py#L58-L88
def connectionMade(self): """Send an HTTP POST command with the appropriate CIM over HTTP headers and payload.""" self.factory.request_xml = str(self.factory.payload) self.sendCommand('POST', '/cimom') self.sendHeader('Host', '%s:%d' % (self.transport.addr[0], self.transport.addr[1])) self.sendHeader('User-Agent', 'pywbem/twisted') self.sendHeader('Content-length', len(self.factory.payload)) self.sendHeader('Content-type', 'application/xml') if self.factory.creds: auth = base64.b64encode('%s:%s' % (self.factory.creds[0], self.factory.creds[1])) self.sendHeader('Authorization', 'Basic %s' % auth) self.sendHeader('CIMOperation', str(self.factory.operation)) self.sendHeader('CIMMethod', str(self.factory.method)) self.sendHeader('CIMObject', str(self.factory.object)) self.endHeaders() # TODO: Figure out why twisted doesn't support unicode. An # exception should be thrown by the str() call if the payload # can't be converted to the current codepage. self.transport.write(str(self.factory.payload))
[ "def", "connectionMade", "(", "self", ")", ":", "self", ".", "factory", ".", "request_xml", "=", "str", "(", "self", ".", "factory", ".", "payload", ")", "self", ".", "sendCommand", "(", "'POST'", ",", "'/cimom'", ")", "self", ".", "sendHeader", "(", "'Host'", ",", "'%s:%d'", "%", "(", "self", ".", "transport", ".", "addr", "[", "0", "]", ",", "self", ".", "transport", ".", "addr", "[", "1", "]", ")", ")", "self", ".", "sendHeader", "(", "'User-Agent'", ",", "'pywbem/twisted'", ")", "self", ".", "sendHeader", "(", "'Content-length'", ",", "len", "(", "self", ".", "factory", ".", "payload", ")", ")", "self", ".", "sendHeader", "(", "'Content-type'", ",", "'application/xml'", ")", "if", "self", ".", "factory", ".", "creds", ":", "auth", "=", "base64", ".", "b64encode", "(", "'%s:%s'", "%", "(", "self", ".", "factory", ".", "creds", "[", "0", "]", ",", "self", ".", "factory", ".", "creds", "[", "1", "]", ")", ")", "self", ".", "sendHeader", "(", "'Authorization'", ",", "'Basic %s'", "%", "auth", ")", "self", ".", "sendHeader", "(", "'CIMOperation'", ",", "str", "(", "self", ".", "factory", ".", "operation", ")", ")", "self", ".", "sendHeader", "(", "'CIMMethod'", ",", "str", "(", "self", ".", "factory", ".", "method", ")", ")", "self", ".", "sendHeader", "(", "'CIMObject'", ",", "str", "(", "self", ".", "factory", ".", "object", ")", ")", "self", ".", "endHeaders", "(", ")", "# TODO: Figure out why twisted doesn't support unicode. An", "# exception should be thrown by the str() call if the payload", "# can't be converted to the current codepage.", "self", ".", "transport", ".", "write", "(", "str", "(", "self", ".", "factory", ".", "payload", ")", ")" ]
Send an HTTP POST command with the appropriate CIM over HTTP headers and payload.
[ "Send", "an", "HTTP", "POST", "command", "with", "the", "appropriate", "CIM", "over", "HTTP", "headers", "and", "payload", "." ]
python
train
39.225806
mattjj/pylds
pylds/laplace.py
https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/laplace.py#L133-L162
def gradient_log_joint(self, x): """ The gradient of the log joint probability. For the Gaussian terms, this is d/dx [-1/2 x^T J x + h^T x] = -Jx + h. For the likelihood terms, we have for each time t d/dx log p(yt | xt) """ T, D = self.T, self.D_latent assert x.shape == (T, D) # Collect the Gaussian LDS prior terms _, h_init, _ = self.info_init_params _, _, _, h1, h2, _ = self.info_dynamics_params H_diag, H_upper_diag = self.sparse_J_prior # Compute the gradient from the prior g = -1 * symm_block_tridiag_matmul(H_diag, H_upper_diag, x) g[0] += h_init g[:-1] += h1 g[1:] += h2 # Compute gradient from the likelihood terms g += self.grad_local_log_likelihood(x) return g
[ "def", "gradient_log_joint", "(", "self", ",", "x", ")", ":", "T", ",", "D", "=", "self", ".", "T", ",", "self", ".", "D_latent", "assert", "x", ".", "shape", "==", "(", "T", ",", "D", ")", "# Collect the Gaussian LDS prior terms", "_", ",", "h_init", ",", "_", "=", "self", ".", "info_init_params", "_", ",", "_", ",", "_", ",", "h1", ",", "h2", ",", "_", "=", "self", ".", "info_dynamics_params", "H_diag", ",", "H_upper_diag", "=", "self", ".", "sparse_J_prior", "# Compute the gradient from the prior", "g", "=", "-", "1", "*", "symm_block_tridiag_matmul", "(", "H_diag", ",", "H_upper_diag", ",", "x", ")", "g", "[", "0", "]", "+=", "h_init", "g", "[", ":", "-", "1", "]", "+=", "h1", "g", "[", "1", ":", "]", "+=", "h2", "# Compute gradient from the likelihood terms", "g", "+=", "self", ".", "grad_local_log_likelihood", "(", "x", ")", "return", "g" ]
The gradient of the log joint probability. For the Gaussian terms, this is d/dx [-1/2 x^T J x + h^T x] = -Jx + h. For the likelihood terms, we have for each time t d/dx log p(yt | xt)
[ "The", "gradient", "of", "the", "log", "joint", "probability", "." ]
python
train
27.666667
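The Gaussian part of the gradient in the docstring above, d/dx [-1/2 x^T J x + h^T x] = -Jx + h, is easy to verify with finite differences. A self-contained check on a small dense system — a sketch of the identity only, not the class's sparse block-tridiagonal code:

import numpy as np

rng = np.random.default_rng(0)
D = 4
A = rng.standard_normal((D, D))
J = A @ A.T + D * np.eye(D)    # symmetric positive definite precision matrix
h = rng.standard_normal(D)
x = rng.standard_normal(D)

f = lambda x: -0.5 * x @ J @ x + h @ x
grad = -J @ x + h              # analytic gradient from the docstring's identity

eps = 1e-6
num = np.array([(f(x + eps * e) - f(x - eps * e)) / (2 * eps)
                for e in np.eye(D)])
assert np.allclose(grad, num, atol=1e-5)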
juju/python-libjuju
juju/client/_client2.py
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/client/_client2.py#L5638-L5651
async def SetMeterStatus(self, statues): ''' statues : typing.Sequence[~MeterStatusParam] Returns -> typing.Sequence[~ErrorResult] ''' # map input types to rpc msg _params = dict() msg = dict(type='MetricsDebug', request='SetMeterStatus', version=2, params=_params) _params['statues'] = statues reply = await self.rpc(msg) return reply
[ "async", "def", "SetMeterStatus", "(", "self", ",", "statues", ")", ":", "# map input types to rpc msg", "_params", "=", "dict", "(", ")", "msg", "=", "dict", "(", "type", "=", "'MetricsDebug'", ",", "request", "=", "'SetMeterStatus'", ",", "version", "=", "2", ",", "params", "=", "_params", ")", "_params", "[", "'statues'", "]", "=", "statues", "reply", "=", "await", "self", ".", "rpc", "(", "msg", ")", "return", "reply" ]
statues : typing.Sequence[~MeterStatusParam] Returns -> typing.Sequence[~ErrorResult]
[ "statues", ":", "typing", ".", "Sequence", "[", "~MeterStatusParam", "]", "Returns", "-", ">", "typing", ".", "Sequence", "[", "~ErrorResult", "]" ]
python
train
32.785714
krukas/Trionyx
trionyx/trionyx/views/core.py
https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/views/core.py#L278-L294
def get_items(self, paginator, current_page): """Get list items for current page""" fields = self.get_model_config().get_list_fields() page = paginator.page(current_page) items = [] for item in page: items.append({ 'id': item.id, 'url': item.get_absolute_url(), 'row_data': [ fields[field]['renderer'](item, field) for field in self.get_current_fields() ] }) return items
[ "def", "get_items", "(", "self", ",", "paginator", ",", "current_page", ")", ":", "fields", "=", "self", ".", "get_model_config", "(", ")", ".", "get_list_fields", "(", ")", "page", "=", "paginator", ".", "page", "(", "current_page", ")", "items", "=", "[", "]", "for", "item", "in", "page", ":", "items", ".", "append", "(", "{", "'id'", ":", "item", ".", "id", ",", "'url'", ":", "item", ".", "get_absolute_url", "(", ")", ",", "'row_data'", ":", "[", "fields", "[", "field", "]", "[", "'renderer'", "]", "(", "item", ",", "field", ")", "for", "field", "in", "self", ".", "get_current_fields", "(", ")", "]", "}", ")", "return", "items" ]
Get list items for current page
[ "Get", "list", "items", "for", "current", "page" ]
python
train
31.352941
mdickinson/bigfloat
bigfloat/core.py
https://github.com/mdickinson/bigfloat/blob/e5fdd1048615191ed32a2b7460e14b3b3ff24662/bigfloat/core.py#L171-L197
def _format_finite(negative, digits, dot_pos): """Given a (possibly empty) string of digits and an integer dot_pos indicating the position of the decimal point relative to the start of that string, output a formatted numeric string with the same value and same implicit exponent.""" # strip leading zeros olddigits = digits digits = digits.lstrip('0') dot_pos -= len(olddigits) - len(digits) # value is 0.digits * 10**dot_pos use_exponent = dot_pos <= -4 or dot_pos > len(digits) if use_exponent: exp = dot_pos - 1 if digits else dot_pos dot_pos -= exp # left pad with zeros, insert decimal point, and add exponent if dot_pos <= 0: digits = '0' * (1 - dot_pos) + digits dot_pos += 1 - dot_pos assert 1 <= dot_pos <= len(digits) if dot_pos < len(digits): digits = digits[:dot_pos] + '.' + digits[dot_pos:] if use_exponent: digits += "e{0:+03d}".format(exp) return '-' + digits if negative else digits
[ "def", "_format_finite", "(", "negative", ",", "digits", ",", "dot_pos", ")", ":", "# strip leading zeros", "olddigits", "=", "digits", "digits", "=", "digits", ".", "lstrip", "(", "'0'", ")", "dot_pos", "-=", "len", "(", "olddigits", ")", "-", "len", "(", "digits", ")", "# value is 0.digits * 10**dot_pos", "use_exponent", "=", "dot_pos", "<=", "-", "4", "or", "dot_pos", ">", "len", "(", "digits", ")", "if", "use_exponent", ":", "exp", "=", "dot_pos", "-", "1", "if", "digits", "else", "dot_pos", "dot_pos", "-=", "exp", "# left pad with zeros, insert decimal point, and add exponent", "if", "dot_pos", "<=", "0", ":", "digits", "=", "'0'", "*", "(", "1", "-", "dot_pos", ")", "+", "digits", "dot_pos", "+=", "1", "-", "dot_pos", "assert", "1", "<=", "dot_pos", "<=", "len", "(", "digits", ")", "if", "dot_pos", "<", "len", "(", "digits", ")", ":", "digits", "=", "digits", "[", ":", "dot_pos", "]", "+", "'.'", "+", "digits", "[", "dot_pos", ":", "]", "if", "use_exponent", ":", "digits", "+=", "\"e{0:+03d}\"", ".", "format", "(", "exp", ")", "return", "'-'", "+", "digits", "if", "negative", "else", "digits" ]
Given a (possibly empty) string of digits and an integer dot_pos indicating the position of the decimal point relative to the start of that string, output a formatted numeric string with the same value and same implicit exponent.
[ "Given", "a", "(", "possibly", "empty", ")", "string", "of", "digits", "and", "an", "integer", "dot_pos", "indicating", "the", "position", "of", "the", "decimal", "point", "relative", "to", "the", "start", "of", "that", "string", "output", "a", "formatted", "numeric", "string", "with", "the", "same", "value", "and", "same", "implicit", "exponent", "." ]
python
train
36.740741
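Two worked calls make the exponent rule above concrete; the helper is private, so importing it from the record's path is for illustration only:

from bigfloat.core import _format_finite  # private helper, per the record's path

# value = 0.123 * 10**1 = 1.23 -> plain decimal form (no exponent needed)
print(_format_finite(False, '123', 1))   # '1.23'

# value = 0.123 * 10**-4 = 1.23e-05 -> dot_pos <= -4 triggers exponent form
print(_format_finite(True, '123', -4))   # '-1.23e-05'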
JarryShaw/PyPCAPKit
src/protocols/internet/internet.py
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/protocols/internet/internet.py#L77-L111
def _decode_next_layer(self, dict_, proto=None, length=None, *, version=4, ipv6_exthdr=None): """Decode next layer extractor. Positional arguments: * dict_ -- dict, info buffer * proto -- str, next layer protocol name * length -- int, valid (not padding) length Keyword Arguments: * version -- int, IP version (4 by default) <keyword> 4 / 6 * ipv6_exthdr -- ProtoChain, ProtoChain of IPv6 extension headers Returns: * dict -- current protocol with next layer extracted """ if self._onerror: next_ = beholder(self._import_next_layer)(self, proto, length, version=version) else: next_ = self._import_next_layer(proto, length, version=version) info, chain = next_.info, next_.protochain # make next layer protocol name layer = next_.alias.lower() # proto = next_.__class__.__name__ # write info and protocol chain into dict dict_[layer] = info self._next = next_ if ipv6_exthdr is not None: for proto in reversed(ipv6_exthdr): chain = ProtoChain(proto.__class__, proto.alias, basis=chain) self._protos = ProtoChain(self.__class__, self.alias, basis=chain) return dict_
[ "def", "_decode_next_layer", "(", "self", ",", "dict_", ",", "proto", "=", "None", ",", "length", "=", "None", ",", "*", ",", "version", "=", "4", ",", "ipv6_exthdr", "=", "None", ")", ":", "if", "self", ".", "_onerror", ":", "next_", "=", "beholder", "(", "self", ".", "_import_next_layer", ")", "(", "self", ",", "proto", ",", "length", ",", "version", "=", "version", ")", "else", ":", "next_", "=", "self", ".", "_import_next_layer", "(", "proto", ",", "length", ",", "version", "=", "version", ")", "info", ",", "chain", "=", "next_", ".", "info", ",", "next_", ".", "protochain", "# make next layer protocol name", "layer", "=", "next_", ".", "alias", ".", "lower", "(", ")", "# proto = next_.__class__.__name__", "# write info and protocol chain into dict", "dict_", "[", "layer", "]", "=", "info", "self", ".", "_next", "=", "next_", "if", "ipv6_exthdr", "is", "not", "None", ":", "for", "proto", "in", "reversed", "(", "ipv6_exthdr", ")", ":", "chain", "=", "ProtoChain", "(", "proto", ".", "__class__", ",", "proto", ".", "alias", ",", "basis", "=", "chain", ")", "self", ".", "_protos", "=", "ProtoChain", "(", "self", ".", "__class__", ",", "self", ".", "alias", ",", "basis", "=", "chain", ")", "return", "dict_" ]
Decode next layer extractor. Positional arguments: * dict_ -- dict, info buffer * proto -- str, next layer protocol name * length -- int, valid (not padding) length Keyword Arguments: * version -- int, IP version (4 by default) <keyword> 4 / 6 * ipv6_exthdr -- ProtoChain, ProtoChain of IPv6 extension headers Returns: * dict -- current protocol with next layer extracted
[ "Decode", "next", "layer", "extractor", "." ]
python
train
37.8
guaix-ucm/numina
numina/instrument/simulation/atmosphere.py
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/instrument/simulation/atmosphere.py#L78-L89
def generate_moffat_profile(seeing_fwhm, alpha): """Generate a normalized Moffat profile from its FWHM and alpha""" scale = 2 * math.sqrt(2**(1.0 / alpha) - 1) gamma = seeing_fwhm / scale amplitude = 1.0 / math.pi * (alpha - 1) / gamma**2 seeing_model = Moffat2D(amplitude=amplitude, x_mean=0.0, y_mean=0.0, gamma=gamma, alpha=alpha) return seeing_model
[ "def", "generate_moffat_profile", "(", "seeing_fwhm", ",", "alpha", ")", ":", "scale", "=", "2", "*", "math", ".", "sqrt", "(", "2", "**", "(", "1.0", "/", "alpha", ")", "-", "1", ")", "gamma", "=", "seeing_fwhm", "/", "scale", "amplitude", "=", "1.0", "/", "math", ".", "pi", "*", "(", "alpha", "-", "1", ")", "/", "gamma", "**", "2", "seeing_model", "=", "Moffat2D", "(", "amplitude", "=", "amplitude", ",", "x_mean", "=", "0.0", ",", "y_mean", "=", "0.0", ",", "gamma", "=", "gamma", ",", "alpha", "=", "alpha", ")", "return", "seeing_model" ]
Generate a normalized Moffat profile from its FWHM and alpha
[ "Generate", "a", "normalized", "Moffat", "profile", "from", "its", "FWHM", "and", "alpha" ]
python
train
39.916667
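A usage sketch for the record above; that the returned object is callable like an astropy 2-D model is an assumption based on the Moffat2D name, and the amplitude choice normalizes the profile to unit integral:

from numina.instrument.simulation.atmosphere import generate_moffat_profile

psf = generate_moffat_profile(seeing_fwhm=0.9, alpha=3.5)
peak = psf(0.0, 0.0)  # evaluate at the center, where the profile peaks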
saltstack/salt
salt/modules/boto_secgroup.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_secgroup.py#L406-L431
def delete(name=None, group_id=None, region=None, key=None, keyid=None, profile=None, vpc_id=None, vpc_name=None): ''' Delete a security group. CLI example:: salt myminion boto_secgroup.delete mysecgroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) group = _get_group(conn, name=name, vpc_id=vpc_id, vpc_name=vpc_name, group_id=group_id, region=region, key=key, keyid=keyid, profile=profile) if group: deleted = conn.delete_security_group(group_id=group.id) if deleted: log.info('Deleted security group %s with id %s.', group.name, group.id) return True else: msg = 'Failed to delete security group {0}.'.format(name) log.error(msg) return False else: log.debug('Security group not found.') return False
[ "def", "delete", "(", "name", "=", "None", ",", "group_id", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ",", "vpc_id", "=", "None", ",", "vpc_name", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "group", "=", "_get_group", "(", "conn", ",", "name", "=", "name", ",", "vpc_id", "=", "vpc_id", ",", "vpc_name", "=", "vpc_name", ",", "group_id", "=", "group_id", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "group", ":", "deleted", "=", "conn", ".", "delete_security_group", "(", "group_id", "=", "group", ".", "id", ")", "if", "deleted", ":", "log", ".", "info", "(", "'Deleted security group %s with id %s.'", ",", "group", ".", "name", ",", "group", ".", "id", ")", "return", "True", "else", ":", "msg", "=", "'Failed to delete security group {0}.'", ".", "format", "(", "name", ")", "log", ".", "error", "(", "msg", ")", "return", "False", "else", ":", "log", ".", "debug", "(", "'Security group not found.'", ")", "return", "False" ]
Delete a security group. CLI example:: salt myminion boto_secgroup.delete mysecgroup
[ "Delete", "a", "security", "group", "." ]
python
train
34.923077
Fuyukai/ConfigMaster
configmaster/ConfigFile.py
https://github.com/Fuyukai/ConfigMaster/blob/8018aa415da55c84edaa8a49664f674758a14edd/configmaster/ConfigFile.py#L81-L90
def apply_defaults(self, other_config): """ Applies default values from a different ConfigObject or ConfigKey object to this ConfigObject. If there are any values in this object that are also in the default object, it will use the values from this object. """ if isinstance(other_config, self.__class__): self.config.load_from_dict(other_config.config, overwrite=False) else: self.config.load_from_dict(other_config, overwrite=False)
[ "def", "apply_defaults", "(", "self", ",", "other_config", ")", ":", "if", "isinstance", "(", "other_config", ",", "self", ".", "__class__", ")", ":", "self", ".", "config", ".", "load_from_dict", "(", "other_config", ".", "config", ",", "overwrite", "=", "False", ")", "else", ":", "self", ".", "config", ".", "load_from_dict", "(", "other_config", ",", "overwrite", "=", "False", ")" ]
Applies default values from a different ConfigObject or ConfigKey object to this ConfigObject. If there are any values in this object that are also in the default object, it will use the values from this object.
[ "Applies", "default", "values", "from", "a", "different", "ConfigObject", "or", "ConfigKey", "object", "to", "this", "ConfigObject", "." ]
python
train
49.7
cloudera/cm_api
python/examples/aws.py
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/examples/aws.py#L106-L117
def call_s3guard_prune(credential_name): """ Runs S3Guard prune command on external account associated with the given credential_name. """ # Get the AWS credential account associated with the credential account = get_external_account(api, credential_name) # Invoke the prune command for the account by its name cmd = account.external_account_cmd_by_name('S3GuardPrune') print ("Issued '{0}' command with id '{1}'".format(cmd.name, cmd.id)) print ("Waiting for command {0} to finish...".format(cmd.id)) cmd = cmd.wait() print ("Command succeeded: {0}".format(cmd.success))
[ "def", "call_s3guard_prune", "(", "credential_name", ")", ":", "# Get the AWS credential account associated with the credential", "account", "=", "get_external_account", "(", "api", ",", "credential_name", ")", "# Invoke the prune command for the account by its name", "cmd", "=", "account", ".", "external_account_cmd_by_name", "(", "'S3GuardPrune'", ")", "print", "(", "\"Issued '{0}' command with id '{1}'\"", ".", "format", "(", "cmd", ".", "name", ",", "cmd", ".", "id", ")", ")", "print", "(", "\"Waiting for command {0} to finish...\"", ".", "format", "(", "cmd", ".", "id", ")", ")", "cmd", "=", "cmd", ".", "wait", "(", ")", "print", "(", "\"Command succeeded: {0}\"", ".", "format", "(", "cmd", ".", "success", ")", ")" ]
Runs S3Guard prune command on external account associated with the given credential_name.
[ "Runs", "S3Guard", "prune", "command", "on", "external", "account", "associated", "with", "the", "given", "credential_name", "." ]
python
train
48.583333
onnx/onnxmltools
onnxutils/onnxconverter_common/topology.py
https://github.com/onnx/onnxmltools/blob/d4e4c31990fc2d9fd1f92139f497d360914c9df2/onnxutils/onnxconverter_common/topology.py#L174-L194
def declare_local_variable(self, raw_name, type=None, prepend=False): ''' This function may create a new variable in this scope. If raw_name has been used to create other variables, the new variable will hide all other variables created using raw_name. ''' # Get unique ID for the new variable onnx_name = self.get_unique_variable_name(raw_name) # Create the variable variable = Variable(raw_name, onnx_name, self.name, type) self.variables[onnx_name] = variable if raw_name in self.variable_name_mapping: # Hide existing variables with the same raw_name if not prepend: self.variable_name_mapping[raw_name].append(onnx_name) else: self.variable_name_mapping[raw_name].insert(0, onnx_name) else: self.variable_name_mapping[raw_name] = [onnx_name] return variable
[ "def", "declare_local_variable", "(", "self", ",", "raw_name", ",", "type", "=", "None", ",", "prepend", "=", "False", ")", ":", "# Get unique ID for the new variable", "onnx_name", "=", "self", ".", "get_unique_variable_name", "(", "raw_name", ")", "# Create the variable", "variable", "=", "Variable", "(", "raw_name", ",", "onnx_name", ",", "self", ".", "name", ",", "type", ")", "self", ".", "variables", "[", "onnx_name", "]", "=", "variable", "if", "raw_name", "in", "self", ".", "variable_name_mapping", ":", "# Hide existing variables with the same raw_name", "if", "not", "prepend", ":", "self", ".", "variable_name_mapping", "[", "raw_name", "]", ".", "append", "(", "onnx_name", ")", "else", ":", "self", ".", "variable_name_mapping", "[", "raw_name", "]", ".", "insert", "(", "0", ",", "onnx_name", ")", "else", ":", "self", ".", "variable_name_mapping", "[", "raw_name", "]", "=", "[", "onnx_name", "]", "return", "variable" ]
This function may create a new variable in this scope. If raw_name has been used to create other variables, the new variable will hide all other variables created using raw_name.
[ "This", "function", "may", "create", "a", "new", "variable", "in", "this", "scope", ".", "If", "raw_name", "has", "been", "used", "to", "create", "other", "variables", "the", "new", "variable", "will", "hide", "all", "other", "variables", "created", "using", "raw_name", "." ]
python
train
43.809524
ambitioninc/django-query-builder
querybuilder/query.py
https://github.com/ambitioninc/django-query-builder/blob/113a7d845d3ddc6a45621b9880308e756f87c5bf/querybuilder/query.py#L1324-L1337
def get_field_identifiers(self): """ Builds a list of the field identifiers for all tables and joined tables by calling ``get_field_identifiers()`` on each table :return: list of field identifiers :rtype: list of str """ field_identifiers = [] for table in self.tables: field_identifiers += table.get_field_identifiers() for join_item in self.joins: field_identifiers += join_item.right_table.get_field_identifiers() return field_identifiers
[ "def", "get_field_identifiers", "(", "self", ")", ":", "field_identifiers", "=", "[", "]", "for", "table", "in", "self", ".", "tables", ":", "field_identifiers", "+=", "table", ".", "get_field_identifiers", "(", ")", "for", "join_item", "in", "self", ".", "joins", ":", "field_identifiers", "+=", "join_item", ".", "right_table", ".", "get_field_identifiers", "(", ")", "return", "field_identifiers" ]
Builds a list of the field identifiers for all tables and joined tables by calling ``get_field_identifiers()`` on each table :return: list of field identifiers :rtype: list of str
[ "Builds", "a", "list", "of", "the", "field", "identifiers", "for", "all", "tables", "and", "joined", "tables", "by", "calling", "get_field_identifiers", "()", "on", "each", "table" ]
python
train
38.071429
leosartaj/tvstats
tvstats/graph.py
https://github.com/leosartaj/tvstats/blob/164fe736111d43869f8c9686e07a5ab1b9f22444/tvstats/graph.py#L7-L24
def graphdata(data): """returns ratings and episode number to be used for making graphs""" data = jh.get_ratings(data) num = 1 rating_final = [] episode_final = [] for k,v in data.iteritems(): rating=[] epinum=[] for r in v: if r != None: rating.append(float(r)) epinum.append(num) num+=1 rating_final.append(rating) episode_final.append(epinum) return rating_final,episode_final
[ "def", "graphdata", "(", "data", ")", ":", "data", "=", "jh", ".", "get_ratings", "(", "data", ")", "num", "=", "1", "rating_final", "=", "[", "]", "episode_final", "=", "[", "]", "for", "k", ",", "v", "in", "data", ".", "iteritems", "(", ")", ":", "rating", "=", "[", "]", "epinum", "=", "[", "]", "for", "r", "in", "v", ":", "if", "r", "!=", "None", ":", "rating", ".", "append", "(", "float", "(", "r", ")", ")", "epinum", ".", "append", "(", "num", ")", "num", "+=", "1", "rating_final", ".", "append", "(", "rating", ")", "episode_final", ".", "append", "(", "epinum", ")", "return", "rating_final", ",", "episode_final" ]
returns ratings and episode number to be used for making graphs
[ "returns", "ratings", "and", "episode", "number", "to", "be", "used", "for", "making", "graphs" ]
python
train
27.444444
teepark/greenhouse
greenhouse/scheduler.py
https://github.com/teepark/greenhouse/blob/8fd1be4f5443ba090346b5ec82fdbeb0a060d956/greenhouse/scheduler.py#L779-L795
def remove_global_hook(handler): """remove a callback from the list of global hooks :param handler: the callback function, previously added with global_hook, to remove from the list of global hooks :type handler: function :returns: bool, whether the handler was removed from the global hooks """ for i, cb in enumerate(state.global_hooks): cb = cb() if cb is not None and cb is handler: state.global_hooks.pop(i) log.info("removing a global hook callback") return True return False
[ "def", "remove_global_hook", "(", "handler", ")", ":", "for", "i", ",", "cb", "in", "enumerate", "(", "state", ".", "global_hooks", ")", ":", "cb", "=", "cb", "(", ")", "if", "cb", "is", "not", "None", "and", "cb", "is", "handler", ":", "state", ".", "global_hooks", ".", "pop", "(", "i", ")", "log", ".", "info", "(", "\"removing a global hook callback\"", ")", "return", "True", "return", "False" ]
remove a callback from the list of global hooks :param handler: the callback function, previously added with global_hook, to remove from the list of global hooks :type handler: function :returns: bool, whether the handler was removed from the global hooks
[ "remove", "a", "callback", "from", "the", "list", "of", "global", "hooks" ]
python
train
33.117647
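The cb() dereference above implies the hooks are stored as weak references; here is a self-contained sketch of that pattern (the registration helper below is an assumption for illustration, not greenhouse's actual API):

import weakref

global_hooks = []

def add_global_hook(handler):
    # store weakly so the hook list does not keep callbacks alive
    global_hooks.append(weakref.ref(handler))

def remove_global_hook(handler):
    for i, cb in enumerate(global_hooks):
        cb = cb()  # dereference; None if the callback was collected
        if cb is not None and cb is handler:
            global_hooks.pop(i)
            return True
    return False

def my_hook():
    pass

add_global_hook(my_hook)
print(remove_global_hook(my_hook))  # True
print(remove_global_hook(my_hook))  # False (already removed)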
3DLIRIOUS/MeshLabXML
meshlabxml/transform.py
https://github.com/3DLIRIOUS/MeshLabXML/blob/177cce21e92baca500f56a932d66bd9a33257af8/meshlabxml/transform.py#L623-L655
def wrap2cylinder(script, radius=1, pitch=0, taper=0, pitch_func=None, taper_func=None): """Deform mesh around cylinder of radius and axis z y = 0 will be on the surface of radius "radius" pitch != 0 will create a helix, with distance "pitch" traveled in z for each rotation taper = change in r over z. E.g. a value of 0.5 will shrink r by 0.5 for every z length of 1 """ """vert_function(s=s, x='(%s+y-taper)*sin(x/(%s+y))' % (radius, radius), y='(%s+y)*cos(x/(%s+y))' % (radius, radius), z='z-%s*x/(2*%s*(%s+y))' % (pitch, pi, radius))""" if pitch_func is None: pitch_func = '-(pitch)*x/(2*pi*(radius))' pitch_func = pitch_func.replace( 'pitch', str(pitch)).replace( 'pi', str(math.pi)).replace( 'radius', str(radius)) if taper_func is None: taper_func = '-(taper)*(pitch_func)' taper_func = taper_func.replace( 'taper', str(taper)).replace( 'pitch_func', str(pitch_func)).replace( 'pi', str(math.pi)) x_func = '(y+(radius)+(taper_func))*sin(x/(radius))'.replace( 'radius', str(radius)).replace('taper_func', str(taper_func)) y_func = '(y+(radius)+(taper_func))*cos(x/(radius))'.replace( 'radius', str(radius)).replace('taper_func', str(taper_func)) z_func = 'z+(pitch_func)'.replace('pitch_func', str(pitch_func)) vert_function(script, x_func, y_func, z_func) return None
[ "def", "wrap2cylinder", "(", "script", ",", "radius", "=", "1", ",", "pitch", "=", "0", ",", "taper", "=", "0", ",", "pitch_func", "=", "None", ",", "taper_func", "=", "None", ")", ":", "\"\"\"vert_function(s=s, x='(%s+y-taper)*sin(x/(%s+y))' % (radius, radius),\n y='(%s+y)*cos(x/(%s+y))' % (radius, radius),\n z='z-%s*x/(2*%s*(%s+y))' % (pitch, pi, radius))\"\"\"", "if", "pitch_func", "is", "None", ":", "pitch_func", "=", "'-(pitch)*x/(2*pi*(radius))'", "pitch_func", "=", "pitch_func", ".", "replace", "(", "'pitch'", ",", "str", "(", "pitch", ")", ")", ".", "replace", "(", "'pi'", ",", "str", "(", "math", ".", "pi", ")", ")", ".", "replace", "(", "'radius'", ",", "str", "(", "radius", ")", ")", "if", "taper_func", "is", "None", ":", "taper_func", "=", "'-(taper)*(pitch_func)'", "taper_func", "=", "taper_func", ".", "replace", "(", "'taper'", ",", "str", "(", "taper", ")", ")", ".", "replace", "(", "'pitch_func'", ",", "str", "(", "pitch_func", ")", ")", ".", "replace", "(", "'pi'", ",", "str", "(", "math", ".", "pi", ")", ")", "x_func", "=", "'(y+(radius)+(taper_func))*sin(x/(radius))'", ".", "replace", "(", "'radius'", ",", "str", "(", "radius", ")", ")", ".", "replace", "(", "'taper_func'", ",", "str", "(", "taper_func", ")", ")", "y_func", "=", "'(y+(radius)+(taper_func))*cos(x/(radius))'", ".", "replace", "(", "'radius'", ",", "str", "(", "radius", ")", ")", ".", "replace", "(", "'taper_func'", ",", "str", "(", "taper_func", ")", ")", "z_func", "=", "'z+(pitch_func)'", ".", "replace", "(", "'pitch_func'", ",", "str", "(", "pitch_func", ")", ")", "vert_function", "(", "script", ",", "x_func", ",", "y_func", ",", "z_func", ")", "return", "None" ]
Deform mesh around cylinder of radius and axis z y = 0 will be on the surface of radius "radius" pitch != 0 will create a helix, with distance "pitch" traveled in z for each rotation taper = change in r over z. E.g. a value of 0.5 will shrink r by 0.5 for every z length of 1
[ "Deform", "mesh", "around", "cylinder", "of", "radius", "and", "axis", "z" ]
python
test
44.545455
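The function builds its muparser-style expression strings purely by textual substitution; a small sketch of that step in isolation (the parameter values are arbitrary). Note the substitution order is load-bearing: 'pitch' must be replaced before 'pi', since 'pi' is a substring of 'pitch':

import math

radius, pitch, taper = 2.0, 0.5, 0.1

pitch_func = ('-(pitch)*x/(2*pi*(radius))'
              .replace('pitch', str(pitch))
              .replace('pi', str(math.pi))
              .replace('radius', str(radius)))
taper_func = ('-(taper)*(pitch_func)'
              .replace('taper', str(taper))
              .replace('pitch_func', pitch_func))
x_func = ('(y+(radius)+(taper_func))*sin(x/(radius))'
          .replace('radius', str(radius))
          .replace('taper_func', taper_func))

print(x_func)  # the expression string handed to vert_function()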
ic-labs/django-icekit
icekit_events/templatetags/events_tags.py
https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit_events/templatetags/events_tags.py#L65-L83
def _format_with_same_year_and_month(format_specifier):
    """
    Return a version of `format_specifier` that renders a date assuming it
    has the same year and month as another date. Usually this means
    omitting the year and month.

    This can be overridden by specifying a format that has
    `_SAME_YEAR_SAME_MONTH` appended to the name in the project's `formats`
    spec.
    """
    test_format_specifier = format_specifier + "_SAME_YEAR_SAME_MONTH"
    test_format = get_format(test_format_specifier, use_l10n=True)
    if test_format == test_format_specifier:
        # this format string didn't resolve to anything and may be a raw format.
        # Use a regex to remove year and month markers instead.
        no_year = re.sub(YEAR_RE, '', get_format(format_specifier))
        return re.sub(MONTH_RE, '', no_year)
    else:
        return test_format
[ "def", "_format_with_same_year_and_month", "(", "format_specifier", ")", ":", "test_format_specifier", "=", "format_specifier", "+", "\"_SAME_YEAR_SAME_MONTH\"", "test_format", "=", "get_format", "(", "test_format_specifier", ",", "use_l10n", "=", "True", ")", "if", "test_format", "==", "test_format_specifier", ":", "# this format string didn't resolve to anything and may be a raw format.", "# Use a regex to remove year and month markers instead.", "no_year", "=", "re", ".", "sub", "(", "YEAR_RE", ",", "''", ",", "get_format", "(", "format_specifier", ")", ")", "return", "re", ".", "sub", "(", "MONTH_RE", ",", "''", ",", "no_year", ")", "else", ":", "return", "test_format" ]
Return a version of `format_specifier` that renders a date assuming it has the same year and month as another date. Usually this means omitting the year and month. This can be overridden by specifying a format that has `_SAME_YEAR_SAME_MONTH` appended to the name in the project's `formats` spec.
[ "Return", "a", "version", "of", "format_specifier", "that", "renders", "a", "date", "assuming", "it", "has", "the", "same", "year", "and", "month", "as", "another", "date", ".", "Usually", "this", "means", "ommitting", "the", "year", "and", "month", "." ]
python
train
45.105263
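The regex fallback can be exercised without Django; the patterns below are assumed stand-ins for the module's YEAR_RE / MONTH_RE (which are not shown in this record), matching Django's 'Y' (year) and 'N' (abbreviated month) format characters:

import re

YEAR_RE = re.compile(r'[,\s]*Y[,\s]*')   # assumed shape of the real pattern
MONTH_RE = re.compile(r'[,\s]*N[,\s]*')  # assumed shape of the real pattern

fmt = 'N j, Y'                       # e.g. "Feb. 4, 2024"
no_year = YEAR_RE.sub('', fmt)       # -> 'N j'
print(MONTH_RE.sub('', no_year))     # -> 'j' (day of month only)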
xeroc/python-graphenelib
graphenecommon/transactionbuilder.py
https://github.com/xeroc/python-graphenelib/blob/8bb5396bc79998ee424cf3813af478304173f3a6/graphenecommon/transactionbuilder.py#L367-L400
def constructTx(self):
    """ Construct the actual transaction and store it in the class's
        dict store
    """
    ops = list()
    for op in self.ops:
        if isinstance(op, ProposalBuilder):
            # This operation is a proposal and needs to be dealt with
            # differently
            proposal = op.get_raw()
            if proposal:
                ops.append(proposal)
        elif isinstance(op, self.operation_class):
            ops.extend([op])
        else:
            # otherwise, we simply wrap ops into Operations
            ops.extend([self.operation_class(op)])

    # We now wrap everything into an actual transaction
    ops = self.add_required_fees(ops, asset_id=self.fee_asset_id)
    expiration = formatTimeFromNow(
        self.expiration
        or self.blockchain.expiration
        or 30  # defaults to 30 seconds
    )
    ref_block_num, ref_block_prefix = self.get_block_params()
    self.tx = self.signed_transaction_class(
        ref_block_num=ref_block_num,
        ref_block_prefix=ref_block_prefix,
        expiration=expiration,
        operations=ops,
    )
    dict.update(self, self.tx.json())
    self._unset_require_reconstruction()
[ "def", "constructTx", "(", "self", ")", ":", "ops", "=", "list", "(", ")", "for", "op", "in", "self", ".", "ops", ":", "if", "isinstance", "(", "op", ",", "ProposalBuilder", ")", ":", "# This operation is a proposal an needs to be deal with", "# differently", "proposal", "=", "op", ".", "get_raw", "(", ")", "if", "proposal", ":", "ops", ".", "append", "(", "proposal", ")", "elif", "isinstance", "(", "op", ",", "self", ".", "operation_class", ")", ":", "ops", ".", "extend", "(", "[", "op", "]", ")", "else", ":", "# otherwise, we simply wrap ops into Operations", "ops", ".", "extend", "(", "[", "self", ".", "operation_class", "(", "op", ")", "]", ")", "# We now wrap everything into an actual transaction", "ops", "=", "self", ".", "add_required_fees", "(", "ops", ",", "asset_id", "=", "self", ".", "fee_asset_id", ")", "expiration", "=", "formatTimeFromNow", "(", "self", ".", "expiration", "or", "self", ".", "blockchain", ".", "expiration", "or", "30", "# defaults to 30 seconds", ")", "ref_block_num", ",", "ref_block_prefix", "=", "self", ".", "get_block_params", "(", ")", "self", ".", "tx", "=", "self", ".", "signed_transaction_class", "(", "ref_block_num", "=", "ref_block_num", ",", "ref_block_prefix", "=", "ref_block_prefix", ",", "expiration", "=", "expiration", ",", "operations", "=", "ops", ",", ")", "dict", ".", "update", "(", "self", ",", "self", ".", "tx", ".", "json", "(", ")", ")", "self", ".", "_unset_require_reconstruction", "(", ")" ]
Construct the actual transaction and store it in the class's dict store
[ "Construct", "the", "actual", "transaction", "and", "store", "it", "in", "the", "class", "s", "dict", "store" ]
python
valid
37.911765
VirusTotal/yara-python
setup.py
https://github.com/VirusTotal/yara-python/blob/c3992bdc3a95d42e9df249ae92e726b74737e859/setup.py#L161-L273
def run(self):
    """Execute the build command."""
    module = self.distribution.ext_modules[0]
    base_dir = os.path.dirname(__file__)

    if base_dir:
        os.chdir(base_dir)

    exclusions = []

    for define in self.define or []:
        module.define_macros.append(define)

    for library in self.libraries or []:
        module.libraries.append(library)

    building_for_windows = self.plat_name in ('win32','win-amd64')
    building_for_osx = 'macosx' in self.plat_name
    building_for_linux = 'linux' in self.plat_name
    building_for_freebsd = 'freebsd' in self.plat_name
    building_for_openbsd = 'openbsd' in self.plat_name

    # need testing
    if building_for_linux:
        module.define_macros.append(('USE_LINUX_PROC', '1'))
    elif building_for_windows:
        module.define_macros.append(('USE_WINDOWS_PROC', '1'))
        module.define_macros.append(('_CRT_SECURE_NO_WARNINGS', '1'))
        module.libraries.append('kernel32')
        module.libraries.append('advapi32')
        module.libraries.append('user32')
        module.libraries.append('crypt32')
        module.libraries.append('ws2_32')
    elif building_for_osx:
        module.define_macros.append(('USE_MACH_PROC', '1'))
        module.include_dirs.append('/usr/local/opt/openssl/include')
        module.include_dirs.append('/opt/local/include')
        module.library_dirs.append('/opt/local/lib')
        module.include_dirs.append('/usr/local/include')
        module.library_dirs.append('/usr/local/lib')
    elif building_for_freebsd:
        module.define_macros.append(('USE_FREEBSD_PROC', '1'))
        module.include_dirs.append('/opt/local/include')
        module.library_dirs.append('/opt/local/lib')
        module.include_dirs.append('/usr/local/include')
        module.library_dirs.append('/usr/local/lib')
    elif building_for_openbsd:
        module.define_macros.append(('USE_OPENBSD_PROC', '1'))
        module.include_dirs.append('/opt/local/include')
        module.library_dirs.append('/opt/local/lib')
        module.include_dirs.append('/usr/local/include')
        module.library_dirs.append('/usr/local/lib')
    else:
        module.define_macros.append(('USE_NO_PROC', '1'))

    if has_function('memmem'):
        module.define_macros.append(('HAVE_MEMMEM', '1'))
    if has_function('strlcpy'):
        module.define_macros.append(('HAVE_STRLCPY', '1'))
    if has_function('strlcat'):
        module.define_macros.append(('HAVE_STRLCAT', '1'))

    if self.enable_profiling:
        module.define_macros.append(('PROFILING_ENABLED', '1'))

    if self.dynamic_linking:
        module.libraries.append('yara')
    else:
        if not self.define or not ('HASH_MODULE', '1') in self.define:
            if (has_function('MD5_Init', libraries=['crypto']) and
                    has_function('SHA256_Init', libraries=['crypto'])):
                module.define_macros.append(('HASH_MODULE', '1'))
                module.define_macros.append(('HAVE_LIBCRYPTO', '1'))
                module.libraries.append('crypto')
            else:
                exclusions.append('yara/libyara/modules/hash.c')

        if self.enable_magic:
            module.define_macros.append(('MAGIC_MODULE', '1'))
            module.libraries.append('magic')
        else:
            exclusions.append('yara/libyara/modules/magic.c')

        if self.enable_cuckoo:
            module.define_macros.append(('CUCKOO_MODULE', '1'))
            module.libraries.append('jansson')
        else:
            exclusions.append('yara/libyara/modules/cuckoo.c')

        if self.enable_dotnet:
            module.define_macros.append(('DOTNET_MODULE', '1'))
        else:
            exclusions.append('yara/libyara/modules/dotnet.c')

        if self.enable_dex:
            module.define_macros.append(('DEX_MODULE', '1'))
        else:
            exclusions.append('yara/libyara/modules/dex.c')

        if self.enable_macho:
            module.define_macros.append(('MACHO_MODULE', '1'))
        else:
            exclusions.append('yara/libyara/modules/macho.c')

    exclusions = [os.path.normpath(x) for x in exclusions]

    for directory, _, files in os.walk('yara/libyara/'):
        for x in files:
            x = os.path.normpath(os.path.join(directory, x))
            if x.endswith('.c') and x not in exclusions:
                module.sources.append(x)

    build_ext.run(self)
[ "def", "run", "(", "self", ")", ":", "module", "=", "self", ".", "distribution", ".", "ext_modules", "[", "0", "]", "base_dir", "=", "os", ".", "path", ".", "dirname", "(", "__file__", ")", "if", "base_dir", ":", "os", ".", "chdir", "(", "base_dir", ")", "exclusions", "=", "[", "]", "for", "define", "in", "self", ".", "define", "or", "[", "]", ":", "module", ".", "define_macros", ".", "append", "(", "define", ")", "for", "library", "in", "self", ".", "libraries", "or", "[", "]", ":", "module", ".", "libraries", ".", "append", "(", "library", ")", "building_for_windows", "=", "self", ".", "plat_name", "in", "(", "'win32'", ",", "'win-amd64'", ")", "building_for_osx", "=", "'macosx'", "in", "self", ".", "plat_name", "building_for_linux", "=", "'linux'", "in", "self", ".", "plat_name", "building_for_freebsd", "=", "'freebsd'", "in", "self", ".", "plat_name", "building_for_openbsd", "=", "'openbsd'", "in", "self", ".", "plat_name", "# need testing", "if", "building_for_linux", ":", "module", ".", "define_macros", ".", "append", "(", "(", "'USE_LINUX_PROC'", ",", "'1'", ")", ")", "elif", "building_for_windows", ":", "module", ".", "define_macros", ".", "append", "(", "(", "'USE_WINDOWS_PROC'", ",", "'1'", ")", ")", "module", ".", "define_macros", ".", "append", "(", "(", "'_CRT_SECURE_NO_WARNINGS'", ",", "'1'", ")", ")", "module", ".", "libraries", ".", "append", "(", "'kernel32'", ")", "module", ".", "libraries", ".", "append", "(", "'advapi32'", ")", "module", ".", "libraries", ".", "append", "(", "'user32'", ")", "module", ".", "libraries", ".", "append", "(", "'crypt32'", ")", "module", ".", "libraries", ".", "append", "(", "'ws2_32'", ")", "elif", "building_for_osx", ":", "module", ".", "define_macros", ".", "append", "(", "(", "'USE_MACH_PROC'", ",", "'1'", ")", ")", "module", ".", "include_dirs", ".", "append", "(", "'/usr/local/opt/openssl/include'", ")", "module", ".", "include_dirs", ".", "append", "(", "'/opt/local/include'", ")", "module", ".", "library_dirs", ".", "append", "(", "'/opt/local/lib'", ")", "module", ".", "include_dirs", ".", "append", "(", "'/usr/local/include'", ")", "module", ".", "library_dirs", ".", "append", "(", "'/usr/local/lib'", ")", "elif", "building_for_freebsd", ":", "module", ".", "define_macros", ".", "append", "(", "(", "'USE_FREEBSD_PROC'", ",", "'1'", ")", ")", "module", ".", "include_dirs", ".", "append", "(", "'/opt/local/include'", ")", "module", ".", "library_dirs", ".", "append", "(", "'/opt/local/lib'", ")", "module", ".", "include_dirs", ".", "append", "(", "'/usr/local/include'", ")", "module", ".", "library_dirs", ".", "append", "(", "'/usr/local/lib'", ")", "elif", "building_for_openbsd", ":", "module", ".", "define_macros", ".", "append", "(", "(", "'USE_OPENBSD_PROC'", ",", "'1'", ")", ")", "module", ".", "include_dirs", ".", "append", "(", "'/opt/local/include'", ")", "module", ".", "library_dirs", ".", "append", "(", "'/opt/local/lib'", ")", "module", ".", "include_dirs", ".", "append", "(", "'/usr/local/include'", ")", "module", ".", "library_dirs", ".", "append", "(", "'/usr/local/lib'", ")", "else", ":", "module", ".", "define_macros", ".", "append", "(", "(", "'USE_NO_PROC'", ",", "'1'", ")", ")", "if", "has_function", "(", "'memmem'", ")", ":", "module", ".", "define_macros", ".", "append", "(", "(", "'HAVE_MEMMEM'", ",", "'1'", ")", ")", "if", "has_function", "(", "'strlcpy'", ")", ":", "module", ".", "define_macros", ".", "append", "(", "(", "'HAVE_STRLCPY'", ",", "'1'", ")", ")", "if", "has_function", "(", "'strlcat'", ")", 
":", "module", ".", "define_macros", ".", "append", "(", "(", "'HAVE_STRLCAT'", ",", "'1'", ")", ")", "if", "self", ".", "enable_profiling", ":", "module", ".", "define_macros", ".", "append", "(", "(", "'PROFILING_ENABLED'", ",", "'1'", ")", ")", "if", "self", ".", "dynamic_linking", ":", "module", ".", "libraries", ".", "append", "(", "'yara'", ")", "else", ":", "if", "not", "self", ".", "define", "or", "not", "(", "'HASH_MODULE'", ",", "'1'", ")", "in", "self", ".", "define", ":", "if", "(", "has_function", "(", "'MD5_Init'", ",", "libraries", "=", "[", "'crypto'", "]", ")", "and", "has_function", "(", "'SHA256_Init'", ",", "libraries", "=", "[", "'crypto'", "]", ")", ")", ":", "module", ".", "define_macros", ".", "append", "(", "(", "'HASH_MODULE'", ",", "'1'", ")", ")", "module", ".", "define_macros", ".", "append", "(", "(", "'HAVE_LIBCRYPTO'", ",", "'1'", ")", ")", "module", ".", "libraries", ".", "append", "(", "'crypto'", ")", "else", ":", "exclusions", ".", "append", "(", "'yara/libyara/modules/hash.c'", ")", "if", "self", ".", "enable_magic", ":", "module", ".", "define_macros", ".", "append", "(", "(", "'MAGIC_MODULE'", ",", "'1'", ")", ")", "module", ".", "libraries", ".", "append", "(", "'magic'", ")", "else", ":", "exclusions", ".", "append", "(", "'yara/libyara/modules/magic.c'", ")", "if", "self", ".", "enable_cuckoo", ":", "module", ".", "define_macros", ".", "append", "(", "(", "'CUCKOO_MODULE'", ",", "'1'", ")", ")", "module", ".", "libraries", ".", "append", "(", "'jansson'", ")", "else", ":", "exclusions", ".", "append", "(", "'yara/libyara/modules/cuckoo.c'", ")", "if", "self", ".", "enable_dotnet", ":", "module", ".", "define_macros", ".", "append", "(", "(", "'DOTNET_MODULE'", ",", "'1'", ")", ")", "else", ":", "exclusions", ".", "append", "(", "'yara/libyara/modules/dotnet.c'", ")", "if", "self", ".", "enable_dex", ":", "module", ".", "define_macros", ".", "append", "(", "(", "'DEX_MODULE'", ",", "'1'", ")", ")", "else", ":", "exclusions", ".", "append", "(", "'yara/libyara/modules/dex.c'", ")", "if", "self", ".", "enable_macho", ":", "module", ".", "define_macros", ".", "append", "(", "(", "'MACHO_MODULE'", ",", "'1'", ")", ")", "else", ":", "exclusions", ".", "append", "(", "'yara/libyara/modules/macho.c'", ")", "exclusions", "=", "[", "os", ".", "path", ".", "normpath", "(", "x", ")", "for", "x", "in", "exclusions", "]", "for", "directory", ",", "_", ",", "files", "in", "os", ".", "walk", "(", "'yara/libyara/'", ")", ":", "for", "x", "in", "files", ":", "x", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "x", ")", ")", "if", "x", ".", "endswith", "(", "'.c'", ")", "and", "x", "not", "in", "exclusions", ":", "module", ".", "sources", ".", "append", "(", "x", ")", "build_ext", ".", "run", "(", "self", ")" ]
Execute the build command.
[ "Execute", "the", "build", "command", "." ]
python
train
36.327434
onelogin/python-saml
src/onelogin/saml2/logout_response.py
https://github.com/onelogin/python-saml/blob/9fe7a72da5b4caa1529c1640b52d2649447ce49b/src/onelogin/saml2/logout_response.py#L50-L60
def get_issuer(self): """ Gets the Issuer of the Logout Response Message :return: The Issuer :rtype: string """ issuer = None issuer_nodes = self.__query('/samlp:LogoutResponse/saml:Issuer') if len(issuer_nodes) == 1: issuer = OneLogin_Saml2_Utils.element_text(issuer_nodes[0]) return issuer
[ "def", "get_issuer", "(", "self", ")", ":", "issuer", "=", "None", "issuer_nodes", "=", "self", ".", "__query", "(", "'/samlp:LogoutResponse/saml:Issuer'", ")", "if", "len", "(", "issuer_nodes", ")", "==", "1", ":", "issuer", "=", "OneLogin_Saml2_Utils", ".", "element_text", "(", "issuer_nodes", "[", "0", "]", ")", "return", "issuer" ]
Gets the Issuer of the Logout Response Message :return: The Issuer :rtype: string
[ "Gets", "the", "Issuer", "of", "the", "Logout", "Response", "Message", ":", "return", ":", "The", "Issuer", ":", "rtype", ":", "string" ]
python
train
33.181818
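A sketch of the same namespaced XPath lookup using lxml (the record's __query helper wraps something similar; the sample response below is hand-written, and the two namespace URNs are the standard SAML 2.0 ones):

from lxml import etree

xml = b'''<samlp:LogoutResponse
    xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol"
    xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion">
  <saml:Issuer>https://idp.example.com/metadata</saml:Issuer>
</samlp:LogoutResponse>'''

doc = etree.fromstring(xml)
nodes = doc.xpath(
    '/samlp:LogoutResponse/saml:Issuer',
    namespaces={'samlp': 'urn:oasis:names:tc:SAML:2.0:protocol',
                'saml': 'urn:oasis:names:tc:SAML:2.0:assertion'})
issuer = nodes[0].text if len(nodes) == 1 else None
print(issuer)  # https://idp.example.com/metadata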
inveniosoftware/invenio-oauthclient
invenio_oauthclient/handlers.py
https://github.com/inveniosoftware/invenio-oauthclient/blob/2500dc6935738107617aeade79e050d7608004bb/invenio_oauthclient/handlers.py#L266-L348
def authorized_signup_handler(resp, remote, *args, **kwargs):
    """Handle sign-in/up functionality.

    :param remote: The remote application.
    :param resp: The response.
    :returns: Redirect response.
    """
    # Remove any previously stored auto register session key
    session.pop(token_session_key(remote.name) + '_autoregister', None)

    # Store token in session
    # ----------------------
    # Set token in session - token object only returned if
    # current_user.is_authenticated().
    token = response_token_setter(remote, resp)
    handlers = current_oauthclient.signup_handlers[remote.name]

    # Sign-in/up user
    # ---------------
    if not current_user.is_authenticated:
        account_info = handlers['info'](resp)
        account_info_received.send(
            remote, token=token, response=resp, account_info=account_info
        )

        user = oauth_get_user(
            remote.consumer_key,
            account_info=account_info,
            access_token=token_getter(remote)[0],
        )

        if user is None:
            # Auto sign-up if user not found
            form = create_csrf_disabled_registrationform()
            form = fill_form(
                form,
                account_info['user']
            )
            user = oauth_register(form)

            # if registration fails ...
            if user is None:
                # requires extra information
                session[
                    token_session_key(remote.name) + '_autoregister'] = True
                session[token_session_key(remote.name) +
                        '_account_info'] = account_info
                session[token_session_key(remote.name) +
                        '_response'] = resp
                db.session.commit()
                return redirect(url_for(
                    '.signup',
                    remote_app=remote.name,
                ))

        # Authenticate user
        if not oauth_authenticate(remote.consumer_key, user,
                                  require_existing_link=False):
            return current_app.login_manager.unauthorized()

    # Link account
    # ------------
    # Need to store token in database instead of only the session when
    # called first time.
    token = response_token_setter(remote, resp)

    # Setup account
    # -------------
    if not token.remote_account.extra_data:
        account_setup = handlers['setup'](token, resp)
        account_setup_received.send(
            remote, token=token, response=resp, account_setup=account_setup
        )
        db.session.commit()
        account_setup_committed.send(remote, token=token)
    else:
        db.session.commit()

    # Redirect to next
    next_url = get_session_next_url(remote.name)
    if next_url:
        return redirect(next_url)
    return redirect(url_for('invenio_oauthclient_settings.index'))
[ "def", "authorized_signup_handler", "(", "resp", ",", "remote", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Remove any previously stored auto register session key", "session", ".", "pop", "(", "token_session_key", "(", "remote", ".", "name", ")", "+", "'_autoregister'", ",", "None", ")", "# Store token in session", "# ----------------------", "# Set token in session - token object only returned if", "# current_user.is_autenticated().", "token", "=", "response_token_setter", "(", "remote", ",", "resp", ")", "handlers", "=", "current_oauthclient", ".", "signup_handlers", "[", "remote", ".", "name", "]", "# Sign-in/up user", "# ---------------", "if", "not", "current_user", ".", "is_authenticated", ":", "account_info", "=", "handlers", "[", "'info'", "]", "(", "resp", ")", "account_info_received", ".", "send", "(", "remote", ",", "token", "=", "token", ",", "response", "=", "resp", ",", "account_info", "=", "account_info", ")", "user", "=", "oauth_get_user", "(", "remote", ".", "consumer_key", ",", "account_info", "=", "account_info", ",", "access_token", "=", "token_getter", "(", "remote", ")", "[", "0", "]", ",", ")", "if", "user", "is", "None", ":", "# Auto sign-up if user not found", "form", "=", "create_csrf_disabled_registrationform", "(", ")", "form", "=", "fill_form", "(", "form", ",", "account_info", "[", "'user'", "]", ")", "user", "=", "oauth_register", "(", "form", ")", "# if registration fails ...", "if", "user", "is", "None", ":", "# requires extra information", "session", "[", "token_session_key", "(", "remote", ".", "name", ")", "+", "'_autoregister'", "]", "=", "True", "session", "[", "token_session_key", "(", "remote", ".", "name", ")", "+", "'_account_info'", "]", "=", "account_info", "session", "[", "token_session_key", "(", "remote", ".", "name", ")", "+", "'_response'", "]", "=", "resp", "db", ".", "session", ".", "commit", "(", ")", "return", "redirect", "(", "url_for", "(", "'.signup'", ",", "remote_app", "=", "remote", ".", "name", ",", ")", ")", "# Authenticate user", "if", "not", "oauth_authenticate", "(", "remote", ".", "consumer_key", ",", "user", ",", "require_existing_link", "=", "False", ")", ":", "return", "current_app", ".", "login_manager", ".", "unauthorized", "(", ")", "# Link account", "# ------------", "# Need to store token in database instead of only the session when", "# called first time.", "token", "=", "response_token_setter", "(", "remote", ",", "resp", ")", "# Setup account", "# -------------", "if", "not", "token", ".", "remote_account", ".", "extra_data", ":", "account_setup", "=", "handlers", "[", "'setup'", "]", "(", "token", ",", "resp", ")", "account_setup_received", ".", "send", "(", "remote", ",", "token", "=", "token", ",", "response", "=", "resp", ",", "account_setup", "=", "account_setup", ")", "db", ".", "session", ".", "commit", "(", ")", "account_setup_committed", ".", "send", "(", "remote", ",", "token", "=", "token", ")", "else", ":", "db", ".", "session", ".", "commit", "(", ")", "# Redirect to next", "next_url", "=", "get_session_next_url", "(", "remote", ".", "name", ")", "if", "next_url", ":", "return", "redirect", "(", "next_url", ")", "return", "redirect", "(", "url_for", "(", "'invenio_oauthclient_settings.index'", ")", ")" ]
Handle sign-in/up functionality. :param remote: The remote application. :param resp: The response. :returns: Redirect response.
[ "Handle", "sign", "-", "in", "/", "up", "functionality", "." ]
python
train
34.012048
materialsproject/pymatgen
pymatgen/analysis/pourbaix_diagram.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/pourbaix_diagram.py#L796-L846
def get_pourbaix_plot(self, limits=None, title="", label_domains=True, plt=None): """ Plot Pourbaix diagram. Args: limits: 2D list containing limits of the Pourbaix diagram of the form [[xlo, xhi], [ylo, yhi]] title (str): Title to display on plot label_domains (bool): whether to label pourbaix domains plt (pyplot): Pyplot instance for plotting Returns: plt (pyplot) - matplotlib plot object with pourbaix diagram """ if limits is None: limits = [[-2, 16], [-3, 3]] plt = plt or pretty_plot(16) xlim = limits[0] ylim = limits[1] h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC], [xlim[1], -xlim[1] * PREFAC]]) o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23], [xlim[1], -xlim[1] * PREFAC + 1.23]]) neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]]) V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]]) ax = plt.gca() ax.set_xlim(xlim) ax.set_ylim(ylim) lw = 3 plt.plot(h_line[0], h_line[1], "r--", linewidth=lw) plt.plot(o_line[0], o_line[1], "r--", linewidth=lw) plt.plot(neutral_line[0], neutral_line[1], "k-.", linewidth=lw) plt.plot(V0_line[0], V0_line[1], "k-.", linewidth=lw) for entry, vertices in self._pd._stable_domain_vertices.items(): center = np.average(vertices, axis=0) x, y = np.transpose(np.vstack([vertices, vertices[0]])) plt.plot(x, y, 'k-', linewidth=lw) if label_domains: plt.annotate(generate_entry_label(entry), center, ha='center', va='center', fontsize=20, color="b") plt.xlabel("pH") plt.ylabel("E (V)") plt.title(title, fontsize=20, fontweight='bold') return plt
[ "def", "get_pourbaix_plot", "(", "self", ",", "limits", "=", "None", ",", "title", "=", "\"\"", ",", "label_domains", "=", "True", ",", "plt", "=", "None", ")", ":", "if", "limits", "is", "None", ":", "limits", "=", "[", "[", "-", "2", ",", "16", "]", ",", "[", "-", "3", ",", "3", "]", "]", "plt", "=", "plt", "or", "pretty_plot", "(", "16", ")", "xlim", "=", "limits", "[", "0", "]", "ylim", "=", "limits", "[", "1", "]", "h_line", "=", "np", ".", "transpose", "(", "[", "[", "xlim", "[", "0", "]", ",", "-", "xlim", "[", "0", "]", "*", "PREFAC", "]", ",", "[", "xlim", "[", "1", "]", ",", "-", "xlim", "[", "1", "]", "*", "PREFAC", "]", "]", ")", "o_line", "=", "np", ".", "transpose", "(", "[", "[", "xlim", "[", "0", "]", ",", "-", "xlim", "[", "0", "]", "*", "PREFAC", "+", "1.23", "]", ",", "[", "xlim", "[", "1", "]", ",", "-", "xlim", "[", "1", "]", "*", "PREFAC", "+", "1.23", "]", "]", ")", "neutral_line", "=", "np", ".", "transpose", "(", "[", "[", "7", ",", "ylim", "[", "0", "]", "]", ",", "[", "7", ",", "ylim", "[", "1", "]", "]", "]", ")", "V0_line", "=", "np", ".", "transpose", "(", "[", "[", "xlim", "[", "0", "]", ",", "0", "]", ",", "[", "xlim", "[", "1", "]", ",", "0", "]", "]", ")", "ax", "=", "plt", ".", "gca", "(", ")", "ax", ".", "set_xlim", "(", "xlim", ")", "ax", ".", "set_ylim", "(", "ylim", ")", "lw", "=", "3", "plt", ".", "plot", "(", "h_line", "[", "0", "]", ",", "h_line", "[", "1", "]", ",", "\"r--\"", ",", "linewidth", "=", "lw", ")", "plt", ".", "plot", "(", "o_line", "[", "0", "]", ",", "o_line", "[", "1", "]", ",", "\"r--\"", ",", "linewidth", "=", "lw", ")", "plt", ".", "plot", "(", "neutral_line", "[", "0", "]", ",", "neutral_line", "[", "1", "]", ",", "\"k-.\"", ",", "linewidth", "=", "lw", ")", "plt", ".", "plot", "(", "V0_line", "[", "0", "]", ",", "V0_line", "[", "1", "]", ",", "\"k-.\"", ",", "linewidth", "=", "lw", ")", "for", "entry", ",", "vertices", "in", "self", ".", "_pd", ".", "_stable_domain_vertices", ".", "items", "(", ")", ":", "center", "=", "np", ".", "average", "(", "vertices", ",", "axis", "=", "0", ")", "x", ",", "y", "=", "np", ".", "transpose", "(", "np", ".", "vstack", "(", "[", "vertices", ",", "vertices", "[", "0", "]", "]", ")", ")", "plt", ".", "plot", "(", "x", ",", "y", ",", "'k-'", ",", "linewidth", "=", "lw", ")", "if", "label_domains", ":", "plt", ".", "annotate", "(", "generate_entry_label", "(", "entry", ")", ",", "center", ",", "ha", "=", "'center'", ",", "va", "=", "'center'", ",", "fontsize", "=", "20", ",", "color", "=", "\"b\"", ")", "plt", ".", "xlabel", "(", "\"pH\"", ")", "plt", ".", "ylabel", "(", "\"E (V)\"", ")", "plt", ".", "title", "(", "title", ",", "fontsize", "=", "20", ",", "fontweight", "=", "'bold'", ")", "return", "plt" ]
Plot Pourbaix diagram. Args: limits: 2D list containing limits of the Pourbaix diagram of the form [[xlo, xhi], [ylo, yhi]] title (str): Title to display on plot label_domains (bool): whether to label pourbaix domains plt (pyplot): Pyplot instance for plotting Returns: plt (pyplot) - matplotlib plot object with pourbaix diagram
[ "Plot", "Pourbaix", "diagram", "." ]
python
train
38.176471
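The two dashed red boundaries are straight lines in (pH, E) space; a quick check of the arithmetic (PREFAC is defined elsewhere in the module and is not shown in this record; 0.0591 V per pH unit, the room-temperature Nernst slope, is assumed here):

import numpy as np

PREFAC = 0.0591                    # assumed value of the module constant

ph = np.array([-2.0, 16.0])        # the default x limits above
h_line = -ph * PREFAC              # H2/H2O line: E = -PREFAC * pH
o_line = 1.23 - ph * PREFAC        # O2/H2O line, offset by 1.23 V
print(h_line)                      # [ 0.1182 -0.9456]
print(o_line)                      # [ 1.3482  0.2844]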
google/openhtf
openhtf/plugs/usb/filesync_service.py
https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/filesync_service.py#L230-L265
def send(self, src_file, filename, st_mode=DEFAULT_PUSH_MODE, mtime=None, timeout=None): """Push a file-like object to the device. Args: src_file: File-like object for reading from filename: Filename to push to on the device st_mode: stat mode for filename on the device mtime: modification time to set for the file on the device timeout: Timeout to use for the send operation. Raises: AdbProtocolError: If we get an unexpected response. AdbRemoteError: If there's a remote error (but valid protocol). """ transport = DataFilesyncTransport(self.stream) transport.write_data('SEND', '%s,%s' % (filename, st_mode), timeout) try: while True: data = src_file.read(MAX_PUSH_DATA_BYTES) if not data: break transport.write_data('DATA', data, timeout) mtime = mtime or int(time.time()) transport.write_message( FilesyncMessageTypes.DoneMessage('DONE', mtime), timeout) except usb_exceptions.AdbStreamClosedError: # Try to do one last read to see if we can get any more information, # ignoring any errors for this Read attempt. Note that this always # raises, either a new AdbRemoteError, or the AdbStreamClosedError. self._check_for_fail_message(transport, sys.exc_info(), timeout) data_msg = transport.read_message(timeout) data_msg.assert_command_is('OKAY')
[ "def", "send", "(", "self", ",", "src_file", ",", "filename", ",", "st_mode", "=", "DEFAULT_PUSH_MODE", ",", "mtime", "=", "None", ",", "timeout", "=", "None", ")", ":", "transport", "=", "DataFilesyncTransport", "(", "self", ".", "stream", ")", "transport", ".", "write_data", "(", "'SEND'", ",", "'%s,%s'", "%", "(", "filename", ",", "st_mode", ")", ",", "timeout", ")", "try", ":", "while", "True", ":", "data", "=", "src_file", ".", "read", "(", "MAX_PUSH_DATA_BYTES", ")", "if", "not", "data", ":", "break", "transport", ".", "write_data", "(", "'DATA'", ",", "data", ",", "timeout", ")", "mtime", "=", "mtime", "or", "int", "(", "time", ".", "time", "(", ")", ")", "transport", ".", "write_message", "(", "FilesyncMessageTypes", ".", "DoneMessage", "(", "'DONE'", ",", "mtime", ")", ",", "timeout", ")", "except", "usb_exceptions", ".", "AdbStreamClosedError", ":", "# Try to do one last read to see if we can get any more information,", "# ignoring any errors for this Read attempt. Note that this always", "# raises, either a new AdbRemoteError, or the AdbStreamClosedError.", "self", ".", "_check_for_fail_message", "(", "transport", ",", "sys", ".", "exc_info", "(", ")", ",", "timeout", ")", "data_msg", "=", "transport", ".", "read_message", "(", "timeout", ")", "data_msg", ".", "assert_command_is", "(", "'OKAY'", ")" ]
Push a file-like object to the device. Args: src_file: File-like object for reading from filename: Filename to push to on the device st_mode: stat mode for filename on the device mtime: modification time to set for the file on the device timeout: Timeout to use for the send operation. Raises: AdbProtocolError: If we get an unexpected response. AdbRemoteError: If there's a remote error (but valid protocol).
[ "Push", "a", "file", "-", "like", "object", "to", "the", "device", "." ]
python
train
38.888889
shidenggui/easyquotation
easyquotation/jsl.py
https://github.com/shidenggui/easyquotation/blob/a75820db4f05f5386e1c1024d05b0bfc1de6cbda/easyquotation/jsl.py#L268-L323
def fundarb(
        self,
        jsl_username,
        jsl_password,
        avolume=100,
        bvolume=100,
        ptype="price",
    ):
        """Return graded-fund class A data as a dict

        :param jsl_username: Jisilu username
        :param jsl_password: Jisilu login password
        :param avolume: class A turnover, in millions
        :param bvolume: class B turnover, in millions
        :param ptype: premium calculation method, price=last price, buy=best bid, sell=best ask
        """
        session = requests.session()
        headers = {
            # pylint: disable=line-too-long
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
        }
        session.headers.update(headers)

        logindata = dict(
            return_url="http://www.jisilu.cn/",
            user_name=jsl_username,
            password=jsl_password,
            net_auto_login="1",
            _post_type="ajax",
        )

        rep = session.post(self.__jsl_login_url, data=logindata)
        if rep.json()["err"] is not None:
            return rep.json()

        # append the current ctime
        fundarb_url = self.__fundarb_url.format(ctime=int(time.time()))
        pdata = dict(
            avolume=avolume,
            bvolume=bvolume,
            ptype=ptype,
            is_search="1",
            market=["sh", "sz"],
            rp="50",
        )

        # request the data
        rep = session.post(fundarb_url, data=pdata)

        # get the returned JSON string
        fundajson = json.loads(rep.text)
        # format the returned JSON string
        data = self.formatfundajson(fundajson)

        self.__fundarb = data
        return self.__fundarb
[ "def", "fundarb", "(", "self", ",", "jsl_username", ",", "jsl_password", ",", "avolume", "=", "100", ",", "bvolume", "=", "100", ",", "ptype", "=", "\"price\"", ",", ")", ":", "session", "=", "requests", ".", "session", "(", ")", "headers", "=", "{", "# pylint: disable=line-too-long", "\"User-Agent\"", ":", "\"Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko\"", "}", "session", ".", "headers", ".", "update", "(", "headers", ")", "logindata", "=", "dict", "(", "return_url", "=", "\"http://www.jisilu.cn/\"", ",", "user_name", "=", "jsl_username", ",", "password", "=", "jsl_password", ",", "net_auto_login", "=", "\"1\"", ",", "_post_type", "=", "\"ajax\"", ",", ")", "rep", "=", "session", ".", "post", "(", "self", ".", "__jsl_login_url", ",", "data", "=", "logindata", ")", "if", "rep", ".", "json", "(", ")", "[", "\"err\"", "]", "is", "not", "None", ":", "return", "rep", ".", "json", "(", ")", "# 添加当前的ctime", "fundarb_url", "=", "self", ".", "__fundarb_url", ".", "format", "(", "ctime", "=", "int", "(", "time", ".", "time", "(", ")", ")", ")", "pdata", "=", "dict", "(", "avolume", "=", "avolume", ",", "bvolume", "=", "bvolume", ",", "ptype", "=", "ptype", ",", "is_search", "=", "\"1\"", ",", "market", "=", "[", "\"sh\"", ",", "\"sz\"", "]", ",", "rp", "=", "\"50\"", ",", ")", "# 请求数据", "rep", "=", "session", ".", "post", "(", "fundarb_url", ",", "data", "=", "pdata", ")", "# 获取返回的json字符串", "fundajson", "=", "json", ".", "loads", "(", "rep", ".", "text", ")", "# 格式化返回的json字符串", "data", "=", "self", ".", "formatfundajson", "(", "fundajson", ")", "self", ".", "__fundarb", "=", "data", "return", "self", ".", "__fundarb" ]
Return graded-fund class A data as a dict
:param jsl_username: Jisilu username
:param jsl_password: Jisilu login password
:param avolume: class A turnover, in millions
:param bvolume: class B turnover, in millions
:param ptype: premium calculation method, price=last price, buy=best bid, sell=best ask
[ "以字典形式返回分级A数据", ":", "param", "jsl_username", ":", "集思录用户名", ":", "param", "jsl_password", ":", "集思路登录密码", ":", "param", "avolume", ":", "A成交额,单位百万", ":", "param", "bvolume", ":", "B成交额,单位百万", ":", "param", "ptype", ":", "溢价计算方式,price", "=", "现价,buy", "=", "买一,sell", "=", "卖一" ]
python
train
26.392857
kejbaly2/metrique
metrique/plotting.py
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/plotting.py#L246-L262
def line(self, x, label=None, y='bottom', color='grey', **kwargs): ''' Creates a vertical line in the plot. :param x: The x coordinate of the line. Should be in the same units as the x-axis. :param string label: The label to be displayed. :param y: May be 'top', 'bottom' or int. The y coordinate of the text-label. :param color color: The color of the line. ''' super(DiffPlotter, self).line(x, label, y, color, self.ax1, **kwargs) super(DiffPlotter, self).line(x, '', 0, color, self.ax2, **kwargs)
[ "def", "line", "(", "self", ",", "x", ",", "label", "=", "None", ",", "y", "=", "'bottom'", ",", "color", "=", "'grey'", ",", "*", "*", "kwargs", ")", ":", "super", "(", "DiffPlotter", ",", "self", ")", ".", "line", "(", "x", ",", "label", ",", "y", ",", "color", ",", "self", ".", "ax1", ",", "*", "*", "kwargs", ")", "super", "(", "DiffPlotter", ",", "self", ")", ".", "line", "(", "x", ",", "''", ",", "0", ",", "color", ",", "self", ".", "ax2", ",", "*", "*", "kwargs", ")" ]
Creates a vertical line in the plot. :param x: The x coordinate of the line. Should be in the same units as the x-axis. :param string label: The label to be displayed. :param y: May be 'top', 'bottom' or int. The y coordinate of the text-label. :param color color: The color of the line.
[ "Creates", "a", "vertical", "line", "in", "the", "plot", "." ]
python
train
36.941176
pymupdf/PyMuPDF
fitz/fitz.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L2861-L2875
def deleteLink(self, linkdict): """Delete link if PDF""" CheckParent(self) val = _fitz.Page_deleteLink(self, linkdict) if linkdict["xref"] == 0: return linkid = linkdict["id"] try: linkobj = self._annot_refs[linkid] linkobj._erase() except: pass return val
[ "def", "deleteLink", "(", "self", ",", "linkdict", ")", ":", "CheckParent", "(", "self", ")", "val", "=", "_fitz", ".", "Page_deleteLink", "(", "self", ",", "linkdict", ")", "if", "linkdict", "[", "\"xref\"", "]", "==", "0", ":", "return", "linkid", "=", "linkdict", "[", "\"id\"", "]", "try", ":", "linkobj", "=", "self", ".", "_annot_refs", "[", "linkid", "]", "linkobj", ".", "_erase", "(", ")", "except", ":", "pass", "return", "val" ]
Delete link if PDF
[ "Delete", "link", "if", "PDF" ]
python
train
23
readbeyond/aeneas
aeneas/logger.py
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/logger.py#L144-L165
def log(self, message, severity=INFO, tag=u""): """ Add a given message to the log, and return its time. :param string message: the message to be added :param severity: the severity of the message :type severity: :class:`~aeneas.logger.Logger` :param string tag: the tag associated with the message; usually, the name of the class generating the entry :rtype: datetime """ entry = _LogEntry( severity=severity, time=datetime.datetime.now(), tag=tag, indentation=self.indentation, message=self._sanitize(message) ) self.entries.append(entry) if self.tee: gf.safe_print(entry.pretty_print(show_datetime=self.tee_show_datetime)) return entry.time
[ "def", "log", "(", "self", ",", "message", ",", "severity", "=", "INFO", ",", "tag", "=", "u\"\"", ")", ":", "entry", "=", "_LogEntry", "(", "severity", "=", "severity", ",", "time", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ",", "tag", "=", "tag", ",", "indentation", "=", "self", ".", "indentation", ",", "message", "=", "self", ".", "_sanitize", "(", "message", ")", ")", "self", ".", "entries", ".", "append", "(", "entry", ")", "if", "self", ".", "tee", ":", "gf", ".", "safe_print", "(", "entry", ".", "pretty_print", "(", "show_datetime", "=", "self", ".", "tee_show_datetime", ")", ")", "return", "entry", ".", "time" ]
Add a given message to the log, and return its time. :param string message: the message to be added :param severity: the severity of the message :type severity: :class:`~aeneas.logger.Logger` :param string tag: the tag associated with the message; usually, the name of the class generating the entry :rtype: datetime
[ "Add", "a", "given", "message", "to", "the", "log", "and", "return", "its", "time", "." ]
python
train
37.590909
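A stripped-down analogue of the append-and-return-time behavior documented above (the real _LogEntry also carries indentation and pretty-printing; a plain dict stands in here):

import datetime

entries = []

def log(message, severity='INFO', tag=''):
    entry = {'severity': severity, 'time': datetime.datetime.now(),
             'tag': tag, 'message': message}
    entries.append(entry)
    return entry['time']     # callers get the entry's timestamp back

t = log('synthesizing fragment', tag='SynthesizeTask')
print(t, '-', entries[-1]['message'])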
pyblish/pyblish-maya
pyblish_maya/lib.py
https://github.com/pyblish/pyblish-maya/blob/75db8b5d8de9d53ae95e74195a788b5f6db2cb5f/pyblish_maya/lib.py#L209-L230
def maintained_selection(): """Maintain selection during context Example: >>> with maintained_selection(): ... # Modify selection ... cmds.select('node', replace=True) >>> # Selection restored """ previous_selection = cmds.ls(selection=True) try: yield finally: if previous_selection: cmds.select(previous_selection, replace=True, noExpand=True) else: cmds.select(deselect=True, noExpand=True)
[ "def", "maintained_selection", "(", ")", ":", "previous_selection", "=", "cmds", ".", "ls", "(", "selection", "=", "True", ")", "try", ":", "yield", "finally", ":", "if", "previous_selection", ":", "cmds", ".", "select", "(", "previous_selection", ",", "replace", "=", "True", ",", "noExpand", "=", "True", ")", "else", ":", "cmds", ".", "select", "(", "deselect", "=", "True", ",", "noExpand", "=", "True", ")" ]
Maintain selection during context Example: >>> with maintained_selection(): ... # Modify selection ... cmds.select('node', replace=True) >>> # Selection restored
[ "Maintain", "selection", "during", "context" ]
python
test
25.409091
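The same save/restore shape works for any global state, not just Maya's selection; a Maya-free sketch of the try/finally pattern:

from contextlib import contextmanager

state = {'selection': ['node_A']}

@contextmanager
def maintained_state(st):
    previous = list(st['selection'])
    try:
        yield
    finally:
        # restore runs even if the body raised
        st['selection'] = previous

with maintained_state(state):
    state['selection'] = ['node_B']   # scratch modification
print(state['selection'])             # ['node_A']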
saltstack/salt
salt/modules/winrepo.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/winrepo.py#L81-L122
def update_git_repos(clean=False): ''' Checkout git repos containing :ref:`Windows Software Package Definitions <windows-package-manager>`. .. important:: This function requires `Git for Windows`_ to be installed in order to work. When installing, make sure to select an installation option which permits the git executable to be run from the Command Prompt. .. _`Git for Windows`: https://git-for-windows.github.io/ clean : False Clean repo cachedirs which are not configured under :conf_minion:`winrepo_remotes`. .. note:: This option only applies if either pygit2_ or GitPython_ is installed into Salt's bundled Python. .. warning:: This argument should not be set to ``True`` if a mix of git and non-git repo definitions are being used, as it will result in the non-git repo definitions being removed. .. versionadded:: 2015.8.0 .. _GitPython: https://github.com/gitpython-developers/GitPython .. _pygit2: https://github.com/libgit2/pygit2 CLI Example: .. code-block:: bash salt-call winrepo.update_git_repos ''' if not salt.utils.path.which('git'): raise CommandExecutionError( 'Git for Windows is not installed, or not configured to be ' 'accessible from the Command Prompt' ) return _update_git_repos(opts=__opts__, clean=clean, masterless=True)
[ "def", "update_git_repos", "(", "clean", "=", "False", ")", ":", "if", "not", "salt", ".", "utils", ".", "path", ".", "which", "(", "'git'", ")", ":", "raise", "CommandExecutionError", "(", "'Git for Windows is not installed, or not configured to be '", "'accessible from the Command Prompt'", ")", "return", "_update_git_repos", "(", "opts", "=", "__opts__", ",", "clean", "=", "clean", ",", "masterless", "=", "True", ")" ]
Checkout git repos containing :ref:`Windows Software Package Definitions <windows-package-manager>`. .. important:: This function requires `Git for Windows`_ to be installed in order to work. When installing, make sure to select an installation option which permits the git executable to be run from the Command Prompt. .. _`Git for Windows`: https://git-for-windows.github.io/ clean : False Clean repo cachedirs which are not configured under :conf_minion:`winrepo_remotes`. .. note:: This option only applies if either pygit2_ or GitPython_ is installed into Salt's bundled Python. .. warning:: This argument should not be set to ``True`` if a mix of git and non-git repo definitions are being used, as it will result in the non-git repo definitions being removed. .. versionadded:: 2015.8.0 .. _GitPython: https://github.com/gitpython-developers/GitPython .. _pygit2: https://github.com/libgit2/pygit2 CLI Example: .. code-block:: bash salt-call winrepo.update_git_repos
[ "Checkout", "git", "repos", "containing", ":", "ref", ":", "Windows", "Software", "Package", "Definitions", "<windows", "-", "package", "-", "manager", ">", "." ]
python
train
34.595238
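The availability guard at the end of the function can be reproduced with the standard library alone (shutil.which is broadly equivalent to the Salt helper for this particular check):

import shutil

if shutil.which('git') is None:
    raise RuntimeError(
        'Git for Windows is not installed, or not configured to be '
        'accessible from the Command Prompt')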
SHTOOLS/SHTOOLS
pyshtools/shclasses/shcoeffsgrid.py
https://github.com/SHTOOLS/SHTOOLS/blob/9a115cf83002df2ddec6b7f41aeb6be688e285de/pyshtools/shclasses/shcoeffsgrid.py#L1729-L1749
def _make_complex(self): """Convert the real SHCoeffs class to the complex class.""" rcomplex_coeffs = _shtools.SHrtoc(self.coeffs, convention=1, switchcs=0) # These coefficients are using real floats, and need to be # converted to complex form. complex_coeffs = _np.zeros((2, self.lmax+1, self.lmax+1), dtype='complex') complex_coeffs[0, :, :] = (rcomplex_coeffs[0, :, :] + 1j * rcomplex_coeffs[1, :, :]) complex_coeffs[1, :, :] = complex_coeffs[0, :, :].conjugate() for m in self.degrees(): if m % 2 == 1: complex_coeffs[1, :, m] = - complex_coeffs[1, :, m] # complex_coeffs is initialized in this function and can be # passed as reference return SHCoeffs.from_array(complex_coeffs, normalization=self.normalization, csphase=self.csphase, copy=False)
[ "def", "_make_complex", "(", "self", ")", ":", "rcomplex_coeffs", "=", "_shtools", ".", "SHrtoc", "(", "self", ".", "coeffs", ",", "convention", "=", "1", ",", "switchcs", "=", "0", ")", "# These coefficients are using real floats, and need to be", "# converted to complex form.", "complex_coeffs", "=", "_np", ".", "zeros", "(", "(", "2", ",", "self", ".", "lmax", "+", "1", ",", "self", ".", "lmax", "+", "1", ")", ",", "dtype", "=", "'complex'", ")", "complex_coeffs", "[", "0", ",", ":", ",", ":", "]", "=", "(", "rcomplex_coeffs", "[", "0", ",", ":", ",", ":", "]", "+", "1j", "*", "rcomplex_coeffs", "[", "1", ",", ":", ",", ":", "]", ")", "complex_coeffs", "[", "1", ",", ":", ",", ":", "]", "=", "complex_coeffs", "[", "0", ",", ":", ",", ":", "]", ".", "conjugate", "(", ")", "for", "m", "in", "self", ".", "degrees", "(", ")", ":", "if", "m", "%", "2", "==", "1", ":", "complex_coeffs", "[", "1", ",", ":", ",", "m", "]", "=", "-", "complex_coeffs", "[", "1", ",", ":", ",", "m", "]", "# complex_coeffs is initialized in this function and can be", "# passed as reference", "return", "SHCoeffs", ".", "from_array", "(", "complex_coeffs", ",", "normalization", "=", "self", ".", "normalization", ",", "csphase", "=", "self", ".", "csphase", ",", "copy", "=", "False", ")" ]
Convert the real SHCoeffs class to the complex class.
[ "Convert", "the", "real", "SHCoeffs", "class", "to", "the", "complex", "class", "." ]
python
train
49.142857
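The symmetry step can be checked in isolation with plain numpy (the SHrtoc normalization stage is skipped here; this only illustrates how negative orders are filled from positive ones, with random numbers standing in for real coefficients):

import numpy as np

lmax = 2
rng = np.random.default_rng(0)
rc = rng.standard_normal((2, lmax + 1, lmax + 1))   # stand-in real coeffs

cc = np.zeros((2, lmax + 1, lmax + 1), dtype=complex)
cc[0] = rc[0] + 1j * rc[1]          # orders m >= 0
cc[1] = cc[0].conjugate()           # orders m < 0 by conjugation ...
for m in range(lmax + 1):
    if m % 2 == 1:
        cc[1, :, m] = -cc[1, :, m]  # ... with a (-1)**m sign flip
print(cc[1, 2, 1])                  # equals -conj(cc[0, 2, 1])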
yaml/pyyaml
lib/yaml/__init__.py
https://github.com/yaml/pyyaml/blob/e471e86bf6dabdad45a1438c20a4a5c033eb9034/lib/yaml/__init__.py#L303-L309
def safe_dump(data, stream=None, **kwds): """ Serialize a Python object into a YAML stream. Produce only basic YAML tags. If stream is None, return the produced string instead. """ return dump_all([data], stream, Dumper=SafeDumper, **kwds)
[ "def", "safe_dump", "(", "data", ",", "stream", "=", "None", ",", "*", "*", "kwds", ")", ":", "return", "dump_all", "(", "[", "data", "]", ",", "stream", ",", "Dumper", "=", "SafeDumper", ",", "*", "*", "kwds", ")" ]
Serialize a Python object into a YAML stream. Produce only basic YAML tags. If stream is None, return the produced string instead.
[ "Serialize", "a", "Python", "object", "into", "a", "YAML", "stream", ".", "Produce", "only", "basic", "YAML", "tags", ".", "If", "stream", "is", "None", "return", "the", "produced", "string", "instead", "." ]
python
train
36.714286
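Usage is symmetric with the docstring: a string comes back when no stream is given, otherwise the YAML is written to the stream and None is returned:

import yaml

doc = {'name': 'example', 'ports': [80, 443]}

print(yaml.safe_dump(doc))          # returns the YAML text

with open('out.yml', 'w') as fh:
    yaml.safe_dump(doc, fh)         # writes to the stream, returns None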