Dataset schema (fields repeated per record below):
  repo              string, length 7-54
  path              string, length 4-192
  url               string, length 87-284
  code              string, length 78-104k
  code_tokens       list
  docstring         string, length 1-46.9k
  docstring_tokens  list
  language          string, 1 distinct value
  partition         string, 3 distinct values
ioos/pyoos
pyoos/collectors/ioos/swe_sos.py
https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/collectors/ioos/swe_sos.py#L50-L87
def metadata_plus_exceptions(
    self, output_format=None, feature_name_callback=None, **kwargs
):
    """
    Gets SensorML objects for all procedures in your filtered features.

    Return two dictionaries for service responses keyed by 'feature':
        responses:          values are SOS DescribeSensor response text
        response_failures:  values are exception text content furnished
                            from ServiceException, ExceptionReport

    You should override the default output_format for servers that do
    not respond properly.
    """
    callback = feature_name_callback or str
    if output_format is None:
        output_format = (
            'text/xml; subtype="sensorML/1.0.1/profiles/ioos_sos/1.0"'
        )
    responses = {}
    response_failures = {}
    if self.features is not None:
        for feature in self.features:
            ds_kwargs = kwargs.copy()
            ds_kwargs.update(
                {
                    "outputFormat": output_format,
                    "procedure": callback(feature),
                }
            )
            try:
                responses[feature] = SensorML(
                    self.server.describe_sensor(**ds_kwargs)
                )
            except (ServiceException, ExceptionReport) as e:
                response_failures[feature] = str(e)
    return (responses, response_failures)
[ "def", "metadata_plus_exceptions", "(", "self", ",", "output_format", "=", "None", ",", "feature_name_callback", "=", "None", ",", "*", "*", "kwargs", ")", ":", "callback", "=", "feature_name_callback", "or", "str", "if", "output_format", "is", "None", ":", "output_format", "=", "(", "'text/xml; subtype=\"sensorML/1.0.1/profiles/ioos_sos/1.0\"'", ")", "responses", "=", "{", "}", "response_failures", "=", "{", "}", "if", "self", ".", "features", "is", "not", "None", ":", "for", "feature", "in", "self", ".", "features", ":", "ds_kwargs", "=", "kwargs", ".", "copy", "(", ")", "ds_kwargs", ".", "update", "(", "{", "\"outputFormat\"", ":", "output_format", ",", "\"procedure\"", ":", "callback", "(", "feature", ")", ",", "}", ")", "try", ":", "responses", "[", "feature", "]", "=", "SensorML", "(", "self", ".", "server", ".", "describe_sensor", "(", "*", "*", "ds_kwargs", ")", ")", "except", "(", "ServiceException", ",", "ExceptionReport", ")", "as", "e", ":", "response_failures", "[", "feature", "]", "=", "str", "(", "e", ")", "return", "(", "responses", ",", "response_failures", ")" ]
Gets SensorML objects for all procedures in your filtered features.

Return two dictionaries for service responses keyed by 'feature':
    responses:          values are SOS DescribeSensor response text
    response_failures:  values are exception text content furnished
                        from ServiceException, ExceptionReport

You should override the default output_format for servers that do not
respond properly.
[ "Gets", "SensorML", "objects", "for", "all", "procedures", "in", "your", "filtered", "features", "." ]
python
train
woolfson-group/isambard
isambard/ampal/protein.py
https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/ampal/protein.py#L1289-L1324
def side_chain(self):
    """List of the side-chain atoms (R-group).

    Notes
    -----
    Returns empty list for glycine.

    Returns
    -------
    side_chain_atoms: list(`Atoms`)
    """
    side_chain_atoms = []
    if self.mol_code != 'GLY':
        covalent_bond_graph = generate_covalent_bond_graph(
            find_covalent_bonds(self))
        try:
            subgraphs = generate_bond_subgraphs_from_break(
                covalent_bond_graph, self['CA'], self['CB'])
            if len(subgraphs) == 1:
                subgraphs = generate_bond_subgraphs_from_break(
                    subgraphs[0], self['CD'], self['N'])
            if len(subgraphs) == 2:
                for g in subgraphs:
                    if self['CB'] in g:
                        side_chain_atoms = g.nodes()
                        break
        except:
            warning_message = "Malformed PDB for Residue {0}: {1}.".format(
                self.id, self)
            if 'CB' in self.atoms.keys():
                side_chain_atoms.append(self['CB'])
                warning_message += " Side-chain is just the CB atom."
            else:
                warning_message += " Empty side-chain."
            warnings.warn(warning_message, MalformedPDBWarning)
    return side_chain_atoms
[ "def", "side_chain", "(", "self", ")", ":", "side_chain_atoms", "=", "[", "]", "if", "self", ".", "mol_code", "!=", "'GLY'", ":", "covalent_bond_graph", "=", "generate_covalent_bond_graph", "(", "find_covalent_bonds", "(", "self", ")", ")", "try", ":", "subgraphs", "=", "generate_bond_subgraphs_from_break", "(", "covalent_bond_graph", ",", "self", "[", "'CA'", "]", ",", "self", "[", "'CB'", "]", ")", "if", "len", "(", "subgraphs", ")", "==", "1", ":", "subgraphs", "=", "generate_bond_subgraphs_from_break", "(", "subgraphs", "[", "0", "]", ",", "self", "[", "'CD'", "]", ",", "self", "[", "'N'", "]", ")", "if", "len", "(", "subgraphs", ")", "==", "2", ":", "for", "g", "in", "subgraphs", ":", "if", "self", "[", "'CB'", "]", "in", "g", ":", "side_chain_atoms", "=", "g", ".", "nodes", "(", ")", "break", "except", ":", "warning_message", "=", "\"Malformed PDB for Residue {0}: {1}.\"", ".", "format", "(", "self", ".", "id", ",", "self", ")", "if", "'CB'", "in", "self", ".", "atoms", ".", "keys", "(", ")", ":", "side_chain_atoms", ".", "append", "(", "self", "[", "'CB'", "]", ")", "warning_message", "+=", "\" Side-chain is just the CB atom.\"", "else", ":", "warning_message", "+=", "\" Empty side-chain.\"", "warnings", ".", "warn", "(", "warning_message", ",", "MalformedPDBWarning", ")", "return", "side_chain_atoms" ]
List of the side-chain atoms (R-group).

Notes
-----
Returns empty list for glycine.

Returns
-------
side_chain_atoms: list(`Atoms`)
[ "List", "of", "the", "side", "-", "chain", "atoms", "(", "R", "-", "group", ")", "." ]
python
train
aestrivex/bctpy
bct/utils/other.py
https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/utils/other.py#L6-L33
def threshold_absolute(W, thr, copy=True):
    '''
    This function thresholds the connectivity matrix by absolute weight
    magnitude. All weights below the given threshold, and all weights
    on the main diagonal (self-self connections) are set to 0.

    If copy is not set, this function will *modify W in place.*

    Parameters
    ----------
    W : np.ndarray
        weighted connectivity matrix
    thr : float
        absolute weight threshold
    copy : bool
        if True, returns a copy of the matrix. Otherwise, modifies the matrix
        in place. Default value=True.

    Returns
    -------
    W : np.ndarray
        thresholded connectivity matrix
    '''
    if copy:
        W = W.copy()
    np.fill_diagonal(W, 0)  # clear diagonal
    W[W < thr] = 0  # apply threshold
    return W
[ "def", "threshold_absolute", "(", "W", ",", "thr", ",", "copy", "=", "True", ")", ":", "if", "copy", ":", "W", "=", "W", ".", "copy", "(", ")", "np", ".", "fill_diagonal", "(", "W", ",", "0", ")", "# clear diagonal", "W", "[", "W", "<", "thr", "]", "=", "0", "# apply threshold", "return", "W" ]
This function thresholds the connectivity matrix by absolute weight
magnitude. All weights below the given threshold, and all weights
on the main diagonal (self-self connections) are set to 0.

If copy is not set, this function will *modify W in place.*

Parameters
----------
W : np.ndarray
    weighted connectivity matrix
thr : float
    absolute weight threshold
copy : bool
    if True, returns a copy of the matrix. Otherwise, modifies the matrix
    in place. Default value=True.

Returns
-------
W : np.ndarray
    thresholded connectivity matrix
[ "This", "function", "thresholds", "the", "connectivity", "matrix", "by", "absolute", "weight", "magnitude", ".", "All", "weights", "below", "the", "given", "threshold", "and", "all", "weights", "on", "the", "main", "diagonal", "(", "self", "-", "self", "connections", ")", "are", "set", "to", "0", "." ]
python
train
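A minimal usage sketch for threshold_absolute above, assuming bctpy is installed (the import path is inferred from the record's path field, bct/utils/other.py):

import numpy as np
from bct.utils.other import threshold_absolute

rng = np.random.default_rng(0)
W = rng.random((4, 4))                  # toy weighted connectivity matrix
W_thr = threshold_absolute(W, thr=0.5)  # copy=True, so W itself is untouched
assert (np.diag(W_thr) == 0).all()              # diagonal cleared
assert ((W_thr == 0) | (W_thr >= 0.5)).all()    # sub-threshold weights zeroed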
kwikteam/phy
phy/plot/panzoom.py
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/panzoom.py#L198-L210
def _constrain_pan(self):
    """Constrain bounding box."""
    if self.xmin is not None and self.xmax is not None:
        p0 = self.xmin + 1. / self._zoom[0]
        p1 = self.xmax - 1. / self._zoom[0]
        p0, p1 = min(p0, p1), max(p0, p1)
        self._pan[0] = np.clip(self._pan[0], p0, p1)

    if self.ymin is not None and self.ymax is not None:
        p0 = self.ymin + 1. / self._zoom[1]
        p1 = self.ymax - 1. / self._zoom[1]
        p0, p1 = min(p0, p1), max(p0, p1)
        self._pan[1] = np.clip(self._pan[1], p0, p1)
[ "def", "_constrain_pan", "(", "self", ")", ":", "if", "self", ".", "xmin", "is", "not", "None", "and", "self", ".", "xmax", "is", "not", "None", ":", "p0", "=", "self", ".", "xmin", "+", "1.", "/", "self", ".", "_zoom", "[", "0", "]", "p1", "=", "self", ".", "xmax", "-", "1.", "/", "self", ".", "_zoom", "[", "0", "]", "p0", ",", "p1", "=", "min", "(", "p0", ",", "p1", ")", ",", "max", "(", "p0", ",", "p1", ")", "self", ".", "_pan", "[", "0", "]", "=", "np", ".", "clip", "(", "self", ".", "_pan", "[", "0", "]", ",", "p0", ",", "p1", ")", "if", "self", ".", "ymin", "is", "not", "None", "and", "self", ".", "ymax", "is", "not", "None", ":", "p0", "=", "self", ".", "ymin", "+", "1.", "/", "self", ".", "_zoom", "[", "1", "]", "p1", "=", "self", ".", "ymax", "-", "1.", "/", "self", ".", "_zoom", "[", "1", "]", "p0", ",", "p1", "=", "min", "(", "p0", ",", "p1", ")", ",", "max", "(", "p0", ",", "p1", ")", "self", ".", "_pan", "[", "1", "]", "=", "np", ".", "clip", "(", "self", ".", "_pan", "[", "1", "]", ",", "p0", ",", "p1", ")" ]
Constrain bounding box.
[ "Constrain", "bounding", "box", "." ]
python
train
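A standalone sketch of the constraint logic in _constrain_pan, outside the class: at zoom z the visible half-extent is 1/z, so the pan center must stay within [min + 1/z, max - 1/z].

import numpy as np

def constrain_pan_1d(pan, zoom, vmin, vmax):
    # visible half-extent shrinks as zoom grows
    p0 = vmin + 1.0 / zoom
    p1 = vmax - 1.0 / zoom
    p0, p1 = min(p0, p1), max(p0, p1)
    return np.clip(pan, p0, p1)

print(constrain_pan_1d(0.9, 2.0, -1.0, 1.0))  # 0.5: clamped inside the box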
limix/limix-core
limix_core/mean/linear.py
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/linear.py#L219-L251
def removeFixedEffect(self, index=None):
    """
    set sample and trait designs
    F:      NxK sample design
    A:      LxP sample design
    REML:   REML for this term?
    index:  index of which fixed effect to replace. If None, remove last term.
    """
    if self._n_terms == 0:
        pass
    if index is None or index == (self._n_terms - 1):
        self._n_terms -= 1
        F = self._F.pop()  # = self.F[:-1]
        A = self._A.pop()  # = self.A[:-1]
        self._A_identity.pop()  # = self.A_identity[:-1]
        REML_term = self._REML_term.pop()  # = self.REML_term[:-1]
        self._B.pop()  # = self.B[:-1]
        self._n_fixed_effs -= F.shape[1] * A.shape[0]
        if REML_term:
            self._n_fixed_effs_REML -= F.shape[1] * A.shape[0]
        pass
    elif index >= self.n_terms:
        raise Exception("index exceeds max index of terms")
    else:
        raise NotImplementedError("currently only last term can be removed")
    pass
    self._rebuild_indicator()
    self.clear_cache('Fstar', 'Astar', 'Xstar', 'Xhat',
                     'Areml', 'Areml_eigh', 'Areml_chol', 'Areml_inv',
                     'beta_hat', 'B_hat',
                     'LRLdiag_Xhat_tens', 'Areml_grad',
                     'beta_grad', 'Xstar_beta_grad', 'Zstar', 'DLZ')
[ "def", "removeFixedEffect", "(", "self", ",", "index", "=", "None", ")", ":", "if", "self", ".", "_n_terms", "==", "0", ":", "pass", "if", "index", "is", "None", "or", "index", "==", "(", "self", ".", "_n_terms", "-", "1", ")", ":", "self", ".", "_n_terms", "-=", "1", "F", "=", "self", ".", "_F", ".", "pop", "(", ")", "#= self.F[:-1]", "A", "=", "self", ".", "_A", ".", "pop", "(", ")", "#= self.A[:-1]", "self", ".", "_A_identity", ".", "pop", "(", ")", "#= self.A_identity[:-1]", "REML_term", "=", "self", ".", "_REML_term", ".", "pop", "(", ")", "# = self.REML_term[:-1]", "self", ".", "_B", ".", "pop", "(", ")", "# = self.B[:-1]", "self", ".", "_n_fixed_effs", "-=", "F", ".", "shape", "[", "1", "]", "*", "A", ".", "shape", "[", "0", "]", "if", "REML_term", ":", "self", ".", "_n_fixed_effs_REML", "-=", "F", ".", "shape", "[", "1", "]", "*", "A", ".", "shape", "[", "0", "]", "pass", "elif", "index", ">=", "self", ".", "n_terms", ":", "raise", "Exception", "(", "\"index exceeds max index of terms\"", ")", "else", ":", "raise", "NotImplementedError", "(", "\"currently only last term can be removed\"", ")", "pass", "self", ".", "_rebuild_indicator", "(", ")", "self", ".", "clear_cache", "(", "'Fstar'", ",", "'Astar'", ",", "'Xstar'", ",", "'Xhat'", ",", "'Areml'", ",", "'Areml_eigh'", ",", "'Areml_chol'", ",", "'Areml_inv'", ",", "'beta_hat'", ",", "'B_hat'", ",", "'LRLdiag_Xhat_tens'", ",", "'Areml_grad'", ",", "'beta_grad'", ",", "'Xstar_beta_grad'", ",", "'Zstar'", ",", "'DLZ'", ")" ]
set sample and trait designs
F:      NxK sample design
A:      LxP sample design
REML:   REML for this term?
index:  index of which fixed effect to replace. If None, remove last term.
[ "set", "sample", "and", "trait", "designs", "F", ":", "NxK", "sample", "design", "A", ":", "LxP", "sample", "design", "REML", ":", "REML", "for", "this", "term?", "index", ":", "index", "of", "which", "fixed", "effect", "to", "replace", ".", "If", "None", "remove", "last", "term", "." ]
python
train
ScatterHQ/machinist
machinist/_fsm.py
https://github.com/ScatterHQ/machinist/blob/1d1c017ac03be8e737d50af0dfabf31722ddc621/machinist/_fsm.py#L193-L209
def addTransitions(self, state, transitions):
    """
    Create a new L{TransitionTable} with all the same transitions as this
    L{TransitionTable} plus a number of new transitions.

    @param state: The state for which the new transitions are defined.
    @param transitions: A L{dict} mapping inputs to output, nextState
        pairs.  Each item from this L{dict} will define a new transition in
        C{state}.

    @return: The newly created L{TransitionTable}.
    """
    table = self._copy()
    state = table.table.setdefault(state, {})
    for (input, (output, nextState)) in transitions.items():
        state[input] = Transition(output, nextState)
    return table
[ "def", "addTransitions", "(", "self", ",", "state", ",", "transitions", ")", ":", "table", "=", "self", ".", "_copy", "(", ")", "state", "=", "table", ".", "table", ".", "setdefault", "(", "state", ",", "{", "}", ")", "for", "(", "input", ",", "(", "output", ",", "nextState", ")", ")", "in", "transitions", ".", "items", "(", ")", ":", "state", "[", "input", "]", "=", "Transition", "(", "output", ",", "nextState", ")", "return", "table" ]
Create a new L{TransitionTable} with all the same transitions as this
L{TransitionTable} plus a number of new transitions.

@param state: The state for which the new transitions are defined.
@param transitions: A L{dict} mapping inputs to output, nextState
    pairs.  Each item from this L{dict} will define a new transition in
    C{state}.

@return: The newly created L{TransitionTable}.
[ "Create", "a", "new", "L", "{", "TransitionTable", "}", "with", "all", "the", "same", "transitions", "as", "this", "L", "{", "TransitionTable", "}", "plus", "a", "number", "of", "new", "transitions", "." ]
python
train
dropbox/stone
stone/backends/obj_c_types.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/backends/obj_c_types.py#L582-L599
def _generate_union_cstor_funcs(self, union):
    """Emits standard union constructor."""
    for field in union.all_fields:
        enum_field_name = fmt_enum_name(field.name, union)

        func_args = [] if is_void_type(
            field.data_type) else fmt_func_args_from_fields([field])

        with self.block_func(
                func=self._cstor_name_from_field(field),
                args=func_args,
                return_type='instancetype'):
            self.emit('self = [super init];')
            with self.block_init():
                self.emit('_tag = {};'.format(enum_field_name))

                if not is_void_type(field.data_type):
                    self.emit('_{} = {};'.format(
                        fmt_var(field.name), fmt_var(field.name)))
        self.emit()
[ "def", "_generate_union_cstor_funcs", "(", "self", ",", "union", ")", ":", "for", "field", "in", "union", ".", "all_fields", ":", "enum_field_name", "=", "fmt_enum_name", "(", "field", ".", "name", ",", "union", ")", "func_args", "=", "[", "]", "if", "is_void_type", "(", "field", ".", "data_type", ")", "else", "fmt_func_args_from_fields", "(", "[", "field", "]", ")", "with", "self", ".", "block_func", "(", "func", "=", "self", ".", "_cstor_name_from_field", "(", "field", ")", ",", "args", "=", "func_args", ",", "return_type", "=", "'instancetype'", ")", ":", "self", ".", "emit", "(", "'self = [super init];'", ")", "with", "self", ".", "block_init", "(", ")", ":", "self", ".", "emit", "(", "'_tag = {};'", ".", "format", "(", "enum_field_name", ")", ")", "if", "not", "is_void_type", "(", "field", ".", "data_type", ")", ":", "self", ".", "emit", "(", "'_{} = {};'", ".", "format", "(", "fmt_var", "(", "field", ".", "name", ")", ",", "fmt_var", "(", "field", ".", "name", ")", ")", ")", "self", ".", "emit", "(", ")" ]
Emits standard union constructor.
[ "Emits", "standard", "union", "constructor", "." ]
python
train
regardscitoyens/anpy
anpy/dossier_like_senapy.py
https://github.com/regardscitoyens/anpy/blob/72eff17c992e054edade7bc16eda1eca96e69225/anpy/dossier_like_senapy.py#L21-L27
def find_promulgation_date(line):
    """
    >>> find_promulgation_date("Loi nº 2010-383 du 16 avril 2010 autorisant l'approbation de l'accord entre...")
    '2010-04-16'
    """
    line = line.split(' du ')[1]
    return format_date(re.search(r"(\d\d? \w\w\w+ \d\d\d\d)", line).group(1))
[ "def", "find_promulgation_date", "(", "line", ")", ":", "line", "=", "line", ".", "split", "(", "' du '", ")", "[", "1", "]", "return", "format_date", "(", "re", ".", "search", "(", "r\"(\\d\\d? \\w\\w\\w+ \\d\\d\\d\\d)\"", ",", "line", ")", ".", "group", "(", "1", ")", ")" ]
>>> find_promulgation_date("Loi nº 2010-383 du 16 avril 2010 autorisant l'approbation de l'accord entre...")
'2010-04-16'
[ ">>>", "find_promulgation_date", "(", "Loi", "nº", "2010", "-", "383", "du", "16", "avril", "2010", "autorisant", "l", "approbation", "de", "l", "accord", "entre", "...", ")", "2010", "-", "04", "-", "16" ]
python
train
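A self-contained sketch of the same extraction without anpy's format_date helper; the French month table here is my own stand-in, not anpy's code:

import re

FR_MONTHS = {'janvier': 1, 'février': 2, 'mars': 3, 'avril': 4, 'mai': 5,
             'juin': 6, 'juillet': 7, 'août': 8, 'septembre': 9,
             'octobre': 10, 'novembre': 11, 'décembre': 12}

def promulgation_date(line):
    line = line.split(' du ')[1]
    day, month, year = re.search(r"(\d\d?) (\w\w\w+) (\d\d\d\d)", line).groups()
    return '{}-{:02d}-{:02d}'.format(year, FR_MONTHS[month.lower()], int(day))

print(promulgation_date("Loi nº 2010-383 du 16 avril 2010 autorisant ..."))
# 2010-04-16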
baverman/covador
covador/types.py
https://github.com/baverman/covador/blob/1597759f7ba77004efef1b27bf804539663b5488/covador/types.py#L693-L697
def pipe(p1, p2):
    """Joins two pipes"""
    if isinstance(p1, Pipeable) or isinstance(p2, Pipeable):
        return p1 | p2
    return Pipe([p1, p2])
[ "def", "pipe", "(", "p1", ",", "p2", ")", ":", "if", "isinstance", "(", "p1", ",", "Pipeable", ")", "or", "isinstance", "(", "p2", ",", "Pipeable", ")", ":", "return", "p1", "|", "p2", "return", "Pipe", "(", "[", "p1", ",", "p2", "]", ")" ]
Joins two pipes
[ "Joins", "two", "pipes" ]
python
train
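A minimal sketch of the composition pattern pipe() implements, with a hypothetical stand-in for covador's Pipe class (stages apply left to right):

class Pipe:
    def __init__(self, stages):
        self.stages = stages

    def __call__(self, value):
        for stage in self.stages:  # apply each stage in order
            value = stage(value)
        return value

p = Pipe([str.strip, int])
print(p('  42 '))  # 42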
daler/metaseq
metaseq/minibrowser.py
https://github.com/daler/metaseq/blob/fa875d1f72317aa7ef95cb128b739956b16eef9f/metaseq/minibrowser.py#L101-L106
def make_fig(self):
    """
    Figure constructor, called before `self.plot()`
    """
    self.fig = plt.figure(figsize=(8, 4))
    self._all_figures.append(self.fig)
[ "def", "make_fig", "(", "self", ")", ":", "self", ".", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "(", "8", ",", "4", ")", ")", "self", ".", "_all_figures", ".", "append", "(", "self", ".", "fig", ")" ]
Figure constructor, called before `self.plot()`
[ "Figure", "constructor", "called", "before", "self", ".", "plot", "()" ]
python
train
evhub/coconut
coconut/compiler/matching.py
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/matching.py#L580-L596
def match_trailer(self, tokens, item):
    """Matches typedefs and as patterns."""
    internal_assert(len(tokens) > 1 and len(tokens) % 2 == 1, "invalid trailer match tokens", tokens)
    match, trailers = tokens[0], tokens[1:]
    for i in range(0, len(trailers), 2):
        op, arg = trailers[i], trailers[i + 1]
        if op == "is":
            self.add_check("_coconut.isinstance(" + item + ", " + arg + ")")
        elif op == "as":
            if arg in self.names:
                self.add_check(self.names[arg] + " == " + item)
            elif arg != wildcard:
                self.add_def(arg + " = " + item)
                self.names[arg] = item
        else:
            raise CoconutInternalException("invalid trailer match operation", op)
    self.match(match, item)
[ "def", "match_trailer", "(", "self", ",", "tokens", ",", "item", ")", ":", "internal_assert", "(", "len", "(", "tokens", ")", ">", "1", "and", "len", "(", "tokens", ")", "%", "2", "==", "1", ",", "\"invalid trailer match tokens\"", ",", "tokens", ")", "match", ",", "trailers", "=", "tokens", "[", "0", "]", ",", "tokens", "[", "1", ":", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "trailers", ")", ",", "2", ")", ":", "op", ",", "arg", "=", "trailers", "[", "i", "]", ",", "trailers", "[", "i", "+", "1", "]", "if", "op", "==", "\"is\"", ":", "self", ".", "add_check", "(", "\"_coconut.isinstance(\"", "+", "item", "+", "\", \"", "+", "arg", "+", "\")\"", ")", "elif", "op", "==", "\"as\"", ":", "if", "arg", "in", "self", ".", "names", ":", "self", ".", "add_check", "(", "self", ".", "names", "[", "arg", "]", "+", "\" == \"", "+", "item", ")", "elif", "arg", "!=", "wildcard", ":", "self", ".", "add_def", "(", "arg", "+", "\" = \"", "+", "item", ")", "self", ".", "names", "[", "arg", "]", "=", "item", "else", ":", "raise", "CoconutInternalException", "(", "\"invalid trailer match operation\"", ",", "op", ")", "self", ".", "match", "(", "match", ",", "item", ")" ]
Matches typedefs and as patterns.
[ "Matches", "typedefs", "and", "as", "patterns", "." ]
python
train
facelessuser/backrefs
backrefs/uniprops/__init__.py
https://github.com/facelessuser/backrefs/blob/3b3d60f5d57b02044f880aa29c9c5add0e31a34f/backrefs/uniprops/__init__.py#L154-L165
def get_hangul_syllable_type_property(value, is_bytes=False):
    """Get `HANGUL SYLLABLE TYPE` property."""

    obj = unidata.ascii_hangul_syllable_type if is_bytes else unidata.unicode_hangul_syllable_type

    if value.startswith('^'):
        negated = value[1:]
        value = '^' + unidata.unicode_alias['hangulsyllabletype'].get(negated, negated)
    else:
        value = unidata.unicode_alias['hangulsyllabletype'].get(value, value)

    return obj[value]
[ "def", "get_hangul_syllable_type_property", "(", "value", ",", "is_bytes", "=", "False", ")", ":", "obj", "=", "unidata", ".", "ascii_hangul_syllable_type", "if", "is_bytes", "else", "unidata", ".", "unicode_hangul_syllable_type", "if", "value", ".", "startswith", "(", "'^'", ")", ":", "negated", "=", "value", "[", "1", ":", "]", "value", "=", "'^'", "+", "unidata", ".", "unicode_alias", "[", "'hangulsyllabletype'", "]", ".", "get", "(", "negated", ",", "negated", ")", "else", ":", "value", "=", "unidata", ".", "unicode_alias", "[", "'hangulsyllabletype'", "]", ".", "get", "(", "value", ",", "value", ")", "return", "obj", "[", "value", "]" ]
Get `HANGUL SYLLABLE TYPE` property.
[ "Get", "HANGUL", "SYLLABLE", "TYPE", "property", "." ]
python
train
Dirguis/ipfn
ipfn/ipfn.py
https://github.com/Dirguis/ipfn/blob/0a896ea395664515c5a424b69043937aad1d5567/ipfn/ipfn.py#L60-L134
def ipfn_np(self, m, aggregates, dimensions, weight_col='total'):
    """
    Runs the ipfn method from a matrix m, aggregates/marginals and the
    dimension(s) preserved.
    For example:
    from ipfn import ipfn
    import numpy as np
    m = np.array([[8., 4., 6., 7.], [3., 6., 5., 2.], [9., 11., 3., 1.]], )
    xip = np.array([20., 18., 22.])
    xpj = np.array([18., 16., 12., 14.])
    aggregates = [xip, xpj]
    dimensions = [[0], [1]]
    IPF = ipfn(m, aggregates, dimensions)
    m = IPF.iteration()
    """
    steps = len(aggregates)
    dim = len(m.shape)
    product_elem = []
    tables = [m]
    # TODO: do we need to persist all these dataframe? Or maybe we just need to persist the table_update and table_current
    # and then update the table_current to the table_update to the latest we have. And create an empty zero dataframe for table_update (Evelyn)
    for inc in range(steps - 1):
        tables.append(np.array(np.zeros(m.shape)))
    original = copy.copy(m)

    # Calculate the new weights for each dimension
    for inc in range(steps):
        if inc == (steps - 1):
            table_update = m
            table_current = tables[inc]
        else:
            table_update = tables[inc + 1]
            table_current = tables[inc]
        for dimension in dimensions[inc]:
            product_elem.append(range(m.shape[dimension]))
        for item in product(*product_elem):
            idx = self.index_axis_elem(dim, dimensions[inc], item)
            table_current_slice = table_current[idx]
            mijk = table_current_slice.sum()
            # TODO: Directly put it as xijk = aggregates[inc][item] (Evelyn)
            xijk = aggregates[inc]
            xijk = xijk[item]
            if mijk == 0:
                # table_current_slice += 1e-5
                # TODO: Basically, this part would remain 0 as always right? Cause if the sum of the slice is zero, then we only have zeros in this slice.
                # TODO: you could put it as table_update[idx] = table_current_slice (since multiplication on zero is still zero)
                table_update[idx] = table_current_slice
            else:
                # TODO: when inc == steps - 1, this part is also directly updating the dataframe m (Evelyn)
                # If we are not going to persist every table generated, we could still keep this part to directly update dataframe m
                table_update[idx] = table_current_slice * 1.0 * xijk / mijk
            # For debug purposes
            # if np.isnan(table_update).any():
            #     print(idx)
            #     sys.exit(0)
        product_elem = []

    # Check the convergence rate for each dimension
    max_conv = 0
    for inc in range(steps):
        # TODO: this part already generated before, we could somehow persist it. But it's not important (Evelyn)
        for dimension in dimensions[inc]:
            product_elem.append(range(m.shape[dimension]))
        for item in product(*product_elem):
            idx = self.index_axis_elem(dim, dimensions[inc], item)
            ori_ijk = aggregates[inc][item]
            m_slice = m[idx]
            m_ijk = m_slice.sum()
            # print('Current vs original', abs(m_ijk/ori_ijk - 1))
            if abs(m_ijk / ori_ijk - 1) > max_conv:
                max_conv = abs(m_ijk / ori_ijk - 1)
        product_elem = []

    return m, max_conv
[ "def", "ipfn_np", "(", "self", ",", "m", ",", "aggregates", ",", "dimensions", ",", "weight_col", "=", "'total'", ")", ":", "steps", "=", "len", "(", "aggregates", ")", "dim", "=", "len", "(", "m", ".", "shape", ")", "product_elem", "=", "[", "]", "tables", "=", "[", "m", "]", "# TODO: do we need to persist all these dataframe? Or maybe we just need to persist the table_update and table_current", "# and then update the table_current to the table_update to the latest we have. And create an empty zero dataframe for table_update (Evelyn)", "for", "inc", "in", "range", "(", "steps", "-", "1", ")", ":", "tables", ".", "append", "(", "np", ".", "array", "(", "np", ".", "zeros", "(", "m", ".", "shape", ")", ")", ")", "original", "=", "copy", ".", "copy", "(", "m", ")", "# Calculate the new weights for each dimension", "for", "inc", "in", "range", "(", "steps", ")", ":", "if", "inc", "==", "(", "steps", "-", "1", ")", ":", "table_update", "=", "m", "table_current", "=", "tables", "[", "inc", "]", "else", ":", "table_update", "=", "tables", "[", "inc", "+", "1", "]", "table_current", "=", "tables", "[", "inc", "]", "for", "dimension", "in", "dimensions", "[", "inc", "]", ":", "product_elem", ".", "append", "(", "range", "(", "m", ".", "shape", "[", "dimension", "]", ")", ")", "for", "item", "in", "product", "(", "*", "product_elem", ")", ":", "idx", "=", "self", ".", "index_axis_elem", "(", "dim", ",", "dimensions", "[", "inc", "]", ",", "item", ")", "table_current_slice", "=", "table_current", "[", "idx", "]", "mijk", "=", "table_current_slice", ".", "sum", "(", ")", "# TODO: Directly put it as xijk = aggregates[inc][item] (Evelyn)", "xijk", "=", "aggregates", "[", "inc", "]", "xijk", "=", "xijk", "[", "item", "]", "if", "mijk", "==", "0", ":", "# table_current_slice += 1e-5", "# TODO: Basically, this part would remain 0 as always right? Cause if the sum of the slice is zero, then we only have zeros in this slice.", "# TODO: you could put it as table_update[idx] = table_current_slice (since multiplication on zero is still zero)", "table_update", "[", "idx", "]", "=", "table_current_slice", "else", ":", "# TODO: when inc == steps - 1, this part is also directly updating the dataframe m (Evelyn)", "# If we are not going to persist every table generated, we could still keep this part to directly update dataframe m", "table_update", "[", "idx", "]", "=", "table_current_slice", "*", "1.0", "*", "xijk", "/", "mijk", "# For debug purposes", "# if np.isnan(table_update).any():", "# print(idx)", "# sys.exit(0)", "product_elem", "=", "[", "]", "# Check the convergence rate for each dimension", "max_conv", "=", "0", "for", "inc", "in", "range", "(", "steps", ")", ":", "# TODO: this part already generated before, we could somehow persist it. 
But it's not important (Evelyn)", "for", "dimension", "in", "dimensions", "[", "inc", "]", ":", "product_elem", ".", "append", "(", "range", "(", "m", ".", "shape", "[", "dimension", "]", ")", ")", "for", "item", "in", "product", "(", "*", "product_elem", ")", ":", "idx", "=", "self", ".", "index_axis_elem", "(", "dim", ",", "dimensions", "[", "inc", "]", ",", "item", ")", "ori_ijk", "=", "aggregates", "[", "inc", "]", "[", "item", "]", "m_slice", "=", "m", "[", "idx", "]", "m_ijk", "=", "m_slice", ".", "sum", "(", ")", "# print('Current vs original', abs(m_ijk/ori_ijk - 1))", "if", "abs", "(", "m_ijk", "/", "ori_ijk", "-", "1", ")", ">", "max_conv", ":", "max_conv", "=", "abs", "(", "m_ijk", "/", "ori_ijk", "-", "1", ")", "product_elem", "=", "[", "]", "return", "m", ",", "max_conv" ]
Runs the ipfn method from a matrix m, aggregates/marginals and the
dimension(s) preserved.
For example:
from ipfn import ipfn
import numpy as np
m = np.array([[8., 4., 6., 7.], [3., 6., 5., 2.], [9., 11., 3., 1.]], )
xip = np.array([20., 18., 22.])
xpj = np.array([18., 16., 12., 14.])
aggregates = [xip, xpj]
dimensions = [[0], [1]]
IPF = ipfn(m, aggregates, dimensions)
m = IPF.iteration()
[ "Runs", "the", "ipfn", "method", "from", "a", "matrix", "m", "aggregates", "/", "marginals", "and", "the", "dimension", "(", "s", ")", "preserved", ".", "For", "example", ":", "from", "ipfn", "import", "ipfn", "import", "numpy", "as", "np", "m", "=", "np", ".", "array", "(", "[[", "8", ".", "4", ".", "6", ".", "7", ".", "]", "[", "3", ".", "6", ".", "5", ".", "2", ".", "]", "[", "9", ".", "11", ".", "3", ".", "1", ".", "]]", ")", "xip", "=", "np", ".", "array", "(", "[", "20", ".", "18", ".", "22", ".", "]", ")", "xpj", "=", "np", ".", "array", "(", "[", "18", ".", "16", ".", "12", ".", "14", ".", "]", ")", "aggregates", "=", "[", "xip", "xpj", "]", "dimensions", "=", "[[", "0", "]", "[", "1", "]]" ]
python
valid
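A pure-numpy sketch of the 2D case that ipfn_np generalizes to N dimensions, using the example data from the docstring above: alternately rescale rows and columns toward the target marginals.

import numpy as np

m = np.array([[8., 4., 6., 7.], [3., 6., 5., 2.], [9., 11., 3., 1.]])
xip = np.array([20., 18., 22.])       # target row sums
xpj = np.array([18., 16., 12., 14.])  # target column sums

for _ in range(100):
    m *= (xip / m.sum(axis=1))[:, None]  # match row marginals
    m *= (xpj / m.sum(axis=0))[None, :]  # match column marginals

print(np.allclose(m.sum(axis=1), xip), np.allclose(m.sum(axis=0), xpj))
# True True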
kronenthaler/mod-pbxproj
pbxproj/pbxextensions/ProjectFiles.py
https://github.com/kronenthaler/mod-pbxproj/blob/8de3cbdd3210480ddbb1fa0f50a4f4ea87de6e71/pbxproj/pbxextensions/ProjectFiles.py#L266-L278
def get_files_by_path(self, path, tree=TreeType.SOURCE_ROOT):
    """
    Gets the files under the given tree type that match the given path.
    :param path: Path to the file relative to the tree root
    :param tree: Tree type to look for the path. By default the SOURCE_ROOT
    :return: List of all PBXFileReference that match the path and tree criteria.
    """
    files = []
    for file_ref in self.objects.get_objects_in_section(u'PBXFileReference'):
        if file_ref.path == path and file_ref.sourceTree == tree:
            files.append(file_ref)
    return files
[ "def", "get_files_by_path", "(", "self", ",", "path", ",", "tree", "=", "TreeType", ".", "SOURCE_ROOT", ")", ":", "files", "=", "[", "]", "for", "file_ref", "in", "self", ".", "objects", ".", "get_objects_in_section", "(", "u'PBXFileReference'", ")", ":", "if", "file_ref", ".", "path", "==", "path", "and", "file_ref", ".", "sourceTree", "==", "tree", ":", "files", ".", "append", "(", "file_ref", ")", "return", "files" ]
Gets the files under the given tree type that match the given path.
:param path: Path to the file relative to the tree root
:param tree: Tree type to look for the path. By default the SOURCE_ROOT
:return: List of all PBXFileReference that match the path and tree criteria.
[ "Gets", "the", "files", "under", "the", "given", "tree", "type", "that", "match", "the", "given", "path", ".", ":", "param", "path", ":", "Path", "to", "the", "file", "relative", "to", "the", "tree", "root", ":", "param", "tree", ":", "Tree", "type", "to", "look", "for", "the", "path", ".", "By", "default", "the", "SOURCE_ROOT", ":", "return", ":", "List", "of", "all", "PBXFileReference", "that", "match", "the", "path", "and", "tree", "criteria", "." ]
python
train
PyCQA/astroid
astroid/brain/brain_attrs.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/brain/brain_attrs.py#L18-L28
def is_decorated_with_attrs(node, decorator_names=ATTRS_NAMES):
    """Return True if a decorated node has an attr decorator applied."""
    if not node.decorators:
        return False
    for decorator_attribute in node.decorators.nodes:
        if isinstance(decorator_attribute, astroid.Call):  # decorator with arguments
            decorator_attribute = decorator_attribute.func
        if decorator_attribute.as_string() in decorator_names:
            return True
    return False
[ "def", "is_decorated_with_attrs", "(", "node", ",", "decorator_names", "=", "ATTRS_NAMES", ")", ":", "if", "not", "node", ".", "decorators", ":", "return", "False", "for", "decorator_attribute", "in", "node", ".", "decorators", ".", "nodes", ":", "if", "isinstance", "(", "decorator_attribute", ",", "astroid", ".", "Call", ")", ":", "# decorator with arguments", "decorator_attribute", "=", "decorator_attribute", ".", "func", "if", "decorator_attribute", ".", "as_string", "(", ")", "in", "decorator_names", ":", "return", "True", "return", "False" ]
Return True if a decorated node has an attr decorator applied.
[ "Return", "True", "if", "a", "decorated", "node", "has", "an", "attr", "decorator", "applied", "." ]
python
train
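A quick, hedged check of the same test using astroid's extract_node helper; the decorator-name set here is a stand-in for brain_attrs.ATTRS_NAMES, and extract_node is assumed to return the last top-level node (the class):

import astroid

node = astroid.extract_node('''
import attr

@attr.s
class Point:
    x = attr.ib()
''')
ATTRS_NAMES = frozenset(('attr.s', 'attrs', 'attr.attrs'))
print(any(dec.as_string() in ATTRS_NAMES
          for dec in node.decorators.nodes))  # True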
LCAV/pylocus
pylocus/algorithms.py
https://github.com/LCAV/pylocus/blob/c56a38c251d8a435caf4641a8ae6027ecba2c8c6/pylocus/algorithms.py#L146-L167
def reconstruct_cdm(dm, absolute_angles, all_points, W=None):
    """ Reconstruct point set from angle and distance measurements,
    using coordinate difference matrices.
    """
    from pylocus.point_set import dmi_from_V, sdm_from_dmi, get_V
    from pylocus.mds import signedMDS

    N = all_points.shape[0]

    V = get_V(absolute_angles, dm)

    dmx = dmi_from_V(V, 0)
    dmy = dmi_from_V(V, 1)

    sdmx = sdm_from_dmi(dmx, N)
    sdmy = sdm_from_dmi(dmy, N)

    points_x = signedMDS(sdmx, W)
    points_y = signedMDS(sdmy, W)

    Xhat = np.c_[points_x, points_y]
    Y, R, t, c = procrustes(all_points, Xhat, scale=False)
    return Y
[ "def", "reconstruct_cdm", "(", "dm", ",", "absolute_angles", ",", "all_points", ",", "W", "=", "None", ")", ":", "from", "pylocus", ".", "point_set", "import", "dmi_from_V", ",", "sdm_from_dmi", ",", "get_V", "from", "pylocus", ".", "mds", "import", "signedMDS", "N", "=", "all_points", ".", "shape", "[", "0", "]", "V", "=", "get_V", "(", "absolute_angles", ",", "dm", ")", "dmx", "=", "dmi_from_V", "(", "V", ",", "0", ")", "dmy", "=", "dmi_from_V", "(", "V", ",", "1", ")", "sdmx", "=", "sdm_from_dmi", "(", "dmx", ",", "N", ")", "sdmy", "=", "sdm_from_dmi", "(", "dmy", ",", "N", ")", "points_x", "=", "signedMDS", "(", "sdmx", ",", "W", ")", "points_y", "=", "signedMDS", "(", "sdmy", ",", "W", ")", "Xhat", "=", "np", ".", "c_", "[", "points_x", ",", "points_y", "]", "Y", ",", "R", ",", "t", ",", "c", "=", "procrustes", "(", "all_points", ",", "Xhat", ",", "scale", "=", "False", ")", "return", "Y" ]
Reconstruct point set from angle and distance measurements, using coordinate difference matrices.
[ "Reconstruct", "point", "set", "from", "angle", "and", "distance", "measurements", "using", "coordinate", "difference", "matrices", "." ]
python
train
numenta/nupic
src/nupic/engine/__init__.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/engine/__init__.py#L449-L454
def getInputNames(self):
    """
    Returns list of input names in spec.
    """
    inputs = self.getSpec().inputs
    return [inputs.getByIndex(i)[0] for i in xrange(inputs.getCount())]
[ "def", "getInputNames", "(", "self", ")", ":", "inputs", "=", "self", ".", "getSpec", "(", ")", ".", "inputs", "return", "[", "inputs", ".", "getByIndex", "(", "i", ")", "[", "0", "]", "for", "i", "in", "xrange", "(", "inputs", ".", "getCount", "(", ")", ")", "]" ]
Returns list of input names in spec.
[ "Returns", "list", "of", "input", "names", "in", "spec", "." ]
python
valid
PGower/PyCanvas
pycanvas/apis/feature_flags.py
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/feature_flags.py#L340-L362
def remove_feature_flag_users(self, user_id, feature):
    """
    Remove feature flag.

    Remove feature flag for a given Account, Course, or User. (Note that the flag must
    be defined on the Account, Course, or User directly.) The object will then inherit
    the feature flags from a higher account, if any exist. If this flag was 'on' or 'off',
    then lower-level account flags that were masked by this one will apply again.
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - user_id
    """ID"""
    path["user_id"] = user_id

    # REQUIRED - PATH - feature
    """ID"""
    path["feature"] = feature

    self.logger.debug("DELETE /api/v1/users/{user_id}/features/flags/{feature} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("DELETE", "/api/v1/users/{user_id}/features/flags/{feature}".format(**path), data=data, params=params, single_item=True)
[ "def", "remove_feature_flag_users", "(", "self", ",", "user_id", ",", "feature", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - user_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"user_id\"", "]", "=", "user_id", "# REQUIRED - PATH - feature\r", "\"\"\"ID\"\"\"", "path", "[", "\"feature\"", "]", "=", "feature", "self", ".", "logger", ".", "debug", "(", "\"DELETE /api/v1/users/{user_id}/features/flags/{feature} with query params: {params} and form data: {data}\"", ".", "format", "(", "params", "=", "params", ",", "data", "=", "data", ",", "*", "*", "path", ")", ")", "return", "self", ".", "generic_request", "(", "\"DELETE\"", ",", "\"/api/v1/users/{user_id}/features/flags/{feature}\"", ".", "format", "(", "*", "*", "path", ")", ",", "data", "=", "data", ",", "params", "=", "params", ",", "single_item", "=", "True", ")" ]
Remove feature flag. Remove feature flag for a given Account, Course, or User. (Note that the flag must be defined on the Account, Course, or User directly.) The object will then inherit the feature flags from a higher account, if any exist. If this flag was 'on' or 'off', then lower-level account flags that were masked by this one will apply again.
[ "Remove", "feature", "flag", ".", "Remove", "feature", "flag", "for", "a", "given", "Account", "Course", "or", "User", ".", "(", "Note", "that", "the", "flag", "must", "be", "defined", "on", "the", "Account", "Course", "or", "User", "directly", ".", ")", "The", "object", "will", "then", "inherit", "the", "feature", "flags", "from", "a", "higher", "account", "if", "any", "exist", ".", "If", "this", "flag", "was", "on", "or", "off", "then", "lower", "-", "level", "account", "flags", "that", "were", "masked", "by", "this", "one", "will", "apply", "again", "." ]
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/extract_text.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/extract_text.py#L384-L391
def get_cmd_output_from_stdin(stdint_content_binary: bytes,
                              *args, encoding: str = SYS_ENCODING) -> str:
    """
    Returns text output of a command, passing binary data in via stdin.
    """
    p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    stdout, stderr = p.communicate(input=stdint_content_binary)
    return stdout.decode(encoding, errors='ignore')
[ "def", "get_cmd_output_from_stdin", "(", "stdint_content_binary", ":", "bytes", ",", "*", "args", ",", "encoding", ":", "str", "=", "SYS_ENCODING", ")", "->", "str", ":", "p", "=", "subprocess", ".", "Popen", "(", "args", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "stdout", ",", "stderr", "=", "p", ".", "communicate", "(", "input", "=", "stdint_content_binary", ")", "return", "stdout", ".", "decode", "(", "encoding", ",", "errors", "=", "'ignore'", ")" ]
Returns text output of a command, passing binary data in via stdin.
[ "Returns", "text", "output", "of", "a", "command", "passing", "binary", "data", "in", "via", "stdin", "." ]
python
train
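A usage sketch mirroring get_cmd_output_from_stdin with the standard library only; 'sort' is an illustrative POSIX command, and the encoding is assumed rather than taken from cardinal_pythonlib's SYS_ENCODING:

import subprocess

def cmd_output_from_stdin(content: bytes, *args, encoding: str = 'utf-8') -> str:
    p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    stdout, _ = p.communicate(input=content)  # feed bytes via stdin
    return stdout.decode(encoding, errors='ignore')

print(cmd_output_from_stdin(b'b\na\n', 'sort'))  # 'a\nb\n'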
jbittel/django-mama-cas
mama_cas/models.py
https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/models.py#L207-L218
def request_sign_out(self, user):
    """
    Send a single logout request to each service accessed by a
    specified user. This is called at logout when single logout
    is enabled.

    If requests-futures is installed, asynchronous requests will
    be sent. Otherwise, synchronous requests will be sent.
    """
    session = Session()
    for ticket in self.filter(user=user, consumed__gte=user.last_login):
        ticket.request_sign_out(session=session)
[ "def", "request_sign_out", "(", "self", ",", "user", ")", ":", "session", "=", "Session", "(", ")", "for", "ticket", "in", "self", ".", "filter", "(", "user", "=", "user", ",", "consumed__gte", "=", "user", ".", "last_login", ")", ":", "ticket", ".", "request_sign_out", "(", "session", "=", "session", ")" ]
Send a single logout request to each service accessed by a specified user. This is called at logout when single logout is enabled. If requests-futures is installed, asynchronous requests will be sent. Otherwise, synchronous requests will be sent.
[ "Send", "a", "single", "logout", "request", "to", "each", "service", "accessed", "by", "a", "specified", "user", ".", "This", "is", "called", "at", "logout", "when", "single", "logout", "is", "enabled", "." ]
python
train
JukeboxPipeline/jukebox-core
src/jukeboxcore/launcher.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/launcher.py#L185-L198
def compile_ui(self, namespace, unknown):
    """Compile qt designer files

    :param namespace: namespace containing arguments from the launch parser
    :type namespace: Namespace
    :param unknown: list of unknown arguments
    :type unknown: list
    :returns: None
    :rtype: None
    :raises: None
    """
    uifiles = namespace.uifile
    for f in uifiles:
        qtcompile.compile_ui(f.name)
[ "def", "compile_ui", "(", "self", ",", "namespace", ",", "unknown", ")", ":", "uifiles", "=", "namespace", ".", "uifile", "for", "f", "in", "uifiles", ":", "qtcompile", ".", "compile_ui", "(", "f", ".", "name", ")" ]
Compile qt designer files

:param namespace: namespace containing arguments from the launch parser
:type namespace: Namespace
:param unknown: list of unknown arguments
:type unknown: list
:returns: None
:rtype: None
:raises: None
[ "Compile", "qt", "designer", "files" ]
python
train
googleapis/google-cloud-python
bigquery/samples/get_model.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/samples/get_model.py#L16-L34
def get_model(client, model_id):
    """Sample ID: go/samples-tracker/1510"""

    # [START bigquery_get_model]
    from google.cloud import bigquery

    # TODO(developer): Construct a BigQuery client object.
    # client = bigquery.Client()

    # TODO(developer): Set model_id to the ID of the model to fetch.
    # model_id = 'your-project.your_dataset.your_model'

    model = client.get_model(model_id)

    full_model_id = "{}.{}.{}".format(model.project, model.dataset_id, model.model_id)
    friendly_name = model.friendly_name
    print(
        "Got model '{}' with friendly_name '{}'.".format(full_model_id, friendly_name)
    )
[ "def", "get_model", "(", "client", ",", "model_id", ")", ":", "# [START bigquery_get_model]", "from", "google", ".", "cloud", "import", "bigquery", "# TODO(developer): Construct a BigQuery client object.", "# client = bigquery.Client()", "# TODO(developer): Set model_id to the ID of the model to fetch.", "# model_id = 'your-project.your_dataset.your_model'", "model", "=", "client", ".", "get_model", "(", "model_id", ")", "full_model_id", "=", "\"{}.{}.{}\"", ".", "format", "(", "model", ".", "project", ",", "model", ".", "dataset_id", ",", "model", ".", "model_id", ")", "friendly_name", "=", "model", ".", "friendly_name", "print", "(", "\"Got model '{}' with friendly_name '{}'.\"", ".", "format", "(", "full_model_id", ",", "friendly_name", ")", ")" ]
Sample ID: go/samples-tracker/1510
[ "Sample", "ID", ":", "go", "/", "samples", "-", "tracker", "/", "1510" ]
python
train
praekeltfoundation/seed-control-interface-service
services/tasks.py
https://github.com/praekeltfoundation/seed-control-interface-service/blob/0c8ec58ae61e72d4443e6c9a4d8b7dd12dd8a86e/services/tasks.py#L81-L88
def run(self):
    """
    Queues all services to be polled. Should be run via beat.
    """
    services = Service.objects.all()
    for service in services:
        poll_service.apply_async(kwargs={"service_id": str(service.id)})
    return "Queued <%s> Service(s) for Polling" % services.count()
[ "def", "run", "(", "self", ")", ":", "services", "=", "Service", ".", "objects", ".", "all", "(", ")", "for", "service", "in", "services", ":", "poll_service", ".", "apply_async", "(", "kwargs", "=", "{", "\"service_id\"", ":", "str", "(", "service", ".", "id", ")", "}", ")", "return", "\"Queued <%s> Service(s) for Polling\"", "%", "services", ".", "count", "(", ")" ]
Queues all services to be polled. Should be run via beat.
[ "Queues", "all", "services", "to", "be", "polled", ".", "Should", "be", "run", "via", "beat", "." ]
python
train
tBaxter/tango-happenings
build/lib/happenings/models.py
https://github.com/tBaxter/tango-happenings/blob/cb3c49ea39e0a6cef9c6ffb534c2fbf401139ba2/build/lib/happenings/models.py#L238-L249
def get_top_assets(self):
    """
    Gets images and videos to populate top assets.

    Map is built separately.
    """
    images = self.get_all_images()[0:14]
    video = []
    if supports_video:
        video = self.eventvideo_set.all()[0:10]
    return list(chain(images, video))[0:15]
[ "def", "get_top_assets", "(", "self", ")", ":", "images", "=", "self", ".", "get_all_images", "(", ")", "[", "0", ":", "14", "]", "video", "=", "[", "]", "if", "supports_video", ":", "video", "=", "self", ".", "eventvideo_set", ".", "all", "(", ")", "[", "0", ":", "10", "]", "return", "list", "(", "chain", "(", "images", ",", "video", ")", ")", "[", "0", ":", "15", "]" ]
Gets images and videos to populate top assets. Map is built separately.
[ "Gets", "images", "and", "videos", "to", "populate", "top", "assets", "." ]
python
valid
tuomas2/automate
src/automate/statusobject.py
https://github.com/tuomas2/automate/blob/d8a8cd03cd0da047e033a2d305f3f260f8c4e017/src/automate/statusobject.py#L223-L236
def get_status_display(self, **kwargs):
    """
    Define how status is displayed in UIs (add units etc.).
    """
    if 'value' in kwargs:
        value = kwargs['value']
    else:
        value = self.status
    if self.show_stdev_seconds:
        stdev = self.stdev(self.show_stdev_seconds)
        return f'{value}±{stdev:2.2}'
    else:
        return str(value)
[ "def", "get_status_display", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "'value'", "in", "kwargs", ":", "value", "=", "kwargs", "[", "'value'", "]", "else", ":", "value", "=", "self", ".", "status", "if", "self", ".", "show_stdev_seconds", ":", "stdev", "=", "self", ".", "stdev", "(", "self", ".", "show_stdev_seconds", ")", "return", "f'{value}±{stdev:2.2}'", "else", ":", "return", "str", "(", "value", ")" ]
Define how status is displayed in UIs (add units etc.).
[ "Define", "how", "status", "is", "displayed", "in", "UIs", "(", "add", "units", "etc", ".", ")", "." ]
python
train
newville/wxmplot
examples/floatcontrol.py
https://github.com/newville/wxmplot/blob/8e0dc037453e5cdf18c968dc5a3d29efd761edee/examples/floatcontrol.py#L14-L25
def set_float(val):
    """ utility to set a floating value,
    useful for converting from strings """
    out = None
    if not val in (None, ''):
        try:
            out = float(val)
        except ValueError:
            return None
        if numpy.isnan(out):
            out = default
    return out
[ "def", "set_float", "(", "val", ")", ":", "out", "=", "None", "if", "not", "val", "in", "(", "None", ",", "''", ")", ":", "try", ":", "out", "=", "float", "(", "val", ")", "except", "ValueError", ":", "return", "None", "if", "numpy", ".", "isnan", "(", "out", ")", ":", "out", "=", "default", "return", "out" ]
utility to set a floating value, useful for converting from strings
[ "utility", "to", "set", "a", "floating", "value", "useful", "for", "converting", "from", "strings" ]
python
train
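A self-contained variant of the same utility with the NaN fallback made an explicit parameter (in the original, default is resolved at module scope):

import math

def set_float(val, default=None):
    if val in (None, ''):
        return None
    try:
        out = float(val)
    except ValueError:
        return None
    return default if math.isnan(out) else out

print(set_float('3.14'), set_float('abc'), set_float(float('nan'), 0.0))
# 3.14 None 0.0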
gwpy/gwpy
gwpy/types/series.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/series.py#L864-L871
def update(self, other, inplace=True):
    """Update this series by appending new data from an other
    and dropping the same amount of data off the start.

    This is a convenience method that just calls `~Series.append` with
    `resize=False`.
    """
    return self.append(other, inplace=inplace, resize=False)
[ "def", "update", "(", "self", ",", "other", ",", "inplace", "=", "True", ")", ":", "return", "self", ".", "append", "(", "other", ",", "inplace", "=", "inplace", ",", "resize", "=", "False", ")" ]
Update this series by appending new data from an other and dropping the same amount of data off the start. This is a convenience method that just calls `~Series.append` with `resize=False`.
[ "Update", "this", "series", "by", "appending", "new", "data", "from", "an", "other", "and", "dropping", "the", "same", "amount", "of", "data", "off", "the", "start", "." ]
python
train
CiscoDevNet/webexteamssdk
webexteamssdk/utils.py
https://github.com/CiscoDevNet/webexteamssdk/blob/6fc2cc3557e080ba4b2a380664cb2a0532ae45cd/webexteamssdk/utils.py#L89-L97
def validate_base_url(base_url):
    """Verify that base_url specifies a protocol and network location."""
    parsed_url = urllib.parse.urlparse(base_url)
    if parsed_url.scheme and parsed_url.netloc:
        return parsed_url.geturl()
    else:
        error_message = "base_url must contain a valid scheme (protocol " \
                        "specifier) and network location (hostname)"
        raise ValueError(error_message)
[ "def", "validate_base_url", "(", "base_url", ")", ":", "parsed_url", "=", "urllib", ".", "parse", ".", "urlparse", "(", "base_url", ")", "if", "parsed_url", ".", "scheme", "and", "parsed_url", ".", "netloc", ":", "return", "parsed_url", ".", "geturl", "(", ")", "else", ":", "error_message", "=", "\"base_url must contain a valid scheme (protocol \"", "\"specifier) and network location (hostname)\"", "raise", "ValueError", "(", "error_message", ")" ]
Verify that base_url specifies a protocol and network location.
[ "Verify", "that", "base_url", "specifies", "a", "protocol", "and", "network", "location", "." ]
python
test
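A quick stdlib demonstration of the exact check validate_base_url performs (the URLs are illustrative):

import urllib.parse

for url in ('https://api.example.com/v1/', 'api.example.com/v1/'):
    parsed = urllib.parse.urlparse(url)
    print(url, '->', bool(parsed.scheme and parsed.netloc))
# https://api.example.com/v1/ -> True
# api.example.com/v1/ -> False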
improbable-research/keanu
keanu-python/keanu/vertex/generated.py
https://github.com/improbable-research/keanu/blob/73189a8f569078e156168e795f82c7366c59574b/keanu-python/keanu/vertex/generated.py#L585-L591
def Exponential(rate: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    One to one constructor for mapping some shape of rate to matching shaped exponential.

    :param rate: the rate of the Exponential with either the same shape as specified for this vertex or scalar
    """
    return Double(context.jvm_view().ExponentialVertex, label, cast_to_double_vertex(rate))
[ "def", "Exponential", "(", "rate", ":", "vertex_constructor_param_types", ",", "label", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "Vertex", ":", "return", "Double", "(", "context", ".", "jvm_view", "(", ")", ".", "ExponentialVertex", ",", "label", ",", "cast_to_double_vertex", "(", "rate", ")", ")" ]
One to one constructor for mapping some shape of rate to matching shaped exponential.

:param rate: the rate of the Exponential with either the same shape as specified for this vertex or scalar
[ "One", "to", "one", "constructor", "for", "mapping", "some", "shape", "of", "rate", "to", "matching", "shaped", "exponential", ".", ":", "param", "rate", ":", "the", "rate", "of", "the", "Exponential", "with", "either", "the", "same", "shape", "as", "specified", "for", "this", "vertex", "or", "scalar" ]
python
train
O365/python-o365
O365/message.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/message.py#L801-L878
def save_draft(self, target_folder=OutlookWellKnowFolderNames.DRAFTS):
    """ Save this message as a draft on the cloud

    :param target_folder: name of the drafts folder
    :return: Success / Failure
    :rtype: bool
    """
    if self.object_id:
        # update message. Attachments are NOT included nor saved.
        if not self.__is_draft:
            raise RuntimeError('Only draft messages can be updated')
        if not self._track_changes:
            return True  # there's nothing to update
        url = self.build_url(
            self._endpoints.get('get_message').format(id=self.object_id))
        method = self.con.patch
        data = self.to_api_data(restrict_keys=self._track_changes)

        data.pop(self._cc('attachments'), None)  # attachments are handled by the next method call
        # noinspection PyProtectedMember
        self.attachments._update_attachments_to_cloud()
    else:
        # new message. Attachments are included and saved.
        if not self.__is_draft:
            raise RuntimeError('Only draft messages can be saved as drafts')
        target_folder = target_folder or OutlookWellKnowFolderNames.DRAFTS
        if isinstance(target_folder, OutlookWellKnowFolderNames):
            target_folder = target_folder.value
        elif not isinstance(target_folder, str):
            # a Folder instance
            target_folder = getattr(target_folder, 'folder_id',
                                    OutlookWellKnowFolderNames.DRAFTS.value)
        url = self.build_url(
            self._endpoints.get('create_draft_folder').format(
                id=target_folder))
        method = self.con.post
        data = self.to_api_data()

    if not data:
        return True

    response = method(url, data=data)
    if not response:
        return False

    self._track_changes.clear()  # reset the tracked changes as they are all saved

    if not self.object_id:
        # new message
        message = response.json()

        self.object_id = message.get(self._cc('id'), None)
        self.folder_id = message.get(self._cc('parentFolderId'), None)

        # fallback to office365 v1.0
        self.__created = message.get(self._cc('createdDateTime'),
                                     message.get(self._cc('dateTimeCreated'), None))
        # fallback to office365 v1.0
        self.__modified = message.get(self._cc('lastModifiedDateTime'),
                                      message.get(self._cc('dateTimeModified'), None))

        self.__created = parse(self.__created).astimezone(
            self.protocol.timezone) if self.__created else None
        self.__modified = parse(self.__modified).astimezone(
            self.protocol.timezone) if self.__modified else None
    else:
        self.__modified = self.protocol.timezone.localize(dt.datetime.now())

    return True
[ "def", "save_draft", "(", "self", ",", "target_folder", "=", "OutlookWellKnowFolderNames", ".", "DRAFTS", ")", ":", "if", "self", ".", "object_id", ":", "# update message. Attachments are NOT included nor saved.", "if", "not", "self", ".", "__is_draft", ":", "raise", "RuntimeError", "(", "'Only draft messages can be updated'", ")", "if", "not", "self", ".", "_track_changes", ":", "return", "True", "# there's nothing to update", "url", "=", "self", ".", "build_url", "(", "self", ".", "_endpoints", ".", "get", "(", "'get_message'", ")", ".", "format", "(", "id", "=", "self", ".", "object_id", ")", ")", "method", "=", "self", ".", "con", ".", "patch", "data", "=", "self", ".", "to_api_data", "(", "restrict_keys", "=", "self", ".", "_track_changes", ")", "data", ".", "pop", "(", "self", ".", "_cc", "(", "'attachments'", ")", ",", "None", ")", "# attachments are handled by the next method call", "# noinspection PyProtectedMember", "self", ".", "attachments", ".", "_update_attachments_to_cloud", "(", ")", "else", ":", "# new message. Attachments are included and saved.", "if", "not", "self", ".", "__is_draft", ":", "raise", "RuntimeError", "(", "'Only draft messages can be saved as drafts'", ")", "target_folder", "=", "target_folder", "or", "OutlookWellKnowFolderNames", ".", "DRAFTS", "if", "isinstance", "(", "target_folder", ",", "OutlookWellKnowFolderNames", ")", ":", "target_folder", "=", "target_folder", ".", "value", "elif", "not", "isinstance", "(", "target_folder", ",", "str", ")", ":", "# a Folder instance", "target_folder", "=", "getattr", "(", "target_folder", ",", "'folder_id'", ",", "OutlookWellKnowFolderNames", ".", "DRAFTS", ".", "value", ")", "url", "=", "self", ".", "build_url", "(", "self", ".", "_endpoints", ".", "get", "(", "'create_draft_folder'", ")", ".", "format", "(", "id", "=", "target_folder", ")", ")", "method", "=", "self", ".", "con", ".", "post", "data", "=", "self", ".", "to_api_data", "(", ")", "if", "not", "data", ":", "return", "True", "response", "=", "method", "(", "url", ",", "data", "=", "data", ")", "if", "not", "response", ":", "return", "False", "self", ".", "_track_changes", ".", "clear", "(", ")", "# reset the tracked changes as they are all saved", "if", "not", "self", ".", "object_id", ":", "# new message", "message", "=", "response", ".", "json", "(", ")", "self", ".", "object_id", "=", "message", ".", "get", "(", "self", ".", "_cc", "(", "'id'", ")", ",", "None", ")", "self", ".", "folder_id", "=", "message", ".", "get", "(", "self", ".", "_cc", "(", "'parentFolderId'", ")", ",", "None", ")", "# fallback to office365 v1.0", "self", ".", "__created", "=", "message", ".", "get", "(", "self", ".", "_cc", "(", "'createdDateTime'", ")", ",", "message", ".", "get", "(", "self", ".", "_cc", "(", "'dateTimeCreated'", ")", ",", "None", ")", ")", "# fallback to office365 v1.0", "self", ".", "__modified", "=", "message", ".", "get", "(", "self", ".", "_cc", "(", "'lastModifiedDateTime'", ")", ",", "message", ".", "get", "(", "self", ".", "_cc", "(", "'dateTimeModified'", ")", ",", "None", ")", ")", "self", ".", "__created", "=", "parse", "(", "self", ".", "__created", ")", ".", "astimezone", "(", "self", ".", "protocol", ".", "timezone", ")", "if", "self", ".", "__created", "else", "None", "self", ".", "__modified", "=", "parse", "(", "self", ".", "__modified", ")", ".", "astimezone", "(", "self", ".", "protocol", ".", "timezone", ")", "if", "self", ".", "__modified", "else", "None", "else", ":", "self", ".", "__modified", "=", "self", ".", "protocol", ".", 
"timezone", ".", "localize", "(", "dt", ".", "datetime", ".", "now", "(", ")", ")", "return", "True" ]
Save this message as a draft on the cloud. :param target_folder: name of the drafts folder :return: Success / Failure :rtype: bool
[ "Save", "this", "message", "as", "a", "draft", "on", "the", "cloud" ]
python
train
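A minimal usage sketch for save_draft above; the account construction, recipient, and body are illustrative assumptions, not part of this record:

m = account.new_message()        # 'account' is an assumed O365-style Account instance
m.to.add('someone@example.com')  # illustrative recipient
m.body = 'Hello'
m.save_draft()                   # creates the draft in the Drafts folder by default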
wilson-eft/wilson
wilson/translate/wet.py
https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/translate/wet.py#L1352-L1368
def Fierz_to_Bern_chrom(C, dd, parameters):
    """From Fierz to chromomagnetic Bern basis for Class V.
    dd should be of the form 'sb', 'ds' etc."""
    e = sqrt(4 * pi * parameters['alpha_e'])
    gs = sqrt(4 * pi * parameters['alpha_s'])
    if dd == 'sb' or dd == 'db':
        mq = parameters['m_b']
    elif dd == 'ds':
        mq = parameters['m_s']
    else:
        raise KeyError("Not sure what to do with quark mass for flavour {}".format(dd))
    return {
        '7gamma' + dd: gs**2 / e / mq * C['F7gamma' + dd],
        '8g' + dd: gs / mq * C['F8g' + dd],
        '7pgamma' + dd: gs**2 / e / mq * C['F7pgamma' + dd],
        '8pg' + dd: gs / mq * C['F8pg' + dd]
    }
[ "def", "Fierz_to_Bern_chrom", "(", "C", ",", "dd", ",", "parameters", ")", ":", "e", "=", "sqrt", "(", "4", "*", "pi", "*", "parameters", "[", "'alpha_e'", "]", ")", "gs", "=", "sqrt", "(", "4", "*", "pi", "*", "parameters", "[", "'alpha_s'", "]", ")", "if", "dd", "==", "'sb'", "or", "dd", "==", "'db'", ":", "mq", "=", "parameters", "[", "'m_b'", "]", "elif", "dd", "==", "'ds'", ":", "mq", "=", "parameters", "[", "'m_s'", "]", "else", ":", "KeyError", "(", "\"Not sure what to do with quark mass for flavour {}\"", ".", "format", "(", "dd", ")", ")", "return", "{", "'7gamma'", "+", "dd", ":", "gs", "**", "2", "/", "e", "/", "mq", "*", "C", "[", "'F7gamma'", "+", "dd", "]", ",", "'8g'", "+", "dd", ":", "gs", "/", "mq", "*", "C", "[", "'F8g'", "+", "dd", "]", ",", "'7pgamma'", "+", "dd", ":", "gs", "**", "2", "/", "e", "/", "mq", "*", "C", "[", "'F7pgamma'", "+", "dd", "]", ",", "'8pg'", "+", "dd", ":", "gs", "/", "mq", "*", "C", "[", "'F8pg'", "+", "dd", "]", "}" ]
From Fierz to chromomagnetic Bern basis for Class V. dd should be of the form 'sb', 'ds' etc.
[ "From", "Fierz", "to", "chromomagnetic", "Bern", "basis", "for", "Class", "V", ".", "dd", "should", "be", "of", "the", "form", "sb", "ds", "etc", "." ]
python
train
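A hedged call sketch for Fierz_to_Bern_chrom above; all numeric values are made-up placeholders, and only the 'F7gamma'+dd key pattern comes from the code itself:

pars = {'alpha_e': 1 / 127.9, 'alpha_s': 0.118, 'm_b': 4.18, 'm_s': 0.095}
C = {'F7gammasb': 0.1, 'F8gsb': 0.05, 'F7pgammasb': 0.0, 'F8pgsb': 0.0}
bern = Fierz_to_Bern_chrom(C, 'sb', pars)  # keys: '7gammasb', '8gsb', '7pgammasb', '8pgsb'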
wummel/linkchecker
third_party/dnspython/dns/rdata.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/dnspython/dns/rdata.py#L184-L190
def validate(self): """Check that the current contents of the rdata's fields are valid. If you change an rdata by assigning to its fields, it is a good idea to call validate() when you are done making changes. """ dns.rdata.from_text(self.rdclass, self.rdtype, self.to_text())
[ "def", "validate", "(", "self", ")", ":", "dns", ".", "rdata", ".", "from_text", "(", "self", ".", "rdclass", ",", "self", ".", "rdtype", ",", "self", ".", "to_text", "(", ")", ")" ]
Check that the current contents of the rdata's fields are valid. If you change an rdata by assigning to its fields, it is a good idea to call validate() when you are done making changes.
[ "Check", "that", "the", "current", "contents", "of", "the", "rdata", "s", "fields", "are", "valid", ".", "If", "you", "change", "an", "rdata", "by", "assigning", "to", "its", "fields", "it", "is", "a", "good", "idea", "to", "call", "validate", "()", "when", "you", "are", "done", "making", "changes", "." ]
python
train
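A small sketch of the mutate-then-revalidate pattern the docstring describes, assuming the older bundled dnspython where rdata fields are plain attributes:

import dns.rdata, dns.rdataclass, dns.rdatatype
rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '10.0.0.1')
rd.address = '10.0.0.2'  # change a field by assignment...
rd.validate()            # ...then check the rdata still round-trips through its text form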
toastdriven/alligator
alligator/backends/beanstalk_backend.py
https://github.com/toastdriven/alligator/blob/f18bcb35b350fc6b0886393f5246d69c892b36c7/alligator/backends/beanstalk_backend.py#L34-L53
def len(self, queue_name): """ Returns the length of the queue. :param queue_name: The name of the queue. Usually handled by the ``Gator`` instance. :type queue_name: string :returns: The length of the queue :rtype: integer """ try: stats = self.conn.stats_tube(queue_name) except beanstalkc.CommandFailed as err: if err[1] == 'NOT_FOUND': return 0 raise return stats.get('current-jobs-ready', 0)
[ "def", "len", "(", "self", ",", "queue_name", ")", ":", "try", ":", "stats", "=", "self", ".", "conn", ".", "stats_tube", "(", "queue_name", ")", "except", "beanstalkc", ".", "CommandFailed", "as", "err", ":", "if", "err", "[", "1", "]", "==", "'NOT_FOUND'", ":", "return", "0", "raise", "return", "stats", ".", "get", "(", "'current-jobs-ready'", ",", "0", ")" ]
Returns the length of the queue. :param queue_name: The name of the queue. Usually handled by the ``Gator`` instance. :type queue_name: string :returns: The length of the queue :rtype: integer
[ "Returns", "the", "length", "of", "the", "queue", "." ]
python
train
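A rough usage sketch; the backend class name and connection string are assumptions for illustration:

backend = Client('beanstalk://localhost:11300')  # hypothetical construction of the beanstalk backend
pending = backend.len('default')                 # returns 0 when the tube does not exist yet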
teddychoi/BynamoDB
bynamodb/model.py
https://github.com/teddychoi/BynamoDB/blob/9b143d0554c89fb8edbfb99db5542e48bd126b39/bynamodb/model.py#L167-L205
def update_item(cls, hash_key, range_key=None, attributes_to_set=None, attributes_to_add=None): """Update item attributes. Currently SET and ADD actions are supported.""" primary_key = cls._encode_key(hash_key, range_key) value_names = {} encoded_values = {} dynamizer = Dynamizer() set_expression = '' if attributes_to_set: for i, key in enumerate(attributes_to_set.keys()): value_name = ':s{0}'.format(i) value_names[key] = value_name encoded_values[value_name] = dynamizer.encode(attributes_to_set[key]) set_expression = 'SET {0}'.format( ', '.join( '{key}={value_name}'.format(key=key, value_name=value_names[key]) for key in attributes_to_set ) ) add_expression = '' if attributes_to_add: for i, key in enumerate(attributes_to_add.keys()): value_name = ':a{0}'.format(i) value_names[key] = value_name encoded_values[value_name] = dynamizer.encode(attributes_to_add[key]) add_expression = 'ADD {0}'.format( ', '.join( '{key} {value_name}'.format(key=key, value_name=value_names[key]) for key in attributes_to_add ) ) update_expression = ' '.join([set_expression, add_expression]) cls._get_connection().update_item( cls.get_table_name(), primary_key, update_expression=update_expression, expression_attribute_values=encoded_values)
[ "def", "update_item", "(", "cls", ",", "hash_key", ",", "range_key", "=", "None", ",", "attributes_to_set", "=", "None", ",", "attributes_to_add", "=", "None", ")", ":", "primary_key", "=", "cls", ".", "_encode_key", "(", "hash_key", ",", "range_key", ")", "value_names", "=", "{", "}", "encoded_values", "=", "{", "}", "dynamizer", "=", "Dynamizer", "(", ")", "set_expression", "=", "''", "if", "attributes_to_set", ":", "for", "i", ",", "key", "in", "enumerate", "(", "attributes_to_set", ".", "keys", "(", ")", ")", ":", "value_name", "=", "':s{0}'", ".", "format", "(", "i", ")", "value_names", "[", "key", "]", "=", "value_name", "encoded_values", "[", "value_name", "]", "=", "dynamizer", ".", "encode", "(", "attributes_to_set", "[", "key", "]", ")", "set_expression", "=", "'SET {0}'", ".", "format", "(", "', '", ".", "join", "(", "'{key}={value_name}'", ".", "format", "(", "key", "=", "key", ",", "value_name", "=", "value_names", "[", "key", "]", ")", "for", "key", "in", "attributes_to_set", ")", ")", "add_expression", "=", "''", "if", "attributes_to_add", ":", "for", "i", ",", "key", "in", "enumerate", "(", "attributes_to_add", ".", "keys", "(", ")", ")", ":", "value_name", "=", "':a{0}'", ".", "format", "(", "i", ")", "value_names", "[", "key", "]", "=", "value_name", "encoded_values", "[", "value_name", "]", "=", "dynamizer", ".", "encode", "(", "attributes_to_add", "[", "key", "]", ")", "add_expression", "=", "'ADD {0}'", ".", "format", "(", "', '", ".", "join", "(", "'{key} {value_name}'", ".", "format", "(", "key", "=", "key", ",", "value_name", "=", "value_names", "[", "key", "]", ")", "for", "key", "in", "attributes_to_add", ")", ")", "update_expression", "=", "' '", ".", "join", "(", "[", "set_expression", ",", "add_expression", "]", ")", "cls", ".", "_get_connection", "(", ")", ".", "update_item", "(", "cls", ".", "get_table_name", "(", ")", ",", "primary_key", ",", "update_expression", "=", "update_expression", ",", "expression_attribute_values", "=", "encoded_values", ")" ]
Update item attributes. Currently SET and ADD actions are supported.
[ "Update", "item", "attributes", ".", "Currently", "SET", "and", "ADD", "actions", "are", "supported", "." ]
python
train
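A short sketch of calling the classmethod above on a hypothetical model; the model and attribute names are illustrative:

# 'User' is a hypothetical Model subclass keyed on a hash key only.
User.update_item(
    'user-1',
    attributes_to_set={'nickname': 'alice'},  # becomes the SET expression
    attributes_to_add={'login_count': 1},     # becomes the ADD expression
)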
scott-griffiths/bitstring
bitstring.py
https://github.com/scott-griffiths/bitstring/blob/ab40ae7f0b43fe223a39b63cbc0529b09f3ef653/bitstring.py#L3365-L3388
def insert(self, bs, pos=None):
    """Insert bs at bit position pos.

    bs -- The bitstring to insert.
    pos -- The bit position to insert at.

    Raises ValueError if pos < 0 or pos > self.len.

    """
    bs = Bits(bs)
    if not bs.len:
        return self
    if bs is self:
        bs = self.__copy__()
    if pos is None:
        try:
            pos = self._pos
        except AttributeError:
            raise TypeError("insert requires a bit position for this type.")
    if pos < 0:
        pos += self.len
    if not 0 <= pos <= self.len:
        raise ValueError("Invalid insert position.")
    self._insert(bs, pos)
[ "def", "insert", "(", "self", ",", "bs", ",", "pos", "=", "None", ")", ":", "bs", "=", "Bits", "(", "bs", ")", "if", "not", "bs", ".", "len", ":", "return", "self", "if", "bs", "is", "self", ":", "bs", "=", "self", ".", "__copy__", "(", ")", "if", "pos", "is", "None", ":", "try", ":", "pos", "=", "self", ".", "_pos", "except", "AttributeError", ":", "raise", "TypeError", "(", "\"insert require a bit position for this type.\"", ")", "if", "pos", "<", "0", ":", "pos", "+=", "self", ".", "len", "if", "not", "0", "<=", "pos", "<=", "self", ".", "len", ":", "raise", "ValueError", "(", "\"Invalid insert position.\"", ")", "self", ".", "_insert", "(", "bs", ",", "pos", ")" ]
Insert bs at bit position pos. bs -- The bitstring to insert. pos -- The bit position to insert at. Raises ValueError if pos < 0 or pos > self.len.
[ "Insert", "bs", "at", "bit", "position", "pos", "." ]
python
train
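A tiny worked example of insert above, using bitstring's binary literals:

from bitstring import BitStream
s = BitStream('0b00')
s.insert('0b11', 1)  # bits become 0b0110; omitting pos uses the stream's current position
print(s.bin)         # '0110'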
openvax/pyensembl
pyensembl/download_cache.py
https://github.com/openvax/pyensembl/blob/4b995fb72e848206d6fbf11950cf30964cd9b3aa/pyensembl/download_cache.py#L243-L276
def download_or_copy_if_necessary( self, path_or_url, download_if_missing=False, overwrite=False): """ Download a remote file or copy Get the local path to a possibly remote file. Download if file is missing from the cache directory and `download_if_missing` is True. Download even if local file exists if both `download_if_missing` and `overwrite` are True. If the file is on the local file system then return its path, unless self.copy_local_to_cache is True, and then copy it to the cache first. Parameters ---------- path_or_url : str download_if_missing : bool, optional Download files if missing from local cache overwrite : bool, optional Overwrite existing copy if it exists """ assert path_or_url, "Expected non-empty string for path_or_url" if self.is_url_format(path_or_url): return self._download_if_necessary( path_or_url, download_if_missing, overwrite) else: return self._copy_if_necessary(path_or_url, overwrite)
[ "def", "download_or_copy_if_necessary", "(", "self", ",", "path_or_url", ",", "download_if_missing", "=", "False", ",", "overwrite", "=", "False", ")", ":", "assert", "path_or_url", ",", "\"Expected non-empty string for path_or_url\"", "if", "self", ".", "is_url_format", "(", "path_or_url", ")", ":", "return", "self", ".", "_download_if_necessary", "(", "path_or_url", ",", "download_if_missing", ",", "overwrite", ")", "else", ":", "return", "self", ".", "_copy_if_necessary", "(", "path_or_url", ",", "overwrite", ")" ]
Download a remote file or copy a local one. Get the local path to a possibly remote file. Download if the file is missing from the cache directory and `download_if_missing` is True. Download even if a local file exists when both `download_if_missing` and `overwrite` are True. If the file is on the local file system then return its path, unless self.copy_local_to_cache is True, in which case copy it to the cache first. Parameters ---------- path_or_url : str download_if_missing : bool, optional Download files if missing from local cache overwrite : bool, optional Overwrite existing copy if it exists
[ "Download", "a", "remote", "file", "or", "copy", "Get", "the", "local", "path", "to", "a", "possibly", "remote", "file", "." ]
python
train
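A hedged sketch of the two paths through the method above; the cache object, URL, and file paths are placeholders:

local = cache.download_or_copy_if_necessary(  # 'cache' is an assumed DownloadCache instance
    'https://example.org/annotations.gtf.gz',
    download_if_missing=True)
same = cache.download_or_copy_if_necessary('/data/annotations.gtf.gz')  # local file: copied or returned as-is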
tanghaibao/goatools
goatools/statsdescribe.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/statsdescribe.py#L37-L40
def getstr_data(self, name, vals): """Return stats data string in markdown style.""" fld2val = self.get_fld2val(name, vals) return self.fmt.format(**fld2val)
[ "def", "getstr_data", "(", "self", ",", "name", ",", "vals", ")", ":", "fld2val", "=", "self", ".", "get_fld2val", "(", "name", ",", "vals", ")", "return", "self", ".", "fmt", ".", "format", "(", "*", "*", "fld2val", ")" ]
Return stats data string in markdown style.
[ "Return", "stats", "data", "string", "in", "markdown", "style", "." ]
python
train
IntegralDefense/critsapi
critsapi/critsapi.py
https://github.com/IntegralDefense/critsapi/blob/e770bd81e124eaaeb5f1134ba95f4a35ff345c5a/critsapi/critsapi.py#L723-L761
def source_add_update(self, crits_id, crits_type, source, action_type='add', method='', reference='', date=None): """ date must be in the format "%Y-%m-%d %H:%M:%S.%f" """ type_trans = self._type_translation(crits_type) submit_url = '{}/{}/{}/'.format(self.url, type_trans, crits_id) if date is None: date = datetime.datetime.now() date = datetime.datetime.strftime(date, '%Y-%m-%d %H:%M:%S.%f') params = { 'api_key': self.api_key, 'username': self.username, } data = { 'action': 'source_add_update', 'action_type': action_type, 'source': source, 'method': method, 'reference': reference, 'date': date } r = requests.patch(submit_url, params=params, data=json.dumps(data), proxies=self.proxies, verify=self.verify) if r.status_code == 200: log.debug('Source {0} added successfully to {1} ' '{2}'.format(source, crits_type, crits_id)) return True else: log.error('Error with status code {0} and message {1} for ' 'type {2} and id {3} and source ' '{4}'.format(r.status_code, r.text, crits_type, crits_id, source)) return False
[ "def", "source_add_update", "(", "self", ",", "crits_id", ",", "crits_type", ",", "source", ",", "action_type", "=", "'add'", ",", "method", "=", "''", ",", "reference", "=", "''", ",", "date", "=", "None", ")", ":", "type_trans", "=", "self", ".", "_type_translation", "(", "crits_type", ")", "submit_url", "=", "'{}/{}/{}/'", ".", "format", "(", "self", ".", "url", ",", "type_trans", ",", "crits_id", ")", "if", "date", "is", "None", ":", "date", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "date", "=", "datetime", ".", "datetime", ".", "strftime", "(", "date", ",", "'%Y-%m-%d %H:%M:%S.%f'", ")", "params", "=", "{", "'api_key'", ":", "self", ".", "api_key", ",", "'username'", ":", "self", ".", "username", ",", "}", "data", "=", "{", "'action'", ":", "'source_add_update'", ",", "'action_type'", ":", "action_type", ",", "'source'", ":", "source", ",", "'method'", ":", "method", ",", "'reference'", ":", "reference", ",", "'date'", ":", "date", "}", "r", "=", "requests", ".", "patch", "(", "submit_url", ",", "params", "=", "params", ",", "data", "=", "json", ".", "dumps", "(", "data", ")", ",", "proxies", "=", "self", ".", "proxies", ",", "verify", "=", "self", ".", "verify", ")", "if", "r", ".", "status_code", "==", "200", ":", "log", ".", "debug", "(", "'Source {0} added successfully to {1} '", "'{2}'", ".", "format", "(", "source", ",", "crits_type", ",", "crits_id", ")", ")", "return", "True", "else", ":", "log", ".", "error", "(", "'Error with status code {0} and message {1} for '", "'type {2} and id {3} and source '", "'{4}'", ".", "format", "(", "r", ".", "status_code", ",", "r", ".", "text", ",", "crits_type", ",", "crits_id", ",", "source", ")", ")", "return", "False" ]
date must be in the format "%Y-%m-%d %H:%M:%S.%f"
[ "date", "must", "be", "in", "the", "format", "%Y", "-", "%m", "-", "%d", "%H", ":", "%M", ":", "%S", ".", "%f" ]
python
train
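A call sketch for source_add_update above; the object id, type, and source name are hypothetical, and passing a datetime shows how the '%Y-%m-%d %H:%M:%S.%f' formatting is handled for you:

import datetime
ok = api.source_add_update(               # 'api' is an assumed CRITsAPI instance
    crits_id='5c0ffee5a1b2c3d4e5f60789',  # hypothetical ObjectId
    crits_type='Indicator',
    source='OSINT',
    reference='ticket-123',
    date=datetime.datetime(2019, 1, 2, 3, 4, 5, 678901))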
cmbruns/pyopenvr
src/openvr/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L3749-L3756
def getBoundsColor(self, nNumOutputColors, flCollisionBoundsFadeDistance): """Get the current chaperone bounds draw color and brightness""" fn = self.function_table.getBoundsColor pOutputColorArray = HmdColor_t() pOutputCameraColor = HmdColor_t() fn(byref(pOutputColorArray), nNumOutputColors, flCollisionBoundsFadeDistance, byref(pOutputCameraColor)) return pOutputColorArray, pOutputCameraColor
[ "def", "getBoundsColor", "(", "self", ",", "nNumOutputColors", ",", "flCollisionBoundsFadeDistance", ")", ":", "fn", "=", "self", ".", "function_table", ".", "getBoundsColor", "pOutputColorArray", "=", "HmdColor_t", "(", ")", "pOutputCameraColor", "=", "HmdColor_t", "(", ")", "fn", "(", "byref", "(", "pOutputColorArray", ")", ",", "nNumOutputColors", ",", "flCollisionBoundsFadeDistance", ",", "byref", "(", "pOutputCameraColor", ")", ")", "return", "pOutputColorArray", ",", "pOutputCameraColor" ]
Get the current chaperone bounds draw color and brightness
[ "Get", "the", "current", "chaperone", "bounds", "draw", "color", "and", "brightness" ]
python
train
portfors-lab/sparkle
sparkle/gui/stim/auto_parameters_editor.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/auto_parameters_editor.py#L98-L104
def closeEvent(self, event): """Emits a signal to update start values on components""" self.visibilityChanged.emit(0) model = self.paramList.model() model.hintRequested.disconnect() model.rowsInserted.disconnect() model.rowsRemoved.disconnect()
[ "def", "closeEvent", "(", "self", ",", "event", ")", ":", "self", ".", "visibilityChanged", ".", "emit", "(", "0", ")", "model", "=", "self", ".", "paramList", ".", "model", "(", ")", "model", ".", "hintRequested", ".", "disconnect", "(", ")", "model", ".", "rowsInserted", ".", "disconnect", "(", ")", "model", ".", "rowsRemoved", ".", "disconnect", "(", ")" ]
Emits a signal to update start values on components
[ "Emits", "a", "signal", "to", "update", "start", "values", "on", "components" ]
python
train
MacHu-GWU/angora-project
angora/visual/timeseries.py
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/visual/timeseries.py#L136-L179
def plot_one_month(x, y, xlabel=None, ylabel=None, title=None, ylim=None):
    """Time span is one month.

    major tick = every day
    """
    plt.close("all")
    fig = plt.figure(figsize=(20, 10))
    ax = fig.add_subplot(111)
    ax.plot(x, y)

    days = DayLocator(range(365))
    daysFmt = DateFormatter("%Y-%m-%d")
    ax.xaxis.set_major_locator(days)
    ax.xaxis.set_major_formatter(daysFmt)

    ax.autoscale_view()
    ax.grid()
    plt.setp(ax.xaxis.get_majorticklabels(), rotation=90)

    if xlabel:
        plt.xlabel(xlabel)
    else:
        plt.xlabel("Time")

    if ylabel:
        plt.ylabel(ylabel)
    else:
        plt.ylabel("Value")

    if title:
        plt.title(title)
    else:
        plt.title("%s to %s" % (str(x[0]), str(x[-1])))

    if ylim:
        plt.ylim(ylim)
    else:
        plt.ylim([min(y) - (max(y) - min(y)) * 0.05,
                  max(y) + (max(y) - min(y)) * 0.05])

    return plt, ax
[ "def", "plot_one_month", "(", "x", ",", "y", ",", "xlabel", "=", "None", ",", "ylabel", "=", "None", ",", "title", "=", "None", ",", "ylim", "=", "None", ")", ":", "plt", ".", "close", "(", "\"all\"", ")", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "(", "20", ",", "10", ")", ")", "ax", "=", "fig", ".", "add_subplot", "(", "111", ")", "ax", ".", "plot", "(", "x", ",", "y", ")", "days", "=", "DayLocator", "(", "range", "(", "365", ")", ")", "daysFmt", "=", "DateFormatter", "(", "\"%Y-%m-%d\"", ")", "ax", ".", "xaxis", ".", "set_major_locator", "(", "days", ")", "ax", ".", "xaxis", ".", "set_major_formatter", "(", "daysFmt", ")", "ax", ".", "autoscale_view", "(", ")", "ax", ".", "grid", "(", ")", "plt", ".", "setp", "(", "ax", ".", "xaxis", ".", "get_majorticklabels", "(", ")", ",", "rotation", "=", "90", ")", "if", "xlabel", ":", "plt", ".", "xlabel", "(", "xlabel", ")", "else", ":", "plt", ".", "xlabel", "(", "\"Time\"", ")", "if", "ylabel", ":", "plt", ".", "ylabel", "(", "ylabel", ")", "else", ":", "plt", ".", "ylabel", "(", "\"Value\"", ")", "if", "title", ":", "plt", ".", "title", "(", "title", ")", "else", ":", "plt", ".", "title", "(", "\"%s to %s\"", "%", "(", "str", "(", "x", "[", "0", "]", ")", ",", "str", "(", "x", "[", "-", "1", "]", ")", ")", ")", "if", "ylim", ":", "plt", ".", "ylim", "(", "ylim", ")", "else", ":", "plt", ".", "ylim", "(", "[", "min", "(", "y", ")", "-", "(", "max", "(", "y", ")", "-", "min", "(", "y", ")", ")", "*", "0.05", ",", "max", "(", "y", ")", "+", "(", "max", "(", "y", ")", "-", "min", "(", "y", ")", ")", "*", "0.05", "]", ")", "return", "plt", ",", "ax" ]
Time span is one month. major tick = every day
[ "时间跨度为一月。", "major", "tick", "=", "every", "days" ]
python
train
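A quick usage sketch with synthetic daily data; pandas and numpy are used here only to fabricate the inputs:

import numpy as np
import pandas as pd
x = list(pd.date_range('2014-01-01', '2014-01-31', freq='D'))
y = np.random.randn(len(x)).cumsum()
fig_plt, ax = plot_one_month(x, y, title='January 2014')
fig_plt.show()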
apache/incubator-heron
heronpy/api/cloudpickle.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heronpy/api/cloudpickle.py#L604-L657
def save_file(self, obj): # pylint: disable=too-many-branches """Save a file""" try: import StringIO as pystringIO #we can't use cStringIO as it lacks the name attribute except ImportError: import io as pystringIO # pylint: disable=reimported if not hasattr(obj, 'name') or not hasattr(obj, 'mode'): raise pickle.PicklingError("Cannot pickle files that do not map to an actual file") if obj is sys.stdout: return self.save_reduce(getattr, (sys, 'stdout'), obj=obj) if obj is sys.stderr: return self.save_reduce(getattr, (sys, 'stderr'), obj=obj) if obj is sys.stdin: raise pickle.PicklingError("Cannot pickle standard input") if hasattr(obj, 'isatty') and obj.isatty(): raise pickle.PicklingError("Cannot pickle files that map to tty objects") if 'r' not in obj.mode: raise pickle.PicklingError("Cannot pickle files that are not opened for reading") name = obj.name try: fsize = os.stat(name).st_size except OSError: raise pickle.PicklingError("Cannot pickle file %s as it cannot be stat" % name) if obj.closed: #create an empty closed string io retval = pystringIO.StringIO("") retval.close() elif not fsize: #empty file retval = pystringIO.StringIO("") try: tmpfile = file(name) tst = tmpfile.read(1) except IOError: raise pickle.PicklingError("Cannot pickle file %s as it cannot be read" % name) tmpfile.close() if tst != '': raise pickle.PicklingError( "Cannot pickle file %s as it does not appear to map to a physical, real file" % name) else: try: tmpfile = file(name) contents = tmpfile.read() tmpfile.close() except IOError: raise pickle.PicklingError("Cannot pickle file %s as it cannot be read" % name) retval = pystringIO.StringIO(contents) curloc = obj.tell() retval.seek(curloc) retval.name = name self.save(retval) self.memoize(obj)
[ "def", "save_file", "(", "self", ",", "obj", ")", ":", "# pylint: disable=too-many-branches", "try", ":", "import", "StringIO", "as", "pystringIO", "#we can't use cStringIO as it lacks the name attribute", "except", "ImportError", ":", "import", "io", "as", "pystringIO", "# pylint: disable=reimported", "if", "not", "hasattr", "(", "obj", ",", "'name'", ")", "or", "not", "hasattr", "(", "obj", ",", "'mode'", ")", ":", "raise", "pickle", ".", "PicklingError", "(", "\"Cannot pickle files that do not map to an actual file\"", ")", "if", "obj", "is", "sys", ".", "stdout", ":", "return", "self", ".", "save_reduce", "(", "getattr", ",", "(", "sys", ",", "'stdout'", ")", ",", "obj", "=", "obj", ")", "if", "obj", "is", "sys", ".", "stderr", ":", "return", "self", ".", "save_reduce", "(", "getattr", ",", "(", "sys", ",", "'stderr'", ")", ",", "obj", "=", "obj", ")", "if", "obj", "is", "sys", ".", "stdin", ":", "raise", "pickle", ".", "PicklingError", "(", "\"Cannot pickle standard input\"", ")", "if", "hasattr", "(", "obj", ",", "'isatty'", ")", "and", "obj", ".", "isatty", "(", ")", ":", "raise", "pickle", ".", "PicklingError", "(", "\"Cannot pickle files that map to tty objects\"", ")", "if", "'r'", "not", "in", "obj", ".", "mode", ":", "raise", "pickle", ".", "PicklingError", "(", "\"Cannot pickle files that are not opened for reading\"", ")", "name", "=", "obj", ".", "name", "try", ":", "fsize", "=", "os", ".", "stat", "(", "name", ")", ".", "st_size", "except", "OSError", ":", "raise", "pickle", ".", "PicklingError", "(", "\"Cannot pickle file %s as it cannot be stat\"", "%", "name", ")", "if", "obj", ".", "closed", ":", "#create an empty closed string io", "retval", "=", "pystringIO", ".", "StringIO", "(", "\"\"", ")", "retval", ".", "close", "(", ")", "elif", "not", "fsize", ":", "#empty file", "retval", "=", "pystringIO", ".", "StringIO", "(", "\"\"", ")", "try", ":", "tmpfile", "=", "file", "(", "name", ")", "tst", "=", "tmpfile", ".", "read", "(", "1", ")", "except", "IOError", ":", "raise", "pickle", ".", "PicklingError", "(", "\"Cannot pickle file %s as it cannot be read\"", "%", "name", ")", "tmpfile", ".", "close", "(", ")", "if", "tst", "!=", "''", ":", "raise", "pickle", ".", "PicklingError", "(", "\"Cannot pickle file %s as it does not appear to map to a physical, real file\"", "%", "name", ")", "else", ":", "try", ":", "tmpfile", "=", "file", "(", "name", ")", "contents", "=", "tmpfile", ".", "read", "(", ")", "tmpfile", ".", "close", "(", ")", "except", "IOError", ":", "raise", "pickle", ".", "PicklingError", "(", "\"Cannot pickle file %s as it cannot be read\"", "%", "name", ")", "retval", "=", "pystringIO", ".", "StringIO", "(", "contents", ")", "curloc", "=", "obj", ".", "tell", "(", ")", "retval", ".", "seek", "(", "curloc", ")", "retval", ".", "name", "=", "name", "self", ".", "save", "(", "retval", ")", "self", ".", "memoize", "(", "obj", ")" ]
Save a file
[ "Save", "a", "file" ]
python
valid
mirukan/pydecensooru
pydecensooru/main.py
https://github.com/mirukan/pydecensooru/blob/2a2bec93c40ed2d3e359ee203eceabf42ef1755d/pydecensooru/main.py#L27-L32
def decensor_iter(posts_info: Iterable[dict], site_url: str = DEFAULT_SITE ) -> Generator[dict, None, None]: """Apply decensoring on an iterable of posts info dicts from Danbooru API. Any censored post is automatically decensored if needed.""" for info in posts_info: yield decensor(info, site_url)
[ "def", "decensor_iter", "(", "posts_info", ":", "Iterable", "[", "dict", "]", ",", "site_url", ":", "str", "=", "DEFAULT_SITE", ")", "->", "Generator", "[", "dict", ",", "None", ",", "None", "]", ":", "for", "info", "in", "posts_info", ":", "yield", "decensor", "(", "info", ",", "site_url", ")" ]
Apply decensoring on an iterable of posts info dicts from Danbooru API. Any censored post is automatically decensored if needed.
[ "Apply", "decensoring", "on", "an", "iterable", "of", "posts", "info", "dicts", "from", "Danbooru", "API", ".", "Any", "censored", "post", "is", "automatically", "decensored", "if", "needed", "." ]
python
train
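A sketch of streaming posts through the generator above; producing 'posts' from the Danbooru JSON API is assumed, not shown:

for info in decensor_iter(posts):
    print(info['id'], info.get('file_url'))  # censored entries arrive already filled in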
contentful-labs/contentful.py
contentful/cda/errors.py
https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/errors.py#L23-L33
def api_exception(http_code): """Convenience decorator to associate HTTP status codes with :class:`.ApiError` subclasses. :param http_code: (int) HTTP status code. :return: wrapper function. """ def wrapper(*args): code = args[0] ErrorMapping.mapping[http_code] = code return code return wrapper
[ "def", "api_exception", "(", "http_code", ")", ":", "def", "wrapper", "(", "*", "args", ")", ":", "code", "=", "args", "[", "0", "]", "ErrorMapping", ".", "mapping", "[", "http_code", "]", "=", "code", "return", "code", "return", "wrapper" ]
Convenience decorator to associate HTTP status codes with :class:`.ApiError` subclasses. :param http_code: (int) HTTP status code. :return: wrapper function.
[ "Convenience", "decorator", "to", "associate", "HTTP", "status", "codes", "with", ":", "class", ":", ".", "ApiError", "subclasses", "." ]
python
train
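The decorator above registers an exception class under an HTTP status code; a minimal sketch (the 404 mapping is chosen for illustration):

@api_exception(404)
class NotFoundError(ApiError):  # ApiError is the base class the docstring refers to
    """Raised when the requested resource does not exist."""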
opentok/Opentok-Python-SDK
opentok/opentok.py
https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/opentok.py#L490-L495
def list_archives(self, offset=None, count=None, session_id=None):
    """New method to get the archive list. It is an alternative to
    'get_archives()'; both methods exist for backwards compatibility.
    """
    return self.get_archives(offset, count, session_id)
[ "def", "list_archives", "(", "self", ",", "offset", "=", "None", ",", "count", "=", "None", ",", "session_id", "=", "None", ")", ":", "return", "self", ".", "get_archives", "(", "offset", ",", "count", ",", "session_id", ")" ]
New method to get the archive list. It is an alternative to 'get_archives()'; both methods exist for backwards compatibility.
[ "New", "method", "to", "get", "archive", "list", "it", "s", "alternative", "to", "get_archives", "()", "both", "methods", "exist", "to", "have", "backwards", "compatible" ]
python
train
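A one-line usage sketch; constructing the client follows the SDK's usual entry point:

opentok = OpenTok(api_key, api_secret)      # api_key / api_secret are your credentials
archives = opentok.list_archives(count=50)  # equivalent to get_archives(None, 50, None)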
Equitable/trump
trump/orm.py
https://github.com/Equitable/trump/blob/a2802692bc642fa32096374159eea7ceca2947b4/trump/orm.py#L1270-L1286
def describe(self): """ describes a Symbol, returns a string """ lines = [] lines.append("Symbol = {}".format(self.name)) if len(self.tags): tgs = ", ".join(x.tag for x in self.tags) lines.append(" tagged = {}".format(tgs)) if len(self.aliases): als = ", ".join(x.alias for x in self.aliases) lines.append(" aliased = {}".format(als)) if len(self.feeds): lines.append(" feeds:") for fed in self.feeds: lines.append(" {}. {}".format(fed.fnum, fed.ftype)) return "\n".join(lines)
[ "def", "describe", "(", "self", ")", ":", "lines", "=", "[", "]", "lines", ".", "append", "(", "\"Symbol = {}\"", ".", "format", "(", "self", ".", "name", ")", ")", "if", "len", "(", "self", ".", "tags", ")", ":", "tgs", "=", "\", \"", ".", "join", "(", "x", ".", "tag", "for", "x", "in", "self", ".", "tags", ")", "lines", ".", "append", "(", "\" tagged = {}\"", ".", "format", "(", "tgs", ")", ")", "if", "len", "(", "self", ".", "aliases", ")", ":", "als", "=", "\", \"", ".", "join", "(", "x", ".", "alias", "for", "x", "in", "self", ".", "aliases", ")", "lines", ".", "append", "(", "\" aliased = {}\"", ".", "format", "(", "als", ")", ")", "if", "len", "(", "self", ".", "feeds", ")", ":", "lines", ".", "append", "(", "\" feeds:\"", ")", "for", "fed", "in", "self", ".", "feeds", ":", "lines", ".", "append", "(", "\" {}. {}\"", ".", "format", "(", "fed", ".", "fnum", ",", "fed", ".", "ftype", ")", ")", "return", "\"\\n\"", ".", "join", "(", "lines", ")" ]
describes a Symbol, returns a string
[ "describes", "a", "Symbol", "returns", "a", "string" ]
python
train
nfcpy/nfcpy
src/nfc/tag/tt3.py
https://github.com/nfcpy/nfcpy/blob/6649146d1afdd5e82b2b6b1ea00aa58d50785117/src/nfc/tag/tt3.py#L644-L665
def write_to_ndef_service(self, data, *blocks):
    """Write block data to an NDEF compatible tag.

    This is a convenience method to write block data to a tag that
    has system code 0x12FC (NDEF). For other tags this method
    simply does nothing. The *data* to write must be a string or
    bytearray with length equal ``16 * len(blocks)``. All
    parameters following *data* are interpreted as block numbers
    to write. To actually pass a list of block numbers requires
    unpacking. The following example calls would have the same
    effect of writing 32 zero bytes into blocks 1 and 8::

        tag.write_to_ndef_service(32 * "\\0", 1, 8)
        tag.write_to_ndef_service(32 * "\\0", *[1, 8])

    Command execution errors raise :exc:`~nfc.tag.TagCommandError`.

    """
    if self.sys == 0x12FC:
        sc_list = [ServiceCode(0, 0b001001)]
        bc_list = [BlockCode(n) for n in blocks]
        self.write_without_encryption(sc_list, bc_list, data)
[ "def", "write_to_ndef_service", "(", "self", ",", "data", ",", "*", "blocks", ")", ":", "if", "self", ".", "sys", "==", "0x12FC", ":", "sc_list", "=", "[", "ServiceCode", "(", "0", ",", "0b001001", ")", "]", "bc_list", "=", "[", "BlockCode", "(", "n", ")", "for", "n", "in", "blocks", "]", "self", ".", "write_without_encryption", "(", "sc_list", ",", "bc_list", ",", "data", ")" ]
Write block data to an NDEF compatible tag. This is a convenience method to write block data to a tag that has system code 0x12FC (NDEF). For other tags this method simply does nothing. The *data* to write must be a string or bytearray with length equal ``16 * len(blocks)``. All parameters following *data* are interpreted as block numbers to write. To actually pass a list of block numbers requires unpacking. The following example calls would have the same effect of writing 32 zero bytes into blocks 1 and 8:: tag.write_to_ndef_service(32 * "\\0", 1, 8) tag.write_to_ndef_service(32 * "\\0", *[1, 8]) Command execution errors raise :exc:`~nfc.tag.TagCommandError`.
[ "Write", "block", "data", "to", "an", "NDEF", "compatible", "tag", "." ]
python
train
HazyResearch/pdftotree
pdftotree/utils/display_utils.py
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/display_utils.py#L65-L74
def pdf_to_img(pdf_file, page_num, page_width, page_height): """ Converts pdf file into image :param pdf_file: path to the pdf file :param page_num: page number to convert (index starting at 1) :return: wand image object """ img = Image(filename="{}[{}]".format(pdf_file, page_num - 1)) img.resize(page_width, page_height) return img
[ "def", "pdf_to_img", "(", "pdf_file", ",", "page_num", ",", "page_width", ",", "page_height", ")", ":", "img", "=", "Image", "(", "filename", "=", "\"{}[{}]\"", ".", "format", "(", "pdf_file", ",", "page_num", "-", "1", ")", ")", "img", ".", "resize", "(", "page_width", ",", "page_height", ")", "return", "img" ]
Converts pdf file into image :param pdf_file: path to the pdf file :param page_num: page number to convert (index starting at 1) :return: wand image object
[ "Converts", "pdf", "file", "into", "image", ":", "param", "pdf_file", ":", "path", "to", "the", "pdf", "file", ":", "param", "page_num", ":", "page", "number", "to", "convert", "(", "index", "starting", "at", "1", ")", ":", "return", ":", "wand", "image", "object" ]
python
train
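A small sketch rendering page 1 of a hypothetical file at US-letter dimensions in points:

img = pdf_to_img('report.pdf', page_num=1, page_width=612, page_height=792)
img.save(filename='report-page1.png')  # wand's Image supports save(filename=...)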
brutasse/graphite-api
graphite_api/functions.py
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L4146-L4175
def randomWalkFunction(requestContext, name, step=60): """ Short Alias: randomWalk() Returns a random walk starting at 0. This is great for testing when there is no real data in whisper. Example:: &target=randomWalk("The.time.series") This would create a series named "The.time.series" that contains points where x(t) == x(t-1)+random()-0.5, and x(0) == 0. Accepts an optional second argument as step parameter (default step is 60 sec). """ delta = timedelta(seconds=step) when = requestContext["startTime"] values = [] current = 0 while when < requestContext["endTime"]: values.append(current) current += random.random() - 0.5 when += delta return [TimeSeries( name, int(epoch(requestContext["startTime"])), int(epoch(requestContext["endTime"])), step, values)]
[ "def", "randomWalkFunction", "(", "requestContext", ",", "name", ",", "step", "=", "60", ")", ":", "delta", "=", "timedelta", "(", "seconds", "=", "step", ")", "when", "=", "requestContext", "[", "\"startTime\"", "]", "values", "=", "[", "]", "current", "=", "0", "while", "when", "<", "requestContext", "[", "\"endTime\"", "]", ":", "values", ".", "append", "(", "current", ")", "current", "+=", "random", ".", "random", "(", ")", "-", "0.5", "when", "+=", "delta", "return", "[", "TimeSeries", "(", "name", ",", "int", "(", "epoch", "(", "requestContext", "[", "\"startTime\"", "]", ")", ")", ",", "int", "(", "epoch", "(", "requestContext", "[", "\"endTime\"", "]", ")", ")", ",", "step", ",", "values", ")", "]" ]
Short Alias: randomWalk() Returns a random walk starting at 0. This is great for testing when there is no real data in whisper. Example:: &target=randomWalk("The.time.series") This would create a series named "The.time.series" that contains points where x(t) == x(t-1)+random()-0.5, and x(0) == 0. Accepts an optional second argument as step parameter (default step is 60 sec).
[ "Short", "Alias", ":", "randomWalk", "()" ]
python
train
hasgeek/coaster
coaster/sqlalchemy/mixins.py
https://github.com/hasgeek/coaster/blob/07f7eb5d5f516e22fa14fdf4dc70e0ae13ee398d/coaster/sqlalchemy/mixins.py#L536-L545
def permissions(self, actor, inherited=None): """ Permissions for this model, plus permissions inherited from the parent. """ if inherited is not None: return inherited | super(BaseScopedNameMixin, self).permissions(actor) elif self.parent is not None and isinstance(self.parent, PermissionMixin): return self.parent.permissions(actor) | super(BaseScopedNameMixin, self).permissions(actor) else: return super(BaseScopedNameMixin, self).permissions(actor)
[ "def", "permissions", "(", "self", ",", "actor", ",", "inherited", "=", "None", ")", ":", "if", "inherited", "is", "not", "None", ":", "return", "inherited", "|", "super", "(", "BaseScopedNameMixin", ",", "self", ")", ".", "permissions", "(", "actor", ")", "elif", "self", ".", "parent", "is", "not", "None", "and", "isinstance", "(", "self", ".", "parent", ",", "PermissionMixin", ")", ":", "return", "self", ".", "parent", ".", "permissions", "(", "actor", ")", "|", "super", "(", "BaseScopedNameMixin", ",", "self", ")", ".", "permissions", "(", "actor", ")", "else", ":", "return", "super", "(", "BaseScopedNameMixin", ",", "self", ")", ".", "permissions", "(", "actor", ")" ]
Permissions for this model, plus permissions inherited from the parent.
[ "Permissions", "for", "this", "model", "plus", "permissions", "inherited", "from", "the", "parent", "." ]
python
train
wndhydrnt/python-oauth2
oauth2/store/memory.py
https://github.com/wndhydrnt/python-oauth2/blob/abe3bf5f27bda2ff737cab387b040e2e6e85c2e2/oauth2/store/memory.py#L69-L82
def fetch_by_code(self, code): """ Returns an AuthorizationCode. :param code: The authorization code. :return: An instance of :class:`oauth2.datatype.AuthorizationCode`. :raises: :class:`AuthCodeNotFound` if no data could be retrieved for given code. """ if code not in self.auth_codes: raise AuthCodeNotFound return self.auth_codes[code]
[ "def", "fetch_by_code", "(", "self", ",", "code", ")", ":", "if", "code", "not", "in", "self", ".", "auth_codes", ":", "raise", "AuthCodeNotFound", "return", "self", ".", "auth_codes", "[", "code", "]" ]
Returns an AuthorizationCode. :param code: The authorization code. :return: An instance of :class:`oauth2.datatype.AuthorizationCode`. :raises: :class:`AuthCodeNotFound` if no data could be retrieved for given code.
[ "Returns", "an", "AuthorizationCode", "." ]
python
train
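A sketch of the lookup and its failure mode; the store class name is a best-effort assumption and the code value is illustrative:

store = TokenStore()                    # in-memory store from the same module (name assumed)
store.auth_codes['xyz'] = an_auth_code  # 'an_auth_code' stands in for an AuthorizationCode
try:
    code = store.fetch_by_code('xyz')
except AuthCodeNotFound:
    code = None                         # no entry under that code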
scanny/python-pptx
pptx/chart/xmlwriter.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/chart/xmlwriter.py#L1661-L1673
def yVal(self): """ Return the ``<c:yVal>`` element for this series as an oxml element. This element contains the Y values for this series. """ xml = self._yVal_tmpl.format(**{ 'nsdecls': ' %s' % nsdecls('c'), 'numRef_xml': self.numRef_xml( self._series.y_values_ref, self._series.number_format, self._series.y_values ), }) return parse_xml(xml)
[ "def", "yVal", "(", "self", ")", ":", "xml", "=", "self", ".", "_yVal_tmpl", ".", "format", "(", "*", "*", "{", "'nsdecls'", ":", "' %s'", "%", "nsdecls", "(", "'c'", ")", ",", "'numRef_xml'", ":", "self", ".", "numRef_xml", "(", "self", ".", "_series", ".", "y_values_ref", ",", "self", ".", "_series", ".", "number_format", ",", "self", ".", "_series", ".", "y_values", ")", ",", "}", ")", "return", "parse_xml", "(", "xml", ")" ]
Return the ``<c:yVal>`` element for this series as an oxml element. This element contains the Y values for this series.
[ "Return", "the", "<c", ":", "yVal", ">", "element", "for", "this", "series", "as", "an", "oxml", "element", ".", "This", "element", "contains", "the", "Y", "values", "for", "this", "series", "." ]
python
train
helixyte/everest
everest/views/base.py
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/views/base.py#L540-L560
def create_307_response(self): """ Creates a 307 "Temporary Redirect" response including a HTTP Warning header with code 299 that contains the user message received during processing the request. """ request = get_current_request() msg_mb = UserMessageMember(self.message) coll = request.root['_messages'] coll.add(msg_mb) # Figure out the new location URL. qs = self.__get_new_query_string(request.query_string, self.message.slug) resubmit_url = "%s?%s" % (request.path_url, qs) headers = [('Warning', '299 %s' % self.message.text), # ('Content-Type', cnt_type), ] http_exc = HttpWarningResubmit(location=resubmit_url, detail=self.message.text, headers=headers) return request.get_response(http_exc)
[ "def", "create_307_response", "(", "self", ")", ":", "request", "=", "get_current_request", "(", ")", "msg_mb", "=", "UserMessageMember", "(", "self", ".", "message", ")", "coll", "=", "request", ".", "root", "[", "'_messages'", "]", "coll", ".", "add", "(", "msg_mb", ")", "# Figure out the new location URL.", "qs", "=", "self", ".", "__get_new_query_string", "(", "request", ".", "query_string", ",", "self", ".", "message", ".", "slug", ")", "resubmit_url", "=", "\"%s?%s\"", "%", "(", "request", ".", "path_url", ",", "qs", ")", "headers", "=", "[", "(", "'Warning'", ",", "'299 %s'", "%", "self", ".", "message", ".", "text", ")", ",", "# ('Content-Type', cnt_type),", "]", "http_exc", "=", "HttpWarningResubmit", "(", "location", "=", "resubmit_url", ",", "detail", "=", "self", ".", "message", ".", "text", ",", "headers", "=", "headers", ")", "return", "request", ".", "get_response", "(", "http_exc", ")" ]
Creates a 307 "Temporary Redirect" response including a HTTP Warning header with code 299 that contains the user message received during processing the request.
[ "Creates", "a", "307", "Temporary", "Redirect", "response", "including", "a", "HTTP", "Warning", "header", "with", "code", "299", "that", "contains", "the", "user", "message", "received", "during", "processing", "the", "request", "." ]
python
train
SpriteLink/NIPAP
nipap-www/nipapwww/controllers/prefix.py
https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/nipap-www/nipapwww/controllers/prefix.py#L34-L47
def add(self): """ Add a prefix. """ # pass prefix to template - if we have any if 'prefix' in request.params: c.prefix = request.params['prefix'] else: c.prefix = '' c.search_opt_parent = "all" c.search_opt_child = "none" return render('/prefix_add.html')
[ "def", "add", "(", "self", ")", ":", "# pass prefix to template - if we have any", "if", "'prefix'", "in", "request", ".", "params", ":", "c", ".", "prefix", "=", "request", ".", "params", "[", "'prefix'", "]", "else", ":", "c", ".", "prefix", "=", "''", "c", ".", "search_opt_parent", "=", "\"all\"", "c", ".", "search_opt_child", "=", "\"none\"", "return", "render", "(", "'/prefix_add.html'", ")" ]
Add a prefix.
[ "Add", "a", "prefix", "." ]
python
train
fboender/ansible-cmdb
src/ansiblecmdb/ansible.py
https://github.com/fboender/ansible-cmdb/blob/ebd960ac10684e8c9ec2b12751bba2c4c9504ab7/src/ansiblecmdb/ansible.py#L155-L189
def _parse_hostvar_dir(self, inventory_path): """ Parse host_vars dir, if it exists. """ # inventory_path could point to a `hosts` file, or to a dir. So we # construct the location to the `host_vars` differently. if os.path.isdir(inventory_path): path = os.path.join(inventory_path, 'host_vars') else: path = os.path.join(os.path.dirname(inventory_path), 'host_vars') self.log.debug("Parsing host vars (dir): {0}".format(path)) if not os.path.exists(path): self.log.info("No such dir {0}".format(path)) return for entry in os.listdir(path): # Skip .git folder if entry == '.git': continue full_path = os.path.join(path, entry) # file or dir name is the hostname hostname = strip_exts(entry, ('.yml', '.yaml', '.json')) if os.path.isfile(full_path): # Parse contents of file as host vars. self._parse_hostvar_file(hostname, full_path) elif os.path.isdir(full_path): # Parse each file in the directory as a file containing # variables for the host. for file_entry in os.listdir(full_path): p = os.path.join(full_path, file_entry) if not os.path.isdir(p): self._parse_hostvar_file(hostname, p)
[ "def", "_parse_hostvar_dir", "(", "self", ",", "inventory_path", ")", ":", "# inventory_path could point to a `hosts` file, or to a dir. So we", "# construct the location to the `host_vars` differently.", "if", "os", ".", "path", ".", "isdir", "(", "inventory_path", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "inventory_path", ",", "'host_vars'", ")", "else", ":", "path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "inventory_path", ")", ",", "'host_vars'", ")", "self", ".", "log", ".", "debug", "(", "\"Parsing host vars (dir): {0}\"", ".", "format", "(", "path", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "self", ".", "log", ".", "info", "(", "\"No such dir {0}\"", ".", "format", "(", "path", ")", ")", "return", "for", "entry", "in", "os", ".", "listdir", "(", "path", ")", ":", "# Skip .git folder", "if", "entry", "==", "'.git'", ":", "continue", "full_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "entry", ")", "# file or dir name is the hostname", "hostname", "=", "strip_exts", "(", "entry", ",", "(", "'.yml'", ",", "'.yaml'", ",", "'.json'", ")", ")", "if", "os", ".", "path", ".", "isfile", "(", "full_path", ")", ":", "# Parse contents of file as host vars.", "self", ".", "_parse_hostvar_file", "(", "hostname", ",", "full_path", ")", "elif", "os", ".", "path", ".", "isdir", "(", "full_path", ")", ":", "# Parse each file in the directory as a file containing", "# variables for the host.", "for", "file_entry", "in", "os", ".", "listdir", "(", "full_path", ")", ":", "p", "=", "os", ".", "path", ".", "join", "(", "full_path", ",", "file_entry", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "p", ")", ":", "self", ".", "_parse_hostvar_file", "(", "hostname", ",", "p", ")" ]
Parse host_vars dir, if it exists.
[ "Parse", "host_vars", "dir", "if", "it", "exists", "." ]
python
train
bitshares/uptick
uptick/account.py
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/account.py#L198-L220
def cloneaccount(ctx, account_name, account): """ Clone an account This copies the owner and active permissions as well as the options (e.g. votes, memo key) """ from bitsharesbase import transactions, operations account = Account(account) op = { "fee": {"amount": 0, "asset_id": "1.3.0"}, "registrar": account["id"], "referrer": account["id"], "referrer_percent": 100, "name": account_name, "owner": account["owner"], "active": account["active"], "options": account["options"], "extensions": {}, "prefix": ctx.bitshares.rpc.chain_params["prefix"], } op = operations.Account_create(**op) print_tx(ctx.bitshares.finalizeOp(op, account, "active"))
[ "def", "cloneaccount", "(", "ctx", ",", "account_name", ",", "account", ")", ":", "from", "bitsharesbase", "import", "transactions", ",", "operations", "account", "=", "Account", "(", "account", ")", "op", "=", "{", "\"fee\"", ":", "{", "\"amount\"", ":", "0", ",", "\"asset_id\"", ":", "\"1.3.0\"", "}", ",", "\"registrar\"", ":", "account", "[", "\"id\"", "]", ",", "\"referrer\"", ":", "account", "[", "\"id\"", "]", ",", "\"referrer_percent\"", ":", "100", ",", "\"name\"", ":", "account_name", ",", "\"owner\"", ":", "account", "[", "\"owner\"", "]", ",", "\"active\"", ":", "account", "[", "\"active\"", "]", ",", "\"options\"", ":", "account", "[", "\"options\"", "]", ",", "\"extensions\"", ":", "{", "}", ",", "\"prefix\"", ":", "ctx", ".", "bitshares", ".", "rpc", ".", "chain_params", "[", "\"prefix\"", "]", ",", "}", "op", "=", "operations", ".", "Account_create", "(", "*", "*", "op", ")", "print_tx", "(", "ctx", ".", "bitshares", ".", "finalizeOp", "(", "op", ",", "account", ",", "\"active\"", ")", ")" ]
Clone an account This copies the owner and active permissions as well as the options (e.g. votes, memo key)
[ "Clone", "an", "account" ]
python
train
oscarbranson/latools
latools/helpers/helpers.py
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/helpers.py#L187-L218
def collate_data(in_dir, extension='.csv', out_dir=None):
    """
    Copy all csvs in a nested directory to a single directory.

    Function to copy all csvs from a directory, and place them in a new
    directory.

    Parameters
    ----------
    in_dir : str
        Input directory containing csv files in subfolders
    extension : str
        The extension that identifies your data files.
        Defaults to '.csv'.
    out_dir : str
        Destination directory

    Returns
    -------
    None
    """
    if out_dir is None:
        out_dir = './' + re.search('^\.(.*)', extension).groups(0)[0]

    if not os.path.isdir(out_dir):
        os.mkdir(out_dir)

    for p, d, fs in os.walk(in_dir):
        for f in fs:
            if extension in f:
                shutil.copy(p + '/' + f, out_dir + '/' + f)
    return
[ "def", "collate_data", "(", "in_dir", ",", "extension", "=", "'.csv'", ",", "out_dir", "=", "None", ")", ":", "if", "out_dir", "is", "None", ":", "out_dir", "=", "'./'", "+", "re", ".", "search", "(", "'^\\.(.*)'", ",", "extension", ")", ".", "groups", "(", "0", ")", "[", "0", "]", "if", "not", "os", ".", "path", ".", "isdir", "(", "out_dir", ")", ":", "os", ".", "mkdir", "(", "out_dir", ")", "for", "p", ",", "d", ",", "fs", "in", "os", ".", "walk", "(", "in_dir", ")", ":", "for", "f", "in", "fs", ":", "if", "extension", "in", "f", ":", "shutil", ".", "copy", "(", "p", "+", "'/'", "+", "f", ",", "out_dir", "+", "'/'", "+", "f", ")", "return" ]
Copy all csvs in a nested directory to a single directory. Function to copy all csvs from a directory, and place them in a new directory. Parameters ---------- in_dir : str Input directory containing csv files in subfolders extension : str The extension that identifies your data files. Defaults to '.csv'. out_dir : str Destination directory Returns ------- None
[ "Copy", "all", "csvs", "in", "nested", "directroy", "to", "single", "directory", "." ]
python
test
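A direct usage sketch; the directory names are placeholders:

collate_data('raw_runs', extension='.csv', out_dir='all_csvs')
# every *.csv found under raw_runs/ (recursively) ends up flat in all_csvs/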
aisthesis/pynance
pynance/opt/price.py
https://github.com/aisthesis/pynance/blob/9eb0d78b60fe2a324ed328d026fedb6dbe8f7f41/pynance/opt/price.py#L150-L182
def exps(self, opttype, strike): """ Prices for given strike on all available dates. Parameters ---------- opttype : str ('call' or 'put') strike : numeric Returns ---------- df : :class:`pandas.DataFrame` eq : float Price of underlying. qt : :class:`datetime.datetime` Time of quote. See Also -------- :meth:`strikes` """ _relevant = _relevant_rows(self.data, (strike, slice(None), opttype,), "No key for {} {}".format(strike, opttype)) _index = _relevant.index.get_level_values('Expiry') _columns = ['Price', 'Time_Val', 'Last', 'Bid', 'Ask', 'Vol', 'Open_Int'] _df = pd.DataFrame(index=_index, columns=_columns) _eq = _relevant.loc[:, 'Underlying_Price'].values[0] _qt = pd.to_datetime(_relevant.loc[:, 'Quote_Time'].values[0], utc=True).to_datetime() for _col in _columns[2:]: _df.loc[:, _col] = _relevant.loc[:, _col].values _df.loc[:, 'Price'] = (_df.loc[:, 'Bid'] + _df.loc[:, 'Ask']) / 2. _set_tv_other_ix(_df, opttype, 'Price', 'Time_Val', _eq, strike) return _df, _eq, _qt
[ "def", "exps", "(", "self", ",", "opttype", ",", "strike", ")", ":", "_relevant", "=", "_relevant_rows", "(", "self", ".", "data", ",", "(", "strike", ",", "slice", "(", "None", ")", ",", "opttype", ",", ")", ",", "\"No key for {} {}\"", ".", "format", "(", "strike", ",", "opttype", ")", ")", "_index", "=", "_relevant", ".", "index", ".", "get_level_values", "(", "'Expiry'", ")", "_columns", "=", "[", "'Price'", ",", "'Time_Val'", ",", "'Last'", ",", "'Bid'", ",", "'Ask'", ",", "'Vol'", ",", "'Open_Int'", "]", "_df", "=", "pd", ".", "DataFrame", "(", "index", "=", "_index", ",", "columns", "=", "_columns", ")", "_eq", "=", "_relevant", ".", "loc", "[", ":", ",", "'Underlying_Price'", "]", ".", "values", "[", "0", "]", "_qt", "=", "pd", ".", "to_datetime", "(", "_relevant", ".", "loc", "[", ":", ",", "'Quote_Time'", "]", ".", "values", "[", "0", "]", ",", "utc", "=", "True", ")", ".", "to_datetime", "(", ")", "for", "_col", "in", "_columns", "[", "2", ":", "]", ":", "_df", ".", "loc", "[", ":", ",", "_col", "]", "=", "_relevant", ".", "loc", "[", ":", ",", "_col", "]", ".", "values", "_df", ".", "loc", "[", ":", ",", "'Price'", "]", "=", "(", "_df", ".", "loc", "[", ":", ",", "'Bid'", "]", "+", "_df", ".", "loc", "[", ":", ",", "'Ask'", "]", ")", "/", "2.", "_set_tv_other_ix", "(", "_df", ",", "opttype", ",", "'Price'", ",", "'Time_Val'", ",", "_eq", ",", "strike", ")", "return", "_df", ",", "_eq", ",", "_qt" ]
Prices for given strike on all available dates. Parameters ---------- opttype : str ('call' or 'put') strike : numeric Returns ---------- df : :class:`pandas.DataFrame` eq : float Price of underlying. qt : :class:`datetime.datetime` Time of quote. See Also -------- :meth:`strikes`
[ "Prices", "for", "given", "strike", "on", "all", "available", "dates", "." ]
python
train
bakwc/PySyncObj
pysyncobj/syncobj.py
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/syncobj.py#L1298-L1373
def replicated(*decArgs, **decKwargs): """Replicated decorator. Use it to mark your class members that modifies a class state. Function will be called asynchronously. Function accepts flowing additional parameters (optional): 'callback': callback(result, failReason), failReason - `FAIL_REASON <#pysyncobj.FAIL_REASON>`_. 'sync': True - to block execution and wait for result, False - async call. If callback is passed, 'sync' option is ignored. 'timeout': if 'sync' is enabled, and no result is available for 'timeout' seconds - SyncObjException will be raised. These parameters are reserved and should not be used in kwargs of your replicated method. :param func: arbitrary class member :type func: function :param ver: (optional) - code version (for zero deployment) :type ver: int """ def replicatedImpl(func): def newFunc(self, *args, **kwargs): if kwargs.pop('_doApply', False): return func(self, *args, **kwargs) else: if isinstance(self, SyncObj): applier = self._applyCommand funcName = self._getFuncName(func.__name__) funcID = self._methodToID[funcName] elif isinstance(self, SyncObjConsumer): consumerId = id(self) funcName = self._syncObj._getFuncName((consumerId, func.__name__)) funcID = self._syncObj._methodToID[(consumerId, funcName)] applier = self._syncObj._applyCommand else: raise SyncObjException("Class should be inherited from SyncObj or SyncObjConsumer") callback = kwargs.pop('callback', None) if kwargs: cmd = (funcID, args, kwargs) elif args and not kwargs: cmd = (funcID, args) else: cmd = funcID sync = kwargs.pop('sync', False) if callback is not None: sync = False if sync: asyncResult = AsyncResult() callback = asyncResult.onResult timeout = kwargs.pop('timeout', None) applier(pickle.dumps(cmd), callback, _COMMAND_TYPE.REGULAR) if sync: res = asyncResult.event.wait(timeout) if not res: raise SyncObjException('Timeout') if not asyncResult.error == 0: raise SyncObjException(asyncResult.error) return asyncResult.result func_dict = newFunc.__dict__ if is_py3 else newFunc.func_dict func_dict['replicated'] = True func_dict['ver'] = int(decKwargs.get('ver', 0)) func_dict['origName'] = func.__name__ callframe = sys._getframe(1 if decKwargs else 2) namespace = callframe.f_locals newFuncName = func.__name__ + '_v' + str(func_dict['ver']) namespace[newFuncName] = __copy_func(newFunc, newFuncName) functools.update_wrapper(newFunc, func) return newFunc if len(decArgs) == 1 and len(decKwargs) == 0 and callable(decArgs[0]): return replicatedImpl(decArgs[0]) return replicatedImpl
[ "def", "replicated", "(", "*", "decArgs", ",", "*", "*", "decKwargs", ")", ":", "def", "replicatedImpl", "(", "func", ")", ":", "def", "newFunc", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ".", "pop", "(", "'_doApply'", ",", "False", ")", ":", "return", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "if", "isinstance", "(", "self", ",", "SyncObj", ")", ":", "applier", "=", "self", ".", "_applyCommand", "funcName", "=", "self", ".", "_getFuncName", "(", "func", ".", "__name__", ")", "funcID", "=", "self", ".", "_methodToID", "[", "funcName", "]", "elif", "isinstance", "(", "self", ",", "SyncObjConsumer", ")", ":", "consumerId", "=", "id", "(", "self", ")", "funcName", "=", "self", ".", "_syncObj", ".", "_getFuncName", "(", "(", "consumerId", ",", "func", ".", "__name__", ")", ")", "funcID", "=", "self", ".", "_syncObj", ".", "_methodToID", "[", "(", "consumerId", ",", "funcName", ")", "]", "applier", "=", "self", ".", "_syncObj", ".", "_applyCommand", "else", ":", "raise", "SyncObjException", "(", "\"Class should be inherited from SyncObj or SyncObjConsumer\"", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "None", ")", "if", "kwargs", ":", "cmd", "=", "(", "funcID", ",", "args", ",", "kwargs", ")", "elif", "args", "and", "not", "kwargs", ":", "cmd", "=", "(", "funcID", ",", "args", ")", "else", ":", "cmd", "=", "funcID", "sync", "=", "kwargs", ".", "pop", "(", "'sync'", ",", "False", ")", "if", "callback", "is", "not", "None", ":", "sync", "=", "False", "if", "sync", ":", "asyncResult", "=", "AsyncResult", "(", ")", "callback", "=", "asyncResult", ".", "onResult", "timeout", "=", "kwargs", ".", "pop", "(", "'timeout'", ",", "None", ")", "applier", "(", "pickle", ".", "dumps", "(", "cmd", ")", ",", "callback", ",", "_COMMAND_TYPE", ".", "REGULAR", ")", "if", "sync", ":", "res", "=", "asyncResult", ".", "event", ".", "wait", "(", "timeout", ")", "if", "not", "res", ":", "raise", "SyncObjException", "(", "'Timeout'", ")", "if", "not", "asyncResult", ".", "error", "==", "0", ":", "raise", "SyncObjException", "(", "asyncResult", ".", "error", ")", "return", "asyncResult", ".", "result", "func_dict", "=", "newFunc", ".", "__dict__", "if", "is_py3", "else", "newFunc", ".", "func_dict", "func_dict", "[", "'replicated'", "]", "=", "True", "func_dict", "[", "'ver'", "]", "=", "int", "(", "decKwargs", ".", "get", "(", "'ver'", ",", "0", ")", ")", "func_dict", "[", "'origName'", "]", "=", "func", ".", "__name__", "callframe", "=", "sys", ".", "_getframe", "(", "1", "if", "decKwargs", "else", "2", ")", "namespace", "=", "callframe", ".", "f_locals", "newFuncName", "=", "func", ".", "__name__", "+", "'_v'", "+", "str", "(", "func_dict", "[", "'ver'", "]", ")", "namespace", "[", "newFuncName", "]", "=", "__copy_func", "(", "newFunc", ",", "newFuncName", ")", "functools", ".", "update_wrapper", "(", "newFunc", ",", "func", ")", "return", "newFunc", "if", "len", "(", "decArgs", ")", "==", "1", "and", "len", "(", "decKwargs", ")", "==", "0", "and", "callable", "(", "decArgs", "[", "0", "]", ")", ":", "return", "replicatedImpl", "(", "decArgs", "[", "0", "]", ")", "return", "replicatedImpl" ]
Replicated decorator. Use it to mark your class members that modify a class state. Function will be called asynchronously. Function accepts the following additional parameters (optional): 'callback': callback(result, failReason), failReason - `FAIL_REASON <#pysyncobj.FAIL_REASON>`_. 'sync': True - to block execution and wait for result, False - async call. If callback is passed, 'sync' option is ignored. 'timeout': if 'sync' is enabled, and no result is available for 'timeout' seconds - SyncObjException will be raised. These parameters are reserved and should not be used in kwargs of your replicated method. :param func: arbitrary class member :type func: function :param ver: (optional) - code version (for zero deployment) :type ver: int
[ "Replicated", "decorator", ".", "Use", "it", "to", "mark", "your", "class", "members", "that", "modifies", "a", "class", "state", ".", "Function", "will", "be", "called", "asynchronously", ".", "Function", "accepts", "flowing", "additional", "parameters", "(", "optional", ")", ":", "callback", ":", "callback", "(", "result", "failReason", ")", "failReason", "-", "FAIL_REASON", "<#pysyncobj", ".", "FAIL_REASON", ">", "_", ".", "sync", ":", "True", "-", "to", "block", "execution", "and", "wait", "for", "result", "False", "-", "async", "call", ".", "If", "callback", "is", "passed", "sync", "option", "is", "ignored", ".", "timeout", ":", "if", "sync", "is", "enabled", "and", "no", "result", "is", "available", "for", "timeout", "seconds", "-", "SyncObjException", "will", "be", "raised", ".", "These", "parameters", "are", "reserved", "and", "should", "not", "be", "used", "in", "kwargs", "of", "your", "replicated", "method", "." ]
python
test
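A minimal usage sketch for the decorator above, assuming pysyncobj's SyncObj base class is importable; the node addresses are placeholders, and a real deployment runs one process per address.

from pysyncobj import SyncObj, replicated

class Counter(SyncObj):
    def __init__(self, self_addr, partner_addrs):
        super(Counter, self).__init__(self_addr, partner_addrs)
        self.__value = 0

    @replicated
    def increment(self):
        # mutates replicated state, so it must be marked @replicated
        self.__value += 1
        return self.__value

counter = Counter('127.0.0.1:4321', ['127.0.0.1:4322'])
# block until the command is applied cluster-wide, or raise SyncObjException
counter.increment(sync=True, timeout=5.0)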
dshean/pygeotools
pygeotools/lib/timelib.py
https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/timelib.py#L488-L494
def dt2jd(dt): """Convert datetime to julian date """ a = (14 - dt.month)//12 y = dt.year + 4800 - a m = dt.month + 12*a - 3 return dt.day + ((153*m + 2)//5) + 365*y + y//4 - y//100 + y//400 - 32045
[ "def", "dt2jd", "(", "dt", ")", ":", "a", "=", "(", "14", "-", "dt", ".", "month", ")", "//", "12", "y", "=", "dt", ".", "year", "+", "4800", "-", "a", "m", "=", "dt", ".", "month", "+", "12", "*", "a", "-", "3", "return", "dt", ".", "day", "+", "(", "(", "153", "*", "m", "+", "2", ")", "//", "5", ")", "+", "365", "*", "y", "+", "y", "//", "4", "-", "y", "//", "100", "+", "y", "//", "400", "-", "32045" ]
Convert datetime to julian date
[ "Convert", "datetime", "to", "julian", "date" ]
python
train
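A quick sanity check of the conversion (the function is restated so the snippet runs standalone): the Julian day number of 2000-01-01 is the well-known 2451545.

import datetime

def dt2jd(dt):  # restated from the record above
    a = (14 - dt.month)//12
    y = dt.year + 4800 - a
    m = dt.month + 12*a - 3
    return dt.day + ((153*m + 2)//5) + 365*y + y//4 - y//100 + y//400 - 32045

print(dt2jd(datetime.datetime(2000, 1, 1)))  # 2451545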
lsst-sqre/lsst-projectmeta-kit
lsstprojectmeta/jsonld.py
https://github.com/lsst-sqre/lsst-projectmeta-kit/blob/ac8d4ff65bb93d8fdeb1b46ae6eb5d7414f1ae14/lsstprojectmeta/jsonld.py#L46-L60
def _encode_datetime(self, dt):
    """Encode a datetime in the format '%Y-%m-%dT%H:%M:%SZ'.

    The datetime can be naive (doesn't have timezone info) or aware
    (it does have a tzinfo attribute set). Regardless, the datetime
    is transformed into UTC.
    """
    if dt.tzinfo is None:
        # Force it to be a UTC datetime
        dt = dt.replace(tzinfo=datetime.timezone.utc)

    # Convert to UTC (no matter what)
    dt = dt.astimezone(datetime.timezone.utc)

    return dt.strftime('%Y-%m-%dT%H:%M:%SZ')
[ "def", "_encode_datetime", "(", "self", ",", "dt", ")", ":", "if", "dt", ".", "tzinfo", "is", "None", ":", "# Force it to be a UTC datetime", "dt", "=", "dt", ".", "replace", "(", "tzinfo", "=", "datetime", ".", "timezone", ".", "utc", ")", "# Convert to UTC (no matter what)", "dt", "=", "dt", ".", "astimezone", "(", "datetime", ".", "timezone", ".", "utc", ")", "return", "dt", ".", "strftime", "(", "'%Y-%m-%dT%H:%M:%SZ'", ")" ]
Encode a datetime in the format '%Y-%m-%dT%H:%M:%SZ'.

The datetime can be naive (doesn't have timezone info) or aware
(it does have a tzinfo attribute set). Regardless, the datetime
is transformed into UTC.
[ "Encode", "a", "datetime", "in", "the", "format", "%Y", "-", "%m", "-", "%dT%H", ":", "%M", ":", "%SZ", "." ]
python
valid
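A standalone sketch of the same logic (renamed encode_datetime, since the original is a method): a naive noon and an aware 13:00 at UTC+1 encode to the same UTC string.

import datetime

def encode_datetime(dt):  # standalone restatement of the method above
    if dt.tzinfo is None:
        dt = dt.replace(tzinfo=datetime.timezone.utc)
    dt = dt.astimezone(datetime.timezone.utc)
    return dt.strftime('%Y-%m-%dT%H:%M:%SZ')

naive = datetime.datetime(2018, 1, 1, 12, 0, 0)
aware = datetime.datetime(2018, 1, 1, 13, 0, 0,
                          tzinfo=datetime.timezone(datetime.timedelta(hours=1)))
print(encode_datetime(naive))  # 2018-01-01T12:00:00Z
print(encode_datetime(aware))  # 2018-01-01T12:00:00Z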
Microsoft/knack
knack/deprecation.py
https://github.com/Microsoft/knack/blob/5f1a480a33f103e2688c46eef59fb2d9eaf2baad/knack/deprecation.py#L53-L62
def ensure_new_style_deprecation(cli_ctx, kwargs, object_type): """ Helper method to make the previous string-based deprecate_info kwarg work with the new style. """ deprecate_info = kwargs.get('deprecate_info', None) if isinstance(deprecate_info, Deprecated): deprecate_info.object_type = object_type elif isinstance(deprecate_info, STRING_TYPES): deprecate_info = Deprecated(cli_ctx, redirect=deprecate_info, object_type=object_type) kwargs['deprecate_info'] = deprecate_info return deprecate_info
[ "def", "ensure_new_style_deprecation", "(", "cli_ctx", ",", "kwargs", ",", "object_type", ")", ":", "deprecate_info", "=", "kwargs", ".", "get", "(", "'deprecate_info'", ",", "None", ")", "if", "isinstance", "(", "deprecate_info", ",", "Deprecated", ")", ":", "deprecate_info", ".", "object_type", "=", "object_type", "elif", "isinstance", "(", "deprecate_info", ",", "STRING_TYPES", ")", ":", "deprecate_info", "=", "Deprecated", "(", "cli_ctx", ",", "redirect", "=", "deprecate_info", ",", "object_type", "=", "object_type", ")", "kwargs", "[", "'deprecate_info'", "]", "=", "deprecate_info", "return", "deprecate_info" ]
Helper method to make the previous string-based deprecate_info kwarg work with the new style.
[ "Helper", "method", "to", "make", "the", "previous", "string", "-", "based", "deprecate_info", "kwarg", "work", "with", "the", "new", "style", "." ]
python
train
indranilsinharoy/pyzos
pyzos/zos.py
https://github.com/indranilsinharoy/pyzos/blob/da6bf3296b0154ccee44ad9a4286055ae031ecc7/pyzos/zos.py#L396-L414
def zSetSurfaceData(self, surfNum, radius=None, thick=None, material=None, semidia=None, conic=None, comment=None): """Sets surface data""" if self.pMode == 0: # Sequential mode surf = self.pLDE.GetSurfaceAt(surfNum) if radius is not None: surf.pRadius = radius if thick is not None: surf.pThickness = thick if material is not None: surf.pMaterial = material if semidia is not None: surf.pSemiDiameter = semidia if conic is not None: surf.pConic = conic if comment is not None: surf.pComment = comment else: raise NotImplementedError('Function not implemented for non-sequential mode')
[ "def", "zSetSurfaceData", "(", "self", ",", "surfNum", ",", "radius", "=", "None", ",", "thick", "=", "None", ",", "material", "=", "None", ",", "semidia", "=", "None", ",", "conic", "=", "None", ",", "comment", "=", "None", ")", ":", "if", "self", ".", "pMode", "==", "0", ":", "# Sequential mode", "surf", "=", "self", ".", "pLDE", ".", "GetSurfaceAt", "(", "surfNum", ")", "if", "radius", "is", "not", "None", ":", "surf", ".", "pRadius", "=", "radius", "if", "thick", "is", "not", "None", ":", "surf", ".", "pThickness", "=", "thick", "if", "material", "is", "not", "None", ":", "surf", ".", "pMaterial", "=", "material", "if", "semidia", "is", "not", "None", ":", "surf", ".", "pSemiDiameter", "=", "semidia", "if", "conic", "is", "not", "None", ":", "surf", ".", "pConic", "=", "conic", "if", "comment", "is", "not", "None", ":", "surf", ".", "pComment", "=", "comment", "else", ":", "raise", "NotImplementedError", "(", "'Function not implemented for non-sequential mode'", ")" ]
Sets surface data
[ "Sets", "surface", "data" ]
python
train
theislab/scanpy
scanpy/plotting/_tools/scatterplots.py
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/plotting/_tools/scatterplots.py#L651-L736
def _get_color_values(adata, value_to_plot, groups=None, palette=None, use_raw=False, gene_symbols=None, layer=None): """ Returns the value or color associated to each data point. For categorical data, the return value is list of colors taken from the category palette or from the given `palette` value. For non-categorical data, the values are returned """ ### # when plotting, the color of the dots is determined for each plot # the data is either categorical or continuous and the data could be in # 'obs' or in 'var' categorical = False if value_to_plot is None: color_vector = 'lightgray' # check if value to plot is in obs elif value_to_plot in adata.obs.columns: if is_categorical_dtype(adata.obs[value_to_plot]): categorical = True if palette: # use category colors base on given palette _set_colors_for_categorical_obs(adata, value_to_plot, palette) else: if value_to_plot + '_colors' not in adata.uns or \ len(adata.uns[value_to_plot + '_colors']) < len(adata.obs[value_to_plot].cat.categories): # set a default palette in case that no colors or few colors are found _set_default_colors_for_categorical_obs(adata, value_to_plot) else: # check that the colors in 'uns' are valid _palette = [] for color in adata.uns[value_to_plot + '_colors']: if not is_color_like(color): # check if the color is a valid R color and translate it # to a valid hex color value if color in utils.additional_colors: color = utils.additional_colors[color] else: logg.warn("The following color value found in adata.uns['{}'] " " is not valid: '{}'. Default colors are used.".format(value_to_plot + '_colors', color)) _set_default_colors_for_categorical_obs(adata, value_to_plot) _palette = None break _palette.append(color) if _palette is not None: adata.uns[value_to_plot + '_colors'] = _palette # for categorical data, colors should be # stored in adata.uns[value_to_plot + '_colors'] # Obtain color vector by converting every category # into its respective color color_vector = [adata.uns[value_to_plot + '_colors'][x] for x in adata.obs[value_to_plot].cat.codes] if groups is not None: if isinstance(groups, str): groups = [groups] color_vector = np.array(color_vector, dtype='<U15') # set color to 'light gray' for all values # that are not in the groups color_vector[~adata.obs[value_to_plot].isin(groups)] = "lightgray" else: color_vector = adata.obs[value_to_plot].values # when value_to_plot is not in adata.obs else: if gene_symbols is not None and gene_symbols in adata.var.columns: if value_to_plot not in adata.var[gene_symbols].values: logg.error("Gene symbol {!r} not found in given gene_symbols " "column: {!r}".format(value_to_plot, gene_symbols)) return value_to_plot = adata.var[adata.var[gene_symbols] == value_to_plot].index[0] if layer is not None and value_to_plot in adata.var_names: if layer not in adata.layers.keys(): raise KeyError('Selected layer: {} is not in the layers list. The list of ' 'valid layers is: {}'.format(layer, adata.layers.keys())) color_vector = adata[:, value_to_plot].layers[layer] elif use_raw and value_to_plot in adata.raw.var_names: color_vector = adata.raw[:, value_to_plot].X elif value_to_plot in adata.var_names: color_vector = adata[:, value_to_plot].X else: raise ValueError("The passed `color` {} is not a valid observation annotation " "or variable name. Valid observation annotation keys are: {}" .format(value_to_plot, adata.obs.columns)) return color_vector, categorical
[ "def", "_get_color_values", "(", "adata", ",", "value_to_plot", ",", "groups", "=", "None", ",", "palette", "=", "None", ",", "use_raw", "=", "False", ",", "gene_symbols", "=", "None", ",", "layer", "=", "None", ")", ":", "###", "# when plotting, the color of the dots is determined for each plot", "# the data is either categorical or continuous and the data could be in", "# 'obs' or in 'var'", "categorical", "=", "False", "if", "value_to_plot", "is", "None", ":", "color_vector", "=", "'lightgray'", "# check if value to plot is in obs", "elif", "value_to_plot", "in", "adata", ".", "obs", ".", "columns", ":", "if", "is_categorical_dtype", "(", "adata", ".", "obs", "[", "value_to_plot", "]", ")", ":", "categorical", "=", "True", "if", "palette", ":", "# use category colors base on given palette", "_set_colors_for_categorical_obs", "(", "adata", ",", "value_to_plot", ",", "palette", ")", "else", ":", "if", "value_to_plot", "+", "'_colors'", "not", "in", "adata", ".", "uns", "or", "len", "(", "adata", ".", "uns", "[", "value_to_plot", "+", "'_colors'", "]", ")", "<", "len", "(", "adata", ".", "obs", "[", "value_to_plot", "]", ".", "cat", ".", "categories", ")", ":", "# set a default palette in case that no colors or few colors are found", "_set_default_colors_for_categorical_obs", "(", "adata", ",", "value_to_plot", ")", "else", ":", "# check that the colors in 'uns' are valid", "_palette", "=", "[", "]", "for", "color", "in", "adata", ".", "uns", "[", "value_to_plot", "+", "'_colors'", "]", ":", "if", "not", "is_color_like", "(", "color", ")", ":", "# check if the color is a valid R color and translate it", "# to a valid hex color value", "if", "color", "in", "utils", ".", "additional_colors", ":", "color", "=", "utils", ".", "additional_colors", "[", "color", "]", "else", ":", "logg", ".", "warn", "(", "\"The following color value found in adata.uns['{}'] \"", "\" is not valid: '{}'. 
Default colors are used.\"", ".", "format", "(", "value_to_plot", "+", "'_colors'", ",", "color", ")", ")", "_set_default_colors_for_categorical_obs", "(", "adata", ",", "value_to_plot", ")", "_palette", "=", "None", "break", "_palette", ".", "append", "(", "color", ")", "if", "_palette", "is", "not", "None", ":", "adata", ".", "uns", "[", "value_to_plot", "+", "'_colors'", "]", "=", "_palette", "# for categorical data, colors should be", "# stored in adata.uns[value_to_plot + '_colors']", "# Obtain color vector by converting every category", "# into its respective color", "color_vector", "=", "[", "adata", ".", "uns", "[", "value_to_plot", "+", "'_colors'", "]", "[", "x", "]", "for", "x", "in", "adata", ".", "obs", "[", "value_to_plot", "]", ".", "cat", ".", "codes", "]", "if", "groups", "is", "not", "None", ":", "if", "isinstance", "(", "groups", ",", "str", ")", ":", "groups", "=", "[", "groups", "]", "color_vector", "=", "np", ".", "array", "(", "color_vector", ",", "dtype", "=", "'<U15'", ")", "# set color to 'light gray' for all values", "# that are not in the groups", "color_vector", "[", "~", "adata", ".", "obs", "[", "value_to_plot", "]", ".", "isin", "(", "groups", ")", "]", "=", "\"lightgray\"", "else", ":", "color_vector", "=", "adata", ".", "obs", "[", "value_to_plot", "]", ".", "values", "# when value_to_plot is not in adata.obs", "else", ":", "if", "gene_symbols", "is", "not", "None", "and", "gene_symbols", "in", "adata", ".", "var", ".", "columns", ":", "if", "value_to_plot", "not", "in", "adata", ".", "var", "[", "gene_symbols", "]", ".", "values", ":", "logg", ".", "error", "(", "\"Gene symbol {!r} not found in given gene_symbols \"", "\"column: {!r}\"", ".", "format", "(", "value_to_plot", ",", "gene_symbols", ")", ")", "return", "value_to_plot", "=", "adata", ".", "var", "[", "adata", ".", "var", "[", "gene_symbols", "]", "==", "value_to_plot", "]", ".", "index", "[", "0", "]", "if", "layer", "is", "not", "None", "and", "value_to_plot", "in", "adata", ".", "var_names", ":", "if", "layer", "not", "in", "adata", ".", "layers", ".", "keys", "(", ")", ":", "raise", "KeyError", "(", "'Selected layer: {} is not in the layers list. The list of '", "'valid layers is: {}'", ".", "format", "(", "layer", ",", "adata", ".", "layers", ".", "keys", "(", ")", ")", ")", "color_vector", "=", "adata", "[", ":", ",", "value_to_plot", "]", ".", "layers", "[", "layer", "]", "elif", "use_raw", "and", "value_to_plot", "in", "adata", ".", "raw", ".", "var_names", ":", "color_vector", "=", "adata", ".", "raw", "[", ":", ",", "value_to_plot", "]", ".", "X", "elif", "value_to_plot", "in", "adata", ".", "var_names", ":", "color_vector", "=", "adata", "[", ":", ",", "value_to_plot", "]", ".", "X", "else", ":", "raise", "ValueError", "(", "\"The passed `color` {} is not a valid observation annotation \"", "\"or variable name. Valid observation annotation keys are: {}\"", ".", "format", "(", "value_to_plot", ",", "adata", ".", "obs", ".", "columns", ")", ")", "return", "color_vector", ",", "categorical" ]
Returns the value or color associated with each data point. For categorical data, the return value is a list of colors taken from the category palette or from the given `palette` value. For non-categorical data, the values themselves are returned.
[ "Returns", "the", "value", "or", "color", "associated", "to", "each", "data", "point", ".", "For", "categorical", "data", "the", "return", "value", "is", "list", "of", "colors", "taken", "from", "the", "category", "palette", "or", "from", "the", "given", "palette", "value", "." ]
python
train
ahmontero/dop
dop/client.py
https://github.com/ahmontero/dop/blob/40354ac6feefe92a7555fe2d1834138c9a03e518/dop/client.py#L724-L742
def domain_records(self, domain_id): """ This method returns all of your current domain records. Required parameters domain_id: Integer or Domain Name (e.g. domain.com), specifies the domain for which to retrieve records. """ json = self.request('/domains/%s/records' % domain_id, method='GET') status = json.get('status') if status == 'OK': records_json = json.get('records', []) records = [Record.from_json(record) for record in records_json] return records else: message = json.get('message') raise DOPException('[%s]: %s' % (status, message))
[ "def", "domain_records", "(", "self", ",", "domain_id", ")", ":", "json", "=", "self", ".", "request", "(", "'/domains/%s/records'", "%", "domain_id", ",", "method", "=", "'GET'", ")", "status", "=", "json", ".", "get", "(", "'status'", ")", "if", "status", "==", "'OK'", ":", "records_json", "=", "json", ".", "get", "(", "'records'", ",", "[", "]", ")", "records", "=", "[", "Record", ".", "from_json", "(", "record", ")", "for", "record", "in", "records_json", "]", "return", "records", "else", ":", "message", "=", "json", ".", "get", "(", "'message'", ")", "raise", "DOPException", "(", "'[%s]: %s'", "%", "(", "status", ",", "message", ")", ")" ]
This method returns all of your current domain records. Required parameters domain_id: Integer or Domain Name (e.g. domain.com), specifies the domain for which to retrieve records.
[ "This", "method", "returns", "all", "of", "your", "current", "domain", "records", "." ]
python
train
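A hedged usage sketch; the Client constructor arguments below are assumptions about the dop package (the DigitalOcean v1 API used client_id/api_key pairs), and the credentials and domain name are placeholders.

from dop.client import Client

client = Client('CLIENT_ID', 'API_KEY')  # placeholder credentials
for record in client.domain_records('example.com'):
    print(record)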
waqasbhatti/astrobase
astrobase/cpserver/checkplotlist.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/cpserver/checkplotlist.py#L169-L203
def checkplot_infokey_worker(task): '''This gets the required keys from the requested file. Parameters ---------- task : tuple Task is a two element tuple:: - task[0] is the dict to work on - task[1] is a list of lists of str indicating all the key address to extract items from the dict for Returns ------- list This is a list of all of the items at the requested key addresses. ''' cpf, keys = task cpd = _read_checkplot_picklefile(cpf) resultkeys = [] for k in keys: try: resultkeys.append(_dict_get(cpd, k)) except Exception as e: resultkeys.append(np.nan) return resultkeys
[ "def", "checkplot_infokey_worker", "(", "task", ")", ":", "cpf", ",", "keys", "=", "task", "cpd", "=", "_read_checkplot_picklefile", "(", "cpf", ")", "resultkeys", "=", "[", "]", "for", "k", "in", "keys", ":", "try", ":", "resultkeys", ".", "append", "(", "_dict_get", "(", "cpd", ",", "k", ")", ")", "except", "Exception", "as", "e", ":", "resultkeys", ".", "append", "(", "np", ".", "nan", ")", "return", "resultkeys" ]
This gets the required keys from the requested file.

Parameters
----------

task : tuple
    Task is a two element tuple::

        - task[0] is the dict to work on
        - task[1] is a list of lists of str indicating all the key
          addresses for which to extract items from the dict

Returns
-------

list
    This is a list of all of the items at the requested key addresses.
[ "This", "gets", "the", "required", "keys", "from", "the", "requested", "file", "." ]
python
valid
knipknap/SpiffWorkflow
SpiffWorkflow/bpmn/serializer/Packager.py
https://github.com/knipknap/SpiffWorkflow/blob/f0af7f59a332e0619e4f3c00a7d4a3d230760e00/SpiffWorkflow/bpmn/serializer/Packager.py#L240-L256
def pre_parse_and_validate_signavio(self, bpmn, filename):
    """
    This is the Signavio specific editor hook for pre-parsing and
    validation.

    A subclass can override this method to provide additional parsing or
    validation. It should call the parent method first.

    :param bpmn: an lxml tree of the bpmn content

    :param filename: the source file name

    This must return the updated bpmn object (or a replacement)
    """
    self._check_for_disconnected_boundary_events_signavio(bpmn, filename)
    self._fix_call_activities_signavio(bpmn, filename)
    return bpmn
[ "def", "pre_parse_and_validate_signavio", "(", "self", ",", "bpmn", ",", "filename", ")", ":", "self", ".", "_check_for_disconnected_boundary_events_signavio", "(", "bpmn", ",", "filename", ")", "self", ".", "_fix_call_activities_signavio", "(", "bpmn", ",", "filename", ")", "return", "bpmn" ]
This is the Signavio specific editor hook for pre-parsing and
validation.

A subclass can override this method to provide additional parsing or
validation. It should call the parent method first.

:param bpmn: an lxml tree of the bpmn content

:param filename: the source file name

This must return the updated bpmn object (or a replacement)
[ "This", "is", "the", "Signavio", "specific", "editor", "hook", "for", "pre", "-", "parsing", "and", "validation", "." ]
python
valid
saltstack/salt
salt/modules/mac_user.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_user.py#L437-L456
def list_groups(name): ''' Return a list of groups the named user belongs to. name The name of the user for which to list groups. Starting in Salt 2016.11.0, all groups for the user, including groups beginning with an underscore will be listed. .. versionchanged:: 2016.11.0 CLI Example: .. code-block:: bash salt '*' user.list_groups foo ''' groups = [group for group in salt.utils.user.get_group_list(name)] return groups
[ "def", "list_groups", "(", "name", ")", ":", "groups", "=", "[", "group", "for", "group", "in", "salt", ".", "utils", ".", "user", ".", "get_group_list", "(", "name", ")", "]", "return", "groups" ]
Return a list of groups the named user belongs to. name The name of the user for which to list groups. Starting in Salt 2016.11.0, all groups for the user, including groups beginning with an underscore will be listed. .. versionchanged:: 2016.11.0 CLI Example: .. code-block:: bash salt '*' user.list_groups foo
[ "Return", "a", "list", "of", "groups", "the", "named", "user", "belongs", "to", "." ]
python
train
jasonrbriggs/proton
python/proton/xmlutils.py
https://github.com/jasonrbriggs/proton/blob/e734734750797ef0caaa1680379e07b86d7a53e3/python/proton/xmlutils.py#L42-L49
def parseelement(elem): ''' Convert the content of an element into more ElementTree structures. We do this because sometimes we want to set xml as the content of an element. ''' xml = '<%(tag)s>%(content)s</%(tag)s>' % {'tag' : elem.tag, 'content' : elem.text} et = etree.fromstring(xml) replaceelement(elem, et)
[ "def", "parseelement", "(", "elem", ")", ":", "xml", "=", "'<%(tag)s>%(content)s</%(tag)s>'", "%", "{", "'tag'", ":", "elem", ".", "tag", ",", "'content'", ":", "elem", ".", "text", "}", "et", "=", "etree", ".", "fromstring", "(", "xml", ")", "replaceelement", "(", "elem", ",", "et", ")" ]
Convert the content of an element into more ElementTree structures. We do this because sometimes we want to set xml as the content of an element.
[ "Convert", "the", "content", "of", "an", "element", "into", "more", "ElementTree", "structures", ".", "We", "do", "this", "because", "sometimes", "we", "want", "to", "set", "xml", "as", "the", "content", "of", "an", "element", "." ]
python
train
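A standalone sketch of the idea using only the standard library; replaceelement() from the same module is what would splice the re-parsed subtree back in place.

import xml.etree.ElementTree as etree

# the element's text is itself XML markup (escaped in the source document)
elem = etree.fromstring('<cell>&lt;b&gt;bold&lt;/b&gt;</cell>')
xml = '<%(tag)s>%(content)s</%(tag)s>' % {'tag': elem.tag, 'content': elem.text}
et = etree.fromstring(xml)
print(etree.tostring(et))  # b'<cell><b>bold</b></cell>'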
raphaelgyory/django-rest-messaging
rest_messaging/models.py
https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/models.py#L191-L194
def return_daily_messages_count(self, sender): """ Returns the number of messages sent in the last 24 hours so we can ensure the user does not exceed his messaging limits """ h24 = now() - timedelta(days=1) return Message.objects.filter(sender=sender, sent_at__gte=h24).count()
[ "def", "return_daily_messages_count", "(", "self", ",", "sender", ")", ":", "h24", "=", "now", "(", ")", "-", "timedelta", "(", "days", "=", "1", ")", "return", "Message", ".", "objects", ".", "filter", "(", "sender", "=", "sender", ",", "sent_at__gte", "=", "h24", ")", ".", "count", "(", ")" ]
Returns the number of messages sent in the last 24 hours so we can ensure the user does not exceed his messaging limits
[ "Returns", "the", "number", "of", "messages", "sent", "in", "the", "last", "24", "hours", "so", "we", "can", "ensure", "the", "user", "does", "not", "exceed", "his", "messaging", "limits" ]
python
train
liamw9534/bt-manager
bt_manager/codecs.py
https://github.com/liamw9534/bt-manager/blob/51be2919394ce8134c698359649bfad09eedf4ec/bt_manager/codecs.py#L159-L181
def encode(self, fd, mtu, data): """ Encode the supplied data (byte array) and write to the media transport file descriptor encapsulated as RTP packets. The encoder will calculate the required number of SBC frames and encapsulate as RTP to fit the MTU size. :param int fd: Media transport file descriptor :param int mtu: Media transport MTU size as returned when the media transport was acquired. :param array{byte} data: Data to encode and send over the media transport. :return: """ self.codec.rtp_sbc_encode_to_fd(self.config, ffi.new('char[]', data), len(data), mtu, self.ts, self.seq_num, fd)
[ "def", "encode", "(", "self", ",", "fd", ",", "mtu", ",", "data", ")", ":", "self", ".", "codec", ".", "rtp_sbc_encode_to_fd", "(", "self", ".", "config", ",", "ffi", ".", "new", "(", "'char[]'", ",", "data", ")", ",", "len", "(", "data", ")", ",", "mtu", ",", "self", ".", "ts", ",", "self", ".", "seq_num", ",", "fd", ")" ]
Encode the supplied data (byte array) and write to the media transport file descriptor encapsulated as RTP packets. The encoder will calculate the required number of SBC frames and encapsulate as RTP to fit the MTU size. :param int fd: Media transport file descriptor :param int mtu: Media transport MTU size as returned when the media transport was acquired. :param array{byte} data: Data to encode and send over the media transport. :return:
[ "Encode", "the", "supplied", "data", "(", "byte", "array", ")", "and", "write", "to", "the", "media", "transport", "file", "descriptor", "encapsulated", "as", "RTP", "packets", ".", "The", "encoder", "will", "calculate", "the", "required", "number", "of", "SBC", "frames", "and", "encapsulate", "as", "RTP", "to", "fit", "the", "MTU", "size", "." ]
python
train
pkkid/python-plexapi
plexapi/server.py
https://github.com/pkkid/python-plexapi/blob/9efbde96441c2bfbf410eacfb46e811e108e8bbc/plexapi/server.py#L174-L179
def settings(self): """ Returns a list of all server settings. """ if not self._settings: data = self.query(Settings.key) self._settings = Settings(self, data) return self._settings
[ "def", "settings", "(", "self", ")", ":", "if", "not", "self", ".", "_settings", ":", "data", "=", "self", ".", "query", "(", "Settings", ".", "key", ")", "self", ".", "_settings", "=", "Settings", "(", "self", ",", "data", ")", "return", "self", ".", "_settings" ]
Returns a list of all server settings.
[ "Returns", "a", "list", "of", "all", "server", "settings", "." ]
python
train
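A hedged usage sketch; the base URL and token are placeholders for a reachable Plex server, and Settings.all() is an assumption about plexapi's settings API.

from plexapi.server import PlexServer

plex = PlexServer('http://localhost:32400', token='PLEX-TOKEN')  # placeholders
for setting in plex.settings.all():
    print(setting.id, setting.value)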
google/grr
api_client/python/grr_api_client/client.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/api_client/python/grr_api_client/client.py#L242-L247
def Get(self): """Fetch client's data and return a proper Client object.""" args = client_pb2.ApiGetClientArgs(client_id=self.client_id) result = self._context.SendRequest("GetClient", args) return Client(data=result, context=self._context)
[ "def", "Get", "(", "self", ")", ":", "args", "=", "client_pb2", ".", "ApiGetClientArgs", "(", "client_id", "=", "self", ".", "client_id", ")", "result", "=", "self", ".", "_context", ".", "SendRequest", "(", "\"GetClient\"", ",", "args", ")", "return", "Client", "(", "data", "=", "result", ",", "context", "=", "self", ".", "_context", ")" ]
Fetch client's data and return a proper Client object.
[ "Fetch", "client", "s", "data", "and", "return", "a", "proper", "Client", "object", "." ]
python
train
aio-libs/aioftp
ftpbench.py
https://github.com/aio-libs/aioftp/blob/b45395b1aba41301b898040acade7010e6878a08/ftpbench.py#L248-L269
def stor(ftp=None): """Same as ftplib's storbinary() but just sends dummy data instead of reading it from a real file. """ if ftp is None: ftp = connect() quit = True else: quit = False ftp.voidcmd('TYPE I') with contextlib.closing(ftp.transfercmd("STOR " + TESTFN)) as conn: chunk = b'x' * BUFFER_LEN total_sent = 0 while True: sent = conn.send(chunk) total_sent += sent if total_sent >= FILE_SIZE: break ftp.voidresp() if quit: ftp.quit() return ftp
[ "def", "stor", "(", "ftp", "=", "None", ")", ":", "if", "ftp", "is", "None", ":", "ftp", "=", "connect", "(", ")", "quit", "=", "True", "else", ":", "quit", "=", "False", "ftp", ".", "voidcmd", "(", "'TYPE I'", ")", "with", "contextlib", ".", "closing", "(", "ftp", ".", "transfercmd", "(", "\"STOR \"", "+", "TESTFN", ")", ")", "as", "conn", ":", "chunk", "=", "b'x'", "*", "BUFFER_LEN", "total_sent", "=", "0", "while", "True", ":", "sent", "=", "conn", ".", "send", "(", "chunk", ")", "total_sent", "+=", "sent", "if", "total_sent", ">=", "FILE_SIZE", ":", "break", "ftp", ".", "voidresp", "(", ")", "if", "quit", ":", "ftp", ".", "quit", "(", ")", "return", "ftp" ]
Same as ftplib's storbinary() but just sends dummy data instead of reading it from a real file.
[ "Same", "as", "ftplib", "s", "storbinary", "()", "but", "just", "sends", "dummy", "data", "instead", "of", "reading", "it", "from", "a", "real", "file", "." ]
python
valid
benjamin-hodgson/asynqp
src/asynqp/__init__.py
https://github.com/benjamin-hodgson/asynqp/blob/ea8630d1803d10d4fd64b1a0e50f3097710b34d1/src/asynqp/__init__.py#L90-L112
def connect_and_open_channel(host='localhost', port=5672, username='guest', password='guest', virtual_host='/', on_connection_close=None, *, loop=None, **kwargs): """ Connect to an AMQP server and open a channel on the connection. This function is a :ref:`coroutine <coroutine>`. Parameters of this function are the same as :func:`connect`. :return: a tuple of ``(connection, channel)``. Equivalent to:: connection = yield from connect(host, port, username, password, virtual_host, on_connection_close, loop=loop, **kwargs) channel = yield from connection.open_channel() return connection, channel """ connection = yield from connect(host, port, username, password, virtual_host, on_connection_close, loop=loop, **kwargs) channel = yield from connection.open_channel() return connection, channel
[ "def", "connect_and_open_channel", "(", "host", "=", "'localhost'", ",", "port", "=", "5672", ",", "username", "=", "'guest'", ",", "password", "=", "'guest'", ",", "virtual_host", "=", "'/'", ",", "on_connection_close", "=", "None", ",", "*", ",", "loop", "=", "None", ",", "*", "*", "kwargs", ")", ":", "connection", "=", "yield", "from", "connect", "(", "host", ",", "port", ",", "username", ",", "password", ",", "virtual_host", ",", "on_connection_close", ",", "loop", "=", "loop", ",", "*", "*", "kwargs", ")", "channel", "=", "yield", "from", "connection", ".", "open_channel", "(", ")", "return", "connection", ",", "channel" ]
Connect to an AMQP server and open a channel on the connection. This function is a :ref:`coroutine <coroutine>`. Parameters of this function are the same as :func:`connect`. :return: a tuple of ``(connection, channel)``. Equivalent to:: connection = yield from connect(host, port, username, password, virtual_host, on_connection_close, loop=loop, **kwargs) channel = yield from connection.open_channel() return connection, channel
[ "Connect", "to", "an", "AMQP", "server", "and", "open", "a", "channel", "on", "the", "connection", ".", "This", "function", "is", "a", ":", "ref", ":", "coroutine", "<coroutine", ">", "." ]
python
train
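A usage sketch mirroring the equivalence spelled out in the docstring, assuming a local broker that accepts the default guest credentials and a Python version where asyncio's legacy generator coroutines still run.

import asyncio
import asynqp

loop = asyncio.get_event_loop()
connection, channel = loop.run_until_complete(
    asynqp.connect_and_open_channel('localhost', 5672,
                                    username='guest', password='guest'))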
DEIB-GECO/PyGMQL
gmql/ml/dataset/parser/parser.py
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/dataset/parser/parser.py#L59-L70
def parse_schema(schema_file): """ parses the schema file and returns the columns that are later going to represent the columns of the genometric space dataframe :param schema_file: the path to the schema file :return: the columns of the schema file """ e = xml.etree.ElementTree.parse(schema_file) root = e.getroot() cols = [] for elem in root.findall(".//{http://genomic.elet.polimi.it/entities}field"): # XPATH cols.append(elem.text) return cols
[ "def", "parse_schema", "(", "schema_file", ")", ":", "e", "=", "xml", ".", "etree", ".", "ElementTree", ".", "parse", "(", "schema_file", ")", "root", "=", "e", ".", "getroot", "(", ")", "cols", "=", "[", "]", "for", "elem", "in", "root", ".", "findall", "(", "\".//{http://genomic.elet.polimi.it/entities}field\"", ")", ":", "# XPATH", "cols", ".", "append", "(", "elem", ".", "text", ")", "return", "cols" ]
parses the schema file and returns the columns that are later going to represent the columns of the genometric space dataframe :param schema_file: the path to the schema file :return: the columns of the schema file
[ "parses", "the", "schema", "file", "and", "returns", "the", "columns", "that", "are", "later", "going", "to", "represent", "the", "columns", "of", "the", "genometric", "space", "dataframe", ":", "param", "schema_file", ":", "the", "path", "to", "the", "schema", "file", ":", "return", ":", "the", "columns", "of", "the", "schema", "file" ]
python
train
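A self-contained check of the XPath above against a tiny schema written to a temporary file; the root element name here is a placeholder, since only the namespace matters for the query.

import tempfile
import xml.etree.ElementTree

schema = ('<schema xmlns="http://genomic.elet.polimi.it/entities">'
          '<field>chr</field><field>start</field><field>stop</field></schema>')
with tempfile.NamedTemporaryFile('w', suffix='.xml', delete=False) as f:
    f.write(schema)
    path = f.name

root = xml.etree.ElementTree.parse(path).getroot()
cols = [e.text for e in
        root.findall(".//{http://genomic.elet.polimi.it/entities}field")]
print(cols)  # ['chr', 'start', 'stop']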
Gandi/gandi.cli
gandi/cli/modules/paas.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/paas.py#L362-L366
def list_names(cls): """Retrieve paas id and names.""" ret = dict([(item['id'], item['name']) for item in cls.list({'items_per_page': 500})]) return ret
[ "def", "list_names", "(", "cls", ")", ":", "ret", "=", "dict", "(", "[", "(", "item", "[", "'id'", "]", ",", "item", "[", "'name'", "]", ")", "for", "item", "in", "cls", ".", "list", "(", "{", "'items_per_page'", ":", "500", "}", ")", "]", ")", "return", "ret" ]
Retrieve paas id and names.
[ "Retrieve", "paas", "id", "and", "names", "." ]
python
train
pyQode/pyqode.core
pyqode/core/widgets/filesystem_treeview.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/widgets/filesystem_treeview.py#L360-L370
def selected_urls(self):
    """ Gets the list of selected items' file paths (urls) """
    urls = []
    debug('getting urls')
    for proxy_index in self.tree_view.selectedIndexes():
        finfo = self.tree_view.fileInfo(proxy_index)
        urls.append(finfo.canonicalFilePath())
    debug('selected urls %r' % [str(url) for url in urls])
    return urls
[ "def", "selected_urls", "(", "self", ")", ":", "urls", "=", "[", "]", "debug", "(", "'gettings urls'", ")", "for", "proxy_index", "in", "self", ".", "tree_view", ".", "selectedIndexes", "(", ")", ":", "finfo", "=", "self", ".", "tree_view", ".", "fileInfo", "(", "proxy_index", ")", "urls", ".", "append", "(", "finfo", ".", "canonicalFilePath", "(", ")", ")", "debug", "(", "'selected urls %r'", "%", "[", "str", "(", "url", ")", "for", "url", "in", "urls", "]", ")", "return", "urls" ]
Gets the list of selected items' file paths (urls)
[ "Gets", "the", "list", "of", "selected", "items", "file", "path", "(", "url", ")" ]
python
train
edx/edx-enterprise
enterprise/admin/utils.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/admin/utils.py#L141-L173
def validate_email_to_link(email, raw_email=None, message_template=None, ignore_existing=False): """ Validate email to be linked to Enterprise Customer. Performs two checks: * Checks that email is valid * Checks that it is not already linked to any Enterprise Customer Arguments: email (str): user email to link raw_email (str): raw value as it was passed by user - used in error message. message_template (str): Validation error template string. ignore_existing (bool): If True to skip the check for an existing Enterprise Customer Raises: ValidationError: if email is invalid or already linked to Enterprise Customer. Returns: bool: Whether or not there is an existing record with the same email address. """ raw_email = raw_email if raw_email is not None else email message_template = message_template if message_template is not None else ValidationMessages.INVALID_EMAIL try: validate_email(email) except ValidationError: raise ValidationError(message_template.format(argument=raw_email)) existing_record = EnterpriseCustomerUser.objects.get_link_by_email(email) if existing_record and not ignore_existing: raise ValidationError(ValidationMessages.USER_ALREADY_REGISTERED.format( email=email, ec_name=existing_record.enterprise_customer.name )) return existing_record or False
[ "def", "validate_email_to_link", "(", "email", ",", "raw_email", "=", "None", ",", "message_template", "=", "None", ",", "ignore_existing", "=", "False", ")", ":", "raw_email", "=", "raw_email", "if", "raw_email", "is", "not", "None", "else", "email", "message_template", "=", "message_template", "if", "message_template", "is", "not", "None", "else", "ValidationMessages", ".", "INVALID_EMAIL", "try", ":", "validate_email", "(", "email", ")", "except", "ValidationError", ":", "raise", "ValidationError", "(", "message_template", ".", "format", "(", "argument", "=", "raw_email", ")", ")", "existing_record", "=", "EnterpriseCustomerUser", ".", "objects", ".", "get_link_by_email", "(", "email", ")", "if", "existing_record", "and", "not", "ignore_existing", ":", "raise", "ValidationError", "(", "ValidationMessages", ".", "USER_ALREADY_REGISTERED", ".", "format", "(", "email", "=", "email", ",", "ec_name", "=", "existing_record", ".", "enterprise_customer", ".", "name", ")", ")", "return", "existing_record", "or", "False" ]
Validate email to be linked to Enterprise Customer.

Performs two checks:
    * Checks that email is valid
    * Checks that it is not already linked to any Enterprise Customer

Arguments:
    email (str): user email to link
    raw_email (str): raw value as it was passed by user - used in error message.
    message_template (str): Validation error template string.
    ignore_existing (bool): If True, skip the check for an existing Enterprise Customer

Raises:
    ValidationError: if email is invalid or already linked to Enterprise Customer.

Returns:
    bool: Whether or not there is an existing record with the same email address.
[ "Validate", "email", "to", "be", "linked", "to", "Enterprise", "Customer", "." ]
python
valid
DataBiosphere/toil
src/toil/utils/toilStats.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/utils/toilStats.py#L410-L418
def computeColumnWidths(job_types, worker, job, options): """ Return a ColumnWidths() object with the correct max widths. """ cw = ColumnWidths() for t in job_types: updateColumnWidths(t, cw, options) updateColumnWidths(worker, cw, options) updateColumnWidths(job, cw, options) return cw
[ "def", "computeColumnWidths", "(", "job_types", ",", "worker", ",", "job", ",", "options", ")", ":", "cw", "=", "ColumnWidths", "(", ")", "for", "t", "in", "job_types", ":", "updateColumnWidths", "(", "t", ",", "cw", ",", "options", ")", "updateColumnWidths", "(", "worker", ",", "cw", ",", "options", ")", "updateColumnWidths", "(", "job", ",", "cw", ",", "options", ")", "return", "cw" ]
Return a ColumnWidths() object with the correct max widths.
[ "Return", "a", "ColumnWidths", "()", "object", "with", "the", "correct", "max", "widths", "." ]
python
train
Dentosal/python-sc2
sc2/bot_ai.py
https://github.com/Dentosal/python-sc2/blob/608bd25f04e89d39cef68b40101d8e9a8a7f1634/sc2/bot_ai.py#L468-L472
def get_terrain_height(self, pos: Union[Point2, Point3, Unit]) -> int: """ Returns terrain height at a position. Caution: terrain height is not anywhere near a unit's z-coordinate. """ assert isinstance(pos, (Point2, Point3, Unit)) pos = pos.position.to2.rounded return self._game_info.terrain_height[pos]
[ "def", "get_terrain_height", "(", "self", ",", "pos", ":", "Union", "[", "Point2", ",", "Point3", ",", "Unit", "]", ")", "->", "int", ":", "assert", "isinstance", "(", "pos", ",", "(", "Point2", ",", "Point3", ",", "Unit", ")", ")", "pos", "=", "pos", ".", "position", ".", "to2", ".", "rounded", "return", "self", ".", "_game_info", ".", "terrain_height", "[", "pos", "]" ]
Returns terrain height at a position. Caution: terrain height is not anywhere near a unit's z-coordinate.
[ "Returns", "terrain", "height", "at", "a", "position", ".", "Caution", ":", "terrain", "height", "is", "not", "anywhere", "near", "a", "unit", "s", "z", "-", "coordinate", "." ]
python
train
saltstack/salt
salt/runners/cloud.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/cloud.py#L129-L137
def destroy(instances, opts=None): ''' Destroy the named vm(s) ''' client = _get_client() if isinstance(opts, dict): client.opts.update(opts) info = client.destroy(instances) return info
[ "def", "destroy", "(", "instances", ",", "opts", "=", "None", ")", ":", "client", "=", "_get_client", "(", ")", "if", "isinstance", "(", "opts", ",", "dict", ")", ":", "client", ".", "opts", ".", "update", "(", "opts", ")", "info", "=", "client", ".", "destroy", "(", "instances", ")", "return", "info" ]
Destroy the named vm(s)
[ "Destroy", "the", "named", "vm", "(", "s", ")" ]
python
train
dwavesystems/penaltymodel
penaltymodel_cache/penaltymodel/cache/database_manager.py
https://github.com/dwavesystems/penaltymodel/blob/b9d343233aea8df0f59cea45a07f12d0b3b8d9b3/penaltymodel_cache/penaltymodel/cache/database_manager.py#L316-L337
def _serialize_linear_biases(linear, nodelist):
    """Serializes the linear biases.

    Args:
        linear: an iterable object where linear[v] is the bias
            associated with v.
        nodelist (list): an ordered iterable containing the nodes.

    Returns:
        str: base 64 encoded string of little endian 8 byte floats,
        one for each of the biases in linear. Ordered according
        to nodelist.

    Examples:
        >>> _serialize_linear_biases({1: -1, 2: 1, 3: 0}, [1, 2, 3])
        'AAAAAAAA8L8AAAAAAADwPwAAAAAAAAAA'
        >>> _serialize_linear_biases({1: -1, 2: 1, 3: 0}, [3, 2, 1])
        'AAAAAAAAAAAAAAAAAADwPwAAAAAAAPC/'

    """
    linear_bytes = struct.pack('<' + 'd' * len(linear), *[linear[i] for i in nodelist])
    return base64.b64encode(linear_bytes).decode('utf-8')
[ "def", "_serialize_linear_biases", "(", "linear", ",", "nodelist", ")", ":", "linear_bytes", "=", "struct", ".", "pack", "(", "'<'", "+", "'d'", "*", "len", "(", "linear", ")", ",", "*", "[", "linear", "[", "i", "]", "for", "i", "in", "nodelist", "]", ")", "return", "base64", ".", "b64encode", "(", "linear_bytes", ")", ".", "decode", "(", "'utf-8'", ")" ]
Serializes the linear biases.

Args:
    linear: an iterable object where linear[v] is the bias
        associated with v.
    nodelist (list): an ordered iterable containing the nodes.

Returns:
    str: base 64 encoded string of little endian 8 byte floats,
    one for each of the biases in linear. Ordered according
    to nodelist.

Examples:
    >>> _serialize_linear_biases({1: -1, 2: 1, 3: 0}, [1, 2, 3])
    'AAAAAAAA8L8AAAAAAADwPwAAAAAAAAAA'
    >>> _serialize_linear_biases({1: -1, 2: 1, 3: 0}, [3, 2, 1])
    'AAAAAAAAAAAAAAAAAADwPwAAAAAAAPC/'
[ "Serializes", "the", "linear", "biases", "." ]
python
train
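The first doctest above can be reproduced with struct and base64 alone: three little-endian doubles for the biases -1, 1, 0.

import base64
import struct

packed = struct.pack('<3d', -1.0, 1.0, 0.0)
print(base64.b64encode(packed).decode('utf-8'))
# AAAAAAAA8L8AAAAAAADwPwAAAAAAAAAA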
fumitoh/modelx
modelx/core/node.py
https://github.com/fumitoh/modelx/blob/0180da34d052c44fb94dab9e115e218bbebfc9c3/modelx/core/node.py#L63-L80
def tuplize_key(obj, key, remove_extra=False): """Args""" paramlen = len(obj.formula.parameters) if isinstance(key, str): key = (key,) elif not isinstance(key, Sequence): key = (key,) if not remove_extra: return key else: arglen = len(key) if arglen: return key[: min(arglen, paramlen)] else: return key
[ "def", "tuplize_key", "(", "obj", ",", "key", ",", "remove_extra", "=", "False", ")", ":", "paramlen", "=", "len", "(", "obj", ".", "formula", ".", "parameters", ")", "if", "isinstance", "(", "key", ",", "str", ")", ":", "key", "=", "(", "key", ",", ")", "elif", "not", "isinstance", "(", "key", ",", "Sequence", ")", ":", "key", "=", "(", "key", ",", ")", "if", "not", "remove_extra", ":", "return", "key", "else", ":", "arglen", "=", "len", "(", "key", ")", "if", "arglen", ":", "return", "key", "[", ":", "min", "(", "arglen", ",", "paramlen", ")", "]", "else", ":", "return", "key" ]
Args
[ "Args" ]
python
valid
coin-or/GiMPy
src/gimpy/tree.py
https://github.com/coin-or/GiMPy/blob/51853122a50eb6019d06bbdedbfc396a833b5a22/src/gimpy/tree.py#L281-L296
def del_node(self, n): ''' API: del_node(self, n) Description: Removes node n from tree. Pre: Node n should be present in the tree. Input: n: Node name. ''' parent = self.get_node_attr(n, 'parent') if self.get_node_attr(n, 'direction') == 'R': self.set_node_attr(parent, 'Rchild', None) else: self.set_node_attr(parent, 'Lchild', None) Graph.del_node(self, n)
[ "def", "del_node", "(", "self", ",", "n", ")", ":", "parent", "=", "self", ".", "get_node_attr", "(", "n", ",", "'parent'", ")", "if", "self", ".", "get_node_attr", "(", "n", ",", "'direction'", ")", "==", "'R'", ":", "self", ".", "set_node_attr", "(", "parent", ",", "'Rchild'", ",", "None", ")", "else", ":", "self", ".", "set_node_attr", "(", "parent", ",", "'Lchild'", ",", "None", ")", "Graph", ".", "del_node", "(", "self", ",", "n", ")" ]
API: del_node(self, n) Description: Removes node n from tree. Pre: Node n should be present in the tree. Input: n: Node name.
[ "API", ":", "del_node", "(", "self", "n", ")", "Description", ":", "Removes", "node", "n", "from", "tree", ".", "Pre", ":", "Node", "n", "should", "be", "present", "in", "the", "tree", ".", "Input", ":", "n", ":", "Node", "name", "." ]
python
train
raiden-network/raiden
raiden/network/proxies/utils.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/network/proxies/utils.py#L45-L70
def get_onchain_locksroots( chain: 'BlockChainService', canonical_identifier: CanonicalIdentifier, participant1: Address, participant2: Address, block_identifier: BlockSpecification, ) -> Tuple[Locksroot, Locksroot]: """Return the locksroot for `participant1` and `participant2` at `block_identifier`.""" payment_channel = chain.payment_channel(canonical_identifier=canonical_identifier) token_network = payment_channel.token_network # This will not raise RaidenRecoverableError because we are providing the channel_identifier participants_details = token_network.detail_participants( participant1=participant1, participant2=participant2, channel_identifier=canonical_identifier.channel_identifier, block_identifier=block_identifier, ) our_details = participants_details.our_details our_locksroot = our_details.locksroot partner_details = participants_details.partner_details partner_locksroot = partner_details.locksroot return our_locksroot, partner_locksroot
[ "def", "get_onchain_locksroots", "(", "chain", ":", "'BlockChainService'", ",", "canonical_identifier", ":", "CanonicalIdentifier", ",", "participant1", ":", "Address", ",", "participant2", ":", "Address", ",", "block_identifier", ":", "BlockSpecification", ",", ")", "->", "Tuple", "[", "Locksroot", ",", "Locksroot", "]", ":", "payment_channel", "=", "chain", ".", "payment_channel", "(", "canonical_identifier", "=", "canonical_identifier", ")", "token_network", "=", "payment_channel", ".", "token_network", "# This will not raise RaidenRecoverableError because we are providing the channel_identifier", "participants_details", "=", "token_network", ".", "detail_participants", "(", "participant1", "=", "participant1", ",", "participant2", "=", "participant2", ",", "channel_identifier", "=", "canonical_identifier", ".", "channel_identifier", ",", "block_identifier", "=", "block_identifier", ",", ")", "our_details", "=", "participants_details", ".", "our_details", "our_locksroot", "=", "our_details", ".", "locksroot", "partner_details", "=", "participants_details", ".", "partner_details", "partner_locksroot", "=", "partner_details", ".", "locksroot", "return", "our_locksroot", ",", "partner_locksroot" ]
Return the locksroot for `participant1` and `participant2` at `block_identifier`.
[ "Return", "the", "locksroot", "for", "participant1", "and", "participant2", "at", "block_identifier", "." ]
python
train
openvax/datacache
datacache/download.py
https://github.com/openvax/datacache/blob/73bcac02d37cf153710a07fbdc636aa55cb214ca/datacache/download.py#L159-L214
def fetch_file(
        download_url,
        filename=None,
        decompress=False,
        subdir=None,
        force=False,
        timeout=None,
        use_wget_if_available=False):
    """
    Download a remote file and store it locally in a cache directory. Don't
    download it again if it's already present (unless `force` is True.)

    Parameters
    ----------
    download_url : str
        Remote URL of file to download.

    filename : str, optional
        Local filename, used as cache key. If omitted, then determine the local
        filename from the URL.

    decompress : bool, optional
        By default any file whose remote extension is one of (".zip", ".gzip")
        and whose local filename lacks this suffix is decompressed. If a local
        filename wasn't provided but you still want to decompress the stored
        data then set this option to True.

    subdir : str, optional
        Group downloads in a single subdirectory.

    force : bool, optional
        By default, a remote file is not downloaded if it's already present.
        However, with this argument set to True, it will be overwritten.

    timeout : float, optional
        Timeout for download in seconds, default is None which uses
        global timeout.

    use_wget_if_available: bool, optional
        If the `wget` command is available, use that for download instead
        of Python libraries (default False)

    Returns the full path of the local file.
    """
    filename = build_local_filename(download_url, filename, decompress)
    full_path = build_path(filename, subdir)
    if not os.path.exists(full_path) or force:
        logger.info("Fetching %s from URL %s", filename, download_url)
        _download_and_decompress_if_necessary(
            full_path=full_path,
            download_url=download_url,
            timeout=timeout,
            use_wget_if_available=use_wget_if_available)
    else:
        logger.info("Cached file %s from URL %s", filename, download_url)
    return full_path
[ "def", "fetch_file", "(", "download_url", ",", "filename", "=", "None", ",", "decompress", "=", "False", ",", "subdir", "=", "None", ",", "force", "=", "False", ",", "timeout", "=", "None", ",", "use_wget_if_available", "=", "False", ")", ":", "filename", "=", "build_local_filename", "(", "download_url", ",", "filename", ",", "decompress", ")", "full_path", "=", "build_path", "(", "filename", ",", "subdir", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "full_path", ")", "or", "force", ":", "logger", ".", "info", "(", "\"Fetching %s from URL %s\"", ",", "filename", ",", "download_url", ")", "_download_and_decompress_if_necessary", "(", "full_path", "=", "full_path", ",", "download_url", "=", "download_url", ",", "timeout", "=", "timeout", ",", "use_wget_if_available", "=", "use_wget_if_available", ")", "else", ":", "logger", ".", "info", "(", "\"Cached file %s from URL %s\"", ",", "filename", ",", "download_url", ")", "return", "full_path" ]
Download a remote file and store it locally in a cache directory. Don't
download it again if it's already present (unless `force` is True.)

Parameters
----------
download_url : str
    Remote URL of file to download.

filename : str, optional
    Local filename, used as cache key. If omitted, then determine the local
    filename from the URL.

decompress : bool, optional
    By default any file whose remote extension is one of (".zip", ".gzip")
    and whose local filename lacks this suffix is decompressed. If a local
    filename wasn't provided but you still want to decompress the stored
    data then set this option to True.

subdir : str, optional
    Group downloads in a single subdirectory.

force : bool, optional
    By default, a remote file is not downloaded if it's already present.
    However, with this argument set to True, it will be overwritten.

timeout : float, optional
    Timeout for download in seconds, default is None which uses
    global timeout.

use_wget_if_available: bool, optional
    If the `wget` command is available, use that for download instead
    of Python libraries (default False)

Returns the full path of the local file.
[ "Download", "a", "remote", "file", "and", "store", "it", "locally", "in", "a", "cache", "directory", ".", "Don", "t", "download", "it", "again", "if", "it", "s", "already", "present", "(", "unless", "force", "is", "True", ".", ")" ]
python
train
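A hedged usage sketch; the URL is a placeholder, and fetch_file is assumed to be re-exported at the package top level as in datacache's public API.

from datacache import fetch_file

path = fetch_file(
    'https://example.com/annotations.csv.gz',  # hypothetical URL
    decompress=True,   # strip the .gz suffix and cache the decompressed copy
    subdir='demo')
print(path)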
rduplain/jeni-python
jeni.py
https://github.com/rduplain/jeni-python/blob/feca12ce5e4f0438ae5d7bec59d61826063594f1/jeni.py#L300-L326
def partial(__fn, *a, **kw): """Wrap a note for injection of a partially applied function. This allows for annotated functions to be injected for composition:: from jeni import annotate @annotate('foo', bar=annotate.maybe('bar')) def foobar(foo, bar=None): return @annotate('foo', annotate.partial(foobar)) def bazquux(foo, fn): # fn: injector.partial(foobar) return Keyword arguments are treated as `maybe` when using partial, in order to allow partial application of only the notes which can be provided, where the caller could then apply arguments known to be unavailable in the injector. Note that with Python 3 function annotations, all annotations are injected as keyword arguments. Injections on the partial function are lazy and not applied until the injected partial function is called. See `eager_partial` to inject eagerly. """ return (PARTIAL, (__fn, a, tuple(kw.items())))
[ "def", "partial", "(", "__fn", ",", "*", "a", ",", "*", "*", "kw", ")", ":", "return", "(", "PARTIAL", ",", "(", "__fn", ",", "a", ",", "tuple", "(", "kw", ".", "items", "(", ")", ")", ")", ")" ]
Wrap a note for injection of a partially applied function. This allows for annotated functions to be injected for composition:: from jeni import annotate @annotate('foo', bar=annotate.maybe('bar')) def foobar(foo, bar=None): return @annotate('foo', annotate.partial(foobar)) def bazquux(foo, fn): # fn: injector.partial(foobar) return Keyword arguments are treated as `maybe` when using partial, in order to allow partial application of only the notes which can be provided, where the caller could then apply arguments known to be unavailable in the injector. Note that with Python 3 function annotations, all annotations are injected as keyword arguments. Injections on the partial function are lazy and not applied until the injected partial function is called. See `eager_partial` to inject eagerly.
[ "Wrap", "a", "note", "for", "injection", "of", "a", "partially", "applied", "function", "." ]
python
train
JukeboxPipeline/jukeboxmaya
src/jukeboxmaya/main.py
https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/main.py#L43-L68
def init(): """Initialize the pipeline in maya so everything works Init environment and load plugins. This also creates the initial Jukebox Menu entry. :returns: None :rtype: None :raises: None """ main.init_environment() pluginpath = os.pathsep.join((os.environ.get('JUKEBOX_PLUGIN_PATH', ''), BUILTIN_PLUGIN_PATH)) os.environ['JUKEBOX_PLUGIN_PATH'] = pluginpath try: maya.standalone.initialize() jukeboxmaya.STANDALONE_INITIALIZED = True except RuntimeError as e: jukeboxmaya.STANDALONE_INITIALIZED = False if str(e) == "maya.standalone may only be used from an external Python interpreter": mm = MenuManager.get() mainmenu = mm.create_menu("Jukebox", tearOff=True) mm.create_menu("Help", parent=mainmenu, command=show_help) # load plugins pmanager = MayaPluginManager.get() pmanager.load_plugins() load_mayaplugins()
[ "def", "init", "(", ")", ":", "main", ".", "init_environment", "(", ")", "pluginpath", "=", "os", ".", "pathsep", ".", "join", "(", "(", "os", ".", "environ", ".", "get", "(", "'JUKEBOX_PLUGIN_PATH'", ",", "''", ")", ",", "BUILTIN_PLUGIN_PATH", ")", ")", "os", ".", "environ", "[", "'JUKEBOX_PLUGIN_PATH'", "]", "=", "pluginpath", "try", ":", "maya", ".", "standalone", ".", "initialize", "(", ")", "jukeboxmaya", ".", "STANDALONE_INITIALIZED", "=", "True", "except", "RuntimeError", "as", "e", ":", "jukeboxmaya", ".", "STANDALONE_INITIALIZED", "=", "False", "if", "str", "(", "e", ")", "==", "\"maya.standalone may only be used from an external Python interpreter\"", ":", "mm", "=", "MenuManager", ".", "get", "(", ")", "mainmenu", "=", "mm", ".", "create_menu", "(", "\"Jukebox\"", ",", "tearOff", "=", "True", ")", "mm", ".", "create_menu", "(", "\"Help\"", ",", "parent", "=", "mainmenu", ",", "command", "=", "show_help", ")", "# load plugins", "pmanager", "=", "MayaPluginManager", ".", "get", "(", ")", "pmanager", ".", "load_plugins", "(", ")", "load_mayaplugins", "(", ")" ]
Initialize the pipeline in maya so everything works Init environment and load plugins. This also creates the initial Jukebox Menu entry. :returns: None :rtype: None :raises: None
[ "Initialize", "the", "pipeline", "in", "maya", "so", "everything", "works" ]
python
train
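The try/except around `maya.standalone.initialize()` doubles as GUI detection: inside interactive Maya the call fails with a known message, and that is when the Jukebox menu gets built. A self-contained sketch of the detection pattern, with `maya.standalone` stubbed out since the real module only exists inside a Maya installation:

class _FakeStandalone:
    """Stub mimicking maya.standalone when run inside the Maya GUI."""
    def initialize(self):
        raise RuntimeError(
            'maya.standalone may only be used from an external Python interpreter')

standalone = _FakeStandalone()
standalone_initialized = False
try:
    standalone.initialize()
    standalone_initialized = True
except RuntimeError as e:
    if str(e) == 'maya.standalone may only be used from an external Python interpreter':
        print('inside the Maya GUI: build menus instead of initializing standalone')

print('standalone initialized:', standalone_initialized)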
cakebread/yolk
yolk/yolklib.py
https://github.com/cakebread/yolk/blob/ee8c9f529a542d9c5eff4fe69b9c7906c802e4d8/yolk/yolklib.py#L107-L128
def get_packages(self, show):
        """
        Return list of Distributions filtered by active status or all

        @param show: Type of package(s) to show; active, nonactive or all
        @type show: string: "active", "nonactive", "all"

        @returns: list of pkg_resources Distribution objects

        """
        if show == 'nonactive' or show == "all":
            all_packages = []
            for package in self.environment:
                #There may be multiple versions of same packages
                for i in range(len(self.environment[package])):
                    if self.environment[package][i]:
                        all_packages.append(self.environment[package][i])
            return all_packages
        else:
            # Only activated packages
            return self.working_set
[ "def", "get_packages", "(", "self", ",", "show", ")", ":", "if", "show", "==", "'nonactive'", "or", "show", "==", "\"all\"", ":", "all_packages", "=", "[", "]", "for", "package", "in", "self", ".", "environment", ":", "#There may be multiple versions of same packages", "for", "i", "in", "range", "(", "len", "(", "self", ".", "environment", "[", "package", "]", ")", ")", ":", "if", "self", ".", "environment", "[", "package", "]", "[", "i", "]", ":", "all_packages", ".", "append", "(", "self", ".", "environment", "[", "package", "]", "[", "i", "]", ")", "return", "all_packages", "else", ":", "# Only activated packages", "return", "self", ".", "working_set" ]
Return list of Distributions filtered by active status or all

        @param show: Type of package(s) to show; active, nonactive or all
        @type show: string: "active", "nonactive", "all"

        @returns: list of pkg_resources Distribution objects
[ "Return", "list", "of", "Distributions", "filtered", "by", "active", "status", "or", "all" ]
python
train
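A standalone analog of the `show='all'` branch, talking to `pkg_resources.Environment` directly rather than through yolk's wrapper class:

import pkg_resources

env = pkg_resources.Environment()  # scans sys.path by default
all_packages = []
for package in env:
    # there may be multiple versions of the same package
    for dist in env[package]:
        all_packages.append(dist)

for dist in all_packages[:5]:
    print(dist.project_name, dist.version)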
abe-winter/pg13-py
pg13/pgmock_dbapi2.py
https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/pgmock_dbapi2.py#L134-L140
def open_only(f): "decorator" @functools.wraps(f) def f2(self, *args, **kwargs): if self.closed: raise NotSupportedError('connection is closed') return f(self, *args, **kwargs) return f2
[ "def", "open_only", "(", "f", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "f2", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "closed", ":", "raise", "NotSupportedError", "(", "'connection is closed'", ")", "return", "f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "f2" ]
decorator
[ "decorator" ]
python
train
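A self-contained sketch of how such a guard decorator is applied; the toy connection class and the local `NotSupportedError` stand in for pgmock's DB-API types:

import functools

class NotSupportedError(Exception):
    pass

def open_only(f):
    "decorator"
    @functools.wraps(f)
    def f2(self, *args, **kwargs):
        if self.closed:
            raise NotSupportedError('connection is closed')
        return f(self, *args, **kwargs)
    return f2

class Connection:
    def __init__(self):
        self.closed = False

    @open_only
    def execute(self, sql):
        return 'ran: ' + sql

conn = Connection()
print(conn.execute('select 1'))   # fine while the connection is open
conn.closed = True
# conn.execute('select 1')        # would now raise NotSupportedError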
caseyjlaw/rtpipe
rtpipe/RT.py
https://github.com/caseyjlaw/rtpipe/blob/ac33e4332cf215091a63afbb3137850876d73ec0/rtpipe/RT.py#L1027-L1036
def correct_dmdt(d, dmind, dtind, blrange): """ Dedisperses and resamples data *in place*. Drops edges, since it assumes that data is read with overlapping chunks in time. """ data = numpyview(data_mem, 'complex64', datashape(d)) data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d)) bl0,bl1 = blrange data_resamp[:, bl0:bl1] = data[:, bl0:bl1] rtlib.dedisperse_resample(data_resamp, d['freq'], d['inttime'], d['dmarr'][dmind], d['dtarr'][dtind], blrange, verbose=0)
[ "def", "correct_dmdt", "(", "d", ",", "dmind", ",", "dtind", ",", "blrange", ")", ":", "data", "=", "numpyview", "(", "data_mem", ",", "'complex64'", ",", "datashape", "(", "d", ")", ")", "data_resamp", "=", "numpyview", "(", "data_resamp_mem", ",", "'complex64'", ",", "datashape", "(", "d", ")", ")", "bl0", ",", "bl1", "=", "blrange", "data_resamp", "[", ":", ",", "bl0", ":", "bl1", "]", "=", "data", "[", ":", ",", "bl0", ":", "bl1", "]", "rtlib", ".", "dedisperse_resample", "(", "data_resamp", ",", "d", "[", "'freq'", "]", ",", "d", "[", "'inttime'", "]", ",", "d", "[", "'dmarr'", "]", "[", "dmind", "]", ",", "d", "[", "'dtarr'", "]", "[", "dtind", "]", ",", "blrange", ",", "verbose", "=", "0", ")" ]
Dedisperses and resamples data *in place*. Drops edges, since it assumes that data is read with overlapping chunks in time.
[ "Dedisperses", "and", "resamples", "data", "*", "in", "place", "*", ".", "Drops", "edges", "since", "it", "assumes", "that", "data", "is", "read", "with", "overlapping", "chunks", "in", "time", "." ]
python
train
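`correct_dmdt` depends on module-level shared-memory arrays and the compiled `rtlib` kernel, so it cannot run on its own. A toy NumPy sketch of the same copy-then-transform-in-place pattern, with a simple two-integration average standing in for `rtlib.dedisperse_resample`:

import numpy as np

nints, nbl, nchan, npol = 8, 6, 4, 2   # made-up visibility dimensions
data = np.ones((nints, nbl, nchan, npol), dtype='complex64')
data_resamp = np.zeros_like(data)

bl0, bl1 = 2, 5                        # baseline range handled by this worker
data_resamp[:, bl0:bl1] = data[:, bl0:bl1]
# stand-in "resampling": average adjacent integrations in place
data_resamp[:-1, bl0:bl1] = (data_resamp[:-1, bl0:bl1] + data_resamp[1:, bl0:bl1]) / 2

print(data_resamp[0, bl0, 0, 0])       # (1+0j)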
jazzband/django-model-utils
model_utils/managers.py
https://github.com/jazzband/django-model-utils/blob/d557c4253312774a7c2f14bcd02675e9ac2ea05f/model_utils/managers.py#L327-L400
def join(self, qs=None):
        '''
        Join one queryset together with another using a temporary table. If
        no queryset is used, it will use the current queryset and join that
        to itself.

        `Join` either uses the current queryset and effectively does a self-join to
        create a new limited queryset OR it uses a queryset given by the user.

        The model of a given queryset needs to contain a valid foreign key to
        the current queryset to perform a join. A new queryset is then created.
        '''
        to_field = 'id'

        if qs:
            fk = [
                fk for fk in qs.model._meta.fields
                if getattr(fk, 'related_model', None) == self.model
            ]
            fk = fk[0] if fk else None

            model_set = '{}_set'.format(self.model.__name__.lower())
            key = fk or getattr(qs.model, model_set, None)

            if not key:
                raise ValueError('QuerySet is not related to current model')

            try:
                fk_column = key.column
            except AttributeError:
                fk_column = 'id'
                to_field = key.field.column

            qs = qs.only(fk_column)
            # if we give a qs we need to keep the model qs to not lose anything
            new_qs = self
        else:
            fk_column = 'id'
            qs = self.only(fk_column)
            new_qs = self.model.objects.all()

        TABLE_NAME = 'temp_stuff'
        query = self.get_quoted_query(qs.query)
        sql = '''
            DROP TABLE IF EXISTS {table_name};
            DROP INDEX IF EXISTS {table_name}_id;
            CREATE TEMPORARY TABLE {table_name} AS {query};
            CREATE INDEX {table_name}_{fk_column} ON {table_name} ({fk_column});
        '''.format(table_name=TABLE_NAME, fk_column=fk_column, query=str(query))

        with connection.cursor() as cursor:
            cursor.execute(sql)

        class TempModel(models.Model):
            temp_key = models.ForeignKey(
                self.model,
                on_delete=models.DO_NOTHING,
                db_column=fk_column,
                to_field=to_field
            )

            class Meta:
                managed = False
                db_table = TABLE_NAME

        conn = Join(
            table_name=TempModel._meta.db_table,
            parent_alias=new_qs.query.get_initial_alias(),
            table_alias=None,
            join_type='INNER JOIN',
            join_field=self.model.tempmodel_set.rel,
            nullable=False
        )
        new_qs.query.join(conn, reuse=None)
        return new_qs
[ "def", "join", "(", "self", ",", "qs", "=", "None", ")", ":", "to_field", "=", "'id'", "if", "qs", ":", "fk", "=", "[", "fk", "for", "fk", "in", "qs", ".", "model", ".", "_meta", ".", "fields", "if", "getattr", "(", "fk", ",", "'related_model'", ",", "None", ")", "==", "self", ".", "model", "]", "fk", "=", "fk", "[", "0", "]", "if", "fk", "else", "None", "model_set", "=", "'{}_set'", ".", "format", "(", "self", ".", "model", ".", "__name__", ".", "lower", "(", ")", ")", "key", "=", "fk", "or", "getattr", "(", "qs", ".", "model", ",", "model_set", ",", "None", ")", "if", "not", "key", ":", "raise", "ValueError", "(", "'QuerySet is not related to current model'", ")", "try", ":", "fk_column", "=", "key", ".", "column", "except", "AttributeError", ":", "fk_column", "=", "'id'", "to_field", "=", "key", ".", "field", ".", "column", "qs", "=", "qs", ".", "only", "(", "fk_column", ")", "# if we give a qs we need to keep the model qs to not lose anything", "new_qs", "=", "self", "else", ":", "fk_column", "=", "'id'", "qs", "=", "self", ".", "only", "(", "fk_column", ")", "new_qs", "=", "self", ".", "model", ".", "objects", ".", "all", "(", ")", "TABLE_NAME", "=", "'temp_stuff'", "query", "=", "self", ".", "get_quoted_query", "(", "qs", ".", "query", ")", "sql", "=", "'''\n DROP TABLE IF EXISTS {table_name};\n DROP INDEX IF EXISTS {table_name}_id;\n CREATE TEMPORARY TABLE {table_name} AS {query};\n CREATE INDEX {table_name}_{fk_column} ON {table_name} ({fk_column});\n '''", ".", "format", "(", "table_name", "=", "TABLE_NAME", ",", "fk_column", "=", "fk_column", ",", "query", "=", "str", "(", "query", ")", ")", "with", "connection", ".", "cursor", "(", ")", "as", "cursor", ":", "cursor", ".", "execute", "(", "sql", ")", "class", "TempModel", "(", "models", ".", "Model", ")", ":", "temp_key", "=", "models", ".", "ForeignKey", "(", "self", ".", "model", ",", "on_delete", "=", "models", ".", "DO_NOTHING", ",", "db_column", "=", "fk_column", ",", "to_field", "=", "to_field", ")", "class", "Meta", ":", "managed", "=", "False", "db_table", "=", "TABLE_NAME", "conn", "=", "Join", "(", "table_name", "=", "TempModel", ".", "_meta", ".", "db_table", ",", "parent_alias", "=", "new_qs", ".", "query", ".", "get_initial_alias", "(", ")", ",", "table_alias", "=", "None", ",", "join_type", "=", "'INNER JOIN'", ",", "join_field", "=", "self", ".", "model", ".", "tempmodel_set", ".", "rel", ",", "nullable", "=", "False", ")", "new_qs", ".", "query", ".", "join", "(", "conn", ",", "reuse", "=", "None", ")", "return", "new_qs" ]
Join one queryset together with another using a temporary table. If
        no queryset is used, it will use the current queryset and join that
        to itself.

        `Join` either uses the current queryset and effectively does a self-join to
        create a new limited queryset OR it uses a queryset given by the user.

        The model of a given queryset needs to contain a valid foreign key to
        the current queryset to perform a join. A new queryset is then created.
[ "Join", "one", "queryset", "together", "with", "another", "using", "a", "temporary", "table", ".", "If", "no", "queryset", "is", "used", "it", "will", "use", "the", "current", "queryset", "and", "join", "that", "to", "itself", "." ]
python
train
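Exercising `join()` itself requires a configured Django project, but the temporary-table trick it builds can be shown with raw SQL. A sqlite3 sketch of the same CREATE TEMPORARY TABLE / CREATE INDEX / INNER JOIN sequence; the `book`/`author_id` names are invented for the illustration:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.executescript('''
    CREATE TABLE book (id INTEGER PRIMARY KEY, author_id INTEGER);
    INSERT INTO book (id, author_id) VALUES (1, 10), (2, 10), (3, 20);
    DROP TABLE IF EXISTS temp_stuff;
    CREATE TEMPORARY TABLE temp_stuff AS
        SELECT author_id FROM book WHERE id < 3;
    CREATE INDEX temp_stuff_author_id ON temp_stuff (author_id);
''')
rows = conn.execute('''
    SELECT DISTINCT b.id FROM book b
    INNER JOIN temp_stuff t ON t.author_id = b.author_id
''').fetchall()
print(rows)   # [(1,), (2,)] -- books reached through the temp table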
fabioz/PyDev.Debugger
third_party/pep8/pycodestyle.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/pycodestyle.py#L791-L806
def whitespace_around_comma(logical_line): r"""Avoid extraneous whitespace after a comma or a colon. Note: these checks are disabled by default Okay: a = (1, 2) E241: a = (1, 2) E242: a = (1,\t2) """ line = logical_line for m in WHITESPACE_AFTER_COMMA_REGEX.finditer(line): found = m.start() + 1 if '\t' in m.group(): yield found, "E242 tab after '%s'" % m.group()[0] else: yield found, "E241 multiple spaces after '%s'" % m.group()[0]
[ "def", "whitespace_around_comma", "(", "logical_line", ")", ":", "line", "=", "logical_line", "for", "m", "in", "WHITESPACE_AFTER_COMMA_REGEX", ".", "finditer", "(", "line", ")", ":", "found", "=", "m", ".", "start", "(", ")", "+", "1", "if", "'\\t'", "in", "m", ".", "group", "(", ")", ":", "yield", "found", ",", "\"E242 tab after '%s'\"", "%", "m", ".", "group", "(", ")", "[", "0", "]", "else", ":", "yield", "found", ",", "\"E241 multiple spaces after '%s'\"", "%", "m", ".", "group", "(", ")", "[", "0", "]" ]
r"""Avoid extraneous whitespace after a comma or a colon. Note: these checks are disabled by default Okay: a = (1, 2) E241: a = (1, 2) E242: a = (1,\t2)
[ "r", "Avoid", "extraneous", "whitespace", "after", "a", "comma", "or", "a", "colon", "." ]
python
train
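The checker is a generator over a single logical line, so it is easy to drive by hand. A sketch with the regex inlined (this is pycodestyle's definition at the time of writing; treat the exact pattern as an assumption):

import re

WHITESPACE_AFTER_COMMA_REGEX = re.compile(r'[,;:]\s*(?:  |\t)')

def whitespace_around_comma(logical_line):
    line = logical_line
    for m in WHITESPACE_AFTER_COMMA_REGEX.finditer(line):
        found = m.start() + 1
        if '\t' in m.group():
            yield found, "E242 tab after '%s'" % m.group()[0]
        else:
            yield found, "E241 multiple spaces after '%s'" % m.group()[0]

print(list(whitespace_around_comma('a = (1,  2)')))   # E241 at offset 8
print(list(whitespace_around_comma('a = (1,\t2)')))   # E242 at offset 8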
consbio/restle
restle/fields.py
https://github.com/consbio/restle/blob/60d100da034c612d4910f4f79eaa57a76eb3dcc6/restle/fields.py#L145-L157
def to_python(self, value, resource): """Dictionary to Python object""" if isinstance(value, dict): d = { self.aliases.get(k, k): self.to_python(v, resource) if isinstance(v, (dict, list)) else v for k, v in six.iteritems(value) } return type(self.class_name, (), d) elif isinstance(value, list): return [self.to_python(x, resource) if isinstance(x, (dict, list)) else x for x in value] else: return value
[ "def", "to_python", "(", "self", ",", "value", ",", "resource", ")", ":", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "d", "=", "{", "self", ".", "aliases", ".", "get", "(", "k", ",", "k", ")", ":", "self", ".", "to_python", "(", "v", ",", "resource", ")", "if", "isinstance", "(", "v", ",", "(", "dict", ",", "list", ")", ")", "else", "v", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "value", ")", "}", "return", "type", "(", "self", ".", "class_name", ",", "(", ")", ",", "d", ")", "elif", "isinstance", "(", "value", ",", "list", ")", ":", "return", "[", "self", ".", "to_python", "(", "x", ",", "resource", ")", "if", "isinstance", "(", "x", ",", "(", "dict", ",", "list", ")", ")", "else", "x", "for", "x", "in", "value", "]", "else", ":", "return", "value" ]
Dictionary to Python object
[ "Dictionary", "to", "Python", "object" ]
python
train
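A plain-Python sketch of the recursive dict-to-object conversion, with module-level `aliases` and `class_name` standing in for the field's configuration (and `six` dropped, since the sketch targets Python 3 only):

aliases = {'displayFieldName': 'display_field'}   # hypothetical alias map
class_name = 'RemoteObject'

def to_python(value):
    if isinstance(value, dict):
        d = {aliases.get(k, k): to_python(v) if isinstance(v, (dict, list)) else v
             for k, v in value.items()}
        return type(class_name, (), d)
    elif isinstance(value, list):
        return [to_python(x) if isinstance(x, (dict, list)) else x for x in value]
    return value

obj = to_python({'displayFieldName': 'name', 'extent': {'xmin': 0, 'xmax': 10}})
print(obj.display_field)   # 'name'
print(obj.extent.xmin)     # 0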
richardchien/nonebot
nonebot/command/argfilter/validators.py
https://github.com/richardchien/nonebot/blob/13ed9e4e87d9824b61592520aabda6d2737c8848/nonebot/command/argfilter/validators.py#L72-L84
def ensure_true(bool_func: Callable[[Any], bool], message=None) -> Filter_T: """ Validate any object to ensure the result of applying a boolean function to it is True. """ def validate(value): if bool_func(value) is not True: _raise_failure(message) return value return validate
[ "def", "ensure_true", "(", "bool_func", ":", "Callable", "[", "[", "Any", "]", ",", "bool", "]", ",", "message", "=", "None", ")", "->", "Filter_T", ":", "def", "validate", "(", "value", ")", ":", "if", "bool_func", "(", "value", ")", "is", "not", "True", ":", "_raise_failure", "(", "message", ")", "return", "value", "return", "validate" ]
Validate any object to ensure the result of applying a boolean function to it is True.
[ "Validate", "any", "object", "to", "ensure", "the", "result", "of", "applying", "a", "boolean", "function", "to", "it", "is", "True", "." ]
python
train
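A self-contained usage sketch; `_raise_failure` is stubbed here because the real helper lives elsewhere in nonebot.command.argfilter:

class ValidateError(Exception):
    pass

def _raise_failure(message):
    raise ValidateError(message or 'invalid value')

def ensure_true(bool_func, message=None):
    def validate(value):
        if bool_func(value) is not True:
            _raise_failure(message)
        return value
    return validate

positive = ensure_true(lambda n: n > 0, 'number must be positive')
print(positive(3))   # 3, passed through unchanged
# positive(-1)       # would raise ValidateError('number must be positive')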
a1ezzz/wasp-general
wasp_general/signals/signals.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/signals/signals.py#L126-L138
def __watchers_callbacks_exec(self, signal_name): """ Generate callback for a queue :param signal_name: name of a signal that callback is generated for :type signal_name: str :rtype: callable """ def callback_fn(): for watcher in self.__watchers_callbacks[signal_name]: if watcher is not None: watcher.notify() return callback_fn
[ "def", "__watchers_callbacks_exec", "(", "self", ",", "signal_name", ")", ":", "def", "callback_fn", "(", ")", ":", "for", "watcher", "in", "self", ".", "__watchers_callbacks", "[", "signal_name", "]", ":", "if", "watcher", "is", "not", "None", ":", "watcher", ".", "notify", "(", ")", "return", "callback_fn" ]
Generate callback for a queue :param signal_name: name of a signal that callback is generated for :type signal_name: str :rtype: callable
[ "Generate", "callback", "for", "a", "queue" ]
python
train
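A toy standalone version of the callback factory: the closure captures only the signal name, so each generated callback always sees the current watcher list for that signal:

class Watcher:
    def __init__(self, name):
        self.name = name

    def notify(self):
        print('notified:', self.name)

watchers_callbacks = {'sig.ready': [Watcher('a'), None, Watcher('b')]}

def watchers_callbacks_exec(signal_name):
    def callback_fn():
        for watcher in watchers_callbacks[signal_name]:
            if watcher is not None:
                watcher.notify()
    return callback_fn

cb = watchers_callbacks_exec('sig.ready')
cb()   # notified: a / notified: b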
pyca/pyopenssl
src/OpenSSL/crypto.py
https://github.com/pyca/pyopenssl/blob/1fbe064c50fd030948141d7d630673761525b0d0/src/OpenSSL/crypto.py#L1102-L1115
def set_version(self, version): """ Set the version number of the certificate. Note that the version value is zero-based, eg. a value of 0 is V1. :param version: The version number of the certificate. :type version: :py:class:`int` :return: ``None`` """ if not isinstance(version, int): raise TypeError("version must be an integer") _lib.X509_set_version(self._x509, version)
[ "def", "set_version", "(", "self", ",", "version", ")", ":", "if", "not", "isinstance", "(", "version", ",", "int", ")", ":", "raise", "TypeError", "(", "\"version must be an integer\"", ")", "_lib", ".", "X509_set_version", "(", "self", ".", "_x509", ",", "version", ")" ]
Set the version number of the certificate. Note that the version value is zero-based, eg. a value of 0 is V1. :param version: The version number of the certificate. :type version: :py:class:`int` :return: ``None``
[ "Set", "the", "version", "number", "of", "the", "certificate", ".", "Note", "that", "the", "version", "value", "is", "zero", "-", "based", "eg", ".", "a", "value", "of", "0", "is", "V1", "." ]
python
test
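Usage is a one-liner; the only trap is the zero-based value. A quick sketch, assuming pyOpenSSL is installed:

from OpenSSL import crypto

cert = crypto.X509()
cert.set_version(2)          # zero-based: 2 selects an X.509 v3 certificate
print(cert.get_version())    # 2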
juju/charm-helpers
charmhelpers/contrib/saltstack/__init__.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/saltstack/__init__.py#L88-L104
def install_salt_support(from_ppa=True): """Installs the salt-minion helper for machine state. By default the salt-minion package is installed from the saltstack PPA. If from_ppa is False you must ensure that the salt-minion package is available in the apt cache. """ if from_ppa: subprocess.check_call([ '/usr/bin/add-apt-repository', '--yes', 'ppa:saltstack/salt', ]) subprocess.check_call(['/usr/bin/apt-get', 'update']) # We install salt-common as salt-minion would run the salt-minion # daemon. charmhelpers.fetch.apt_install('salt-common')
[ "def", "install_salt_support", "(", "from_ppa", "=", "True", ")", ":", "if", "from_ppa", ":", "subprocess", ".", "check_call", "(", "[", "'/usr/bin/add-apt-repository'", ",", "'--yes'", ",", "'ppa:saltstack/salt'", ",", "]", ")", "subprocess", ".", "check_call", "(", "[", "'/usr/bin/apt-get'", ",", "'update'", "]", ")", "# We install salt-common as salt-minion would run the salt-minion", "# daemon.", "charmhelpers", ".", "fetch", ".", "apt_install", "(", "'salt-common'", ")" ]
Installs the salt-minion helper for machine state. By default the salt-minion package is installed from the saltstack PPA. If from_ppa is False you must ensure that the salt-minion package is available in the apt cache.
[ "Installs", "the", "salt", "-", "minion", "helper", "for", "machine", "state", "." ]
python
train
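The real function mutates the system with apt, so a sketch here defaults to a dry run: commands are printed rather than executed, and the final line stands in for charmhelpers.fetch.apt_install('salt-common'):

import subprocess

def install_salt_support(from_ppa=True, run=None):
    # pass run=subprocess.check_call to actually perform the steps
    run = run or (lambda cmd: print('would run:', ' '.join(cmd)))
    if from_ppa:
        run(['/usr/bin/add-apt-repository', '--yes', 'ppa:saltstack/salt'])
        run(['/usr/bin/apt-get', 'update'])
    run(['/usr/bin/apt-get', 'install', '--yes', 'salt-common'])

install_salt_support()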
mfitzp/biocyc
biocyc/biocyc.py
https://github.com/mfitzp/biocyc/blob/2fe81971687e4dcf1fcf869af0e7b3549be535b1/biocyc/biocyc.py#L338-L379
def get_from_cache(self, org_id, id):
        '''
        Get an object from the cache
        Use all cache folders available (primary first, then secondary in order)
        and look for the ID in the dir if found unpickle and return the object, else return None
        FIXME: Check for expiry of object! Return None if expired (will auto-refetch and overwrite)
        '''
        current_time = datetime.now()

        # Check memory cache first
        if id in self.memory_cache[org_id]:
            obj = self.memory_cache[org_id][id]
            if obj.created_at > current_time - self.expire_records_after:
                return obj

        for cache in [self.cache_path] + self.secondary_cache_paths:
            read_path = os.path.join( cache, org_id, id )
            try:
                with open(read_path, 'rb') as f:
                    obj = pickle.load(f)
            except:
                # Continue to try the next cache
                pass
            else:
                # It worked so we have obj
                # Check for expiry date; if it's not expired return it else continue
                if obj.created_at > current_time - self.expire_records_after:
                    # If we're here it mustn't be in the memory cache
                    self.memory_cache[org_id][id] = obj
                    if len(self.memory_cache[org_id]) > self.max_memory_cache:
                        self.memory_cache[org_id].popitem(last=False)
                    return obj
                # Else continue looking

        # We found nothing (or all expired)
        return None
[ "def", "get_from_cache", "(", "self", ",", "org_id", ",", "id", ")", ":", "current_time", "=", "datetime", ".", "now", "(", ")", "# Check memory cache first", "if", "id", "in", "self", ".", "memory_cache", "[", "org_id", "]", ":", "obj", "=", "self", ".", "memory_cache", "[", "org_id", "]", "[", "id", "]", "if", "obj", ".", "created_at", ">", "current_time", "-", "self", ".", "expire_records_after", ":", "return", "obj", "for", "cache", "in", "[", "self", ".", "cache_path", "]", "+", "self", ".", "secondary_cache_paths", ":", "read_path", "=", "os", ".", "path", ".", "join", "(", "cache", ",", "org_id", ",", "id", ")", "try", ":", "with", "open", "(", "read_path", ",", "'rb'", ")", "as", "f", ":", "obj", "=", "pickle", ".", "load", "(", "f", ")", "except", ":", "# Continue to try the next cache", "pass", "else", ":", "# It worked so we have obj", "# Check for expiry date; if it's not expired return it else continue", "if", "obj", ".", "created_at", ">", "current_time", "-", "self", ".", "expire_records_after", ":", "# If we're here it mustn't be in the memory cache", "self", ".", "memory_cache", "[", "org_id", "]", "[", "id", "]", "=", "obj", "if", "len", "(", "self", ".", "memory_cache", "[", "org_id", "]", ")", ">", "self", ".", "max_memory_cache", ":", "self", ".", "memory_cache", "[", "org_id", "]", ".", "popitem", "(", "last", "=", "False", ")", "return", "obj", "# Else continue looking", "# We found nothing (or all expired)", "return", "None" ]
Get an object from the cache Use all cache folders available (primary first, then secondary in order) and look for the ID in the dir if found unpickle and return the object, else return False FIXME: Check for expiry of object! Return false is expired (will auto-refetch and overwrite)
[ "Get", "an", "object", "from", "the", "cache", "Use", "all", "cache", "folders", "available", "(", "primary", "first", "then", "secondary", "in", "order", ")", "and", "look", "for", "the", "ID", "in", "the", "dir", "if", "found", "unpickle", "and", "return", "the", "object", "else", "return", "False", "FIXME", ":", "Check", "for", "expiry", "of", "object!", "Return", "false", "is", "expired", "(", "will", "auto", "-", "refetch", "and", "overwrite", ")" ]
python
train
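A simplified two-tier lookup mirroring `get_from_cache`: memory first, then pickle files on disk, honouring the expiry window. The cache layout on disk is assumed, not taken from biocyc:

import os
import pickle
from datetime import datetime, timedelta

expire_records_after = timedelta(days=30)
memory_cache = {}

def get_from_cache(cache_dirs, org_id, obj_id):
    now = datetime.now()
    obj = memory_cache.get((org_id, obj_id))
    if obj is not None and obj.created_at > now - expire_records_after:
        return obj
    for cache in cache_dirs:
        path = os.path.join(cache, org_id, obj_id)
        try:
            with open(path, 'rb') as f:
                obj = pickle.load(f)
        except (OSError, pickle.PickleError):
            continue   # try the next cache folder
        if obj.created_at > now - expire_records_after:
            memory_cache[(org_id, obj_id)] = obj
            return obj
    return None

print(get_from_cache(['/tmp/biocyc-cache'], 'HUMAN', 'ATP'))   # None unless cached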