Dataset schema (column, dtype, observed range):

    repo              string   length 7-55
    path              string   length 4-223
    url               string   length 87-315
    code              string   length 75-104k
    code_tokens       list
    docstring         string   length 1-46.9k
    docstring_tokens  list
    language          string   1 distinct value
    partition         string   3 distinct values
    avg_line_len      float64  7.91-980
csparpa/pyowm
pyowm/weatherapi25/forecast.py
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/weatherapi25/forecast.py#L206-L223
def _to_DOM(self):
    """
    Dumps object data to a fully traversable DOM representation of
    the object.

    :returns: a ``xml.etree.Element`` object
    """
    root_node = ET.Element("forecast")
    interval_node = ET.SubElement(root_node, "interval")
    interval_node.text = self._interval
    reception_time_node = ET.SubElement(root_node, "reception_time")
    reception_time_node.text = str(self._reception_time)
    root_node.append(self._location._to_DOM())
    weathers_node = ET.SubElement(root_node, "weathers")
    for weather in self:
        weathers_node.append(weather._to_DOM())
    return root_node
Dumps object data to a fully traversable DOM representation of the object. :returns: a ``xml.etree.Element`` object
python
train
37.055556
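A quick usage note on the entry above: the returned node is a standard `xml.etree.ElementTree` element, so serialization is one call away. A minimal sketch, assuming a pyowm Forecast instance named `forecast` (the name is illustrative):

import xml.etree.ElementTree as ET

root = forecast._to_DOM()  # `forecast`: hypothetical pyowm Forecast instance
xml_text = ET.tostring(root, encoding='unicode')
print(xml_text)  # '<forecast><interval>...</interval><reception_time>...</reception_time>...'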
FujiMakoto/AgentML
agentml/parser/tags/condition.py
https://github.com/FujiMakoto/AgentML/blob/c8cb64b460d876666bf29ea2c682189874c7c403/agentml/parser/tags/condition.py#L37-L41
def value(self):
    """
    Return the current evaluation of a condition statement
    """
    return ''.join(map(str, self.evaluate(self.trigger.user)))
Return the current evaluation of a condition statement
python
train
33.2
eaton-lab/toytree
toytree/etemini.py
https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/toytree/etemini.py#L1942-L1964
def get_monophyletic(self, values, target_attr):
    """
    Returns a list of nodes matching the provided monophyly
    criteria. For a node to be considered a match, all
    `target_attr` values within a node, and exclusively them,
    should be grouped.

    :param values: a set of values for which monophyly is
        expected.

    :param target_attr: node attribute being used to check
        monophyly (i.e. species for species trees, names for gene
        family trees).
    """
    if type(values) != set:
        values = set(values)

    n2values = self.get_cached_content(store_attr=target_attr)

    is_monophyletic = lambda node: n2values[node] == values
    for match in self.iter_leaves(is_leaf_fn=is_monophyletic):
        if is_monophyletic(match):
            yield match
Returns a list of nodes matching the provided monophyly criteria. For a node to be considered a match, all `target_attr` values within a node, and exclusively them, should be grouped. :param values: a set of values for which monophyly is expected. :param target_attr: node attribute being used to check monophyly (i.e. species for species trees, names for gene family trees).
python
train
36.782609
insightindustry/validator-collection
validator_collection/checkers.py
https://github.com/insightindustry/validator-collection/blob/8c8047a0fa36cc88a021771279898278c4cc98e3/validator_collection/checkers.py#L927-L960
def is_fraction(value, minimum=None, maximum=None, **kwargs):
    """Indicate whether ``value`` is a :class:`Fraction <python:fractions.Fraction>`.

    :param value: The value to evaluate.

    :param minimum: If supplied, will make sure that ``value`` is greater
      than or equal to this value.
    :type minimum: numeric

    :param maximum: If supplied, will make sure that ``value`` is less
      than or equal to this value.
    :type maximum: numeric

    :returns: ``True`` if ``value`` is valid, ``False`` if it is not.
    :rtype: :class:`bool <python:bool>`

    :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters
      or duplicates keyword parameters passed to the underlying validator
    """
    try:
        value = validators.fraction(value,
                                    minimum=minimum,
                                    maximum=maximum,
                                    **kwargs)
    except SyntaxError as error:
        raise error
    except Exception:
        return False

    return True
Indicate whether ``value`` is a :class:`Fraction <python:fractions.Fraction>`. :param value: The value to evaluate. :param minimum: If supplied, will make sure that ``value`` is greater than or equal to this value. :type minimum: numeric :param maximum: If supplied, will make sure that ``value`` is less than or equal to this value. :type maximum: numeric :returns: ``True`` if ``value`` is valid, ``False`` if it is not. :rtype: :class:`bool <python:bool>` :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates keyword parameters passed to the underlying validator
python
train
31.529412
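A minimal usage sketch for the checker above, assuming validator-collection is installed and exposes it as `validator_collection.checkers.is_fraction` (consistent with the `path` field):

from fractions import Fraction
from validator_collection import checkers

checkers.is_fraction(Fraction(1, 2), minimum=0, maximum=1)  # True
checkers.is_fraction(Fraction(3, 2), maximum=1)             # False: above maximum
checkers.is_fraction('not a fraction')                      # False: not coercible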
chuck1/codemach
codemach/machine.py
https://github.com/chuck1/codemach/blob/b0e02f363da7aa58de7d6ad6499784282958adeb/codemach/machine.py#L433-L464
def call_function(self, c, i):
    """
    Implement the CALL_FUNCTION_ operation.

    .. _CALL_FUNCTION: https://docs.python.org/3/library/dis.html#opcode-CALL_FUNCTION
    """
    callable_ = self.__stack[-1 - i.arg]

    args = tuple(self.__stack[len(self.__stack) - i.arg:])

    self._print('call function')
    self._print('\tfunction ', callable_)
    self._print('\ti.arg ', i.arg)
    self._print('\targs ', args)

    self.call_callbacks('CALL_FUNCTION', callable_, *args)

    if isinstance(callable_, FunctionType):
        ret = callable_(*args)
    elif callable_ is builtins.__build_class__:
        ret = self.build_class(callable_, args)
    elif callable_ is builtins.globals:
        ret = self.builtins_globals()
    else:
        ret = callable_(*args)

    self.pop(1 + i.arg)
    self.__stack.append(ret)
Implement the CALL_FUNCTION_ operation. .. _CALL_FUNCTION: https://docs.python.org/3/library/dis.html#opcode-CALL_FUNCTION
python
test
28.4375
theislab/scanpy
scanpy/tools/_rank_genes_groups.py
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/tools/_rank_genes_groups.py#L425-L532
def filter_rank_genes_groups(adata, key=None, groupby=None, use_raw=True,
                             log=True,
                             key_added='rank_genes_groups_filtered',
                             min_in_group_fraction=0.25,
                             min_fold_change=2,
                             max_out_group_fraction=0.5):
    """Filters out genes based on fold change and fraction of cells expressing the
    gene within and outside the `groupby` categories.

    See :func:`~scanpy.tl.rank_genes_groups`.

    Results are stored in `adata.uns[key_added]`
    (default: 'rank_genes_groups_filtered').

    To preserve the original structure of adata.uns['rank_genes_groups'],
    filtered genes are set to `NaN`.

    Parameters
    ----------
    adata: :class:`~anndata.AnnData`
    key
    groupby
    use_raw
    log : if true, it means that the values to work with are in log scale
    key_added
    min_in_group_fraction
    min_fold_change
    max_out_group_fraction

    Returns
    -------
    Same output as :ref:`scanpy.tl.rank_genes_groups` but with filtered genes
    names set to `nan`

    Examples
    --------
    >>> adata = sc.datasets.pbmc68k_reduced()
    >>> sc.tl.rank_genes_groups(adata, 'bulk_labels', method='wilcoxon')
    >>> sc.tl.filter_rank_genes_groups(adata, min_fold_change=3)
    >>> # visualize results
    >>> sc.pl.rank_genes_groups(adata, key='rank_genes_groups_filtered')
    >>> # visualize results using dotplot
    >>> sc.pl.rank_genes_groups_dotplot(adata, key='rank_genes_groups_filtered')
    """
    if key is None:
        key = 'rank_genes_groups'

    if groupby is None:
        groupby = str(adata.uns[key]['params']['groupby'])

    # convert structured numpy array into DataFrame
    gene_names = pd.DataFrame(adata.uns[key]['names'])

    fraction_in_cluster_matrix = pd.DataFrame(np.zeros(gene_names.shape),
                                              columns=gene_names.columns,
                                              index=gene_names.index)
    fold_change_matrix = pd.DataFrame(np.zeros(gene_names.shape),
                                      columns=gene_names.columns,
                                      index=gene_names.index)
    fraction_out_cluster_matrix = pd.DataFrame(np.zeros(gene_names.shape),
                                               columns=gene_names.columns,
                                               index=gene_names.index)
    logg.info("Filtering genes using: min_in_group_fraction: {} "
              "min_fold_change: {}, max_out_group_fraction: {}".format(
                  min_in_group_fraction, min_fold_change, max_out_group_fraction))
    from ..plotting._anndata import _prepare_dataframe
    for cluster in gene_names.columns:
        # iterate per column
        var_names = gene_names[cluster].values

        # add column to adata as __is_in_cluster__. This facilitates to measure
        # fold change of each gene with respect to all other clusters
        adata.obs['__is_in_cluster__'] = pd.Categorical(adata.obs[groupby] == cluster)

        # obs_tidy has rows=groupby, columns=var_names
        categories, obs_tidy = _prepare_dataframe(adata, var_names,
                                                  groupby='__is_in_cluster__',
                                                  use_raw=use_raw)

        # for each category defined by groupby (if any) compute for each var_name
        # 1. the mean value over the category
        # 2. the fraction of cells in the category having a value > 0

        # 1. compute mean value
        mean_obs = obs_tidy.groupby(level=0).mean()

        # 2. compute fraction of cells having value > 0
        # transform obs_tidy into boolean matrix
        obs_bool = obs_tidy.astype(bool)

        # compute the sum per group, which in the boolean matrix is the number
        # of values > 0, and divide the result by the total number of values in
        # the group (given by `count()`)
        fraction_obs = obs_bool.groupby(level=0).sum() / obs_bool.groupby(level=0).count()

        # Because the dataframe groupby is based on the '__is_in_cluster__' column,
        # in this context, [True] means __is_in_cluster__.
        # Also, in this context, fraction_obs.loc[True].values is the row of values
        # that is assigned *as column* to fraction_in_cluster_matrix to follow the
        # structure of the gene_names dataFrame
        fraction_in_cluster_matrix.loc[:, cluster] = fraction_obs.loc[True].values
        fraction_out_cluster_matrix.loc[:, cluster] = fraction_obs.loc[False].values

        # compute fold change.
        if log:
            fold_change_matrix.loc[:, cluster] = (np.exp(mean_obs.loc[True]) /
                                                  np.exp(mean_obs.loc[False])).values
        else:
            fold_change_matrix.loc[:, cluster] = (mean_obs.loc[True] /
                                                  mean_obs.loc[False]).values

    # remove temporary columns
    adata.obs.drop(columns='__is_in_cluster__')

    # filter original_matrix
    gene_names = gene_names[(fraction_in_cluster_matrix > min_in_group_fraction) &
                            (fraction_out_cluster_matrix < max_out_group_fraction) &
                            (fold_change_matrix > min_fold_change)]

    # create new structured array using 'key_added'.
    adata.uns[key_added] = adata.uns[key].copy()
    adata.uns[key_added]['names'] = gene_names.to_records(index=False)
Filters out genes based on fold change and fraction of cells expressing the gene within and outside the `groupby` categories. See :func:`~scanpy.tl.rank_genes_groups`. Results are stored in `adata.uns[key_added]` (default: 'rank_genes_groups_filtered'). To preserve the original structure of adata.uns['rank_genes_groups'], filtered genes are set to `NaN`. Parameters ---------- adata: :class:`~anndata.AnnData` key groupby use_raw log : if true, it means that the values to work with are in log scale key_added min_in_group_fraction min_fold_change max_out_group_fraction Returns ------- Same output as :ref:`scanpy.tl.rank_genes_groups` but with filtered genes names set to `nan` Examples -------- >>> adata = sc.datasets.pbmc68k_reduced() >>> sc.tl.rank_genes_groups(adata, 'bulk_labels', method='wilcoxon') >>> sc.tl.filter_rank_genes_groups(adata, min_fold_change=3) >>> # visualize results >>> sc.pl.rank_genes_groups(adata, key='rank_genes_groups_filtered') >>> # visualize results using dotplot >>> sc.pl.rank_genes_groups_dotplot(adata, key='rank_genes_groups_filtered')
python
train
46.277778
honzajavorek/redis-collections
redis_collections/sets.py
https://github.com/honzajavorek/redis-collections/blob/07ca8efe88fb128f7dc7319dfa6a26cd39b3776b/redis_collections/sets.py#L78-L83
def add(self, value):
    """Add element *value* to the set."""
    # Raise TypeError if value is not hashable
    hash(value)
    self.redis.sadd(self.key, self._pickle(value))
Add element *value* to the set.
python
train
31.5
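The bare `hash(value)` call in the entry above is the whole type check: it reproduces built-in `set` semantics by rejecting unhashable values before anything is written to Redis. A sketch of the resulting behavior, with a hypothetical `Set` construction (check the redis-collections docs for the exact constructor):

s = Set(redis=redis_client, key='demo')  # hypothetical: a redis-collections Set
s.add('spam')    # str is hashable; SADD is issued
s.add(['spam'])  # TypeError from hash(), like built-in set; Redis is never touched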
soravux/scoop
scoop/launch/workerLaunch.py
https://github.com/soravux/scoop/blob/d391dfa62f47e49d48328ee9cf08aa114256fd33/scoop/launch/workerLaunch.py#L61-L74
def setWorker(self, *args, **kwargs):
    """Add a worker assignation.

    Arguments and order to pass are defined in LAUNCHING_ARGUMENTS.
    Using named args is advised.
    """
    try:
        la = self.LAUNCHING_ARGUMENTS(*args, **kwargs)
    except TypeError:
        scoop.logger.error(
            ("addWorker failed to convert args %s and kwargs %s "
             "to namedtuple (requires %s arguments (names %s)")
            % (args, kwargs,
               len(self.LAUNCHING_ARGUMENTS._fields),
               self.LAUNCHING_ARGUMENTS._fields))
        # Bail out: without this return, `la` would be unbound below and the
        # assignment would raise a NameError on top of the logged error.
        return
    self.workersArguments = la
Add a worker assignation. Arguments and order to pass are defined in LAUNCHING_ARGUMENTS. Using named args is advised.
python
train
45.642857
neuropsychology/NeuroKit.py
neurokit/bio/bio_emg.py
https://github.com/neuropsychology/NeuroKit.py/blob/c9589348fbbde0fa7e986048c48f38e6b488adfe/neurokit/bio/bio_emg.py#L185-L234
def emg_linear_envelope(emg, sampling_rate=1000, freqs=[10, 400], lfreq=4):
    r"""Calculate the linear envelope of a signal.

    Parameters
    ----------
    emg : array
        raw EMG signal.
    sampling_rate : int
        Sampling rate (samples/second).
    freqs : list [fc_h, fc_l], optional
        cutoff frequencies for the band-pass filter (in Hz).
    lfreq : number, optional
        cutoff frequency for the low-pass filter (in Hz).

    Returns
    -------
    envelope : array
        linear envelope of the signal.

    Notes
    -----
    *Authors*

    - Marcos Duarte

    *See Also*

    See this notebook [1]_.

    References
    ----------
    .. [1] https://github.com/demotu/BMC/blob/master/notebooks/Electromyography.ipynb
    """
    emg = emg_tkeo(emg)

    if np.size(freqs) == 2:
        # band-pass filter
        b, a = scipy.signal.butter(2, np.array(freqs) / (sampling_rate / 2.), btype='bandpass')
        emg = scipy.signal.filtfilt(b, a, emg)

    if np.size(lfreq) == 1:
        # full-wave rectification
        envelope = abs(emg)
        # low-pass Butterworth filter
        b, a = scipy.signal.butter(2, np.array(lfreq) / (sampling_rate / 2.), btype='low')
        envelope = scipy.signal.filtfilt(b, a, envelope)

    return envelope
Calculate the linear envelope of a signal. Parameters ---------- emg : array raw EMG signal. sampling_rate : int Sampling rate (samples/second). freqs : list [fc_h, fc_l], optional cutoff frequencies for the band-pass filter (in Hz). lfreq : number, optional cutoff frequency for the low-pass filter (in Hz). Returns ------- envelope : array linear envelope of the signal. Notes ----- *Authors* - Marcos Duarte *See Also* See this notebook [1]_. References ---------- .. [1] https://github.com/demotu/BMC/blob/master/notebooks/Electromyography.ipynb
python
train
24.8
ashmastaflash/kal-wrapper
kalibrate/fn.py
https://github.com/ashmastaflash/kal-wrapper/blob/80ee03ab7bd3172ac26b769d6b442960f3424b0e/kalibrate/fn.py#L103-L118
def extract_value_from_output(canary, split_offset, kal_out):
    """Return value parsed from output.

    Args:
        canary(str): This string must exist in the target line.
        split_offset(int): Split offset for target value in string.
        kal_out(str): Output from kal.
    """
    retval = ""
    while retval == "":
        for line in kal_out.splitlines():
            if canary in line:
                retval = str(line.split()[split_offset])
        if retval == "":
            retval = None
    return retval
Return value parsed from output. Args: canary(str): This string must exist in the target line. split_offset(int): Split offset for target value in string. kal_out(str): Output from kal.
python
train
32.1875
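A small self-contained usage sketch for the parser above; the sample text is fabricated kal-style output, not captured from a real run:

sample_out = (
    "kal: Scanning for GSM-900 base stations.\n"
    "chan: 5 (936.0MHz + 13.421kHz) power: 780221.35\n"
)

# Grab token index 1 from the line containing "chan:"
extract_value_from_output("chan:", 1, sample_out)      # -> '5'
extract_value_from_output("missing:", 1, sample_out)   # -> None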
spdx/tools-python
spdx/parsers/rdfbuilders.py
https://github.com/spdx/tools-python/blob/301d72f6ae57c832c1da7f6402fa49b192de6810/spdx/parsers/rdfbuilders.py#L140-L149
def set_chksum(self, doc, chk_sum):
    """
    Sets the external document reference's check sum, if not already set.
    chk_sum - The checksum value in the form of a string.
    """
    if chk_sum:
        doc.ext_document_references[-1].check_sum = checksum.Algorithm(
            'SHA1', chk_sum)
    else:
        raise SPDXValueError('ExternalDocumentRef::Checksum')
Sets the external document reference's check sum, if not already set. chk_sum - The checksum value in the form of a string.
python
valid
39.9
bcbio/bcbio-nextgen
bcbio/variation/varscan.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/varscan.py#L167-L271
def fix_varscan_output(line, normal_name="", tumor_name=""):
    """Fix a varscan VCF line.

    Fixes the ALT column and also fixes floating point values output as
    strings to be Floats: FREQ, SSC.

    This function was contributed by Sean Davis <[email protected]>,
    with minor modifications by Luca Beltrame <[email protected]>.
    """
    line = line.strip()
    tofix = ("##INFO=<ID=SSC", "##FORMAT=<ID=FREQ")
    if line.startswith("##"):
        if line.startswith(tofix):
            line = line.replace('Number=1,Type=String', 'Number=1,Type=Float')
        return line

    line = line.split("\t")
    if line[0].startswith("#CHROM"):
        if tumor_name and normal_name:
            mapping = {"NORMAL": normal_name, "TUMOR": tumor_name}
            base_header = line[:9]
            old_samples = line[9:]
            if len(old_samples) == 0:
                return "\t".join(line)
            samples = [mapping[sample_name] for sample_name in old_samples]
            assert len(old_samples) == len(samples)
            return "\t".join(base_header + samples)
        else:
            return "\t".join(line)
    try:
        REF, ALT = line[3:5]
    except ValueError:
        return "\t".join(line)

    def _normalize_freq(line, sample_i):
        """Ensure FREQ genotype value is float as defined in header."""
        ft_parts = line[8].split(":")
        dat = line[sample_i].split(":")
        # Non-conforming no-call sample, don't try to fix FREQ
        if len(dat) != len(ft_parts):
            return line
        freq_i = ft_parts.index("FREQ")
        try:
            dat[freq_i] = str(float(dat[freq_i].rstrip("%")) / 100)
        except ValueError:
            # illegal binary characters -- set frequency to zero
            dat[freq_i] = "0.0"
        line[sample_i] = ":".join(dat)
        return line

    if len(line) > 9:
        line = _normalize_freq(line, 9)
    if len(line) > 10:
        line = _normalize_freq(line, 10)

    # HACK: The position of the SS= changes, so we just search for it
    ss_vals = [item for item in line[7].split(";") if item.startswith("SS=")]
    if len(ss_vals) > 0:
        somatic_status = int(ss_vals[0].split("=")[1])  # Get the number
    else:
        somatic_status = None
    if somatic_status == 5:
        # "Unknown" states are broken in current versions of VarScan
        # so we just bail out here for now
        return

    # fix FREQ for any additional samples -- multi-sample VarScan calling
    if len(line) > 11:
        for i in range(11, len(line)):
            line = _normalize_freq(line, i)

    # FIXME: VarScan also produces invalid REF records (e.g. CAA/A)
    # This is not handled yet.
    if "+" in ALT or "-" in ALT:
        if "/" not in ALT:
            if ALT[0] == "+":
                R = REF
                A = REF + ALT[1:]
            elif ALT[0] == "-":
                R = REF + ALT[1:]
                A = REF
        else:
            Ins = [p[1:] for p in ALT.split("/") if p[0] == "+"]
            Del = [p[1:] for p in ALT.split("/") if p[0] == "-"]
            if len(Del):
                REF += sorted(Del, key=lambda x: len(x))[-1]
            A = ",".join([REF[::-1].replace(p[::-1], "", 1)[::-1]
                          for p in Del] + [REF + p for p in Ins])
            R = REF
        REF = R
        ALT = A
    else:
        ALT = ALT.replace('/', ',')
    line[3] = REF
    line[4] = ALT
    return "\t".join(line)
Fix a varscan VCF line. Fixes the ALT column and also fixes floating point values output as strings to be Floats: FREQ, SSC. This function was contributed by Sean Davis <[email protected]>, with minor modifications by Luca Beltrame <[email protected]>.
python
train
33.571429
Yelp/threat_intel
threat_intel/util/http.py
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/http.py#L382-L397
def _convert_to_json(self, response):
    """Converts response to JSON.
    If the response cannot be converted to JSON then `None` is returned.

    Args:
        response - An object of type `requests.models.Response`
    Returns:
        Response in JSON format if the response can be converted to JSON.
        `None` otherwise.
    """
    try:
        return response.json()
    except ValueError:
        logging.warning('Expected response in JSON format from {0} but the actual response text is: {1}'.format(
            response.request.url,
            response.text,
        ))
        return None
Converts response to JSON. If the response cannot be converted to JSON then `None` is returned. Args: response - An object of type `requests.models.Response` Returns: Response in JSON format if the response can be converted to JSON. `None` otherwise.
python
train
39.0625
log2timeline/dfvfs
dfvfs/vfs/cpio_file_entry.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/cpio_file_entry.py#L118-L138
def _GetLink(self):
    """Retrieves the link.

    Returns:
        str: full path of the linked file entry.
    """
    if self._link is None:
        self._link = ''

        if self.entry_type != definitions.FILE_ENTRY_TYPE_LINK:
            return self._link

        cpio_archive_file = self._file_system.GetCPIOArchiveFile()
        link_data = cpio_archive_file.ReadDataAtOffset(
            self._cpio_archive_file_entry.data_offset,
            self._cpio_archive_file_entry.data_size)

        # TODO: should this be ASCII?
        self._link = link_data.decode('ascii')

    return self._link
Retrieves the link. Returns: str: full path of the linked file entry.
python
train
26.761905
BlueHack-Core/blueforge
blueforge/util/trans.py
https://github.com/BlueHack-Core/blueforge/blob/ac40a888ee9c388638a8f312c51f7500b8891b6c/blueforge/util/trans.py#L6-L14
def download_file(save_path, file_url):
    """ Download file from http url link """

    r = requests.get(file_url)  # create HTTP response object

    with open(save_path, 'wb') as f:
        f.write(r.content)

    return save_path
Download file from http url link
python
train
25.111111
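A usage sketch for the helper above (URL and path are placeholders). Since `r.content` buffers the entire body in memory, a streamed variant is the usual alternative for large files; the sketch below uses only documented requests APIs:

import requests

download_file('/tmp/logo.png', 'https://example.com/logo.png')  # placeholder URL/path

def download_file_streamed(save_path, file_url, chunk_size=1 << 16):
    # Stream chunks to disk instead of buffering the whole body.
    with requests.get(file_url, stream=True) as r:
        r.raise_for_status()
        with open(save_path, 'wb') as f:
            for chunk in r.iter_content(chunk_size=chunk_size):
                f.write(chunk)
    return save_path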
yakupadakli/python-unsplash
unsplash/auth.py
https://github.com/yakupadakli/python-unsplash/blob/6e43dce3225237e1b8111fd475fb98b1ea33972c/unsplash/auth.py#L83-L88
def refresh_token(self):
    """
    Refreshing the current expired access token
    """
    self.token = self.oauth.refresh_token(self.access_token_url,
                                          refresh_token=self.get_refresh_token())
    self.access_token = self.token.get("access_token")
Refreshing the current expired access token
python
train
43.833333
crunchyroll/ef-open
efopen/ef_cf_diff.py
https://github.com/crunchyroll/ef-open/blob/59fff3761af07a59f8f1c1682f2be004bdac15f7/efopen/ef_cf_diff.py#L61-L78
def render_local_template(service_name, environment, repo_root, template_file):
    """
    Render a given service's template for a given environment and return it
    """
    cmd = 'cd {} && ef-cf {} {} --devel --verbose'.format(repo_root, template_file, environment)
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()

    if p.returncode != 0:
        stderr = indentify('\n{}'.format(stderr))
        stdout = indentify('\n{}'.format(stdout))
        raise Exception('Service: `{}`, Env: `{}`, Msg: `{}{}`'
                        .format(service_name, environment, stderr, stdout))

    logger.debug('Rendered template for `%s` in `%s`', template_file, environment)

    r = re.match(r".*(^{.*^})$", stdout, re.MULTILINE | re.DOTALL)
    return jsonify(json.loads(r.group(1)))
Render a given service's template for a given environment and return it
python
train
46.666667
fake-name/ChromeController
ChromeController/manager.py
https://github.com/fake-name/ChromeController/blob/914dd136184e8f1165c7aa6ef30418aaf10c61f0/ChromeController/manager.py#L165-L236
def get_cookies(self):
    '''
    Retrieve the cookies from the remote browser.

    Return value is a list of http.cookiejar.Cookie() instances.
    These can be directly used with the various http.cookiejar.XXXCookieJar
    cookie management classes.
    '''
    ret = self.Network_getAllCookies()

    assert 'result' in ret, "No return value in function response!"
    assert 'cookies' in ret['result'], "No 'cookies' key in function response"

    cookies = []
    for raw_cookie in ret['result']['cookies']:
        # Chromium seems to support the following key values for the cookie dict:
        #     "name"
        #     "value"
        #     "domain"
        #     "path"
        #     "expires"
        #     "httpOnly"
        #     "session"
        #     "secure"
        #
        # This seems supported by the fact that the underlying chromium cookie
        # implementation has the following members:
        #     std::string name_;
        #     std::string value_;
        #     std::string domain_;
        #     std::string path_;
        #     base::Time creation_date_;
        #     base::Time expiry_date_;
        #     base::Time last_access_date_;
        #     bool secure_;
        #     bool httponly_;
        #     CookieSameSite same_site_;
        #     CookiePriority priority_;
        #
        # See chromium/net/cookies/canonical_cookie.h for more.
        #
        # I suspect the python cookie implementation is derived exactly from the
        # standard, while the chromium implementation is more of a practically
        # derived structure.

        # Network.setCookie
        baked_cookie = http.cookiejar.Cookie(
            # We assume V0 cookies, principally because I don't think I've /ever/
            # actually encountered a V1 cookie. Chromium doesn't seem to specify it.
            version=0,
            name=raw_cookie['name'],
            value=raw_cookie['value'],
            port=None,
            port_specified=False,
            domain=raw_cookie['domain'],
            domain_specified=True,
            domain_initial_dot=False,
            path=raw_cookie['path'],
            path_specified=False,
            secure=raw_cookie['secure'],
            expires=raw_cookie['expires'],
            discard=raw_cookie['session'],
            comment=None,
            comment_url=None,
            rest={"httponly": "%s" % raw_cookie['httpOnly']},
            rfc2109=False,
        )
        cookies.append(baked_cookie)

    return cookies
Retrieve the cookies from the remote browser. Return value is a list of http.cookiejar.Cookie() instances. These can be directly used with the various http.cookiejar.XXXCookieJar cookie management classes.
python
train
32.319444
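As the docstring above says, the returned instances work with the stdlib cookie jars directly; a minimal sketch, assuming `cr` is a connected ChromeController manager instance (the variable name is illustrative):

import http.cookiejar
import urllib.request

jar = http.cookiejar.CookieJar()
for cookie in cr.get_cookies():  # `cr`: hypothetical connected manager
    jar.set_cookie(cookie)

# The jar can now back a urllib opener, so stdlib requests share the browser session.
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(jar))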
ga4gh/ga4gh-server
oidc-provider/simple_op/src/provider/server/server.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/oidc-provider/simple_op/src/provider/server/server.py#L129-L137
def _webfinger(provider, request, **kwargs):
    """Handle webfinger requests."""
    params = urlparse.parse_qs(request)
    if params["rel"][0] == OIC_ISSUER:
        wf = WebFinger()
        return Response(wf.response(params["resource"][0], provider.baseurl),
                        headers=[("Content-Type", "application/jrd+json")])
    else:
        return BadRequest("Incorrect webfinger.")
Handle webfinger requests.
python
train
43.444444
casacore/python-casacore
casacore/images/image.py
https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/images/image.py#L492-L519
def tofits(self, filename, overwrite=True, velocity=True,
           optical=True, bitpix=-32, minpix=1, maxpix=-1):
    """Write the image to a file in FITS format.

    `filename`
      FITS file name
    `overwrite`
      If False, an exception is raised if the new image file already
      exists. Default is True.
    `velocity`
      By default a velocity primary spectral axis is written if possible.
    `optical`
      If writing a velocity, use the optical definition
      (otherwise use radio).
    `bitpix`
      can be set to -32 (float) or 16 (short) only. When `bitpix` is
      16 it will write BSCALE and BZERO into the FITS file.
    `minpix` and `maxpix`
      are used to determine BSCALE and BZERO if `bitpix=16`.
      If `minpix` is greater than `maxpix` (which is the default),
      the minimum and maximum pixel values will be determined from the
      data. Otherwise the supplied values will be used and pixels
      outside that range will be clipped to the minimum and maximum
      pixel values. Note that this truncation does not occur for
      `bitpix=-32`.
    """
    return self._tofits(filename, overwrite, velocity, optical,
                        bitpix, minpix, maxpix)
Write the image to a file in FITS format. `filename` FITS file name `overwrite` If False, an exception is raised if the new image file already exists. Default is True. `velocity` By default a velocity primary spectral axis is written if possible. `optical` If writing a velocity, use the optical definition (otherwise use radio). `bitpix` can be set to -32 (float) or 16 (short) only. When `bitpix` is 16 it will write BSCALE and BZERO into the FITS file. `minpix` and `maxpix` are used to determine BSCALE and BZERO if `bitpix=16`. If `minpix` is greater than `maxpix` (which is the default), the minimum and maximum pixel values will be determined from the data. Otherwise the supplied values will be used and pixels outside that range will be clipped to the minimum and maximum pixel values. Note that this truncation does not occur for `bitpix=-32`.
python
train
46.035714
datastax/python-driver
cassandra/connection.py
https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/connection.py#L667-L674
def register_watcher(self, event_type, callback, register_timeout=None):
    """
    Register a callback for a given event type.
    """
    self._push_watchers[event_type].add(callback)
    self.wait_for_response(
        RegisterMessage(event_list=[event_type]), timeout=register_timeout)
Register a callback for a given event type.
python
train
39.875
MatterMiners/cobald
cobald/daemon/runners/trio_runner.py
https://github.com/MatterMiners/cobald/blob/264138de4382d1c9b53fabcbc6660e10b33a914d/cobald/daemon/runners/trio_runner.py#L40-L46
async def _start_payloads(self, nursery):
    """Start all queued payloads"""
    with self._lock:
        for coroutine in self._payloads:
            nursery.start_soon(coroutine)
        self._payloads.clear()
    await trio.sleep(0)
Start all queued payloads
python
train
36.285714
inveniosoftware/invenio-migrator
invenio_migrator/legacy/users.py
https://github.com/inveniosoftware/invenio-migrator/blob/6902c6968a39b747d15e32363f43b7dffe2622c2/invenio_migrator/legacy/users.py#L73-L91
def dump(u, *args, **kwargs):
    """Dump the users as a list of dictionaries.

    :param u: User to be dumped.
    :type u: `invenio.modules.accounts.models.User [Invenio2.x]` or namedtuple.
    :returns: User serialized to dictionary.
    :rtype: dict
    """
    return dict(
        id=u.id,
        email=u.email,
        password=u.password,
        password_salt=u.password_salt,
        note=u.note,
        full_name=u.full_name if hasattr(u, 'full_name') else '{0} {1}'.format(
            u.given_names, u.family_name),
        settings=u.settings,
        nickname=u.nickname,
        last_login=dt2iso_or_empty(u.last_login))
Dump the users as a list of dictionaries. :param u: User to be dumped. :type u: `invenio.modules.accounts.models.User [Invenio2.x]` or namedtuple. :returns: User serialized to dictionary. :rtype: dict
python
test
32.684211
ihabunek/toot
toot/utils.py
https://github.com/ihabunek/toot/blob/d13fa8685b300f96621fa325774913ec0f413a7f/toot/utils.py#L32-L42
def parse_html(html):
    """Attempt to convert html to plain text while keeping line breaks.
    Returns a list of paragraphs, each being a list of lines.
    """
    paragraphs = re.split("</?p[^>]*>", html)

    # Convert <br>s to line breaks and remove empty paragraphs
    paragraphs = [re.split("<br */?>", p) for p in paragraphs if p]

    # Convert each line in each paragraph to plain text:
    return [[get_text(l) for l in p] for p in paragraphs]
Attempt to convert html to plain text while keeping line breaks. Returns a list of paragraphs, each being a list of lines.
python
train
40.636364
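A worked example for the function above with a toy status body, assuming `get_text` strips any remaining inline tags (as its name suggests):

html = "<p>Hello<br>world</p><p>Second paragraph</p>"
parse_html(html)
# -> [['Hello', 'world'], ['Second paragraph']]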
blue-yonder/tsfresh
tsfresh/feature_extraction/feature_calculators.py
https://github.com/blue-yonder/tsfresh/blob/c72c9c574371cf7dd7d54e00a466792792e5d202/tsfresh/feature_extraction/feature_calculators.py#L1177-L1221
def cwt_coefficients(x, param):
    """
    Calculates a Continuous wavelet transform for the Ricker wavelet, also known as the
    "Mexican hat wavelet" which is defined by

    .. math::
        \\frac{2}{\\sqrt{3a} \\pi^{\\frac{1}{4}}} (1 - \\frac{x^2}{a^2}) exp(-\\frac{x^2}{2a^2})

    where :math:`a` is the width parameter of the wavelet function.

    This feature calculator takes three different parameters: widths, coeff and w.
    The feature calculator takes all the different widths arrays and then calculates
    the cwt one time for each different width array. Then the values for the different
    coefficients for coeff and width w are returned. (For each dic in param one
    feature is returned.)

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param param: contains dictionaries {"widths": x, "coeff": y, "w": z} with x array of int and y, z int
    :type param: list
    :return: the different feature values
    :return type: pandas.Series
    """
    calculated_cwt = {}
    res = []
    indices = []

    for parameter_combination in param:
        widths = parameter_combination["widths"]
        w = parameter_combination["w"]
        coeff = parameter_combination["coeff"]

        if widths not in calculated_cwt:
            calculated_cwt[widths] = cwt(x, ricker, widths)

        calculated_cwt_for_widths = calculated_cwt[widths]

        indices += ["widths_{}__coeff_{}__w_{}".format(widths, coeff, w)]

        i = widths.index(w)
        if calculated_cwt_for_widths.shape[1] <= coeff:
            res += [np.NaN]
        else:
            res += [calculated_cwt_for_widths[i, coeff]]

    return zip(indices, res)
[ "def", "cwt_coefficients", "(", "x", ",", "param", ")", ":", "calculated_cwt", "=", "{", "}", "res", "=", "[", "]", "indices", "=", "[", "]", "for", "parameter_combination", "in", "param", ":", "widths", "=", "parameter_combination", "[", "\"widths\"", "]", "w", "=", "parameter_combination", "[", "\"w\"", "]", "coeff", "=", "parameter_combination", "[", "\"coeff\"", "]", "if", "widths", "not", "in", "calculated_cwt", ":", "calculated_cwt", "[", "widths", "]", "=", "cwt", "(", "x", ",", "ricker", ",", "widths", ")", "calculated_cwt_for_widths", "=", "calculated_cwt", "[", "widths", "]", "indices", "+=", "[", "\"widths_{}__coeff_{}__w_{}\"", ".", "format", "(", "widths", ",", "coeff", ",", "w", ")", "]", "i", "=", "widths", ".", "index", "(", "w", ")", "if", "calculated_cwt_for_widths", ".", "shape", "[", "1", "]", "<=", "coeff", ":", "res", "+=", "[", "np", ".", "NaN", "]", "else", ":", "res", "+=", "[", "calculated_cwt_for_widths", "[", "i", ",", "coeff", "]", "]", "return", "zip", "(", "indices", ",", "res", ")" ]
Calculates a Continuous wavelet transform for the Ricker wavelet, also known as the "Mexican hat wavelet" which is
defined by

.. math::
    \\frac{2}{\\sqrt{3a} \\pi^{\\frac{1}{4}}} (1 - \\frac{x^2}{a^2}) exp(-\\frac{x^2}{2a^2})

where :math:`a` is the width parameter of the wavelet function.

This feature calculator takes three different parameters: widths, coeff and w. The feature calculator takes all the
different widths arrays and then calculates the cwt once for each different width array. Then the values for the
different coefficients coeff and widths w are returned. (For each dict in param one feature is returned)

:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param param: contains dictionaries {"widths":x, "coeff": y, "w": z} with x array of int and y,z int
:type param: list
:return: the different feature values
:return type: pandas.Series
[ "Calculates", "a", "Continuous", "wavelet", "transform", "for", "the", "Ricker", "wavelet", "also", "known", "as", "the", "Mexican", "hat", "wavelet", "which", "is", "defined", "by" ]
python
train
36.4
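The row/column indexing above is easier to see against SciPy directly; note that widths must be an ordered sequence (widths.index(w) picks the row), and that scipy.signal.cwt and ricker exist only in older SciPy releases (they were deprecated and later removed), so treat this as a sketch for those versions:

import numpy as np
from scipy.signal import cwt, ricker   # removed in very recent SciPy

x = np.sin(np.linspace(0, 10, 100))
widths = (2, 5, 10)                # ordered, so widths.index(w) works
mat = cwt(x, ricker, widths)       # shape: (len(widths), len(x))

w, coeff = 5, 0
value = mat[widths.index(w), coeff] if mat.shape[1] > coeff else np.nan
print("widths_{}__coeff_{}__w_{}".format(widths, coeff, w), value)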
Cymmetria/honeycomb
honeycomb/cli.py
https://github.com/Cymmetria/honeycomb/blob/33ea91b5cf675000e4e85dd02efe580ea6e95c86/honeycomb/cli.py#L72-L88
def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None, sinfo=None): """Override default logger to allow overriding of internal attributes.""" # See below commented section for a simple example of what the docstring refers to if six.PY2: rv = logging.LogRecord(name, level, fn, lno, msg, args, exc_info, func) else: rv = logging.LogRecord(name, level, fn, lno, msg, args, exc_info, func, sinfo) if extra is None: extra = dict() extra.update({"pid": os.getpid(), "uid": os.getuid(), "gid": os.getgid(), "ppid": os.getppid()}) for key in extra: # if (key in ["message", "asctime"]) or (key in rv.__dict__): # raise KeyError("Attempt to overwrite %r in LogRecord" % key) rv.__dict__[key] = extra[key] return rv
[ "def", "makeRecord", "(", "self", ",", "name", ",", "level", ",", "fn", ",", "lno", ",", "msg", ",", "args", ",", "exc_info", ",", "func", "=", "None", ",", "extra", "=", "None", ",", "sinfo", "=", "None", ")", ":", "# See below commented section for a simple example of what the docstring refers to", "if", "six", ".", "PY2", ":", "rv", "=", "logging", ".", "LogRecord", "(", "name", ",", "level", ",", "fn", ",", "lno", ",", "msg", ",", "args", ",", "exc_info", ",", "func", ")", "else", ":", "rv", "=", "logging", ".", "LogRecord", "(", "name", ",", "level", ",", "fn", ",", "lno", ",", "msg", ",", "args", ",", "exc_info", ",", "func", ",", "sinfo", ")", "if", "extra", "is", "None", ":", "extra", "=", "dict", "(", ")", "extra", ".", "update", "(", "{", "\"pid\"", ":", "os", ".", "getpid", "(", ")", ",", "\"uid\"", ":", "os", ".", "getuid", "(", ")", ",", "\"gid\"", ":", "os", ".", "getgid", "(", ")", ",", "\"ppid\"", ":", "os", ".", "getppid", "(", ")", "}", ")", "for", "key", "in", "extra", ":", "# if (key in [\"message\", \"asctime\"]) or (key in rv.__dict__):", "# raise KeyError(\"Attempt to overwrite %r in LogRecord\" % key)", "rv", ".", "__dict__", "[", "key", "]", "=", "extra", "[", "key", "]", "return", "rv" ]
Override default logger to allow overriding of internal attributes.
[ "Override", "default", "logger", "to", "allow", "overriding", "of", "internal", "attributes", "." ]
python
train
50.823529
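The override above follows the stock logging extension recipe; a minimal self-contained variant of the same idea, injecting a pid field into every record:

import logging
import os

class PidLogger(logging.Logger):
    def makeRecord(self, *args, **kwargs):
        rv = super(PidLogger, self).makeRecord(*args, **kwargs)
        rv.pid = os.getpid()   # attach an extra attribute to every record
        return rv

logging.setLoggerClass(PidLogger)
log = logging.getLogger("demo")
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(pid)s %(message)s"))
log.addHandler(handler)
log.warning("hello")   # prints e.g. "12345 hello"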
frnsys/broca
broca/vectorize/dcs.py
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/vectorize/dcs.py#L133-L176
def _disambiguate_pos(self, terms, pos): """ Disambiguates a list of tokens of a given PoS. """ # Map the terms to candidate concepts # Consider only the top 3 most common senses candidate_map = {term: wn.synsets(term, pos=pos)[:3] for term in terms} # Filter to unique concepts concepts = set(c for cons in candidate_map.values() for c in cons) # Back to list for consistent ordering concepts = list(concepts) sim_mat = self._similarity_matrix(concepts) # Final map of terms to their disambiguated concepts map = {} # This is terrible # For each term, select the candidate concept # which has the maximum aggregate similarity score against # all other candidate concepts of all other terms sharing the same PoS for term, cons in candidate_map.items(): # Some words may not be in WordNet # and thus have no candidate concepts, so skip if not cons: continue scores = [] for con in cons: i = concepts.index(con) scores_ = [] for term_, cons_ in candidate_map.items(): # Some words may not be in WordNet # and thus have no candidate concepts, so skip if term == term_ or not cons_: continue cons_idx = [concepts.index(c) for c in cons_] top_sim = max(sim_mat[i,cons_idx]) scores_.append(top_sim) scores.append(sum(scores_)) best_idx = np.argmax(scores) map[term] = cons[best_idx] return map
[ "def", "_disambiguate_pos", "(", "self", ",", "terms", ",", "pos", ")", ":", "# Map the terms to candidate concepts", "# Consider only the top 3 most common senses", "candidate_map", "=", "{", "term", ":", "wn", ".", "synsets", "(", "term", ",", "pos", "=", "pos", ")", "[", ":", "3", "]", "for", "term", "in", "terms", "}", "# Filter to unique concepts", "concepts", "=", "set", "(", "c", "for", "cons", "in", "candidate_map", ".", "values", "(", ")", "for", "c", "in", "cons", ")", "# Back to list for consistent ordering", "concepts", "=", "list", "(", "concepts", ")", "sim_mat", "=", "self", ".", "_similarity_matrix", "(", "concepts", ")", "# Final map of terms to their disambiguated concepts", "map", "=", "{", "}", "# This is terrible", "# For each term, select the candidate concept", "# which has the maximum aggregate similarity score against", "# all other candidate concepts of all other terms sharing the same PoS", "for", "term", ",", "cons", "in", "candidate_map", ".", "items", "(", ")", ":", "# Some words may not be in WordNet", "# and thus have no candidate concepts, so skip", "if", "not", "cons", ":", "continue", "scores", "=", "[", "]", "for", "con", "in", "cons", ":", "i", "=", "concepts", ".", "index", "(", "con", ")", "scores_", "=", "[", "]", "for", "term_", ",", "cons_", "in", "candidate_map", ".", "items", "(", ")", ":", "# Some words may not be in WordNet", "# and thus have no candidate concepts, so skip", "if", "term", "==", "term_", "or", "not", "cons_", ":", "continue", "cons_idx", "=", "[", "concepts", ".", "index", "(", "c", ")", "for", "c", "in", "cons_", "]", "top_sim", "=", "max", "(", "sim_mat", "[", "i", ",", "cons_idx", "]", ")", "scores_", ".", "append", "(", "top_sim", ")", "scores", ".", "append", "(", "sum", "(", "scores_", ")", ")", "best_idx", "=", "np", ".", "argmax", "(", "scores", ")", "map", "[", "term", "]", "=", "cons", "[", "best_idx", "]", "return", "map" ]
Disambiguates a list of tokens of a given PoS.
[ "Disambiguates", "a", "list", "of", "tokens", "of", "a", "given", "PoS", "." ]
python
train
38.840909
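The candidate map above comes straight from NLTK's WordNet interface; this is what the top-3 sense lookup yields (assumes nltk plus its wordnet corpus are installed, and the printed synsets are indicative, not guaranteed):

from nltk.corpus import wordnet as wn   # needs nltk.download('wordnet') once

terms = ['bank', 'river']
candidate_map = {t: wn.synsets(t, pos=wn.NOUN)[:3] for t in terms}
for term, cons in candidate_map.items():
    print(term, [c.name() for c in cons])
# e.g. bank ['bank.n.01', 'depository_financial_institution.n.01', 'bank.n.03']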
inveniosoftware/invenio-pidstore
invenio_pidstore/providers/datacite.py
https://github.com/inveniosoftware/invenio-pidstore/blob/8bf35f4e62d5dcaf1a2cfe5803245ba5220a9b78/invenio_pidstore/providers/datacite.py#L35-L49
def create(cls, pid_value, **kwargs): """Create a new record identifier. For more information about parameters, see :meth:`invenio_pidstore.providers.BaseProvider.create`. :param pid_value: Persistent identifier value. :params **kwargs: See :meth:`invenio_pidstore.providers.base.BaseProvider.create` extra parameters. :returns: A :class:`invenio_pidstore.providers.DataCiteProvider` instance. """ return super(DataCiteProvider, cls).create( pid_value=pid_value, **kwargs)
[ "def", "create", "(", "cls", ",", "pid_value", ",", "*", "*", "kwargs", ")", ":", "return", "super", "(", "DataCiteProvider", ",", "cls", ")", ".", "create", "(", "pid_value", "=", "pid_value", ",", "*", "*", "kwargs", ")" ]
Create a new record identifier. For more information about parameters, see :meth:`invenio_pidstore.providers.BaseProvider.create`. :param pid_value: Persistent identifier value. :params **kwargs: See :meth:`invenio_pidstore.providers.base.BaseProvider.create` extra parameters. :returns: A :class:`invenio_pidstore.providers.DataCiteProvider` instance.
[ "Create", "a", "new", "record", "identifier", "." ]
python
train
38.133333
Alignak-monitoring/alignak
alignak/external_command.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L2406-L2419
def disable_svc_freshness_check(self, service): """Disable freshness check for a service Format of the line that triggers function call:: DISABLE_SERVICE_FRESHNESS_CHECK;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None """ if service.check_freshness: service.modified_attributes |= DICT_MODATTR["MODATTR_FRESHNESS_CHECKS_ENABLED"].value service.check_freshness = False self.send_an_element(service.get_update_status_brok())
[ "def", "disable_svc_freshness_check", "(", "self", ",", "service", ")", ":", "if", "service", ".", "check_freshness", ":", "service", ".", "modified_attributes", "|=", "DICT_MODATTR", "[", "\"MODATTR_FRESHNESS_CHECKS_ENABLED\"", "]", ".", "value", "service", ".", "check_freshness", "=", "False", "self", ".", "send_an_element", "(", "service", ".", "get_update_status_brok", "(", ")", ")" ]
Disable freshness check for a service Format of the line that triggers function call:: DISABLE_SERVICE_FRESHNESS_CHECK;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None
[ "Disable", "freshness", "check", "for", "a", "service", "Format", "of", "the", "line", "that", "triggers", "function", "call", "::" ]
python
train
42.142857
saltstack/salt
salt/modules/tomcat.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/tomcat.py#L268-L280
def _simple_cmd(cmd, app, url='http://localhost:8080/manager', timeout=180):
    '''
    Simple command wrapper for commands that need only a path option
    '''
    try:
        opts = {
            'path': app,
            'version': ls(url)[app]['version']
        }
        return '\n'.join(_wget(cmd, opts, url, timeout=timeout)['msg'])
    except Exception:
        return 'FAIL - No context exists for path {0}'.format(app)
[ "def", "_simple_cmd", "(", "cmd", ",", "app", ",", "url", "=", "'http://localhost:8080/manager'", ",", "timeout", "=", "180", ")", ":", "try", ":", "opts", "=", "{", "'path'", ":", "app", ",", "'version'", ":", "ls", "(", "url", ")", "[", "app", "]", "[", "'version'", "]", "}", "return", "'\\n'", ".", "join", "(", "_wget", "(", "cmd", ",", "opts", ",", "url", ",", "timeout", "=", "timeout", ")", "[", "'msg'", "]", ")", "except", "Exception", ":", "return", "'FAIL - No context exists for path {0}'", ".", "format", "(", "app", ")" ]
Simple command wrapper for commands that need only a path option
[ "Simple", "command", "wrapper", "to", "commands", "that", "need", "only", "a", "path", "option" ]
python
train
32.153846
roaet/wafflehaus.neutron
wafflehaus/neutron/last_ip_check/last_ip_check.py
https://github.com/roaet/wafflehaus.neutron/blob/01f6d69ae759ec2f24f2f7cf9dcfa4a4734f7e1c/wafflehaus/neutron/last_ip_check/last_ip_check.py#L135-L142
def filter_factory(global_conf, **local_conf): """Returns a WSGI filter app for use with paste.deploy.""" conf = global_conf.copy() conf.update(local_conf) def check_last_ip(app): return LastIpCheck(app, conf) return check_last_ip
[ "def", "filter_factory", "(", "global_conf", ",", "*", "*", "local_conf", ")", ":", "conf", "=", "global_conf", ".", "copy", "(", ")", "conf", ".", "update", "(", "local_conf", ")", "def", "check_last_ip", "(", "app", ")", ":", "return", "LastIpCheck", "(", "app", ",", "conf", ")", "return", "check_last_ip" ]
Returns a WSGI filter app for use with paste.deploy.
[ "Returns", "a", "WSGI", "filter", "app", "for", "use", "with", "paste", ".", "deploy", "." ]
python
train
31.5
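This is the standard paste.deploy filter_factory shape; stripped of the Neutron specifics it reduces to the following pattern, with LastIpCheck replaced by a trivial pass-through middleware for illustration:

def filter_factory(global_conf, **local_conf):
    """Returns a WSGI filter app for use with paste.deploy."""
    conf = dict(global_conf or {})
    conf.update(local_conf)

    def make_filter(app):
        def middleware(environ, start_response):
            # a real filter would consult conf here before delegating
            return app(environ, start_response)
        return middleware
    return make_filter

# paste.deploy effectively does: filter_factory(global_conf, **local_conf)(app)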
cloudmesh/cloudmesh-common
cloudmesh/common/BaseConfigDict.py
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/BaseConfigDict.py#L449-L477
def set(self, value, *keys):
    """
    Sets the dict of the information as read from the yaml file. To access
    the file safely, you can use the keys in the order of the access.
    Example: set("{'project':{'fg82':[i0-i10]}}", "provisioner","policy")
    will set the value of config["provisioner"]["policy"] in the yaml file.
    If it does not exist, an error will be printed stating that the value
    does not exist. Alternatively you can use the . notation e.g.
    set("{'project':{'fg82':[i0-i10]}}", "provisioner.policy")
    """
    element = self
    if keys is None:
        return self
    if '.' in keys[0]:
        keys = keys[0].split(".")
    nested_str = ''.join(["['{0}']".format(x) for x in keys])
    # Safely evaluate an expression to see if it is one of the Python
    # literal structures: strings, numbers, tuples, lists, dicts, booleans,
    # and None. Quoted string will be used if it is none of these types.
    try:
        ast.literal_eval(str(value))
        converted = str(value)
    except ValueError:
        converted = "'" + str(value) + "'"
    exec("self" + nested_str + "=" + converted)
    return element
[ "def", "set", "(", "self", ",", "value", ",", "*", "keys", ")", ":", "element", "=", "self", "if", "keys", "is", "None", ":", "return", "self", "if", "'.'", "in", "keys", "[", "0", "]", ":", "keys", "=", "keys", "[", "0", "]", ".", "split", "(", "\".\"", ")", "nested_str", "=", "''", ".", "join", "(", "[", "\"['{0}']\"", ".", "format", "(", "x", ")", "for", "x", "in", "keys", "]", ")", "# Safely evaluate an expression to see if it is one of the Python", "# literal structures: strings, numbers, tuples, lists, dicts, booleans,", "# and None. Quoted string will be used if it is none of these types.", "try", ":", "ast", ".", "literal_eval", "(", "str", "(", "value", ")", ")", "converted", "=", "str", "(", "value", ")", "except", "ValueError", ":", "converted", "=", "\"'\"", "+", "str", "(", "value", ")", "+", "\"'\"", "exec", "(", "\"self\"", "+", "nested_str", "+", "\"=\"", "+", "converted", ")", "return", "element" ]
Sets the dict of the information as read from the yaml file. To access
the file safely, you can use the keys in the order of the access.
Example: set("{'project':{'fg82':[i0-i10]}}", "provisioner","policy")
will set the value of config["provisioner"]["policy"] in the yaml file.
If it does not exist, an error will be printed stating that the value
does not exist. Alternatively you can use the . notation e.g.
set("{'project':{'fg82':[i0-i10]}}", "provisioner.policy")
[ "Sets", "the", "dict", "of", "the", "information", "as", "read", "from", "the", "yaml", "file", ".", "To", "access", "the", "file", "safely", "you", "can", "use", "the", "keys", "in", "the", "order", "of", "the", "access", ".", "Example", ":", "set", "(", "{", "project", ":", "{", "fg82", ":", "[", "i0", "-", "i10", "]", "}}", "provisioner", "policy", ")", "will", "set", "the", "value", "of", "config", "[", "provisioner", "]", "[", "policy", "]", "in", "the", "yaml", "file", "if", "it", "does", "not", "exists", "an", "error", "will", "be", "printing", "that", "the", "value", "does", "not", "exists", ".", "Alternatively", "you", "can", "use", "the", ".", "notation", "e", ".", "g", ".", "set", "(", "{", "project", ":", "{", "fg82", ":", "[", "i0", "-", "i10", "]", "}}", "provisioner", ".", "policy", ")" ]
python
train
42.034483
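The exec-based assignment above can be reproduced without exec by walking the split keys; ast.literal_eval is kept for the same string-vs-literal coercion. A sketch of the idea, not the class's actual implementation (it also catches SyntaxError, which literal_eval can raise and the original misses):

import ast

def nested_set(d, value, *keys):
    if '.' in keys[0]:
        keys = keys[0].split('.')
    try:
        value = ast.literal_eval(str(value))   # coerce literals like '[1, 2]'
    except (ValueError, SyntaxError):
        value = str(value)                     # otherwise keep a plain string
    node = d
    for k in keys[:-1]:
        node = node[k]
    node[keys[-1]] = value

cfg = {'provisioner': {'policy': None}}
nested_set(cfg, '[1, 2]', 'provisioner.policy')
print(cfg)   # {'provisioner': {'policy': [1, 2]}}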
LISE-B26/pylabcontrol
build/lib/pylabcontrol/gui/windows_and_widgets/main_window.py
https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/gui/windows_and_widgets/main_window.py#L962-L990
def update_status(self, progress): """ waits for a signal emitted from a thread and updates the gui Args: progress: Returns: """ # interval at which the gui will be updated, if requests come in faster than they will be ignored update_interval = 0.2 now = datetime.datetime.now() if not self._last_progress_update is None and now-self._last_progress_update < datetime.timedelta(seconds=update_interval): return self._last_progress_update = now self.progressBar.setValue(progress) script = self.current_script # Estimate remaining time if progress has been made if progress: remaining_time = str(datetime.timedelta(seconds=script.remaining_time.seconds)) self.lbl_time_estimate.setText('time remaining: {:s}'.format(remaining_time)) if script is not str(self.tabWidget.tabText(self.tabWidget.currentIndex())).lower() in ['scripts', 'instruments']: self.plot_script(script)
[ "def", "update_status", "(", "self", ",", "progress", ")", ":", "# interval at which the gui will be updated, if requests come in faster than they will be ignored", "update_interval", "=", "0.2", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "if", "not", "self", ".", "_last_progress_update", "is", "None", "and", "now", "-", "self", ".", "_last_progress_update", "<", "datetime", ".", "timedelta", "(", "seconds", "=", "update_interval", ")", ":", "return", "self", ".", "_last_progress_update", "=", "now", "self", ".", "progressBar", ".", "setValue", "(", "progress", ")", "script", "=", "self", ".", "current_script", "# Estimate remaining time if progress has been made", "if", "progress", ":", "remaining_time", "=", "str", "(", "datetime", ".", "timedelta", "(", "seconds", "=", "script", ".", "remaining_time", ".", "seconds", ")", ")", "self", ".", "lbl_time_estimate", ".", "setText", "(", "'time remaining: {:s}'", ".", "format", "(", "remaining_time", ")", ")", "if", "script", "is", "not", "str", "(", "self", ".", "tabWidget", ".", "tabText", "(", "self", ".", "tabWidget", ".", "currentIndex", "(", ")", ")", ")", ".", "lower", "(", ")", "in", "[", "'scripts'", ",", "'instruments'", "]", ":", "self", ".", "plot_script", "(", "script", ")" ]
waits for a signal emitted from a thread and updates the gui Args: progress: Returns:
[ "waits", "for", "a", "signal", "emitted", "from", "a", "thread", "and", "updates", "the", "gui", "Args", ":", "progress", ":", "Returns", ":" ]
python
train
35.517241
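The early-return guard at the top of update_status is a generic GUI throttle; isolated, the pattern looks like this (0.2 s minimum interval between accepted updates):

import datetime

class Throttle(object):
    def __init__(self, interval=0.2):
        self._last = None
        self._delta = datetime.timedelta(seconds=interval)

    def ready(self):
        now = datetime.datetime.now()
        if self._last is not None and now - self._last < self._delta:
            return False   # request came in too fast; ignore it
        self._last = now
        return True

t = Throttle()
print([t.ready() for _ in range(3)])   # [True, False, False]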
Kozea/pygal
pygal/svg.py
https://github.com/Kozea/pygal/blob/5e25c98a59a0642eecd9fcc5dbfeeb2190fbb5e7/pygal/svg.py#L470-L493
def render(self, is_unicode=False, pretty_print=False): """Last thing to do before rendering""" for f in self.graph.xml_filters: self.root = f(self.root) args = {'encoding': 'utf-8'} svg = b'' if etree.lxml: args['pretty_print'] = pretty_print if not self.graph.disable_xml_declaration: svg = b"<?xml version='1.0' encoding='utf-8'?>\n" if not self.graph.disable_xml_declaration: svg += b'\n'.join([ etree.tostring(pi, **args) for pi in self.processing_instructions ]) svg += etree.tostring(self.root, **args) if self.graph.disable_xml_declaration or is_unicode: svg = svg.decode('utf-8') return svg
[ "def", "render", "(", "self", ",", "is_unicode", "=", "False", ",", "pretty_print", "=", "False", ")", ":", "for", "f", "in", "self", ".", "graph", ".", "xml_filters", ":", "self", ".", "root", "=", "f", "(", "self", ".", "root", ")", "args", "=", "{", "'encoding'", ":", "'utf-8'", "}", "svg", "=", "b''", "if", "etree", ".", "lxml", ":", "args", "[", "'pretty_print'", "]", "=", "pretty_print", "if", "not", "self", ".", "graph", ".", "disable_xml_declaration", ":", "svg", "=", "b\"<?xml version='1.0' encoding='utf-8'?>\\n\"", "if", "not", "self", ".", "graph", ".", "disable_xml_declaration", ":", "svg", "+=", "b'\\n'", ".", "join", "(", "[", "etree", ".", "tostring", "(", "pi", ",", "*", "*", "args", ")", "for", "pi", "in", "self", ".", "processing_instructions", "]", ")", "svg", "+=", "etree", ".", "tostring", "(", "self", ".", "root", ",", "*", "*", "args", ")", "if", "self", ".", "graph", ".", "disable_xml_declaration", "or", "is_unicode", ":", "svg", "=", "svg", ".", "decode", "(", "'utf-8'", ")", "return", "svg" ]
Last thing to do before rendering
[ "Last", "thing", "to", "do", "before", "rendering" ]
python
train
31.875
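This renderer sits behind pygal's public chart API; typical end-user usage looks like the following (signatures as I understand the pygal docs, so treat as indicative):

import pygal

chart = pygal.Line()
chart.add('series', [1, 3, 2])
svg_bytes = chart.render()                 # bytes, XML declaration included
svg_text = chart.render(is_unicode=True)   # str, the branch handled above
print(svg_text[:38])                       # the XML declaration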
inveniosoftware/invenio-migrator
invenio_migrator/legacy/cli.py
https://github.com/inveniosoftware/invenio-migrator/blob/6902c6968a39b747d15e32363f43b7dffe2622c2/invenio_migrator/legacy/cli.py#L57-L97
def dump(thing, query, from_date, file_prefix, chunk_size, limit, thing_flags): """Dump data from Invenio legacy.""" init_app_context() file_prefix = file_prefix if file_prefix else '{0}_dump'.format(thing) kwargs = dict((f.strip('-').replace('-', '_'), True) for f in thing_flags) try: thing_func = collect_things_entry_points()[thing] except KeyError: click.Abort( '{0} is not in the list of available things to migrate: ' '{1}'.format(thing, collect_things_entry_points())) click.echo("Querying {0}...".format(thing)) count, items = thing_func.get(query, from_date, limit=limit, **kwargs) progress_i = 0 # Progress bar counter click.echo("Dumping {0}...".format(thing)) with click.progressbar(length=count) as bar: for i, chunk_ids in enumerate(grouper(items, chunk_size)): with open('{0}_{1}.json'.format(file_prefix, i), 'w') as fp: fp.write("[\n") for _id in chunk_ids: try: json.dump( thing_func.dump(_id, from_date, **kwargs), fp, default=set_serializer ) fp.write(",") except Exception as e: click.secho("Failed dump {0} {1} ({2})".format( thing, _id, e.message), fg='red') progress_i += 1 bar.update(progress_i) # Strip trailing comma. fp.seek(fp.tell()-1) fp.write("\n]")
[ "def", "dump", "(", "thing", ",", "query", ",", "from_date", ",", "file_prefix", ",", "chunk_size", ",", "limit", ",", "thing_flags", ")", ":", "init_app_context", "(", ")", "file_prefix", "=", "file_prefix", "if", "file_prefix", "else", "'{0}_dump'", ".", "format", "(", "thing", ")", "kwargs", "=", "dict", "(", "(", "f", ".", "strip", "(", "'-'", ")", ".", "replace", "(", "'-'", ",", "'_'", ")", ",", "True", ")", "for", "f", "in", "thing_flags", ")", "try", ":", "thing_func", "=", "collect_things_entry_points", "(", ")", "[", "thing", "]", "except", "KeyError", ":", "click", ".", "Abort", "(", "'{0} is not in the list of available things to migrate: '", "'{1}'", ".", "format", "(", "thing", ",", "collect_things_entry_points", "(", ")", ")", ")", "click", ".", "echo", "(", "\"Querying {0}...\"", ".", "format", "(", "thing", ")", ")", "count", ",", "items", "=", "thing_func", ".", "get", "(", "query", ",", "from_date", ",", "limit", "=", "limit", ",", "*", "*", "kwargs", ")", "progress_i", "=", "0", "# Progress bar counter", "click", ".", "echo", "(", "\"Dumping {0}...\"", ".", "format", "(", "thing", ")", ")", "with", "click", ".", "progressbar", "(", "length", "=", "count", ")", "as", "bar", ":", "for", "i", ",", "chunk_ids", "in", "enumerate", "(", "grouper", "(", "items", ",", "chunk_size", ")", ")", ":", "with", "open", "(", "'{0}_{1}.json'", ".", "format", "(", "file_prefix", ",", "i", ")", ",", "'w'", ")", "as", "fp", ":", "fp", ".", "write", "(", "\"[\\n\"", ")", "for", "_id", "in", "chunk_ids", ":", "try", ":", "json", ".", "dump", "(", "thing_func", ".", "dump", "(", "_id", ",", "from_date", ",", "*", "*", "kwargs", ")", ",", "fp", ",", "default", "=", "set_serializer", ")", "fp", ".", "write", "(", "\",\"", ")", "except", "Exception", "as", "e", ":", "click", ".", "secho", "(", "\"Failed dump {0} {1} ({2})\"", ".", "format", "(", "thing", ",", "_id", ",", "e", ".", "message", ")", ",", "fg", "=", "'red'", ")", "progress_i", "+=", "1", "bar", ".", "update", "(", "progress_i", ")", "# Strip trailing comma.", "fp", ".", "seek", "(", "fp", ".", "tell", "(", ")", "-", "1", ")", "fp", ".", "write", "(", "\"\\n]\"", ")" ]
Dump data from Invenio legacy.
[ "Dump", "data", "from", "Invenio", "legacy", "." ]
python
test
39.536585
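Two pieces of that loop are worth isolating: the grouper chunker (assumed here to be the usual itertools recipe, which pads the last chunk with None) and the seek-back trick that strips the trailing comma:

import json
from itertools import zip_longest

def grouper(iterable, n):
    # usual itertools recipe; the last chunk is padded with None
    args = [iter(iterable)] * n
    return zip_longest(*args)

print(list(grouper('ABCDE', 2)))   # [('A', 'B'), ('C', 'D'), ('E', None)]

with open('demo_dump_0.json', 'w') as fp:
    fp.write("[\n")
    for _id in (1, 2, 3):
        json.dump({"id": _id}, fp)
        fp.write(",")
    fp.seek(fp.tell() - 1)   # back up over the final comma (ASCII output, so safe)
    fp.write("\n]")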
foremast/foremast
src/foremast/awslambda/cloudwatch_log_event/cloudwatch_log_event.py
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/awslambda/cloudwatch_log_event/cloudwatch_log_event.py#L28-L75
def create_cloudwatch_log_event(app_name, env, region, rules): """Create cloudwatch log event for lambda from rules. Args: app_name (str): name of the lambda function env (str): Environment/Account for lambda function region (str): AWS region of the lambda function rules (str): Trigger rules from the settings """ session = boto3.Session(profile_name=env, region_name=region) cloudwatch_client = session.client('logs') log_group = rules.get('log_group') filter_name = rules.get('filter_name') filter_pattern = rules.get('filter_pattern') if not log_group: LOG.critical('Log group is required and no "log_group" is defined!') raise InvalidEventConfiguration('Log group is required and no "log_group" is defined!') if not filter_name: LOG.critical('Filter name is required and no filter_name is defined!') raise InvalidEventConfiguration('Filter name is required and no filter_name is defined!') if filter_pattern is None: LOG.critical('Filter pattern is required and no filter_pattern is defined!') raise InvalidEventConfiguration('Filter pattern is required and no filter_pattern is defined!') lambda_alias_arn = get_lambda_alias_arn(app=app_name, account=env, region=region) statement_id = '{}_cloudwatchlog_{}'.format(app_name, filter_name.replace(" ", "_")) principal = 'logs.{}.amazonaws.com'.format(region) account_id = get_env_credential(env=env)['accountId'] source_arn = "arn:aws:logs:{0}:{1}:log-group:{2}:*".format(region, account_id, log_group) add_lambda_permissions( function=lambda_alias_arn, statement_id=statement_id, action='lambda:InvokeFunction', principal=principal, source_arn=source_arn, env=env, region=region) cloudwatch_client.put_subscription_filter( logGroupName=log_group, filterName=filter_name, filterPattern=filter_pattern, destinationArn=lambda_alias_arn) LOG.info("Created Cloudwatch log event with filter: %s", filter_pattern)
[ "def", "create_cloudwatch_log_event", "(", "app_name", ",", "env", ",", "region", ",", "rules", ")", ":", "session", "=", "boto3", ".", "Session", "(", "profile_name", "=", "env", ",", "region_name", "=", "region", ")", "cloudwatch_client", "=", "session", ".", "client", "(", "'logs'", ")", "log_group", "=", "rules", ".", "get", "(", "'log_group'", ")", "filter_name", "=", "rules", ".", "get", "(", "'filter_name'", ")", "filter_pattern", "=", "rules", ".", "get", "(", "'filter_pattern'", ")", "if", "not", "log_group", ":", "LOG", ".", "critical", "(", "'Log group is required and no \"log_group\" is defined!'", ")", "raise", "InvalidEventConfiguration", "(", "'Log group is required and no \"log_group\" is defined!'", ")", "if", "not", "filter_name", ":", "LOG", ".", "critical", "(", "'Filter name is required and no filter_name is defined!'", ")", "raise", "InvalidEventConfiguration", "(", "'Filter name is required and no filter_name is defined!'", ")", "if", "filter_pattern", "is", "None", ":", "LOG", ".", "critical", "(", "'Filter pattern is required and no filter_pattern is defined!'", ")", "raise", "InvalidEventConfiguration", "(", "'Filter pattern is required and no filter_pattern is defined!'", ")", "lambda_alias_arn", "=", "get_lambda_alias_arn", "(", "app", "=", "app_name", ",", "account", "=", "env", ",", "region", "=", "region", ")", "statement_id", "=", "'{}_cloudwatchlog_{}'", ".", "format", "(", "app_name", ",", "filter_name", ".", "replace", "(", "\" \"", ",", "\"_\"", ")", ")", "principal", "=", "'logs.{}.amazonaws.com'", ".", "format", "(", "region", ")", "account_id", "=", "get_env_credential", "(", "env", "=", "env", ")", "[", "'accountId'", "]", "source_arn", "=", "\"arn:aws:logs:{0}:{1}:log-group:{2}:*\"", ".", "format", "(", "region", ",", "account_id", ",", "log_group", ")", "add_lambda_permissions", "(", "function", "=", "lambda_alias_arn", ",", "statement_id", "=", "statement_id", ",", "action", "=", "'lambda:InvokeFunction'", ",", "principal", "=", "principal", ",", "source_arn", "=", "source_arn", ",", "env", "=", "env", ",", "region", "=", "region", ")", "cloudwatch_client", ".", "put_subscription_filter", "(", "logGroupName", "=", "log_group", ",", "filterName", "=", "filter_name", ",", "filterPattern", "=", "filter_pattern", ",", "destinationArn", "=", "lambda_alias_arn", ")", "LOG", ".", "info", "(", "\"Created Cloudwatch log event with filter: %s\"", ",", "filter_pattern", ")" ]
Create cloudwatch log event for lambda from rules. Args: app_name (str): name of the lambda function env (str): Environment/Account for lambda function region (str): AWS region of the lambda function rules (str): Trigger rules from the settings
[ "Create", "cloudwatch", "log", "event", "for", "lambda", "from", "rules", "." ]
python
train
42.583333
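The final call above maps onto boto3's CloudWatch Logs client; in isolation it looks like this (profile, region, names and ARNs are placeholders, and real AWS credentials are needed to actually run it):

import boto3

client = boto3.Session(profile_name='dev', region_name='us-east-1').client('logs')
client.put_subscription_filter(
    logGroupName='/aws/lambda/myapp',
    filterName='myapp filter',
    filterPattern='ERROR',
    destinationArn='arn:aws:lambda:us-east-1:123456789012:function:myapp:PROD')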
auth0/auth0-python
auth0/v3/management/guardian.py
https://github.com/auth0/auth0-python/blob/34adad3f342226aaaa6071387fa405ab840e5c02/auth0/v3/management/guardian.py#L36-L46
def update_factor(self, name, body): """Update Guardian factor Useful to enable / disable factor Args: name (str): Either push-notification or sms body (dict): Attributes to modify. See: https://auth0.com/docs/api/management/v2#!/Guardian/put_factors_by_name """ url = self._url('factors/{}'.format(name)) return self.client.put(url, data=body)
[ "def", "update_factor", "(", "self", ",", "name", ",", "body", ")", ":", "url", "=", "self", ".", "_url", "(", "'factors/{}'", ".", "format", "(", "name", ")", ")", "return", "self", ".", "client", ".", "put", "(", "url", ",", "data", "=", "body", ")" ]
Update Guardian factor Useful to enable / disable factor Args: name (str): Either push-notification or sms body (dict): Attributes to modify. See: https://auth0.com/docs/api/management/v2#!/Guardian/put_factors_by_name
[ "Update", "Guardian", "factor", "Useful", "to", "enable", "/", "disable", "factor" ]
python
train
38.090909
Erotemic/utool
utool/util_hash.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_hash.py#L65-L95
def make_hash(o): r""" Makes a hash from a dictionary, list, tuple or set to any level, that contains only other hashable types (including any lists, tuples, sets, and dictionaries). In the case where other kinds of objects (like classes) need to be hashed, pass in a collection of object attributes that are pertinent. For example, a class can be hashed in this fashion: make_hash([cls.__dict__, cls.__name__]) A function can be hashed like so: make_hash([fn.__dict__, fn.__code__]) References: http://stackoverflow.com/questions/5884066/hashing-a-python-dictionary """ if type(o) == DictProxyType: o2 = {} for k, v in o.items(): if not k.startswith("__"): o2[k] = v o = o2 if isinstance(o, (set, tuple, list)): return tuple([make_hash(e) for e in o]) elif not isinstance(o, dict): return hash(o) new_o = copy.deepcopy(o) for k, v in new_o.items(): new_o[k] = make_hash(v) return hash(tuple(frozenset(sorted(new_o.items()))))
[ "def", "make_hash", "(", "o", ")", ":", "if", "type", "(", "o", ")", "==", "DictProxyType", ":", "o2", "=", "{", "}", "for", "k", ",", "v", "in", "o", ".", "items", "(", ")", ":", "if", "not", "k", ".", "startswith", "(", "\"__\"", ")", ":", "o2", "[", "k", "]", "=", "v", "o", "=", "o2", "if", "isinstance", "(", "o", ",", "(", "set", ",", "tuple", ",", "list", ")", ")", ":", "return", "tuple", "(", "[", "make_hash", "(", "e", ")", "for", "e", "in", "o", "]", ")", "elif", "not", "isinstance", "(", "o", ",", "dict", ")", ":", "return", "hash", "(", "o", ")", "new_o", "=", "copy", ".", "deepcopy", "(", "o", ")", "for", "k", ",", "v", "in", "new_o", ".", "items", "(", ")", ":", "new_o", "[", "k", "]", "=", "make_hash", "(", "v", ")", "return", "hash", "(", "tuple", "(", "frozenset", "(", "sorted", "(", "new_o", ".", "items", "(", ")", ")", ")", ")", ")" ]
r""" Makes a hash from a dictionary, list, tuple or set to any level, that contains only other hashable types (including any lists, tuples, sets, and dictionaries). In the case where other kinds of objects (like classes) need to be hashed, pass in a collection of object attributes that are pertinent. For example, a class can be hashed in this fashion: make_hash([cls.__dict__, cls.__name__]) A function can be hashed like so: make_hash([fn.__dict__, fn.__code__]) References: http://stackoverflow.com/questions/5884066/hashing-a-python-dictionary
[ "r", "Makes", "a", "hash", "from", "a", "dictionary", "list", "tuple", "or", "set", "to", "any", "level", "that", "contains", "only", "other", "hashable", "types", "(", "including", "any", "lists", "tuples", "sets", "and", "dictionaries", ")", ".", "In", "the", "case", "where", "other", "kinds", "of", "objects", "(", "like", "classes", ")", "need", "to", "be", "hashed", "pass", "in", "a", "collection", "of", "object", "attributes", "that", "are", "pertinent", ".", "For", "example", "a", "class", "can", "be", "hashed", "in", "this", "fashion", ":" ]
python
train
34.064516
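Assuming make_hash and its module-level dependencies are in scope (the function needs import copy, and DictProxyType is presumably type(object.__dict__) in utool, both assumptions here), key order stops mattering; note that sequence inputs come back as a tuple of hashes rather than a single int:

import copy                             # make_hash relies on copy.deepcopy
DictProxyType = type(object.__dict__)   # assumed utool module-level definition

print(make_hash({'a': [1, 2], 'b': {'c': 3}}) ==
      make_hash({'b': {'c': 3}, 'a': [1, 2]}))    # True: items are sorted first
print(make_hash(['x', 'y']))   # a tuple of per-element hashes, not a single int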
profitbricks/profitbricks-sdk-python
profitbricks/client.py
https://github.com/profitbricks/profitbricks-sdk-python/blob/2c804b141688eccb07d6ae56601d5c60a62abebd/profitbricks/client.py#L917-L935
def create_loadbalancer(self, datacenter_id, loadbalancer): """ Creates a load balancer within the specified data center. :param datacenter_id: The unique ID of the data center. :type datacenter_id: ``str`` :param loadbalancer: The load balancer object to be created. :type loadbalancer: ``dict`` """ data = json.dumps(self._create_loadbalancer_dict(loadbalancer)) response = self._perform_request( url='/datacenters/%s/loadbalancers' % datacenter_id, method='POST', data=data) return response
[ "def", "create_loadbalancer", "(", "self", ",", "datacenter_id", ",", "loadbalancer", ")", ":", "data", "=", "json", ".", "dumps", "(", "self", ".", "_create_loadbalancer_dict", "(", "loadbalancer", ")", ")", "response", "=", "self", ".", "_perform_request", "(", "url", "=", "'/datacenters/%s/loadbalancers'", "%", "datacenter_id", ",", "method", "=", "'POST'", ",", "data", "=", "data", ")", "return", "response" ]
Creates a load balancer within the specified data center. :param datacenter_id: The unique ID of the data center. :type datacenter_id: ``str`` :param loadbalancer: The load balancer object to be created. :type loadbalancer: ``dict``
[ "Creates", "a", "load", "balancer", "within", "the", "specified", "data", "center", "." ]
python
valid
32.526316
pyca/pynacl
src/nacl/hash.py
https://github.com/pyca/pynacl/blob/0df0c2c7693fa5d316846111ce510702756f5feb/src/nacl/hash.py#L144-L160
def siphashx24(message, key=b'', encoder=nacl.encoding.HexEncoder): """ Computes a keyed MAC of ``message`` using the 128 bit variant of the siphash-2-4 construction. :param message: The message to hash. :type message: bytes :param key: the message authentication key for the siphash MAC construct :type key: bytes(:const:`SIPHASHX_KEYBYTES`) :param encoder: A class that is able to encode the hashed message. :returns: The hashed message. :rtype: bytes(:const:`SIPHASHX_BYTES`) .. versionadded:: 1.2 """ digest = _sip_hashx(message, key) return encoder.encode(digest)
[ "def", "siphashx24", "(", "message", ",", "key", "=", "b''", ",", "encoder", "=", "nacl", ".", "encoding", ".", "HexEncoder", ")", ":", "digest", "=", "_sip_hashx", "(", "message", ",", "key", ")", "return", "encoder", ".", "encode", "(", "digest", ")" ]
Computes a keyed MAC of ``message`` using the 128 bit variant of the siphash-2-4 construction. :param message: The message to hash. :type message: bytes :param key: the message authentication key for the siphash MAC construct :type key: bytes(:const:`SIPHASHX_KEYBYTES`) :param encoder: A class that is able to encode the hashed message. :returns: The hashed message. :rtype: bytes(:const:`SIPHASHX_BYTES`) .. versionadded:: 1.2
[ "Computes", "a", "keyed", "MAC", "of", "message", "using", "the", "128", "bit", "variant", "of", "the", "siphash", "-", "2", "-", "4", "construction", "." ]
python
train
35.882353
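Real PyNaCl usage of the function above: the key must be exactly 16 bytes, and the hex output is 32 characters because the digest is 128 bits (to the best of my knowledge nacl.hash exposes SIPHASHX_KEYBYTES in PyNaCl >= 1.2; the literal 16 below sidesteps that assumption):

import nacl.hash

key = b'0' * 16                # SIPHASHX_KEYBYTES
tag = nacl.hash.siphashx24(b'message', key=key)
print(len(tag), tag)           # 32 hex chars for the 128-bit digest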
pescadores/pescador
examples/frameworks/keras_example.py
https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/examples/frameworks/keras_example.py#L155-L178
def additive_noise(stream, key='X', scale=1e-1): '''Add noise to a data stream. Parameters ---------- stream : iterable A stream that yields data objects. key : string, default='X' Name of the field to add noise. scale : float, default=0.1 Scale factor for gaussian noise. Yields ------ data : dict Updated data objects in the stream. ''' for data in stream: noise_shape = data[key].shape noise = scale * np.random.randn(*noise_shape) data[key] = data[key] + noise yield data
[ "def", "additive_noise", "(", "stream", ",", "key", "=", "'X'", ",", "scale", "=", "1e-1", ")", ":", "for", "data", "in", "stream", ":", "noise_shape", "=", "data", "[", "key", "]", ".", "shape", "noise", "=", "scale", "*", "np", ".", "random", ".", "randn", "(", "*", "noise_shape", ")", "data", "[", "key", "]", "=", "data", "[", "key", "]", "+", "noise", "yield", "data" ]
Add noise to a data stream. Parameters ---------- stream : iterable A stream that yields data objects. key : string, default='X' Name of the field to add noise. scale : float, default=0.1 Scale factor for gaussian noise. Yields ------ data : dict Updated data objects in the stream.
[ "Add", "noise", "to", "a", "data", "stream", "." ]
python
train
23.5
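Because additive_noise is itself a generator, it chains onto any dict-yielding stream; with the function above in scope:

import numpy as np

def source():
    for _ in range(2):
        yield {'X': np.zeros(3)}

for data in additive_noise(source(), key='X', scale=0.5):
    print(data['X'])   # zeros plus gaussian noise scaled by 0.5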
wright-group/WrightTools
WrightTools/kit/_path.py
https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/kit/_path.py#L20-L44
def get_path_matching(name): """Get path matching a name. Parameters ---------- name : string Name to search for. Returns ------- string Full filepath. """ # first try looking in the user folder p = os.path.join(os.path.expanduser("~"), name) # then try expanding upwards from cwd if not os.path.isdir(p): p = None drive, folders = os.path.splitdrive(os.getcwd()) folders = folders.split(os.sep) folders.insert(0, os.sep) if name in folders: p = os.path.join(drive, *folders[: folders.index(name) + 1]) # TODO: something more robust to catch the rest of the cases? return p
[ "def", "get_path_matching", "(", "name", ")", ":", "# first try looking in the user folder", "p", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "expanduser", "(", "\"~\"", ")", ",", "name", ")", "# then try expanding upwards from cwd", "if", "not", "os", ".", "path", ".", "isdir", "(", "p", ")", ":", "p", "=", "None", "drive", ",", "folders", "=", "os", ".", "path", ".", "splitdrive", "(", "os", ".", "getcwd", "(", ")", ")", "folders", "=", "folders", ".", "split", "(", "os", ".", "sep", ")", "folders", ".", "insert", "(", "0", ",", "os", ".", "sep", ")", "if", "name", "in", "folders", ":", "p", "=", "os", ".", "path", ".", "join", "(", "drive", ",", "*", "folders", "[", ":", "folders", ".", "index", "(", "name", ")", "+", "1", "]", ")", "# TODO: something more robust to catch the rest of the cases?", "return", "p" ]
Get path matching a name. Parameters ---------- name : string Name to search for. Returns ------- string Full filepath.
[ "Get", "path", "matching", "a", "name", "." ]
python
train
27
BlueBrain/NeuroM
neurom/core/types.py
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/core/types.py#L66-L92
def tree_type_checker(*ref): '''Tree type checker functor Returns: Functor that takes a tree, and returns true if that tree matches any of NeuriteTypes in ref Ex: >>> from neurom.core.types import NeuriteType, tree_type_checker >>> tree_filter = tree_type_checker(NeuriteType.axon, NeuriteType.basal_dendrite) >>> nrn.i_neurites(tree.isegment, tree_filter=tree_filter) ''' ref = tuple(ref) if NeuriteType.all in ref: def check_tree_type(_): '''Always returns true''' return True else: def check_tree_type(tree): '''Check whether tree has the same type as ref Returns: True if ref in the same type as tree.type or ref is NeuriteType.all ''' return tree.type in ref return check_tree_type
[ "def", "tree_type_checker", "(", "*", "ref", ")", ":", "ref", "=", "tuple", "(", "ref", ")", "if", "NeuriteType", ".", "all", "in", "ref", ":", "def", "check_tree_type", "(", "_", ")", ":", "'''Always returns true'''", "return", "True", "else", ":", "def", "check_tree_type", "(", "tree", ")", ":", "'''Check whether tree has the same type as ref\n\n Returns:\n True if ref in the same type as tree.type or ref is NeuriteType.all\n '''", "return", "tree", ".", "type", "in", "ref", "return", "check_tree_type" ]
Tree type checker functor Returns: Functor that takes a tree, and returns true if that tree matches any of NeuriteTypes in ref Ex: >>> from neurom.core.types import NeuriteType, tree_type_checker >>> tree_filter = tree_type_checker(NeuriteType.axon, NeuriteType.basal_dendrite) >>> nrn.i_neurites(tree.isegment, tree_filter=tree_filter)
[ "Tree", "type", "checker", "functor" ]
python
train
31.111111
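The closure only needs objects exposing a .type attribute; a stand-in enum and tree make the dispatch visible (names below are illustrative, not NeuroM's data model, and the checker logic is restated inline so the sketch runs on its own):

from collections import namedtuple
from enum import Enum

class NeuriteType(Enum):          # illustrative stand-in
    all = 0
    axon = 1
    basal_dendrite = 2

Tree = namedtuple('Tree', 'type')

def tree_type_checker(*ref):      # same logic as above, restated to run alone
    ref = tuple(ref)
    if NeuriteType.all in ref:
        return lambda _: True
    return lambda tree: tree.type in ref

is_axon = tree_type_checker(NeuriteType.axon)
print(is_axon(Tree(NeuriteType.axon)))                               # True
print(is_axon(Tree(NeuriteType.basal_dendrite)))                     # False
print(tree_type_checker(NeuriteType.all)(Tree(NeuriteType.axon)))    # True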
cuihantao/andes
andes/variables/call.py
https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/variables/call.py#L178-L188
def _compile_bus_injection(self): """Impose injections on buses""" string = '"""\n' for device, series in zip(self.devices, self.series): if series: string += 'system.' + device + '.gcall(system.dae)\n' string += '\n' string += 'system.dae.reset_small_g()\n' string += self.gisland string += '"""' self.bus_injection = compile(eval(string), '', 'exec')
[ "def", "_compile_bus_injection", "(", "self", ")", ":", "string", "=", "'\"\"\"\\n'", "for", "device", ",", "series", "in", "zip", "(", "self", ".", "devices", ",", "self", ".", "series", ")", ":", "if", "series", ":", "string", "+=", "'system.'", "+", "device", "+", "'.gcall(system.dae)\\n'", "string", "+=", "'\\n'", "string", "+=", "'system.dae.reset_small_g()\\n'", "string", "+=", "self", ".", "gisland", "string", "+=", "'\"\"\"'", "self", ".", "bus_injection", "=", "compile", "(", "eval", "(", "string", ")", ",", "''", ",", "'exec'", ")" ]
Impose injections on buses
[ "Impose", "injections", "on", "buses" ]
python
train
39.454545
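The eval-then-compile dance above just turns accumulated source text into a reusable code object; the core mechanic, minus the model specifics:

# build source as a triple-quoted literal, eval it back to a plain string,
# then compile once and exec against a namespace
string = '"""\n'
string += 'total = a + b\n'
string += '"""'
code = compile(eval(string), '<generated>', 'exec')
ns = {'a': 1, 'b': 2}
exec(code, ns)
print(ns['total'])   # 3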
log2timeline/dfwinreg
dfwinreg/interface.py
https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/interface.py#L291-L302
def DataIsInteger(self):
    """Determines, based on the data type, if the data is an integer.

    The data types considered integers are: REG_DWORD (REG_DWORD_LITTLE_ENDIAN),
    REG_DWORD_BIG_ENDIAN and REG_QWORD.

    Returns:
      bool: True if the data is an integer, False otherwise.
    """
    return self.data_type in (
        definitions.REG_DWORD, definitions.REG_DWORD_BIG_ENDIAN,
        definitions.REG_QWORD)
[ "def", "DataIsInteger", "(", "self", ")", ":", "return", "self", ".", "data_type", "in", "(", "definitions", ".", "REG_DWORD", ",", "definitions", ".", "REG_DWORD_BIG_ENDIAN", ",", "definitions", ".", "REG_QWORD", ")" ]
Determines, based on the data type, if the data is an integer.

The data types considered integers are: REG_DWORD (REG_DWORD_LITTLE_ENDIAN),
REG_DWORD_BIG_ENDIAN and REG_QWORD.

Returns:
  bool: True if the data is an integer, False otherwise.
[ "Determines", "based", "on", "the", "data", "type", "if", "the", "data", "is", "an", "integer", "." ]
python
train
34.5
hyperledger/indy-sdk
vcx/wrappers/python3/vcx/api/credential.py
https://github.com/hyperledger/indy-sdk/blob/55240dc170308d7883c48f03f308130a6d077be6/vcx/wrappers/python3/vcx/api/credential.py#L186-L210
async def send_request(self, connection: Connection, payment_handle: int): """ Approves the credential offer and submits a credential request. The result will be a credential stored in the prover's wallet. :param connection: connection to submit request from :param payment_handle: currently unused :return: Example: connection = await Connection.create(source_id) await connection.connect(phone_number) credential = await Credential.create(source_id, offer) await credential.send_request(connection, 0) """ if not hasattr(Credential.send_request, "cb"): self.logger.debug("vcx_credential_send_request: Creating callback") Credential.send_request.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32)) c_credential_handle = c_uint32(self.handle) c_connection_handle = c_uint32(connection.handle) c_payment = c_uint32(payment_handle) await do_call('vcx_credential_send_request', c_credential_handle, c_connection_handle, c_payment, Credential.send_request.cb)
[ "async", "def", "send_request", "(", "self", ",", "connection", ":", "Connection", ",", "payment_handle", ":", "int", ")", ":", "if", "not", "hasattr", "(", "Credential", ".", "send_request", ",", "\"cb\"", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"vcx_credential_send_request: Creating callback\"", ")", "Credential", ".", "send_request", ".", "cb", "=", "create_cb", "(", "CFUNCTYPE", "(", "None", ",", "c_uint32", ",", "c_uint32", ")", ")", "c_credential_handle", "=", "c_uint32", "(", "self", ".", "handle", ")", "c_connection_handle", "=", "c_uint32", "(", "connection", ".", "handle", ")", "c_payment", "=", "c_uint32", "(", "payment_handle", ")", "await", "do_call", "(", "'vcx_credential_send_request'", ",", "c_credential_handle", ",", "c_connection_handle", ",", "c_payment", ",", "Credential", ".", "send_request", ".", "cb", ")" ]
Approves the credential offer and submits a credential request. The result will be a credential stored in the prover's wallet. :param connection: connection to submit request from :param payment_handle: currently unused :return: Example: connection = await Connection.create(source_id) await connection.connect(phone_number) credential = await Credential.create(source_id, offer) await credential.send_request(connection, 0)
[ "Approves", "the", "credential", "offer", "and", "submits", "a", "credential", "request", ".", "The", "result", "will", "be", "a", "credential", "stored", "in", "the", "prover", "s", "wallet", ".", ":", "param", "connection", ":", "connection", "to", "submit", "request", "from", ":", "param", "payment_handle", ":", "currently", "unused", ":", "return", ":", "Example", ":", "connection", "=", "await", "Connection", ".", "create", "(", "source_id", ")", "await", "connection", ".", "connect", "(", "phone_number", ")", "credential", "=", "await", "Credential", ".", "create", "(", "source_id", "offer", ")", "await", "credential", ".", "send_request", "(", "connection", "0", ")" ]
python
train
46.92
jaredLunde/vital-tools
vital/tools/strings.py
https://github.com/jaredLunde/vital-tools/blob/ea924c9bbb6ec22aa66f8095f018b1ee0099ac04/vital/tools/strings.py#L61-L72
def is_username(string, minlen=1, maxlen=15): """ Determines whether the @string pattern is username-like @string: #str being tested @minlen: minimum required username length @maxlen: maximum username length -> #bool """ if string: string = string.strip() return username_re.match(string) and (minlen <= len(string) <= maxlen) return False
[ "def", "is_username", "(", "string", ",", "minlen", "=", "1", ",", "maxlen", "=", "15", ")", ":", "if", "string", ":", "string", "=", "string", ".", "strip", "(", ")", "return", "username_re", ".", "match", "(", "string", ")", "and", "(", "minlen", "<=", "len", "(", "string", ")", "<=", "maxlen", ")", "return", "False" ]
Determines whether the @string pattern is username-like @string: #str being tested @minlen: minimum required username length @maxlen: maximum username length -> #bool
[ "Determines", "whether", "the", "@string", "pattern", "is", "username", "-", "like", "@string", ":", "#str", "being", "tested", "@minlen", ":", "minimum", "required", "username", "length", "@maxlen", ":", "maximum", "username", "length" ]
python
train
32.75
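username_re is defined elsewhere in this module; a Twitter-style pattern (an assumption, not necessarily vital's actual regex) makes the checks concrete:

import re

username_re = re.compile(r'[A-Za-z0-9_]+$')   # hypothetical pattern

def is_username(string, minlen=1, maxlen=15):
    if string:
        string = string.strip()
        return bool(username_re.match(string)) and (minlen <= len(string) <= maxlen)
    return False

print(is_username('  jared_42 '))                 # True
print(is_username('way_too_long_for_a_handle'))   # False: longer than 15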
mojaie/chorus
chorus/v2000reader.py
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/v2000reader.py#L127-L144
def properties(lines): """Parse properties block Returns: dict: {property_type: (atom_index, value)} """ results = {} for i, line in enumerate(lines): type_ = line[3:6] if type_ not in ["CHG", "RAD", "ISO"]: continue # Other properties are not supported yet count = int(line[6:9]) results[type_] = [] for j in range(count): idx = int(line[10 + j * 8: 13 + j * 8]) val = int(line[14 + j * 8: 17 + j * 8]) results[type_].append((idx, val)) return results
[ "def", "properties", "(", "lines", ")", ":", "results", "=", "{", "}", "for", "i", ",", "line", "in", "enumerate", "(", "lines", ")", ":", "type_", "=", "line", "[", "3", ":", "6", "]", "if", "type_", "not", "in", "[", "\"CHG\"", ",", "\"RAD\"", ",", "\"ISO\"", "]", ":", "continue", "# Other properties are not supported yet", "count", "=", "int", "(", "line", "[", "6", ":", "9", "]", ")", "results", "[", "type_", "]", "=", "[", "]", "for", "j", "in", "range", "(", "count", ")", ":", "idx", "=", "int", "(", "line", "[", "10", "+", "j", "*", "8", ":", "13", "+", "j", "*", "8", "]", ")", "val", "=", "int", "(", "line", "[", "14", "+", "j", "*", "8", ":", "17", "+", "j", "*", "8", "]", ")", "results", "[", "type_", "]", ".", "append", "(", "(", "idx", ",", "val", ")", ")", "return", "results" ]
Parse properties block Returns: dict: {property_type: (atom_index, value)}
[ "Parse", "properties", "block" ]
python
train
30.944444
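Fed a standard V2000 property line, the fixed-width slices decode like so (with properties from above in scope):

lines = ["M  CHG  2   1   1   3  -1"]   # two charges: atom 1 -> +1, atom 3 -> -1
print(properties(lines))
# {'CHG': [(1, 1), (3, -1)]}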
browniebroke/deezer-python
deezer/client.py
https://github.com/browniebroke/deezer-python/blob/fb869c3617045b22e7124e4b783ec1a68d283ac3/deezer/client.py#L150-L156
def get_album(self, object_id, relation=None, **kwargs): """ Get the album with the provided id :returns: an :class:`~deezer.resources.Album` object """ return self.get_object("album", object_id, relation=relation, **kwargs)
[ "def", "get_album", "(", "self", ",", "object_id", ",", "relation", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "get_object", "(", "\"album\"", ",", "object_id", ",", "relation", "=", "relation", ",", "*", "*", "kwargs", ")" ]
Get the album with the provided id :returns: an :class:`~deezer.resources.Album` object
[ "Get", "the", "album", "with", "the", "provided", "id" ]
python
train
37
iotile/coretools
iotilecore/iotile/core/utilities/intelhex/__init__.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/utilities/intelhex/__init__.py#L348-L360
def _tobinarray_really(self, start, end, pad, size): """Return binary array.""" if pad is None: pad = self.padding bin = array('B') if self._buf == {} and None in (start, end): return bin if size is not None and size <= 0: raise ValueError("tobinarray: wrong value for size") start, end = self._get_start_end(start, end, size) for i in range_g(start, end+1): bin.append(self._buf.get(i, pad)) return bin
[ "def", "_tobinarray_really", "(", "self", ",", "start", ",", "end", ",", "pad", ",", "size", ")", ":", "if", "pad", "is", "None", ":", "pad", "=", "self", ".", "padding", "bin", "=", "array", "(", "'B'", ")", "if", "self", ".", "_buf", "==", "{", "}", "and", "None", "in", "(", "start", ",", "end", ")", ":", "return", "bin", "if", "size", "is", "not", "None", "and", "size", "<=", "0", ":", "raise", "ValueError", "(", "\"tobinarray: wrong value for size\"", ")", "start", ",", "end", "=", "self", ".", "_get_start_end", "(", "start", ",", "end", ",", "size", ")", "for", "i", "in", "range_g", "(", "start", ",", "end", "+", "1", ")", ":", "bin", ".", "append", "(", "self", ".", "_buf", ".", "get", "(", "i", ",", "pad", ")", ")", "return", "bin" ]
Return binary array.
[ "Return", "binary", "array", "." ]
python
train
38.692308
praekeltfoundation/marathon-acme
marathon_acme/clients/_base.py
https://github.com/praekeltfoundation/marathon-acme/blob/b1b71e3dde0ba30e575089280658bd32890e3325/marathon_acme/clients/_base.py#L12-L27
def get_single_header(headers, key):
    """
    Get a single value for the given key out of the given set of headers.

    :param twisted.web.http_headers.Headers headers:
        The set of headers in which to look for the header value
    :param str key:
        The header key
    """
    raw_headers = headers.getRawHeaders(key)
    if raw_headers is None:
        return None

    # Take the final header as the authoritative
    header, _ = cgi.parse_header(raw_headers[-1])
    return header
[ "def", "get_single_header", "(", "headers", ",", "key", ")", ":", "raw_headers", "=", "headers", ".", "getRawHeaders", "(", "key", ")", "if", "raw_headers", "is", "None", ":", "return", "None", "# Take the final header as the authorative", "header", ",", "_", "=", "cgi", ".", "parse_header", "(", "raw_headers", "[", "-", "1", "]", ")", "return", "header" ]
Get a single value for the given key out of the given set of headers. :param twisted.web.http_headers.Headers headers: The set of headers in which to look for the header value :param str key: The header key
[ "Get", "a", "single", "value", "for", "the", "given", "key", "out", "of", "the", "given", "set", "of", "headers", "." ]
python
valid
30.125
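The heavy lifting is cgi.parse_header, which splits a header value from its parameters; note the cgi module was removed in Python 3.13, so this runs on 3.12 and earlier:

import cgi

header, params = cgi.parse_header('text/html; charset=UTF-8')
print(header)   # text/html
print(params)   # {'charset': 'UTF-8'}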
pybluez/pybluez
bluetooth/bluez.py
https://github.com/pybluez/pybluez/blob/e0dc4093dcbaa3ecb3fa24f8ccf22bbfe6b57fc9/bluetooth/bluez.py#L119-L129
def get_l2cap_options (sock):
    """get_l2cap_options (sock)

    Gets L2CAP options for the specified L2CAP socket.
    Options are: omtu, imtu, flush_to, mode, fcs, max_tx, txwin_size.
    """
    # TODO this should be in the C module, because it depends
    # directly on struct l2cap_options layout.
    s = sock.getsockopt (SOL_L2CAP, L2CAP_OPTIONS, 12)
    options = list( struct.unpack ("HHHBBBH", s))
    return options
[ "def", "get_l2cap_options", "(", "sock", ")", ":", "# TODO this should be in the C module, because it depends", "# directly on struct l2cap_options layout.", "s", "=", "sock", ".", "getsockopt", "(", "SOL_L2CAP", ",", "L2CAP_OPTIONS", ",", "12", ")", "options", "=", "list", "(", "struct", ".", "unpack", "(", "\"HHHBBBH\"", ",", "s", ")", ")", "return", "options" ]
get_l2cap_options (sock)

Gets L2CAP options for the specified L2CAP socket.
Options are: omtu, imtu, flush_to, mode, fcs, max_tx, txwin_size.
[ "get_l2cap_options", "(", "sock", "mtu", ")" ]
python
train
38.454545
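The 12-byte read matches struct's native alignment for that format string, which can be checked directly (sizes are platform-dependent, hence the hedging in the comment):

import struct

print(struct.calcsize("HHHBBBH"))   # 12 on common platforms (pad before last H)
omtu, imtu, flush_to, mode, fcs, max_tx, txwin_size = \
    struct.unpack("HHHBBBH", bytes(12))
print(omtu, imtu)                   # 0 0 for this all-zero sample buffer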
KelSolaar/Foundations
foundations/trace.py
https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/trace.py#L455-L482
def untracable(object): """ Marks decorated object as non tracable. :param object: Object to decorate. :type object: object :return: Object. :rtype: object """ @functools.wraps(object) def untracable_wrapper(*args, **kwargs): """ Marks decorated object as non tracable. :param \*args: Arguments. :type \*args: \* :param \*\*kwargs: Keywords arguments. :type \*\*kwargs: \*\* :return: Object. :rtype: object """ return object(*args, **kwargs) set_untracable(untracable_wrapper) return untracable_wrapper
[ "def", "untracable", "(", "object", ")", ":", "@", "functools", ".", "wraps", "(", "object", ")", "def", "untracable_wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"\n Marks decorated object as non tracable.\n\n :param \\*args: Arguments.\n :type \\*args: \\*\n :param \\*\\*kwargs: Keywords arguments.\n :type \\*\\*kwargs: \\*\\*\n :return: Object.\n :rtype: object\n \"\"\"", "return", "object", "(", "*", "args", ",", "*", "*", "kwargs", ")", "set_untracable", "(", "untracable_wrapper", ")", "return", "untracable_wrapper" ]
Marks decorated object as non tracable. :param object: Object to decorate. :type object: object :return: Object. :rtype: object
[ "Marks", "decorated", "object", "as", "non", "tracable", "." ]
python
train
21.607143
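The decorator merely tags the wrapper; with a stand-in for set_untracable (Foundations' helper marks the object in a similar spirit, but the attribute name below is hypothetical):

import functools

def set_untracable(obj):
    obj._untracable = True          # hypothetical attribute name
    return obj

def untracable(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    set_untracable(wrapper)
    return wrapper

@untracable
def add(a, b):
    return a + b

print(add(1, 2), getattr(add, '_untracable', False))   # 3 True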
bastibe/SoundFile
soundfile.py
https://github.com/bastibe/SoundFile/blob/161e930da9c9ea76579b6ee18a131e10bca8a605/soundfile.py#L1262-L1271
def _check_frames(self, frames, fill_value): """Reduce frames to no more than are available in the file.""" if self.seekable(): remaining_frames = self.frames - self.tell() if frames < 0 or (frames > remaining_frames and fill_value is None): frames = remaining_frames elif frames < 0: raise ValueError("frames must be specified for non-seekable files") return frames
[ "def", "_check_frames", "(", "self", ",", "frames", ",", "fill_value", ")", ":", "if", "self", ".", "seekable", "(", ")", ":", "remaining_frames", "=", "self", ".", "frames", "-", "self", ".", "tell", "(", ")", "if", "frames", "<", "0", "or", "(", "frames", ">", "remaining_frames", "and", "fill_value", "is", "None", ")", ":", "frames", "=", "remaining_frames", "elif", "frames", "<", "0", ":", "raise", "ValueError", "(", "\"frames must be specified for non-seekable files\"", ")", "return", "frames" ]
Reduce frames to no more than are available in the file.
[ "Reduce", "frames", "to", "no", "more", "than", "are", "available", "in", "the", "file", "." ]
python
train
47.1
sentinel-hub/sentinelhub-py
sentinelhub/aws.py
https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/aws.py#L118-L133
def get_safe_type(self):
    """Determines the type of ESA product.

    In 2016 ESA changed structure and naming of data. Therefore the class must
    distinguish between old product type and compact (new) product type.

    :return: type of ESA product
    :rtype: constants.EsaSafeType
    :raises: ValueError
    """
    product_type = self.product_id.split('_')[1]
    if product_type.startswith('MSI'):
        return EsaSafeType.COMPACT_TYPE
    if product_type in ['OPER', 'USER']:
        return EsaSafeType.OLD_TYPE
    raise ValueError('Unrecognized product type of product id {}'.format(self.product_id))
[ "def", "get_safe_type", "(", "self", ")", ":", "product_type", "=", "self", ".", "product_id", ".", "split", "(", "'_'", ")", "[", "1", "]", "if", "product_type", ".", "startswith", "(", "'MSI'", ")", ":", "return", "EsaSafeType", ".", "COMPACT_TYPE", "if", "product_type", "in", "[", "'OPER'", ",", "'USER'", "]", ":", "return", "EsaSafeType", ".", "OLD_TYPE", "raise", "ValueError", "(", "'Unrecognized product type of product id {}'", ".", "format", "(", "self", ".", "product_id", ")", ")" ]
Determines the type of ESA product.

In 2016 ESA changed structure and naming of data. Therefore the class must
distinguish between old product type and compact (new) product type.

:return: type of ESA product
:rtype: constants.EsaSafeType
:raises: ValueError
[ "Determines", "the", "type", "of", "ESA", "product", "." ]
python
train
40.8125
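The decision hinges on the second underscore-separated token of the product id. A sketch with illustrative (not guaranteed real) Sentinel-2 product ids:

```python
old_id = 'S2A_OPER_PRD_MSIL1C_PDMC_20160121T043931_R069_V20160103T171947_20160103T171947'
new_id = 'S2A_MSIL1C_20170105T013442_N0204_R031_T53NMJ_20170105T013443'

print(old_id.split('_')[1])  # 'OPER'   -> EsaSafeType.OLD_TYPE
print(new_id.split('_')[1])  # 'MSIL1C' -> EsaSafeType.COMPACT_TYPE
```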
ewels/MultiQC
multiqc/modules/fastp/fastp.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/fastp/fastp.py#L385-L401
def fastp_read_n_plot(self):
    """ Make the read N content plot for Fastp """
    data_labels, pdata = self.filter_pconfig_pdata_subplots(self.fastp_n_content_data, 'Base Content Percent')
    pconfig = {
        'id': 'fastp-seq-content-n-plot',
        'title': 'Fastp: Read N Content',
        'xlab': 'Read Position',
        'ylab': 'R1 Before filtering: Base Content Percent',
        'yCeiling': 100,
        'yMinRange': 5,
        'ymin': 0,
        'xDecimals': False,
        'yLabelFormat': '{value}%',
        'tt_label': '{point.x}: {point.y:.2f}%',
        'data_labels': data_labels
    }
    return linegraph.plot(pdata, pconfig)
[ "def", "fastp_read_n_plot", "(", "self", ")", ":", "data_labels", ",", "pdata", "=", "self", ".", "filter_pconfig_pdata_subplots", "(", "self", ".", "fastp_n_content_data", ",", "'Base Content Percent'", ")", "pconfig", "=", "{", "'id'", ":", "'fastp-seq-content-n-plot'", ",", "'title'", ":", "'Fastp: Read N Content'", ",", "'xlab'", ":", "'Read Position'", ",", "'ylab'", ":", "'R1 Before filtering: Base Content Percent'", ",", "'yCeiling'", ":", "100", ",", "'yMinRange'", ":", "5", ",", "'ymin'", ":", "0", ",", "'xDecimals'", ":", "False", ",", "'yLabelFormat'", ":", "'{value}%'", ",", "'tt_label'", ":", "'{point.x}: {point.y:.2f}%'", ",", "'data_labels'", ":", "data_labels", "}", "return", "linegraph", ".", "plot", "(", "pdata", ",", "pconfig", ")" ]
Make the read N content plot for Fastp
[ "Make", "the", "read", "N", "content", "plot", "for", "Fastp" ]
python
train
40.941176
inasafe/inasafe
safe/gui/analysis_utilities.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/analysis_utilities.py#L219-L234
def add_layer_to_canvas(layer, name):
    """Helper method to add layer to QGIS.

    :param layer: The layer.
    :type layer: QgsMapLayer

    :param name: Layer name.
    :type name: str
    """
    if qgis_version() >= 21800:
        layer.setName(name)
    else:
        layer.setLayerName(name)

    QgsProject.instance().addMapLayer(layer, False)
[ "def", "add_layer_to_canvas", "(", "layer", ",", "name", ")", ":", "if", "qgis_version", "(", ")", ">=", "21800", ":", "layer", ".", "setName", "(", "name", ")", "else", ":", "layer", ".", "setLayerName", "(", "name", ")", "QgsProject", ".", "instance", "(", ")", ".", "addMapLayer", "(", "layer", ",", "False", ")" ]
Helper method to add layer to QGIS.

:param layer: The layer.
:type layer: QgsMapLayer

:param name: Layer name.
:type name: str
[ "Helper", "method", "to", "add", "layer", "to", "QGIS", "." ]
python
train
21.1875
amoffat/sh
sh.py
https://github.com/amoffat/sh/blob/858adf0c682af4c40e41f34d6926696b7a5d3b12/sh.py#L1026-L1051
def bufsize_validator(kwargs):
    """ a validator to prevent a user from saying that they want custom
    buffering when they're using an in/out object that will be os.dup'd to the
    process, and has its own buffering.  an example is a pipe or a tty.  it
    doesn't make sense to tell them to have a custom buffering, since the os
    controls this. """
    invalid = []

    in_ob = kwargs.get("in", None)
    out_ob = kwargs.get("out", None)

    in_buf = kwargs.get("in_bufsize", None)
    out_buf = kwargs.get("out_bufsize", None)

    in_no_buf = ob_is_tty(in_ob) or ob_is_pipe(in_ob)
    out_no_buf = ob_is_tty(out_ob) or ob_is_pipe(out_ob)

    err = "Can't specify an {target} bufsize if the {target} target is a pipe or TTY"

    if in_no_buf and in_buf is not None:
        invalid.append((("in", "in_bufsize"), err.format(target="in")))

    if out_no_buf and out_buf is not None:
        invalid.append((("out", "out_bufsize"), err.format(target="out")))

    return invalid
[ "def", "bufsize_validator", "(", "kwargs", ")", ":", "invalid", "=", "[", "]", "in_ob", "=", "kwargs", ".", "get", "(", "\"in\"", ",", "None", ")", "out_ob", "=", "kwargs", ".", "get", "(", "\"out\"", ",", "None", ")", "in_buf", "=", "kwargs", ".", "get", "(", "\"in_bufsize\"", ",", "None", ")", "out_buf", "=", "kwargs", ".", "get", "(", "\"out_bufsize\"", ",", "None", ")", "in_no_buf", "=", "ob_is_tty", "(", "in_ob", ")", "or", "ob_is_pipe", "(", "in_ob", ")", "out_no_buf", "=", "ob_is_tty", "(", "out_ob", ")", "or", "ob_is_pipe", "(", "out_ob", ")", "err", "=", "\"Can't specify an {target} bufsize if the {target} target is a pipe or TTY\"", "if", "in_no_buf", "and", "in_buf", "is", "not", "None", ":", "invalid", ".", "append", "(", "(", "(", "\"in\"", ",", "\"in_bufsize\"", ")", ",", "err", ".", "format", "(", "target", "=", "\"in\"", ")", ")", ")", "if", "out_no_buf", "and", "out_buf", "is", "not", "None", ":", "invalid", ".", "append", "(", "(", "(", "\"out\"", ",", "\"out_bufsize\"", ")", ",", "err", ".", "format", "(", "target", "=", "\"out\"", ")", ")", ")", "return", "invalid" ]
a validator to prevent a user from saying that they want custom buffering when they're using an in/out object that will be os.dup'd to the process, and has its own buffering. an example is a pipe or a tty. it doesn't make sense to tell them to have a custom buffering, since the os controls this.
[ "a", "validator", "to", "prevent", "a", "user", "from", "saying", "that", "they", "want", "custom", "buffering", "when", "they", "re", "using", "an", "in", "/", "out", "object", "that", "will", "be", "os", ".", "dup", "d", "to", "the", "process", "and", "has", "its", "own", "buffering", ".", "an", "example", "is", "a", "pipe", "or", "a", "tty", ".", "it", "doesn", "t", "make", "sense", "to", "tell", "them", "to", "have", "a", "custom", "buffering", "since", "the", "os", "controls", "this", "." ]
python
train
37.115385
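A minimal sketch of what the validator flags, assuming (as sh's fileno helpers appear to allow) that a raw pipe file descriptor is accepted as the out target:

```python
import os

r, w = os.pipe()
problems = bufsize_validator({"out": w, "out_bufsize": 1024})
print(problems)
# [(('out', 'out_bufsize'), "Can't specify an out bufsize if the out target is a pipe or TTY")]
```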
kolypto/py-smsframework
smsframework/providers/forward/provider.py
https://github.com/kolypto/py-smsframework/blob/4f3d812711f5e2e037dc80c4014c815fe2d68a0b/smsframework/providers/forward/provider.py#L162-L176
def send(self, message):
    """ Send a message by forwarding it to the server
    :param message: Message
    :type message: smsframework.data.OutgoingMessage
    :rtype: smsframework.data.OutgoingMessage
    :raise Exception: any exception reported by the other side
    :raise urllib2.URLError: Connection error
    """
    res = jsonex_request(self.server_url + '/im'.lstrip('/'), {'message': message})
    msg = res['message']  # OutgoingMessage object

    # Replace properties in the original object (so it's the same object, like with other providers)
    for k, v in msg.__dict__.items():
        setattr(message, k, v)
    return message
[ "def", "send", "(", "self", ",", "message", ")", ":", "res", "=", "jsonex_request", "(", "self", ".", "server_url", "+", "'/im'", ".", "lstrip", "(", "'/'", ")", ",", "{", "'message'", ":", "message", "}", ")", "msg", "=", "res", "[", "'message'", "]", "# OutgoingMessage object", "# Replace properties in the original object (so it's the same object, like with other providers)", "for", "k", ",", "v", "in", "msg", ".", "__dict__", ".", "items", "(", ")", ":", "setattr", "(", "message", ",", "k", ",", "v", ")", "return", "message" ]
Send a message by forwarding it to the server

:param message: Message
:type message: smsframework.data.OutgoingMessage
:rtype: smsframework.data.OutgoingMessage
:raise Exception: any exception reported by the other side
:raise urllib2.URLError: Connection error
[ "Send", "a", "message", "by", "forwarding", "it", "to", "the", "server", ":", "param", "message", ":", "Message", ":", "type", "message", ":", "smsframework", ".", "data", ".", "OutgoingMessage", ":", "rtype", ":", "smsframework", ".", "data", ".", "OutgoingMessage", ":", "raise", "Exception", ":", "any", "exception", "reported", "by", "the", "other", "side", ":", "raise", "urllib2", ".", "URLError", ":", "Connection", "error" ]
python
test
45.666667
ioam/lancet
lancet/core.py
https://github.com/ioam/lancet/blob/1fbbf88fa0e8974ff9ed462e3cb11722ddebdd6e/lancet/core.py#L65-L107
def _pprint(self, cycle=False, flat=False, annotate=False,
            onlychanged=True, level=1, tab = ' '):
    """
    Pretty printer that prints only the modified keywords and
    generates flat representations (for repr) and optionally
    annotates the top of the repr with a comment.
    """
    (kwargs, pos_args, infix_operator, extra_params) = self._pprint_args
    (br, indent) = ('' if flat else '\n', '' if flat else tab * level)
    prettify = lambda x: isinstance(x, PrettyPrinted) and not flat
    pretty = lambda x: x._pprint(flat=flat, level=level+1) if prettify(x) else repr(x)

    params = dict(self.get_param_values())
    show_lexsort = getattr(self, '_lexorder', None) is not None
    modified = [k for (k,v) in self.get_param_values(onlychanged=onlychanged)]
    pkwargs = [(k, params[k]) for k in kwargs if (k in modified)] + list(extra_params.items())
    arg_list = [(k,params[k]) for k in pos_args] + pkwargs

    lines = []
    if annotate:  # Optional annotating comment
        len_ckeys, len_vkeys = len(self.constant_keys), len(self.varying_keys)
        info_triple = (len(self),
                       ', %d constant key(s)' % len_ckeys if len_ckeys else '',
                       ', %d varying key(s)' % len_vkeys if len_vkeys else '')
        annotation = '# == %d items%s%s ==\n' % info_triple
        lines = [annotation]

    if show_lexsort:
        lines.append('(')
    if cycle:
        lines.append('%s(...)' % self.__class__.__name__)
    elif infix_operator:
        level = level - 1
        triple = (pretty(params[pos_args[0]]), infix_operator, pretty(params[pos_args[1]]))
        lines.append('%s %s %s' % triple)
    else:
        lines.append('%s(' % self.__class__.__name__)
        for (k,v) in arg_list:
            lines.append('%s%s=%s' % (br+indent, k, pretty(v)))
            lines.append(',')
        lines = lines[:-1] + [br+(tab*(level-1))+')']  # Remove trailing comma

    if show_lexsort:
        lines.append(').lexsort(%s)' % ', '.join(repr(el) for el in self._lexorder))

    return ''.join(lines)
[ "def", "_pprint", "(", "self", ",", "cycle", "=", "False", ",", "flat", "=", "False", ",", "annotate", "=", "False", ",", "onlychanged", "=", "True", ",", "level", "=", "1", ",", "tab", "=", "' '", ")", ":", "(", "kwargs", ",", "pos_args", ",", "infix_operator", ",", "extra_params", ")", "=", "self", ".", "_pprint_args", "(", "br", ",", "indent", ")", "=", "(", "''", "if", "flat", "else", "'\\n'", ",", "''", "if", "flat", "else", "tab", "*", "level", ")", "prettify", "=", "lambda", "x", ":", "isinstance", "(", "x", ",", "PrettyPrinted", ")", "and", "not", "flat", "pretty", "=", "lambda", "x", ":", "x", ".", "_pprint", "(", "flat", "=", "flat", ",", "level", "=", "level", "+", "1", ")", "if", "prettify", "(", "x", ")", "else", "repr", "(", "x", ")", "params", "=", "dict", "(", "self", ".", "get_param_values", "(", ")", ")", "show_lexsort", "=", "getattr", "(", "self", ",", "'_lexorder'", ",", "None", ")", "is", "not", "None", "modified", "=", "[", "k", "for", "(", "k", ",", "v", ")", "in", "self", ".", "get_param_values", "(", "onlychanged", "=", "onlychanged", ")", "]", "pkwargs", "=", "[", "(", "k", ",", "params", "[", "k", "]", ")", "for", "k", "in", "kwargs", "if", "(", "k", "in", "modified", ")", "]", "+", "list", "(", "extra_params", ".", "items", "(", ")", ")", "arg_list", "=", "[", "(", "k", ",", "params", "[", "k", "]", ")", "for", "k", "in", "pos_args", "]", "+", "pkwargs", "lines", "=", "[", "]", "if", "annotate", ":", "# Optional annotating comment", "len_ckeys", ",", "len_vkeys", "=", "len", "(", "self", ".", "constant_keys", ")", ",", "len", "(", "self", ".", "varying_keys", ")", "info_triple", "=", "(", "len", "(", "self", ")", ",", "', %d constant key(s)'", "%", "len_ckeys", "if", "len_ckeys", "else", "''", ",", "', %d varying key(s)'", "%", "len_vkeys", "if", "len_vkeys", "else", "''", ")", "annotation", "=", "'# == %d items%s%s ==\\n'", "%", "info_triple", "lines", "=", "[", "annotation", "]", "if", "show_lexsort", ":", "lines", ".", "append", "(", "'('", ")", "if", "cycle", ":", "lines", ".", "append", "(", "'%s(...)'", "%", "self", ".", "__class__", ".", "__name__", ")", "elif", "infix_operator", ":", "level", "=", "level", "-", "1", "triple", "=", "(", "pretty", "(", "params", "[", "pos_args", "[", "0", "]", "]", ")", ",", "infix_operator", ",", "pretty", "(", "params", "[", "pos_args", "[", "1", "]", "]", ")", ")", "lines", ".", "append", "(", "'%s %s %s'", "%", "triple", ")", "else", ":", "lines", ".", "append", "(", "'%s('", "%", "self", ".", "__class__", ".", "__name__", ")", "for", "(", "k", ",", "v", ")", "in", "arg_list", ":", "lines", ".", "append", "(", "'%s%s=%s'", "%", "(", "br", "+", "indent", ",", "k", ",", "pretty", "(", "v", ")", ")", ")", "lines", ".", "append", "(", "','", ")", "lines", "=", "lines", "[", ":", "-", "1", "]", "+", "[", "br", "+", "(", "tab", "*", "(", "level", "-", "1", ")", ")", "+", "')'", "]", "# Remove trailing comma", "if", "show_lexsort", ":", "lines", ".", "append", "(", "').lexsort(%s)'", "%", "', '", ".", "join", "(", "repr", "(", "el", ")", "for", "el", "in", "self", ".", "_lexorder", ")", ")", "return", "''", ".", "join", "(", "lines", ")" ]
Pretty printer that prints only the modified keywords and generates flat representations (for repr) and optionally annotates the top of the repr with a comment.
[ "Pretty", "printer", "that", "prints", "only", "the", "modified", "keywords", "and", "generates", "flat", "representations", "(", "for", "repr", ")", "and", "optionally", "annotates", "the", "top", "of", "the", "repr", "with", "a", "comment", "." ]
python
valid
50.255814
frejanordsiek/hdf5storage
hdf5storage/utilities.py
https://github.com/frejanordsiek/hdf5storage/blob/539275141dd3a4efbbbfd9bdb978f3ed59e3f05d/hdf5storage/utilities.py#L702-L743
def next_unused_name_in_group(grp, length):
    """ Gives a name that isn't used in a Group.

    Generates a name of the desired length that is not a Dataset or
    Group in the given group. Note, if length is not large enough and
    `grp` is full enough, there may be no available names meaning that
    this function will hang.

    Parameters
    ----------
    grp : h5py.Group or h5py.File
        The HDF5 Group (or File if at '/') to generate an unused name
        in.
    length : int
        Number of characters the name should be.

    Returns
    -------
    name : str
        A name that isn't already an existing Dataset or Group in
        `grp`.

    """
    # While
    #
    # ltrs = string.ascii_letters + string.digits
    # name = ''.join([random.choice(ltrs) for i in range(length)])
    #
    # seems intuitive, its performance is abysmal compared to
    #
    # '%0{0}x'.format(length) % random.getrandbits(length * 4)
    #
    # The difference is a factor of 20. Idea from
    #
    # https://stackoverflow.com/questions/2782229/most-lightweight-way-
    # to-create-a-random-string-and-a-random-hexadecimal-number/
    # 35161595#35161595
    fmt = '%0{0}x'.format(length)
    name = fmt % random.getrandbits(length * 4)
    while name in grp:
        name = fmt % random.getrandbits(length * 4)
    return name
[ "def", "next_unused_name_in_group", "(", "grp", ",", "length", ")", ":", "# While", "#", "# ltrs = string.ascii_letters + string.digits", "# name = ''.join([random.choice(ltrs) for i in range(length)])", "#", "# seems intuitive, its performance is abysmal compared to", "#", "# '%0{0}x'.format(length) % random.getrandbits(length * 4)", "#", "# The difference is a factor of 20. Idea from", "#", "# https://stackoverflow.com/questions/2782229/most-lightweight-way-", "# to-create-a-random-string-and-a-random-hexadecimal-number/", "# 35161595#35161595", "fmt", "=", "'%0{0}x'", ".", "format", "(", "length", ")", "name", "=", "fmt", "%", "random", ".", "getrandbits", "(", "length", "*", "4", ")", "while", "name", "in", "grp", ":", "name", "=", "fmt", "%", "random", ".", "getrandbits", "(", "length", "*", "4", ")", "return", "name" ]
Gives a name that isn't used in a Group.

Generates a name of the desired length that is not a Dataset or
Group in the given group. Note, if length is not large enough and
`grp` is full enough, there may be no available names meaning that
this function will hang.

Parameters
----------
grp : h5py.Group or h5py.File
    The HDF5 Group (or File if at '/') to generate an unused name in.
length : int
    Number of characters the name should be.

Returns
-------
name : str
    A name that isn't already an existing Dataset or Group in `grp`.
[ "Gives", "a", "name", "that", "isn", "t", "used", "in", "a", "Group", "." ]
python
train
31.071429
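Usage sketch against a throwaway HDF5 file (the file name is illustrative); a length of 16 yields 16 hex characters, e.g. '3fa85f6457174462':

```python
import h5py

with h5py.File('scratch.h5', 'w') as f:
    name = next_unused_name_in_group(f['/'], 16)
    f.create_dataset(name, data=[1, 2, 3])
```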
waqasbhatti/astrobase
astrobase/services/simbad.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/services/simbad.py#L861-L977
def objectnames_conesearch(racenter,
                           declcenter,
                           searchradiusarcsec,
                           simbad_mirror='simbad',
                           returnformat='csv',
                           forcefetch=False,
                           cachedir='~/.astrobase/simbad-cache',
                           verbose=True,
                           timeout=10.0,
                           refresh=2.0,
                           maxtimeout=90.0,
                           maxtries=1,
                           complete_query_later=True):
    '''This queries the SIMBAD TAP service for a list of object names near the
    coords. This is effectively a "reverse" name resolver (i.e. this does the
    opposite of SESAME).

    Parameters
    ----------

    racenter,declcenter : float
        The cone-search center coordinates in decimal degrees

    searchradiusarcsec : float
        The radius in arcseconds to search around the center coordinates.

    simbad_mirror : str
        This is the key used to select a SIMBAD mirror from the `SIMBAD_URLS`
        dict above. If set, the specified mirror will be used. If None, a
        random mirror chosen from that dict will be used.

    returnformat : {'csv','votable','json'}
        The returned file format to request from the SIMBAD TAP service.

    forcefetch : bool
        If this is True, the query will be retried even if cached results for
        it exist.

    cachedir : str
        This points to the directory where results will be downloaded.

    verbose : bool
        If True, will indicate progress and warn of any issues.

    timeout : float
        This sets the amount of time in seconds to wait for the service to
        respond to our initial request.

    refresh : float
        This sets the amount of time in seconds to wait before checking if the
        result file is available. If the results file isn't available after
        `refresh` seconds have elapsed, the function will wait for `refresh`
        seconds continuously, until `maxtimeout` is reached or the results file
        becomes available.

    maxtimeout : float
        The maximum amount of time in seconds to wait for a result to become
        available after submitting our query request.

    maxtries : int
        The maximum number of tries (across all mirrors tried) to make to
        either submit the request or download the results, before giving up.

    complete_query_later : bool
        If set to True, a submitted query that does not return a result before
        `maxtimeout` has passed will be cancelled but its input request
        parameters and the result URL provided by the service will be saved. If
        this function is then called later with these same input request
        parameters, it will check if the query finally finished and a result is
        available. If so, will download the results instead of submitting a new
        query. If it's not done yet, will start waiting for results again. To
        force launch a new query with the same request parameters, set the
        `forcefetch` kwarg to True.

    Returns
    -------

    dict
        This returns a dict of the following form::

            {'params':dict of the input params used for the query,
             'provenance':'cache' or 'new download',
             'result':path to the file on disk with the downloaded data table}

    '''

    # this was generated using the example at:
    # http://simbad.u-strasbg.fr/simbad/sim-tap and the table diagram at:
    # http://simbad.u-strasbg.fr/simbad/tap/tapsearch.html
    query = (
        "select a.oid, a.ra, a.dec, a.main_id, a.otype_txt, "
        "a.coo_bibcode, a.nbref, b.ids as all_ids, "
        "(DISTANCE(POINT('ICRS', a.ra, a.dec), "
        "POINT('ICRS', {ra_center:.5f}, {decl_center:.5f})))*3600.0 "
        "AS dist_arcsec "
        "from basic a join ids b on a.oid = b.oidref where "
        "CONTAINS(POINT('ICRS',a.ra, a.dec),"
        "CIRCLE('ICRS',{ra_center:.5f},{decl_center:.5f},"
        "{search_radius:.6f}))=1 "
        "ORDER by dist_arcsec asc "
    )

    formatted_query = query.format(ra_center=racenter,
                                   decl_center=declcenter,
                                   search_radius=searchradiusarcsec/3600.0)

    return tap_query(formatted_query,
                     simbad_mirror=simbad_mirror,
                     returnformat=returnformat,
                     forcefetch=forcefetch,
                     cachedir=cachedir,
                     verbose=verbose,
                     timeout=timeout,
                     refresh=refresh,
                     maxtimeout=maxtimeout,
                     maxtries=maxtries,
                     complete_query_later=complete_query_later)
[ "def", "objectnames_conesearch", "(", "racenter", ",", "declcenter", ",", "searchradiusarcsec", ",", "simbad_mirror", "=", "'simbad'", ",", "returnformat", "=", "'csv'", ",", "forcefetch", "=", "False", ",", "cachedir", "=", "'~/.astrobase/simbad-cache'", ",", "verbose", "=", "True", ",", "timeout", "=", "10.0", ",", "refresh", "=", "2.0", ",", "maxtimeout", "=", "90.0", ",", "maxtries", "=", "1", ",", "complete_query_later", "=", "True", ")", ":", "# this was generated using the example at:", "# http://simbad.u-strasbg.fr/simbad/sim-tap and the table diagram at:", "# http://simbad.u-strasbg.fr/simbad/tap/tapsearch.html", "query", "=", "(", "\"select a.oid, a.ra, a.dec, a.main_id, a.otype_txt, \"", "\"a.coo_bibcode, a.nbref, b.ids as all_ids, \"", "\"(DISTANCE(POINT('ICRS', a.ra, a.dec), \"", "\"POINT('ICRS', {ra_center:.5f}, {decl_center:.5f})))*3600.0 \"", "\"AS dist_arcsec \"", "\"from basic a join ids b on a.oid = b.oidref where \"", "\"CONTAINS(POINT('ICRS',a.ra, a.dec),\"", "\"CIRCLE('ICRS',{ra_center:.5f},{decl_center:.5f},\"", "\"{search_radius:.6f}))=1 \"", "\"ORDER by dist_arcsec asc \"", ")", "formatted_query", "=", "query", ".", "format", "(", "ra_center", "=", "racenter", ",", "decl_center", "=", "declcenter", ",", "search_radius", "=", "searchradiusarcsec", "/", "3600.0", ")", "return", "tap_query", "(", "formatted_query", ",", "simbad_mirror", "=", "simbad_mirror", ",", "returnformat", "=", "returnformat", ",", "forcefetch", "=", "forcefetch", ",", "cachedir", "=", "cachedir", ",", "verbose", "=", "verbose", ",", "timeout", "=", "timeout", ",", "refresh", "=", "refresh", ",", "maxtimeout", "=", "maxtimeout", ",", "maxtries", "=", "maxtries", ",", "complete_query_later", "=", "complete_query_later", ")" ]
This queries the SIMBAD TAP service for a list of object names near the
coords. This is effectively a "reverse" name resolver (i.e. this does the
opposite of SESAME).

Parameters
----------

racenter,declcenter : float
    The cone-search center coordinates in decimal degrees

searchradiusarcsec : float
    The radius in arcseconds to search around the center coordinates.

simbad_mirror : str
    This is the key used to select a SIMBAD mirror from the `SIMBAD_URLS`
    dict above. If set, the specified mirror will be used. If None, a random
    mirror chosen from that dict will be used.

returnformat : {'csv','votable','json'}
    The returned file format to request from the SIMBAD TAP service.

forcefetch : bool
    If this is True, the query will be retried even if cached results for it
    exist.

cachedir : str
    This points to the directory where results will be downloaded.

verbose : bool
    If True, will indicate progress and warn of any issues.

timeout : float
    This sets the amount of time in seconds to wait for the service to
    respond to our initial request.

refresh : float
    This sets the amount of time in seconds to wait before checking if the
    result file is available. If the results file isn't available after
    `refresh` seconds have elapsed, the function will wait for `refresh`
    seconds continuously, until `maxtimeout` is reached or the results file
    becomes available.

maxtimeout : float
    The maximum amount of time in seconds to wait for a result to become
    available after submitting our query request.

maxtries : int
    The maximum number of tries (across all mirrors tried) to make to either
    submit the request or download the results, before giving up.

complete_query_later : bool
    If set to True, a submitted query that does not return a result before
    `maxtimeout` has passed will be cancelled but its input request
    parameters and the result URL provided by the service will be saved. If
    this function is then called later with these same input request
    parameters, it will check if the query finally finished and a result is
    available. If so, will download the results instead of submitting a new
    query. If it's not done yet, will start waiting for results again. To
    force launch a new query with the same request parameters, set the
    `forcefetch` kwarg to True.

Returns
-------

dict
    This returns a dict of the following form::

        {'params':dict of the input params used for the query,
         'provenance':'cache' or 'new download',
         'result':path to the file on disk with the downloaded data table}
[ "This", "queries", "the", "SIMBAD", "TAP", "service", "for", "a", "list", "of", "object", "names", "near", "the", "coords", ".", "This", "is", "effectively", "a", "reverse", "name", "resolver", "(", "i", ".", "e", ".", "this", "does", "the", "opposite", "of", "SESAME", ")", "." ]
python
valid
40.282051
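Hedged usage sketch: the coordinates and radius are placeholders, and the call goes over the network unless a cached result already exists.

```python
res = objectnames_conesearch(290.0, 45.0, 10.0, returnformat='csv')
if res is not None:
    print(res['provenance'])  # 'cache' or 'new download'
    print(res['result'])      # path to the downloaded data table
```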
sorgerlab/indra
indra/sources/biopax/processor.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/processor.py#L1418-L1432
def _is_modification_or_activity(feature):
    """Return 'activity' or 'modification' for a modification feature,
    or None otherwise."""
    if not (isinstance(feature, _bp('ModificationFeature')) or \
            isinstance(feature, _bpimpl('ModificationFeature'))):
        return None
    mf_type = feature.getModificationType()
    if mf_type is None:
        return None
    mf_type_terms = mf_type.getTerm().toArray()
    for term in mf_type_terms:
        if term in ('residue modification, active',
                    'residue modification, inactive',
                    'active', 'inactive'):
            return 'activity'
    return 'modification'
[ "def", "_is_modification_or_activity", "(", "feature", ")", ":", "if", "not", "(", "isinstance", "(", "feature", ",", "_bp", "(", "'ModificationFeature'", ")", ")", "or", "isinstance", "(", "feature", ",", "_bpimpl", "(", "'ModificationFeature'", ")", ")", ")", ":", "return", "None", "mf_type", "=", "feature", ".", "getModificationType", "(", ")", "if", "mf_type", "is", "None", ":", "return", "None", "mf_type_terms", "=", "mf_type", ".", "getTerm", "(", ")", ".", "toArray", "(", ")", "for", "term", "in", "mf_type_terms", ":", "if", "term", "in", "(", "'residue modification, active'", ",", "'residue modification, inactive'", ",", "'active'", ",", "'inactive'", ")", ":", "return", "'activity'", "return", "'modification'" ]
Return 'activity' or 'modification' for a modification feature, or None otherwise.
[ "Return", "True", "if", "the", "feature", "is", "a", "modification" ]
python
train
40.4
saltstack/salt
salt/states/boto_apigateway.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1008-L1018
def _get_current_deployment_label(self):
    '''
    Helper method to find the deployment label that the stage_name is
    currently associated with.
    '''
    deploymentId = self._get_current_deployment_id()
    deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
                                                                     deploymentId=deploymentId,
                                                                     **self._common_aws_args).get('deployment')
    if deployment:
        return deployment.get('description')
    return None
[ "def", "_get_current_deployment_label", "(", "self", ")", ":", "deploymentId", "=", "self", ".", "_get_current_deployment_id", "(", ")", "deployment", "=", "__salt__", "[", "'boto_apigateway.describe_api_deployment'", "]", "(", "restApiId", "=", "self", ".", "restApiId", ",", "deploymentId", "=", "deploymentId", ",", "*", "*", "self", ".", "_common_aws_args", ")", ".", "get", "(", "'deployment'", ")", "if", "deployment", ":", "return", "deployment", ".", "get", "(", "'description'", ")", "return", "None" ]
Helper method to find the deployment label that the stage_name is currently associated with.
[ "Helper", "method", "to", "find", "the", "deployment", "label", "that", "the", "stage_name", "is", "currently", "associated", "with", "." ]
python
train
56.272727
alefnula/tea
tea/process/wrapper.py
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/process/wrapper.py#L36-L59
def get_processes(sort_by_name=True):
    """Retrieve a list of processes sorted by name.

    Args:
        sort_by_name (bool): Sort the list by name or by process ID's.

    Returns:
        list of (int, str) or list of (int, str, str): List of process id,
            process name and optional cmdline tuples.
    """
    if sort_by_name:
        return sorted(
            _list_processes(),
            key=cmp_to_key(
                lambda p1, p2: (cmp(p1.name, p2.name) or cmp(p1.pid, p2.pid))
            ),
        )
    else:
        return sorted(
            _list_processes(),
            key=cmp_to_key(
                lambda p1, p2: (cmp(p1.pid, p2.pid) or cmp(p1.name, p2.name))
            ),
        )
[ "def", "get_processes", "(", "sort_by_name", "=", "True", ")", ":", "if", "sort_by_name", ":", "return", "sorted", "(", "_list_processes", "(", ")", ",", "key", "=", "cmp_to_key", "(", "lambda", "p1", ",", "p2", ":", "(", "cmp", "(", "p1", ".", "name", ",", "p2", ".", "name", ")", "or", "cmp", "(", "p1", ".", "pid", ",", "p2", ".", "pid", ")", ")", ")", ",", ")", "else", ":", "return", "sorted", "(", "_list_processes", "(", ")", ",", "key", "=", "cmp_to_key", "(", "lambda", "p1", ",", "p2", ":", "(", "cmp", "(", "p1", ".", "pid", ",", "p2", ".", "pid", ")", "or", "cmp", "(", "p1", ".", "name", ",", "p2", ".", "name", ")", ")", ")", ",", ")" ]
Retrieve a list of processes sorted by name.

Args:
    sort_by_name (bool): Sort the list by name or by process ID's.

Returns:
    list of (int, str) or list of (int, str, str): List of process id,
        process name and optional cmdline tuples.
[ "Retrieve", "a", "list", "of", "processes", "sorted", "by", "name", ".", "Args", ":", "sort_by_name", "(", "bool", ")", ":", "Sort", "the", "list", "by", "name", "or", "by", "process", "ID", "s", ".", "Returns", ":", "list", "of", "(", "int", "str", ")", "or", "list", "of", "(", "int", "str", "str", ")", ":", "List", "of", "process", "id", "process", "name", "and", "optional", "cmdline", "tuples", "." ]
python
train
30.125
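A sketch assuming, consistent with the comparators above, that _list_processes() yields objects with pid and name attributes:

```python
for proc in get_processes(sort_by_name=True):
    print(proc.pid, proc.name)
```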
rogerhil/thegamesdb
thegamesdb/resources.py
https://github.com/rogerhil/thegamesdb/blob/795314215f9ee73697c7520dea4ddecfb23ca8e6/thegamesdb/resources.py#L38-L47
def list(self, name, platform='', genre=''):
    """ The name argument is required for this method as per the API
    server specification. This method also provides the platform and
    genre optional arguments as filters.
    """
    data_list = self.db.get_data(self.list_path, name=name,
                                 platform=platform, genre=genre)
    data_list = data_list.get('Data') or {}
    games = data_list.get('Game') or []
    return [self._build_item(**i) for i in games]
[ "def", "list", "(", "self", ",", "name", ",", "platform", "=", "''", ",", "genre", "=", "''", ")", ":", "data_list", "=", "self", ".", "db", ".", "get_data", "(", "self", ".", "list_path", ",", "name", "=", "name", ",", "platform", "=", "platform", ",", "genre", "=", "genre", ")", "data_list", "=", "data_list", ".", "get", "(", "'Data'", ")", "or", "{", "}", "games", "=", "data_list", ".", "get", "(", "'Game'", ")", "or", "[", "]", "return", "[", "self", ".", "_build_item", "(", "*", "*", "i", ")", "for", "i", "in", "games", "]" ]
The name argument is required for this method as per the API server specification. This method also provides the platform and genre optional arguments as filters.
[ "The", "name", "argument", "is", "required", "for", "this", "method", "as", "per", "the", "API", "server", "specification", ".", "This", "method", "also", "provides", "the", "platform", "and", "genre", "optional", "arguments", "as", "filters", "." ]
python
train
51.7
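Hypothetical usage; the client object and attribute names here are assumptions for illustration, not confirmed API:

```python
# `api.games` is assumed to expose the resource class above.
for game in api.games.list('Mario', platform='Super Nintendo (SNES)'):
    print(game)
```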
spotify/snakebite
snakebite/client.py
https://github.com/spotify/snakebite/blob/6a456e6100b0c1be66cc1f7f9d7f50494f369da3/snakebite/client.py#L881-L909
def tail(self, path, tail_length=1024, append=False):
    # Note: append is currently not implemented.
    ''' Show the end of the file - default 1KB, supports up to the Hadoop block size.

    :param path: Path to read
    :type path: string
    :param tail_length: The length to read from the end of the file - default 1KB, up to block size.
    :type tail_length: int
    :param append: Currently not implemented
    :type append: bool
    :returns: a generator that yields strings
    '''
    #TODO: Make tail support multiple files at a time, like most other methods do

    if not path:
        raise InvalidInputException("tail: no path given")

    block_size = self.serverdefaults()['blockSize']

    if tail_length > block_size:
        raise InvalidInputException("tail: currently supports length up to the block size (%d)" % (block_size,))

    if tail_length <= 0:
        raise InvalidInputException("tail: tail_length cannot be less than or equal to zero")

    processor = lambda path, node: self._handle_tail(path, node, tail_length, append)
    for item in self._find_items([path], processor, include_toplevel=True,
                                 include_children=False, recurse=False):
        if item:
            yield item
[ "def", "tail", "(", "self", ",", "path", ",", "tail_length", "=", "1024", ",", "append", "=", "False", ")", ":", "# Note: append is currently not implemented.", "#TODO: Make tail support multiple files at a time, like most other methods do", "if", "not", "path", ":", "raise", "InvalidInputException", "(", "\"tail: no path given\"", ")", "block_size", "=", "self", ".", "serverdefaults", "(", ")", "[", "'blockSize'", "]", "if", "tail_length", ">", "block_size", ":", "raise", "InvalidInputException", "(", "\"tail: currently supports length up to the block size (%d)\"", "%", "(", "block_size", ",", ")", ")", "if", "tail_length", "<=", "0", ":", "raise", "InvalidInputException", "(", "\"tail: tail_length cannot be less than or equal to zero\"", ")", "processor", "=", "lambda", "path", ",", "node", ":", "self", ".", "_handle_tail", "(", "path", ",", "node", ",", "tail_length", ",", "append", ")", "for", "item", "in", "self", ".", "_find_items", "(", "[", "path", "]", ",", "processor", ",", "include_toplevel", "=", "True", ",", "include_children", "=", "False", ",", "recurse", "=", "False", ")", ":", "if", "item", ":", "yield", "item" ]
Show the end of the file - default 1KB, supports up to the Hadoop block size.

:param path: Path to read
:type path: string
:param tail_length: The length to read from the end of the file - default 1KB, up to block size.
:type tail_length: int
:param append: Currently not implemented
:type append: bool
:returns: a generator that yields strings
[ "Show", "the", "end", "of", "the", "file", "-", "default", "1KB", "supports", "up", "to", "the", "Hadoop", "block", "size", "." ]
python
train
45.137931
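Usage sketch; the namenode host/port and HDFS path are placeholders:

```python
from snakebite.client import Client

client = Client('namenode.example.com', 8020)
for chunk in client.tail('/logs/app.log', tail_length=512):
    print(chunk)
```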
developmentseed/landsat-util
landsat/search.py
https://github.com/developmentseed/landsat-util/blob/92dc81771ddaa64a8a9124a89a6516b52485374b/landsat/search.py#L254-L273
def date_range_builder(self, start='2013-02-11', end=None):
    """ Builds date range query.

    :param start:
        Date string. format: YYYY-MM-DD
    :type start:
        String
    :param end:
        date string. format: YYYY-MM-DD
    :type end:
        String

    :returns:
        String
    """
    if not end:
        end = time.strftime('%Y-%m-%d')

    return 'acquisitionDate:[%s+TO+%s]' % (start, end)
[ "def", "date_range_builder", "(", "self", ",", "start", "=", "'2013-02-11'", ",", "end", "=", "None", ")", ":", "if", "not", "end", ":", "end", "=", "time", ".", "strftime", "(", "'%Y-%m-%d'", ")", "return", "'acquisitionDate:[%s+TO+%s]'", "%", "(", "start", ",", "end", ")" ]
Builds date range query.

:param start:
    Date string. format: YYYY-MM-DD
:type start:
    String
:param end:
    date string. format: YYYY-MM-DD
:type end:
    String

:returns:
    String
[ "Builds", "date", "range", "query", "." ]
python
train
23.4
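A doctest-style sketch, assuming the enclosing Search class can be instantiated directly:

```python
>>> s = Search()
>>> s.date_range_builder('2014-01-01', '2014-06-30')
'acquisitionDate:[2014-01-01+TO+2014-06-30]'
```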
iamteem/redisco
redisco/models/base.py
https://github.com/iamteem/redisco/blob/a7ba19ff3c38061d6d8bc0c10fa754baadcfeb91/redisco/models/base.py#L447-L458
def _delete_from_indices(self, pipeline):
    """Deletes the object's id from the sets(indices) it has been
    added to and removes its list of indices (used for housekeeping).
    """
    s = Set(self.key()['_indices'])
    z = Set(self.key()['_zindices'])
    for index in s.members:
        pipeline.srem(index, self.id)
    for index in z.members:
        pipeline.zrem(index, self.id)
    pipeline.delete(s.key)
    pipeline.delete(z.key)
[ "def", "_delete_from_indices", "(", "self", ",", "pipeline", ")", ":", "s", "=", "Set", "(", "self", ".", "key", "(", ")", "[", "'_indices'", "]", ")", "z", "=", "Set", "(", "self", ".", "key", "(", ")", "[", "'_zindices'", "]", ")", "for", "index", "in", "s", ".", "members", ":", "pipeline", ".", "srem", "(", "index", ",", "self", ".", "id", ")", "for", "index", "in", "z", ".", "members", ":", "pipeline", ".", "zrem", "(", "index", ",", "self", ".", "id", ")", "pipeline", ".", "delete", "(", "s", ".", "key", ")", "pipeline", ".", "delete", "(", "z", ".", "key", ")" ]
Deletes the object's id from the sets(indices) it has been added to and removes its list of indices (used for housekeeping).
[ "Deletes", "the", "object", "s", "id", "from", "the", "sets", "(", "indices", ")", "it", "has", "been", "added", "to", "and", "removes", "its", "list", "of", "indices", "(", "used", "for", "housekeeping", ")", "." ]
python
train
39.75
noahbenson/pimms
pimms/util.py
https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/util.py#L219-L249
def save(filename, obj, overwrite=False, create_directories=False):
    '''
    pimms.save(filename, obj) attempts to pickle the given object obj in the filename (or stream,
    if given). An error is raised when this cannot be accomplished; the first argument is always
    returned; though if the argument is a filename, it may be a different string that refers to
    the same file.

    The save/load protocol uses pickle for all saving/loading except when the object is a numpy
    object, in which case it is written using obj.tofile(). The save function writes meta-data
    into the file so cannot simply be unpickled, but must be loaded using the pimms.load()
    function. Fundamentally, however, if an object can be pickled, it can be saved/loaded.

    Options:
      * overwrite (False) The optional parameter overwrite indicates whether an error should be
        raised before opening the file if the file already exists.
      * create_directories (False) The optional parameter create_directories indicates whether
        the function should attempt to create the directories in which the filename exists if
        they do not already exist.
    '''
    if isinstance(filename, six.string_types):
        filename = os.path.expanduser(filename)
        if not overwrite and os.path.exists(filename):
            raise ValueError('save would overwrite file %s' % filename)
        if create_directories:
            dname = os.path.dirname(os.path.realpath(filename))
            if not os.path.isdir(dname):
                os.makedirs(dname)
        with open(filename, 'wb') as f:
            _save_stream(f, obj)
    else:
        _save_stream(filename, obj)
    return filename
[ "def", "save", "(", "filename", ",", "obj", ",", "overwrite", "=", "False", ",", "create_directories", "=", "False", ")", ":", "if", "isinstance", "(", "filename", ",", "six", ".", "string_types", ")", ":", "filename", "=", "os", ".", "path", ".", "expanduser", "(", "filename", ")", "if", "not", "overwrite", "and", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "raise", "ValueError", "(", "'save would overwrite file %s'", "%", "filename", ")", "if", "create_directories", ":", "dname", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "filename", ")", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "dname", ")", ":", "os", ".", "makedirs", "(", "dname", ")", "with", "open", "(", "filename", ",", "'wb'", ")", "as", "f", ":", "_save_stream", "(", "f", ",", "obj", ")", "else", ":", "_save_stream", "(", "filename", ",", "obj", ")", "return", "filename" ]
pimms.save(filename, obj) attempts to pickle the given object obj in the filename
(or stream, if given). An error is raised when this cannot be accomplished; the
first argument is always returned; though if the argument is a filename, it may
be a different string that refers to the same file.

The save/load protocol uses pickle for all saving/loading except when the object
is a numpy object, in which case it is written using obj.tofile(). The save
function writes meta-data into the file so cannot simply be unpickled, but must
be loaded using the pimms.load() function. Fundamentally, however, if an object
can be pickled, it can be saved/loaded.

Options:
  * overwrite (False) The optional parameter overwrite indicates whether an
    error should be raised before opening the file if the file already exists.
  * create_directories (False) The optional parameter create_directories
    indicates whether the function should attempt to create the directories in
    which the filename exists if they do not already exist.
[ "pimms", ".", "save", "(", "filename", "obj", ")", "attempts", "to", "pickle", "the", "given", "object", "obj", "in", "the", "filename", "(", "or", "stream", "if", "given", ")", ".", "An", "error", "is", "raised", "when", "this", "cannot", "be", "accomplished", ";", "the", "first", "argument", "is", "always", "returned", ";", "though", "if", "the", "argument", "is", "a", "filename", "it", "may", "be", "a", "differet", "string", "that", "refers", "to", "the", "same", "file", "." ]
python
train
53.483871
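Usage sketch (the path is illustrative); load() is the documented counterpart for reading the file back:

```python
import pimms

path = pimms.save('~/tmp/model.pkl', {'a': 1},
                  overwrite=True, create_directories=True)
obj = pimms.load(path)
```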
jobovy/galpy
galpy/potential/KuzminKutuzovStaeckelPotential.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/KuzminKutuzovStaeckelPotential.py#L113-L134
def _zforce(self,R,z,phi=0.,t=0.):
    """
    NAME:
        _zforce
    PURPOSE:
        evaluate the vertical force for this potential
    INPUT:
        R - Galactocentric cylindrical radius
        z - vertical height
        phi - azimuth
        t - time
    OUTPUT:
        the vertical force
    HISTORY:
        2015-02-13 - Written - Trick (MPIA)
    """
    l,n = bovy_coords.Rz_to_lambdanu (R,z,ac=self._ac,Delta=self._Delta)
    jac = bovy_coords.Rz_to_lambdanu_jac(R,z, Delta=self._Delta)
    dldz = jac[0,1]
    dndz = jac[1,1]
    return - (dldz * self._lderiv(l,n) + \
              dndz * self._nderiv(l,n))
[ "def", "_zforce", "(", "self", ",", "R", ",", "z", ",", "phi", "=", "0.", ",", "t", "=", "0.", ")", ":", "l", ",", "n", "=", "bovy_coords", ".", "Rz_to_lambdanu", "(", "R", ",", "z", ",", "ac", "=", "self", ".", "_ac", ",", "Delta", "=", "self", ".", "_Delta", ")", "jac", "=", "bovy_coords", ".", "Rz_to_lambdanu_jac", "(", "R", ",", "z", ",", "Delta", "=", "self", ".", "_Delta", ")", "dldz", "=", "jac", "[", "0", ",", "1", "]", "dndz", "=", "jac", "[", "1", ",", "1", "]", "return", "-", "(", "dldz", "*", "self", ".", "_lderiv", "(", "l", ",", "n", ")", "+", "dndz", "*", "self", ".", "_nderiv", "(", "l", ",", "n", ")", ")" ]
NAME:
    _zforce
PURPOSE:
    evaluate the vertical force for this potential
INPUT:
    R - Galactocentric cylindrical radius
    z - vertical height
    phi - azimuth
    t - time
OUTPUT:
    the vertical force
HISTORY:
    2015-02-13 - Written - Trick (MPIA)
[ "NAME", ":", "_zforce", "PURPOSE", ":", "evaluate", "the", "vertical", "force", "for", "this", "potential", "INPUT", ":", "R", "-", "Galactocentric", "cylindrical", "radius", "z", "-", "vertical", "height", "phi", "-", "azimuth", "t", "-", "time", "OUTPUT", ":", "the", "vertical", "force", "HISTORY", ":", "2015", "-", "02", "-", "13", "-", "Written", "-", "Trick", "(", "MPIA", ")" ]
python
train
31.909091
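The body is just the chain rule through the (lambda, nu) coordinate transform, with jac[0,1] and jac[1,1] supplying the partial derivatives of the coordinates with respect to z:

```latex
F_z = -\frac{\partial\Phi}{\partial z}
    = -\left(\frac{\partial\lambda}{\partial z}\,\frac{\partial\Phi}{\partial\lambda}
           + \frac{\partial\nu}{\partial z}\,\frac{\partial\Phi}{\partial\nu}\right)
```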
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L11369-L11379
def file_transfer_protocol_send(self, target_network, target_system, target_component, payload, force_mavlink1=False):
    '''
    File transfer message

    target_network   : Network ID (0 for broadcast) (uint8_t)
    target_system    : System ID (0 for broadcast) (uint8_t)
    target_component : Component ID (0 for broadcast) (uint8_t)
    payload          : Variable length payload. The length is defined by the remaining message length when subtracting the header and other fields. The entire content of this block is opaque unless you understand the encoding message_type. The particular encoding used can be extension specific and might not always be documented as part of the mavlink specification. (uint8_t)

    '''
    return self.send(self.file_transfer_protocol_encode(target_network, target_system, target_component, payload), force_mavlink1=force_mavlink1)
[ "def", "file_transfer_protocol_send", "(", "self", ",", "target_network", ",", "target_system", ",", "target_component", ",", "payload", ",", "force_mavlink1", "=", "False", ")", ":", "return", "self", ".", "send", "(", "self", ".", "file_transfer_protocol_encode", "(", "target_network", ",", "target_system", ",", "target_component", ",", "payload", ")", ",", "force_mavlink1", "=", "force_mavlink1", ")" ]
File transfer message

target_network   : Network ID (0 for broadcast) (uint8_t)
target_system    : System ID (0 for broadcast) (uint8_t)
target_component : Component ID (0 for broadcast) (uint8_t)
payload          : Variable length payload. The length is defined by the remaining message length when subtracting the header and other fields. The entire content of this block is opaque unless you understand the encoding message_type. The particular encoding used can be extension specific and might not always be documented as part of the mavlink specification. (uint8_t)
[ "File", "transfer", "message" ]
python
train
91
gabstopper/smc-python
smc/core/node.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/core/node.py#L517-L530
def ssh(self, enable=True, comment=None):
    """
    Enable or disable SSH

    :param bool enable: enable or disable SSH daemon
    :param str comment: optional comment for audit
    :raises NodeCommandFailed: cannot enable SSH daemon
    :return: None
    """
    self.make_request(
        NodeCommandFailed,
        method='update',
        resource='ssh',
        params={'enable': enable, 'comment': comment})
[ "def", "ssh", "(", "self", ",", "enable", "=", "True", ",", "comment", "=", "None", ")", ":", "self", ".", "make_request", "(", "NodeCommandFailed", ",", "method", "=", "'update'", ",", "resource", "=", "'ssh'", ",", "params", "=", "{", "'enable'", ":", "enable", ",", "'comment'", ":", "comment", "}", ")" ]
Enable or disable SSH

:param bool enable: enable or disable SSH daemon
:param str comment: optional comment for audit
:raises NodeCommandFailed: cannot enable SSH daemon
:return: None
[ "Enable", "or", "disable", "SSH" ]
python
train
32.214286
Clinical-Genomics/scout
scout/adapter/mongo/clinvar.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/adapter/mongo/clinvar.py#L71-L92
def get_open_clinvar_submission(self, user_id, institute_id):
    """Retrieve the database id of an open clinvar submission for a user and
       institute, if none is available then create a new submission and
       return it

       Args:
           user_id(str): a user ID
           institute_id(str): an institute ID

       Returns:
           submission(obj) : an open clinvar submission object
    """
    LOG.info("Retrieving an open clinvar submission for user '%s' and institute %s", user_id, institute_id)
    query = dict(user_id=user_id, institute_id=institute_id, status='open')
    submission = self.clinvar_submission_collection.find_one(query)

    # If there is no open submission for this user and institute, create one
    if submission is None:
        submission_id = self.create_submission(user_id, institute_id)
        submission = self.clinvar_submission_collection.find_one({'_id':submission_id})

    return submission
[ "def", "get_open_clinvar_submission", "(", "self", ",", "user_id", ",", "institute_id", ")", ":", "LOG", ".", "info", "(", "\"Retrieving an open clinvar submission for user '%s' and institute %s\"", ",", "user_id", ",", "institute_id", ")", "query", "=", "dict", "(", "user_id", "=", "user_id", ",", "institute_id", "=", "institute_id", ",", "status", "=", "'open'", ")", "submission", "=", "self", ".", "clinvar_submission_collection", ".", "find_one", "(", "query", ")", "# If there is no open submission for this user and institute, create one", "if", "submission", "is", "None", ":", "submission_id", "=", "self", ".", "create_submission", "(", "user_id", ",", "institute_id", ")", "submission", "=", "self", ".", "clinvar_submission_collection", ".", "find_one", "(", "{", "'_id'", ":", "submission_id", "}", ")", "return", "submission" ]
Retrieve the database id of an open clinvar submission for a user and
institute, if none is available then create a new submission and return it

Args:
    user_id(str): a user ID
    institute_id(str): an institute ID

Returns:
    submission(obj) : an open clinvar submission object
[ "Retrieve", "the", "database", "id", "of", "an", "open", "clinvar", "submission", "for", "a", "user", "and", "institute", "if", "none", "is", "available", "then", "create", "a", "new", "submission", "and", "return", "it" ]
python
test
44.909091
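Hedged usage sketch, assuming `adapter` is a connected Scout Mongo adapter; the user and institute ids are placeholders:

```python
submission = adapter.get_open_clinvar_submission('user@clinic.example', 'cust000')
print(submission['_id'], submission['status'])  # <ObjectId ...> 'open'
```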
scanny/python-pptx
pptx/presentation.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/presentation.py#L108-L114
def slides(self):
    """
    |Slides| object containing the slides in this presentation.
    """
    sldIdLst = self._element.get_or_add_sldIdLst()
    self.part.rename_slide_parts([sldId.rId for sldId in sldIdLst])
    return Slides(sldIdLst, self)
[ "def", "slides", "(", "self", ")", ":", "sldIdLst", "=", "self", ".", "_element", ".", "get_or_add_sldIdLst", "(", ")", "self", ".", "part", ".", "rename_slide_parts", "(", "[", "sldId", ".", "rId", "for", "sldId", "in", "sldIdLst", "]", ")", "return", "Slides", "(", "sldIdLst", ",", "self", ")" ]
|Slides| object containing the slides in this presentation.
[ "|Slides|", "object", "containing", "the", "slides", "in", "this", "presentation", "." ]
python
train
38.285714
rainwoodman/sharedmem
sharedmem/sharedmem.py
https://github.com/rainwoodman/sharedmem/blob/b23e59c1ed0e28f7b6c96c17a04d55c700e06e3a/sharedmem/sharedmem.py#L186-L201
def total_memory():
    """ Returns the amount of memory available for use.

        The memory is obtained from MemTotal entry in /proc/meminfo.

        Notes
        =====
        This function is not very useful and not very portable.

    """
    with open('/proc/meminfo', 'r') as f:
        for line in f:
            words = line.split()
            if words[0].upper() == 'MEMTOTAL:':
                return int(words[1]) * 1024
    raise IOError('MemTotal unknown')
[ "def", "total_memory", "(", ")", ":", "with", "file", "(", "'/proc/meminfo'", ",", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "words", "=", "line", ".", "split", "(", ")", "if", "words", "[", "0", "]", ".", "upper", "(", ")", "==", "'MEMTOTAL:'", ":", "return", "int", "(", "words", "[", "1", "]", ")", "*", "1024", "raise", "IOError", "(", "'MemTotal unknown'", ")" ]
Returns the amount of memory available for use.

The memory is obtained from MemTotal entry in /proc/meminfo.

Notes
=====
This function is not very useful and not very portable.
[ "Returns", "the", "amount", "of", "memory", "available", "for", "use", "." ]
python
valid
29.0625
JoelBender/bacpypes
py25/bacpypes/iocb.py
https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py25/bacpypes/iocb.py#L474-L503
def get(self, block=1, delay=None):
    """Get a request from a queue, optionally block until a request
    is available."""
    if _debug: IOQueue._debug("get block=%r delay=%r", block, delay)

    # if the queue is empty and we do not block return None
    if not block and not self.notempty.isSet():
        if _debug: IOQueue._debug(" - not blocking and empty")
        return None

    # wait for something to be in the queue
    if delay:
        self.notempty.wait(delay)
        if not self.notempty.isSet():
            return None
    else:
        self.notempty.wait()

    # extract the first element
    priority, iocb = self.queue[0]
    del self.queue[0]
    iocb.ioQueue = None

    # if the queue is empty, clear the event
    qlen = len(self.queue)
    if not qlen:
        self.notempty.clear()

    # return the request
    return iocb
[ "def", "get", "(", "self", ",", "block", "=", "1", ",", "delay", "=", "None", ")", ":", "if", "_debug", ":", "IOQueue", ".", "_debug", "(", "\"get block=%r delay=%r\"", ",", "block", ",", "delay", ")", "# if the queue is empty and we do not block return None", "if", "not", "block", "and", "not", "self", ".", "notempty", ".", "isSet", "(", ")", ":", "if", "_debug", ":", "IOQueue", ".", "_debug", "(", "\" - not blocking and empty\"", ")", "return", "None", "# wait for something to be in the queue", "if", "delay", ":", "self", ".", "notempty", ".", "wait", "(", "delay", ")", "if", "not", "self", ".", "notempty", ".", "isSet", "(", ")", ":", "return", "None", "else", ":", "self", ".", "notempty", ".", "wait", "(", ")", "# extract the first element", "priority", ",", "iocb", "=", "self", ".", "queue", "[", "0", "]", "del", "self", ".", "queue", "[", "0", "]", "iocb", ".", "ioQueue", "=", "None", "# if the queue is empty, clear the event", "qlen", "=", "len", "(", "self", ".", "queue", ")", "if", "not", "qlen", ":", "self", ".", "notempty", ".", "clear", "(", ")", "# return the request", "return", "iocb" ]
Get a request from a queue, optionally block until a request is available.
[ "Get", "a", "request", "from", "a", "queue", "optionally", "block", "until", "a", "request", "is", "available", "." ]
python
train
30.833333
oasis-open/cti-pattern-validator
stix2patterns/validator.py
https://github.com/oasis-open/cti-pattern-validator/blob/753a6901120db25f0c8550607de1eab4440d59df/stix2patterns/validator.py#L77-L95
def validate(user_input, ret_errs=False, print_errs=False):
    """
    Wrapper for run_validator function that returns True if the user_input
    contains a valid STIX pattern or False otherwise. The error messages may
    also be returned or printed based upon the ret_errs and print_errs arg
    values.
    """
    errs = run_validator(user_input)
    passed = len(errs) == 0

    if print_errs:
        for err in errs:
            print(err)

    if ret_errs:
        return passed, errs

    return passed
[ "def", "validate", "(", "user_input", ",", "ret_errs", "=", "False", ",", "print_errs", "=", "False", ")", ":", "errs", "=", "run_validator", "(", "user_input", ")", "passed", "=", "len", "(", "errs", ")", "==", "0", "if", "print_errs", ":", "for", "err", "in", "errs", ":", "print", "(", "err", ")", "if", "ret_errs", ":", "return", "passed", ",", "errs", "return", "passed" ]
Wrapper for run_validator function that returns True if the user_input contains a valid STIX pattern or False otherwise. The error messages may also be returned or printed based upon the ret_errs and print_errs arg values.
[ "Wrapper", "for", "run_validator", "function", "that", "returns", "True", "if", "the", "user_input", "contains", "a", "valid", "STIX", "pattern", "or", "False", "otherwise", ".", "The", "error", "messages", "may", "also", "be", "returned", "or", "printed", "based", "upon", "the", "ret_errs", "and", "print_errs", "arg", "values", "." ]
python
train
26.052632
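Usage sketch with a syntactically valid STIX pattern; the second call shows error retrieval:

```python
from stix2patterns.validator import validate

ok = validate("[ipv4-addr:value = '198.51.100.1']")       # True
ok, errs = validate("[ipv4-addr:value = ", ret_errs=True)  # False, [...]
```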
tanghaibao/jcvi
jcvi/formats/fastq.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fastq.py#L826-L854
def size(args):
    """
    %prog size fastqfile

    Find the total base pairs in a list of fastq files
    """
    p = OptionParser(size.__doc__)
    opts, args = p.parse_args(args)

    if len(args) < 1:
        sys.exit(not p.print_help())

    total_size = total_numrecords = 0
    for f in args:
        cur_size = cur_numrecords = 0
        for rec in iter_fastq(f):
            if not rec:
                break
            cur_numrecords += 1
            cur_size += len(rec)
        print(" ".join(str(x) for x in \
            (op.basename(f), cur_numrecords, cur_size)))
        total_numrecords += cur_numrecords
        total_size += cur_size

    if len(args) > 1:
        print(" ".join(str(x) for x in \
            ("Total", total_numrecords, total_size)))
[ "def", "size", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "size", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "<", "1", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "total_size", "=", "total_numrecords", "=", "0", "for", "f", "in", "args", ":", "cur_size", "=", "cur_numrecords", "=", "0", "for", "rec", "in", "iter_fastq", "(", "f", ")", ":", "if", "not", "rec", ":", "break", "cur_numrecords", "+=", "1", "cur_size", "+=", "len", "(", "rec", ")", "print", "(", "\" \"", ".", "join", "(", "str", "(", "x", ")", "for", "x", "in", "(", "op", ".", "basename", "(", "f", ")", ",", "cur_numrecords", ",", "cur_size", ")", ")", ")", "total_numrecords", "+=", "cur_numrecords", "total_size", "+=", "cur_size", "if", "len", "(", "args", ")", ">", "1", ":", "print", "(", "\" \"", ".", "join", "(", "str", "(", "x", ")", "for", "x", "in", "(", "\"Total\"", ",", "total_numrecords", ",", "total_size", ")", ")", ")" ]
%prog size fastqfile Find the total base pairs in a list of fastq files
[ "%prog", "size", "fastqfile" ]
python
train
26.034483
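A standalone sketch of the same base-pair count, assuming plain uncompressed 4-line FASTQ records; jcvi's iter_fastq handles more cases (multi-line records, compressed files) than this minimal stand-in does.

# Minimal stand-in for the counting loop above; not jcvi's iter_fastq.
def fastq_size(path):
    numrecords = size = 0
    with open(path) as fh:
        for i, line in enumerate(fh):
            if i % 4 == 1:               # sequence line of each 4-line record
                numrecords += 1
                size += len(line.strip())
    return numrecords, size

# numrecords, size = fastq_size("reads.fastq")   # file name is illustrative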
Tanganelli/CoAPthon3
coapthon/layers/messagelayer.py
https://github.com/Tanganelli/CoAPthon3/blob/985763bfe2eb9e00f49ec100c5b8877c2ed7d531/coapthon/layers/messagelayer.py#L140-L190
def receive_empty(self, message): """ Pair ACKs with requests. :type message: Message :param message: the received message :rtype : Transaction :return: the transaction to which the message belongs to """ logger.debug("receive_empty - " + str(message)) try: host, port = message.source except AttributeError: return key_mid = str_append_hash(host, port, message.mid) key_mid_multicast = str_append_hash(defines.ALL_COAP_NODES, port, message.mid) key_token = str_append_hash(host, port, message.token) key_token_multicast = str_append_hash(defines.ALL_COAP_NODES, port, message.token) if key_mid in list(self._transactions.keys()): transaction = self._transactions[key_mid] elif key_token in self._transactions_token: transaction = self._transactions_token[key_token] elif key_mid_multicast in list(self._transactions.keys()): transaction = self._transactions[key_mid_multicast] elif key_token_multicast in self._transactions_token: transaction = self._transactions_token[key_token_multicast] else: logger.warning("Un-Matched incoming empty message " + str(host) + ":" + str(port)) return None if message.type == defines.Types["ACK"]: if not transaction.request.acknowledged: transaction.request.acknowledged = True elif (transaction.response is not None) and (not transaction.response.acknowledged): transaction.response.acknowledged = True elif message.type == defines.Types["RST"]: if not transaction.request.acknowledged: transaction.request.rejected = True elif not transaction.response.acknowledged: transaction.response.rejected = True elif message.type == defines.Types["CON"]: #implicit ACK (might have been lost) logger.debug("Implicit ACK on received CON for waiting transaction") transaction.request.acknowledged = True else: logger.warning("Unhandled message type...") if transaction.retransmit_stop is not None: transaction.retransmit_stop.set() return transaction
[ "def", "receive_empty", "(", "self", ",", "message", ")", ":", "logger", ".", "debug", "(", "\"receive_empty - \"", "+", "str", "(", "message", ")", ")", "try", ":", "host", ",", "port", "=", "message", ".", "source", "except", "AttributeError", ":", "return", "key_mid", "=", "str_append_hash", "(", "host", ",", "port", ",", "message", ".", "mid", ")", "key_mid_multicast", "=", "str_append_hash", "(", "defines", ".", "ALL_COAP_NODES", ",", "port", ",", "message", ".", "mid", ")", "key_token", "=", "str_append_hash", "(", "host", ",", "port", ",", "message", ".", "token", ")", "key_token_multicast", "=", "str_append_hash", "(", "defines", ".", "ALL_COAP_NODES", ",", "port", ",", "message", ".", "token", ")", "if", "key_mid", "in", "list", "(", "self", ".", "_transactions", ".", "keys", "(", ")", ")", ":", "transaction", "=", "self", ".", "_transactions", "[", "key_mid", "]", "elif", "key_token", "in", "self", ".", "_transactions_token", ":", "transaction", "=", "self", ".", "_transactions_token", "[", "key_token", "]", "elif", "key_mid_multicast", "in", "list", "(", "self", ".", "_transactions", ".", "keys", "(", ")", ")", ":", "transaction", "=", "self", ".", "_transactions", "[", "key_mid_multicast", "]", "elif", "key_token_multicast", "in", "self", ".", "_transactions_token", ":", "transaction", "=", "self", ".", "_transactions_token", "[", "key_token_multicast", "]", "else", ":", "logger", ".", "warning", "(", "\"Un-Matched incoming empty message \"", "+", "str", "(", "host", ")", "+", "\":\"", "+", "str", "(", "port", ")", ")", "return", "None", "if", "message", ".", "type", "==", "defines", ".", "Types", "[", "\"ACK\"", "]", ":", "if", "not", "transaction", ".", "request", ".", "acknowledged", ":", "transaction", ".", "request", ".", "acknowledged", "=", "True", "elif", "(", "transaction", ".", "response", "is", "not", "None", ")", "and", "(", "not", "transaction", ".", "response", ".", "acknowledged", ")", ":", "transaction", ".", "response", ".", "acknowledged", "=", "True", "elif", "message", ".", "type", "==", "defines", ".", "Types", "[", "\"RST\"", "]", ":", "if", "not", "transaction", ".", "request", ".", "acknowledged", ":", "transaction", ".", "request", ".", "rejected", "=", "True", "elif", "not", "transaction", ".", "response", ".", "acknowledged", ":", "transaction", ".", "response", ".", "rejected", "=", "True", "elif", "message", ".", "type", "==", "defines", ".", "Types", "[", "\"CON\"", "]", ":", "#implicit ACK (might have been lost)", "logger", ".", "debug", "(", "\"Implicit ACK on received CON for waiting transaction\"", ")", "transaction", ".", "request", ".", "acknowledged", "=", "True", "else", ":", "logger", ".", "warning", "(", "\"Unhandled message type...\"", ")", "if", "transaction", ".", "retransmit_stop", "is", "not", "None", ":", "transaction", ".", "retransmit_stop", ".", "set", "(", ")", "return", "transaction" ]
Pair ACKs with requests. :type message: Message :param message: the received message :rtype : Transaction :return: the transaction to which the message belongs to
[ "Pair", "ACKs", "with", "requests", "." ]
python
train
45.078431
github/octodns
octodns/record/geo.py
https://github.com/github/octodns/blob/65ee60491e22e6bb0a2aa08f7069c6ecf6c3fee6/octodns/record/geo.py#L14-L37
def validate(cls, code, prefix): ''' Validates an octoDNS geo code making sure that it is a valid and corresponding: * continent * continent & country * continent, country, & province ''' reasons = [] pieces = code.split('-') n = len(pieces) if n > 3: reasons.append('{}invalid geo code "{}"'.format(prefix, code)) elif n > 0 and pieces[0] not in geo_data: reasons.append('{}unknown continent code "{}"' .format(prefix, code)) elif n > 1 and pieces[1] not in geo_data[pieces[0]]: reasons.append('{}unknown country code "{}"'.format(prefix, code)) elif n > 2 and \ pieces[2] not in geo_data[pieces[0]][pieces[1]]['provinces']: reasons.append('{}unknown province code "{}"'.format(prefix, code)) return reasons
[ "def", "validate", "(", "cls", ",", "code", ",", "prefix", ")", ":", "reasons", "=", "[", "]", "pieces", "=", "code", ".", "split", "(", "'-'", ")", "n", "=", "len", "(", "pieces", ")", "if", "n", ">", "3", ":", "reasons", ".", "append", "(", "'{}invalid geo code \"{}\"'", ".", "format", "(", "prefix", ",", "code", ")", ")", "elif", "n", ">", "0", "and", "pieces", "[", "0", "]", "not", "in", "geo_data", ":", "reasons", ".", "append", "(", "'{}unknown continent code \"{}\"'", ".", "format", "(", "prefix", ",", "code", ")", ")", "elif", "n", ">", "1", "and", "pieces", "[", "1", "]", "not", "in", "geo_data", "[", "pieces", "[", "0", "]", "]", ":", "reasons", ".", "append", "(", "'{}unknown country code \"{}\"'", ".", "format", "(", "prefix", ",", "code", ")", ")", "elif", "n", ">", "2", "and", "pieces", "[", "2", "]", "not", "in", "geo_data", "[", "pieces", "[", "0", "]", "]", "[", "pieces", "[", "1", "]", "]", "[", "'provinces'", "]", ":", "reasons", ".", "append", "(", "'{}unknown province code \"{}\"'", ".", "format", "(", "prefix", ",", "code", ")", ")", "return", "reasons" ]
Validates an octoDNS geo code making sure that it is a valid and corresponding: * continent * continent & country * continent, country, & province
[ "Validates", "an", "octoDNS", "geo", "code", "making", "sure", "that", "it", "is", "a", "valid", "and", "corresponding", ":", "*", "continent", "*", "continent", "&", "country", "*", "continent", "country", "&", "province" ]
python
train
37.916667
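A usage sketch for the validator above; the GeoCodes class name is an assumption based on this module's layout, and the geo codes shown are standard continent-country-province triplets.

# Hedged usage sketch; the GeoCodes import is an assumption.
from octodns.record.geo import GeoCodes

for code in ('NA', 'NA-US', 'NA-US-CA', 'XX'):
    reasons = GeoCodes.validate(code, prefix='geo: ')
    print(code, '->', reasons or 'ok')   # 'XX' yields an unknown-continent reason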
sassoftware/saspy
saspy/sasdata.py
https://github.com/sassoftware/saspy/blob/e433f71990f249d3a6c3db323ceb11cb2d462cf9/saspy/sasdata.py#L912-L927
def to_csv(self, file: str, opts: dict = None) -> str: """ This method will export a SAS Data Set to a file in CSV format. :param file: the OS filesystem path of the file to be created (exported from this SAS Data Set) :return: """ opts = opts if opts is not None else {} ll = self._is_valid() if ll: if not self.sas.batch: print(ll['LOG']) else: return ll else: return self.sas.write_csv(file, self.table, self.libref, self.dsopts, opts)
[ "def", "to_csv", "(", "self", ",", "file", ":", "str", ",", "opts", ":", "dict", "=", "None", ")", "->", "str", ":", "opts", "=", "opts", "if", "opts", "is", "not", "None", "else", "{", "}", "ll", "=", "self", ".", "_is_valid", "(", ")", "if", "ll", ":", "if", "not", "self", ".", "sas", ".", "batch", ":", "print", "(", "ll", "[", "'LOG'", "]", ")", "else", ":", "return", "ll", "else", ":", "return", "self", ".", "sas", ".", "write_csv", "(", "file", ",", "self", ".", "table", ",", "self", ".", "libref", ",", "self", ".", "dsopts", ",", "opts", ")" ]
This method will export a SAS Data Set to a file in CSV format. :param file: the OS filesystem path of the file to be created (exported from this SAS Data Set) :return:
[ "This", "method", "will", "export", "a", "SAS", "Data", "Set", "to", "a", "file", "in", "CSV", "format", "." ]
python
train
35.25
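A session sketch for the export above, assuming a working saspy configuration (sascfg_personal.py) and the bundled sashelp library; the output path is illustrative.

# Hedged saspy session sketch.
import saspy

sas = saspy.SASsession()                      # uses your default configuration
cars = sas.sasdata('cars', libref='sashelp')  # an existing SAS data set
log = cars.to_csv('/tmp/cars.csv')            # returns the SAS log on success
print(log)
sas.endsas()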
Alignak-monitoring/alignak
alignak/objects/item.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/item.py#L816-L841
def add_items(self, items, index_items): """ Add items to template if is template, else add in item list :param items: items list to add :type items: alignak.objects.item.Items :param index_items: Flag indicating if the items should be indexed on the fly. :type index_items: bool :return: None """ count_templates = 0 count_items = 0 generated_items = [] for item in items: if item.is_tpl(): self.add_template(item) count_templates = count_templates + 1 else: new_items = self.add_item(item, index_items) count_items = count_items + max(1, len(new_items)) if new_items: generated_items.extend(new_items) if count_templates: logger.info(' indexed %d template(s)', count_templates) if count_items: logger.info(' created %d %s(s).', count_items, self.inner_class.my_type)
[ "def", "add_items", "(", "self", ",", "items", ",", "index_items", ")", ":", "count_templates", "=", "0", "count_items", "=", "0", "generated_items", "=", "[", "]", "for", "item", "in", "items", ":", "if", "item", ".", "is_tpl", "(", ")", ":", "self", ".", "add_template", "(", "item", ")", "count_templates", "=", "count_templates", "+", "1", "else", ":", "new_items", "=", "self", ".", "add_item", "(", "item", ",", "index_items", ")", "count_items", "=", "count_items", "+", "max", "(", "1", ",", "len", "(", "new_items", ")", ")", "if", "new_items", ":", "generated_items", ".", "extend", "(", "new_items", ")", "if", "count_templates", ":", "logger", ".", "info", "(", "' indexed %d template(s)'", ",", "count_templates", ")", "if", "count_items", ":", "logger", ".", "info", "(", "' created %d %s(s).'", ",", "count_items", ",", "self", ".", "inner_class", ".", "my_type", ")" ]
Add items to template if is template, else add in item list :param items: items list to add :type items: alignak.objects.item.Items :param index_items: Flag indicating if the items should be indexed on the fly. :type index_items: bool :return: None
[ "Add", "items", "to", "template", "if", "is", "template", "else", "add", "in", "item", "list" ]
python
train
38.846154
tornadoweb/tornado
tornado/web.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/web.py#L809-L839
def write(self, chunk: Union[str, bytes, dict]) -> None: """Writes the given chunk to the output buffer. To write the output to the network, use the `flush()` method below. If the given chunk is a dictionary, we write it as JSON and set the Content-Type of the response to be ``application/json``. (if you want to send JSON as a different ``Content-Type``, call ``set_header`` *after* calling ``write()``). Note that lists are not converted to JSON because of a potential cross-site security vulnerability. All JSON output should be wrapped in a dictionary. More details at http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ and https://github.com/facebook/tornado/issues/1009 """ if self._finished: raise RuntimeError("Cannot write() after finish()") if not isinstance(chunk, (bytes, unicode_type, dict)): message = "write() only accepts bytes, unicode, and dict objects" if isinstance(chunk, list): message += ( ". Lists not accepted for security reasons; see " + "http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write" # noqa: E501 ) raise TypeError(message) if isinstance(chunk, dict): chunk = escape.json_encode(chunk) self.set_header("Content-Type", "application/json; charset=UTF-8") chunk = utf8(chunk) self._write_buffer.append(chunk)
[ "def", "write", "(", "self", ",", "chunk", ":", "Union", "[", "str", ",", "bytes", ",", "dict", "]", ")", "->", "None", ":", "if", "self", ".", "_finished", ":", "raise", "RuntimeError", "(", "\"Cannot write() after finish()\"", ")", "if", "not", "isinstance", "(", "chunk", ",", "(", "bytes", ",", "unicode_type", ",", "dict", ")", ")", ":", "message", "=", "\"write() only accepts bytes, unicode, and dict objects\"", "if", "isinstance", "(", "chunk", ",", "list", ")", ":", "message", "+=", "(", "\". Lists not accepted for security reasons; see \"", "+", "\"http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write\"", "# noqa: E501", ")", "raise", "TypeError", "(", "message", ")", "if", "isinstance", "(", "chunk", ",", "dict", ")", ":", "chunk", "=", "escape", ".", "json_encode", "(", "chunk", ")", "self", ".", "set_header", "(", "\"Content-Type\"", ",", "\"application/json; charset=UTF-8\"", ")", "chunk", "=", "utf8", "(", "chunk", ")", "self", ".", "_write_buffer", ".", "append", "(", "chunk", ")" ]
Writes the given chunk to the output buffer. To write the output to the network, use the `flush()` method below. If the given chunk is a dictionary, we write it as JSON and set the Content-Type of the response to be ``application/json``. (if you want to send JSON as a different ``Content-Type``, call ``set_header`` *after* calling ``write()``). Note that lists are not converted to JSON because of a potential cross-site security vulnerability. All JSON output should be wrapped in a dictionary. More details at http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ and https://github.com/facebook/tornado/issues/1009
[ "Writes", "the", "given", "chunk", "to", "the", "output", "buffer", "." ]
python
train
49.387097
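A minimal handler sketch showing the dict behavior documented above: writing a dict emits JSON and sets the Content-Type header, while writing a list raises TypeError by design.

# Minimal Tornado handler sketch; the route is illustrative.
import tornado.web

class StatusHandler(tornado.web.RequestHandler):
    def get(self):
        # A dict is JSON-encoded with Content-Type application/json.
        self.write({"status": "ok", "items": 3})

app = tornado.web.Application([(r"/status", StatusHandler)])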
dls-controls/pymalcolm
malcolm/core/serializable.py
https://github.com/dls-controls/pymalcolm/blob/80ea667e4da26365a6cebc0249f52fdc744bd983/malcolm/core/serializable.py#L200-L218
def lookup_subclass(cls, d): """Look up a class based on a serialized dictionary containing a typeid Args: d (dict): Dictionary with key "typeid" Returns: Serializable subclass """ try: typeid = d["typeid"] except KeyError: raise FieldError("typeid not present in keys %s" % list(d)) subclass = cls._subcls_lookup.get(typeid, None) if not subclass: raise FieldError("'%s' not a valid typeid" % typeid) else: return subclass
[ "def", "lookup_subclass", "(", "cls", ",", "d", ")", ":", "try", ":", "typeid", "=", "d", "[", "\"typeid\"", "]", "except", "KeyError", ":", "raise", "FieldError", "(", "\"typeid not present in keys %s\"", "%", "list", "(", "d", ")", ")", "subclass", "=", "cls", ".", "_subcls_lookup", ".", "get", "(", "typeid", ",", "None", ")", "if", "not", "subclass", ":", "raise", "FieldError", "(", "\"'%s' not a valid typeid\"", "%", "typeid", ")", "else", ":", "return", "subclass" ]
Look up a class based on a serialized dictionary containing a typeid Args: d (dict): Dictionary with key "typeid" Returns: Serializable subclass
[ "Look", "up", "a", "class", "based", "on", "a", "serialized", "dictionary", "containing", "a", "typeid" ]
python
train
29.052632
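A standalone sketch of the typeid-registry pattern that lookup_subclass implements; the names below are illustrative, not the malcolm API.

# Register subclasses under a typeid, then resolve them from serialized dicts.
class Base:
    _subcls_lookup = {}

    @classmethod
    def register(cls, typeid):
        def decorator(subcls):
            cls._subcls_lookup[typeid] = subcls
            return subcls
        return decorator

    @classmethod
    def lookup_subclass(cls, d):
        return cls._subcls_lookup[d["typeid"]]

@Base.register("demo:Thing:1.0")
class Thing(Base):
    pass

assert Base.lookup_subclass({"typeid": "demo:Thing:1.0"}) is Thing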
lmjohns3/theanets
theanets/losses.py
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/losses.py#L517-L538
def accuracy(self, outputs): '''Build a Theano expression for computing the accuracy of graph output. Parameters ---------- outputs : dict of Theano expressions A dictionary mapping network output names to Theano expressions representing the outputs of a computation graph. Returns ------- acc : Theano expression A Theano expression representing the accuracy of the output compared to the target data. ''' output = outputs[self.output_name] predict = TT.argmax(output, axis=-1) correct = TT.eq(predict, self._target) acc = correct.mean() if self._weights is not None: acc = (self._weights * correct).sum() / self._weights.sum() return acc
[ "def", "accuracy", "(", "self", ",", "outputs", ")", ":", "output", "=", "outputs", "[", "self", ".", "output_name", "]", "predict", "=", "TT", ".", "argmax", "(", "output", ",", "axis", "=", "-", "1", ")", "correct", "=", "TT", ".", "eq", "(", "predict", ",", "self", ".", "_target", ")", "acc", "=", "correct", ".", "mean", "(", ")", "if", "self", ".", "_weights", "is", "not", "None", ":", "acc", "=", "(", "self", ".", "_weights", "*", "correct", ")", ".", "sum", "(", ")", "/", "self", ".", "_weights", ".", "sum", "(", ")", "return", "acc" ]
Build a Theano expression for computing the accuracy of graph output. Parameters ---------- outputs : dict of Theano expressions A dictionary mapping network output names to Theano expressions representing the outputs of a computation graph. Returns ------- acc : Theano expression A Theano expression representing the accuracy of the output compared to the target data.
[ "Build", "a", "Theano", "expression", "for", "computing", "the", "accuracy", "of", "graph", "output", "." ]
python
test
36
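A NumPy restatement of the same accuracy expression, for intuition; theanets builds the symbolic Theano version of exactly this computation, including the optional sample weights.

# Concrete (non-symbolic) version of the accuracy expression above.
import numpy as np

output = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])   # network outputs
target = np.array([1, 0, 0])                              # true class ids
weights = np.array([1.0, 1.0, 0.5])                       # per-sample weights

correct = output.argmax(axis=-1) == target
acc = correct.mean()                                  # unweighted: 2/3
wacc = (weights * correct).sum() / weights.sum()      # weighted: 0.8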
saxix/sample-data-utils
sample_data_utils/utils.py
https://github.com/saxix/sample-data-utils/blob/769f1b46e60def2675a14bd5872047af6d1ea398/sample_data_utils/utils.py#L53-L63
def memoize(func): """Decorator that stores function results in a dictionary to be used on the next time that the same arguments were informed.""" func._cache_dict = {} @wraps(func) def _inner(*args, **kwargs): return _get_memoized_value(func, args, kwargs) return _inner
[ "def", "memoize", "(", "func", ")", ":", "func", ".", "_cache_dict", "=", "{", "}", "@", "wraps", "(", "func", ")", "def", "_inner", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_get_memoized_value", "(", "func", ",", "args", ",", "kwargs", ")", "return", "_inner" ]
Decorator that stores function results in a dictionary to be used on the next time that the same arguments were informed.
[ "Decorator", "that", "stores", "function", "results", "in", "a", "dictionary", "to", "be", "used", "on", "the", "next", "time", "that", "the", "same", "arguments", "were", "informed", "." ]
python
test
26.909091
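A usage sketch, assuming the package is importable under the path shown above and that _get_memoized_value caches on the call arguments as the docstring describes.

# Hedged usage sketch of the memoize decorator.
from sample_data_utils.utils import memoize

calls = []

@memoize
def slow_square(x):
    calls.append(x)            # side effect to make caching visible
    return x * x

assert slow_square(4) == 16
assert slow_square(4) == 16
assert calls == [4]            # second call served from the cache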
robinagist/ezo
ezo/core/helpers.py
https://github.com/robinagist/ezo/blob/fae896daa1c896c7c50f2c9cfe3f7f9cdb3fc986/ezo/core/helpers.py#L63-L83
def get_topic_sha3(event_block): ''' takes an event block and returns a signature for sha3 hashing :param event_block: :return: ''' sig = "" sig += event_block["name"] if not event_block["inputs"]: sig += "()" return sig sig += "(" for input in event_block["inputs"]: sig += input["type"] sig += "," sig = sig[:-1] sig += ")" return sig
[ "def", "get_topic_sha3", "(", "event_block", ")", ":", "sig", "=", "\"\"", "sig", "+=", "event_block", "[", "\"name\"", "]", "if", "not", "event_block", "[", "\"inputs\"", "]", ":", "sig", "+=", "\"()\"", "return", "sig", "sig", "+=", "\"(\"", "for", "input", "in", "event_block", "[", "\"inputs\"", "]", ":", "sig", "+=", "input", "[", "\"type\"", "]", "sig", "+=", "\",\"", "sig", "=", "sig", "[", ":", "-", "1", "]", "sig", "+=", "\")\"", "return", "sig" ]
takes an event block and returns a signature for sha3 hashing :param event_block: :return:
[ "takes", "an", "event", "block", "and", "returns", "a", "signature", "for", "sha3", "hashing", ":", "param", "event_block", ":", ":", "return", ":" ]
python
train
19.190476
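A worked example tracing the function above on the canonical ERC-20 Transfer event; the trailing comma is stripped before the closing parenthesis is appended.

# Input block (illustrative) and the signature string the function builds.
event_block = {
    "name": "Transfer",
    "inputs": [
        {"type": "address"},
        {"type": "address"},
        {"type": "uint256"},
    ],
}
# get_topic_sha3(event_block) -> "Transfer(address,address,uint256)"
# An event with no inputs, e.g. {"name": "Paused", "inputs": []}, -> "Paused()"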
heuer/segno
segno/utils.py
https://github.com/heuer/segno/blob/64d912a2bd17d0b5ff3e8b5d37098edfc663c2b3/segno/utils.py#L43-L65
def get_symbol_size(version, scale=1, border=None): """\ Returns the symbol size (width x height) with the provided border and scaling factor. :param int version: A version constant. :param scale: Indicates the size of a single module (default: 1). The size of a module depends on the used output format; i.e. in a PNG context, a scaling factor of 2 indicates that a module has a size of 2 x 2 pixel. Some outputs (i.e. SVG) accept floating point values. :type scale: int or float :param int border: The border size or ``None`` to specify the default quiet zone (4 for QR Codes, 2 for Micro QR Codes). :rtype: tuple (width, height) """ if border is None: border = get_default_border_size(version) # M4 = 0, M3 = -1 ... dim = version * 4 + 17 if version > 0 else (version + 4) * 2 + 9 dim += 2 * border dim *= scale return dim, dim
[ "def", "get_symbol_size", "(", "version", ",", "scale", "=", "1", ",", "border", "=", "None", ")", ":", "if", "border", "is", "None", ":", "border", "=", "get_default_border_size", "(", "version", ")", "# M4 = 0, M3 = -1 ...", "dim", "=", "version", "*", "4", "+", "17", "if", "version", ">", "0", "else", "(", "version", "+", "4", ")", "*", "2", "+", "9", "dim", "+=", "2", "*", "border", "dim", "*=", "scale", "return", "dim", ",", "dim" ]
\ Returns the symbol size (width x height) with the provided border and scaling factor. :param int version: A version constant. :param scale: Indicates the size of a single module (default: 1). The size of a module depends on the used output format; i.e. in a PNG context, a scaling factor of 2 indicates that a module has a size of 2 x 2 pixel. Some outputs (i.e. SVG) accept floating point values. :type scale: int or float :param int border: The border size or ``None`` to specify the default quiet zone (4 for QR Codes, 2 for Micro QR Codes). :rtype: tuple (width, height)
[ "\\", "Returns", "the", "symbol", "size", "(", "width", "x", "height", ")", "with", "the", "provided", "border", "and", "scaling", "factor", "." ]
python
train
42.608696
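A worked check of the dimension formula for a regular QR code: version 1 is 4 * 1 + 17 = 21 modules per side, the default quiet zone adds 4 modules on each side, and scale multiplies the final size.

# Reproduce get_symbol_size(1, scale=10) by hand.
version, border, scale = 1, 4, 10
dim = version * 4 + 17       # 21 modules
dim += 2 * border            # 29 including the quiet zone
dim *= scale                 # 290 at scale 10
assert (dim, dim) == (290, 290)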
mwgielen/jackal
jackal/scripts/domaindump.py
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/domaindump.py#L160-L195
def parse_domain_users(domain_users_file, domain_groups_file): """ Parses the domain users and groups files. """ with open(domain_users_file) as f: users = json.loads(f.read()) domain_groups = {} if domain_groups_file: with open(domain_groups_file) as f: groups = json.loads(f.read()) for group in groups: sid = get_field(group, 'objectSid') domain_groups[int(sid.split('-')[-1])] = get_field(group, 'cn') user_search = UserSearch() count = 0 total = len(users) print_notification("Importing {} users".format(total)) for entry in users: result = parse_user(entry, domain_groups) user = user_search.id_to_object(result['username']) user.name = result['name'] user.domain.append(result['domain']) user.description = result['description'] user.groups.extend(result['groups']) user.flags.extend(result['flags']) user.sid = result['sid'] user.add_tag("domaindump") user.save() count += 1 sys.stdout.write('\r') sys.stdout.write("[{}/{}]".format(count, total)) sys.stdout.flush() sys.stdout.write('\r') return count
[ "def", "parse_domain_users", "(", "domain_users_file", ",", "domain_groups_file", ")", ":", "with", "open", "(", "domain_users_file", ")", "as", "f", ":", "users", "=", "json", ".", "loads", "(", "f", ".", "read", "(", ")", ")", "domain_groups", "=", "{", "}", "if", "domain_groups_file", ":", "with", "open", "(", "domain_groups_file", ")", "as", "f", ":", "groups", "=", "json", ".", "loads", "(", "f", ".", "read", "(", ")", ")", "for", "group", "in", "groups", ":", "sid", "=", "get_field", "(", "group", ",", "'objectSid'", ")", "domain_groups", "[", "int", "(", "sid", ".", "split", "(", "'-'", ")", "[", "-", "1", "]", ")", "]", "=", "get_field", "(", "group", ",", "'cn'", ")", "user_search", "=", "UserSearch", "(", ")", "count", "=", "0", "total", "=", "len", "(", "users", ")", "print_notification", "(", "\"Importing {} users\"", ".", "format", "(", "total", ")", ")", "for", "entry", "in", "users", ":", "result", "=", "parse_user", "(", "entry", ",", "domain_groups", ")", "user", "=", "user_search", ".", "id_to_object", "(", "result", "[", "'username'", "]", ")", "user", ".", "name", "=", "result", "[", "'name'", "]", "user", ".", "domain", ".", "append", "(", "result", "[", "'domain'", "]", ")", "user", ".", "description", "=", "result", "[", "'description'", "]", "user", ".", "groups", ".", "extend", "(", "result", "[", "'groups'", "]", ")", "user", ".", "flags", ".", "extend", "(", "result", "[", "'flags'", "]", ")", "user", ".", "sid", "=", "result", "[", "'sid'", "]", "user", ".", "add_tag", "(", "\"domaindump\"", ")", "user", ".", "save", "(", ")", "count", "+=", "1", "sys", ".", "stdout", ".", "write", "(", "'\\r'", ")", "sys", ".", "stdout", ".", "write", "(", "\"[{}/{}]\"", ".", "format", "(", "count", ",", "total", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "sys", ".", "stdout", ".", "write", "(", "'\\r'", ")", "return", "count" ]
Parses the domain users and groups files.
[ "Parses", "the", "domain", "users", "and", "groups", "files", "." ]
python
valid
33.777778
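The group lookup above keys on the RID, the last dash-separated component of the objectSid; a small self-contained illustration:

# 513 is the well-known RID of the "Domain Users" group.
sid = 'S-1-5-21-1111111111-2222222222-3333333333-513'
rid = int(sid.split('-')[-1])
assert rid == 513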
google/openhtf
examples/repeat.py
https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/examples/repeat.py#L41-L48
def run(self): """Increments counter and raises an exception for first two runs.""" self.count += 1 print('FailTwicePlug: Run number %s' % (self.count)) if self.count < 3: raise RuntimeError('Fails a couple times') return True
[ "def", "run", "(", "self", ")", ":", "self", ".", "count", "+=", "1", "print", "(", "'FailTwicePlug: Run number %s'", "%", "(", "self", ".", "count", ")", ")", "if", "self", ".", "count", "<", "3", ":", "raise", "RuntimeError", "(", "'Fails a couple times'", ")", "return", "True" ]
Increments counter and raises an exception for first two runs.
[ "Increments", "counter", "and", "raises", "an", "exception", "for", "first", "two", "runs", "." ]
python
train
30.75
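A sketch of how a retry loop interacts with this plug, assuming the class's __init__ (not shown in this record) sets self.count = 0; the first two calls raise and the third returns True. The loop is illustrative, not openhtf's phase-retry machinery.

# Hedged retry sketch around FailTwicePlug.run().
plug = FailTwicePlug()
for attempt in range(1, 4):
    try:
        plug.run()             # raises RuntimeError on attempts 1 and 2
        break
    except RuntimeError:
        continue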
fozzle/python-brotherprint
brotherprint/brotherprint.py
https://github.com/fozzle/python-brotherprint/blob/5fb92df11b599c30a7da3d6ac7ed60acff230044/brotherprint/brotherprint.py#L710-L728
def char_style(self, style): '''Sets the character style. Args: style: The desired character style. Choose from 'normal', 'outline', 'shadow', and 'outlineshadow' Returns: None Raises: RuntimeError: Invalid character style ''' styleset = {'normal': 0, 'outline': 1, 'shadow': 2, 'outlineshadow': 3 } if style in styleset: self.send(chr(27) + 'q' + chr(styleset[style])) else: raise RuntimeError('Invalid character style in function charStyle')
[ "def", "char_style", "(", "self", ",", "style", ")", ":", "styleset", "=", "{", "'normal'", ":", "0", ",", "'outline'", ":", "1", ",", "'shadow'", ":", "2", ",", "'outlineshadow'", ":", "3", "}", "if", "style", "in", "styleset", ":", "self", ".", "send", "(", "chr", "(", "27", ")", "+", "'q'", "+", "chr", "(", "styleset", "[", "style", "]", ")", ")", "else", ":", "raise", "RuntimeError", "(", "'Invalid character style in function charStyle'", ")" ]
Sets the character style. Args: style: The desired character style. Choose from 'normal', 'outline', 'shadow', and 'outlineshadow' Returns: None Raises: RuntimeError: Invalid character style
[ "Sets", "the", "character", "style", ".", "Args", ":", "style", ":", "The", "desired", "character", "style", ".", "Choose", "from", "normal", "outline", "shadow", "and", "outlineshadow", "Returns", ":", "None", "Raises", ":", "RuntimeError", ":", "Invalid", "character", "style" ]
python
train
33.578947
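The escape sequence sent for each style is ESC 'q' n, one selector byte after the two-byte prefix; a quick check of what char_style('shadow') transmits:

# normal -> b'\x1bq\x00', outline -> b'\x1bq\x01',
# shadow -> b'\x1bq\x02', outlineshadow -> b'\x1bq\x03'
seq = chr(27) + 'q' + chr(2)
assert seq.encode('latin-1') == b'\x1bq\x02'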
baliame/http-hmac-python
httphmac/v2.py
https://github.com/baliame/http-hmac-python/blob/9884c0cbfdb712f9f37080a8efbfdce82850785f/httphmac/v2.py#L111-L116
def get_response_signer(self): """Returns the response signer for this version of the signature. """ if not hasattr(self, "response_signer"): self.response_signer = V2ResponseSigner(self.digest, orig=self) return self.response_signer
[ "def", "get_response_signer", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "\"response_signer\"", ")", ":", "self", ".", "response_signer", "=", "V2ResponseSigner", "(", "self", ".", "digest", ",", "orig", "=", "self", ")", "return", "self", ".", "response_signer" ]
Returns the response signer for this version of the signature.
[ "Returns", "the", "response", "signer", "for", "this", "version", "of", "the", "signature", "." ]
python
train
45.333333
NuGrid/NuGridPy
nugridpy/astronomy.py
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/astronomy.py#L81-L120
def visc_rad_kap_sc(T,rho,X): ''' Radiative viscosity (Thomas, 1930) for e- scattering opacity Parameters ---------- X : float H mass fraction T : float temperature in K rho : float density in cgs Returns ------- nu : float radiative diffusivity in [cm**2/s] Examples -------- >>> In [1]: import astronomy as ast >>> In [2]: l = 100*1.e5 # 100km >>> In [3]: v = 1.e5 # typical velocity >>> In [4]: T = 90.e6 # temperature >>> In [5]: X = 0.001 # H mass fraction >>> In [6]: rho = 100. # density >>> In [7]: nu = ast.visc_rad_kap_sc(T,rho,X) >>> In [8]: Re=v*l/nu >>> In [9]: print "Re_rad = "+str('%g'%Re) >>> Re_rad = 4.43512e+08 Notes ----- Eqn. 14' in Schatzman, 1977, assume electron scattering opacity kappa_sc = 0.2*(1+X), Kippenhahn (2nd edn, Eqn 17.2) ''' kappa = 0.2*(1.+X) nu_rad = 6.88e-26*(old_div(T**4,(kappa*rho**2))) return nu_rad
[ "def", "visc_rad_kap_sc", "(", "T", ",", "rho", ",", "X", ")", ":", "kappa", "=", "0.2", "*", "(", "1.", "+", "X", ")", "nu_rad", "=", "6.88e-26", "*", "(", "old_div", "(", "T", "**", "4", ",", "(", "kappa", "*", "rho", "**", "2", ")", ")", ")", "return", "nu_rad" ]
Radiative viscosity (Thomas, 1930) for e- scattering opacity Parameters ---------- X : float H mass fraction T : float temperature in K rho : float density in cgs Returns ------- nu : float radiative diffusivity in [cm**2/s] Examples -------- >>> In [1]: import astronomy as ast >>> In [2]: l = 100*1.e5 # 100km >>> In [3]: v = 1.e5 # typical velocity >>> In [4]: T = 90.e6 # temperature >>> In [5]: X = 0.001 # H mass fraction >>> In [6]: rho = 100. # density >>> In [7]: nu = ast.visc_rad_kap_sc(T,rho,X) >>> In [8]: Re=v*l/nu >>> In [9]: print "Re_rad = "+str('%g'%Re) >>> Re_rad = 4.43512e+08 Notes ----- Eqn. 14' in Schatzman, 1977, assume electron scattering opacity kappa_sc = 0.2*(1+X), Kippenhahn (2nd edn, Eqn 17.2)
[ "Radiative", "viscosity", "(", "Thomas", "1930", ")", "for", "e", "-", "scattering", "opacity" ]
python
train
24.275
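A plain-Python reproduction of the docstring's Reynolds-number example (true division makes old_div unnecessary); the numbers match the quoted Re_rad = 4.43512e+08.

# kappa_sc = 0.2*(1+X); nu = 6.88e-26 * T**4 / (kappa * rho**2)
T, X, rho = 90.e6, 0.001, 100.
kappa = 0.2 * (1. + X)
nu_rad = 6.88e-26 * T**4 / (kappa * rho**2)   # radiative diffusivity, cm**2/s
l, v = 100 * 1.e5, 1.e5                       # 100 km length scale, velocity
print('Re_rad = %g' % (v * l / nu_rad))       # ~4.43512e+08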
spulec/moto
moto/ecr/models.py
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/ecr/models.py#L208-L228
def list_images(self, repository_name, registry_id=None): """ maxResults and filtering not implemented """ repository = None found = False if repository_name in self.repositories: repository = self.repositories[repository_name] if registry_id: if repository.registry_id == registry_id: found = True else: found = True if not found: raise RepositoryNotFoundException(repository_name, registry_id or DEFAULT_REGISTRY_ID) images = [] for image in repository.images: images.append(image) return images
[ "def", "list_images", "(", "self", ",", "repository_name", ",", "registry_id", "=", "None", ")", ":", "repository", "=", "None", "found", "=", "False", "if", "repository_name", "in", "self", ".", "repositories", ":", "repository", "=", "self", ".", "repositories", "[", "repository_name", "]", "if", "registry_id", ":", "if", "repository", ".", "registry_id", "==", "registry_id", ":", "found", "=", "True", "else", ":", "found", "=", "True", "if", "not", "found", ":", "raise", "RepositoryNotFoundException", "(", "repository_name", ",", "registry_id", "or", "DEFAULT_REGISTRY_ID", ")", "images", "=", "[", "]", "for", "image", "in", "repository", ".", "images", ":", "images", ".", "append", "(", "image", ")", "return", "images" ]
maxResults and filtering not implemented
[ "maxResults", "and", "filtering", "not", "implemented" ]
python
train
31.952381
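A test sketch exercising this backend through moto's ECR mock with boto3; the decorator name follows moto's pre-5.x API (newer releases use mock_aws), so treat the import as an assumption.

# Hedged moto/boto3 test sketch.
import boto3
from moto import mock_ecr

@mock_ecr
def test_list_images():
    client = boto3.client('ecr', region_name='us-east-1')
    client.create_repository(repositoryName='demo')
    images = client.list_images(repositoryName='demo')['imageIds']
    assert images == []        # nothing pushed yet

test_list_images()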
SergeySatskiy/cdm-pythonparser
cdmpyparser.py
https://github.com/SergeySatskiy/cdm-pythonparser/blob/7e933aca899b1853d744082313ffc3a8b1154505/cdmpyparser.py#L26-L57
def trim_docstring(docstring): """Taken from http://www.python.org/dev/peps/pep-0257/""" if not docstring: return '' # Convert tabs to spaces (following the normal Python rules) # and split into a list of lines: lines = docstring.expandtabs().splitlines() # Determine minimum indentation (first line doesn't count): indent = maxsize for line in lines[1:]: stripped = line.lstrip() if stripped: indent = min(indent, len(line) - len(stripped)) # Remove indentation (first line is special): lines[0] = lines[0].strip() if indent < maxsize: index = 1 for line in lines[1:]: lines[index] = line[indent:].rstrip() index += 1 # Strip off trailing and leading blank lines: while lines and not lines[-1]: del lines[-1] while lines and not lines[0]: del lines[0] # Return a single string: return '\n'.join(lines)
[ "def", "trim_docstring", "(", "docstring", ")", ":", "if", "not", "docstring", ":", "return", "''", "# Convert tabs to spaces (following the normal Python rules)", "# and split into a list of lines:", "lines", "=", "docstring", ".", "expandtabs", "(", ")", ".", "splitlines", "(", ")", "# Determine minimum indentation (first line doesn't count):", "indent", "=", "maxsize", "for", "line", "in", "lines", "[", "1", ":", "]", ":", "stripped", "=", "line", ".", "lstrip", "(", ")", "if", "stripped", ":", "indent", "=", "min", "(", "indent", ",", "len", "(", "line", ")", "-", "len", "(", "stripped", ")", ")", "# Remove indentation (first line is special):", "lines", "[", "0", "]", "=", "lines", "[", "0", "]", ".", "strip", "(", ")", "if", "indent", "<", "maxsize", ":", "index", "=", "1", "for", "line", "in", "lines", "[", "1", ":", "]", ":", "lines", "[", "index", "]", "=", "line", "[", "indent", ":", "]", ".", "rstrip", "(", ")", "index", "+=", "1", "# Strip off trailing and leading blank lines:", "while", "lines", "and", "not", "lines", "[", "-", "1", "]", ":", "del", "lines", "[", "-", "1", "]", "while", "lines", "and", "not", "lines", "[", "0", "]", ":", "del", "lines", "[", "0", "]", "# Return a single string:", "return", "'\\n'", ".", "join", "(", "lines", ")" ]
Taken from http://www.python.org/dev/peps/pep-0257/
[ "Taken", "from", "http", ":", "//", "www", ".", "python", ".", "org", "/", "dev", "/", "peps", "/", "pep", "-", "0257", "/" ]
python
train
29.125
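A usage sketch following the PEP 257 behavior: the common leading indentation is removed while relative indentation is preserved. The import assumes the module name shown in this record's path.

# Hedged usage sketch of trim_docstring.
from cdmpyparser import trim_docstring

def sample():
    """Summary line.

    Details:
        - indented item
    """

print(repr(trim_docstring(sample.__doc__)))
# -> 'Summary line.\n\nDetails:\n    - indented item'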
kubernetes-client/python
kubernetes/client/apis/core_v1_api.py
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/core_v1_api.py#L12346-L12373
def list_namespaced_pod(self, namespace, **kwargs): """ list or watch objects of kind Pod This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_pod(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1PodList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_namespaced_pod_with_http_info(namespace, **kwargs) else: (data) = self.list_namespaced_pod_with_http_info(namespace, **kwargs) return data
[ "def", "list_namespaced_pod", "(", "self", ",", "namespace", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "list_namespaced_pod_with_http_info", "(", "namespace", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "list_namespaced_pod_with_http_info", "(", "namespace", ",", "*", "*", "kwargs", ")", "return", "data" ]
list or watch objects of kind Pod This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_pod(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1PodList If the method is called asynchronously, returns the request thread.
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1PodList If the method is called asynchronously, returns the request thread.
[ "list", "or", "watch", "objects", "of", "kind", "Pod", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "pass", "async_req", "=", "True", ">>>", "thread", "=", "api", ".", "list_namespaced_pod", "(", "namespace", "async_req", "=", "True", ")", ">>>", "result", "=", "thread", ".", "get", "()" ]
python
train
165.285714
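A typical synchronous call, assuming kubeconfig-based access to a cluster; the namespace, label selector, and limit are illustrative.

# Standard kubernetes-client usage sketch.
from kubernetes import client, config

config.load_kube_config()        # or config.load_incluster_config() in-cluster
v1 = client.CoreV1Api()
pods = v1.list_namespaced_pod('default', label_selector='app=web', limit=50)
for pod in pods.items:
    print(pod.metadata.name, pod.status.phase)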
sam-washington/requests-aws4auth
requests_aws4auth/aws4auth.py
https://github.com/sam-washington/requests-aws4auth/blob/1201e470c6d5847b7fe42e937a55755e1895e72c/requests_aws4auth/aws4auth.py#L368-L394
def get_request_date(cls, req): """ Try to pull a date from the request by looking first at the x-amz-date header, and if that's not present then the Date header. Return a datetime.date object, or None if neither date header is found or is in a recognisable format. req -- a requests PreparedRequest object """ date = None for header in ['x-amz-date', 'date']: if header not in req.headers: continue try: date_str = cls.parse_date(req.headers[header]) except DateFormatError: continue try: date = datetime.datetime.strptime(date_str, '%Y-%m-%d').date() except ValueError: continue else: break return date
[ "def", "get_request_date", "(", "cls", ",", "req", ")", ":", "date", "=", "None", "for", "header", "in", "[", "'x-amz-date'", ",", "'date'", "]", ":", "if", "header", "not", "in", "req", ".", "headers", ":", "continue", "try", ":", "date_str", "=", "cls", ".", "parse_date", "(", "req", ".", "headers", "[", "header", "]", ")", "except", "DateFormatError", ":", "continue", "try", ":", "date", "=", "datetime", ".", "datetime", ".", "strptime", "(", "date_str", ",", "'%Y-%m-%d'", ")", ".", "date", "(", ")", "except", "ValueError", ":", "continue", "else", ":", "break", "return", "date" ]
Try to pull a date from the request by looking first at the x-amz-date header, and if that's not present then the Date header. Return a datetime.date object, or None if neither date header is found or is in a recognisable format. req -- a requests PreparedRequest object
[ "Try", "to", "pull", "a", "date", "from", "the", "request", "by", "looking", "first", "at", "the", "x", "-", "amz", "-", "date", "header", "and", "if", "that", "s", "not", "present", "then", "the", "Date", "header", "." ]
python
valid
30.740741
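A standalone sketch of the same header-precedence logic over a plain dict of headers; AWS4Auth itself operates on a requests PreparedRequest, and its parse_date accepts more formats than this stand-in does.

# Hedged stand-in: x-amz-date wins over date, malformed values are skipped.
import datetime

def request_date(headers):
    for header in ('x-amz-date', 'date'):
        if header not in headers:
            continue
        try:
            return datetime.datetime.strptime(headers[header][:8], '%Y%m%d').date()
        except ValueError:
            continue
    return None

print(request_date({'x-amz-date': '20230501T120000Z'}))   # 2023-05-01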
androguard/androguard
androguard/core/analysis/analysis.py
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/analysis/analysis.py#L1494-L1520
def find_methods(self, classname=".*", methodname=".*", descriptor=".*", accessflags=".*", no_external=False): """ Find a method by name using regular expression. This method will return all MethodClassAnalysis objects, which match the classname, methodname, descriptor and accessflags of the method. :param classname: regular expression for the classname :param methodname: regular expression for the method name :param descriptor: regular expression for the descriptor :param accessflags: regular expression for the accessflags :param no_external: Remove external method from the output (default False) :rtype: generator of `MethodClassAnalysis` """ for cname, c in self.classes.items(): if re.match(classname, cname): for m in c.get_methods(): z = m.get_method() # TODO is it even possible that an internal class has # external methods? Maybe we should check for ExternalClass # instead... if no_external and isinstance(z, ExternalMethod): continue if re.match(methodname, z.get_name()) and \ re.match(descriptor, z.get_descriptor()) and \ re.match(accessflags, z.get_access_flags_string()): yield m
[ "def", "find_methods", "(", "self", ",", "classname", "=", "\".*\"", ",", "methodname", "=", "\".*\"", ",", "descriptor", "=", "\".*\"", ",", "accessflags", "=", "\".*\"", ",", "no_external", "=", "False", ")", ":", "for", "cname", ",", "c", "in", "self", ".", "classes", ".", "items", "(", ")", ":", "if", "re", ".", "match", "(", "classname", ",", "cname", ")", ":", "for", "m", "in", "c", ".", "get_methods", "(", ")", ":", "z", "=", "m", ".", "get_method", "(", ")", "# TODO is it even possible that an internal class has", "# external methods? Maybe we should check for ExternalClass", "# instead...", "if", "no_external", "and", "isinstance", "(", "z", ",", "ExternalMethod", ")", ":", "continue", "if", "re", ".", "match", "(", "methodname", ",", "z", ".", "get_name", "(", ")", ")", "and", "re", ".", "match", "(", "descriptor", ",", "z", ".", "get_descriptor", "(", ")", ")", "and", "re", ".", "match", "(", "accessflags", ",", "z", ".", "get_access_flags_string", "(", ")", ")", ":", "yield", "m" ]
Find a method by name using regular expression. This method will return all MethodClassAnalysis objects, which match the classname, methodname, descriptor and accessflags of the method. :param classname: regular expression for the classname :param methodname: regular expression for the method name :param descriptor: regular expression for the descriptor :param accessflags: regular expression for the accessflags :param no_external: Remove external method from the output (default False) :rtype: generator of `MethodClassAnalysis`
[ "Find", "a", "method", "by", "name", "using", "regular", "expression", ".", "This", "method", "will", "return", "all", "MethodClassAnalysis", "objects", "which", "match", "the", "classname", "methodname", "descriptor", "and", "accessflags", "of", "the", "method", "." ]
python
train
52.851852
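A usage sketch with androguard's one-shot AnalyzeAPK helper, which returns the Analysis object this method lives on; the APK path and regexes are illustrative.

# Hedged androguard usage sketch.
from androguard.misc import AnalyzeAPK

a, d, dx = AnalyzeAPK('app.apk')      # dx is the Analysis instance
for m in dx.find_methods(classname='Lcom/example/.*',
                         methodname='onCreate',
                         no_external=True):
    print(m.get_method())             # the underlying EncodedMethod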
scot-dev/scot
scot/ooapi.py
https://github.com/scot-dev/scot/blob/48598b79d4400dad893b134cd2194715511facda/scot/ooapi.py#L202-L221
def set_used_labels(self, labels): """ Specify which trials to use in subsequent analysis steps. This function masks trials based on their class labels. Parameters ---------- labels : list of class labels Marks all trials that have a label that is in the `labels` list for further processing. Returns ------- self : Workspace The Workspace object. """ mask = np.zeros(self.cl_.size, dtype=bool) for l in labels: mask = np.logical_or(mask, self.cl_ == l) self.trial_mask_ = mask return self
[ "def", "set_used_labels", "(", "self", ",", "labels", ")", ":", "mask", "=", "np", ".", "zeros", "(", "self", ".", "cl_", ".", "size", ",", "dtype", "=", "bool", ")", "for", "l", "in", "labels", ":", "mask", "=", "np", ".", "logical_or", "(", "mask", ",", "self", ".", "cl_", "==", "l", ")", "self", ".", "trial_mask_", "=", "mask", "return", "self" ]
Specify which trials to use in subsequent analysis steps. This function masks trials based on their class labels. Parameters ---------- labels : list of class labels Marks all trials that have a label that is in the `labels` list for further processing. Returns ------- self : Workspace The Workspace object.
[ "Specify", "which", "trials", "to", "use", "in", "subsequent", "analysis", "steps", "." ]
python
train
30.65
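A NumPy sketch of the masking logic above; np.isin builds the identical boolean mask in one call, which is a reasonable shortcut for the loop.

# The loop form used by set_used_labels, checked against np.isin.
import numpy as np

cl = np.array(['rest', 'move', 'rest', 'move', 'idle'])
labels = ['move', 'idle']

mask = np.zeros(cl.size, dtype=bool)
for l in labels:
    mask = np.logical_or(mask, cl == l)

assert np.array_equal(mask, np.isin(cl, labels))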