Dataset schema (per-record fields, with value statistics reported by the viewer):
  repo              string, 7-54 characters
  path              string, 4-192 characters
  url               string, 87-284 characters
  code              string, 78-104k characters
  code_tokens       sequence of strings
  docstring         string, 1-46.9k characters
  docstring_tokens  sequence of strings
  language          string, 1 distinct value
  partition         string, 3 distinct values
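As an editorial aside, the schema above is the familiar (code, docstring) pair layout, so a record set in this shape can be loaded with the Hugging Face datasets JSON loader. This is a minimal sketch under assumptions: the file name "python_train.jsonl" is a placeholder, and the column names are taken from the schema listed above.

from datasets import load_dataset

# Hypothetical local file holding records with the schema shown above.
ds = load_dataset("json", data_files="python_train.jsonl", split="train")

row = ds[0]
print(row["repo"], row["path"], row["partition"])  # record provenance
print(row["docstring"])                            # natural-language summary of row["code"]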
robinandeer/puzzle
puzzle/models/mixins.py
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/models/mixins.py#L17-L25
def is_affected(self):
    """Boolean for telling if the sample is affected."""
    phenotype = self.phenotype
    if phenotype == '1':
        return False
    elif phenotype == '2':
        return True
    else:
        return False
[ "def", "is_affected", "(", "self", ")", ":", "phenotype", "=", "self", ".", "phenotype", "if", "phenotype", "==", "'1'", ":", "return", "False", "elif", "phenotype", "==", "'2'", ":", "return", "True", "else", ":", "return", "False" ]
Boolean for telling if the sample is affected.
[ "Boolean", "for", "telling", "if", "the", "sample", "is", "affected", "." ]
python
train
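A minimal, standalone sketch of the PED-style phenotype convention the record above encodes ('1' = unaffected, '2' = affected, anything else treated as unaffected); the Sample class below is a stand-in for illustration, not puzzle's actual model.

class Sample:
    """Stand-in object carrying only the phenotype field used by the mixin above."""
    def __init__(self, phenotype):
        self.phenotype = phenotype

    def is_affected(self):
        # Same outcome as the record's logic: only '2' maps to affected.
        return self.phenotype == '2'

print(Sample('2').is_affected())  # True
print(Sample('1').is_affected())  # False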
allenai/allennlp
allennlp/data/dataset_readers/dataset_utils/span_utils.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/dataset_readers/dataset_utils/span_utils.py#L376-L435
def bmes_tags_to_spans(tag_sequence: List[str],
                       classes_to_ignore: List[str] = None) -> List[TypedStringSpan]:
    """
    Given a sequence corresponding to BMES tags, extracts spans.
    Spans are inclusive and can be of zero length, representing a single word span.
    Ill-formed spans are also included (i.e those which do not start with a "B-LABEL"),
    as otherwise it is possible to get a perfect precision score whilst still predicting
    ill-formed spans in addition to the correct spans.
    This function works properly when the spans are unlabeled (i.e., your labels are
    simply "B", "M", "E" and "S").

    Parameters
    ----------
    tag_sequence : List[str], required.
        The integer class labels for a sequence.
    classes_to_ignore : List[str], optional (default = None).
        A list of string class labels `excluding` the bio tag
        which should be ignored when extracting spans.

    Returns
    -------
    spans : List[TypedStringSpan]
        The typed, extracted spans from the sequence, in the format (label, (span_start, span_end)).
        Note that the label `does not` contain any BIO tag prefixes.
    """
    def extract_bmes_tag_label(text):
        bmes_tag = text[0]
        label = text[2:]
        return bmes_tag, label

    spans: List[Tuple[str, List[int]]] = []
    prev_bmes_tag: Optional[str] = None
    for index, tag in enumerate(tag_sequence):
        bmes_tag, label = extract_bmes_tag_label(tag)
        if bmes_tag in ('B', 'S'):
            # Regardless of tag, we start a new span when reaching B & S.
            spans.append((label, [index, index]))
        elif bmes_tag in ('M', 'E') and prev_bmes_tag in ('B', 'M') and spans[-1][0] == label:
            # Only expand the span if
            # 1. Valid transition: B/M -> M/E.
            # 2. Matched label.
            spans[-1][1][1] = index
        else:
            # Best effort split for invalid span.
            spans.append((label, [index, index]))
        # update previous BMES tag.
        prev_bmes_tag = bmes_tag

    classes_to_ignore = classes_to_ignore or []
    return [
        # to tuple.
        (span[0], (span[1][0], span[1][1]))
        for span in spans
        if span[0] not in classes_to_ignore
    ]
[ "def", "bmes_tags_to_spans", "(", "tag_sequence", ":", "List", "[", "str", "]", ",", "classes_to_ignore", ":", "List", "[", "str", "]", "=", "None", ")", "->", "List", "[", "TypedStringSpan", "]", ":", "def", "extract_bmes_tag_label", "(", "text", ")", ":", "bmes_tag", "=", "text", "[", "0", "]", "label", "=", "text", "[", "2", ":", "]", "return", "bmes_tag", ",", "label", "spans", ":", "List", "[", "Tuple", "[", "str", ",", "List", "[", "int", "]", "]", "]", "=", "[", "]", "prev_bmes_tag", ":", "Optional", "[", "str", "]", "=", "None", "for", "index", ",", "tag", "in", "enumerate", "(", "tag_sequence", ")", ":", "bmes_tag", ",", "label", "=", "extract_bmes_tag_label", "(", "tag", ")", "if", "bmes_tag", "in", "(", "'B'", ",", "'S'", ")", ":", "# Regardless of tag, we start a new span when reaching B & S.", "spans", ".", "append", "(", "(", "label", ",", "[", "index", ",", "index", "]", ")", ")", "elif", "bmes_tag", "in", "(", "'M'", ",", "'E'", ")", "and", "prev_bmes_tag", "in", "(", "'B'", ",", "'M'", ")", "and", "spans", "[", "-", "1", "]", "[", "0", "]", "==", "label", ":", "# Only expand the span if", "# 1. Valid transition: B/M -> M/E.", "# 2. Matched label.", "spans", "[", "-", "1", "]", "[", "1", "]", "[", "1", "]", "=", "index", "else", ":", "# Best effort split for invalid span.", "spans", ".", "append", "(", "(", "label", ",", "[", "index", ",", "index", "]", ")", ")", "# update previous BMES tag.", "prev_bmes_tag", "=", "bmes_tag", "classes_to_ignore", "=", "classes_to_ignore", "or", "[", "]", "return", "[", "# to tuple.", "(", "span", "[", "0", "]", ",", "(", "span", "[", "1", "]", "[", "0", "]", ",", "span", "[", "1", "]", "[", "1", "]", ")", ")", "for", "span", "in", "spans", "if", "span", "[", "0", "]", "not", "in", "classes_to_ignore", "]" ]
Given a sequence corresponding to BMES tags, extracts spans. Spans are inclusive and can be of zero length, representing a single word span. Ill-formed spans are also included (i.e those which do not start with a "B-LABEL"), as otherwise it is possible to get a perfect precision score whilst still predicting ill-formed spans in addition to the correct spans. This function works properly when the spans are unlabeled (i.e., your labels are simply "B", "M", "E" and "S"). Parameters ---------- tag_sequence : List[str], required. The integer class labels for a sequence. classes_to_ignore : List[str], optional (default = None). A list of string class labels `excluding` the bio tag which should be ignored when extracting spans. Returns ------- spans : List[TypedStringSpan] The typed, extracted spans from the sequence, in the format (label, (span_start, span_end)). Note that the label `does not` contain any BIO tag prefixes.
[ "Given", "a", "sequence", "corresponding", "to", "BMES", "tags", "extracts", "spans", ".", "Spans", "are", "inclusive", "and", "can", "be", "of", "zero", "length", "representing", "a", "single", "word", "span", ".", "Ill", "-", "formed", "spans", "are", "also", "included", "(", "i", ".", "e", "those", "which", "do", "not", "start", "with", "a", "B", "-", "LABEL", ")", "as", "otherwise", "it", "is", "possible", "to", "get", "a", "perfect", "precision", "score", "whilst", "still", "predicting", "ill", "-", "formed", "spans", "in", "addition", "to", "the", "correct", "spans", ".", "This", "function", "works", "properly", "when", "the", "spans", "are", "unlabeled", "(", "i", ".", "e", ".", "your", "labels", "are", "simply", "B", "M", "E", "and", "S", ")", "." ]
python
train
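A hedged usage sketch for the span extractor in the record above; it assumes allennlp is installed and that the import path matches the file shown in the record's URL, and the expected output is inferred from the function's logic rather than taken from the source.

from allennlp.data.dataset_readers.dataset_utils.span_utils import bmes_tags_to_spans

tags = ["B-PER", "E-PER", "S-LOC", "M-ORG"]  # the final tag is deliberately ill-formed
print(bmes_tags_to_spans(tags))
# expected: [('PER', (0, 1)), ('LOC', (2, 2)), ('ORG', (3, 3))]
# the ill-formed M-ORG still yields a single-token span, as the docstring describes.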
pschmitt/zhue
zhue/model/basemodel.py
https://github.com/pschmitt/zhue/blob/4a3f4ddf12ceeedcb2157f92d93ff1c6438a7d59/zhue/model/basemodel.py#L65-L75
def address(self):
    '''
    Return the address of this "object", minus the scheme,
    hostname and port of the bridge
    '''
    return self.API.replace(
        'http://{}:{}'.format(
            self._bridge.hostname,
            self._bridge.port
        ),
        ''
    )
[ "def", "address", "(", "self", ")", ":", "return", "self", ".", "API", ".", "replace", "(", "'http://{}:{}'", ".", "format", "(", "self", ".", "_bridge", ".", "hostname", ",", "self", ".", "_bridge", ".", "port", ")", ",", "''", ")" ]
Return the address of this "object", minus the scheme, hostname and port of the bridge
[ "Return", "the", "address", "of", "this", "object", "minus", "the", "scheme", "hostname", "and", "port", "of", "the", "bridge" ]
python
train
sprockets/sprockets.mixins.http
sprockets/mixins/http/__init__.py
https://github.com/sprockets/sprockets.mixins.http/blob/982219a10be979668726f573f324415fcf2020c8/sprockets/mixins/http/__init__.py#L94-L104
def body(self):
    """Returns the HTTP response body, deserialized if possible.

    :rtype: mixed

    """
    if not self._responses:
        return None
    if self._responses[-1].code >= 400:
        return self._error_message()
    return self._deserialize()
[ "def", "body", "(", "self", ")", ":", "if", "not", "self", ".", "_responses", ":", "return", "None", "if", "self", ".", "_responses", "[", "-", "1", "]", ".", "code", ">=", "400", ":", "return", "self", ".", "_error_message", "(", ")", "return", "self", ".", "_deserialize", "(", ")" ]
Returns the HTTP response body, deserialized if possible. :rtype: mixed
[ "Returns", "the", "HTTP", "response", "body", "deserialized", "if", "possible", "." ]
python
train
Unity-Technologies/ml-agents
ml-agents/mlagents/trainers/trainer_controller.py
https://github.com/Unity-Technologies/ml-agents/blob/37d139af636e4a2351751fbf0f2fca5a9ed7457f/ml-agents/mlagents/trainers/trainer_controller.py#L122-L169
def initialize_trainers(self, trainer_config: Dict[str, Dict[str, str]]):
    """
    Initialization of the trainers
    :param trainer_config: The configurations of the trainers
    """
    trainer_parameters_dict = {}
    for brain_name in self.external_brains:
        trainer_parameters = trainer_config['default'].copy()
        trainer_parameters['summary_path'] = '{basedir}/{name}'.format(
            basedir=self.summaries_dir,
            name=str(self.run_id) + '_' + brain_name)
        trainer_parameters['model_path'] = '{basedir}/{name}'.format(
            basedir=self.model_path,
            name=brain_name)
        trainer_parameters['keep_checkpoints'] = self.keep_checkpoints
        if brain_name in trainer_config:
            _brain_key = brain_name
            while not isinstance(trainer_config[_brain_key], dict):
                _brain_key = trainer_config[_brain_key]
            for k in trainer_config[_brain_key]:
                trainer_parameters[k] = trainer_config[_brain_key][k]
        trainer_parameters_dict[brain_name] = trainer_parameters.copy()
    for brain_name in self.external_brains:
        if trainer_parameters_dict[brain_name]['trainer'] == 'offline_bc':
            self.trainers[brain_name] = OfflineBCTrainer(
                self.external_brains[brain_name],
                trainer_parameters_dict[brain_name], self.train_model,
                self.load_model, self.seed, self.run_id)
        elif trainer_parameters_dict[brain_name]['trainer'] == 'online_bc':
            self.trainers[brain_name] = OnlineBCTrainer(
                self.external_brains[brain_name],
                trainer_parameters_dict[brain_name], self.train_model,
                self.load_model, self.seed, self.run_id)
        elif trainer_parameters_dict[brain_name]['trainer'] == 'ppo':
            self.trainers[brain_name] = PPOTrainer(
                self.external_brains[brain_name],
                self.meta_curriculum
                    .brains_to_curriculums[brain_name]
                    .min_lesson_length if self.meta_curriculum else 0,
                trainer_parameters_dict[brain_name],
                self.train_model, self.load_model, self.seed, self.run_id)
            self.trainer_metrics[brain_name] = self.trainers[brain_name].trainer_metrics
        else:
            raise UnityEnvironmentException('The trainer config contains '
                                            'an unknown trainer type for '
                                            'brain {}'
                                            .format(brain_name))
[ "def", "initialize_trainers", "(", "self", ",", "trainer_config", ":", "Dict", "[", "str", ",", "Dict", "[", "str", ",", "str", "]", "]", ")", ":", "trainer_parameters_dict", "=", "{", "}", "for", "brain_name", "in", "self", ".", "external_brains", ":", "trainer_parameters", "=", "trainer_config", "[", "'default'", "]", ".", "copy", "(", ")", "trainer_parameters", "[", "'summary_path'", "]", "=", "'{basedir}/{name}'", ".", "format", "(", "basedir", "=", "self", ".", "summaries_dir", ",", "name", "=", "str", "(", "self", ".", "run_id", ")", "+", "'_'", "+", "brain_name", ")", "trainer_parameters", "[", "'model_path'", "]", "=", "'{basedir}/{name}'", ".", "format", "(", "basedir", "=", "self", ".", "model_path", ",", "name", "=", "brain_name", ")", "trainer_parameters", "[", "'keep_checkpoints'", "]", "=", "self", ".", "keep_checkpoints", "if", "brain_name", "in", "trainer_config", ":", "_brain_key", "=", "brain_name", "while", "not", "isinstance", "(", "trainer_config", "[", "_brain_key", "]", ",", "dict", ")", ":", "_brain_key", "=", "trainer_config", "[", "_brain_key", "]", "for", "k", "in", "trainer_config", "[", "_brain_key", "]", ":", "trainer_parameters", "[", "k", "]", "=", "trainer_config", "[", "_brain_key", "]", "[", "k", "]", "trainer_parameters_dict", "[", "brain_name", "]", "=", "trainer_parameters", ".", "copy", "(", ")", "for", "brain_name", "in", "self", ".", "external_brains", ":", "if", "trainer_parameters_dict", "[", "brain_name", "]", "[", "'trainer'", "]", "==", "'offline_bc'", ":", "self", ".", "trainers", "[", "brain_name", "]", "=", "OfflineBCTrainer", "(", "self", ".", "external_brains", "[", "brain_name", "]", ",", "trainer_parameters_dict", "[", "brain_name", "]", ",", "self", ".", "train_model", ",", "self", ".", "load_model", ",", "self", ".", "seed", ",", "self", ".", "run_id", ")", "elif", "trainer_parameters_dict", "[", "brain_name", "]", "[", "'trainer'", "]", "==", "'online_bc'", ":", "self", ".", "trainers", "[", "brain_name", "]", "=", "OnlineBCTrainer", "(", "self", ".", "external_brains", "[", "brain_name", "]", ",", "trainer_parameters_dict", "[", "brain_name", "]", ",", "self", ".", "train_model", ",", "self", ".", "load_model", ",", "self", ".", "seed", ",", "self", ".", "run_id", ")", "elif", "trainer_parameters_dict", "[", "brain_name", "]", "[", "'trainer'", "]", "==", "'ppo'", ":", "self", ".", "trainers", "[", "brain_name", "]", "=", "PPOTrainer", "(", "self", ".", "external_brains", "[", "brain_name", "]", ",", "self", ".", "meta_curriculum", ".", "brains_to_curriculums", "[", "brain_name", "]", ".", "min_lesson_length", "if", "self", ".", "meta_curriculum", "else", "0", ",", "trainer_parameters_dict", "[", "brain_name", "]", ",", "self", ".", "train_model", ",", "self", ".", "load_model", ",", "self", ".", "seed", ",", "self", ".", "run_id", ")", "self", ".", "trainer_metrics", "[", "brain_name", "]", "=", "self", ".", "trainers", "[", "brain_name", "]", ".", "trainer_metrics", "else", ":", "raise", "UnityEnvironmentException", "(", "'The trainer config contains '", "'an unknown trainer type for '", "'brain {}'", ".", "format", "(", "brain_name", ")", ")" ]
Initialization of the trainers :param trainer_config: The configurations of the trainers
[ "Initialization", "of", "the", "trainers", ":", "param", "trainer_config", ":", "The", "configurations", "of", "the", "trainers" ]
python
train
kwikteam/phy
phy/electrode/mea.py
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/electrode/mea.py#L53-L57
def _probe_positions(probe, group):
    """Return the positions of a probe channel group."""
    positions = probe['channel_groups'][group]['geometry']
    channels = _probe_channels(probe, group)
    return np.array([positions[channel] for channel in channels])
[ "def", "_probe_positions", "(", "probe", ",", "group", ")", ":", "positions", "=", "probe", "[", "'channel_groups'", "]", "[", "group", "]", "[", "'geometry'", "]", "channels", "=", "_probe_channels", "(", "probe", ",", "group", ")", "return", "np", ".", "array", "(", "[", "positions", "[", "channel", "]", "for", "channel", "in", "channels", "]", ")" ]
Return the positions of a probe channel group.
[ "Return", "the", "positions", "of", "a", "probe", "channel", "group", "." ]
python
train
markovmodel/PyEMMA
pyemma/_base/estimator.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/_base/estimator.py#L194-L369
def estimate_param_scan(estimator, X, param_sets, evaluate=None, evaluate_args=None,
                        failfast=True, return_estimators=False, n_jobs=1,
                        progress_reporter=None, show_progress=True,
                        return_exceptions=False):
    """ Runs multiple estimations using a list of parameter settings

    Parameters
    ----------
    estimator : Estimator object or class
        An estimator object that provides an estimate(X, **params) function.
        If only a class is provided here, the Estimator objects will be
        constructed with default parameter settings, and the parameter settings
        from param_sets for each estimation. If you want to specify other
        parameter settings for those parameters not specified in param_sets,
        construct an Estimator before and pass the object.

    param_sets : iterable over dictionaries
        An iterable that provides parameter settings. Each element defines a
        parameter set, for which an estimation will be run using these
        parameters in estimate(X, **params). All other parameter settings will
        be taken from the default settings in the estimator object.

    evaluate : str or list of str, optional
        The given methods or properties will be called on the estimated
        models, and their results will be returned instead of the full models.
        This may be useful for reducing memory overhead.

    evaluate_args: iterable of iterable, optional
        Arguments to be passed to evaluated methods. Note, that size has to
        match to the size of evaluate.

    failfast : bool
        If True, will raise an exception when estimation failed with an exception
        or trying to calls a method that doesn't exist. If False, will simply
        return None in these cases.

    return_estimators: bool
        If True, return a list estimators in addition to the models.

    show_progress: bool
        if the given estimator supports show_progress interface, we set the flag
        prior doing estimations.

    return_exceptions: bool, default=False
        if failfast is False while this setting is True, returns the exception
        thrown at the actual grid element, instead of None.

    Returns
    -------
    models : list of model objects or evaluated function values
        A list of estimated models in the same order as param_sets. If evaluate
        is given, each element will contain the results from these method
        evaluations.

    estimators (optional) : list of estimator objects. These are returned only
        if return_estimators=True

    Examples
    --------

    Estimate a maximum likelihood Markov model at lag times 1, 2, 3.

    >>> from pyemma.msm.estimators import MaximumLikelihoodMSM, BayesianMSM
    >>>
    >>> dtraj = [0,0,1,2,1,0,1,0,1,2,2,0,0,0,1,1,2,1,0,0,1,2,1,0,0,0,1,1,0,1,2]  # mini-trajectory
    >>> param_sets=param_grid({'lag': [1,2,3]})
    >>>
    >>> estimate_param_scan(MaximumLikelihoodMSM, dtraj, param_sets, evaluate='timescales')
    [array([ 1.24113168,  0.77454377]), array([ 2.65266698,  1.42909842]), array([ 5.34810405,  1.14784446])]

    Now we also want to get samples of the timescales using the BayesianMSM.

    >>> estimate_param_scan(MaximumLikelihoodMSM, dtraj, param_sets, failfast=False,
    ...     evaluate=['timescales', 'timescales_samples']) # doctest: +SKIP
    [[array([ 1.24113168,  0.77454377]), None], [array([ 2.48226337,  1.54908754]), None],
     [array([ 3.72339505,  2.32363131]), None]]

    We get Nones because the MaximumLikelihoodMSM estimator doesn't provide
    timescales_samples. Use for example a Bayesian estimator for that.

    Now we also want to get samples of the timescales using the BayesianMSM.

    >>> estimate_param_scan(BayesianMSM, dtraj, param_sets, show_progress=False,
    ...     evaluate=['timescales', 'sample_f'], evaluate_args=((), ('timescales', ))) # doctest: +SKIP
    [[array([ 1.24357685,  0.77609028]), [array([ 1.5963252 ,  0.73877883]), array([ 1.29915847,  0.49004912]),
      array([ 0.90058583,  0.73841786]), ... ]]

    """
    # make sure we have an estimator object
    estimator = get_estimator(estimator)
    if hasattr(estimator, 'show_progress'):
        estimator.show_progress = show_progress

    if n_jobs is None:
        from pyemma._base.parallel import get_n_jobs
        n_jobs = get_n_jobs(logger=getattr(estimator, 'logger', None))

    # if we want to return estimators, make clones. Otherwise just copy references.
    # For parallel processing we always need clones.
    # Also if the Estimator is its own Model, we have to clone.
    from pyemma._base.model import Model
    if (return_estimators or n_jobs > 1 or n_jobs is None
            or isinstance(estimator, Model)):
        estimators = [clone_estimator(estimator) for _ in param_sets]
    else:
        estimators = [estimator for _ in param_sets]

    # only show progress of parameter study.
    if hasattr(estimators[0], 'show_progress'):
        for e in estimators:
            e.show_progress = False

    # if we evaluate, make sure we have a list of functions to evaluate
    if _types.is_string(evaluate):
        evaluate = [evaluate]
    if _types.is_string(evaluate_args):
        evaluate_args = [evaluate_args]

    if evaluate is not None and evaluate_args is not None and len(evaluate) != len(evaluate_args):
        raise ValueError("length mismatch: evaluate ({}) and evaluate_args ({})".format(
            len(evaluate), len(evaluate_args)))

    logger_available = hasattr(estimators[0], 'logger')
    if logger_available:
        logger = estimators[0].logger
    if progress_reporter is None:
        from unittest.mock import MagicMock
        ctx = progress_reporter = MagicMock()
        callback = None
    else:
        ctx = progress_reporter._progress_context('param-scan')
        callback = lambda _: progress_reporter._progress_update(1, stage='param-scan')
        progress_reporter._progress_register(len(estimators), stage='param-scan',
                                             description="estimating %s" % str(estimator.__class__.__name__))

    # TODO: test on win, osx
    if n_jobs > 1 and os.name == 'posix':
        if logger_available:
            logger.debug('estimating %s with n_jobs=%s', estimator, n_jobs)
        # iterate over parameter settings
        task_iter = ((estimator, param_set, X, evaluate, evaluate_args, failfast, return_exceptions)
                     for estimator, param_set in zip(estimators, param_sets))

        from pathos.multiprocessing import Pool
        pool = Pool(processes=n_jobs)
        args = list(task_iter)

        from contextlib import closing

        def error_callback(*args, **kw):
            if failfast:
                # TODO: can we be specific here? eg. obtain the stack of the actual process or is this the master proc?
                raise Exception('something failed')

        with closing(pool), ctx:
            res_async = [pool.apply_async(_estimate_param_scan_worker, a, callback=callback,
                                          error_callback=error_callback) for a in args]
            res = [x.get() for x in res_async]

    # if n_jobs=1 don't invoke the pool, but directly dispatch the iterator
    else:
        if logger_available:
            logger.debug('estimating %s with n_jobs=1 because of the setting or '
                         'you not have a POSIX system', estimator)
        res = []
        with ctx:
            for estimator, param_set in zip(estimators, param_sets):
                res.append(_estimate_param_scan_worker(estimator, param_set, X,
                                                       evaluate, evaluate_args, failfast,
                                                       return_exceptions))
                if progress_reporter is not None:
                    progress_reporter._progress_update(1, stage='param-scan')

    # done
    if return_estimators:
        return res, estimators
    else:
        return res
[ "def", "estimate_param_scan", "(", "estimator", ",", "X", ",", "param_sets", ",", "evaluate", "=", "None", ",", "evaluate_args", "=", "None", ",", "failfast", "=", "True", ",", "return_estimators", "=", "False", ",", "n_jobs", "=", "1", ",", "progress_reporter", "=", "None", ",", "show_progress", "=", "True", ",", "return_exceptions", "=", "False", ")", ":", "# make sure we have an estimator object", "estimator", "=", "get_estimator", "(", "estimator", ")", "if", "hasattr", "(", "estimator", ",", "'show_progress'", ")", ":", "estimator", ".", "show_progress", "=", "show_progress", "if", "n_jobs", "is", "None", ":", "from", "pyemma", ".", "_base", ".", "parallel", "import", "get_n_jobs", "n_jobs", "=", "get_n_jobs", "(", "logger", "=", "getattr", "(", "estimator", ",", "'logger'", ",", "None", ")", ")", "# if we want to return estimators, make clones. Otherwise just copy references.", "# For parallel processing we always need clones.", "# Also if the Estimator is its own Model, we have to clone.", "from", "pyemma", ".", "_base", ".", "model", "import", "Model", "if", "(", "return_estimators", "or", "n_jobs", ">", "1", "or", "n_jobs", "is", "None", "or", "isinstance", "(", "estimator", ",", "Model", ")", ")", ":", "estimators", "=", "[", "clone_estimator", "(", "estimator", ")", "for", "_", "in", "param_sets", "]", "else", ":", "estimators", "=", "[", "estimator", "for", "_", "in", "param_sets", "]", "# only show progress of parameter study.", "if", "hasattr", "(", "estimators", "[", "0", "]", ",", "'show_progress'", ")", ":", "for", "e", "in", "estimators", ":", "e", ".", "show_progress", "=", "False", "# if we evaluate, make sure we have a list of functions to evaluate", "if", "_types", ".", "is_string", "(", "evaluate", ")", ":", "evaluate", "=", "[", "evaluate", "]", "if", "_types", ".", "is_string", "(", "evaluate_args", ")", ":", "evaluate_args", "=", "[", "evaluate_args", "]", "if", "evaluate", "is", "not", "None", "and", "evaluate_args", "is", "not", "None", "and", "len", "(", "evaluate", ")", "!=", "len", "(", "evaluate_args", ")", ":", "raise", "ValueError", "(", "\"length mismatch: evaluate ({}) and evaluate_args ({})\"", ".", "format", "(", "len", "(", "evaluate", ")", ",", "len", "(", "evaluate_args", ")", ")", ")", "logger_available", "=", "hasattr", "(", "estimators", "[", "0", "]", ",", "'logger'", ")", "if", "logger_available", ":", "logger", "=", "estimators", "[", "0", "]", ".", "logger", "if", "progress_reporter", "is", "None", ":", "from", "unittest", ".", "mock", "import", "MagicMock", "ctx", "=", "progress_reporter", "=", "MagicMock", "(", ")", "callback", "=", "None", "else", ":", "ctx", "=", "progress_reporter", ".", "_progress_context", "(", "'param-scan'", ")", "callback", "=", "lambda", "_", ":", "progress_reporter", ".", "_progress_update", "(", "1", ",", "stage", "=", "'param-scan'", ")", "progress_reporter", ".", "_progress_register", "(", "len", "(", "estimators", ")", ",", "stage", "=", "'param-scan'", ",", "description", "=", "\"estimating %s\"", "%", "str", "(", "estimator", ".", "__class__", ".", "__name__", ")", ")", "# TODO: test on win, osx", "if", "n_jobs", ">", "1", "and", "os", ".", "name", "==", "'posix'", ":", "if", "logger_available", ":", "logger", ".", "debug", "(", "'estimating %s with n_jobs=%s'", ",", "estimator", ",", "n_jobs", ")", "# iterate over parameter settings", "task_iter", "=", "(", "(", "estimator", ",", "param_set", ",", "X", ",", "evaluate", ",", "evaluate_args", ",", "failfast", ",", "return_exceptions", ")", "for", 
"estimator", ",", "param_set", "in", "zip", "(", "estimators", ",", "param_sets", ")", ")", "from", "pathos", ".", "multiprocessing", "import", "Pool", "pool", "=", "Pool", "(", "processes", "=", "n_jobs", ")", "args", "=", "list", "(", "task_iter", ")", "from", "contextlib", "import", "closing", "def", "error_callback", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "if", "failfast", ":", "# TODO: can we be specific here? eg. obtain the stack of the actual process or is this the master proc?", "raise", "Exception", "(", "'something failed'", ")", "with", "closing", "(", "pool", ")", ",", "ctx", ":", "res_async", "=", "[", "pool", ".", "apply_async", "(", "_estimate_param_scan_worker", ",", "a", ",", "callback", "=", "callback", ",", "error_callback", "=", "error_callback", ")", "for", "a", "in", "args", "]", "res", "=", "[", "x", ".", "get", "(", ")", "for", "x", "in", "res_async", "]", "# if n_jobs=1 don't invoke the pool, but directly dispatch the iterator", "else", ":", "if", "logger_available", ":", "logger", ".", "debug", "(", "'estimating %s with n_jobs=1 because of the setting or '", "'you not have a POSIX system'", ",", "estimator", ")", "res", "=", "[", "]", "with", "ctx", ":", "for", "estimator", ",", "param_set", "in", "zip", "(", "estimators", ",", "param_sets", ")", ":", "res", ".", "append", "(", "_estimate_param_scan_worker", "(", "estimator", ",", "param_set", ",", "X", ",", "evaluate", ",", "evaluate_args", ",", "failfast", ",", "return_exceptions", ")", ")", "if", "progress_reporter", "is", "not", "None", ":", "progress_reporter", ".", "_progress_update", "(", "1", ",", "stage", "=", "'param-scan'", ")", "# done", "if", "return_estimators", ":", "return", "res", ",", "estimators", "else", ":", "return", "res" ]
Runs multiple estimations using a list of parameter settings Parameters ---------- estimator : Estimator object or class An estimator object that provides an estimate(X, **params) function. If only a class is provided here, the Estimator objects will be constructed with default parameter settings, and the parameter settings from param_sets for each estimation. If you want to specify other parameter settings for those parameters not specified in param_sets, construct an Estimator before and pass the object. param_sets : iterable over dictionaries An iterable that provides parameter settings. Each element defines a parameter set, for which an estimation will be run using these parameters in estimate(X, **params). All other parameter settings will be taken from the default settings in the estimator object. evaluate : str or list of str, optional The given methods or properties will be called on the estimated models, and their results will be returned instead of the full models. This may be useful for reducing memory overhead. evaluate_args: iterable of iterable, optional Arguments to be passed to evaluated methods. Note, that size has to match to the size of evaluate. failfast : bool If True, will raise an exception when estimation failed with an exception or trying to calls a method that doesn't exist. If False, will simply return None in these cases. return_estimators: bool If True, return a list estimators in addition to the models. show_progress: bool if the given estimator supports show_progress interface, we set the flag prior doing estimations. return_exceptions: bool, default=False if failfast is False while this setting is True, returns the exception thrown at the actual grid element, instead of None. Returns ------- models : list of model objects or evaluated function values A list of estimated models in the same order as param_sets. If evaluate is given, each element will contain the results from these method evaluations. estimators (optional) : list of estimator objects. These are returned only if return_estimators=True Examples -------- Estimate a maximum likelihood Markov model at lag times 1, 2, 3. >>> from pyemma.msm.estimators import MaximumLikelihoodMSM, BayesianMSM >>> >>> dtraj = [0,0,1,2,1,0,1,0,1,2,2,0,0,0,1,1,2,1,0,0,1,2,1,0,0,0,1,1,0,1,2] # mini-trajectory >>> param_sets=param_grid({'lag': [1,2,3]}) >>> >>> estimate_param_scan(MaximumLikelihoodMSM, dtraj, param_sets, evaluate='timescales') [array([ 1.24113168, 0.77454377]), array([ 2.65266698, 1.42909842]), array([ 5.34810405, 1.14784446])] Now we also want to get samples of the timescales using the BayesianMSM. >>> estimate_param_scan(MaximumLikelihoodMSM, dtraj, param_sets, failfast=False, ... evaluate=['timescales', 'timescales_samples']) # doctest: +SKIP [[array([ 1.24113168, 0.77454377]), None], [array([ 2.48226337, 1.54908754]), None], [array([ 3.72339505, 2.32363131]), None]] We get Nones because the MaximumLikelihoodMSM estimator doesn't provide timescales_samples. Use for example a Bayesian estimator for that. Now we also want to get samples of the timescales using the BayesianMSM. >>> estimate_param_scan(BayesianMSM, dtraj, param_sets, show_progress=False, ... evaluate=['timescales', 'sample_f'], evaluate_args=((), ('timescales', ))) # doctest: +SKIP [[array([ 1.24357685, 0.77609028]), [array([ 1.5963252 , 0.73877883]), array([ 1.29915847, 0.49004912]), array([ 0.90058583, 0.73841786]), ... ]]
[ "Runs", "multiple", "estimations", "using", "a", "list", "of", "parameter", "settings" ]
python
train
mila-iqia/fuel
fuel/transformers/__init__.py
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/transformers/__init__.py#L34-L67
def verify_axis_labels(self, expected, actual, source_name):
    """Verify that axis labels for a given source are as expected.

    Parameters
    ----------
    expected : tuple
        A tuple of strings representing the expected axis labels.
    actual : tuple or None
        A tuple of strings representing the actual axis labels, or
        `None` if they could not be determined.
    source_name : str
        The name of the source being checked. Used for caching the
        results of checks so that the check is only performed once.

    Notes
    -----
    Logs a warning in case of `actual=None`, raises an error on
    other mismatches.

    """
    if not getattr(self, '_checked_axis_labels', False):
        self._checked_axis_labels = defaultdict(bool)
    if not self._checked_axis_labels[source_name]:
        if actual is None:
            log.warning("%s instance could not verify (missing) axis "
                        "expected %s, got None",
                        self.__class__.__name__, expected)
        else:
            if expected != actual:
                raise AxisLabelsMismatchError("{} expected axis labels "
                                              "{}, got {} instead".format(
                                                  self.__class__.__name__,
                                                  expected, actual))
        self._checked_axis_labels[source_name] = True
[ "def", "verify_axis_labels", "(", "self", ",", "expected", ",", "actual", ",", "source_name", ")", ":", "if", "not", "getattr", "(", "self", ",", "'_checked_axis_labels'", ",", "False", ")", ":", "self", ".", "_checked_axis_labels", "=", "defaultdict", "(", "bool", ")", "if", "not", "self", ".", "_checked_axis_labels", "[", "source_name", "]", ":", "if", "actual", "is", "None", ":", "log", ".", "warning", "(", "\"%s instance could not verify (missing) axis \"", "\"expected %s, got None\"", ",", "self", ".", "__class__", ".", "__name__", ",", "expected", ")", "else", ":", "if", "expected", "!=", "actual", ":", "raise", "AxisLabelsMismatchError", "(", "\"{} expected axis labels \"", "\"{}, got {} instead\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ",", "expected", ",", "actual", ")", ")", "self", ".", "_checked_axis_labels", "[", "source_name", "]", "=", "True" ]
Verify that axis labels for a given source are as expected. Parameters ---------- expected : tuple A tuple of strings representing the expected axis labels. actual : tuple or None A tuple of strings representing the actual axis labels, or `None` if they could not be determined. source_name : str The name of the source being checked. Used for caching the results of checks so that the check is only performed once. Notes ----- Logs a warning in case of `actual=None`, raises an error on other mismatches.
[ "Verify", "that", "axis", "labels", "for", "a", "given", "source", "are", "as", "expected", "." ]
python
train
pip-services3-python/pip-services3-commons-python
pip_services3_commons/reflect/TypeReflector.py
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/reflect/TypeReflector.py#L125-L140
def is_primitive(value):
    """
    Checks if value has primitive type.

    Primitive types are: numbers, strings, booleans, date and time.
    Complex (non-primitive types are): objects, maps and arrays

    :param value: a value to check

    :return: true if the value has primitive type and false if value type is complex.
    """
    typeCode = TypeConverter.to_type_code(value)
    return typeCode == TypeCode.String or typeCode == TypeCode.Enum or typeCode == TypeCode.Boolean \
        or typeCode == TypeCode.Integer or typeCode == TypeCode.Long \
        or typeCode == TypeCode.Float or typeCode == TypeCode.Double \
        or typeCode == TypeCode.DateTime or typeCode == TypeCode.Duration
[ "def", "is_primitive", "(", "value", ")", ":", "typeCode", "=", "TypeConverter", ".", "to_type_code", "(", "value", ")", "return", "typeCode", "==", "TypeCode", ".", "String", "or", "typeCode", "==", "TypeCode", ".", "Enum", "or", "typeCode", "==", "TypeCode", ".", "Boolean", "or", "typeCode", "==", "TypeCode", ".", "Integer", "or", "typeCode", "==", "TypeCode", ".", "Long", "or", "typeCode", "==", "TypeCode", ".", "Float", "or", "typeCode", "==", "TypeCode", ".", "Double", "or", "typeCode", "==", "TypeCode", ".", "DateTime", "or", "typeCode", "==", "TypeCode", ".", "Duration" ]
Checks if value has primitive type. Primitive types are: numbers, strings, booleans, date and time. Complex (non-primitive types are): objects, maps and arrays :param value: a value to check :return: true if the value has primitive type and false if value type is complex.
[ "Checks", "if", "value", "has", "primitive", "type", "." ]
python
train
PeerAssets/pypeerassets
pypeerassets/provider/cryptoid.py
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/provider/cryptoid.py#L44-L52
def get_url(url: str) -> Union[dict, int, float, str]:
    '''Perform a GET request for the url and return a dictionary parsed from the JSON response.'''
    request = Request(url, headers={"User-Agent": "pypeerassets"})
    response = cast(HTTPResponse, urlopen(request))
    if response.status != 200:
        raise Exception(response.reason)
    return json.loads(response.read().decode())
[ "def", "get_url", "(", "url", ":", "str", ")", "->", "Union", "[", "dict", ",", "int", ",", "float", ",", "str", "]", ":", "request", "=", "Request", "(", "url", ",", "headers", "=", "{", "\"User-Agent\"", ":", "\"pypeerassets\"", "}", ")", "response", "=", "cast", "(", "HTTPResponse", ",", "urlopen", "(", "request", ")", ")", "if", "response", ".", "status", "!=", "200", ":", "raise", "Exception", "(", "response", ".", "reason", ")", "return", "json", ".", "loads", "(", "response", ".", "read", "(", ")", ".", "decode", "(", ")", ")" ]
Perform a GET request for the url and return a dictionary parsed from the JSON response.
[ "Perform", "a", "GET", "request", "for", "the", "url", "and", "return", "a", "dictionary", "parsed", "from", "the", "JSON", "response", "." ]
python
train
benley/butcher
butcher/buildfile.py
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/buildfile.py#L122-L126
def local_targets(self):
    """Iterator over the targets defined in this build file."""
    for node in self.node:
        if (node.repo, node.path) == (self.target.repo, self.target.path):
            yield node
[ "def", "local_targets", "(", "self", ")", ":", "for", "node", "in", "self", ".", "node", ":", "if", "(", "node", ".", "repo", ",", "node", ".", "path", ")", "==", "(", "self", ".", "target", ".", "repo", ",", "self", ".", "target", ".", "path", ")", ":", "yield", "node" ]
Iterator over the targets defined in this build file.
[ "Iterator", "over", "the", "targets", "defined", "in", "this", "build", "file", "." ]
python
train
getfleety/coralillo
coralillo/core.py
https://github.com/getfleety/coralillo/blob/9cac101738a0fa7c1106f129604c00ef703370e1/coralillo/core.py#L460-L482
def delete(self):
    ''' Deletes this model from the database, calling delete in each field
    to properly delete special cases '''
    redis = type(self).get_redis()

    for fieldname, field in self.proxy:
        field.delete(redis)

    redis.delete(self.key())
    redis.srem(type(self).members_key(), self.id)

    if isinstance(self, PermissionHolder):
        redis.delete(self.allow_key())

    if self.notify:
        data = json.dumps({
            'event': 'delete',
            'data': self.to_json(),
        })
        redis.publish(type(self).cls_key(), data)
        redis.publish(self.key(), data)

    return self
[ "def", "delete", "(", "self", ")", ":", "redis", "=", "type", "(", "self", ")", ".", "get_redis", "(", ")", "for", "fieldname", ",", "field", "in", "self", ".", "proxy", ":", "field", ".", "delete", "(", "redis", ")", "redis", ".", "delete", "(", "self", ".", "key", "(", ")", ")", "redis", ".", "srem", "(", "type", "(", "self", ")", ".", "members_key", "(", ")", ",", "self", ".", "id", ")", "if", "isinstance", "(", "self", ",", "PermissionHolder", ")", ":", "redis", ".", "delete", "(", "self", ".", "allow_key", "(", ")", ")", "if", "self", ".", "notify", ":", "data", "=", "json", ".", "dumps", "(", "{", "'event'", ":", "'delete'", ",", "'data'", ":", "self", ".", "to_json", "(", ")", ",", "}", ")", "redis", ".", "publish", "(", "type", "(", "self", ")", ".", "cls_key", "(", ")", ",", "data", ")", "redis", ".", "publish", "(", "self", ".", "key", "(", ")", ",", "data", ")", "return", "self" ]
Deletes this model from the database, calling delete in each field to properly delete special cases
[ "Deletes", "this", "model", "from", "the", "database", "calling", "delete", "in", "each", "field", "to", "properly", "delete", "special", "cases" ]
python
train
saltstack/salt
salt/states/zenoss.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/zenoss.py#L37-L92
def monitored(name, device_class=None, collector='localhost', prod_state=None):
    '''
    Ensure a device is monitored. The 'name' given will be used for Zenoss device name and should be resolvable.

    .. code-block:: yaml

        enable_monitoring:
          zenoss.monitored:
            - name: web01.example.com
            - device_class: /Servers/Linux
            - collector: localhost
            - prod_state: 1000
    '''

    ret = {}
    ret['name'] = name

    # If device is already monitored, return early
    device = __salt__['zenoss.find_device'](name)

    if device:
        ret['result'] = True
        ret['changes'] = None
        ret['comment'] = '{0} is already monitored'.format(name)

        # if prod_state is set, ensure it matches with the current state
        if prod_state is not None and device['productionState'] != prod_state:
            if __opts__['test']:
                ret['comment'] = '{0} is already monitored but prodState will be updated'.format(name)
                ret['result'] = None
            else:
                __salt__['zenoss.set_prod_state'](prod_state, name)
                ret['comment'] = '{0} is already monitored but prodState was updated'.format(name)

            ret['changes'] = {
                'old': 'prodState == {0}'.format(device['productionState']),
                'new': 'prodState == {0}'.format(prod_state)
            }

        return ret

    # Device not yet in Zenoss
    if __opts__['test']:
        ret['comment'] = 'The state of "{0}" will be changed.'.format(name)
        ret['changes'] = {'old': 'monitored == False', 'new': 'monitored == True'}
        ret['result'] = None
        return ret

    # Add and check result
    if __salt__['zenoss.add_device'](name, device_class, collector, prod_state):
        ret['result'] = True
        ret['changes'] = {'old': 'monitored == False', 'new': 'monitored == True'}
        ret['comment'] = '{0} has been added to Zenoss'.format(name)
    else:
        ret['result'] = False
        ret['changes'] = None
        ret['comment'] = 'Unable to add {0} to Zenoss'.format(name)

    return ret
[ "def", "monitored", "(", "name", ",", "device_class", "=", "None", ",", "collector", "=", "'localhost'", ",", "prod_state", "=", "None", ")", ":", "ret", "=", "{", "}", "ret", "[", "'name'", "]", "=", "name", "# If device is already monitored, return early", "device", "=", "__salt__", "[", "'zenoss.find_device'", "]", "(", "name", ")", "if", "device", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'changes'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "'{0} is already monitored'", ".", "format", "(", "name", ")", "# if prod_state is set, ensure it matches with the current state", "if", "prod_state", "is", "not", "None", "and", "device", "[", "'productionState'", "]", "!=", "prod_state", ":", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'{0} is already monitored but prodState will be updated'", ".", "format", "(", "name", ")", "ret", "[", "'result'", "]", "=", "None", "else", ":", "__salt__", "[", "'zenoss.set_prod_state'", "]", "(", "prod_state", ",", "name", ")", "ret", "[", "'comment'", "]", "=", "'{0} is already monitored but prodState was updated'", ".", "format", "(", "name", ")", "ret", "[", "'changes'", "]", "=", "{", "'old'", ":", "'prodState == {0}'", ".", "format", "(", "device", "[", "'productionState'", "]", ")", ",", "'new'", ":", "'prodState == {0}'", ".", "format", "(", "prod_state", ")", "}", "return", "ret", "# Device not yet in Zenoss", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'The state of \"{0}\" will be changed.'", ".", "format", "(", "name", ")", "ret", "[", "'changes'", "]", "=", "{", "'old'", ":", "'monitored == False'", ",", "'new'", ":", "'monitored == True'", "}", "ret", "[", "'result'", "]", "=", "None", "return", "ret", "# Add and check result", "if", "__salt__", "[", "'zenoss.add_device'", "]", "(", "name", ",", "device_class", ",", "collector", ",", "prod_state", ")", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'changes'", "]", "=", "{", "'old'", ":", "'monitored == False'", ",", "'new'", ":", "'monitored == True'", "}", "ret", "[", "'comment'", "]", "=", "'{0} has been added to Zenoss'", ".", "format", "(", "name", ")", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'changes'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "'Unable to add {0} to Zenoss'", ".", "format", "(", "name", ")", "return", "ret" ]
Ensure a device is monitored. The 'name' given will be used for Zenoss device name and should be resolvable. .. code-block:: yaml enable_monitoring: zenoss.monitored: - name: web01.example.com - device_class: /Servers/Linux - collector: localhost - prod_state: 1000
[ "Ensure", "a", "device", "is", "monitored", ".", "The", "name", "given", "will", "be", "used", "for", "Zenoss", "device", "name", "and", "should", "be", "resolvable", "." ]
python
train
programa-stic/barf-project
barf/barf.py
https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/barf.py#L382-L430
def emulate(self, context=None, start=None, end=None, arch_mode=None, hooks=None,
            max_instrs=None, print_asm=False):
    """Emulate native code.

    Args:
        context (dict): Processor context (register and/or memory).
        start (int): Start address.
        end (int): End address.
        arch_mode (int): Architecture mode.
        hooks (dict): Hooks by address.
        max_instrs (int): Maximum number of instructions to execute.
        print_asm (bool): Print asm.

    Returns:
        dict: Processor context.
    """
    if arch_mode is not None:
        # Reload modules.
        self._load(arch_mode=arch_mode)

    context = context if context else {}

    start_addr = start if start else self.binary.ea_start
    end_addr = end if end else self.binary.ea_end

    hooks = hooks if hooks else {}

    # Load registers
    for reg, val in context.get('registers', {}).items():
        self.ir_emulator.registers[reg] = val

    # Load memory
    # TODO Memory content should be encoded as hex strings so each
    # entry can be of different sizes.
    for addr, val in context.get('memory', {}).items():
        self.ir_emulator.memory.write(addr, 4, val)

    # Execute the code.
    self.emulator.emulate(start_addr, end_addr, hooks, max_instrs, print_asm)

    context_out = {
        'registers': {},
        'memory': {}
    }

    # save registers
    for reg, val in self.ir_emulator.registers.items():
        context_out['registers'][reg] = val

    return context_out
[ "def", "emulate", "(", "self", ",", "context", "=", "None", ",", "start", "=", "None", ",", "end", "=", "None", ",", "arch_mode", "=", "None", ",", "hooks", "=", "None", ",", "max_instrs", "=", "None", ",", "print_asm", "=", "False", ")", ":", "if", "arch_mode", "is", "not", "None", ":", "# Reload modules.", "self", ".", "_load", "(", "arch_mode", "=", "arch_mode", ")", "context", "=", "context", "if", "context", "else", "{", "}", "start_addr", "=", "start", "if", "start", "else", "self", ".", "binary", ".", "ea_start", "end_addr", "=", "end", "if", "end", "else", "self", ".", "binary", ".", "ea_end", "hooks", "=", "hooks", "if", "hooks", "else", "{", "}", "# Load registers", "for", "reg", ",", "val", "in", "context", ".", "get", "(", "'registers'", ",", "{", "}", ")", ".", "items", "(", ")", ":", "self", ".", "ir_emulator", ".", "registers", "[", "reg", "]", "=", "val", "# Load memory", "# TODO Memory content should be encoded as hex strings so each", "# entry can be of different sizes.", "for", "addr", ",", "val", "in", "context", ".", "get", "(", "'memory'", ",", "{", "}", ")", ".", "items", "(", ")", ":", "self", ".", "ir_emulator", ".", "memory", ".", "write", "(", "addr", ",", "4", ",", "val", ")", "# Execute the code.", "self", ".", "emulator", ".", "emulate", "(", "start_addr", ",", "end_addr", ",", "hooks", ",", "max_instrs", ",", "print_asm", ")", "context_out", "=", "{", "'registers'", ":", "{", "}", ",", "'memory'", ":", "{", "}", "}", "# save registers", "for", "reg", ",", "val", "in", "self", ".", "ir_emulator", ".", "registers", ".", "items", "(", ")", ":", "context_out", "[", "'registers'", "]", "[", "reg", "]", "=", "val", "return", "context_out" ]
Emulate native code. Args: context (dict): Processor context (register and/or memory). start (int): Start address. end (int): End address. arch_mode (int): Architecture mode. hooks (dict): Hooks by address. max_instrs (int): Maximum number of instructions to execute. print_asm (bool): Print asm. Returns: dict: Processor context.
[ "Emulate", "native", "code", "." ]
python
train
F-Secure/see
see/environment.py
https://github.com/F-Secure/see/blob/3e053e52a45229f96a12db9e98caf7fb3880e811/see/environment.py#L77-L81
def allocate(self):
    """Builds the context and the Hooks."""
    self.logger.debug("Allocating environment.")
    self._allocate()
    self.logger.debug("Environment successfully allocated.")
[ "def", "allocate", "(", "self", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Allocating environment.\"", ")", "self", ".", "_allocate", "(", ")", "self", ".", "logger", ".", "debug", "(", "\"Environment successfully allocated.\"", ")" ]
Builds the context and the Hooks.
[ "Builds", "the", "context", "and", "the", "Hooks", "." ]
python
train
zfrenchee/Axial
axial/axial.py
https://github.com/zfrenchee/Axial/blob/69672b4ce46518e0348dedb74a907cc73f71c0fe/axial/axial.py#L364-L425
def heatmap(genes_by_samples_matrix, sample_attributes, title='Axial Heatmap',
            scripts_mode="CDN", data_mode="directory",
            organism="human", separate_zscore_by=["system"],
            output_dir=".", filename="heatmap.html",
            version=this_version):
    """
    Arguments:
        genes_by_samples_matrix (pandas.DataFrame): dataframe indexed by genes, columns are samples
        sample_attributes (pandas.DataFrame): dataframe indexed by samples, columns are sample attributes (e.g. classes)
        title (str): The title of the plot (to be embedded in the html).
        scripts_mode (str): Choose from [`"CDN"`, `"directory"`, `"inline"`]:
            - `"CDN"` compiles a single HTML page with links to scripts hosted on a CDN,
            - `"directory"` compiles a directory with all scripts locally cached,
            - `"inline"` compiles a single HTML file with all scripts/styles inlined.
        data_mode (str): Choose from ["directory", "inline"]:
            - "directory" compiles a directory with all data locally cached,
            - "inline" compiles a single HTML file with all data inlined.
        organism (str): `"human"` or `"mouse"`
        separate_zscore_by (list):
        output_dir (str): the directory in which to output the file
        filename (str): the filename of the output file
        version (str): the version of the javascripts to use.
            Leave the default to pin the version, or choose "latest" to get updates,
            or choose part of the version string to get minor updates.

    Returns:
        Path: The filepath which the html was outputted to.
    """
    output_dir = Path(output_dir)
    output_dir.mkdir(exist_ok=True, parents=True)

    # Data =======================
    _verify_sample_by_genes_matrix(genes_by_samples_matrix)
    _verify_sample_attributes(genes_by_samples_matrix, sample_attributes)

    genes_by_samples_matrix = genes_by_samples_matrix.round(2)
    # TODO drop all zero rows

    matrix = f"var matrix = {genes_by_samples_matrix.to_json(orient='columns')};"
    classes = f"var classes = {sample_attributes.to_json(orient='index')};"

    data_block = _data_block(data_mode, [('matrix', matrix), ('classes', classes)],
                             output_dir, organism=organism)

    # Scripts =======================
    scripts = third_party_scripts + [CDN_url(version)+"js/util.js",
                                     CDN_url(version)+"js/reorder.js",
                                     CDN_url(version)+"js/heatmap.js"]
    scripts_block = _scripts_block(scripts, scripts_mode, output_dir)

    html = templateEnv.get_template('heatmap.html.j2').render(
        title=title,
        scripts_block=scripts_block+'\n'+data_block,
        separate_zscore_by=separate_zscore_by)

    (output_dir / filename).write_text(html)

    return (output_dir / filename).resolve()
[ "def", "heatmap", "(", "genes_by_samples_matrix", ",", "sample_attributes", ",", "title", "=", "'Axial Heatmap'", ",", "scripts_mode", "=", "\"CDN\"", ",", "data_mode", "=", "\"directory\"", ",", "organism", "=", "\"human\"", ",", "separate_zscore_by", "=", "[", "\"system\"", "]", ",", "output_dir", "=", "\".\"", ",", "filename", "=", "\"heatmap.html\"", ",", "version", "=", "this_version", ")", ":", "output_dir", "=", "Path", "(", "output_dir", ")", "output_dir", ".", "mkdir", "(", "exist_ok", "=", "True", ",", "parents", "=", "True", ")", "# Data =======================", "_verify_sample_by_genes_matrix", "(", "genes_by_samples_matrix", ")", "_verify_sample_attributes", "(", "genes_by_samples_matrix", ",", "sample_attributes", ")", "genes_by_samples_matrix", "=", "genes_by_samples_matrix", ".", "round", "(", "2", ")", "# TODO drop all zero rows", "matrix", "=", "f\"var matrix = {genes_by_samples_matrix.to_json(orient='columns')};\"", "classes", "=", "f\"var classes = {sample_attributes.to_json(orient='index')};\"", "data_block", "=", "_data_block", "(", "data_mode", ",", "[", "(", "'matrix'", ",", "matrix", ")", ",", "(", "'classes'", ",", "classes", ")", "]", ",", "output_dir", ",", "organism", "=", "organism", ")", "# Scripts =======================", "scripts", "=", "third_party_scripts", "+", "[", "CDN_url", "(", "version", ")", "+", "\"js/util.js\"", ",", "CDN_url", "(", "version", ")", "+", "\"js/reorder.js\"", ",", "CDN_url", "(", "version", ")", "+", "\"js/heatmap.js\"", "]", "scripts_block", "=", "_scripts_block", "(", "scripts", ",", "scripts_mode", ",", "output_dir", ")", "html", "=", "templateEnv", ".", "get_template", "(", "'heatmap.html.j2'", ")", ".", "render", "(", "title", "=", "title", ",", "scripts_block", "=", "scripts_block", "+", "'\\n'", "+", "data_block", ",", "separate_zscore_by", "=", "separate_zscore_by", ")", "(", "output_dir", "/", "filename", ")", ".", "write_text", "(", "html", ")", "return", "(", "output_dir", "/", "filename", ")", ".", "resolve", "(", ")" ]
Arguments: genes_by_samples_matrix (pandas.DataFrame): dataframe indexed by genes, columns are samples sample_attributes (pandas.DataFrame): dataframe indexed by samples, columns are sample attributes (e.g. classes) title (str): The title of the plot (to be embedded in the html). scripts_mode (str): Choose from [`"CDN"`, `"directory"`, `"inline"`]: - `"CDN"` compiles a single HTML page with links to scripts hosted on a CDN, - `"directory"` compiles a directory with all scripts locally cached, - `"inline"` compiles a single HTML file with all scripts/styles inlined. data_mode (str): Choose from ["directory", "inline"]: - "directory" compiles a directory with all data locally cached, - "inline" compiles a single HTML file with all data inlined. organism (str): `"human"` or `"mouse"` separate_zscore_by (list): output_dir (str): the directory in which to output the file filename (str): the filename of the output file version (str): the version of the javascripts to use. Leave the default to pin the version, or choose "latest" to get updates, or choose part of the version string to get minor updates. Returns: Path: The filepath which the html was outputted to.
[ "Arguments", ":", "genes_by_samples_matrix", "(", "pandas", ".", "DataFrame", ")", ":", "dataframe", "indexed", "by", "genes", "columns", "are", "samples", "sample_attributes", "(", "pandas", ".", "DataFrame", ")", ":", "dataframe", "indexed", "by", "samples", "columns", "are", "sample", "attributes", "(", "e", ".", "g", ".", "classes", ")", "title", "(", "str", ")", ":", "The", "title", "of", "the", "plot", "(", "to", "be", "embedded", "in", "the", "html", ")", ".", "scripts_mode", "(", "str", ")", ":", "Choose", "from", "[", "CDN", "directory", "inline", "]", ":" ]
python
valid
lsbardel/python-stdnet
stdnet/odm/query.py
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/query.py#L575-L583
def dont_load(self, *fields):
    '''Works like :meth:`load_only` to provides a
    :ref:`performance boost <increase-performance>` in cases when you need
    to load all fields except a subset specified by *fields*.
    '''
    q = self._clone()
    fs = unique_tuple(q.exclude_fields, fields)
    q.exclude_fields = fs if fs else None
    return q
[ "def", "dont_load", "(", "self", ",", "*", "fields", ")", ":", "q", "=", "self", ".", "_clone", "(", ")", "fs", "=", "unique_tuple", "(", "q", ".", "exclude_fields", ",", "fields", ")", "q", ".", "exclude_fields", "=", "fs", "if", "fs", "else", "None", "return", "q" ]
Works like :meth:`load_only` to provides a :ref:`performance boost <increase-performance>` in cases when you need to load all fields except a subset specified by *fields*.
[ "Works", "like", ":", "meth", ":", "load_only", "to", "provides", "a", ":", "ref", ":", "performance", "boost", "<increase", "-", "performance", ">", "in", "cases", "when", "you", "need", "to", "load", "all", "fields", "except", "a", "subset", "specified", "by", "*", "fields", "*", "." ]
python
train
Jaymon/captain
captain/parse.py
https://github.com/Jaymon/captain/blob/4297f32961d423a10d0f053bc252e29fbe939a47/captain/parse.py#L382-L401
def _fill_text(self, text, width, indent):
    """Overridden to not get rid of newlines

    https://github.com/python/cpython/blob/2.7/Lib/argparse.py#L620"""
    lines = []
    for line in text.splitlines(False):
        if line:
            # https://docs.python.org/2/library/textwrap.html
            lines.extend(textwrap.wrap(
                line.strip(),
                width,
                initial_indent=indent,
                subsequent_indent=indent
            ))
        else:
            lines.append(line)

    text = "\n".join(lines)
    return text
[ "def", "_fill_text", "(", "self", ",", "text", ",", "width", ",", "indent", ")", ":", "lines", "=", "[", "]", "for", "line", "in", "text", ".", "splitlines", "(", "False", ")", ":", "if", "line", ":", "# https://docs.python.org/2/library/textwrap.html", "lines", ".", "extend", "(", "textwrap", ".", "wrap", "(", "line", ".", "strip", "(", ")", ",", "width", ",", "initial_indent", "=", "indent", ",", "subsequent_indent", "=", "indent", ")", ")", "else", ":", "lines", ".", "append", "(", "line", ")", "text", "=", "\"\\n\"", ".", "join", "(", "lines", ")", "return", "text" ]
Overridden to not get rid of newlines https://github.com/python/cpython/blob/2.7/Lib/argparse.py#L620
[ "Overridden", "to", "not", "get", "rid", "of", "newlines" ]
python
valid
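A minimal illustrative sketch of the per-line wrapping idea used by `_fill_text` in the record above, written as a standalone function so it can be run directly; the sample text, width, and indent are made up.

import textwrap

def fill_text(text, width, indent):
    # Wrap each source line on its own so empty lines (paragraph breaks) survive,
    # mirroring the strategy of the _fill_text record above.
    lines = []
    for line in text.splitlines(False):
        if line:
            lines.extend(textwrap.wrap(line.strip(), width,
                                       initial_indent=indent,
                                       subsequent_indent=indent))
        else:
            lines.append(line)
    return "\n".join(lines)

sample = "A fairly long first paragraph that should wrap onto several lines.\n\nA second paragraph."
print(fill_text(sample, width=30, indent="  "))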
timkpaine/pyEX
pyEX/stocks.py
https://github.com/timkpaine/pyEX/blob/91cf751dafdb208a0c8b5377945e5808b99f94ba/pyEX/stocks.py#L1280-L1300
def ohlcDF(symbol, token='', version=''): '''Returns the official open and close for a give symbol. https://iexcloud.io/docs/api/#news 9:30am-5pm ET Mon-Fri Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: DataFrame: result ''' o = ohlc(symbol, token, version) if o: df = pd.io.json.json_normalize(o) _toDatetime(df) else: df = pd.DataFrame() return df
[ "def", "ohlcDF", "(", "symbol", ",", "token", "=", "''", ",", "version", "=", "''", ")", ":", "o", "=", "ohlc", "(", "symbol", ",", "token", ",", "version", ")", "if", "o", ":", "df", "=", "pd", ".", "io", ".", "json", ".", "json_normalize", "(", "o", ")", "_toDatetime", "(", "df", ")", "else", ":", "df", "=", "pd", ".", "DataFrame", "(", ")", "return", "df" ]
Returns the official open and close for a given symbol.

https://iexcloud.io/docs/api/#news
9:30am-5pm ET Mon-Fri

Args:
    symbol (string); Ticker to request
    token (string); Access token
    version (string); API version

Returns:
    DataFrame: result
[ "Returns", "the", "official", "open", "and", "close", "for", "a", "give", "symbol", "." ]
python
valid
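An illustrative sketch of the JSON-to-DataFrame fallback pattern used by `ohlcDF` in the record above. The payload is a made-up stand-in for an IEX open/close response, and `pd.json_normalize` is used as the current spelling of the older `pd.io.json.json_normalize`.

import pandas as pd

payload = {"symbol": "AAA", "open": {"price": 10.0}, "close": {"price": 10.5}}  # hypothetical response

if payload:
    df = pd.json_normalize(payload)   # one row, nested keys flattened to open.price / close.price
else:
    df = pd.DataFrame()               # empty frame when the API returned nothing
print(df)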
joeyespo/grip
grip/app.py
https://github.com/joeyespo/grip/blob/ce933ccc4ca8e0d3718f271c59bd530a4518bf63/grip/app.py#L375-L385
def render(self, route=None): """ Renders the application and returns the HTML unicode that would normally appear when visiting in the browser. """ if route is None: route = '/' with self.test_client() as c: response = c.get(route, follow_redirects=True) encoding = response.charset return response.data.decode(encoding)
[ "def", "render", "(", "self", ",", "route", "=", "None", ")", ":", "if", "route", "is", "None", ":", "route", "=", "'/'", "with", "self", ".", "test_client", "(", ")", "as", "c", ":", "response", "=", "c", ".", "get", "(", "route", ",", "follow_redirects", "=", "True", ")", "encoding", "=", "response", ".", "charset", "return", "response", ".", "data", ".", "decode", "(", "encoding", ")" ]
Renders the application and returns the HTML unicode that would normally appear when visiting in the browser.
[ "Renders", "the", "application", "and", "returns", "the", "HTML", "unicode", "that", "would", "normally", "appear", "when", "visiting", "in", "the", "browser", "." ]
python
train
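An illustrative sketch of the same in-process rendering trick as `render` in the record above, shown against a tiny Flask app rather than Grip itself; the route and page body are made up.

from flask import Flask

app = Flask(__name__)

@app.route("/")
def index():
    return "<h1>hello</h1>"

def render(route="/"):
    # Fetch the route through the test client instead of a real server and
    # return the decoded body, as the Grip method above does.
    with app.test_client() as client:
        response = client.get(route, follow_redirects=True)
        return response.get_data(as_text=True)

print(render())   # "<h1>hello</h1>"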
jamieleshaw/lurklib
lurklib/core.py
https://github.com/jamieleshaw/lurklib/blob/a861f35d880140422103dd78ec3239814e85fd7e/lurklib/core.py#L83-L101
def find(self, haystack, needle): """ Finds needle in haystack. If needle is found return True, if not return False. Required arguments: * haystack - Text to search in. * needle - Text to search for. """ try: qstatus = haystack.find(needle) except AttributeError: if needle in haystack: return True else: return False if qstatus == -1: return False elif qstatus != -1: return True
[ "def", "find", "(", "self", ",", "haystack", ",", "needle", ")", ":", "try", ":", "qstatus", "=", "haystack", ".", "find", "(", "needle", ")", "except", "AttributeError", ":", "if", "needle", "in", "haystack", ":", "return", "True", "else", ":", "return", "False", "if", "qstatus", "==", "-", "1", ":", "return", "False", "elif", "qstatus", "!=", "-", "1", ":", "return", "True" ]
Finds needle in haystack. If needle is found return True, if not return False. Required arguments: * haystack - Text to search in. * needle - Text to search for.
[ "Finds", "needle", "in", "haystack", ".", "If", "needle", "is", "found", "return", "True", "if", "not", "return", "False", ".", "Required", "arguments", ":", "*", "haystack", "-", "Text", "to", "search", "in", ".", "*", "needle", "-", "Text", "to", "search", "for", "." ]
python
train
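A condensed standalone equivalent of the `find` helper in the record above, returning the same booleans for strings and for containers that lack a .find method; the sample inputs are made up.

def find(haystack, needle):
    # str.find gives -1 when the needle is absent; objects without .find
    # (e.g. lists) raise AttributeError and fall back to the `in` test.
    try:
        return haystack.find(needle) != -1
    except AttributeError:
        return needle in haystack

print(find("hello world", "world"))   # True
print(find("hello world", "mars"))    # False
print(find(["a", "b", "c"], "b"))     # True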
djgagne/hagelslag
hagelslag/processing/EnsembleProducts.py
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/EnsembleProducts.py#L460-L485
def point_consensus(self, consensus_type): """ Calculate grid-point statistics across ensemble members. Args: consensus_type: mean, std, median, max, or percentile_nn Returns: EnsembleConsensus containing point statistic """ if "mean" in consensus_type: consensus_data = np.mean(self.data, axis=0) elif "std" in consensus_type: consensus_data = np.std(self.data, axis=0) elif "median" in consensus_type: consensus_data = np.median(self.data, axis=0) elif "max" in consensus_type: consensus_data = np.max(self.data, axis=0) elif "percentile" in consensus_type: percentile = int(consensus_type.split("_")[1]) consensus_data = np.percentile(self.data, percentile, axis=0) else: consensus_data = np.zeros(self.data.shape[1:]) consensus = EnsembleConsensus(consensus_data, consensus_type, self.ensemble_name, self.run_date, self.variable, self.start_date, self.end_date, self.units) return consensus
[ "def", "point_consensus", "(", "self", ",", "consensus_type", ")", ":", "if", "\"mean\"", "in", "consensus_type", ":", "consensus_data", "=", "np", ".", "mean", "(", "self", ".", "data", ",", "axis", "=", "0", ")", "elif", "\"std\"", "in", "consensus_type", ":", "consensus_data", "=", "np", ".", "std", "(", "self", ".", "data", ",", "axis", "=", "0", ")", "elif", "\"median\"", "in", "consensus_type", ":", "consensus_data", "=", "np", ".", "median", "(", "self", ".", "data", ",", "axis", "=", "0", ")", "elif", "\"max\"", "in", "consensus_type", ":", "consensus_data", "=", "np", ".", "max", "(", "self", ".", "data", ",", "axis", "=", "0", ")", "elif", "\"percentile\"", "in", "consensus_type", ":", "percentile", "=", "int", "(", "consensus_type", ".", "split", "(", "\"_\"", ")", "[", "1", "]", ")", "consensus_data", "=", "np", ".", "percentile", "(", "self", ".", "data", ",", "percentile", ",", "axis", "=", "0", ")", "else", ":", "consensus_data", "=", "np", ".", "zeros", "(", "self", ".", "data", ".", "shape", "[", "1", ":", "]", ")", "consensus", "=", "EnsembleConsensus", "(", "consensus_data", ",", "consensus_type", ",", "self", ".", "ensemble_name", ",", "self", ".", "run_date", ",", "self", ".", "variable", ",", "self", ".", "start_date", ",", "self", ".", "end_date", ",", "self", ".", "units", ")", "return", "consensus" ]
Calculate grid-point statistics across ensemble members. Args: consensus_type: mean, std, median, max, or percentile_nn Returns: EnsembleConsensus containing point statistic
[ "Calculate", "grid", "-", "point", "statistics", "across", "ensemble", "members", "." ]
python
train
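An illustrative sketch of the grid-point statistics that `point_consensus` above dispatches on, computed directly with NumPy over a toy 4-member ensemble; the array contents are random placeholders.

import numpy as np

data = np.random.RandomState(0).rand(4, 3, 3)   # 4 ensemble members on a 3 x 3 grid

consensus = {
    "mean": np.mean(data, axis=0),
    "std": np.std(data, axis=0),
    "median": np.median(data, axis=0),
    "max": np.max(data, axis=0),
    "percentile_90": np.percentile(data, 90, axis=0),
}
for name, grid in consensus.items():
    print(name, grid.shape)   # each statistic collapses the member axis, leaving (3, 3)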
manns/pyspread
pyspread/src/actions/_main_window_actions.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/actions/_main_window_actions.py#L243-L253
def get_print_rect(self, grid_rect): """Returns wx.Rect that is correctly positioned on the print canvas""" grid = self.grid rect_x = grid_rect.x - \ grid.GetScrollPos(wx.HORIZONTAL) * grid.GetScrollLineX() rect_y = grid_rect.y - \ grid.GetScrollPos(wx.VERTICAL) * grid.GetScrollLineY() return wx.Rect(rect_x, rect_y, grid_rect.width, grid_rect.height)
[ "def", "get_print_rect", "(", "self", ",", "grid_rect", ")", ":", "grid", "=", "self", ".", "grid", "rect_x", "=", "grid_rect", ".", "x", "-", "grid", ".", "GetScrollPos", "(", "wx", ".", "HORIZONTAL", ")", "*", "grid", ".", "GetScrollLineX", "(", ")", "rect_y", "=", "grid_rect", ".", "y", "-", "grid", ".", "GetScrollPos", "(", "wx", ".", "VERTICAL", ")", "*", "grid", ".", "GetScrollLineY", "(", ")", "return", "wx", ".", "Rect", "(", "rect_x", ",", "rect_y", ",", "grid_rect", ".", "width", ",", "grid_rect", ".", "height", ")" ]
Returns wx.Rect that is correctly positioned on the print canvas
[ "Returns", "wx", ".", "Rect", "that", "is", "correctly", "positioned", "on", "the", "print", "canvas" ]
python
train
jssimporter/python-jss
jss/jssobject.py
https://github.com/jssimporter/python-jss/blob/b95185d74e0c0531b0b563f280d4129e21d5fe5d/jss/jssobject.py#L547-L561
def as_list_data(self): """Return an Element to be used in a list. Most lists want an element with tag of list_type, and subelements of id and name. Returns: Element: list representation of object. """ element = ElementTree.Element(self.list_type) id_ = ElementTree.SubElement(element, "id") id_.text = self.id name = ElementTree.SubElement(element, "name") name.text = self.name return element
[ "def", "as_list_data", "(", "self", ")", ":", "element", "=", "ElementTree", ".", "Element", "(", "self", ".", "list_type", ")", "id_", "=", "ElementTree", ".", "SubElement", "(", "element", ",", "\"id\"", ")", "id_", ".", "text", "=", "self", ".", "id", "name", "=", "ElementTree", ".", "SubElement", "(", "element", ",", "\"name\"", ")", "name", ".", "text", "=", "self", ".", "name", "return", "element" ]
Return an Element to be used in a list. Most lists want an element with tag of list_type, and subelements of id and name. Returns: Element: list representation of object.
[ "Return", "an", "Element", "to", "be", "used", "in", "a", "list", "." ]
python
train
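An illustrative sketch of the element layout produced by `as_list_data` in the record above, built with the standard-library ElementTree; the tag name, id, and name values are made up.

import xml.etree.ElementTree as ElementTree

def as_list_data(list_type, obj_id, obj_name):
    # <list_type><id>...</id><name>...</name></list_type>
    element = ElementTree.Element(list_type)
    ElementTree.SubElement(element, "id").text = obj_id
    ElementTree.SubElement(element, "name").text = obj_name
    return element

element = as_list_data("package", "42", "Firefox.pkg")
print(ElementTree.tostring(element, encoding="unicode"))
# <package><id>42</id><name>Firefox.pkg</name></package>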
stefankoegl/kdtree
kdtree.py
https://github.com/stefankoegl/kdtree/blob/587edc7056d7735177ad56a84ad5abccdea91693/kdtree.py#L103-L123
def children(self): """ Returns an iterator for the non-empty children of the Node The children are returned as (Node, pos) tuples where pos is 0 for the left subnode and 1 for the right. >>> len(list(create(dimensions=2).children)) 0 >>> len(list(create([ (1, 2) ]).children)) 0 >>> len(list(create([ (2, 2), (2, 1), (2, 3) ]).children)) 2 """ if self.left and self.left.data is not None: yield self.left, 0 if self.right and self.right.data is not None: yield self.right, 1
[ "def", "children", "(", "self", ")", ":", "if", "self", ".", "left", "and", "self", ".", "left", ".", "data", "is", "not", "None", ":", "yield", "self", ".", "left", ",", "0", "if", "self", ".", "right", "and", "self", ".", "right", ".", "data", "is", "not", "None", ":", "yield", "self", ".", "right", ",", "1" ]
Returns an iterator for the non-empty children of the Node The children are returned as (Node, pos) tuples where pos is 0 for the left subnode and 1 for the right. >>> len(list(create(dimensions=2).children)) 0 >>> len(list(create([ (1, 2) ]).children)) 0 >>> len(list(create([ (2, 2), (2, 1), (2, 3) ]).children)) 2
[ "Returns", "an", "iterator", "for", "the", "non", "-", "empty", "children", "of", "the", "Node" ]
python
train
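An illustrative sketch with a minimal node class following the same `children` contract as the kdtree property above: only children that actually hold data are yielded, each with its 0/1 position. The point values are taken from the doctests in the record.

class Node:
    def __init__(self, data=None, left=None, right=None):
        self.data, self.left, self.right = data, left, right

    @property
    def children(self):
        # Yield (child, position) only for children that actually hold data.
        if self.left and self.left.data is not None:
            yield self.left, 0
        if self.right and self.right.data is not None:
            yield self.right, 1

root = Node((2, 2), left=Node((2, 1)), right=Node((2, 3)))
print(len(list(root.children)))          # 2
print(len(list(Node((1, 2)).children)))  # 0, a leaf has no non-empty children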
google/grumpy
third_party/pythonparser/parser.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pythonparser/parser.py#L1004-L1007
def import_name(self, import_loc, names): """import_name: 'import' dotted_as_names""" return ast.Import(names=names, keyword_loc=import_loc, loc=import_loc.join(names[-1].loc))
[ "def", "import_name", "(", "self", ",", "import_loc", ",", "names", ")", ":", "return", "ast", ".", "Import", "(", "names", "=", "names", ",", "keyword_loc", "=", "import_loc", ",", "loc", "=", "import_loc", ".", "join", "(", "names", "[", "-", "1", "]", ".", "loc", ")", ")" ]
import_name: 'import' dotted_as_names
[ "import_name", ":", "import", "dotted_as_names" ]
python
valid
Othernet-Project/squery-pg
squery_pg/squery_pg.py
https://github.com/Othernet-Project/squery-pg/blob/eaa695c3719e2d2b7e1b049bb58c987c132b6b34/squery_pg/squery_pg.py#L68-L80
def serialize_query(func): """ Ensure any SQLExpression instances are serialized""" @functools.wraps(func) def wrapper(self, query, *args, **kwargs): if hasattr(query, 'serialize'): query = query.serialize() assert isinstance(query, basestring), 'Expected query to be string' if self.debug: print('SQL:', query) return func(self, query, *args, **kwargs) return wrapper
[ "def", "serialize_query", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapper", "(", "self", ",", "query", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "hasattr", "(", "query", ",", "'serialize'", ")", ":", "query", "=", "query", ".", "serialize", "(", ")", "assert", "isinstance", "(", "query", ",", "basestring", ")", ",", "'Expected query to be string'", "if", "self", ".", "debug", ":", "print", "(", "'SQL:'", ",", "query", ")", "return", "func", "(", "self", ",", "query", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
Ensure any SQLExpression instances are serialized
[ "Ensure", "any", "SQLExpression", "instances", "are", "serialized" ]
python
train
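An illustrative sketch of the same decorator pattern as `serialize_query` above, exercised against throwaway stand-in classes; `basestring` is replaced by `str` so the sketch runs on Python 3.

import functools

def serialize_query(func):
    @functools.wraps(func)
    def wrapper(self, query, *args, **kwargs):
        if hasattr(query, "serialize"):
            query = query.serialize()        # SQLExpression-like objects get flattened first
        assert isinstance(query, str), "Expected query to be string"
        if self.debug:
            print("SQL:", query)
        return func(self, query, *args, **kwargs)
    return wrapper

class FakeExpression:                         # stand-in for an SQLExpression
    def serialize(self):
        return "SELECT 1"

class FakeDatabase:                           # stand-in for the connection class
    debug = True

    @serialize_query
    def execute(self, query):
        return query

print(FakeDatabase().execute(FakeExpression()))   # logs "SQL: SELECT 1" and returns the string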
tarzanjw/python-mysql-binlog-to-blinker
mysqlbinlog2blinker/__init__.py
https://github.com/tarzanjw/python-mysql-binlog-to-blinker/blob/d61ab5962345377e142a225b16f731ab4196fc26/mysqlbinlog2blinker/__init__.py#L56-L90
def start_replication(mysql_settings, binlog_pos_memory=(None, 2), **kwargs): """ Start replication on server specified by *mysql_settings* Args: mysql_settings (dict): mysql settings that is used to connect to mysql via pymysql binlog_pos_memory (_bpm.BaseBinlogPosMemory): Binlog Position Memory, it should be an instance of subclass of :py:class:`_bpm.BaseBinlogPosMemory`. If a tuple (str, float) is passed, it will be initialize parameters for default :py:class:`_bpm.FileBasedBinlogPosMemory`. It the file- name is None, it will be *`cwd`\mysqlbinlog2blinker.binlog.pos* **kwargs: any arguments that are accepted by :py:class:`pymysqlreplication.BinLogStreamReader`'s constructor """ if not isinstance(binlog_pos_memory, _bpm.BaseBinlogPosMemory): if not isinstance(binlog_pos_memory, (tuple, list)): raise ValueError('Invalid binlog position memory: %s' % binlog_pos_memory) binlog_pos_memory = _bpm.FileBasedBinlogPosMemory(*binlog_pos_memory) mysql_settings.setdefault('connect_timeout', 5) kwargs.setdefault('blocking', True) kwargs.setdefault('resume_stream', True) with binlog_pos_memory: kwargs.setdefault('log_file', binlog_pos_memory.log_file) kwargs.setdefault('log_pos', binlog_pos_memory.log_pos) _logger.info('Start replication from %s with:\n%s' % (mysql_settings, kwargs)) start_publishing(mysql_settings, **kwargs)
[ "def", "start_replication", "(", "mysql_settings", ",", "binlog_pos_memory", "=", "(", "None", ",", "2", ")", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "binlog_pos_memory", ",", "_bpm", ".", "BaseBinlogPosMemory", ")", ":", "if", "not", "isinstance", "(", "binlog_pos_memory", ",", "(", "tuple", ",", "list", ")", ")", ":", "raise", "ValueError", "(", "'Invalid binlog position memory: %s'", "%", "binlog_pos_memory", ")", "binlog_pos_memory", "=", "_bpm", ".", "FileBasedBinlogPosMemory", "(", "*", "binlog_pos_memory", ")", "mysql_settings", ".", "setdefault", "(", "'connect_timeout'", ",", "5", ")", "kwargs", ".", "setdefault", "(", "'blocking'", ",", "True", ")", "kwargs", ".", "setdefault", "(", "'resume_stream'", ",", "True", ")", "with", "binlog_pos_memory", ":", "kwargs", ".", "setdefault", "(", "'log_file'", ",", "binlog_pos_memory", ".", "log_file", ")", "kwargs", ".", "setdefault", "(", "'log_pos'", ",", "binlog_pos_memory", ".", "log_pos", ")", "_logger", ".", "info", "(", "'Start replication from %s with:\\n%s'", "%", "(", "mysql_settings", ",", "kwargs", ")", ")", "start_publishing", "(", "mysql_settings", ",", "*", "*", "kwargs", ")" ]
Start replication on the server specified by *mysql_settings*

Args:
    mysql_settings (dict): mysql settings that are used to connect to mysql via pymysql
    binlog_pos_memory (_bpm.BaseBinlogPosMemory): Binlog Position Memory; it should be an instance of a subclass of :py:class:`_bpm.BaseBinlogPosMemory`. If a tuple (str, float) is passed, it will be used to initialize the default :py:class:`_bpm.FileBasedBinlogPosMemory`. If the filename is None, it will be *`cwd`\mysqlbinlog2blinker.binlog.pos*
    **kwargs: any arguments that are accepted by :py:class:`pymysqlreplication.BinLogStreamReader`'s constructor
[ "Start", "replication", "on", "server", "specified", "by", "*", "mysql_settings", "*" ]
python
train
apache/airflow
airflow/contrib/operators/gcp_container_operator.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/gcp_container_operator.py#L282-L308
def _set_env_from_extras(self, extras): """ Sets the environment variable `GOOGLE_APPLICATION_CREDENTIALS` with either: - The path to the keyfile from the specified connection id - A generated file's path if the user specified JSON in the connection id. The file is assumed to be deleted after the process dies due to how mkstemp() works. The environment variable is used inside the gcloud command to determine correct service account to use. """ key_path = self._get_field(extras, 'key_path', False) keyfile_json_str = self._get_field(extras, 'keyfile_dict', False) if not key_path and not keyfile_json_str: self.log.info('Using gcloud with application default credentials.') elif key_path: os.environ[G_APP_CRED] = key_path else: # Write service account JSON to secure file for gcloud to reference service_key = tempfile.NamedTemporaryFile(delete=False) service_key.write(keyfile_json_str) os.environ[G_APP_CRED] = service_key.name # Return file object to have a pointer to close after use, # thus deleting from file system. return service_key
[ "def", "_set_env_from_extras", "(", "self", ",", "extras", ")", ":", "key_path", "=", "self", ".", "_get_field", "(", "extras", ",", "'key_path'", ",", "False", ")", "keyfile_json_str", "=", "self", ".", "_get_field", "(", "extras", ",", "'keyfile_dict'", ",", "False", ")", "if", "not", "key_path", "and", "not", "keyfile_json_str", ":", "self", ".", "log", ".", "info", "(", "'Using gcloud with application default credentials.'", ")", "elif", "key_path", ":", "os", ".", "environ", "[", "G_APP_CRED", "]", "=", "key_path", "else", ":", "# Write service account JSON to secure file for gcloud to reference", "service_key", "=", "tempfile", ".", "NamedTemporaryFile", "(", "delete", "=", "False", ")", "service_key", ".", "write", "(", "keyfile_json_str", ")", "os", ".", "environ", "[", "G_APP_CRED", "]", "=", "service_key", ".", "name", "# Return file object to have a pointer to close after use,", "# thus deleting from file system.", "return", "service_key" ]
Sets the environment variable `GOOGLE_APPLICATION_CREDENTIALS` with either: - The path to the keyfile from the specified connection id - A generated file's path if the user specified JSON in the connection id. The file is assumed to be deleted after the process dies due to how mkstemp() works. The environment variable is used inside the gcloud command to determine correct service account to use.
[ "Sets", "the", "environment", "variable", "GOOGLE_APPLICATION_CREDENTIALS", "with", "either", ":" ]
python
test
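An illustrative sketch of the keyfile-to-environment pattern that `_set_env_from_extras` above relies on, using only the standard library rather than the Airflow hook itself; the JSON blob is a placeholder.

import os
import tempfile

def set_google_credentials(key_path=None, keyfile_json=None):
    # A path wins outright; a raw JSON blob is spilled to a temp file first,
    # and GOOGLE_APPLICATION_CREDENTIALS is pointed at whichever exists.
    if key_path:
        os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = key_path
        return None
    if keyfile_json:
        handle = tempfile.NamedTemporaryFile(delete=False, suffix=".json")
        handle.write(keyfile_json.encode("utf-8"))
        handle.flush()
        os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = handle.name
        return handle                       # caller is responsible for cleanup
    return None                             # fall back to application default credentials

handle = set_google_credentials(keyfile_json='{"type": "service_account"}')
print(os.environ["GOOGLE_APPLICATION_CREDENTIALS"])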
tcalmant/ipopo
pelix/ipopo/waiting.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/waiting.py#L199-L230
def add(self, factory, component, properties=None): # type: (str, str, dict) -> None """ Enqueues the instantiation of the given component :param factory: Factory name :param component: Component name :param properties: Component properties :raise ValueError: Component name already reserved in the queue :raise Exception: Error instantiating the component """ with self.__lock: if component in self.__names: raise ValueError( "Component name already queued: {0}".format(component) ) # Normalize properties if properties is None: properties = {} # Store component description self.__names[component] = factory self.__queue.setdefault(factory, {})[component] = properties try: with use_ipopo(self.__context) as ipopo: # Try to instantiate the component right now self._try_instantiate(ipopo, factory, component) except BundleException: # iPOPO not yet started pass
[ "def", "add", "(", "self", ",", "factory", ",", "component", ",", "properties", "=", "None", ")", ":", "# type: (str, str, dict) -> None", "with", "self", ".", "__lock", ":", "if", "component", "in", "self", ".", "__names", ":", "raise", "ValueError", "(", "\"Component name already queued: {0}\"", ".", "format", "(", "component", ")", ")", "# Normalize properties", "if", "properties", "is", "None", ":", "properties", "=", "{", "}", "# Store component description", "self", ".", "__names", "[", "component", "]", "=", "factory", "self", ".", "__queue", ".", "setdefault", "(", "factory", ",", "{", "}", ")", "[", "component", "]", "=", "properties", "try", ":", "with", "use_ipopo", "(", "self", ".", "__context", ")", "as", "ipopo", ":", "# Try to instantiate the component right now", "self", ".", "_try_instantiate", "(", "ipopo", ",", "factory", ",", "component", ")", "except", "BundleException", ":", "# iPOPO not yet started", "pass" ]
Enqueues the instantiation of the given component :param factory: Factory name :param component: Component name :param properties: Component properties :raise ValueError: Component name already reserved in the queue :raise Exception: Error instantiating the component
[ "Enqueues", "the", "instantiation", "of", "the", "given", "component" ]
python
train
radjkarl/imgProcessor
imgProcessor/uncertainty/positionToIntensityUncertainty.py
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/uncertainty/positionToIntensityUncertainty.py#L94-L104
def _coarsenImage(image, f): ''' seems to be a more precise (but slower) way to down-scale an image ''' from skimage.morphology import square from skimage.filters import rank from skimage.transform._warps import rescale selem = square(f) arri = rank.mean(image, selem=selem) return rescale(arri, 1 / f, order=0)
[ "def", "_coarsenImage", "(", "image", ",", "f", ")", ":", "from", "skimage", ".", "morphology", "import", "square", "from", "skimage", ".", "filters", "import", "rank", "from", "skimage", ".", "transform", ".", "_warps", "import", "rescale", "selem", "=", "square", "(", "f", ")", "arri", "=", "rank", ".", "mean", "(", "image", ",", "selem", "=", "selem", ")", "return", "rescale", "(", "arri", ",", "1", "/", "f", ",", "order", "=", "0", ")" ]
seems to be a more precise (but slower) way to down-scale an image
[ "seems", "to", "be", "a", "more", "precise", "(", "but", "slower", ")", "way", "to", "down", "-", "scale", "an", "image" ]
python
train
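`_coarsenImage` above averages with a square structuring element and then rescales with scikit-image; the plain-NumPy block mean below is a simpler, not numerically identical, sketch of the same coarsening idea, with made-up input data.

import numpy as np

def block_mean_downscale(image, f):
    # Average non-overlapping f x f blocks; the array is cropped so both
    # dimensions divide evenly by f.
    h, w = (image.shape[0] // f) * f, (image.shape[1] // f) * f
    blocks = image[:h, :w].reshape(h // f, f, w // f, f)
    return blocks.mean(axis=(1, 3))

img = np.arange(36, dtype=float).reshape(6, 6)
print(block_mean_downscale(img, 2).shape)   # (3, 3)
print(block_mean_downscale(img, 2)[0, 0])   # 3.5, the mean of the top-left 2 x 2 block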
radjkarl/fancyTools
fancytools/math/line.py
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/math/line.py#L311-L343
def intersection(line1, line2): """ Return the coordinates of a point of intersection given two lines. Return None if the lines are parallel, but non-colli_near. Return an arbitrary point of intersection if the lines are colli_near. Parameters: line1 and line2: lines given by 4 points (x0,y0,x1,y1). """ x1, y1, x2, y2 = line1 u1, v1, u2, v2 = line2 (a, b), (c, d) = (x2 - x1, u1 - u2), (y2 - y1, v1 - v2) e, f = u1 - x1, v1 - y1 # Solve ((a,b), (c,d)) * (t,s) = (e,f) denom = float(a * d - b * c) if _near(denom, 0): # parallel # If colli_near, the equation is solvable with t = 0. # When t=0, s would have to equal e/b and f/d if b == 0 or d == 0: return None if _near(e / b, f / d): # colli_near px = x1 py = y1 else: return None else: t = (e * d - b * f) / denom # s = (a*f - e*c)/denom px = x1 + t * (x2 - x1) py = y1 + t * (y2 - y1) return px, py
[ "def", "intersection", "(", "line1", ",", "line2", ")", ":", "x1", ",", "y1", ",", "x2", ",", "y2", "=", "line1", "u1", ",", "v1", ",", "u2", ",", "v2", "=", "line2", "(", "a", ",", "b", ")", ",", "(", "c", ",", "d", ")", "=", "(", "x2", "-", "x1", ",", "u1", "-", "u2", ")", ",", "(", "y2", "-", "y1", ",", "v1", "-", "v2", ")", "e", ",", "f", "=", "u1", "-", "x1", ",", "v1", "-", "y1", "# Solve ((a,b), (c,d)) * (t,s) = (e,f)", "denom", "=", "float", "(", "a", "*", "d", "-", "b", "*", "c", ")", "if", "_near", "(", "denom", ",", "0", ")", ":", "# parallel", "# If colli_near, the equation is solvable with t = 0.", "# When t=0, s would have to equal e/b and f/d", "if", "b", "==", "0", "or", "d", "==", "0", ":", "return", "None", "if", "_near", "(", "e", "/", "b", ",", "f", "/", "d", ")", ":", "# colli_near", "px", "=", "x1", "py", "=", "y1", "else", ":", "return", "None", "else", ":", "t", "=", "(", "e", "*", "d", "-", "b", "*", "f", ")", "/", "denom", "# s = (a*f - e*c)/denom", "px", "=", "x1", "+", "t", "*", "(", "x2", "-", "x1", ")", "py", "=", "y1", "+", "t", "*", "(", "y2", "-", "y1", ")", "return", "px", ",", "py" ]
Return the coordinates of a point of intersection given two lines. Return None if the lines are parallel, but non-colli_near. Return an arbitrary point of intersection if the lines are colli_near. Parameters: line1 and line2: lines given by 4 points (x0,y0,x1,y1).
[ "Return", "the", "coordinates", "of", "a", "point", "of", "intersection", "given", "two", "lines", ".", "Return", "None", "if", "the", "lines", "are", "parallel", "but", "non", "-", "colli_near", ".", "Return", "an", "arbitrary", "point", "of", "intersection", "if", "the", "lines", "are", "colli_near", "." ]
python
train
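An illustrative worked example of the 2 x 2 linear system solved by `intersection` above, using two made-up segments that cross at (1, 1).

line1 = (0.0, 0.0, 2.0, 2.0)   # from (0, 0) to (2, 2)
line2 = (0.0, 2.0, 2.0, 0.0)   # from (0, 2) to (2, 0)

x1, y1, x2, y2 = line1
u1, v1, u2, v2 = line2
a, b, c, d = x2 - x1, u1 - u2, y2 - y1, v1 - v2
e, f = u1 - x1, v1 - y1
denom = float(a * d - b * c)            # 8.0, non-zero, so the lines are not parallel
t = (e * d - b * f) / denom             # 0.5: the crossing sits halfway along line1
print(x1 + t * (x2 - x1), y1 + t * (y2 - y1))   # 1.0 1.0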
SmokinCaterpillar/pypet
pypet/naturalnaming.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/naturalnaming.py#L3188-L3202
def f_get_groups(self, copy=True): """Returns a dictionary of groups hanging immediately below this group. :param copy: Whether the group's original dictionary or a shallow copy is returned. If you want the real dictionary please do not modify it at all! :returns: Dictionary of nodes """ if copy: return self._groups.copy() else: return self._groups
[ "def", "f_get_groups", "(", "self", ",", "copy", "=", "True", ")", ":", "if", "copy", ":", "return", "self", ".", "_groups", ".", "copy", "(", ")", "else", ":", "return", "self", ".", "_groups" ]
Returns a dictionary of groups hanging immediately below this group. :param copy: Whether the group's original dictionary or a shallow copy is returned. If you want the real dictionary please do not modify it at all! :returns: Dictionary of nodes
[ "Returns", "a", "dictionary", "of", "groups", "hanging", "immediately", "below", "this", "group", "." ]
python
test
undertherain/pycontextfree
setup_boilerplate.py
https://github.com/undertherain/pycontextfree/blob/91505e978f6034863747c98d919ac11b029b1ac3/setup_boilerplate.py#L129-L136
def parse_rst(text: str) -> docutils.nodes.document: """Parse text assuming it's an RST markup.""" parser = docutils.parsers.rst.Parser() components = (docutils.parsers.rst.Parser,) settings = docutils.frontend.OptionParser(components=components).get_default_values() document = docutils.utils.new_document('<rst-doc>', settings=settings) parser.parse(text, document) return document
[ "def", "parse_rst", "(", "text", ":", "str", ")", "->", "docutils", ".", "nodes", ".", "document", ":", "parser", "=", "docutils", ".", "parsers", ".", "rst", ".", "Parser", "(", ")", "components", "=", "(", "docutils", ".", "parsers", ".", "rst", ".", "Parser", ",", ")", "settings", "=", "docutils", ".", "frontend", ".", "OptionParser", "(", "components", "=", "components", ")", ".", "get_default_values", "(", ")", "document", "=", "docutils", ".", "utils", ".", "new_document", "(", "'<rst-doc>'", ",", "settings", "=", "settings", ")", "parser", ".", "parse", "(", "text", ",", "document", ")", "return", "document" ]
Parse text assuming it's an RST markup.
[ "Parse", "text", "assuming", "it", "s", "an", "RST", "markup", "." ]
python
train
OpenKMIP/PyKMIP
kmip/core/enums.py
https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/core/enums.py#L1762-L1787
def convert_attribute_name_to_tag(value): """ A utility function that converts an attribute name string into the corresponding attribute tag. For example: 'State' -> enums.Tags.STATE Args: value (string): The string name of the attribute. Returns: enum: The Tags enumeration value that corresponds to the attribute name string. Raises: ValueError: if the attribute name string is not a string or if it is an unrecognized attribute name """ if not isinstance(value, six.string_types): raise ValueError("The attribute name must be a string.") for entry in attribute_name_tag_table: if value == entry[0]: return entry[1] raise ValueError("Unrecognized attribute name: '{}'".format(value))
[ "def", "convert_attribute_name_to_tag", "(", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", ":", "raise", "ValueError", "(", "\"The attribute name must be a string.\"", ")", "for", "entry", "in", "attribute_name_tag_table", ":", "if", "value", "==", "entry", "[", "0", "]", ":", "return", "entry", "[", "1", "]", "raise", "ValueError", "(", "\"Unrecognized attribute name: '{}'\"", ".", "format", "(", "value", ")", ")" ]
A utility function that converts an attribute name string into the corresponding attribute tag. For example: 'State' -> enums.Tags.STATE Args: value (string): The string name of the attribute. Returns: enum: The Tags enumeration value that corresponds to the attribute name string. Raises: ValueError: if the attribute name string is not a string or if it is an unrecognized attribute name
[ "A", "utility", "function", "that", "converts", "an", "attribute", "name", "string", "into", "the", "corresponding", "attribute", "tag", "." ]
python
test
jerith/txTwitter
txtwitter/twitter.py
https://github.com/jerith/txTwitter/blob/f07afd21184cd1bee697737bf98fd143378dbdff/txtwitter/twitter.py#L507-L529
def statuses_retweets(self, id, count=None, trim_user=None): """ Returns a list of the most recent retweets of the Tweet specified by the id parameter. https://dev.twitter.com/docs/api/1.1/get/statuses/retweets/%3Aid :param str id: (*required*) The numerical ID of the desired tweet. :param int count: The maximum number of retweets to return. (Max 100) :param bool trim_user: When set to ``True``, the tweet's user object includes only the status author's numerical ID. :returns: A tweet dict. """ params = {'id': id} set_int_param(params, 'count', count) set_bool_param(params, 'trim_user', trim_user) return self._get_api('statuses/retweets.json', params)
[ "def", "statuses_retweets", "(", "self", ",", "id", ",", "count", "=", "None", ",", "trim_user", "=", "None", ")", ":", "params", "=", "{", "'id'", ":", "id", "}", "set_int_param", "(", "params", ",", "'count'", ",", "count", ")", "set_bool_param", "(", "params", ",", "'trim_user'", ",", "trim_user", ")", "return", "self", ".", "_get_api", "(", "'statuses/retweets.json'", ",", "params", ")" ]
Returns a list of the most recent retweets of the Tweet specified by the id parameter. https://dev.twitter.com/docs/api/1.1/get/statuses/retweets/%3Aid :param str id: (*required*) The numerical ID of the desired tweet. :param int count: The maximum number of retweets to return. (Max 100) :param bool trim_user: When set to ``True``, the tweet's user object includes only the status author's numerical ID. :returns: A tweet dict.
[ "Returns", "a", "list", "of", "the", "most", "recent", "retweets", "of", "the", "Tweet", "specified", "by", "the", "id", "parameter", "." ]
python
train
SeabornGames/RequestClient
seaborn/request_client/connection_basic.py
https://github.com/SeabornGames/RequestClient/blob/21aeb951ddfdb6ee453ad0edc896ff224e06425d/seaborn/request_client/connection_basic.py#L513-L532
def create_connection(username=None, password=None, login_url=None, auth_url=None, api_key=None, realm=None, base_uri=None, proxies=None, timeout=None, headers=None, cookies=None, accepted_return=None): """ Creates and returns a connection :param realm: :param username : str of user's email to use for the session. :param password : str of password for the user :param login_url : str of the login_url :param auth_url : str of the auth_url, if Oauth2 is used :param api_key : str of api key of the client software :param base_uri : str of base url e.g. seaborngames.us :param proxies : str of proxies dictionary as used in requests :param timeout : str of timeout to use for api call :param headers : str of specially header to use for api calls :param cookies : list of cookies to use in the http request :param accepted_return : str of enum ['json','text','html'] """ return ConnectionBasic(**function_kwargs())
[ "def", "create_connection", "(", "username", "=", "None", ",", "password", "=", "None", ",", "login_url", "=", "None", ",", "auth_url", "=", "None", ",", "api_key", "=", "None", ",", "realm", "=", "None", ",", "base_uri", "=", "None", ",", "proxies", "=", "None", ",", "timeout", "=", "None", ",", "headers", "=", "None", ",", "cookies", "=", "None", ",", "accepted_return", "=", "None", ")", ":", "return", "ConnectionBasic", "(", "*", "*", "function_kwargs", "(", ")", ")" ]
Creates and returns a connection :param realm: :param username : str of user's email to use for the session. :param password : str of password for the user :param login_url : str of the login_url :param auth_url : str of the auth_url, if Oauth2 is used :param api_key : str of api key of the client software :param base_uri : str of base url e.g. seaborngames.us :param proxies : str of proxies dictionary as used in requests :param timeout : str of timeout to use for api call :param headers : str of specially header to use for api calls :param cookies : list of cookies to use in the http request :param accepted_return : str of enum ['json','text','html']
[ "Creates", "and", "returns", "a", "connection", ":", "param", "realm", ":", ":", "param", "username", ":", "str", "of", "user", "s", "email", "to", "use", "for", "the", "session", ".", ":", "param", "password", ":", "str", "of", "password", "for", "the", "user", ":", "param", "login_url", ":", "str", "of", "the", "login_url", ":", "param", "auth_url", ":", "str", "of", "the", "auth_url", "if", "Oauth2", "is", "used", ":", "param", "api_key", ":", "str", "of", "api", "key", "of", "the", "client", "software", ":", "param", "base_uri", ":", "str", "of", "base", "url", "e", ".", "g", ".", "seaborngames", ".", "us", ":", "param", "proxies", ":", "str", "of", "proxies", "dictionary", "as", "used", "in", "requests", ":", "param", "timeout", ":", "str", "of", "timeout", "to", "use", "for", "api", "call", ":", "param", "headers", ":", "str", "of", "specially", "header", "to", "use", "for", "api", "calls", ":", "param", "cookies", ":", "list", "of", "cookies", "to", "use", "in", "the", "http", "request", ":", "param", "accepted_return", ":", "str", "of", "enum", "[", "json", "text", "html", "]" ]
python
train
alephdata/memorious
memorious/logic/context.py
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/logic/context.py#L143-L158
def store_data(self, data, encoding='utf-8'): """Put the given content into a file, possibly encoding it as UTF-8 in the process.""" path = random_filename(self.work_path) try: with open(path, 'wb') as fh: if isinstance(data, str): data = data.encode(encoding) if data is not None: fh.write(data) return self.store_file(path) finally: try: os.unlink(path) except OSError: pass
[ "def", "store_data", "(", "self", ",", "data", ",", "encoding", "=", "'utf-8'", ")", ":", "path", "=", "random_filename", "(", "self", ".", "work_path", ")", "try", ":", "with", "open", "(", "path", ",", "'wb'", ")", "as", "fh", ":", "if", "isinstance", "(", "data", ",", "str", ")", ":", "data", "=", "data", ".", "encode", "(", "encoding", ")", "if", "data", "is", "not", "None", ":", "fh", ".", "write", "(", "data", ")", "return", "self", ".", "store_file", "(", "path", ")", "finally", ":", "try", ":", "os", ".", "unlink", "(", "path", ")", "except", "OSError", ":", "pass" ]
Put the given content into a file, possibly encoding it as UTF-8 in the process.
[ "Put", "the", "given", "content", "into", "a", "file", "possibly", "encoding", "it", "as", "UTF", "-", "8", "in", "the", "process", "." ]
python
train
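An illustrative sketch of the write-encode-hand-off-cleanup shape of `store_data` above, with the crawler-specific pieces replaced by a plain callback and the standard library's mkstemp; the sample string is made up.

import os
import tempfile

def store_data(data, handler, encoding="utf-8"):
    fd, path = tempfile.mkstemp()
    os.close(fd)
    try:
        with open(path, "wb") as fh:
            if isinstance(data, str):
                data = data.encode(encoding)   # text is encoded before hitting disk
            if data is not None:
                fh.write(data)
        return handler(path)                   # stand-in for self.store_file(path)
    finally:
        try:
            os.unlink(path)                    # the scratch file never outlives the call
        except OSError:
            pass

print(store_data("héllo", handler=os.path.getsize))   # 6: five characters, six UTF-8 bytes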
numenta/nupic
src/nupic/database/client_jobs_dao.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/database/client_jobs_dao.py#L2274-L2411
def modelInsertAndStart(self, jobID, params, paramsHash, particleHash=None): """ Insert a new unique model (based on params) into the model table in the "running" state. This will return two things: whether or not the model was actually inserted (i.e. that set of params isn't already in the table) and the modelID chosen for that set of params. Even if the model was not inserted by this call (it was already there) the modelID of the one already inserted is returned. Parameters: ---------------------------------------------------------------- jobID: jobID of the job to add models for params: params for this model paramsHash hash of the params, generated by the worker particleHash hash of the particle info (for PSO). If not provided, then paramsHash will be used. retval: (modelID, wasInserted) modelID: the model ID for this set of params wasInserted: True if this call ended up inserting the new model. False if this set of params was already in the model table. """ # Fill in default particleHash if particleHash is None: particleHash = paramsHash # Normalize hashes paramsHash = self._normalizeHash(paramsHash) particleHash = self._normalizeHash(particleHash) def findExactMatchNoRetries(conn): return self._getOneMatchingRowNoRetries( self._models, conn, {'job_id':jobID, '_eng_params_hash':paramsHash, '_eng_particle_hash':particleHash}, ['model_id', '_eng_worker_conn_id']) @g_retrySQL def findExactMatchWithRetries(): with ConnectionFactory.get() as conn: return findExactMatchNoRetries(conn) # Check if the model is already in the models table # # NOTE: with retries of mysql transient failures, we can't always tell # whether the row was already inserted (e.g., comms failure could occur # after insertion into table, but before arrival or response), so the # need to check before attempting to insert a new row # # TODO: if we could be assured that the caller already verified the # model's absence before calling us, we could skip this check here row = findExactMatchWithRetries() if row is not None: return (row[0], False) @g_retrySQL def insertModelWithRetries(): """ NOTE: it's possible that another process on some machine is attempting to insert the same model at the same time as the caller """ with ConnectionFactory.get() as conn: # Create a new job entry query = 'INSERT INTO %s (job_id, params, status, _eng_params_hash, ' \ ' _eng_particle_hash, start_time, _eng_last_update_time, ' \ ' _eng_worker_conn_id) ' \ ' VALUES (%%s, %%s, %%s, %%s, %%s, UTC_TIMESTAMP(), ' \ ' UTC_TIMESTAMP(), %%s) ' \ % (self.modelsTableName,) sqlParams = (jobID, params, self.STATUS_RUNNING, paramsHash, particleHash, self._connectionID) try: numRowsAffected = conn.cursor.execute(query, sqlParams) except Exception, e: # NOTE: We have seen instances where some package in the calling # chain tries to interpret the exception message using unicode. # Since the exception message contains binary data (the hashes), this # can in turn generate a Unicode translation exception. So, we catch # ALL exceptions here and look for the string "Duplicate entry" in # the exception args just in case this happens. 
For example, the # Unicode exception we might get is: # (<type 'exceptions.UnicodeDecodeError'>, UnicodeDecodeError('utf8', "Duplicate entry '1000-?.\x18\xb1\xd3\xe0CO\x05\x8b\xf80\xd7E5\xbb' for key 'job_id'", 25, 26, 'invalid start byte')) # # If it weren't for this possible Unicode translation error, we # could watch for only the exceptions we want, like this: # except pymysql.IntegrityError, e: # if e.args[0] != mysqlerrors.DUP_ENTRY: # raise if "Duplicate entry" not in str(e): raise # NOTE: duplicate entry scenario: however, we can't discern # whether it was inserted by another process or this one, because an # intermittent failure may have caused us to retry self._logger.info('Model insert attempt failed with DUP_ENTRY: ' 'jobID=%s; paramsHash=%s OR particleHash=%s; %r', jobID, paramsHash.encode('hex'), particleHash.encode('hex'), e) else: if numRowsAffected == 1: # NOTE: SELECT LAST_INSERT_ID() returns 0 after re-connection conn.cursor.execute('SELECT LAST_INSERT_ID()') modelID = conn.cursor.fetchall()[0][0] if modelID != 0: return (modelID, True) else: self._logger.warn( 'SELECT LAST_INSERT_ID for model returned 0, implying loss of ' 'connection: jobID=%s; paramsHash=%r; particleHash=%r', jobID, paramsHash, particleHash) else: self._logger.error( 'Attempt to insert model resulted in unexpected numRowsAffected: ' 'expected 1, but got %r; jobID=%s; paramsHash=%r; ' 'particleHash=%r', numRowsAffected, jobID, paramsHash, particleHash) # Look up the model and discern whether it is tagged with our conn id row = findExactMatchNoRetries(conn) if row is not None: (modelID, connectionID) = row return (modelID, connectionID == self._connectionID) # This set of params is already in the table, just get the modelID query = 'SELECT (model_id) FROM %s ' \ ' WHERE job_id=%%s AND ' \ ' (_eng_params_hash=%%s ' \ ' OR _eng_particle_hash=%%s) ' \ ' LIMIT 1 ' \ % (self.modelsTableName,) sqlParams = [jobID, paramsHash, particleHash] numRowsFound = conn.cursor.execute(query, sqlParams) assert numRowsFound == 1, ( 'Model not found: jobID=%s AND (paramsHash=%r OR particleHash=%r); ' 'numRowsFound=%r') % (jobID, paramsHash, particleHash, numRowsFound) (modelID,) = conn.cursor.fetchall()[0] return (modelID, False) return insertModelWithRetries()
[ "def", "modelInsertAndStart", "(", "self", ",", "jobID", ",", "params", ",", "paramsHash", ",", "particleHash", "=", "None", ")", ":", "# Fill in default particleHash", "if", "particleHash", "is", "None", ":", "particleHash", "=", "paramsHash", "# Normalize hashes", "paramsHash", "=", "self", ".", "_normalizeHash", "(", "paramsHash", ")", "particleHash", "=", "self", ".", "_normalizeHash", "(", "particleHash", ")", "def", "findExactMatchNoRetries", "(", "conn", ")", ":", "return", "self", ".", "_getOneMatchingRowNoRetries", "(", "self", ".", "_models", ",", "conn", ",", "{", "'job_id'", ":", "jobID", ",", "'_eng_params_hash'", ":", "paramsHash", ",", "'_eng_particle_hash'", ":", "particleHash", "}", ",", "[", "'model_id'", ",", "'_eng_worker_conn_id'", "]", ")", "@", "g_retrySQL", "def", "findExactMatchWithRetries", "(", ")", ":", "with", "ConnectionFactory", ".", "get", "(", ")", "as", "conn", ":", "return", "findExactMatchNoRetries", "(", "conn", ")", "# Check if the model is already in the models table", "#", "# NOTE: with retries of mysql transient failures, we can't always tell", "# whether the row was already inserted (e.g., comms failure could occur", "# after insertion into table, but before arrival or response), so the", "# need to check before attempting to insert a new row", "#", "# TODO: if we could be assured that the caller already verified the", "# model's absence before calling us, we could skip this check here", "row", "=", "findExactMatchWithRetries", "(", ")", "if", "row", "is", "not", "None", ":", "return", "(", "row", "[", "0", "]", ",", "False", ")", "@", "g_retrySQL", "def", "insertModelWithRetries", "(", ")", ":", "\"\"\" NOTE: it's possible that another process on some machine is attempting\n to insert the same model at the same time as the caller \"\"\"", "with", "ConnectionFactory", ".", "get", "(", ")", "as", "conn", ":", "# Create a new job entry", "query", "=", "'INSERT INTO %s (job_id, params, status, _eng_params_hash, '", "' _eng_particle_hash, start_time, _eng_last_update_time, '", "' _eng_worker_conn_id) '", "' VALUES (%%s, %%s, %%s, %%s, %%s, UTC_TIMESTAMP(), '", "' UTC_TIMESTAMP(), %%s) '", "%", "(", "self", ".", "modelsTableName", ",", ")", "sqlParams", "=", "(", "jobID", ",", "params", ",", "self", ".", "STATUS_RUNNING", ",", "paramsHash", ",", "particleHash", ",", "self", ".", "_connectionID", ")", "try", ":", "numRowsAffected", "=", "conn", ".", "cursor", ".", "execute", "(", "query", ",", "sqlParams", ")", "except", "Exception", ",", "e", ":", "# NOTE: We have seen instances where some package in the calling", "# chain tries to interpret the exception message using unicode.", "# Since the exception message contains binary data (the hashes), this", "# can in turn generate a Unicode translation exception. So, we catch", "# ALL exceptions here and look for the string \"Duplicate entry\" in", "# the exception args just in case this happens. 
For example, the", "# Unicode exception we might get is:", "# (<type 'exceptions.UnicodeDecodeError'>, UnicodeDecodeError('utf8', \"Duplicate entry '1000-?.\\x18\\xb1\\xd3\\xe0CO\\x05\\x8b\\xf80\\xd7E5\\xbb' for key 'job_id'\", 25, 26, 'invalid start byte'))", "#", "# If it weren't for this possible Unicode translation error, we", "# could watch for only the exceptions we want, like this:", "# except pymysql.IntegrityError, e:", "# if e.args[0] != mysqlerrors.DUP_ENTRY:", "# raise", "if", "\"Duplicate entry\"", "not", "in", "str", "(", "e", ")", ":", "raise", "# NOTE: duplicate entry scenario: however, we can't discern", "# whether it was inserted by another process or this one, because an", "# intermittent failure may have caused us to retry", "self", ".", "_logger", ".", "info", "(", "'Model insert attempt failed with DUP_ENTRY: '", "'jobID=%s; paramsHash=%s OR particleHash=%s; %r'", ",", "jobID", ",", "paramsHash", ".", "encode", "(", "'hex'", ")", ",", "particleHash", ".", "encode", "(", "'hex'", ")", ",", "e", ")", "else", ":", "if", "numRowsAffected", "==", "1", ":", "# NOTE: SELECT LAST_INSERT_ID() returns 0 after re-connection", "conn", ".", "cursor", ".", "execute", "(", "'SELECT LAST_INSERT_ID()'", ")", "modelID", "=", "conn", ".", "cursor", ".", "fetchall", "(", ")", "[", "0", "]", "[", "0", "]", "if", "modelID", "!=", "0", ":", "return", "(", "modelID", ",", "True", ")", "else", ":", "self", ".", "_logger", ".", "warn", "(", "'SELECT LAST_INSERT_ID for model returned 0, implying loss of '", "'connection: jobID=%s; paramsHash=%r; particleHash=%r'", ",", "jobID", ",", "paramsHash", ",", "particleHash", ")", "else", ":", "self", ".", "_logger", ".", "error", "(", "'Attempt to insert model resulted in unexpected numRowsAffected: '", "'expected 1, but got %r; jobID=%s; paramsHash=%r; '", "'particleHash=%r'", ",", "numRowsAffected", ",", "jobID", ",", "paramsHash", ",", "particleHash", ")", "# Look up the model and discern whether it is tagged with our conn id", "row", "=", "findExactMatchNoRetries", "(", "conn", ")", "if", "row", "is", "not", "None", ":", "(", "modelID", ",", "connectionID", ")", "=", "row", "return", "(", "modelID", ",", "connectionID", "==", "self", ".", "_connectionID", ")", "# This set of params is already in the table, just get the modelID", "query", "=", "'SELECT (model_id) FROM %s '", "' WHERE job_id=%%s AND '", "' (_eng_params_hash=%%s '", "' OR _eng_particle_hash=%%s) '", "' LIMIT 1 '", "%", "(", "self", ".", "modelsTableName", ",", ")", "sqlParams", "=", "[", "jobID", ",", "paramsHash", ",", "particleHash", "]", "numRowsFound", "=", "conn", ".", "cursor", ".", "execute", "(", "query", ",", "sqlParams", ")", "assert", "numRowsFound", "==", "1", ",", "(", "'Model not found: jobID=%s AND (paramsHash=%r OR particleHash=%r); '", "'numRowsFound=%r'", ")", "%", "(", "jobID", ",", "paramsHash", ",", "particleHash", ",", "numRowsFound", ")", "(", "modelID", ",", ")", "=", "conn", ".", "cursor", ".", "fetchall", "(", ")", "[", "0", "]", "return", "(", "modelID", ",", "False", ")", "return", "insertModelWithRetries", "(", ")" ]
Insert a new unique model (based on params) into the model table in the "running" state. This will return two things: whether or not the model was actually inserted (i.e. that set of params isn't already in the table) and the modelID chosen for that set of params. Even if the model was not inserted by this call (it was already there) the modelID of the one already inserted is returned. Parameters: ---------------------------------------------------------------- jobID: jobID of the job to add models for params: params for this model paramsHash hash of the params, generated by the worker particleHash hash of the particle info (for PSO). If not provided, then paramsHash will be used. retval: (modelID, wasInserted) modelID: the model ID for this set of params wasInserted: True if this call ended up inserting the new model. False if this set of params was already in the model table.
[ "Insert", "a", "new", "unique", "model", "(", "based", "on", "params", ")", "into", "the", "model", "table", "in", "the", "running", "state", ".", "This", "will", "return", "two", "things", ":", "whether", "or", "not", "the", "model", "was", "actually", "inserted", "(", "i", ".", "e", ".", "that", "set", "of", "params", "isn", "t", "already", "in", "the", "table", ")", "and", "the", "modelID", "chosen", "for", "that", "set", "of", "params", ".", "Even", "if", "the", "model", "was", "not", "inserted", "by", "this", "call", "(", "it", "was", "already", "there", ")", "the", "modelID", "of", "the", "one", "already", "inserted", "is", "returned", "." ]
python
valid
YosaiProject/yosai
yosai/core/logging/formatters.py
https://github.com/YosaiProject/yosai/blob/7f96aa6b837ceae9bf3d7387cd7e35f5ab032575/yosai/core/logging/formatters.py#L37-L48
def extra_from_record(self, record): """Returns `extra` dict you passed to logger. The `extra` keyword argument is used to populate the `__dict__` of the `LogRecord`. """ return { attr_name: record.__dict__[attr_name] for attr_name in record.__dict__ if attr_name not in BUILTIN_ATTRS }
[ "def", "extra_from_record", "(", "self", ",", "record", ")", ":", "return", "{", "attr_name", ":", "record", ".", "__dict__", "[", "attr_name", "]", "for", "attr_name", "in", "record", ".", "__dict__", "if", "attr_name", "not", "in", "BUILTIN_ATTRS", "}" ]
Returns `extra` dict you passed to logger. The `extra` keyword argument is used to populate the `__dict__` of the `LogRecord`.
[ "Returns", "extra", "dict", "you", "passed", "to", "logger", "." ]
python
train
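An illustrative sketch of how the `extra` dict can be recovered from a LogRecord as in `extra_from_record` above. The BUILTIN_ATTRS set here is derived from a blank record rather than copied from yosai, so treat it as an approximation.

import logging

# Everything a bare LogRecord carries by default; later additions must have come via extra=.
BUILTIN_ATTRS = set(vars(logging.LogRecord("", 0, "", 0, "", (), None))) | {"message", "asctime"}

def extra_from_record(record):
    return {k: v for k, v in record.__dict__.items() if k not in BUILTIN_ATTRS}

record = logging.LogRecord("demo", logging.INFO, __file__, 1, "hi", (), None)
record.user_id = 42   # what logger.info("hi", extra={"user_id": 42}) would attach
print(extra_from_record(record))   # {'user_id': 42}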
Peter-Slump/python-keycloak-client
src/keycloak/openid_connect.py
https://github.com/Peter-Slump/python-keycloak-client/blob/379ae58f3c65892327b0c98c06d4982aa83f357e/src/keycloak/openid_connect.py#L276-L293
def _token_request(self, grant_type, **kwargs): """ Do the actual call to the token end-point. :param grant_type: :param kwargs: See invoking methods. :return: """ payload = { 'grant_type': grant_type, 'client_id': self._client_id, 'client_secret': self._client_secret } payload.update(**kwargs) return self._realm.client.post(self.get_url('token_endpoint'), data=payload)
[ "def", "_token_request", "(", "self", ",", "grant_type", ",", "*", "*", "kwargs", ")", ":", "payload", "=", "{", "'grant_type'", ":", "grant_type", ",", "'client_id'", ":", "self", ".", "_client_id", ",", "'client_secret'", ":", "self", ".", "_client_secret", "}", "payload", ".", "update", "(", "*", "*", "kwargs", ")", "return", "self", ".", "_realm", ".", "client", ".", "post", "(", "self", ".", "get_url", "(", "'token_endpoint'", ")", ",", "data", "=", "payload", ")" ]
Do the actual call to the token end-point. :param grant_type: :param kwargs: See invoking methods. :return:
[ "Do", "the", "actual", "call", "to", "the", "token", "end", "-", "point", "." ]
python
train
lbryio/aioupnp
aioupnp/upnp.py
https://github.com/lbryio/aioupnp/blob/a404269d91cff5358bcffb8067b0fd1d9c6842d3/aioupnp/upnp.py#L144-L152
async def delete_port_mapping(self, external_port: int, protocol: str) -> None: """ :param external_port: (int) external port to listen on :param protocol: (str) 'UDP' | 'TCP' :return: None """ return await self.gateway.commands.DeletePortMapping( NewRemoteHost="", NewExternalPort=external_port, NewProtocol=protocol )
[ "async", "def", "delete_port_mapping", "(", "self", ",", "external_port", ":", "int", ",", "protocol", ":", "str", ")", "->", "None", ":", "return", "await", "self", ".", "gateway", ".", "commands", ".", "DeletePortMapping", "(", "NewRemoteHost", "=", "\"\"", ",", "NewExternalPort", "=", "external_port", ",", "NewProtocol", "=", "protocol", ")" ]
:param external_port: (int) external port to listen on :param protocol: (str) 'UDP' | 'TCP' :return: None
[ ":", "param", "external_port", ":", "(", "int", ")", "external", "port", "to", "listen", "on", ":", "param", "protocol", ":", "(", "str", ")", "UDP", "|", "TCP", ":", "return", ":", "None" ]
python
train
StackStorm/pybind
pybind/slxos/v17s_1_02/interface/port_channel/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/interface/port_channel/__init__.py#L447-L471
def _set_minimum_links(self, v, load=False): """ Setter method for minimum_links, mapped from YANG variable /interface/port_channel/minimum_links (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_minimum_links is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_minimum_links() directly. YANG Description: The least number of operationally 'UP' links to indicate port-channel being UP. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 64']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="minimum-links", rest_name="minimum-links", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'interface_po', u'info': u'Least number of operationally UP links to declare \nport-channel UP', u'display-when': u'not(../insight/insight-enable)'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='uint32', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """minimum_links must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 64']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="minimum-links", rest_name="minimum-links", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'interface_po', u'info': u'Least number of operationally UP links to declare \nport-channel UP', u'display-when': u'not(../insight/insight-enable)'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='uint32', is_config=True)""", }) self.__minimum_links = t if hasattr(self, '_set'): self._set()
[ "def", "_set_minimum_links", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "RestrictedClassType", "(", "base_type", "=", "RestrictedClassType", "(", "base_type", "=", "long", ",", "restriction_dict", "=", "{", "'range'", ":", "[", "'0..4294967295'", "]", "}", ",", "int_size", "=", "32", ")", ",", "restriction_dict", "=", "{", "'range'", ":", "[", "u'1 .. 64'", "]", "}", ")", ",", "default", "=", "RestrictedClassType", "(", "base_type", "=", "long", ",", "restriction_dict", "=", "{", "'range'", ":", "[", "'0..4294967295'", "]", "}", ",", "int_size", "=", "32", ")", "(", "1", ")", ",", "is_leaf", "=", "True", ",", "yang_name", "=", "\"minimum-links\"", ",", "rest_name", "=", "\"minimum-links\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'callpoint'", ":", "u'interface_po'", ",", "u'info'", ":", "u'Least number of operationally UP links to declare \\nport-channel UP'", ",", "u'display-when'", ":", "u'not(../insight/insight-enable)'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-interface'", ",", "defining_module", "=", "'brocade-interface'", ",", "yang_type", "=", "'uint32'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"minimum_links must be of a type compatible with uint32\"\"\"", ",", "'defined-type'", ":", "\"uint32\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 64']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name=\"minimum-links\", rest_name=\"minimum-links\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'interface_po', u'info': u'Least number of operationally UP links to declare \\nport-channel UP', u'display-when': u'not(../insight/insight-enable)'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='uint32', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__minimum_links", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for minimum_links, mapped from YANG variable /interface/port_channel/minimum_links (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_minimum_links is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_minimum_links() directly. YANG Description: The least number of operationally 'UP' links to indicate port-channel being UP.
[ "Setter", "method", "for", "minimum_links", "mapped", "from", "YANG", "variable", "/", "interface", "/", "port_channel", "/", "minimum_links", "(", "uint32", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_minimum_links", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_minimum_links", "()", "directly", "." ]
python
train
titusjan/argos
argos/config/floatcti.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/config/floatcti.py#L256-L260
def createEditor(self, delegate, parent, option): """ Creates a FloatCtiEditor. For the parameters see the AbstractCti constructor documentation. """ return SnFloatCtiEditor(self, delegate, self.precision, parent=parent)
[ "def", "createEditor", "(", "self", ",", "delegate", ",", "parent", ",", "option", ")", ":", "return", "SnFloatCtiEditor", "(", "self", ",", "delegate", ",", "self", ".", "precision", ",", "parent", "=", "parent", ")" ]
Creates a FloatCtiEditor. For the parameters see the AbstractCti constructor documentation.
[ "Creates", "a", "FloatCtiEditor", ".", "For", "the", "parameters", "see", "the", "AbstractCti", "constructor", "documentation", "." ]
python
train
tklovett/PyShirtsIO
interactive_console.py
https://github.com/tklovett/PyShirtsIO/blob/ff2f2d3b5e4ab2813abbce8545b27319c6af0def/interactive_console.py#L8-L25
def new_user(yaml_path): ''' Return the consumer and oauth tokens with three-legged OAuth process and save in a yaml file in the user's home directory. ''' print 'Retrieve API Key from https://www.shirts.io/accounts/api_console/' api_key = raw_input('Shirts.io API Key: ') tokens = { 'api_key': api_key, } yaml_file = open(yaml_path, 'w+') yaml.dump(tokens, yaml_file, indent=2) yaml_file.close() return tokens
[ "def", "new_user", "(", "yaml_path", ")", ":", "print", "'Retrieve API Key from https://www.shirts.io/accounts/api_console/'", "api_key", "=", "raw_input", "(", "'Shirts.io API Key: '", ")", "tokens", "=", "{", "'api_key'", ":", "api_key", ",", "}", "yaml_file", "=", "open", "(", "yaml_path", ",", "'w+'", ")", "yaml", ".", "dump", "(", "tokens", ",", "yaml_file", ",", "indent", "=", "2", ")", "yaml_file", ".", "close", "(", ")", "return", "tokens" ]
Return the consumer and oauth tokens with three-legged OAuth process and save in a yaml file in the user's home directory.
[ "Return", "the", "consumer", "and", "oauth", "tokens", "with", "three", "-", "legged", "OAuth", "process", "and", "save", "in", "a", "yaml", "file", "in", "the", "user", "s", "home", "directory", "." ]
python
valid
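The new_user helper above is Python 2 code (print statement, raw_input) and, despite the docstring's mention of three-legged OAuth, it only collects an API key. A minimal Python 3 sketch of the same idea, assuming PyYAML is available; the file path and key name are illustrative:

import os
import yaml  # PyYAML, assumed to be installed

def save_api_key(yaml_path):
    # Python 3 replacement for raw_input(); prompts the user for the key
    api_key = input('Shirts.io API Key: ')
    tokens = {'api_key': api_key}
    with open(yaml_path, 'w') as fh:
        yaml.dump(tokens, fh, indent=2)
    return tokens

# tokens = save_api_key(os.path.expanduser('~/.shirtsio.yaml'))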
pvlib/pvlib-python
pvlib/forecast.py
https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/forecast.py#L539-L569
def cloud_cover_to_irradiance(self, cloud_cover, how='clearsky_scaling', **kwargs): """ Convert cloud cover to irradiance. A wrapper method. Parameters ---------- cloud_cover : Series how : str, default 'clearsky_scaling' Selects the method for conversion. Can be one of clearsky_scaling or liujordan. **kwargs Passed to the selected method. Returns ------- irradiance : DataFrame Columns include ghi, dni, dhi """ how = how.lower() if how == 'clearsky_scaling': irrads = self.cloud_cover_to_irradiance_clearsky_scaling( cloud_cover, **kwargs) elif how == 'liujordan': irrads = self.cloud_cover_to_irradiance_liujordan( cloud_cover, **kwargs) else: raise ValueError('invalid how argument') return irrads
[ "def", "cloud_cover_to_irradiance", "(", "self", ",", "cloud_cover", ",", "how", "=", "'clearsky_scaling'", ",", "*", "*", "kwargs", ")", ":", "how", "=", "how", ".", "lower", "(", ")", "if", "how", "==", "'clearsky_scaling'", ":", "irrads", "=", "self", ".", "cloud_cover_to_irradiance_clearsky_scaling", "(", "cloud_cover", ",", "*", "*", "kwargs", ")", "elif", "how", "==", "'liujordan'", ":", "irrads", "=", "self", ".", "cloud_cover_to_irradiance_liujordan", "(", "cloud_cover", ",", "*", "*", "kwargs", ")", "else", ":", "raise", "ValueError", "(", "'invalid how argument'", ")", "return", "irrads" ]
Convert cloud cover to irradiance. A wrapper method. Parameters ---------- cloud_cover : Series how : str, default 'clearsky_scaling' Selects the method for conversion. Can be one of clearsky_scaling or liujordan. **kwargs Passed to the selected method. Returns ------- irradiance : DataFrame Columns include ghi, dni, dhi
[ "Convert", "cloud", "cover", "to", "irradiance", ".", "A", "wrapper", "method", "." ]
python
train
googleapis/oauth2client
oauth2client/contrib/appengine.py
https://github.com/googleapis/oauth2client/blob/50d20532a748f18e53f7d24ccbe6647132c979a9/oauth2client/contrib/appengine.py#L96-L116
def xsrf_secret_key(): """Return the secret key for use for XSRF protection. If the Site entity does not have a secret key, this method will also create one and persist it. Returns: The secret key. """ secret = memcache.get(XSRF_MEMCACHE_ID, namespace=OAUTH2CLIENT_NAMESPACE) if not secret: # Load the one and only instance of SiteXsrfSecretKey. model = SiteXsrfSecretKey.get_or_insert(key_name='site') if not model.secret: model.secret = _generate_new_xsrf_secret_key() model.put() secret = model.secret memcache.add(XSRF_MEMCACHE_ID, secret, namespace=OAUTH2CLIENT_NAMESPACE) return str(secret)
[ "def", "xsrf_secret_key", "(", ")", ":", "secret", "=", "memcache", ".", "get", "(", "XSRF_MEMCACHE_ID", ",", "namespace", "=", "OAUTH2CLIENT_NAMESPACE", ")", "if", "not", "secret", ":", "# Load the one and only instance of SiteXsrfSecretKey.", "model", "=", "SiteXsrfSecretKey", ".", "get_or_insert", "(", "key_name", "=", "'site'", ")", "if", "not", "model", ".", "secret", ":", "model", ".", "secret", "=", "_generate_new_xsrf_secret_key", "(", ")", "model", ".", "put", "(", ")", "secret", "=", "model", ".", "secret", "memcache", ".", "add", "(", "XSRF_MEMCACHE_ID", ",", "secret", ",", "namespace", "=", "OAUTH2CLIENT_NAMESPACE", ")", "return", "str", "(", "secret", ")" ]
Return the secret key for use for XSRF protection. If the Site entity does not have a secret key, this method will also create one and persist it. Returns: The secret key.
[ "Return", "the", "secret", "key", "for", "use", "for", "XSRF", "protection", "." ]
python
valid
yuma-m/pychord
pychord/chord.py
https://github.com/yuma-m/pychord/blob/4aa39189082daae76e36a2701890f91776d86b47/pychord/chord.py#L130-L143
def as_chord(chord): """ convert from str to Chord instance if input is str :type chord: str|pychord.Chord :param chord: Chord name or Chord instance :rtype: pychord.Chord :return: Chord instance """ if isinstance(chord, Chord): return chord elif isinstance(chord, str): return Chord(chord) else: raise TypeError("input type should be str or Chord instance.")
[ "def", "as_chord", "(", "chord", ")", ":", "if", "isinstance", "(", "chord", ",", "Chord", ")", ":", "return", "chord", "elif", "isinstance", "(", "chord", ",", "str", ")", ":", "return", "Chord", "(", "chord", ")", "else", ":", "raise", "TypeError", "(", "\"input type should be str or Chord instance.\"", ")" ]
convert from str to Chord instance if input is str :type chord: str|pychord.Chord :param chord: Chord name or Chord instance :rtype: pychord.Chord :return: Chord instance
[ "convert", "from", "str", "to", "Chord", "instance", "if", "input", "is", "str" ]
python
train
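The as_chord helper is a small type-dispatch pattern: accept either a chord name or an already-built instance and always hand back an instance. A self-contained sketch with a stand-in class (not the real pychord.Chord):

class Chord:
    def __init__(self, name):
        self.name = name

def as_chord(chord):
    if isinstance(chord, Chord):
        return chord            # already an instance, pass it through
    elif isinstance(chord, str):
        return Chord(chord)     # build an instance from the name
    else:
        raise TypeError("input type should be str or Chord instance.")

print(as_chord("Am7").name)          # Am7
print(as_chord(Chord("C")).name)     # C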
eternnoir/pyTelegramBotAPI
telebot/__init__.py
https://github.com/eternnoir/pyTelegramBotAPI/blob/47b53b88123097f1b9562a6cd5d4e080b86185d1/telebot/__init__.py#L879-L903
def promote_chat_member(self, chat_id, user_id, can_change_info=None, can_post_messages=None, can_edit_messages=None, can_delete_messages=None, can_invite_users=None, can_restrict_members=None, can_pin_messages=None, can_promote_members=None): """ Use this method to promote or demote a user in a supergroup or a channel. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. Pass False for all boolean parameters to demote a user. Returns True on success. :param chat_id: Unique identifier for the target chat or username of the target channel ( in the format @channelusername) :param user_id: Int : Unique identifier of the target user :param can_change_info: Bool: Pass True, if the administrator can change chat title, photo and other settings :param can_post_messages: Bool : Pass True, if the administrator can create channel posts, channels only :param can_edit_messages: Bool : Pass True, if the administrator can edit messages of other users, channels only :param can_delete_messages: Bool : Pass True, if the administrator can delete messages of other users :param can_invite_users: Bool : Pass True, if the administrator can invite new users to the chat :param can_restrict_members: Bool: Pass True, if the administrator can restrict, ban or unban chat members :param can_pin_messages: Bool: Pass True, if the administrator can pin messages, supergroups only :param can_promote_members: Bool: Pass True, if the administrator can add new administrators with a subset of his own privileges or demote administrators that he has promoted, directly or indirectly (promoted by administrators that were appointed by him) :return: """ return apihelper.promote_chat_member(self.token, chat_id, user_id, can_change_info, can_post_messages, can_edit_messages, can_delete_messages, can_invite_users, can_restrict_members, can_pin_messages, can_promote_members)
[ "def", "promote_chat_member", "(", "self", ",", "chat_id", ",", "user_id", ",", "can_change_info", "=", "None", ",", "can_post_messages", "=", "None", ",", "can_edit_messages", "=", "None", ",", "can_delete_messages", "=", "None", ",", "can_invite_users", "=", "None", ",", "can_restrict_members", "=", "None", ",", "can_pin_messages", "=", "None", ",", "can_promote_members", "=", "None", ")", ":", "return", "apihelper", ".", "promote_chat_member", "(", "self", ".", "token", ",", "chat_id", ",", "user_id", ",", "can_change_info", ",", "can_post_messages", ",", "can_edit_messages", ",", "can_delete_messages", ",", "can_invite_users", ",", "can_restrict_members", ",", "can_pin_messages", ",", "can_promote_members", ")" ]
Use this method to promote or demote a user in a supergroup or a channel. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. Pass False for all boolean parameters to demote a user. Returns True on success. :param chat_id: Unique identifier for the target chat or username of the target channel ( in the format @channelusername) :param user_id: Int : Unique identifier of the target user :param can_change_info: Bool: Pass True, if the administrator can change chat title, photo and other settings :param can_post_messages: Bool : Pass True, if the administrator can create channel posts, channels only :param can_edit_messages: Bool : Pass True, if the administrator can edit messages of other users, channels only :param can_delete_messages: Bool : Pass True, if the administrator can delete messages of other users :param can_invite_users: Bool : Pass True, if the administrator can invite new users to the chat :param can_restrict_members: Bool: Pass True, if the administrator can restrict, ban or unban chat members :param can_pin_messages: Bool: Pass True, if the administrator can pin messages, supergroups only :param can_promote_members: Bool: Pass True, if the administrator can add new administrators with a subset of his own privileges or demote administrators that he has promoted, directly or indirectly (promoted by administrators that were appointed by him) :return:
[ "Use", "this", "method", "to", "promote", "or", "demote", "a", "user", "in", "a", "supergroup", "or", "a", "channel", ".", "The", "bot", "must", "be", "an", "administrator", "in", "the", "chat", "for", "this", "to", "work", "and", "must", "have", "the", "appropriate", "admin", "rights", ".", "Pass", "False", "for", "all", "boolean", "parameters", "to", "demote", "a", "user", ".", "Returns", "True", "on", "success", ".", ":", "param", "chat_id", ":", "Unique", "identifier", "for", "the", "target", "chat", "or", "username", "of", "the", "target", "channel", "(", "in", "the", "format" ]
python
train
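A hypothetical usage sketch for the wrapper above; the token, chat id and user id are placeholders, and the call only succeeds if the bot is already an administrator of the target chat:

import telebot

bot = telebot.TeleBot("BOT_TOKEN")  # placeholder token
bot.promote_chat_member(chat_id=-1001234567890, user_id=123456789,
                        can_delete_messages=True, can_pin_messages=True)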
emilydolson/avida-spatial-tools
avidaspatial/utils.py
https://github.com/emilydolson/avida-spatial-tools/blob/7beb0166ccefad5fa722215b030ac2a53d62b59e/avidaspatial/utils.py#L382-L393
def initialize_grid(world_size, inner): """ Creates an empty grid (2d list) with the dimensions specified in world_size. Each element is initialized to the inner argument. """ data = [] for i in range(world_size[1]): data.append([]) for j in range(world_size[0]): data[i].append(deepcopy(inner)) return data
[ "def", "initialize_grid", "(", "world_size", ",", "inner", ")", ":", "data", "=", "[", "]", "for", "i", "in", "range", "(", "world_size", "[", "1", "]", ")", ":", "data", ".", "append", "(", "[", "]", ")", "for", "j", "in", "range", "(", "world_size", "[", "0", "]", ")", ":", "data", "[", "i", "]", ".", "append", "(", "deepcopy", "(", "inner", ")", ")", "return", "data" ]
Creates an empty grid (2d list) with the dimensions specified in world_size. Each element is initialized to the inner argument.
[ "Creates", "an", "empty", "grid", "(", "2d", "list", ")", "with", "the", "dimensions", "specified", "in", "world_size", ".", "Each", "element", "is", "initialized", "to", "the", "inner", "argument", "." ]
python
train
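The deepcopy(inner) call is the important detail: every cell gets its own copy of the template value, so mutating one cell cannot leak into its neighbours. A self-contained sketch of the helper plus a quick check:

from copy import deepcopy

def initialize_grid(world_size, inner):
    data = []
    for i in range(world_size[1]):
        data.append([])
        for j in range(world_size[0]):
            data[i].append(deepcopy(inner))
    return data

grid = initialize_grid((3, 2), inner=[])
grid[0][0].append("resource")
print(grid[0][0], grid[0][1])  # ['resource'] [] -- cells stay independent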
rootpy/rootpy
rootpy/decorators.py
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/decorators.py#L187-L200
def sync(lock): """ A synchronization decorator """ def sync(f): @wraps(f) def new_function(*args, **kwargs): lock.acquire() try: return f(*args, **kwargs) finally: lock.release() return new_function return sync
[ "def", "sync", "(", "lock", ")", ":", "def", "sync", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "new_function", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "lock", ".", "acquire", "(", ")", "try", ":", "return", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "finally", ":", "lock", ".", "release", "(", ")", "return", "new_function", "return", "sync" ]
A synchronization decorator
[ "A", "synchronization", "decorator" ]
python
train
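A self-contained sketch of the same decorator protecting a counter shared between threads; the names below are illustrative and not part of rootpy:

import threading
from functools import wraps

def sync(lock):
    def decorator(f):
        @wraps(f)
        def new_function(*args, **kwargs):
            lock.acquire()
            try:
                return f(*args, **kwargs)
            finally:
                lock.release()
        return new_function
    return decorator

counter_lock = threading.Lock()
counter = {"value": 0}

@sync(counter_lock)
def increment():
    counter["value"] += 1

threads = [threading.Thread(target=increment) for _ in range(10)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(counter["value"])  # 10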
QualiSystems/cloudshell-networking-devices
cloudshell/devices/runners/state_runner.py
https://github.com/QualiSystems/cloudshell-networking-devices/blob/009aab33edb30035b52fe10dbb91db61c95ba4d9/cloudshell/devices/runners/state_runner.py#L26-L46
def health_check(self): """ Verify that device is accessible over CLI by sending ENTER for cli session """ api_response = 'Online' result = 'Health check on resource {}'.format(self._resource_name) try: health_check_flow = RunCommandFlow(self.cli_handler, self._logger) health_check_flow.execute_flow() result += ' passed.' except Exception as e: self._logger.exception(e) api_response = 'Error' result += ' failed.' try: self._api.SetResourceLiveStatus(self._resource_name, api_response, result) except Exception: self._logger.error('Cannot update {} resource status on portal'.format(self._resource_name)) return result
[ "def", "health_check", "(", "self", ")", ":", "api_response", "=", "'Online'", "result", "=", "'Health check on resource {}'", ".", "format", "(", "self", ".", "_resource_name", ")", "try", ":", "health_check_flow", "=", "RunCommandFlow", "(", "self", ".", "cli_handler", ",", "self", ".", "_logger", ")", "health_check_flow", ".", "execute_flow", "(", ")", "result", "+=", "' passed.'", "except", "Exception", "as", "e", ":", "self", ".", "_logger", ".", "exception", "(", "e", ")", "api_response", "=", "'Error'", "result", "+=", "' failed.'", "try", ":", "self", ".", "_api", ".", "SetResourceLiveStatus", "(", "self", ".", "_resource_name", ",", "api_response", ",", "result", ")", "except", "Exception", ":", "self", ".", "_logger", ".", "error", "(", "'Cannot update {} resource status on portal'", ".", "format", "(", "self", ".", "_resource_name", ")", ")", "return", "result" ]
Verify that device is accessible over CLI by sending ENTER for cli session
[ "Verify", "that", "device", "is", "accessible", "over", "CLI", "by", "sending", "ENTER", "for", "cli", "session" ]
python
train
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_gimbal.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_gimbal.py#L58-L74
def cmd_gimbal_mode(self, args): '''control gimbal mode''' if len(args) != 1: print("usage: gimbal mode <GPS|MAVLink>") return if args[0].upper() == 'GPS': mode = mavutil.mavlink.MAV_MOUNT_MODE_GPS_POINT elif args[0].upper() == 'MAVLINK': mode = mavutil.mavlink.MAV_MOUNT_MODE_MAVLINK_TARGETING elif args[0].upper() == 'RC': mode = mavutil.mavlink.MAV_MOUNT_MODE_RC_TARGETING else: print("Unsupported mode %s" % args[0]) self.master.mav.mount_configure_send(self.target_system, self.target_component, mode, 1, 1, 1)
[ "def", "cmd_gimbal_mode", "(", "self", ",", "args", ")", ":", "if", "len", "(", "args", ")", "!=", "1", ":", "print", "(", "\"usage: gimbal mode <GPS|MAVLink>\"", ")", "return", "if", "args", "[", "0", "]", ".", "upper", "(", ")", "==", "'GPS'", ":", "mode", "=", "mavutil", ".", "mavlink", ".", "MAV_MOUNT_MODE_GPS_POINT", "elif", "args", "[", "0", "]", ".", "upper", "(", ")", "==", "'MAVLINK'", ":", "mode", "=", "mavutil", ".", "mavlink", ".", "MAV_MOUNT_MODE_MAVLINK_TARGETING", "elif", "args", "[", "0", "]", ".", "upper", "(", ")", "==", "'RC'", ":", "mode", "=", "mavutil", ".", "mavlink", ".", "MAV_MOUNT_MODE_RC_TARGETING", "else", ":", "print", "(", "\"Unsupported mode %s\"", "%", "args", "[", "0", "]", ")", "self", ".", "master", ".", "mav", ".", "mount_configure_send", "(", "self", ".", "target_system", ",", "self", ".", "target_component", ",", "mode", ",", "1", ",", "1", ",", "1", ")" ]
control gimbal mode
[ "control", "gimbal", "mode" ]
python
train
jxtech/wechatpy
wechatpy/client/api/merchant/__init__.py
https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/client/api/merchant/__init__.py#L185-L197
def update_express(self, template_id, delivery_template): """ 增加邮费模板 :param template_id: 邮费模板ID :param delivery_template: 邮费模板信息(字段说明详见增加邮费模板) :return: 返回的 JSON 数据包 """ delivery_template['template_id'] = template_id return self._post( 'merchant/express/update', data=delivery_template )
[ "def", "update_express", "(", "self", ",", "template_id", ",", "delivery_template", ")", ":", "delivery_template", "[", "'template_id'", "]", "=", "template_id", "return", "self", ".", "_post", "(", "'merchant/express/update'", ",", "data", "=", "delivery_template", ")" ]
增加邮费模板 :param template_id: 邮费模板ID :param delivery_template: 邮费模板信息(字段说明详见增加邮费模板) :return: 返回的 JSON 数据包
[ "增加邮费模板" ]
python
train
intel-analytics/BigDL
pyspark/bigdl/util/common.py
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/util/common.py#L306-L345
def from_ndarray(cls, features, labels, bigdl_type="float"): """ Convert a ndarray of features and labels to Sample, which would be used in Java side. :param features: an ndarray or a list of ndarrays :param labels: an ndarray or a list of ndarrays or a scalar :param bigdl_type: "double" or "float" >>> import numpy as np >>> from bigdl.util.common import callBigDlFunc >>> from numpy.testing import assert_allclose >>> np.random.seed(123) >>> sample = Sample.from_ndarray(np.random.random((2,3)), np.random.random((2,3))) >>> sample_back = callBigDlFunc("float", "testSample", sample) >>> assert_allclose(sample.features[0].to_ndarray(), sample_back.features[0].to_ndarray()) >>> assert_allclose(sample.label.to_ndarray(), sample_back.label.to_ndarray()) >>> expected_feature_storage = np.array(([[0.69646919, 0.28613934, 0.22685145], [0.55131477, 0.71946895, 0.42310646]])) >>> expected_feature_shape = np.array([2, 3]) >>> expected_label_storage = np.array(([[0.98076421, 0.68482971, 0.48093191], [0.39211753, 0.343178, 0.72904968]])) >>> expected_label_shape = np.array([2, 3]) >>> assert_allclose(sample.features[0].storage, expected_feature_storage, rtol=1e-6, atol=1e-6) >>> assert_allclose(sample.features[0].shape, expected_feature_shape) >>> assert_allclose(sample.labels[0].storage, expected_label_storage, rtol=1e-6, atol=1e-6) >>> assert_allclose(sample.labels[0].shape, expected_label_shape) """ if isinstance(features, np.ndarray): features = [features] else: assert all(isinstance(feature, np.ndarray) for feature in features), \ "features should be a list of np.ndarray, not %s" % type(features) if np.isscalar(labels): # in case labels is a scalar. labels = [np.array(labels)] elif isinstance(labels, np.ndarray): labels = [labels] else: assert all(isinstance(label, np.ndarray) for label in labels), \ "labels should be a list of np.ndarray, not %s" % type(labels) return cls( features=[JTensor.from_ndarray(feature) for feature in features], labels=[JTensor.from_ndarray(label) for label in labels], bigdl_type=bigdl_type)
[ "def", "from_ndarray", "(", "cls", ",", "features", ",", "labels", ",", "bigdl_type", "=", "\"float\"", ")", ":", "if", "isinstance", "(", "features", ",", "np", ".", "ndarray", ")", ":", "features", "=", "[", "features", "]", "else", ":", "assert", "all", "(", "isinstance", "(", "feature", ",", "np", ".", "ndarray", ")", "for", "feature", "in", "features", ")", ",", "\"features should be a list of np.ndarray, not %s\"", "%", "type", "(", "features", ")", "if", "np", ".", "isscalar", "(", "labels", ")", ":", "# in case labels is a scalar.", "labels", "=", "[", "np", ".", "array", "(", "labels", ")", "]", "elif", "isinstance", "(", "labels", ",", "np", ".", "ndarray", ")", ":", "labels", "=", "[", "labels", "]", "else", ":", "assert", "all", "(", "isinstance", "(", "label", ",", "np", ".", "ndarray", ")", "for", "label", "in", "labels", ")", ",", "\"labels should be a list of np.ndarray, not %s\"", "%", "type", "(", "labels", ")", "return", "cls", "(", "features", "=", "[", "JTensor", ".", "from_ndarray", "(", "feature", ")", "for", "feature", "in", "features", "]", ",", "labels", "=", "[", "JTensor", ".", "from_ndarray", "(", "label", ")", "for", "label", "in", "labels", "]", ",", "bigdl_type", "=", "bigdl_type", ")" ]
Convert a ndarray of features and labels to Sample, which would be used in Java side. :param features: an ndarray or a list of ndarrays :param labels: an ndarray or a list of ndarrays or a scalar :param bigdl_type: "double" or "float" >>> import numpy as np >>> from bigdl.util.common import callBigDlFunc >>> from numpy.testing import assert_allclose >>> np.random.seed(123) >>> sample = Sample.from_ndarray(np.random.random((2,3)), np.random.random((2,3))) >>> sample_back = callBigDlFunc("float", "testSample", sample) >>> assert_allclose(sample.features[0].to_ndarray(), sample_back.features[0].to_ndarray()) >>> assert_allclose(sample.label.to_ndarray(), sample_back.label.to_ndarray()) >>> expected_feature_storage = np.array(([[0.69646919, 0.28613934, 0.22685145], [0.55131477, 0.71946895, 0.42310646]])) >>> expected_feature_shape = np.array([2, 3]) >>> expected_label_storage = np.array(([[0.98076421, 0.68482971, 0.48093191], [0.39211753, 0.343178, 0.72904968]])) >>> expected_label_shape = np.array([2, 3]) >>> assert_allclose(sample.features[0].storage, expected_feature_storage, rtol=1e-6, atol=1e-6) >>> assert_allclose(sample.features[0].shape, expected_feature_shape) >>> assert_allclose(sample.labels[0].storage, expected_label_storage, rtol=1e-6, atol=1e-6) >>> assert_allclose(sample.labels[0].shape, expected_label_shape)
[ "Convert", "a", "ndarray", "of", "features", "and", "labels", "to", "Sample", "which", "would", "be", "used", "in", "Java", "side", ".", ":", "param", "features", ":", "an", "ndarray", "or", "a", "list", "of", "ndarrays", ":", "param", "labels", ":", "an", "ndarray", "or", "a", "list", "of", "ndarrays", "or", "a", "scalar", ":", "param", "bigdl_type", ":", "double", "or", "float" ]
python
test
rogerhil/thegamesdb
thegamesdb/resources.py
https://github.com/rogerhil/thegamesdb/blob/795314215f9ee73697c7520dea4ddecfb23ca8e6/thegamesdb/resources.py#L38-L47
def list(self, name, platform='', genre=''): """ The name argument is required for this method as per the API server specification. This method also provides the platform and genre optional arguments as filters. """ data_list = self.db.get_data(self.list_path, name=name, platform=platform, genre=genre) data_list = data_list.get('Data') or {} games = data_list.get('Game') or [] return [self._build_item(**i) for i in games]
[ "def", "list", "(", "self", ",", "name", ",", "platform", "=", "''", ",", "genre", "=", "''", ")", ":", "data_list", "=", "self", ".", "db", ".", "get_data", "(", "self", ".", "list_path", ",", "name", "=", "name", ",", "platform", "=", "platform", ",", "genre", "=", "genre", ")", "data_list", "=", "data_list", ".", "get", "(", "'Data'", ")", "or", "{", "}", "games", "=", "data_list", ".", "get", "(", "'Game'", ")", "or", "[", "]", "return", "[", "self", ".", "_build_item", "(", "*", "*", "i", ")", "for", "i", "in", "games", "]" ]
The name argument is required for this method as per the API server specification. This method also provides the platform and genre optional arguments as filters.
[ "The", "name", "argument", "is", "required", "for", "this", "method", "as", "per", "the", "API", "server", "specification", ".", "This", "method", "also", "provides", "the", "platform", "and", "genre", "optional", "arguments", "as", "filters", "." ]
python
train
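The `.get(...) or {}` / `.get(...) or []` chaining in the list method guards against both a missing key and an explicit null in the API payload. A tiny standalone sketch of why that matters:

payloads = [
    {"Data": {"Game": [{"id": 1}]}},   # normal response
    {"Data": {"Game": None}},          # key present but null
    {},                                # empty response
]
for data_list in payloads:
    data_list = data_list.get("Data") or {}
    games = data_list.get("Game") or []
    print(len(games))   # 1, then 0, then 0 -- no TypeError in any case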
danifus/django-override-storage
override_storage/utils.py
https://github.com/danifus/django-override-storage/blob/a1e6c19ca102147762d09aa1f633734224f84926/override_storage/utils.py#L320-L331
def setup_storage(self): """Save existing FileField storages and patch them with test instance(s). If storage_per_field is False (default) this function will create a single instance here and assign it to self.storage to be used for all filefields. If storage_per_field is True, an independent storage instance will be used for each FileField . """ if self.storage_callable is not None and not self.storage_per_field: self.storage = self.get_storage_from_callable(field=None) super(override_storage, self).setup_storage()
[ "def", "setup_storage", "(", "self", ")", ":", "if", "self", ".", "storage_callable", "is", "not", "None", "and", "not", "self", ".", "storage_per_field", ":", "self", ".", "storage", "=", "self", ".", "get_storage_from_callable", "(", "field", "=", "None", ")", "super", "(", "override_storage", ",", "self", ")", ".", "setup_storage", "(", ")" ]
Save existing FileField storages and patch them with test instance(s). If storage_per_field is False (default) this function will create a single instance here and assign it to self.storage to be used for all filefields. If storage_per_field is True, an independent storage instance will be used for each FileField .
[ "Save", "existing", "FileField", "storages", "and", "patch", "them", "with", "test", "instance", "(", "s", ")", "." ]
python
train
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/geometry/triangulation.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/geometry/triangulation.py#L575-L583
def _edges_in_tri_except(self, tri, edge): """Return the edges in *tri*, excluding *edge*. """ edges = [(tri[i], tri[(i+1) % 3]) for i in range(3)] try: edges.remove(tuple(edge)) except ValueError: edges.remove(tuple(edge[::-1])) return edges
[ "def", "_edges_in_tri_except", "(", "self", ",", "tri", ",", "edge", ")", ":", "edges", "=", "[", "(", "tri", "[", "i", "]", ",", "tri", "[", "(", "i", "+", "1", ")", "%", "3", "]", ")", "for", "i", "in", "range", "(", "3", ")", "]", "try", ":", "edges", ".", "remove", "(", "tuple", "(", "edge", ")", ")", "except", "ValueError", ":", "edges", ".", "remove", "(", "tuple", "(", "edge", "[", ":", ":", "-", "1", "]", ")", ")", "return", "edges" ]
Return the edges in *tri*, excluding *edge*.
[ "Return", "the", "edges", "in", "*", "tri", "*", "excluding", "*", "edge", "*", "." ]
python
train
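A standalone sketch of the same edge-removal logic outside the triangulation class; the ValueError fallback handles the case where the edge is stored in the reversed orientation:

def edges_in_tri_except(tri, edge):
    edges = [(tri[i], tri[(i + 1) % 3]) for i in range(3)]
    try:
        edges.remove(tuple(edge))
    except ValueError:
        edges.remove(tuple(edge[::-1]))
    return edges

print(edges_in_tri_except((0, 1, 2), (2, 1)))  # [(0, 1), (2, 0)]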
adaptive-learning/proso-apps
proso_models/views.py
https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_models/views.py#L245-L277
def answer(request): """ Save the answer. GET parameters: html: turn on the HTML version of the API BODY json in following format: { "answer": #answer, -- for one answer "answers": [#answer, #answer, #answer ...] -- for multiple answers } answer = { "answer_class": str, -- class of answer to save (e.g., flashcard_answer) "response_time": int, -- response time in milliseconds "meta": "str" -- optional information "time_gap": int -- waiting time in frontend in seconds ... -- other fields depending on aswer type (see from_json method of Django model class) } """ if request.method == 'GET': return render(request, 'models_answer.html', {}, help_text=answer.__doc__) elif request.method == 'POST': practice_filter = get_filter(request) practice_context = PracticeContext.objects.from_content(practice_filter) saved_answers = _save_answers(request, practice_context, True) return render_json(request, saved_answers, status=200, template='models_answer.html') else: return HttpResponseBadRequest("method %s is not allowed".format(request.method))
[ "def", "answer", "(", "request", ")", ":", "if", "request", ".", "method", "==", "'GET'", ":", "return", "render", "(", "request", ",", "'models_answer.html'", ",", "{", "}", ",", "help_text", "=", "answer", ".", "__doc__", ")", "elif", "request", ".", "method", "==", "'POST'", ":", "practice_filter", "=", "get_filter", "(", "request", ")", "practice_context", "=", "PracticeContext", ".", "objects", ".", "from_content", "(", "practice_filter", ")", "saved_answers", "=", "_save_answers", "(", "request", ",", "practice_context", ",", "True", ")", "return", "render_json", "(", "request", ",", "saved_answers", ",", "status", "=", "200", ",", "template", "=", "'models_answer.html'", ")", "else", ":", "return", "HttpResponseBadRequest", "(", "\"method %s is not allowed\"", ".", "format", "(", "request", ".", "method", ")", ")" ]
Save the answer. GET parameters: html: turn on the HTML version of the API BODY json in following format: { "answer": #answer, -- for one answer "answers": [#answer, #answer, #answer ...] -- for multiple answers } answer = { "answer_class": str, -- class of answer to save (e.g., flashcard_answer) "response_time": int, -- response time in milliseconds "meta": "str" -- optional information "time_gap": int -- waiting time in frontend in seconds ... -- other fields depending on answer type (see from_json method of Django model class) }
[ "Save", "the", "answer", "." ]
python
train
onicagroup/runway
runway/module/cloudformation.py
https://github.com/onicagroup/runway/blob/3f3549ec3bf6e39b9f27d9738a1847f3a4369e7f/runway/module/cloudformation.py#L45-L50
def get_stacker_env_file(path, environment, region): """Determine Stacker environment file name.""" for name in gen_stacker_env_files(environment, region): if os.path.isfile(os.path.join(path, name)): return name return "%s-%s.env" % (environment, region)
[ "def", "get_stacker_env_file", "(", "path", ",", "environment", ",", "region", ")", ":", "for", "name", "in", "gen_stacker_env_files", "(", "environment", ",", "region", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "path", ",", "name", ")", ")", ":", "return", "name", "return", "\"%s-%s.env\"", "%", "(", "environment", ",", "region", ")" ]
Determine Stacker environment file name.
[ "Determine", "Stacker", "environment", "file", "name", "." ]
python
train
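A runnable sketch of the lookup; gen_stacker_env_files is not shown in this record, so the generator below is only an illustrative stand-in for the candidate-name order:

import os

def gen_stacker_env_files(environment, region):
    # stand-in: the real runway helper yields several candidate names
    yield "%s-%s.env" % (environment, region)
    yield "%s.env" % environment

def get_stacker_env_file(path, environment, region):
    for name in gen_stacker_env_files(environment, region):
        if os.path.isfile(os.path.join(path, name)):
            return name
    # no candidate exists on disk: fall back to the default name
    return "%s-%s.env" % (environment, region)

print(get_stacker_env_file(".", "prod", "us-east-1"))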
HumanCellAtlas/cloud-blobstore
cloud_blobstore/s3.py
https://github.com/HumanCellAtlas/cloud-blobstore/blob/b8a60e8e8c0da0e39dda084cb467a34cd2d1ef0a/cloud_blobstore/s3.py#L206-L223
def get(self, bucket: str, key: str) -> bytes: """ Retrieves the data for a given object in a given bucket. :param bucket: the bucket the object resides in. :param key: the key of the object for which metadata is being retrieved. :return: the data """ try: response = self.s3_client.get_object( Bucket=bucket, Key=key ) return response['Body'].read() except botocore.exceptions.ClientError as ex: if ex.response['Error']['Code'] == "NoSuchKey": raise BlobNotFoundError(f"Could not find s3://{bucket}/{key}") from ex raise BlobStoreUnknownError(ex)
[ "def", "get", "(", "self", ",", "bucket", ":", "str", ",", "key", ":", "str", ")", "->", "bytes", ":", "try", ":", "response", "=", "self", ".", "s3_client", ".", "get_object", "(", "Bucket", "=", "bucket", ",", "Key", "=", "key", ")", "return", "response", "[", "'Body'", "]", ".", "read", "(", ")", "except", "botocore", ".", "exceptions", ".", "ClientError", "as", "ex", ":", "if", "ex", ".", "response", "[", "'Error'", "]", "[", "'Code'", "]", "==", "\"NoSuchKey\"", ":", "raise", "BlobNotFoundError", "(", "f\"Could not find s3://{bucket}/{key}\"", ")", "from", "ex", "raise", "BlobStoreUnknownError", "(", "ex", ")" ]
Retrieves the data for a given object in a given bucket. :param bucket: the bucket the object resides in. :param key: the key of the object for which metadata is being retrieved. :return: the data
[ "Retrieves", "the", "data", "for", "a", "given", "object", "in", "a", "given", "bucket", ".", ":", "param", "bucket", ":", "the", "bucket", "the", "object", "resides", "in", ".", ":", "param", "key", ":", "the", "key", "of", "the", "object", "for", "which", "metadata", "is", "being", "retrieved", ".", ":", "return", ":", "the", "data" ]
python
train
rix0rrr/gcl
gcl/ast.py
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast.py#L909-L921
def bracketedList(l, r, sep, expr, allow_missing_close=False): """Parse bracketed list. Empty list is possible, as is a trailing separator. """ # We may need to backtrack for lists, because of list comprehension, but not for # any of the other lists strict = l != '[' closer = sym(r) if not allow_missing_close else p.Optional(sym(r)) if strict: return sym(l) - listMembers(sep, expr) - closer else: return sym(l) + listMembers(sep, expr) + closer
[ "def", "bracketedList", "(", "l", ",", "r", ",", "sep", ",", "expr", ",", "allow_missing_close", "=", "False", ")", ":", "# We may need to backtrack for lists, because of list comprehension, but not for", "# any of the other lists", "strict", "=", "l", "!=", "'['", "closer", "=", "sym", "(", "r", ")", "if", "not", "allow_missing_close", "else", "p", ".", "Optional", "(", "sym", "(", "r", ")", ")", "if", "strict", ":", "return", "sym", "(", "l", ")", "-", "listMembers", "(", "sep", ",", "expr", ")", "-", "closer", "else", ":", "return", "sym", "(", "l", ")", "+", "listMembers", "(", "sep", ",", "expr", ")", "+", "closer" ]
Parse bracketed list. Empty list is possible, as is a trailing separator.
[ "Parse", "bracketed", "list", "." ]
python
train
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py#L33-L55
def recurse( record, index, stop_types=STOP_TYPES,already_seen=None, type_group=False ): """Depth first traversal of a tree, all children are yielded before parent record -- dictionary record to be recursed upon index -- mapping 'address' ids to dictionary records stop_types -- types which will *not* recurse already_seen -- set storing already-visited nodes yields the traversed nodes """ if already_seen is None: already_seen = set() if record['address'] not in already_seen: already_seen.add(record['address']) if 'refs' in record: for child in children( record, index, stop_types=stop_types ): if child['address'] not in already_seen: for descendant in recurse( child, index, stop_types, already_seen=already_seen, type_group=type_group, ): yield descendant yield record
[ "def", "recurse", "(", "record", ",", "index", ",", "stop_types", "=", "STOP_TYPES", ",", "already_seen", "=", "None", ",", "type_group", "=", "False", ")", ":", "if", "already_seen", "is", "None", ":", "already_seen", "=", "set", "(", ")", "if", "record", "[", "'address'", "]", "not", "in", "already_seen", ":", "already_seen", ".", "add", "(", "record", "[", "'address'", "]", ")", "if", "'refs'", "in", "record", ":", "for", "child", "in", "children", "(", "record", ",", "index", ",", "stop_types", "=", "stop_types", ")", ":", "if", "child", "[", "'address'", "]", "not", "in", "already_seen", ":", "for", "descendant", "in", "recurse", "(", "child", ",", "index", ",", "stop_types", ",", "already_seen", "=", "already_seen", ",", "type_group", "=", "type_group", ",", ")", ":", "yield", "descendant", "yield", "record" ]
Depth first traversal of a tree, all children are yielded before parent record -- dictionary record to be recursed upon index -- mapping 'address' ids to dictionary records stop_types -- types which will *not* recurse already_seen -- set storing already-visited nodes yields the traversed nodes
[ "Depth", "first", "traversal", "of", "a", "tree", "all", "children", "are", "yielded", "before", "parent", "record", "--", "dictionary", "record", "to", "be", "recursed", "upon", "index", "--", "mapping", "address", "ids", "to", "dictionary", "records", "stop_types", "--", "types", "which", "will", "*", "not", "*", "recurse", "already_seen", "--", "set", "storing", "already", "-", "visited", "nodes", "yields", "the", "traversed", "nodes" ]
python
train
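A simplified, self-contained sketch of the same post-order traversal over a meliae-style index of {'address': ..., 'refs': [...]} records; the children() helper here is a bare-bones stand-in for the real one, and the already_seen set is what keeps reference cycles from recursing forever:

def children(record, index):
    return [index[a] for a in record.get('refs', []) if a in index]

def recurse(record, index, already_seen=None):
    if already_seen is None:
        already_seen = set()
    if record['address'] in already_seen:
        return
    already_seen.add(record['address'])
    for child in children(record, index):
        if child['address'] not in already_seen:
            yield from recurse(child, index, already_seen)
    yield record   # a record is yielded only after all of its children

index = {
    1: {'address': 1, 'refs': [2, 3]},
    2: {'address': 2, 'refs': [3]},
    3: {'address': 3, 'refs': [1]},   # cycle back to the root
}
print([r['address'] for r in recurse(index[1], index)])  # [3, 2, 1]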
dpkp/kafka-python
kafka/client_async.py
https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/client_async.py#L478-L502
def is_ready(self, node_id, metadata_priority=True): """Check whether a node is ready to send more requests. In addition to connection-level checks, this method also is used to block additional requests from being sent during a metadata refresh. Arguments: node_id (int): id of the node to check metadata_priority (bool): Mark node as not-ready if a metadata refresh is required. Default: True Returns: bool: True if the node is ready and metadata is not refreshing """ if not self._can_send_request(node_id): return False # if we need to update our metadata now declare all requests unready to # make metadata requests first priority if metadata_priority: if self._metadata_refresh_in_progress: return False if self.cluster.ttl() == 0: return False return True
[ "def", "is_ready", "(", "self", ",", "node_id", ",", "metadata_priority", "=", "True", ")", ":", "if", "not", "self", ".", "_can_send_request", "(", "node_id", ")", ":", "return", "False", "# if we need to update our metadata now declare all requests unready to", "# make metadata requests first priority", "if", "metadata_priority", ":", "if", "self", ".", "_metadata_refresh_in_progress", ":", "return", "False", "if", "self", ".", "cluster", ".", "ttl", "(", ")", "==", "0", ":", "return", "False", "return", "True" ]
Check whether a node is ready to send more requests. In addition to connection-level checks, this method also is used to block additional requests from being sent during a metadata refresh. Arguments: node_id (int): id of the node to check metadata_priority (bool): Mark node as not-ready if a metadata refresh is required. Default: True Returns: bool: True if the node is ready and metadata is not refreshing
[ "Check", "whether", "a", "node", "is", "ready", "to", "send", "more", "requests", "." ]
python
train
Netflix-Skunkworks/historical
historical/historical-cookiecutter/historical_{{cookiecutter.technology_slug}}/{{cookiecutter.technology_slug}}/collector.py
https://github.com/Netflix-Skunkworks/historical/blob/c3ebaa8388a3fe67e23a6c9c6b04c3e618497c4a/historical/historical-cookiecutter/historical_{{cookiecutter.technology_slug}}/{{cookiecutter.technology_slug}}/collector.py#L135-L149
def capture_delete_records(records): """Writes all of our delete events to DynamoDB.""" for r in records: model = create_delete_model(r) if model: try: model.delete(eventTime__le=r['detail']['eventTime']) except DeleteError as e: log.warning('Unable to delete {{cookiecutter.technology_name}}. {{cookiecutter.technology_name}} does not exist. Record: {record}'.format( record=r )) else: log.warning('Unable to delete {{cookiecutter.technology_name}}. {{cookiecutter.technology_name}} does not exist. Record: {record}'.format( record=r ))
[ "def", "capture_delete_records", "(", "records", ")", ":", "for", "r", "in", "records", ":", "model", "=", "create_delete_model", "(", "r", ")", "if", "model", ":", "try", ":", "model", ".", "delete", "(", "eventTime__le", "=", "r", "[", "'detail'", "]", "[", "'eventTime'", "]", ")", "except", "DeleteError", "as", "e", ":", "log", ".", "warning", "(", "'Unable to delete {{cookiecutter.technology_name}}. {{cookiecutter.technology_name}} does not exist. Record: {record}'", ".", "format", "(", "record", "=", "r", ")", ")", "else", ":", "log", ".", "warning", "(", "'Unable to delete {{cookiecutter.technology_name}}. {{cookiecutter.technology_name}} does not exist. Record: {record}'", ".", "format", "(", "record", "=", "r", ")", ")" ]
Writes all of our delete events to DynamoDB.
[ "Writes", "all", "of", "our", "delete", "events", "to", "DynamoDB", "." ]
python
train
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/message.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/message.py#L594-L601
def _start(self, request_id, docs): """Publish a CommandStartedEvent.""" cmd = self.command.copy() cmd[self.field] = docs self.listeners.publish_command_start( cmd, self.db_name, request_id, self.sock_info.address, self.op_id) return cmd
[ "def", "_start", "(", "self", ",", "request_id", ",", "docs", ")", ":", "cmd", "=", "self", ".", "command", ".", "copy", "(", ")", "cmd", "[", "self", ".", "field", "]", "=", "docs", "self", ".", "listeners", ".", "publish_command_start", "(", "cmd", ",", "self", ".", "db_name", ",", "request_id", ",", "self", ".", "sock_info", ".", "address", ",", "self", ".", "op_id", ")", "return", "cmd" ]
Publish a CommandStartedEvent.
[ "Publish", "a", "CommandStartedEvent", "." ]
python
train
urschrei/pyzotero
pyzotero/zotero.py
https://github.com/urschrei/pyzotero/blob/b378966b30146a952f7953c23202fb5a1ddf81d9/pyzotero/zotero.py#L337-L365
def _extract_links(self): """ Extract self, first, next, last links from a request response """ extracted = dict() try: for key, value in self.request.links.items(): parsed = urlparse(value["url"]) fragment = "{path}?{query}".format(path=parsed[2], query=parsed[4]) extracted[key] = fragment # add a 'self' link parsed = list(urlparse(self.self_link)) # strip 'format' query parameter stripped = "&".join( [ "%s=%s" % (p[0], p[1]) for p in parse_qsl(parsed[4]) if p[0] != "format" ] ) # rebuild url fragment # this is a death march extracted["self"] = urlunparse( [parsed[0], parsed[1], parsed[2], parsed[3], stripped, parsed[5]] ) return extracted except KeyError: # No links present, because it's a single item return None
[ "def", "_extract_links", "(", "self", ")", ":", "extracted", "=", "dict", "(", ")", "try", ":", "for", "key", ",", "value", "in", "self", ".", "request", ".", "links", ".", "items", "(", ")", ":", "parsed", "=", "urlparse", "(", "value", "[", "\"url\"", "]", ")", "fragment", "=", "\"{path}?{query}\"", ".", "format", "(", "path", "=", "parsed", "[", "2", "]", ",", "query", "=", "parsed", "[", "4", "]", ")", "extracted", "[", "key", "]", "=", "fragment", "# add a 'self' link", "parsed", "=", "list", "(", "urlparse", "(", "self", ".", "self_link", ")", ")", "# strip 'format' query parameter", "stripped", "=", "\"&\"", ".", "join", "(", "[", "\"%s=%s\"", "%", "(", "p", "[", "0", "]", ",", "p", "[", "1", "]", ")", "for", "p", "in", "parse_qsl", "(", "parsed", "[", "4", "]", ")", "if", "p", "[", "0", "]", "!=", "\"format\"", "]", ")", "# rebuild url fragment", "# this is a death march", "extracted", "[", "\"self\"", "]", "=", "urlunparse", "(", "[", "parsed", "[", "0", "]", ",", "parsed", "[", "1", "]", ",", "parsed", "[", "2", "]", ",", "parsed", "[", "3", "]", ",", "stripped", ",", "parsed", "[", "5", "]", "]", ")", "return", "extracted", "except", "KeyError", ":", "# No links present, because it's a single item", "return", "None" ]
Extract self, first, next, last links from a request response
[ "Extract", "self", "first", "next", "last", "links", "from", "a", "request", "response" ]
python
valid
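The 'self' link rebuilding in _extract_links boils down to dropping the 'format' query parameter and reassembling the URL. A standalone Python 3 sketch of that step using only the standard library:

from urllib.parse import urlparse, parse_qsl, urlunparse

def strip_format(url):
    parsed = list(urlparse(url))
    parsed[4] = "&".join(
        "%s=%s" % (k, v) for k, v in parse_qsl(parsed[4]) if k != "format"
    )
    return urlunparse(parsed)

print(strip_format("https://api.zotero.org/users/1/items?format=atom&limit=25"))
# -> https://api.zotero.org/users/1/items?limit=25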
blockstack/blockstack-core
blockstack/lib/subdomains.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/subdomains.py#L345-L357
def get_public_key(self): """ Parse the scriptSig and extract the public key. Raises ValueError if this is a multisig-controlled subdomain. """ res = self.get_public_key_info() if 'error' in res: raise ValueError(res['error']) if res['type'] != 'singlesig': raise ValueError(res['error']) return res['public_keys'][0]
[ "def", "get_public_key", "(", "self", ")", ":", "res", "=", "self", ".", "get_public_key_info", "(", ")", "if", "'error'", "in", "res", ":", "raise", "ValueError", "(", "res", "[", "'error'", "]", ")", "if", "res", "[", "'type'", "]", "!=", "'singlesig'", ":", "raise", "ValueError", "(", "res", "[", "'error'", "]", ")", "return", "res", "[", "'public_keys'", "]", "[", "0", "]" ]
Parse the scriptSig and extract the public key. Raises ValueError if this is a multisig-controlled subdomain.
[ "Parse", "the", "scriptSig", "and", "extract", "the", "public", "key", ".", "Raises", "ValueError", "if", "this", "is", "a", "multisig", "-", "controlled", "subdomain", "." ]
python
train
spyder-ide/spyder
spyder/config/base.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/config/base.py#L222-L224
def get_module_path(modname): """Return module *modname* base path""" return osp.abspath(osp.dirname(sys.modules[modname].__file__))
[ "def", "get_module_path", "(", "modname", ")", ":", "return", "osp", ".", "abspath", "(", "osp", ".", "dirname", "(", "sys", ".", "modules", "[", "modname", "]", ".", "__file__", ")", ")" ]
Return module *modname* base path
[ "Return", "module", "*", "modname", "*", "base", "path" ]
python
train
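The helper is a thin wrapper around a module's __file__ attribute. A standalone equivalent and a quick check (the printed path is system-dependent):

import os.path as osp
import sys
import json  # imported so that sys.modules['json'] is populated

def get_module_path(modname):
    return osp.abspath(osp.dirname(sys.modules[modname].__file__))

print(get_module_path("json"))  # e.g. /usr/lib/python3.x/json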
sleepyfran/itunespy
itunespy/music_album.py
https://github.com/sleepyfran/itunespy/blob/0e7e931b135b5e0daae49ba68e9167ff4ac73eb5/itunespy/music_album.py#L31-L40
def get_tracks(self): """ Retrieves all the tracks of the album if they haven't been retrieved yet :return: List. Tracks of the current album """ if not self._track_list: tracks = itunespy.lookup(id=self.collection_id, entity=itunespy.entities['song'])[1:] for track in tracks: self._track_list.append(track) return self._track_list
[ "def", "get_tracks", "(", "self", ")", ":", "if", "not", "self", ".", "_track_list", ":", "tracks", "=", "itunespy", ".", "lookup", "(", "id", "=", "self", ".", "collection_id", ",", "entity", "=", "itunespy", ".", "entities", "[", "'song'", "]", ")", "[", "1", ":", "]", "for", "track", "in", "tracks", ":", "self", ".", "_track_list", ".", "append", "(", "track", ")", "return", "self", ".", "_track_list" ]
Retrieves all the tracks of the album if they haven't been retrieved yet :return: List. Tracks of the current album
[ "Retrieves", "all", "the", "tracks", "of", "the", "album", "if", "they", "haven", "t", "been", "retrieved", "yet", ":", "return", ":", "List", ".", "Tracks", "of", "the", "current", "album" ]
python
train
PyCQA/astroid
astroid/builder.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/builder.py#L278-L314
def _extract_expressions(node): """Find expressions in a call to _TRANSIENT_FUNCTION and extract them. The function walks the AST recursively to search for expressions that are wrapped into a call to _TRANSIENT_FUNCTION. If it finds such an expression, it completely removes the function call node from the tree, replacing it by the wrapped expression inside the parent. :param node: An astroid node. :type node: astroid.bases.NodeNG :yields: The sequence of wrapped expressions on the modified tree expression can be found. """ if ( isinstance(node, nodes.Call) and isinstance(node.func, nodes.Name) and node.func.name == _TRANSIENT_FUNCTION ): real_expr = node.args[0] real_expr.parent = node.parent # Search for node in all _astng_fields (the fields checked when # get_children is called) of its parent. Some of those fields may # be lists or tuples, in which case the elements need to be checked. # When we find it, replace it by real_expr, so that the AST looks # like no call to _TRANSIENT_FUNCTION ever took place. for name in node.parent._astroid_fields: child = getattr(node.parent, name) if isinstance(child, (list, tuple)): for idx, compound_child in enumerate(child): if compound_child is node: child[idx] = real_expr elif child is node: setattr(node.parent, name, real_expr) yield real_expr else: for child in node.get_children(): yield from _extract_expressions(child)
[ "def", "_extract_expressions", "(", "node", ")", ":", "if", "(", "isinstance", "(", "node", ",", "nodes", ".", "Call", ")", "and", "isinstance", "(", "node", ".", "func", ",", "nodes", ".", "Name", ")", "and", "node", ".", "func", ".", "name", "==", "_TRANSIENT_FUNCTION", ")", ":", "real_expr", "=", "node", ".", "args", "[", "0", "]", "real_expr", ".", "parent", "=", "node", ".", "parent", "# Search for node in all _astng_fields (the fields checked when", "# get_children is called) of its parent. Some of those fields may", "# be lists or tuples, in which case the elements need to be checked.", "# When we find it, replace it by real_expr, so that the AST looks", "# like no call to _TRANSIENT_FUNCTION ever took place.", "for", "name", "in", "node", ".", "parent", ".", "_astroid_fields", ":", "child", "=", "getattr", "(", "node", ".", "parent", ",", "name", ")", "if", "isinstance", "(", "child", ",", "(", "list", ",", "tuple", ")", ")", ":", "for", "idx", ",", "compound_child", "in", "enumerate", "(", "child", ")", ":", "if", "compound_child", "is", "node", ":", "child", "[", "idx", "]", "=", "real_expr", "elif", "child", "is", "node", ":", "setattr", "(", "node", ".", "parent", ",", "name", ",", "real_expr", ")", "yield", "real_expr", "else", ":", "for", "child", "in", "node", ".", "get_children", "(", ")", ":", "yield", "from", "_extract_expressions", "(", "child", ")" ]
Find expressions in a call to _TRANSIENT_FUNCTION and extract them. The function walks the AST recursively to search for expressions that are wrapped into a call to _TRANSIENT_FUNCTION. If it finds such an expression, it completely removes the function call node from the tree, replacing it by the wrapped expression inside the parent. :param node: An astroid node. :type node: astroid.bases.NodeNG :yields: The sequence of wrapped expressions on the modified tree expression can be found.
[ "Find", "expressions", "in", "a", "call", "to", "_TRANSIENT_FUNCTION", "and", "extract", "them", "." ]
python
train
aliyun/aliyun-odps-python-sdk
odps/ml/algolib/objects.py
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/ml/algolib/objects.py#L252-L265
def build_data_output(cls, name='output', copy_input=None, schema=None): """ Build a data output port. :param name: port name :type name: str :return: port object :param copy_input: input name where the schema is copied from. :type copy_input: str :param schema: k1:v1,k2:v2 string describing the schema to be appended :type schema: str :rtype: PortDef """ return cls(name, PortDirection.OUTPUT, type=PortType.DATA, copy_input=copy_input, schema=schema)
[ "def", "build_data_output", "(", "cls", ",", "name", "=", "'output'", ",", "copy_input", "=", "None", ",", "schema", "=", "None", ")", ":", "return", "cls", "(", "name", ",", "PortDirection", ".", "OUTPUT", ",", "type", "=", "PortType", ".", "DATA", ",", "copy_input", "=", "copy_input", ",", "schema", "=", "schema", ")" ]
Build a data output port. :param name: port name :type name: str :return: port object :param copy_input: input name where the schema is copied from. :type copy_input: str :param schema: k1:v1,k2:v2 string describing the schema to be appended :type schema: str :rtype: PortDef
[ "Build", "a", "data", "output", "port", "." ]
python
train
tamasgal/km3pipe
km3modules/k40.py
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L529-L566
def load_k40_coincidences_from_rootfile(filename, dom_id): """Load k40 coincidences from JMonitorK40 ROOT file Parameters ---------- filename: root file produced by JMonitorK40 dom_id: DOM ID Returns ------- data: numpy array of coincidences dom_weight: weight to apply to coincidences to get rate in Hz """ from ROOT import TFile root_file_monitor = TFile(filename, "READ") dom_name = str(dom_id) + ".2S" histo_2d_monitor = root_file_monitor.Get(dom_name) data = [] for c in range(1, histo_2d_monitor.GetNbinsX() + 1): combination = [] for b in range(1, histo_2d_monitor.GetNbinsY() + 1): combination.append(histo_2d_monitor.GetBinContent(c, b)) data.append(combination) weights = {} weights_histo = root_file_monitor.Get('weights_hist') try: for i in range(1, weights_histo.GetNbinsX() + 1): # we have to read all the entries, unfortunately weight = weights_histo.GetBinContent(i) label = weights_histo.GetXaxis().GetBinLabel(i) weights[label[3:]] = weight dom_weight = weights[str(dom_id)] except AttributeError: log.info("Weights histogram broken or not found, setting weight to 1.") dom_weight = 1. return np.array(data), dom_weight
[ "def", "load_k40_coincidences_from_rootfile", "(", "filename", ",", "dom_id", ")", ":", "from", "ROOT", "import", "TFile", "root_file_monitor", "=", "TFile", "(", "filename", ",", "\"READ\"", ")", "dom_name", "=", "str", "(", "dom_id", ")", "+", "\".2S\"", "histo_2d_monitor", "=", "root_file_monitor", ".", "Get", "(", "dom_name", ")", "data", "=", "[", "]", "for", "c", "in", "range", "(", "1", ",", "histo_2d_monitor", ".", "GetNbinsX", "(", ")", "+", "1", ")", ":", "combination", "=", "[", "]", "for", "b", "in", "range", "(", "1", ",", "histo_2d_monitor", ".", "GetNbinsY", "(", ")", "+", "1", ")", ":", "combination", ".", "append", "(", "histo_2d_monitor", ".", "GetBinContent", "(", "c", ",", "b", ")", ")", "data", ".", "append", "(", "combination", ")", "weights", "=", "{", "}", "weights_histo", "=", "root_file_monitor", ".", "Get", "(", "'weights_hist'", ")", "try", ":", "for", "i", "in", "range", "(", "1", ",", "weights_histo", ".", "GetNbinsX", "(", ")", "+", "1", ")", ":", "# we have to read all the entries, unfortunately", "weight", "=", "weights_histo", ".", "GetBinContent", "(", "i", ")", "label", "=", "weights_histo", ".", "GetXaxis", "(", ")", ".", "GetBinLabel", "(", "i", ")", "weights", "[", "label", "[", "3", ":", "]", "]", "=", "weight", "dom_weight", "=", "weights", "[", "str", "(", "dom_id", ")", "]", "except", "AttributeError", ":", "log", ".", "info", "(", "\"Weights histogram broken or not found, setting weight to 1.\"", ")", "dom_weight", "=", "1.", "return", "np", ".", "array", "(", "data", ")", ",", "dom_weight" ]
Load k40 coincidences from JMonitorK40 ROOT file Parameters ---------- filename: root file produced by JMonitorK40 dom_id: DOM ID Returns ------- data: numpy array of coincidences dom_weight: weight to apply to coincidences to get rate in Hz
[ "Load", "k40", "coincidences", "from", "JMonitorK40", "ROOT", "file" ]
python
train
blockstack/virtualchain
virtualchain/lib/blockchain/session.py
https://github.com/blockstack/virtualchain/blob/fcfc970064ca7dfcab26ebd3ab955870a763ea39/virtualchain/lib/blockchain/session.py#L151-L178
def connect_bitcoind_impl( bitcoind_opts ): """ Create a connection to bitcoind, using a dict of config options. """ if 'bitcoind_port' not in bitcoind_opts.keys() or bitcoind_opts['bitcoind_port'] is None: log.error("No port given") raise ValueError("No RPC port given (bitcoind_port)") if 'bitcoind_timeout' not in bitcoind_opts.keys() or bitcoind_opts['bitcoind_timeout'] is None: # default bitcoind_opts['bitcoind_timeout'] = 300 try: int(bitcoind_opts['bitcoind_port']) except: log.error("Not an int: '%s'" % bitcoind_opts.get('bitcoind_port')) raise try: float(bitcoind_opts.get('bitcoind_timeout', 300)) except: log.error("Not a float: '%s'" % bitcoind_opts.get('bitcoind_timeout', 300)) raise return create_bitcoind_connection( bitcoind_opts['bitcoind_user'], bitcoind_opts['bitcoind_passwd'], \ bitcoind_opts['bitcoind_server'], int(bitcoind_opts['bitcoind_port']), \ bitcoind_opts.get('bitcoind_use_https', False), float(bitcoind_opts.get('bitcoind_timeout', 300)) )
[ "def", "connect_bitcoind_impl", "(", "bitcoind_opts", ")", ":", "if", "'bitcoind_port'", "not", "in", "bitcoind_opts", ".", "keys", "(", ")", "or", "bitcoind_opts", "[", "'bitcoind_port'", "]", "is", "None", ":", "log", ".", "error", "(", "\"No port given\"", ")", "raise", "ValueError", "(", "\"No RPC port given (bitcoind_port)\"", ")", "if", "'bitcoind_timeout'", "not", "in", "bitcoind_opts", ".", "keys", "(", ")", "or", "bitcoind_opts", "[", "'bitcoind_timeout'", "]", "is", "None", ":", "# default", "bitcoind_opts", "[", "'bitcoind_timeout'", "]", "=", "300", "try", ":", "int", "(", "bitcoind_opts", "[", "'bitcoind_port'", "]", ")", "except", ":", "log", ".", "error", "(", "\"Not an int: '%s'\"", "%", "bitcoind_opts", ".", "get", "(", "'bitcoind_port'", ")", ")", "raise", "try", ":", "float", "(", "bitcoind_opts", ".", "get", "(", "'bitcoind_timeout'", ",", "300", ")", ")", "except", ":", "log", ".", "error", "(", "\"Not a float: '%s'\"", "%", "bitcoind_opts", ".", "get", "(", "'bitcoind_timeout'", ",", "300", ")", ")", "raise", "return", "create_bitcoind_connection", "(", "bitcoind_opts", "[", "'bitcoind_user'", "]", ",", "bitcoind_opts", "[", "'bitcoind_passwd'", "]", ",", "bitcoind_opts", "[", "'bitcoind_server'", "]", ",", "int", "(", "bitcoind_opts", "[", "'bitcoind_port'", "]", ")", ",", "bitcoind_opts", ".", "get", "(", "'bitcoind_use_https'", ",", "False", ")", ",", "float", "(", "bitcoind_opts", ".", "get", "(", "'bitcoind_timeout'", ",", "300", ")", ")", ")" ]
Create a connection to bitcoind, using a dict of config options.
[ "Create", "a", "connection", "to", "bitcoind", "using", "a", "dict", "of", "config", "options", "." ]
python
train
tyarkoni/pliers
pliers/external/tensorflow/classify_image.py
https://github.com/tyarkoni/pliers/blob/5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b/pliers/external/tensorflow/classify_image.py#L120-L127
def create_graph(): """Creates a graph from saved GraphDef file and returns a saver.""" # Creates graph from saved graph_def.pb. with tf.gfile.FastGFile(os.path.join( FLAGS.model_dir, 'classify_image_graph_def.pb'), 'rb') as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) _ = tf.import_graph_def(graph_def, name='')
[ "def", "create_graph", "(", ")", ":", "# Creates graph from saved graph_def.pb.", "with", "tf", ".", "gfile", ".", "FastGFile", "(", "os", ".", "path", ".", "join", "(", "FLAGS", ".", "model_dir", ",", "'classify_image_graph_def.pb'", ")", ",", "'rb'", ")", "as", "f", ":", "graph_def", "=", "tf", ".", "GraphDef", "(", ")", "graph_def", ".", "ParseFromString", "(", "f", ".", "read", "(", ")", ")", "_", "=", "tf", ".", "import_graph_def", "(", "graph_def", ",", "name", "=", "''", ")" ]
Creates a graph from saved GraphDef file and returns a saver.
[ "Creates", "a", "graph", "from", "saved", "GraphDef", "file", "and", "returns", "a", "saver", "." ]
python
train
kblin/ncbi-genome-download
ncbi_genome_download/core.py
https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/core.py#L300-L328
def get_summary(section, domain, uri, use_cache): """Get the assembly_summary.txt file from NCBI and return a StringIO object for it.""" logging.debug('Checking for a cached summary file') cachefile = "{section}_{domain}_assembly_summary.txt".format(section=section, domain=domain) full_cachefile = os.path.join(CACHE_DIR, cachefile) if use_cache and os.path.exists(full_cachefile) and \ datetime.utcnow() - datetime.fromtimestamp(os.path.getmtime(full_cachefile)) < timedelta(days=1): logging.info('Using cached summary.') with codecs.open(full_cachefile, 'r', encoding='utf-8') as fh: return StringIO(fh.read()) logging.debug('Downloading summary for %r/%r uri: %r', section, domain, uri) url = '{uri}/{section}/{domain}/assembly_summary.txt'.format( section=section, domain=domain, uri=uri) req = requests.get(url) if use_cache: try: os.makedirs(CACHE_DIR) except OSError as err: # Errno 17 is "file exists", ignore that, otherwise re-raise if err.errno != 17: raise with codecs.open(full_cachefile, 'w', encoding='utf-8') as fh: fh.write(req.text) return StringIO(req.text)
[ "def", "get_summary", "(", "section", ",", "domain", ",", "uri", ",", "use_cache", ")", ":", "logging", ".", "debug", "(", "'Checking for a cached summary file'", ")", "cachefile", "=", "\"{section}_{domain}_assembly_summary.txt\"", ".", "format", "(", "section", "=", "section", ",", "domain", "=", "domain", ")", "full_cachefile", "=", "os", ".", "path", ".", "join", "(", "CACHE_DIR", ",", "cachefile", ")", "if", "use_cache", "and", "os", ".", "path", ".", "exists", "(", "full_cachefile", ")", "and", "datetime", ".", "utcnow", "(", ")", "-", "datetime", ".", "fromtimestamp", "(", "os", ".", "path", ".", "getmtime", "(", "full_cachefile", ")", ")", "<", "timedelta", "(", "days", "=", "1", ")", ":", "logging", ".", "info", "(", "'Using cached summary.'", ")", "with", "codecs", ".", "open", "(", "full_cachefile", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "as", "fh", ":", "return", "StringIO", "(", "fh", ".", "read", "(", ")", ")", "logging", ".", "debug", "(", "'Downloading summary for %r/%r uri: %r'", ",", "section", ",", "domain", ",", "uri", ")", "url", "=", "'{uri}/{section}/{domain}/assembly_summary.txt'", ".", "format", "(", "section", "=", "section", ",", "domain", "=", "domain", ",", "uri", "=", "uri", ")", "req", "=", "requests", ".", "get", "(", "url", ")", "if", "use_cache", ":", "try", ":", "os", ".", "makedirs", "(", "CACHE_DIR", ")", "except", "OSError", "as", "err", ":", "# Errno 17 is \"file exists\", ignore that, otherwise re-raise", "if", "err", ".", "errno", "!=", "17", ":", "raise", "with", "codecs", ".", "open", "(", "full_cachefile", ",", "'w'", ",", "encoding", "=", "'utf-8'", ")", "as", "fh", ":", "fh", ".", "write", "(", "req", ".", "text", ")", "return", "StringIO", "(", "req", ".", "text", ")" ]
Get the assembly_summary.txt file from NCBI and return a StringIO object for it.
[ "Get", "the", "assembly_summary", ".", "txt", "file", "from", "NCBI", "and", "return", "a", "StringIO", "object", "for", "it", "." ]
python
train
kalekundert/nonstdlib
nonstdlib/meta.py
https://github.com/kalekundert/nonstdlib/blob/3abf4a4680056d6d97f2a5988972eb9392756fb6/nonstdlib/meta.py#L40-L75
def singleton(cls): """ Decorator function that turns a class into a singleton. """ import inspect # Create a structure to store instances of any singletons that get # created. instances = {} # Make sure that the constructor for this class doesn't take any # arguments. Since singletons can only be instantiated once, it doesn't # make any sense for the constructor to take arguments. If the class # doesn't implement its own constructor, don't do anything. This case is # considered specially because it causes a TypeError in python 3.3 but not # in python 3.4. if cls.__init__ is not object.__init__: argspec = inspect.getfullargspec(cls.__init__) if len(argspec.args) != 1 or argspec.varargs or argspec.varkw: raise TypeError("Singleton classes cannot accept arguments to the constructor.") def get_instance(): """ Creates and returns the singleton object. This function is what gets returned by this decorator. """ # Check to see if an instance of this class has already been # instantiated. If it hasn't, create one. The `instances` structure # is technically a global variable, so it will be preserved between # calls to this function. if cls not in instances: instances[cls] = cls() # Return a previously instantiated object of the requested type. return instances[cls] # Return the decorator function. return get_instance
[ "def", "singleton", "(", "cls", ")", ":", "import", "inspect", "# Create a structure to store instances of any singletons that get", "# created.", "instances", "=", "{", "}", "# Make sure that the constructor for this class doesn't take any", "# arguments. Since singletons can only be instantiated once, it doesn't", "# make any sense for the constructor to take arguments. If the class ", "# doesn't implement its own constructor, don't do anything. This case is ", "# considered specially because it causes a TypeError in python 3.3 but not ", "# in python 3.4.", "if", "cls", ".", "__init__", "is", "not", "object", ".", "__init__", ":", "argspec", "=", "inspect", ".", "getfullargspec", "(", "cls", ".", "__init__", ")", "if", "len", "(", "argspec", ".", "args", ")", "!=", "1", "or", "argspec", ".", "varargs", "or", "argspec", ".", "varkw", ":", "raise", "TypeError", "(", "\"Singleton classes cannot accept arguments to the constructor.\"", ")", "def", "get_instance", "(", ")", ":", "\"\"\" Creates and returns the singleton object. This function is what \n gets returned by this decorator. \"\"\"", "# Check to see if an instance of this class has already been", "# instantiated. If it hasn't, create one. The `instances` structure", "# is technically a global variable, so it will be preserved between", "# calls to this function.", "if", "cls", "not", "in", "instances", ":", "instances", "[", "cls", "]", "=", "cls", "(", ")", "# Return a previously instantiated object of the requested type.", "return", "instances", "[", "cls", "]", "# Return the decorator function.", "return", "get_instance" ]
Decorator function that turns a class into a singleton.
[ "Decorator", "function", "that", "turns", "a", "class", "into", "a", "singleton", "." ]
python
train
dbcli/athenacli
athenacli/packages/special/iocommands.py
https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/packages/special/iocommands.py#L158-L186
def execute_favorite_query(cur, arg, **_): """Returns (title, rows, headers, status)""" if arg == '': for result in list_favorite_queries(): yield result """Parse out favorite name and optional substitution parameters""" name, _, arg_str = arg.partition(' ') args = shlex.split(arg_str) query = favoritequeries.get(name) if query is None: message = "No favorite query: %s" % (name) yield (None, None, None, message) else: query, arg_error = subst_favorite_query_args(query, args) if arg_error: yield (None, None, None, arg_error) else: for sql in sqlparse.split(query): _logger.debug("query is [%s]", sql) sql = sql.rstrip(';') title = '> %s' % (sql) cur.execute(sql) if cur.description: headers = [x[0] for x in cur.description] yield (title, cur.fetchall(), headers, None) else: yield (title, None, None, None)
[ "def", "execute_favorite_query", "(", "cur", ",", "arg", ",", "*", "*", "_", ")", ":", "if", "arg", "==", "''", ":", "for", "result", "in", "list_favorite_queries", "(", ")", ":", "yield", "result", "\"\"\"Parse out favorite name and optional substitution parameters\"\"\"", "name", ",", "_", ",", "arg_str", "=", "arg", ".", "partition", "(", "' '", ")", "args", "=", "shlex", ".", "split", "(", "arg_str", ")", "query", "=", "favoritequeries", ".", "get", "(", "name", ")", "if", "query", "is", "None", ":", "message", "=", "\"No favorite query: %s\"", "%", "(", "name", ")", "yield", "(", "None", ",", "None", ",", "None", ",", "message", ")", "else", ":", "query", ",", "arg_error", "=", "subst_favorite_query_args", "(", "query", ",", "args", ")", "if", "arg_error", ":", "yield", "(", "None", ",", "None", ",", "None", ",", "arg_error", ")", "else", ":", "for", "sql", "in", "sqlparse", ".", "split", "(", "query", ")", ":", "_logger", ".", "debug", "(", "\"query is [%s]\"", ",", "sql", ")", "sql", "=", "sql", ".", "rstrip", "(", "';'", ")", "title", "=", "'> %s'", "%", "(", "sql", ")", "cur", ".", "execute", "(", "sql", ")", "if", "cur", ".", "description", ":", "headers", "=", "[", "x", "[", "0", "]", "for", "x", "in", "cur", ".", "description", "]", "yield", "(", "title", ",", "cur", ".", "fetchall", "(", ")", ",", "headers", ",", "None", ")", "else", ":", "yield", "(", "title", ",", "None", ",", "None", ",", "None", ")" ]
Returns (title, rows, headers, status)
[ "Returns", "(", "title", "rows", "headers", "status", ")" ]
python
train
Shizmob/pydle
pydle/client.py
https://github.com/Shizmob/pydle/blob/7ec7d65d097318ed0bcdc5d8401470287d8c7cf7/pydle/client.py#L468-L472
def disconnect(self, client): """ Remove client from pool. """ self.clients.remove(client) del self.connect_args[client] client.disconnect()
[ "def", "disconnect", "(", "self", ",", "client", ")", ":", "self", ".", "clients", ".", "remove", "(", "client", ")", "del", "self", ".", "connect_args", "[", "client", "]", "client", ".", "disconnect", "(", ")" ]
Remove client from pool.
[ "Remove", "client", "from", "pool", "." ]
python
train
jldbc/pybaseball
pybaseball/league_pitching_stats.py
https://github.com/jldbc/pybaseball/blob/085ea26bfd1b5f5926d79d4fac985c88278115f2/pybaseball/league_pitching_stats.py#L67-L96
def pitching_stats_range(start_dt=None, end_dt=None): """ Get all pitching stats for a set time range. This can be the past week, the month of August, anything. Just supply the start and end date in YYYY-MM-DD format. """ # ensure valid date strings, perform necessary processing for query start_dt, end_dt = sanitize_input(start_dt, end_dt) if datetime.datetime.strptime(start_dt, "%Y-%m-%d").year < 2008: raise ValueError("Year must be 2008 or later") if datetime.datetime.strptime(end_dt, "%Y-%m-%d").year < 2008: raise ValueError("Year must be 2008 or later") # retrieve html from baseball reference soup = get_soup(start_dt, end_dt) table = get_table(soup) table = table.dropna(how='all') # drop if all columns are NA #fix some strange formatting for percentage columns table = table.replace('---%', np.nan) #make sure these are all numeric for column in ['Age', '#days', 'G', 'GS', 'W', 'L', 'SV', 'IP', 'H', 'R', 'ER', 'BB', 'SO', 'HR', 'HBP', 'ERA', 'AB', '2B', '3B', 'IBB', 'GDP', 'SF', 'SB', 'CS', 'PO', 'BF', 'Pit', 'WHIP', 'BAbip', 'SO9', 'SO/W']: table[column] = pd.to_numeric(table[column]) #convert str(xx%) values to float(0.XX) decimal values for column in ['Str', 'StL', 'StS', 'GB/FB', 'LD', 'PU']: table[column] = table[column].replace('%','',regex=True).astype('float')/100 table = table.drop('',1) return table
[ "def", "pitching_stats_range", "(", "start_dt", "=", "None", ",", "end_dt", "=", "None", ")", ":", "# ensure valid date strings, perform necessary processing for query", "start_dt", ",", "end_dt", "=", "sanitize_input", "(", "start_dt", ",", "end_dt", ")", "if", "datetime", ".", "datetime", ".", "strptime", "(", "start_dt", ",", "\"%Y-%m-%d\"", ")", ".", "year", "<", "2008", ":", "raise", "ValueError", "(", "\"Year must be 2008 or later\"", ")", "if", "datetime", ".", "datetime", ".", "strptime", "(", "end_dt", ",", "\"%Y-%m-%d\"", ")", ".", "year", "<", "2008", ":", "raise", "ValueError", "(", "\"Year must be 2008 or later\"", ")", "# retrieve html from baseball reference", "soup", "=", "get_soup", "(", "start_dt", ",", "end_dt", ")", "table", "=", "get_table", "(", "soup", ")", "table", "=", "table", ".", "dropna", "(", "how", "=", "'all'", ")", "# drop if all columns are NA", "#fix some strange formatting for percentage columns", "table", "=", "table", ".", "replace", "(", "'---%'", ",", "np", ".", "nan", ")", "#make sure these are all numeric", "for", "column", "in", "[", "'Age'", ",", "'#days'", ",", "'G'", ",", "'GS'", ",", "'W'", ",", "'L'", ",", "'SV'", ",", "'IP'", ",", "'H'", ",", "'R'", ",", "'ER'", ",", "'BB'", ",", "'SO'", ",", "'HR'", ",", "'HBP'", ",", "'ERA'", ",", "'AB'", ",", "'2B'", ",", "'3B'", ",", "'IBB'", ",", "'GDP'", ",", "'SF'", ",", "'SB'", ",", "'CS'", ",", "'PO'", ",", "'BF'", ",", "'Pit'", ",", "'WHIP'", ",", "'BAbip'", ",", "'SO9'", ",", "'SO/W'", "]", ":", "table", "[", "column", "]", "=", "pd", ".", "to_numeric", "(", "table", "[", "column", "]", ")", "#convert str(xx%) values to float(0.XX) decimal values", "for", "column", "in", "[", "'Str'", ",", "'StL'", ",", "'StS'", ",", "'GB/FB'", ",", "'LD'", ",", "'PU'", "]", ":", "table", "[", "column", "]", "=", "table", "[", "column", "]", ".", "replace", "(", "'%'", ",", "''", ",", "regex", "=", "True", ")", ".", "astype", "(", "'float'", ")", "/", "100", "table", "=", "table", ".", "drop", "(", "''", ",", "1", ")", "return", "table" ]
Get all pitching stats for a set time range. This can be the past week, the month of August, anything. Just supply the start and end date in YYYY-MM-DD format.
[ "Get", "all", "pitching", "stats", "for", "a", "set", "time", "range", ".", "This", "can", "be", "the", "past", "week", "the", "month", "of", "August", "anything", ".", "Just", "supply", "the", "start", "and", "end", "date", "in", "YYYY", "-", "MM", "-", "DD", "format", "." ]
python
train
serkanyersen/underscore.py
src/underscore.py
https://github.com/serkanyersen/underscore.py/blob/07c25c3f0f789536e4ad47aa315faccc0da9602f/src/underscore.py#L561-L579
def without(self, *values): """ Return a version of the array that does not contain the specified value(s). """ if self._clean.isDict(): newlist = {} for i, k in enumerate(self.obj): # if k not in values: # use indexof to check identity if _(values).indexOf(k) is -1: newlist.set(k, self.obj[k]) else: newlist = [] for i, v in enumerate(self.obj): # if v not in values: # use indexof to check identity if _(values).indexOf(v) is -1: newlist.append(v) return self._wrap(newlist)
[ "def", "without", "(", "self", ",", "*", "values", ")", ":", "if", "self", ".", "_clean", ".", "isDict", "(", ")", ":", "newlist", "=", "{", "}", "for", "i", ",", "k", "in", "enumerate", "(", "self", ".", "obj", ")", ":", "# if k not in values: # use indexof to check identity", "if", "_", "(", "values", ")", ".", "indexOf", "(", "k", ")", "is", "-", "1", ":", "newlist", ".", "set", "(", "k", ",", "self", ".", "obj", "[", "k", "]", ")", "else", ":", "newlist", "=", "[", "]", "for", "i", ",", "v", "in", "enumerate", "(", "self", ".", "obj", ")", ":", "# if v not in values: # use indexof to check identity", "if", "_", "(", "values", ")", ".", "indexOf", "(", "v", ")", "is", "-", "1", ":", "newlist", ".", "append", "(", "v", ")", "return", "self", ".", "_wrap", "(", "newlist", ")" ]
Return a version of the array that does not contain the specified value(s).
[ "Return", "a", "version", "of", "the", "array", "that", "does", "not", "contain", "the", "specified", "value", "(", "s", ")", "." ]
python
train
gwastro/pycbc
pycbc/filter/matchedfilter.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/filter/matchedfilter.py#L659-L788
def compute_max_snr_over_sky_loc_stat_no_phase(hplus, hcross, hphccorr, hpnorm=None, hcnorm=None, out=None, thresh=0, analyse_slice=None): """ Compute the match maximized over polarization phase. In contrast to compute_max_snr_over_sky_loc_stat_no_phase this function performs no maximization over orbital phase, treating that as an intrinsic parameter. In the case of aligned-spin 2,2-mode only waveforms, this collapses to the normal statistic (at twice the computational cost!) Parameters ----------- hplus : TimeSeries This is the IFFTed complex SNR time series of (h+, data). If not normalized, supply the normalization factor so this can be done! It is recommended to normalize this before sending through this function hcross : TimeSeries This is the IFFTed complex SNR time series of (hx, data). If not normalized, supply the normalization factor so this can be done! hphccorr : float The real component of the overlap between the two polarizations Re[(h+, hx)]. Note that the imaginary component does not enter the detection statistic. This must be normalized and is sign-sensitive. thresh : float Used for optimization. If we do not care about the value of SNR values below thresh we can calculate a quick statistic that will always overestimate SNR and then only calculate the proper, more expensive, statistic at points where the quick SNR is above thresh. hpsigmasq : float The normalization factor (h+, h+). Default = None (=1, already normalized) hcsigmasq : float The normalization factor (hx, hx). Default = None (=1, already normalized) out : TimeSeries (optional, default=None) If given, use this array to store the output. Returns -------- det_stat : TimeSeries The SNR maximized over sky location """ # NOTE: Not much optimization has been done here! This may need to be # C-ified using scipy.weave. if out is None: out = zeros(len(hplus)) out.non_zero_locs = numpy.array([], dtype=out.dtype) else: if not hasattr(out, 'non_zero_locs'): # Doing this every time is not a zero-cost operation out.data[:] = 0 out.non_zero_locs = numpy.array([], dtype=out.dtype) else: # Only set non zero locations to zero out.data[out.non_zero_locs] = 0 # If threshold is given we can limit the points at which to compute the # full statistic if thresh: # This is the statistic that always overestimates the SNR... # It allows some unphysical freedom that the full statistic does not # # For now this is copied from the max-over-phase statistic. One could # probably make this faster by removing the imaginary components of # the matched filter, as these are not used here. idx_p, _ = events.threshold_only(hplus[analyse_slice], thresh / (2**0.5 * hpnorm)) idx_c, _ = events.threshold_only(hcross[analyse_slice], thresh / (2**0.5 * hcnorm)) idx_p = idx_p + analyse_slice.start idx_c = idx_c + analyse_slice.start hp_red = hplus[idx_p] * hpnorm hc_red = hcross[idx_p] * hcnorm stat_p = hp_red.real**2 + hp_red.imag**2 + \ hc_red.real**2 + hc_red.imag**2 locs_p = idx_p[stat_p > (thresh*thresh)] hp_red = hplus[idx_c] * hpnorm hc_red = hcross[idx_c] * hcnorm stat_c = hp_red.real**2 + hp_red.imag**2 + \ hc_red.real**2 + hc_red.imag**2 locs_c = idx_c[stat_c > (thresh*thresh)] locs = numpy.unique(numpy.concatenate((locs_p, locs_c))) hplus = hplus[locs] hcross = hcross[locs] hplus = hplus * hpnorm hcross = hcross * hcnorm # Calculate and sanity check the denominator denom = 1 - hphccorr*hphccorr if denom < 0: if hphccorr > 1: err_msg = "Overlap between hp and hc is given as %f. " %(hphccorr) err_msg += "How can an overlap be bigger than 1?" 
raise ValueError(err_msg) else: err_msg = "There really is no way to raise this error!?! " err_msg += "If you're seeing this, it is bad." raise ValueError(err_msg) if denom == 0: # This case, of hphccorr==1, makes the statistic degenerate # This case should not physically be possible luckily. err_msg = "You have supplied a real overlap between hp and hc of 1. " err_msg += "Ian is reasonably certain this is physically impossible " err_msg += "so why are you seeing this?" raise ValueError(err_msg) assert(len(hplus) == len(hcross)) # Now the stuff where comp. cost may be a problem hplus_magsq = numpy.real(hplus) * numpy.real(hplus) hcross_magsq = numpy.real(hcross) * numpy.real(hcross) rho_pluscross = numpy.real(hplus) * numpy.real(hcross) det_stat_sq = (hplus_magsq + hcross_magsq - 2 * rho_pluscross*hphccorr) det_stat = numpy.sqrt(det_stat_sq / denom) if thresh: out.data[locs] = det_stat out.non_zero_locs = locs return out else: return Array(det_stat, copy=False)
[ "def", "compute_max_snr_over_sky_loc_stat_no_phase", "(", "hplus", ",", "hcross", ",", "hphccorr", ",", "hpnorm", "=", "None", ",", "hcnorm", "=", "None", ",", "out", "=", "None", ",", "thresh", "=", "0", ",", "analyse_slice", "=", "None", ")", ":", "# NOTE: Not much optimization has been done here! This may need to be", "# C-ified using scipy.weave.", "if", "out", "is", "None", ":", "out", "=", "zeros", "(", "len", "(", "hplus", ")", ")", "out", ".", "non_zero_locs", "=", "numpy", ".", "array", "(", "[", "]", ",", "dtype", "=", "out", ".", "dtype", ")", "else", ":", "if", "not", "hasattr", "(", "out", ",", "'non_zero_locs'", ")", ":", "# Doing this every time is not a zero-cost operation", "out", ".", "data", "[", ":", "]", "=", "0", "out", ".", "non_zero_locs", "=", "numpy", ".", "array", "(", "[", "]", ",", "dtype", "=", "out", ".", "dtype", ")", "else", ":", "# Only set non zero locations to zero", "out", ".", "data", "[", "out", ".", "non_zero_locs", "]", "=", "0", "# If threshold is given we can limit the points at which to compute the", "# full statistic", "if", "thresh", ":", "# This is the statistic that always overestimates the SNR...", "# It allows some unphysical freedom that the full statistic does not", "#", "# For now this is copied from the max-over-phase statistic. One could", "# probably make this faster by removing the imaginary components of", "# the matched filter, as these are not used here.", "idx_p", ",", "_", "=", "events", ".", "threshold_only", "(", "hplus", "[", "analyse_slice", "]", ",", "thresh", "/", "(", "2", "**", "0.5", "*", "hpnorm", ")", ")", "idx_c", ",", "_", "=", "events", ".", "threshold_only", "(", "hcross", "[", "analyse_slice", "]", ",", "thresh", "/", "(", "2", "**", "0.5", "*", "hcnorm", ")", ")", "idx_p", "=", "idx_p", "+", "analyse_slice", ".", "start", "idx_c", "=", "idx_c", "+", "analyse_slice", ".", "start", "hp_red", "=", "hplus", "[", "idx_p", "]", "*", "hpnorm", "hc_red", "=", "hcross", "[", "idx_p", "]", "*", "hcnorm", "stat_p", "=", "hp_red", ".", "real", "**", "2", "+", "hp_red", ".", "imag", "**", "2", "+", "hc_red", ".", "real", "**", "2", "+", "hc_red", ".", "imag", "**", "2", "locs_p", "=", "idx_p", "[", "stat_p", ">", "(", "thresh", "*", "thresh", ")", "]", "hp_red", "=", "hplus", "[", "idx_c", "]", "*", "hpnorm", "hc_red", "=", "hcross", "[", "idx_c", "]", "*", "hcnorm", "stat_c", "=", "hp_red", ".", "real", "**", "2", "+", "hp_red", ".", "imag", "**", "2", "+", "hc_red", ".", "real", "**", "2", "+", "hc_red", ".", "imag", "**", "2", "locs_c", "=", "idx_c", "[", "stat_c", ">", "(", "thresh", "*", "thresh", ")", "]", "locs", "=", "numpy", ".", "unique", "(", "numpy", ".", "concatenate", "(", "(", "locs_p", ",", "locs_c", ")", ")", ")", "hplus", "=", "hplus", "[", "locs", "]", "hcross", "=", "hcross", "[", "locs", "]", "hplus", "=", "hplus", "*", "hpnorm", "hcross", "=", "hcross", "*", "hcnorm", "# Calculate and sanity check the denominator", "denom", "=", "1", "-", "hphccorr", "*", "hphccorr", "if", "denom", "<", "0", ":", "if", "hphccorr", ">", "1", ":", "err_msg", "=", "\"Overlap between hp and hc is given as %f. \"", "%", "(", "hphccorr", ")", "err_msg", "+=", "\"How can an overlap be bigger than 1?\"", "raise", "ValueError", "(", "err_msg", ")", "else", ":", "err_msg", "=", "\"There really is no way to raise this error!?! 
\"", "err_msg", "+=", "\"If you're seeing this, it is bad.\"", "raise", "ValueError", "(", "err_msg", ")", "if", "denom", "==", "0", ":", "# This case, of hphccorr==1, makes the statistic degenerate", "# This case should not physically be possible luckily.", "err_msg", "=", "\"You have supplied a real overlap between hp and hc of 1. \"", "err_msg", "+=", "\"Ian is reasonably certain this is physically impossible \"", "err_msg", "+=", "\"so why are you seeing this?\"", "raise", "ValueError", "(", "err_msg", ")", "assert", "(", "len", "(", "hplus", ")", "==", "len", "(", "hcross", ")", ")", "# Now the stuff where comp. cost may be a problem", "hplus_magsq", "=", "numpy", ".", "real", "(", "hplus", ")", "*", "numpy", ".", "real", "(", "hplus", ")", "hcross_magsq", "=", "numpy", ".", "real", "(", "hcross", ")", "*", "numpy", ".", "real", "(", "hcross", ")", "rho_pluscross", "=", "numpy", ".", "real", "(", "hplus", ")", "*", "numpy", ".", "real", "(", "hcross", ")", "det_stat_sq", "=", "(", "hplus_magsq", "+", "hcross_magsq", "-", "2", "*", "rho_pluscross", "*", "hphccorr", ")", "det_stat", "=", "numpy", ".", "sqrt", "(", "det_stat_sq", "/", "denom", ")", "if", "thresh", ":", "out", ".", "data", "[", "locs", "]", "=", "det_stat", "out", ".", "non_zero_locs", "=", "locs", "return", "out", "else", ":", "return", "Array", "(", "det_stat", ",", "copy", "=", "False", ")" ]
Compute the match maximized over polarization phase. In contrast to compute_max_snr_over_sky_loc_stat_no_phase this function performs no maximization over orbital phase, treating that as an intrinsic parameter. In the case of aligned-spin 2,2-mode only waveforms, this collapses to the normal statistic (at twice the computational cost!) Parameters ----------- hplus : TimeSeries This is the IFFTed complex SNR time series of (h+, data). If not normalized, supply the normalization factor so this can be done! It is recommended to normalize this before sending through this function hcross : TimeSeries This is the IFFTed complex SNR time series of (hx, data). If not normalized, supply the normalization factor so this can be done! hphccorr : float The real component of the overlap between the two polarizations Re[(h+, hx)]. Note that the imaginary component does not enter the detection statistic. This must be normalized and is sign-sensitive. thresh : float Used for optimization. If we do not care about the value of SNR values below thresh we can calculate a quick statistic that will always overestimate SNR and then only calculate the proper, more expensive, statistic at points where the quick SNR is above thresh. hpsigmasq : float The normalization factor (h+, h+). Default = None (=1, already normalized) hcsigmasq : float The normalization factor (hx, hx). Default = None (=1, already normalized) out : TimeSeries (optional, default=None) If given, use this array to store the output. Returns -------- det_stat : TimeSeries The SNR maximized over sky location
[ "Compute", "the", "match", "maximized", "over", "polarization", "phase", "." ]
python
train
nerdvegas/rez
src/rez/vendor/sortedcontainers/sortedlist.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/sortedcontainers/sortedlist.py#L33-L52
def recursive_repr(func): """Decorator to prevent infinite repr recursion.""" repr_running = set() @wraps(func) def wrapper(self): "Return ellipsis on recursive re-entry to function." key = id(self), get_ident() if key in repr_running: return '...' repr_running.add(key) try: return func(self) finally: repr_running.discard(key) return wrapper
[ "def", "recursive_repr", "(", "func", ")", ":", "repr_running", "=", "set", "(", ")", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "self", ")", ":", "\"Return ellipsis on recursive re-entry to function.\"", "key", "=", "id", "(", "self", ")", ",", "get_ident", "(", ")", "if", "key", "in", "repr_running", ":", "return", "'...'", "repr_running", ".", "add", "(", "key", ")", "try", ":", "return", "func", "(", "self", ")", "finally", ":", "repr_running", ".", "discard", "(", "key", ")", "return", "wrapper" ]
Decorator to prevent infinite repr recursion.
[ "Decorator", "to", "prevent", "infinite", "repr", "recursion", "." ]
python
train
reflexsc/reflex
dev/build.py
https://github.com/reflexsc/reflex/blob/cee6b0ccfef395ca5e157d644a2e3252cea9fe62/dev/build.py#L146-L194
def release(self, lane, status, target=None, meta=None, svcs=None): """Set release information on a build""" if target not in (None, 'current', 'future'): raise ValueError("\nError: Target must be None, 'current', or 'future'\n") svcs, meta, lane = self._prep_for_release(lane, svcs=svcs, meta=meta) when = time.time() # loathe non-functional dictionaries in python rel_data = meta.copy() rel_data.update({ "_time": when, "status": status, "services": list(svcs.keys()), }) rel_lane = self.obj.get('lanes', {}).get(lane, dict(log=[],status=status)) rel_lane['status'] = status rel_lane['log'] = [rel_data] + rel_lane.get('log', []) self.rcs.patch('build', self.name, { "lanes": { lane: rel_lane, } }) if target: for svc in svcs: rel_data = {target: self.name} # if target is specified, then also update svc.release # {current/previous/future} if target == "current": mysvc = svcs[svc] curver = mysvc.get('release', {}).get('current', '') prev = [] if curver: prev = mysvc.get('release', {}).get('previous', []) if not prev or prev[0] != curver: prev = [curver] + prev while len(prev) > 5: # magic values FTW prev.pop() # only keep history of 5 previous rel_data['previous'] = prev self.rcs.patch('service', svc, { "release": rel_data, "statuses": {status: when}, "status": status })
[ "def", "release", "(", "self", ",", "lane", ",", "status", ",", "target", "=", "None", ",", "meta", "=", "None", ",", "svcs", "=", "None", ")", ":", "if", "target", "not", "in", "(", "None", ",", "'current'", ",", "'future'", ")", ":", "raise", "ValueError", "(", "\"\\nError: Target must be None, 'current', or 'future'\\n\"", ")", "svcs", ",", "meta", ",", "lane", "=", "self", ".", "_prep_for_release", "(", "lane", ",", "svcs", "=", "svcs", ",", "meta", "=", "meta", ")", "when", "=", "time", ".", "time", "(", ")", "# loathe non-functional dictionaries in python", "rel_data", "=", "meta", ".", "copy", "(", ")", "rel_data", ".", "update", "(", "{", "\"_time\"", ":", "when", ",", "\"status\"", ":", "status", ",", "\"services\"", ":", "list", "(", "svcs", ".", "keys", "(", ")", ")", ",", "}", ")", "rel_lane", "=", "self", ".", "obj", ".", "get", "(", "'lanes'", ",", "{", "}", ")", ".", "get", "(", "lane", ",", "dict", "(", "log", "=", "[", "]", ",", "status", "=", "status", ")", ")", "rel_lane", "[", "'status'", "]", "=", "status", "rel_lane", "[", "'log'", "]", "=", "[", "rel_data", "]", "+", "rel_lane", ".", "get", "(", "'log'", ",", "[", "]", ")", "self", ".", "rcs", ".", "patch", "(", "'build'", ",", "self", ".", "name", ",", "{", "\"lanes\"", ":", "{", "lane", ":", "rel_lane", ",", "}", "}", ")", "if", "target", ":", "for", "svc", "in", "svcs", ":", "rel_data", "=", "{", "target", ":", "self", ".", "name", "}", "# if target is specified, then also update svc.release", "# {current/previous/future}", "if", "target", "==", "\"current\"", ":", "mysvc", "=", "svcs", "[", "svc", "]", "curver", "=", "mysvc", ".", "get", "(", "'release'", ",", "{", "}", ")", ".", "get", "(", "'current'", ",", "''", ")", "prev", "=", "[", "]", "if", "curver", ":", "prev", "=", "mysvc", ".", "get", "(", "'release'", ",", "{", "}", ")", ".", "get", "(", "'previous'", ",", "[", "]", ")", "if", "not", "prev", "or", "prev", "[", "0", "]", "!=", "curver", ":", "prev", "=", "[", "curver", "]", "+", "prev", "while", "len", "(", "prev", ")", ">", "5", ":", "# magic values FTW", "prev", ".", "pop", "(", ")", "# only keep history of 5 previous", "rel_data", "[", "'previous'", "]", "=", "prev", "self", ".", "rcs", ".", "patch", "(", "'service'", ",", "svc", ",", "{", "\"release\"", ":", "rel_data", ",", "\"statuses\"", ":", "{", "status", ":", "when", "}", ",", "\"status\"", ":", "status", "}", ")" ]
Set release information on a build
[ "Set", "release", "information", "on", "a", "build" ]
python
train
jmurty/xml4h
xml4h/nodes.py
https://github.com/jmurty/xml4h/blob/adbb45e27a01a869a505aee7bc16bad2f517b511/xml4h/nodes.py#L344-L359
def delete(self, destroy=True): """ Delete this node from the owning document. :param bool destroy: if True the child node will be destroyed in addition to being removed from the document. :returns: the removed child node, or *None* if the child was destroyed. """ removed_child = self.adapter.remove_node_child( self.adapter.get_node_parent(self.impl_node), self.impl_node, destroy_node=destroy) if removed_child is not None: return self.adapter.wrap_node(removed_child, None, self.adapter) else: return None
[ "def", "delete", "(", "self", ",", "destroy", "=", "True", ")", ":", "removed_child", "=", "self", ".", "adapter", ".", "remove_node_child", "(", "self", ".", "adapter", ".", "get_node_parent", "(", "self", ".", "impl_node", ")", ",", "self", ".", "impl_node", ",", "destroy_node", "=", "destroy", ")", "if", "removed_child", "is", "not", "None", ":", "return", "self", ".", "adapter", ".", "wrap_node", "(", "removed_child", ",", "None", ",", "self", ".", "adapter", ")", "else", ":", "return", "None" ]
Delete this node from the owning document. :param bool destroy: if True the child node will be destroyed in addition to being removed from the document. :returns: the removed child node, or *None* if the child was destroyed.
[ "Delete", "this", "node", "from", "the", "owning", "document", "." ]
python
train
owncloud/pyocclient
owncloud/owncloud.py
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L868-L887
def get_share(self, share_id): """Returns share information about known share :param share_id: id of the share to be checked :returns: instance of ShareInfo class :raises: ResponseError in case an HTTP error status was returned """ if (share_id is None) or not (isinstance(share_id, int)): return None res = self._make_ocs_request( 'GET', self.OCS_SERVICE_SHARE, 'shares/' + str(share_id) ) if res.status_code == 200: tree = ET.fromstring(res.content) self._check_ocs_status(tree) return self._get_shareinfo(tree.find('data').find('element')) raise HTTPResponseError(res)
[ "def", "get_share", "(", "self", ",", "share_id", ")", ":", "if", "(", "share_id", "is", "None", ")", "or", "not", "(", "isinstance", "(", "share_id", ",", "int", ")", ")", ":", "return", "None", "res", "=", "self", ".", "_make_ocs_request", "(", "'GET'", ",", "self", ".", "OCS_SERVICE_SHARE", ",", "'shares/'", "+", "str", "(", "share_id", ")", ")", "if", "res", ".", "status_code", "==", "200", ":", "tree", "=", "ET", ".", "fromstring", "(", "res", ".", "content", ")", "self", ".", "_check_ocs_status", "(", "tree", ")", "return", "self", ".", "_get_shareinfo", "(", "tree", ".", "find", "(", "'data'", ")", ".", "find", "(", "'element'", ")", ")", "raise", "HTTPResponseError", "(", "res", ")" ]
Returns share information about known share :param share_id: id of the share to be checked :returns: instance of ShareInfo class :raises: ResponseError in case an HTTP error status was returned
[ "Returns", "share", "information", "about", "known", "share" ]
python
train
awentzonline/keras-vgg-buddy
keras_vgg_buddy/models.py
https://github.com/awentzonline/keras-vgg-buddy/blob/716cb66396b839a66ec8dc66998066b360a8f395/keras_vgg_buddy/models.py#L21-L28
def img_to_vgg(x): '''Condition an image for use with the VGG16 model.''' x = x[:,:,::-1] # to BGR x[:, :, 0] -= 103.939 x[:, :, 1] -= 116.779 x[:, :, 2] -= 123.68 x = x.transpose((2, 0, 1)) return x
[ "def", "img_to_vgg", "(", "x", ")", ":", "x", "=", "x", "[", ":", ",", ":", ",", ":", ":", "-", "1", "]", "# to BGR", "x", "[", ":", ",", ":", ",", "0", "]", "-=", "103.939", "x", "[", ":", ",", ":", ",", "1", "]", "-=", "116.779", "x", "[", ":", ",", ":", ",", "2", "]", "-=", "123.68", "x", "=", "x", ".", "transpose", "(", "(", "2", ",", "0", ",", "1", ")", ")", "return", "x" ]
Condition an image for use with the VGG16 model.
[ "Condition", "an", "image", "for", "use", "with", "the", "VGG16", "model", "." ]
python
test
DistrictDataLabs/yellowbrick
yellowbrick/regressor/residuals.py
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/regressor/residuals.py#L202-L253
def finalize(self, **kwargs): """ Finalize executes any subclass-specific axes finalization steps. The user calls poof and poof calls finalize. Parameters ---------- kwargs: generic keyword arguments. """ # Set the title on the plot self.set_title( 'Prediction Error for {}'.format(self.name) ) # Square the axes to ensure a 45 degree line if self.shared_limits: # Get the current limits ylim = self.ax.get_ylim() xlim = self.ax.get_xlim() # Find the range that captures all data bounds = ( min(ylim[0], xlim[0]), max(ylim[1], xlim[1]), ) # Reset the limits self.ax.set_xlim(bounds) self.ax.set_ylim(bounds) # Ensure the aspect ratio is square self.ax.set_aspect('equal', adjustable='box') # Draw the 45 degree line if self.identity: draw_identity_line( ax=self.ax, ls='--', lw=2, c=self.colors['line'], alpha=0.5, label="identity" ) # Set the axes labels self.ax.set_ylabel(r'$\hat{y}$') self.ax.set_xlabel(r'$y$') # Set the legend # Note: it would be nice to be able to use the manual_legend utility # here, since if the user sets a low alpha value, the R2 color in the # legend will also become more translucent. Unfortunately this is a # bit tricky because adding a manual legend here would override the # best fit and 45 degree line legend components. In particular, the # best fit is plotted in draw because it depends on y and y_pred. self.ax.legend(loc='best', frameon=True)
[ "def", "finalize", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# Set the title on the plot", "self", ".", "set_title", "(", "'Prediction Error for {}'", ".", "format", "(", "self", ".", "name", ")", ")", "# Square the axes to ensure a 45 degree line", "if", "self", ".", "shared_limits", ":", "# Get the current limits", "ylim", "=", "self", ".", "ax", ".", "get_ylim", "(", ")", "xlim", "=", "self", ".", "ax", ".", "get_xlim", "(", ")", "# Find the range that captures all data", "bounds", "=", "(", "min", "(", "ylim", "[", "0", "]", ",", "xlim", "[", "0", "]", ")", ",", "max", "(", "ylim", "[", "1", "]", ",", "xlim", "[", "1", "]", ")", ",", ")", "# Reset the limits", "self", ".", "ax", ".", "set_xlim", "(", "bounds", ")", "self", ".", "ax", ".", "set_ylim", "(", "bounds", ")", "# Ensure the aspect ratio is square", "self", ".", "ax", ".", "set_aspect", "(", "'equal'", ",", "adjustable", "=", "'box'", ")", "# Draw the 45 degree line", "if", "self", ".", "identity", ":", "draw_identity_line", "(", "ax", "=", "self", ".", "ax", ",", "ls", "=", "'--'", ",", "lw", "=", "2", ",", "c", "=", "self", ".", "colors", "[", "'line'", "]", ",", "alpha", "=", "0.5", ",", "label", "=", "\"identity\"", ")", "# Set the axes labels", "self", ".", "ax", ".", "set_ylabel", "(", "r'$\\hat{y}$'", ")", "self", ".", "ax", ".", "set_xlabel", "(", "r'$y$'", ")", "# Set the legend", "# Note: it would be nice to be able to use the manual_legend utility", "# here, since if the user sets a low alpha value, the R2 color in the", "# legend will also become more translucent. Unfortunately this is a", "# bit tricky because adding a manual legend here would override the", "# best fit and 45 degree line legend components. In particular, the", "# best fit is plotted in draw because it depends on y and y_pred.", "self", ".", "ax", ".", "legend", "(", "loc", "=", "'best'", ",", "frameon", "=", "True", ")" ]
Finalize executes any subclass-specific axes finalization steps. The user calls poof and poof calls finalize. Parameters ---------- kwargs: generic keyword arguments.
[ "Finalize", "executes", "any", "subclass", "-", "specific", "axes", "finalization", "steps", ".", "The", "user", "calls", "poof", "and", "poof", "calls", "finalize", "." ]
python
train
ranaroussi/qtpylib
qtpylib/algo.py
https://github.com/ranaroussi/qtpylib/blob/0dbbc465fafd9cb9b0f4d10e1e07fae4e15032dd/qtpylib/algo.py#L792-L859
def _base_bar_handler(self, bar): """ non threaded bar handler (called by threaded _tick_handler) """ # bar symbol symbol = bar['symbol'].values if len(symbol) == 0: return symbol = symbol[0] self_bars = self.bars.copy() # work on copy is_tick_or_volume_bar = False handle_bar = True if self.resolution[-1] in ("S", "K", "V"): is_tick_or_volume_bar = True handle_bar = self._caller("_tick_handler") # drip is also ok handle_bar = handle_bar or self._caller("drip") if is_tick_or_volume_bar: # just add a bar (used by tick bar bandler) if self.threads == 0: self.bars = self._update_window(self.bars, bar, window=self.bar_window) else: self_bars = self._update_window(self_bars, bar, window=self.bar_window) else: # add the bar and resample to resolution if self.threads == 0: self.bars = self._update_window(self.bars, bar, window=self.bar_window, resolution=self.resolution) else: self_bars = self._update_window(self_bars, bar, window=self.bar_window, resolution=self.resolution) # assign new data to self.bars if threaded if self.threads > 0: self.bars = self._thread_safe_merge(symbol, self.bars, self_bars) # optimize pandas if len(self.bars) == 1: self.bars['symbol'] = self.bars['symbol'].astype('category') self.bars['symbol_group'] = self.bars['symbol_group'].astype('category') self.bars['asset_class'] = self.bars['asset_class'].astype('category') # new bar? hash_string = bar[:1]['symbol'].to_string().translate( str.maketrans({key: None for key in "\n -:+"})) this_bar_hash = abs(hash(hash_string)) % (10 ** 8) newbar = True if symbol in self.bar_hashes.keys(): newbar = self.bar_hashes[symbol] != this_bar_hash self.bar_hashes[symbol] = this_bar_hash if newbar and handle_bar: if self.bars[(self.bars['symbol'] == symbol) | ( self.bars['symbol_group'] == symbol)].empty: return bar_instrument = self.get_instrument(symbol) if bar_instrument: self.record_ts = bar.index[0] self.on_bar(bar_instrument) # if self.resolution[-1] not in ("S", "K", "V"): self.record(bar)
[ "def", "_base_bar_handler", "(", "self", ",", "bar", ")", ":", "# bar symbol", "symbol", "=", "bar", "[", "'symbol'", "]", ".", "values", "if", "len", "(", "symbol", ")", "==", "0", ":", "return", "symbol", "=", "symbol", "[", "0", "]", "self_bars", "=", "self", ".", "bars", ".", "copy", "(", ")", "# work on copy", "is_tick_or_volume_bar", "=", "False", "handle_bar", "=", "True", "if", "self", ".", "resolution", "[", "-", "1", "]", "in", "(", "\"S\"", ",", "\"K\"", ",", "\"V\"", ")", ":", "is_tick_or_volume_bar", "=", "True", "handle_bar", "=", "self", ".", "_caller", "(", "\"_tick_handler\"", ")", "# drip is also ok", "handle_bar", "=", "handle_bar", "or", "self", ".", "_caller", "(", "\"drip\"", ")", "if", "is_tick_or_volume_bar", ":", "# just add a bar (used by tick bar bandler)", "if", "self", ".", "threads", "==", "0", ":", "self", ".", "bars", "=", "self", ".", "_update_window", "(", "self", ".", "bars", ",", "bar", ",", "window", "=", "self", ".", "bar_window", ")", "else", ":", "self_bars", "=", "self", ".", "_update_window", "(", "self_bars", ",", "bar", ",", "window", "=", "self", ".", "bar_window", ")", "else", ":", "# add the bar and resample to resolution", "if", "self", ".", "threads", "==", "0", ":", "self", ".", "bars", "=", "self", ".", "_update_window", "(", "self", ".", "bars", ",", "bar", ",", "window", "=", "self", ".", "bar_window", ",", "resolution", "=", "self", ".", "resolution", ")", "else", ":", "self_bars", "=", "self", ".", "_update_window", "(", "self_bars", ",", "bar", ",", "window", "=", "self", ".", "bar_window", ",", "resolution", "=", "self", ".", "resolution", ")", "# assign new data to self.bars if threaded", "if", "self", ".", "threads", ">", "0", ":", "self", ".", "bars", "=", "self", ".", "_thread_safe_merge", "(", "symbol", ",", "self", ".", "bars", ",", "self_bars", ")", "# optimize pandas", "if", "len", "(", "self", ".", "bars", ")", "==", "1", ":", "self", ".", "bars", "[", "'symbol'", "]", "=", "self", ".", "bars", "[", "'symbol'", "]", ".", "astype", "(", "'category'", ")", "self", ".", "bars", "[", "'symbol_group'", "]", "=", "self", ".", "bars", "[", "'symbol_group'", "]", ".", "astype", "(", "'category'", ")", "self", ".", "bars", "[", "'asset_class'", "]", "=", "self", ".", "bars", "[", "'asset_class'", "]", ".", "astype", "(", "'category'", ")", "# new bar?", "hash_string", "=", "bar", "[", ":", "1", "]", "[", "'symbol'", "]", ".", "to_string", "(", ")", ".", "translate", "(", "str", ".", "maketrans", "(", "{", "key", ":", "None", "for", "key", "in", "\"\\n -:+\"", "}", ")", ")", "this_bar_hash", "=", "abs", "(", "hash", "(", "hash_string", ")", ")", "%", "(", "10", "**", "8", ")", "newbar", "=", "True", "if", "symbol", "in", "self", ".", "bar_hashes", ".", "keys", "(", ")", ":", "newbar", "=", "self", ".", "bar_hashes", "[", "symbol", "]", "!=", "this_bar_hash", "self", ".", "bar_hashes", "[", "symbol", "]", "=", "this_bar_hash", "if", "newbar", "and", "handle_bar", ":", "if", "self", ".", "bars", "[", "(", "self", ".", "bars", "[", "'symbol'", "]", "==", "symbol", ")", "|", "(", "self", ".", "bars", "[", "'symbol_group'", "]", "==", "symbol", ")", "]", ".", "empty", ":", "return", "bar_instrument", "=", "self", ".", "get_instrument", "(", "symbol", ")", "if", "bar_instrument", ":", "self", ".", "record_ts", "=", "bar", ".", "index", "[", "0", "]", "self", ".", "on_bar", "(", "bar_instrument", ")", "# if self.resolution[-1] not in (\"S\", \"K\", \"V\"):", "self", ".", "record", "(", "bar", ")" ]
non threaded bar handler (called by threaded _tick_handler)
[ "non", "threaded", "bar", "handler", "(", "called", "by", "threaded", "_tick_handler", ")" ]
python
train
wndhydrnt/python-oauth2
oauth2/store/memcache.py
https://github.com/wndhydrnt/python-oauth2/blob/abe3bf5f27bda2ff737cab387b040e2e6e85c2e2/oauth2/store/memcache.py#L54-L69
def save_code(self, authorization_code): """ Stores the data belonging to an authorization code token in memcache. See :class:`oauth2.store.AuthCodeStore`. """ key = self._generate_cache_key(authorization_code.code) self.mc.set(key, {"client_id": authorization_code.client_id, "code": authorization_code.code, "expires_at": authorization_code.expires_at, "redirect_uri": authorization_code.redirect_uri, "scopes": authorization_code.scopes, "data": authorization_code.data, "user_id": authorization_code.user_id})
[ "def", "save_code", "(", "self", ",", "authorization_code", ")", ":", "key", "=", "self", ".", "_generate_cache_key", "(", "authorization_code", ".", "code", ")", "self", ".", "mc", ".", "set", "(", "key", ",", "{", "\"client_id\"", ":", "authorization_code", ".", "client_id", ",", "\"code\"", ":", "authorization_code", ".", "code", ",", "\"expires_at\"", ":", "authorization_code", ".", "expires_at", ",", "\"redirect_uri\"", ":", "authorization_code", ".", "redirect_uri", ",", "\"scopes\"", ":", "authorization_code", ".", "scopes", ",", "\"data\"", ":", "authorization_code", ".", "data", ",", "\"user_id\"", ":", "authorization_code", ".", "user_id", "}", ")" ]
Stores the data belonging to an authorization code token in memcache. See :class:`oauth2.store.AuthCodeStore`.
[ "Stores", "the", "data", "belonging", "to", "an", "authorization", "code", "token", "in", "memcache", "." ]
python
train
google/grr
grr/core/grr_response_core/lib/parsers/chrome_history.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/parsers/chrome_history.py#L95-L121
def Parse(self): """Iterator returning a list for each entry in history. We store all the download events in an array (choosing this over visits since there are likely to be less of them). We later interleave them with visit events to get an overall correct time order. Yields: a list of attributes for each entry """ # Query for old style and newstyle downloads storage. query_iter = itertools.chain( self.Query(self.DOWNLOADS_QUERY), self.Query(self.DOWNLOADS_QUERY_2)) results = [] for timestamp, url, path, received_bytes, total_bytes in query_iter: timestamp = self.ConvertTimestamp(timestamp) results.append((timestamp, "CHROME_DOWNLOAD", url, path, received_bytes, total_bytes)) for timestamp, url, title, typed_count in self.Query(self.VISITS_QUERY): timestamp = self.ConvertTimestamp(timestamp) results.append((timestamp, "CHROME_VISIT", url, title, typed_count, "")) results.sort(key=lambda it: it[0]) for it in results: yield it
[ "def", "Parse", "(", "self", ")", ":", "# Query for old style and newstyle downloads storage.", "query_iter", "=", "itertools", ".", "chain", "(", "self", ".", "Query", "(", "self", ".", "DOWNLOADS_QUERY", ")", ",", "self", ".", "Query", "(", "self", ".", "DOWNLOADS_QUERY_2", ")", ")", "results", "=", "[", "]", "for", "timestamp", ",", "url", ",", "path", ",", "received_bytes", ",", "total_bytes", "in", "query_iter", ":", "timestamp", "=", "self", ".", "ConvertTimestamp", "(", "timestamp", ")", "results", ".", "append", "(", "(", "timestamp", ",", "\"CHROME_DOWNLOAD\"", ",", "url", ",", "path", ",", "received_bytes", ",", "total_bytes", ")", ")", "for", "timestamp", ",", "url", ",", "title", ",", "typed_count", "in", "self", ".", "Query", "(", "self", ".", "VISITS_QUERY", ")", ":", "timestamp", "=", "self", ".", "ConvertTimestamp", "(", "timestamp", ")", "results", ".", "append", "(", "(", "timestamp", ",", "\"CHROME_VISIT\"", ",", "url", ",", "title", ",", "typed_count", ",", "\"\"", ")", ")", "results", ".", "sort", "(", "key", "=", "lambda", "it", ":", "it", "[", "0", "]", ")", "for", "it", "in", "results", ":", "yield", "it" ]
Iterator returning a list for each entry in history. We store all the download events in an array (choosing this over visits since there are likely to be less of them). We later interleave them with visit events to get an overall correct time order. Yields: a list of attributes for each entry
[ "Iterator", "returning", "a", "list", "for", "each", "entry", "in", "history", "." ]
python
train
MuhammedHasan/sklearn_utils
sklearn_utils/preprocessing/standard_scale_by_label.py
https://github.com/MuhammedHasan/sklearn_utils/blob/337c3b7a27f4921d12da496f66a2b83ef582b413/sklearn_utils/preprocessing/standard_scale_by_label.py#L15-L24
def partial_fit(self, X, y): """ :X: {array-like, sparse matrix}, shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. :y: Healthy 'h' or 'sick_name' """ X, y = filter_by_label(X, y, self.reference_label) super().partial_fit(X, y) return self
[ "def", "partial_fit", "(", "self", ",", "X", ",", "y", ")", ":", "X", ",", "y", "=", "filter_by_label", "(", "X", ",", "y", ",", "self", ".", "reference_label", ")", "super", "(", ")", ".", "partial_fit", "(", "X", ",", "y", ")", "return", "self" ]
:X: {array-like, sparse matrix}, shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. :y: Healthy 'h' or 'sick_name'
[ ":", "X", ":", "{", "array", "-", "like", "sparse", "matrix", "}", "shape", "[", "n_samples", "n_features", "]", "The", "data", "used", "to", "compute", "the", "mean", "and", "standard", "deviation", "used", "for", "later", "scaling", "along", "the", "features", "axis", ".", ":", "y", ":", "Healthy", "h", "or", "sick_name" ]
python
test
google/google-visualization-python
gviz_api.py
https://github.com/google/google-visualization-python/blob/cbfb4d69ad2f4ca30dc55791629280aa3214c8e3/gviz_api.py#L567-L591
def AppendData(self, data, custom_properties=None): """Appends new data to the table. Data is appended in rows. Data must comply with the table schema passed in to __init__(). See CoerceValue() for a list of acceptable data types. See the class documentation for more information and examples of schema and data values. Args: data: The row to add to the table. The data must conform to the table description format. custom_properties: A dictionary of string to string, representing the custom properties to add to all the rows. Raises: DataTableException: The data structure does not match the description. """ # If the maximal depth is 0, we simply iterate over the data table # lines and insert them using _InnerAppendData. Otherwise, we simply # let the _InnerAppendData handle all the levels. if not self.__columns[-1]["depth"]: for row in data: self._InnerAppendData(({}, custom_properties), row, 0) else: self._InnerAppendData(({}, custom_properties), data, 0)
[ "def", "AppendData", "(", "self", ",", "data", ",", "custom_properties", "=", "None", ")", ":", "# If the maximal depth is 0, we simply iterate over the data table", "# lines and insert them using _InnerAppendData. Otherwise, we simply", "# let the _InnerAppendData handle all the levels.", "if", "not", "self", ".", "__columns", "[", "-", "1", "]", "[", "\"depth\"", "]", ":", "for", "row", "in", "data", ":", "self", ".", "_InnerAppendData", "(", "(", "{", "}", ",", "custom_properties", ")", ",", "row", ",", "0", ")", "else", ":", "self", ".", "_InnerAppendData", "(", "(", "{", "}", ",", "custom_properties", ")", ",", "data", ",", "0", ")" ]
Appends new data to the table. Data is appended in rows. Data must comply with the table schema passed in to __init__(). See CoerceValue() for a list of acceptable data types. See the class documentation for more information and examples of schema and data values. Args: data: The row to add to the table. The data must conform to the table description format. custom_properties: A dictionary of string to string, representing the custom properties to add to all the rows. Raises: DataTableException: The data structure does not match the description.
[ "Appends", "new", "data", "to", "the", "table", "." ]
python
train
xflr6/gsheets
gsheets/backend.py
https://github.com/xflr6/gsheets/blob/ca4f1273044704e529c1138e3f942836fc496e1b/gsheets/backend.py#L63-L69
def values(service, id, ranges): """Fetch and return spreadsheet cell values with Google sheets API.""" params = {'majorDimension': 'ROWS', 'valueRenderOption': 'UNFORMATTED_VALUE', 'dateTimeRenderOption': 'FORMATTED_STRING'} params.update(spreadsheetId=id, ranges=ranges) response = service.spreadsheets().values().batchGet(**params).execute() return response['valueRanges']
[ "def", "values", "(", "service", ",", "id", ",", "ranges", ")", ":", "params", "=", "{", "'majorDimension'", ":", "'ROWS'", ",", "'valueRenderOption'", ":", "'UNFORMATTED_VALUE'", ",", "'dateTimeRenderOption'", ":", "'FORMATTED_STRING'", "}", "params", ".", "update", "(", "spreadsheetId", "=", "id", ",", "ranges", "=", "ranges", ")", "response", "=", "service", ".", "spreadsheets", "(", ")", ".", "values", "(", ")", ".", "batchGet", "(", "*", "*", "params", ")", ".", "execute", "(", ")", "return", "response", "[", "'valueRanges'", "]" ]
Fetch and return spreadsheet cell values with Google sheets API.
[ "Fetch", "and", "return", "spreadsheet", "cell", "values", "with", "Google", "sheets", "API", "." ]
python
train
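A hedged sketch of how the values() helper above might be called; it assumes a Sheets API v4 service built with googleapiclient, and the spreadsheet id and range are placeholders. Obtaining credentials is left to the caller.

# Illustrative only; requires google-api-python-client and valid credentials.
from googleapiclient.discovery import build

from gsheets import backend

def fetch_rows(creds, spreadsheet_id):
    """Return the batchGet valueRanges for a placeholder range."""
    service = build('sheets', 'v4', credentials=creds)
    return backend.values(service, id=spreadsheet_id, ranges=['Sheet1!A1:C10'])

# Each returned entry is a valueRange dict with 'range' and (optionally) 'values'.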
DasIch/argvard
argvard/__init__.py
https://github.com/DasIch/argvard/blob/2603e323a995e0915ce41fcf49e2a82519556195/argvard/__init__.py#L119-L149
def option(self, signature, overrideable=False):
    """
    A decorator for registering an option with the given `signature`::

        @app.option('--option')
        def option(context):
            # do something
            pass

    If the name in the signature has already been used to register an
    option, a :exc:`RuntimeError` is raised unless the registered option has
    been defined with `overrideable` set to `True`.

    :param signature: The signature of the option as a string.
    :param overrideable: If `True` the registered option can be overridden.
    """
    def decorator(function):
        try:
            function = annotations()(function)
        except RuntimeError:
            pass
        option = Option.from_string(signature, function, overrideable=overrideable)
        for name in option.names:
            if name in self.options and not self.options[name].overrideable:
                raise RuntimeError('%s is already defined' % name)
        self.options.update((name, option) for name in option.names)
        return function
    return decorator
[ "def", "option", "(", "self", ",", "signature", ",", "overrideable", "=", "False", ")", ":", "def", "decorator", "(", "function", ")", ":", "try", ":", "function", "=", "annotations", "(", ")", "(", "function", ")", "except", "RuntimeError", ":", "pass", "option", "=", "Option", ".", "from_string", "(", "signature", ",", "function", ",", "overrideable", "=", "overrideable", ")", "for", "name", "in", "option", ".", "names", ":", "if", "name", "in", "self", ".", "options", "and", "not", "self", ".", "options", "[", "name", "]", ".", "overrideable", ":", "raise", "RuntimeError", "(", "'%s is already defined'", "%", "name", ")", "self", ".", "options", ".", "update", "(", "(", "name", ",", "option", ")", "for", "name", "in", "option", ".", "names", ")", "return", "function", "return", "decorator" ]
A decorator for registering an option with the given `signature`::

    @app.option('--option')
    def option(context):
        # do something
        pass

If the name in the signature has already been used to register an
option, a :exc:`RuntimeError` is raised unless the registered option has
been defined with `overrideable` set to `True`.

:param signature: The signature of the option as a string.
:param overrideable: If `True` the registered option can be overridden.
[ "A", "decorator", "for", "registering", "an", "option", "with", "the", "given", "signature", "::" ]
python
train
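A hedged sketch of the decorator in use, following the example embedded in the docstring; the application, option name, and behaviour are invented, and it assumes the package exposes Argvard as its entry-point class and that context behaves like a dict.

# Illustrative only; assumes `from argvard import Argvard` works as in the project's README.
from argvard import Argvard

application = Argvard()

# Registering the same option name a second time would raise RuntimeError,
# because overrideable defaults to False.
@application.option('--verbose')
def verbose(context):
    context['verbose'] = True

@application.main()
def main(context):
    print('verbose mode:', context.get('verbose', False))

if __name__ == '__main__':
    application()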
angr/angr
angr/sim_state_options.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/sim_state_options.py#L372-L390
def register_option(cls, name, types, default=None, description=None):
    """
    Register a state option.

    :param str name: Name of the state option.
    :param types: A collection of allowed types of this state option.
    :param default: The default value of this state option.
    :param str description: The description of this state option.
    :return: None
    """
    if name in cls.OPTIONS:
        raise SimStateOptionsError("A state option with the same name has been registered.")

    if isinstance(types, type):
        types = {types}

    o = StateOption(name, types, default=default, description=description)
    cls.OPTIONS[name] = o
[ "def", "register_option", "(", "cls", ",", "name", ",", "types", ",", "default", "=", "None", ",", "description", "=", "None", ")", ":", "if", "name", "in", "cls", ".", "OPTIONS", ":", "raise", "SimStateOptionsError", "(", "\"A state option with the same name has been registered.\"", ")", "if", "isinstance", "(", "types", ",", "type", ")", ":", "types", "=", "{", "types", "}", "o", "=", "StateOption", "(", "name", ",", "types", ",", "default", "=", "default", ",", "description", "=", "description", ")", "cls", ".", "OPTIONS", "[", "name", "]", "=", "o" ]
Register a state option.

:param str name: Name of the state option.
:param types: A collection of allowed types of this state option.
:param default: The default value of this state option.
:param str description: The description of this state option.
:return: None
[ "Register", "a", "state", "option", "." ]
python
train
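A hedged sketch of registering and reading back a state option with the classmethod above; the option name, types, and description are invented, and it assumes the class is named SimStateOptions and lives in the module shown in the record's path.

# Illustrative only; assumes angr is installed and the class/module names below are correct.
from angr.sim_state_options import SimStateOptions

# Register a hypothetical boolean option with a default and a description.
SimStateOptions.register_option(
    "TRACK_MY_METRIC",
    {bool},
    default=False,
    description="Hypothetical flag used only for this example.",
)

# Registering the same name twice raises SimStateOptionsError.
print("TRACK_MY_METRIC" in SimStateOptions.OPTIONS)  # True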
nabla-c0d3/sslyze
sslyze/plugins/utils/trust_store/trust_store.py
https://github.com/nabla-c0d3/sslyze/blob/0fb3ae668453d7ecf616d0755f237ca7be9f62fa/sslyze/plugins/utils/trust_store/trust_store.py#L121-L147
def build_verified_certificate_chain(self, received_certificate_chain: List[Certificate]) -> List[Certificate]:
    """Try to figure out the verified chain by finding the anchor/root CA the received chain chains up to in the
    trust store.

    This will not clean the certificate chain if additional/invalid certificates were sent and the signatures and
    fields (notBefore, etc.) are not verified.
    """
    # The certificates must have been sent in the correct order or we give up
    if not self._is_certificate_chain_order_valid(received_certificate_chain):
        raise InvalidCertificateChainOrderError()

    # TODO: OpenSSL 1.1.0 has SSL_get0_verified_chain() to do this directly
    verified_certificate_chain = []
    anchor_cert = None
    # Assume that the certificates were sent in the correct order or give up
    for cert in received_certificate_chain:
        anchor_cert = self._get_certificate_with_subject(cert.issuer)
        verified_certificate_chain.append(cert)
        if anchor_cert:
            verified_certificate_chain.append(anchor_cert)
            break

    if anchor_cert is None:
        # Could not build the verified chain
        raise AnchorCertificateNotInTrustStoreError()

    return verified_certificate_chain
[ "def", "build_verified_certificate_chain", "(", "self", ",", "received_certificate_chain", ":", "List", "[", "Certificate", "]", ")", "->", "List", "[", "Certificate", "]", ":", "# The certificates must have been sent in the correct order or we give up", "if", "not", "self", ".", "_is_certificate_chain_order_valid", "(", "received_certificate_chain", ")", ":", "raise", "InvalidCertificateChainOrderError", "(", ")", "# TODO: OpenSSL 1.1.0 has SSL_get0_verified_chain() to do this directly", "verified_certificate_chain", "=", "[", "]", "anchor_cert", "=", "None", "# Assume that the certificates were sent in the correct order or give up", "for", "cert", "in", "received_certificate_chain", ":", "anchor_cert", "=", "self", ".", "_get_certificate_with_subject", "(", "cert", ".", "issuer", ")", "verified_certificate_chain", ".", "append", "(", "cert", ")", "if", "anchor_cert", ":", "verified_certificate_chain", ".", "append", "(", "anchor_cert", ")", "break", "if", "anchor_cert", "is", "None", ":", "# Could not build the verified chain", "raise", "AnchorCertificateNotInTrustStoreError", "(", ")", "return", "verified_certificate_chain" ]
Try to figure out the verified chain by finding the anchor/root CA the received chain chains up to in the
trust store.

This will not clean the certificate chain if additional/invalid certificates were sent and the signatures and
fields (notBefore, etc.) are not verified.
[ "Try", "to", "figure", "out", "the", "verified", "chain", "by", "finding", "the", "anchor", "/", "root", "CA", "the", "received", "chain", "chains", "up", "to", "in", "the", "trust", "store", "." ]
python
train
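A hedged sketch of calling the method above on a trust store object; how that object is obtained is not shown in the record, and the assumption that the two exception classes live in the same module is unverified. Certificates are parsed with the cryptography package, which is what the List[Certificate] annotation suggests.

# Illustrative only; the import locations of the exceptions are assumptions.
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from sslyze.plugins.utils.trust_store.trust_store import (
    InvalidCertificateChainOrderError,
    AnchorCertificateNotInTrustStoreError,
)

def verify_chain(trust_store, pem_certificates):
    """Build the verified chain for PEM-encoded certificates sent leaf first."""
    received_chain = [
        x509.load_pem_x509_certificate(pem.encode('ascii'), default_backend())
        for pem in pem_certificates
    ]
    try:
        return trust_store.build_verified_certificate_chain(received_chain)
    except InvalidCertificateChainOrderError:
        # Certificates were not sent in leaf-to-root order.
        return []
    except AnchorCertificateNotInTrustStoreError:
        # The chain does not end at a CA present in this trust store.
        return []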
fananimi/pyzk
zk/base.py
https://github.com/fananimi/pyzk/blob/1a765d616526efdcb4c9adfcc9b1d10f6ed8b938/zk/base.py#L388-L401
def disconnect(self):
    """
    disconnect from the connected device

    :return: bool
    """
    cmd_response = self.__send_command(const.CMD_EXIT)
    if cmd_response.get('status'):
        self.is_connect = False
        if self.__sock:
            self.__sock.close()
        return True
    else:
        raise ZKErrorResponse("can't disconnect")
[ "def", "disconnect", "(", "self", ")", ":", "cmd_response", "=", "self", ".", "__send_command", "(", "const", ".", "CMD_EXIT", ")", "if", "cmd_response", ".", "get", "(", "'status'", ")", ":", "self", ".", "is_connect", "=", "False", "if", "self", ".", "__sock", ":", "self", ".", "__sock", ".", "close", "(", ")", "return", "True", "else", ":", "raise", "ZKErrorResponse", "(", "\"can't disconnect\"", ")" ]
disconnect from the connected device

:return: bool
[ "disconnect", "from", "the", "connected", "device" ]
python
train
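A hedged sketch of a connect/disconnect round trip with pyzk, following the project's README; the device IP and port are placeholders and a reachable ZKTeco terminal is needed for it to do anything useful.

# Illustrative only; requires a reachable ZKTeco device.
from zk import ZK

zk = ZK('192.168.1.201', port=4370, timeout=5)  # placeholder address
conn = None
try:
    conn = zk.connect()                      # returns the connected instance
    print('firmware:', conn.get_firmware_version())
finally:
    if conn:
        conn.disconnect()                    # raises ZKErrorResponse if the device refuses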
SITools2/pySitools2_1.0
sitools2/core/query.py
https://github.com/SITools2/pySitools2_1.0/blob/acd13198162456ba401a0b923af989bb29feb3b6/sitools2/core/query.py#L386-L394
def __buildLimit(self, query, limitResMax):
    """Builds limit parameter."""
    limit = query._getParameters()['limit']
    if limitResMax > 0 and limitResMax < limit:
        query = UpdateParameter(query, 'limit', limitResMax)
        query = UpdateParameter(query, 'nocount', 'true')
    elif limitResMax > 0 and limitResMax >= limit:
        query = UpdateParameter(query, 'nocount', 'true')
    return query
[ "def", "__buildLimit", "(", "self", ",", "query", ",", "limitResMax", ")", ":", "limit", "=", "query", ".", "_getParameters", "(", ")", "[", "'limit'", "]", "if", "limitResMax", ">", "0", "and", "limitResMax", "<", "limit", ":", "query", "=", "UpdateParameter", "(", "query", ",", "'limit'", ",", "limitResMax", ")", "query", "=", "UpdateParameter", "(", "query", ",", "'nocount'", ",", "'true'", ")", "elif", "limitResMax", ">", "0", "and", "limitResMax", ">=", "limit", ":", "query", "=", "UpdateParameter", "(", "query", ",", "'nocount'", ",", "'true'", ")", "return", "query" ]
Builds limit parameter.
[ "Builds", "limit", "parameter", "." ]
python
train
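Since __buildLimit is private (name-mangled), it is not called directly; the following is a hedged, standalone sketch of the same capping rule using plain dicts rather than the pySitools2 Query/UpdateParameter classes, with made-up names.

# Illustrative only; mirrors the capping logic, not the pySitools2 API.
def cap_limit(params, limit_res_max):
    """Cap 'limit' at limit_res_max and disable row counting whenever a cap is requested."""
    params = dict(params)
    if limit_res_max > 0:
        params['nocount'] = 'true'
        if limit_res_max < params['limit']:
            params['limit'] = limit_res_max
    return params

print(cap_limit({'limit': 300}, 100))   # {'limit': 100, 'nocount': 'true'}
print(cap_limit({'limit': 300}, 500))   # {'limit': 300, 'nocount': 'true'}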
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/gloo/wrappers.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/gloo/wrappers.py#L253-L271
def set_blend_func(self, srgb='one', drgb='zero', salpha=None, dalpha=None):
    """Specify pixel arithmetic for RGB and alpha

    Parameters
    ----------
    srgb : str
        Source RGB factor.
    drgb : str
        Destination RGB factor.
    salpha : str | None
        Source alpha factor. If None, ``srgb`` is used.
    dalpha : str
        Destination alpha factor. If None, ``drgb`` is used.
    """
    salpha = srgb if salpha is None else salpha
    dalpha = drgb if dalpha is None else dalpha
    self.glir.command('FUNC', 'glBlendFuncSeparate',
                      srgb, drgb, salpha, dalpha)
[ "def", "set_blend_func", "(", "self", ",", "srgb", "=", "'one'", ",", "drgb", "=", "'zero'", ",", "salpha", "=", "None", ",", "dalpha", "=", "None", ")", ":", "salpha", "=", "srgb", "if", "salpha", "is", "None", "else", "salpha", "dalpha", "=", "drgb", "if", "dalpha", "is", "None", "else", "dalpha", "self", ".", "glir", ".", "command", "(", "'FUNC'", ",", "'glBlendFuncSeparate'", ",", "srgb", ",", "drgb", ",", "salpha", ",", "dalpha", ")" ]
Specify pixel arithmetic for RGB and alpha

Parameters
----------
srgb : str
    Source RGB factor.
drgb : str
    Destination RGB factor.
salpha : str | None
    Source alpha factor. If None, ``srgb`` is used.
dalpha : str
    Destination alpha factor. If None, ``drgb`` is used.
[ "Specify", "pixel", "arithmetic", "for", "RGB", "and", "alpha", "Parameters", "----------", "srgb", ":", "str", "Source", "RGB", "factor", ".", "drgb", ":", "str", "Destination", "RGB", "factor", ".", "salpha", ":", "str", "|", "None", "Source", "alpha", "factor", ".", "If", "None", "srgb", "is", "used", ".", "dalpha", ":", "str", "Destination", "alpha", "factor", ".", "If", "None", "drgb", "is", "used", "." ]
python
train
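A hedged sketch of a standard alpha-blending setup with this wrapper; it assumes the module-level convenience functions vispy.gloo.set_state and vispy.gloo.set_blend_func mirror the method in the record, which is the usual pattern for the gloo wrappers.

# Illustrative only; a typical straight-alpha blending configuration.
from vispy import gloo

gloo.set_state(blend=True)                                # blending must be enabled first
gloo.set_blend_func('src_alpha', 'one_minus_src_alpha',   # RGB factors
                    'one', 'one_minus_src_alpha')         # alpha factors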