Dataset columns:
  repo              string (length 7-55)
  path              string (length 4-223)
  url               string (length 87-315)
  code              string (length 75-104k)
  code_tokens       list
  docstring         string (length 1-46.9k)
  docstring_tokens  list
  language          string (1 distinct value)
  partition         string (3 distinct values)
  avg_line_len      float64 (7.91-980)
ggravlingen/pytradfri
pytradfri/group.py
https://github.com/ggravlingen/pytradfri/blob/63750fa8fb27158c013d24865cdaa7fb82b3ab53/pytradfri/group.py#L70-L81
def set_dimmer(self, dimmer, transition_time=None):
    """Set dimmer value of a group.

    dimmer: Integer between 0..255
    transition_time: Integer representing tenth of a second (default None)
    """
    values = {
        ATTR_LIGHT_DIMMER: dimmer,
    }

    if transition_time is not None:
        values[ATTR_TRANSITION_TIME] = transition_time

    return self.set_values(values)
[ "def", "set_dimmer", "(", "self", ",", "dimmer", ",", "transition_time", "=", "None", ")", ":", "values", "=", "{", "ATTR_LIGHT_DIMMER", ":", "dimmer", ",", "}", "if", "transition_time", "is", "not", "None", ":", "values", "[", "ATTR_TRANSITION_TIME", "]", "=", "transition_time", "return", "self", ".", "set_values", "(", "values", ")" ]
Set dimmer value of a group. dimmer: Integer between 0..255 transition_time: Integer representing tenth of a second (default None)
[ "Set", "dimmer", "value", "of", "a", "group", "." ]
python
train
34.75
tensorflow/tensor2tensor
tensor2tensor/models/video/sv2p_params.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/sv2p_params.py#L145-L151
def next_frame_sv2p_cutoff():
  """SV2P model with additional cutoff in L2 loss for environments like pong."""
  hparams = next_frame_sv2p()
  hparams.video_modality_loss_cutoff = 0.4
  hparams.video_num_input_frames = 4
  hparams.video_num_target_frames = 1
  return hparams
[ "def", "next_frame_sv2p_cutoff", "(", ")", ":", "hparams", "=", "next_frame_sv2p", "(", ")", "hparams", ".", "video_modality_loss_cutoff", "=", "0.4", "hparams", ".", "video_num_input_frames", "=", "4", "hparams", ".", "video_num_target_frames", "=", "1", "return", "hparams" ]
SV2P model with additional cutoff in L2 loss for environments like pong.
[ "SV2P", "model", "with", "additional", "cutoff", "in", "L2", "loss", "for", "environments", "like", "pong", "." ]
python
train
38.428571
monarch-initiative/dipper
dipper/sources/KEGG.py
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L354-L423
def _process_ortholog_classes(self, limit=None):
    """
    This method add the KEGG orthology classes to the graph.

    If there's an embedded enzyme commission number,
    that is added as an xref.

    Triples created:
    <orthology_class_id> is a class
    <orthology_class_id> has label <orthology_symbols>
    <orthology_class_id> has description <orthology_description>

    :param limit:
    :return:
    """
    LOG.info("Processing ortholog classes")
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    line_counter = 0
    raw = '/'.join((self.rawdir, self.files['ortholog_classes']['file']))
    with open(raw, 'r', encoding="iso-8859-1") as csvfile:
        filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        for row in filereader:
            line_counter += 1
            (orthology_class_id, orthology_class_name) = row

            if self.test_mode and orthology_class_id \
                    not in self.test_ids['orthology_classes']:
                continue

            # The orthology class is essentially a KEGG gene ID
            # that is species agnostic.
            # Add the ID and label as a gene family class
            other_labels = re.split(r'[;,]', orthology_class_name)
            # the first one is the label we'll use
            orthology_label = other_labels[0]

            orthology_class_id = 'KEGG-'+orthology_class_id.strip()

            orthology_type = self.globaltt['gene_family']
            model.addClassToGraph(
                orthology_class_id, orthology_label, orthology_type)
            if len(other_labels) > 1:
                # add the rest as synonyms
                # todo skip the first
                for s in other_labels:
                    model.addSynonym(orthology_class_id, s.strip())

                # add the last one as the description
                d = other_labels[len(other_labels)-1]
                model.addDescription(orthology_class_id, d)

                # add the enzyme commission number (EC:1.2.99.5)as an xref
                # sometimes there's two, like [EC:1.3.5.1 1.3.5.4]
                # can also have a dash, like EC:1.10.3.-
                ec_matches = re.findall(r'((?:\d+|\.|-){5,7})', d)
                if ec_matches is not None:
                    for ecm in ec_matches:
                        model.addXref(orthology_class_id, 'EC:' + ecm)

            if not self.test_mode and limit is not None and line_counter > limit:
                break

    LOG.info("Done with ortholog classes")
    return
[ "def", "_process_ortholog_classes", "(", "self", ",", "limit", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"Processing ortholog classes\"", ")", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "model", "=", "Model", "(", "graph", ")", "line_counter", "=", "0", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "'ortholog_classes'", "]", "[", "'file'", "]", ")", ")", "with", "open", "(", "raw", ",", "'r'", ",", "encoding", "=", "\"iso-8859-1\"", ")", "as", "csvfile", ":", "filereader", "=", "csv", ".", "reader", "(", "csvfile", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "for", "row", "in", "filereader", ":", "line_counter", "+=", "1", "(", "orthology_class_id", ",", "orthology_class_name", ")", "=", "row", "if", "self", ".", "test_mode", "and", "orthology_class_id", "not", "in", "self", ".", "test_ids", "[", "'orthology_classes'", "]", ":", "continue", "# The orthology class is essentially a KEGG gene ID", "# that is species agnostic.", "# Add the ID and label as a gene family class", "other_labels", "=", "re", ".", "split", "(", "r'[;,]'", ",", "orthology_class_name", ")", "# the first one is the label we'll use", "orthology_label", "=", "other_labels", "[", "0", "]", "orthology_class_id", "=", "'KEGG-'", "+", "orthology_class_id", ".", "strip", "(", ")", "orthology_type", "=", "self", ".", "globaltt", "[", "'gene_family'", "]", "model", ".", "addClassToGraph", "(", "orthology_class_id", ",", "orthology_label", ",", "orthology_type", ")", "if", "len", "(", "other_labels", ")", ">", "1", ":", "# add the rest as synonyms", "# todo skip the first", "for", "s", "in", "other_labels", ":", "model", ".", "addSynonym", "(", "orthology_class_id", ",", "s", ".", "strip", "(", ")", ")", "# add the last one as the description", "d", "=", "other_labels", "[", "len", "(", "other_labels", ")", "-", "1", "]", "model", ".", "addDescription", "(", "orthology_class_id", ",", "d", ")", "# add the enzyme commission number (EC:1.2.99.5)as an xref", "# sometimes there's two, like [EC:1.3.5.1 1.3.5.4]", "# can also have a dash, like EC:1.10.3.-", "ec_matches", "=", "re", ".", "findall", "(", "r'((?:\\d+|\\.|-){5,7})'", ",", "d", ")", "if", "ec_matches", "is", "not", "None", ":", "for", "ecm", "in", "ec_matches", ":", "model", ".", "addXref", "(", "orthology_class_id", ",", "'EC:'", "+", "ecm", ")", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ":", "break", "LOG", ".", "info", "(", "\"Done with ortholog classes\"", ")", "return" ]
This method add the KEGG orthology classes to the graph. If there's an embedded enzyme commission number, that is added as an xref. Triples created: <orthology_class_id> is a class <orthology_class_id> has label <orthology_symbols> <orthology_class_id> has description <orthology_description> :param limit: :return:
[ "This", "method", "add", "the", "KEGG", "orthology", "classes", "to", "the", "graph", "." ]
python
train
39.528571
neherlab/treetime
treetime/gtr.py
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/gtr.py#L821-L861
def prob_t_profiles(self, profile_pair, multiplicity, t,
                    return_log=False, ignore_gaps=True):
    '''
    Calculate the probability of observing a node pair at a distance t

    Parameters
    ----------
    profile_pair: numpy arrays
        Probability distributions of the nucleotides at either
        end of the branch. pp[0] = parent, pp[1] = child
    multiplicity : numpy array
        The number of times an alignment pattern is observed
    t : float
        Length of the branch separating parent and child
    ignore_gaps: bool
        If True, ignore mutations to and from gaps in distance calculations
    return_log : bool
        Whether or not to exponentiate the result
    '''
    if t<0:
        logP = -ttconf.BIG_NUMBER
    else:
        Qt = self.expQt(t)
        if len(Qt.shape)==3:
            res = np.einsum('ai,ija,aj->a', profile_pair[1], Qt, profile_pair[0])
        else:
            res = np.einsum('ai,ij,aj->a', profile_pair[1], Qt, profile_pair[0])

        if ignore_gaps and (self.gap_index is not None):
            # calculate the probability that neither outgroup/node has a gap
            non_gap_frac = (1-profile_pair[0][:,self.gap_index])*(1-profile_pair[1][:,self.gap_index])
            # weigh log LH by the non-gap probability
            logP = np.sum(multiplicity*np.log(res)*non_gap_frac)
        else:
            logP = np.sum(multiplicity*np.log(res))

    return logP if return_log else np.exp(logP)
[ "def", "prob_t_profiles", "(", "self", ",", "profile_pair", ",", "multiplicity", ",", "t", ",", "return_log", "=", "False", ",", "ignore_gaps", "=", "True", ")", ":", "if", "t", "<", "0", ":", "logP", "=", "-", "ttconf", ".", "BIG_NUMBER", "else", ":", "Qt", "=", "self", ".", "expQt", "(", "t", ")", "if", "len", "(", "Qt", ".", "shape", ")", "==", "3", ":", "res", "=", "np", ".", "einsum", "(", "'ai,ija,aj->a'", ",", "profile_pair", "[", "1", "]", ",", "Qt", ",", "profile_pair", "[", "0", "]", ")", "else", ":", "res", "=", "np", ".", "einsum", "(", "'ai,ij,aj->a'", ",", "profile_pair", "[", "1", "]", ",", "Qt", ",", "profile_pair", "[", "0", "]", ")", "if", "ignore_gaps", "and", "(", "self", ".", "gap_index", "is", "not", "None", ")", ":", "# calculate the probability that neither outgroup/node has a gap", "non_gap_frac", "=", "(", "1", "-", "profile_pair", "[", "0", "]", "[", ":", ",", "self", ".", "gap_index", "]", ")", "*", "(", "1", "-", "profile_pair", "[", "1", "]", "[", ":", ",", "self", ".", "gap_index", "]", ")", "# weigh log LH by the non-gap probability", "logP", "=", "np", ".", "sum", "(", "multiplicity", "*", "np", ".", "log", "(", "res", ")", "*", "non_gap_frac", ")", "else", ":", "logP", "=", "np", ".", "sum", "(", "multiplicity", "*", "np", ".", "log", "(", "res", ")", ")", "return", "logP", "if", "return_log", "else", "np", ".", "exp", "(", "logP", ")" ]
Calculate the probability of observing a node pair at a distance t Parameters ---------- profile_pair: numpy arrays Probability distributions of the nucleotides at either end of the branch. pp[0] = parent, pp[1] = child multiplicity : numpy array The number of times an alignment pattern is observed t : float Length of the branch separating parent and child ignore_gaps: bool If True, ignore mutations to and from gaps in distance calculations return_log : bool Whether or not to exponentiate the result
[ "Calculate", "the", "probability", "of", "observing", "a", "node", "pair", "at", "a", "distance", "t" ]
python
test
38.219512
artemrizhov/django-mail-templated
mail_templated/message.py
https://github.com/artemrizhov/django-mail-templated/blob/1b428e7b6e02a5cf775bc83d6f5fd8c5f56d7932/mail_templated/message.py#L207-L227
def send(self, *args, **kwargs):
    """
    Send email message, render if it is not rendered yet.

    Note
    ----
    Any extra arguments are passed to
    :class:`EmailMultiAlternatives.send() <django.core.mail.EmailMessage>`.

    Keyword Arguments
    -----------------
    clean : bool
        If ``True``, remove any template specific properties from the
        message object. Default is ``False``.
    """
    clean = kwargs.pop('clean', False)
    if not self._is_rendered:
        self.render()
    if clean:
        self.clean()
    return super(EmailMessage, self).send(*args, **kwargs)
[ "def", "send", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "clean", "=", "kwargs", ".", "pop", "(", "'clean'", ",", "False", ")", "if", "not", "self", ".", "_is_rendered", ":", "self", ".", "render", "(", ")", "if", "clean", ":", "self", ".", "clean", "(", ")", "return", "super", "(", "EmailMessage", ",", "self", ")", ".", "send", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Send email message, render if it is not rendered yet. Note ---- Any extra arguments are passed to :class:`EmailMultiAlternatives.send() <django.core.mail.EmailMessage>`. Keyword Arguments ----------------- clean : bool If ``True``, remove any template specific properties from the message object. Default is ``False``.
[ "Send", "email", "message", "render", "if", "it", "is", "not", "rendered", "yet", "." ]
python
train
31.142857
jazzband/django-analytical
analytical/templatetags/snapengage.py
https://github.com/jazzband/django-analytical/blob/5487fd677bd47bc63fc2cf39597a0adc5d6c9ab3/analytical/templatetags/snapengage.py#L56-L66
def snapengage(parser, token):
    """
    SnapEngage set-up template tag.

    Renders Javascript code to set-up SnapEngage chat.  You must supply
    your widget ID in the ``SNAPENGAGE_WIDGET_ID`` setting.
    """
    bits = token.split_contents()
    if len(bits) > 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % bits[0])
    return SnapEngageNode()
[ "def", "snapengage", "(", "parser", ",", "token", ")", ":", "bits", "=", "token", ".", "split_contents", "(", ")", "if", "len", "(", "bits", ")", ">", "1", ":", "raise", "TemplateSyntaxError", "(", "\"'%s' takes no arguments\"", "%", "bits", "[", "0", "]", ")", "return", "SnapEngageNode", "(", ")" ]
SnapEngage set-up template tag. Renders Javascript code to set-up SnapEngage chat. You must supply your widget ID in the ``SNAPENGAGE_WIDGET_ID`` setting.
[ "SnapEngage", "set", "-", "up", "template", "tag", "." ]
python
valid
32.727273
okpy/ok-client
client/protocols/unlock.py
https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/protocols/unlock.py#L90-L184
def interact(self, unique_id, case_id, question_prompt, answer, choices=None, randomize=True):
    """Reads student input for unlocking tests until the student
    answers correctly.

    PARAMETERS:
    unique_id       -- str; the ID that is recorded with this unlocking
                       attempt.
    case_id         -- str; the ID that is recorded with this unlocking
                       attempt.
    question_prompt -- str; the question prompt
    answer          -- list; a list of locked lines in a test case answer.
    choices         -- list or None; a list of choices. If None or an
                       empty list, signifies the question is not multiple
                       choice.
    randomize       -- bool; if True, randomizes the choices on first
                       invocation.

    DESCRIPTION:
    Continually prompt the student for an answer to an unlocking
    question until one of the folliwng happens:

        1. The student supplies the correct answer, in which case
           the supplied answer is returned
        2. The student aborts abnormally (either by typing 'exit()' or
           using Ctrl-C/D. In this case, return None

    Correctness is determined by the verify method.

    RETURNS:
    list; the correct solution (that the student supplied). Each element
    in the list is a line of the correct output.
    """
    if randomize and choices:
        choices = random.sample(choices, len(choices))
    correct = False
    while not correct:
        if choices:
            assert len(answer) == 1, 'Choices must have 1 line of output'
            choice_map = self._display_choices(choices)

        question_timestamp = datetime.now()
        input_lines = []

        for line_number, line in enumerate(answer):
            if len(answer) == 1:
                prompt = self.PROMPT
            else:
                prompt = '(line {}){}'.format(line_number + 1, self.PROMPT)

            student_input = format.normalize(self._input(prompt))
            self._add_history(student_input)
            if student_input in self.EXIT_INPUTS:
                raise EOFError

            if choices and student_input in choice_map:
                student_input = choice_map[student_input]

            correct_answer = self._verify_student_input(student_input, line)
            if correct_answer:
                input_lines.append(correct_answer)
            else:
                input_lines.append(student_input)
                break
        else:
            correct = True

        tg_id = -1
        misU_count_dict = {}
        rationale = "Unknown - Default Value"

        if not correct:
            guidance_data = self.guidance_util.show_guidance_msg(unique_id, input_lines,
                                                                 self.hash_key)
            misU_count_dict, tg_id, printed_msg, rationale = guidance_data
        else:
            rationale = self.guidance_util.prompt_with_prob()
            print("-- OK! --")
            printed_msg = ["-- OK! --"]

        self.analytics.append({
            'id': unique_id,
            'case_id': case_id,
            'question timestamp': self.unix_time(question_timestamp),
            'answer timestamp': self.unix_time(datetime.now()),
            'prompt': question_prompt,
            'answer': input_lines,
            'correct': correct,
            'treatment group id': tg_id,
            'rationale': rationale,
            'misU count': misU_count_dict,
            'printed msg': printed_msg
        })
        print()
    return input_lines
[ "def", "interact", "(", "self", ",", "unique_id", ",", "case_id", ",", "question_prompt", ",", "answer", ",", "choices", "=", "None", ",", "randomize", "=", "True", ")", ":", "if", "randomize", "and", "choices", ":", "choices", "=", "random", ".", "sample", "(", "choices", ",", "len", "(", "choices", ")", ")", "correct", "=", "False", "while", "not", "correct", ":", "if", "choices", ":", "assert", "len", "(", "answer", ")", "==", "1", ",", "'Choices must have 1 line of output'", "choice_map", "=", "self", ".", "_display_choices", "(", "choices", ")", "question_timestamp", "=", "datetime", ".", "now", "(", ")", "input_lines", "=", "[", "]", "for", "line_number", ",", "line", "in", "enumerate", "(", "answer", ")", ":", "if", "len", "(", "answer", ")", "==", "1", ":", "prompt", "=", "self", ".", "PROMPT", "else", ":", "prompt", "=", "'(line {}){}'", ".", "format", "(", "line_number", "+", "1", ",", "self", ".", "PROMPT", ")", "student_input", "=", "format", ".", "normalize", "(", "self", ".", "_input", "(", "prompt", ")", ")", "self", ".", "_add_history", "(", "student_input", ")", "if", "student_input", "in", "self", ".", "EXIT_INPUTS", ":", "raise", "EOFError", "if", "choices", "and", "student_input", "in", "choice_map", ":", "student_input", "=", "choice_map", "[", "student_input", "]", "correct_answer", "=", "self", ".", "_verify_student_input", "(", "student_input", ",", "line", ")", "if", "correct_answer", ":", "input_lines", ".", "append", "(", "correct_answer", ")", "else", ":", "input_lines", ".", "append", "(", "student_input", ")", "break", "else", ":", "correct", "=", "True", "tg_id", "=", "-", "1", "misU_count_dict", "=", "{", "}", "rationale", "=", "\"Unknown - Default Value\"", "if", "not", "correct", ":", "guidance_data", "=", "self", ".", "guidance_util", ".", "show_guidance_msg", "(", "unique_id", ",", "input_lines", ",", "self", ".", "hash_key", ")", "misU_count_dict", ",", "tg_id", ",", "printed_msg", ",", "rationale", "=", "guidance_data", "else", ":", "rationale", "=", "self", ".", "guidance_util", ".", "prompt_with_prob", "(", ")", "print", "(", "\"-- OK! --\"", ")", "printed_msg", "=", "[", "\"-- OK! --\"", "]", "self", ".", "analytics", ".", "append", "(", "{", "'id'", ":", "unique_id", ",", "'case_id'", ":", "case_id", ",", "'question timestamp'", ":", "self", ".", "unix_time", "(", "question_timestamp", ")", ",", "'answer timestamp'", ":", "self", ".", "unix_time", "(", "datetime", ".", "now", "(", ")", ")", ",", "'prompt'", ":", "question_prompt", ",", "'answer'", ":", "input_lines", ",", "'correct'", ":", "correct", ",", "'treatment group id'", ":", "tg_id", ",", "'rationale'", ":", "rationale", ",", "'misU count'", ":", "misU_count_dict", ",", "'printed msg'", ":", "printed_msg", "}", ")", "print", "(", ")", "return", "input_lines" ]
Reads student input for unlocking tests until the student answers correctly. PARAMETERS: unique_id -- str; the ID that is recorded with this unlocking attempt. case_id -- str; the ID that is recorded with this unlocking attempt. question_prompt -- str; the question prompt answer -- list; a list of locked lines in a test case answer. choices -- list or None; a list of choices. If None or an empty list, signifies the question is not multiple choice. randomize -- bool; if True, randomizes the choices on first invocation. DESCRIPTION: Continually prompt the student for an answer to an unlocking question until one of the folliwng happens: 1. The student supplies the correct answer, in which case the supplied answer is returned 2. The student aborts abnormally (either by typing 'exit()' or using Ctrl-C/D. In this case, return None Correctness is determined by the verify method. RETURNS: list; the correct solution (that the student supplied). Each element in the list is a line of the correct output.
[ "Reads", "student", "input", "for", "unlocking", "tests", "until", "the", "student", "answers", "correctly", "." ]
python
train
40.178947
google/dotty
efilter/protocol.py
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/protocol.py#L147-L164
def __get_type_args(for_type=None, for_types=None):
    """Parse the arguments and return a tuple of types to implement for.

    Raises:
        ValueError or TypeError as appropriate.
    """
    if for_type:
        if for_types:
            raise ValueError("Cannot pass both for_type and for_types.")
        for_types = (for_type,)
    elif for_types:
        if not isinstance(for_types, tuple):
            raise TypeError("for_types must be passed as a tuple of "
                            "types (classes).")
    else:
        raise ValueError("Must pass either for_type or for_types.")

    return for_types
[ "def", "__get_type_args", "(", "for_type", "=", "None", ",", "for_types", "=", "None", ")", ":", "if", "for_type", ":", "if", "for_types", ":", "raise", "ValueError", "(", "\"Cannot pass both for_type and for_types.\"", ")", "for_types", "=", "(", "for_type", ",", ")", "elif", "for_types", ":", "if", "not", "isinstance", "(", "for_types", ",", "tuple", ")", ":", "raise", "TypeError", "(", "\"for_types must be passed as a tuple of \"", "\"types (classes).\"", ")", "else", ":", "raise", "ValueError", "(", "\"Must pass either for_type or for_types.\"", ")", "return", "for_types" ]
Parse the arguments and return a tuple of types to implement for. Raises: ValueError or TypeError as appropriate.
[ "Parse", "the", "arguments", "and", "return", "a", "tuple", "of", "types", "to", "implement", "for", "." ]
python
train
36.833333
google/grr
grr/core/grr_response_core/lib/util/compatibility.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/util/compatibility.py#L252-L271
def Environ(variable, default):
  """A wrapper for `os.environ.get` that works the same way in both Pythons.

  Args:
    variable: A name of the variable to get the value of.
    default: A default value to return in case no value for the given variable
      is set.

  Returns:
    An environment value of the given variable.
  """
  precondition.AssertType(variable, Text)

  value = os.environ.get(variable, default)
  if value is None:
    return default

  if PY2:
    # TODO(hanuszczak): https://github.com/google/pytype/issues/127
    value = value.decode("utf-8")  # pytype: disable=attribute-error

  return value
[ "def", "Environ", "(", "variable", ",", "default", ")", ":", "precondition", ".", "AssertType", "(", "variable", ",", "Text", ")", "value", "=", "os", ".", "environ", ".", "get", "(", "variable", ",", "default", ")", "if", "value", "is", "None", ":", "return", "default", "if", "PY2", ":", "# TODO(hanuszczak): https://github.com/google/pytype/issues/127", "value", "=", "value", ".", "decode", "(", "\"utf-8\"", ")", "# pytype: disable=attribute-error", "return", "value" ]
A wrapper for `os.environ.get` that works the same way in both Pythons. Args: variable: A name of the variable to get the value of. default: A default value to return in case no value for the given variable is set. Returns: An environment value of the given variable.
[ "A", "wrapper", "for", "os", ".", "environ", ".", "get", "that", "works", "the", "same", "way", "in", "both", "Pythons", "." ]
python
train
30.15
elastic/elasticsearch-py
elasticsearch/client/ingest.py
https://github.com/elastic/elasticsearch-py/blob/2aab285c8f506f3863cbdaba3c90a685c510ba00/elasticsearch/client/ingest.py#L5-L14
def get_pipeline(self, id=None, params=None):
    """
    `<https://www.elastic.co/guide/en/elasticsearch/plugins/current/ingest.html>`_

    :arg id: Comma separated list of pipeline ids. Wildcards supported
    :arg master_timeout: Explicit operation timeout for connection to master
        node
    """
    return self.transport.perform_request('GET',
        _make_path('_ingest', 'pipeline', id), params=params)
[ "def", "get_pipeline", "(", "self", ",", "id", "=", "None", ",", "params", "=", "None", ")", ":", "return", "self", ".", "transport", ".", "perform_request", "(", "'GET'", ",", "_make_path", "(", "'_ingest'", ",", "'pipeline'", ",", "id", ")", ",", "params", "=", "params", ")" ]
`<https://www.elastic.co/guide/en/elasticsearch/plugins/current/ingest.html>`_ :arg id: Comma separated list of pipeline ids. Wildcards supported :arg master_timeout: Explicit operation timeout for connection to master node
[ "<https", ":", "//", "www", ".", "elastic", ".", "co", "/", "guide", "/", "en", "/", "elasticsearch", "/", "plugins", "/", "current", "/", "ingest", ".", "html", ">", "_" ]
python
train
44
lsst-sqre/documenteer
documenteer/sphinxext/utils.py
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/utils.py#L44-L83
def make_python_xref_nodes(py_typestr, state, hide_namespace=False):
    """Make docutils nodes containing a cross-reference to a Python object.

    Parameters
    ----------
    py_typestr : `str`
        Name of the Python object. For example
        ``'mypackage.mymodule.MyClass'``. If you have the object itself, or
        its type, use the `make_python_xref_nodes_for_type` function instead.
    state : ``docutils.statemachine.State``
        Usually the directive's ``state`` attribute.
    hide_namespace : `bool`, optional
        If `True`, the namespace of the object is hidden in the rendered
        cross reference. Internally, this uses ``:py:obj:`~{py_obj}` (note
        tilde).

    Returns
    -------
    instance from ``docutils.nodes``
        Docutils node representing the cross reference.

    Examples
    --------
    If called from within a directive:

    .. code-block:: python

       make_python_xref_nodes('numpy.sin', self.state)

    See also
    --------
    `make_python_xref_nodes_for_type`
    """
    if hide_namespace:
        template = ':py:obj:`~{}`\n'
    else:
        template = ':py:obj:`{}`\n'
    xref_text = template.format(py_typestr)

    return parse_rst_content(xref_text, state)
[ "def", "make_python_xref_nodes", "(", "py_typestr", ",", "state", ",", "hide_namespace", "=", "False", ")", ":", "if", "hide_namespace", ":", "template", "=", "':py:obj:`~{}`\\n'", "else", ":", "template", "=", "':py:obj:`{}`\\n'", "xref_text", "=", "template", ".", "format", "(", "py_typestr", ")", "return", "parse_rst_content", "(", "xref_text", ",", "state", ")" ]
Make docutils nodes containing a cross-reference to a Python object. Parameters ---------- py_typestr : `str` Name of the Python object. For example ``'mypackage.mymodule.MyClass'``. If you have the object itself, or its type, use the `make_python_xref_nodes_for_type` function instead. state : ``docutils.statemachine.State`` Usually the directive's ``state`` attribute. hide_namespace : `bool`, optional If `True`, the namespace of the object is hidden in the rendered cross reference. Internally, this uses ``:py:obj:`~{py_obj}` (note tilde). Returns ------- instance from ``docutils.nodes`` Docutils node representing the cross reference. Examples -------- If called from within a directive: .. code-block:: python make_python_xref_nodes('numpy.sin', self.state) See also -------- `make_python_xref_nodes_for_type`
[ "Make", "docutils", "nodes", "containing", "a", "cross", "-", "reference", "to", "a", "Python", "object", "." ]
python
train
29.95
rtfd/sphinx-autoapi
autoapi/mappers/python/mapper.py
https://github.com/rtfd/sphinx-autoapi/blob/9735f43a8d9ff4620c7bcbd177fd1bb7608052e9/autoapi/mappers/python/mapper.py#L24-L69
def _expand_wildcard_placeholder(original_module, originals_map, placeholder):
    """Expand a wildcard placeholder to a sequence of named placeholders.

    :param original_module: The data dictionary of the module
        that the placeholder is imported from.
    :type original_module: dict
    :param originals_map: A map of the names of children under the module
        to their data dictionaries.
    :type originals_map: dict(str, dict)
    :param placeholder: The wildcard placeholder to expand.
    :type placeholder: dict

    :returns: The placeholders that the wildcard placeholder represents.
    :rtype: list(dict)
    """
    originals = originals_map.values()
    if original_module["all"] is not None:
        originals = []
        for name in original_module["all"]:
            if name == "__all__":
                continue

            if name not in originals_map:
                msg = "Invalid __all__ entry {0} in {1}".format(
                    name, original_module["name"]
                )
                LOGGER.warning(msg)
                continue

            originals.append(originals_map[name])

    placeholders = []
    for original in originals:
        new_full_name = placeholder["full_name"].replace("*", original["name"])
        new_original_path = placeholder["original_path"].replace("*", original["name"])
        if "original_path" in original:
            new_original_path = original["original_path"]
        new_placeholder = dict(
            placeholder,
            name=original["name"],
            full_name=new_full_name,
            original_path=new_original_path,
        )
        placeholders.append(new_placeholder)

    return placeholders
[ "def", "_expand_wildcard_placeholder", "(", "original_module", ",", "originals_map", ",", "placeholder", ")", ":", "originals", "=", "originals_map", ".", "values", "(", ")", "if", "original_module", "[", "\"all\"", "]", "is", "not", "None", ":", "originals", "=", "[", "]", "for", "name", "in", "original_module", "[", "\"all\"", "]", ":", "if", "name", "==", "\"__all__\"", ":", "continue", "if", "name", "not", "in", "originals_map", ":", "msg", "=", "\"Invalid __all__ entry {0} in {1}\"", ".", "format", "(", "name", ",", "original_module", "[", "\"name\"", "]", ")", "LOGGER", ".", "warning", "(", "msg", ")", "continue", "originals", ".", "append", "(", "originals_map", "[", "name", "]", ")", "placeholders", "=", "[", "]", "for", "original", "in", "originals", ":", "new_full_name", "=", "placeholder", "[", "\"full_name\"", "]", ".", "replace", "(", "\"*\"", ",", "original", "[", "\"name\"", "]", ")", "new_original_path", "=", "placeholder", "[", "\"original_path\"", "]", ".", "replace", "(", "\"*\"", ",", "original", "[", "\"name\"", "]", ")", "if", "\"original_path\"", "in", "original", ":", "new_original_path", "=", "original", "[", "\"original_path\"", "]", "new_placeholder", "=", "dict", "(", "placeholder", ",", "name", "=", "original", "[", "\"name\"", "]", ",", "full_name", "=", "new_full_name", ",", "original_path", "=", "new_original_path", ",", ")", "placeholders", ".", "append", "(", "new_placeholder", ")", "return", "placeholders" ]
Expand a wildcard placeholder to a sequence of named placeholders. :param original_module: The data dictionary of the module that the placeholder is imported from. :type original_module: dict :param originals_map: A map of the names of children under the module to their data dictionaries. :type originals_map: dict(str, dict) :param placeholder: The wildcard placeholder to expand. :type placeholder: dict :returns: The placeholders that the wildcard placeholder represents. :rtype: list(dict)
[ "Expand", "a", "wildcard", "placeholder", "to", "a", "sequence", "of", "named", "placeholders", "." ]
python
train
36.152174
pgmpy/pgmpy
pgmpy/readwrite/BIF.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/readwrite/BIF.py#L535-L552
def write_bif(self, filename):
    """
    Writes the BIF data into a file

    Parameters
    ----------
    filename : Name of the file

    Example
    -------
    >>> from pgmpy.readwrite import BIFReader, BIFWriter
    >>> model = BIFReader('dog-problem.bif').get_model()
    >>> writer = BIFWriter(model)
    >>> writer.write_bif(filname='test_file.bif')
    """
    writer = self.__str__()
    with open(filename, 'w') as fout:
        fout.write(writer)
[ "def", "write_bif", "(", "self", ",", "filename", ")", ":", "writer", "=", "self", ".", "__str__", "(", ")", "with", "open", "(", "filename", ",", "'w'", ")", "as", "fout", ":", "fout", ".", "write", "(", "writer", ")" ]
Writes the BIF data into a file Parameters ---------- filename : Name of the file Example ------- >>> from pgmpy.readwrite import BIFReader, BIFWriter >>> model = BIFReader('dog-problem.bif').get_model() >>> writer = BIFWriter(model) >>> writer.write_bif(filname='test_file.bif')
[ "Writes", "the", "BIF", "data", "into", "a", "file" ]
python
train
28
gwastro/pycbc
pycbc/events/stat.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/events/stat.py#L416-L426
def coinc(self, s0, s1, slide, step):  # pylint:disable=unused-argument
    """Calculate the final coinc ranking statistic"""
    # Approximate log likelihood ratio by summing single-ifo negative
    # log noise likelihoods
    loglr = - s0 - s1
    # add squares of threshold stat values via idealized Gaussian formula
    threshes = [self.fits_by_tid[i]['thresh'] for i in self.ifos]
    loglr += sum([t**2. / 2. for t in threshes])
    # convert back to a coinc-SNR-like statistic
    # via log likelihood ratio \propto rho_c^2 / 2
    return (2. * loglr) ** 0.5
[ "def", "coinc", "(", "self", ",", "s0", ",", "s1", ",", "slide", ",", "step", ")", ":", "# pylint:disable=unused-argument", "# Approximate log likelihood ratio by summing single-ifo negative", "# log noise likelihoods", "loglr", "=", "-", "s0", "-", "s1", "# add squares of threshold stat values via idealized Gaussian formula", "threshes", "=", "[", "self", ".", "fits_by_tid", "[", "i", "]", "[", "'thresh'", "]", "for", "i", "in", "self", ".", "ifos", "]", "loglr", "+=", "sum", "(", "[", "t", "**", "2.", "/", "2.", "for", "t", "in", "threshes", "]", ")", "# convert back to a coinc-SNR-like statistic", "# via log likelihood ratio \\propto rho_c^2 / 2", "return", "(", "2.", "*", "loglr", ")", "**", "0.5" ]
Calculate the final coinc ranking statistic
[ "Calculate", "the", "final", "coinc", "ranking", "statistic" ]
python
train
54
pywbem/pywbem
wbemcli.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/wbemcli.py#L2298-L2389
def orip(ip, rc=None, r=None, fl=None, fs=None, ot=None, coe=None, moc=None):
    # pylint: disable=too-many-arguments, redefined-outer-name, invalid-name
    """
    This function is a wrapper for
    :meth:`~pywbem.WBEMConnection.OpenReferenceInstancePaths`.

    Open an enumeration session to retrieve the instance paths of the
    association instances that reference a source instance.

    Use the :func:`~wbemcli.pip` function to retrieve the next set of
    instance paths or the :func:`~wbcmeli.ce` function to close the
    enumeration session before it is complete.

    Parameters:

      ip (:class:`~pywbem.CIMInstanceName`):
        Source instance path.

      rc (:term:`string`):
        ResultClass filter: Include only traversals across this association
        (result) class.

        `None` means this filter is not applied.

      r (:term:`string`):
        Role filter: Include only traversals from this role (= reference
        name) in source object.

        `None` means this filter is not applied.

      fl (:term:`string`):
        Filter query language to be used for the filter defined in the `fs`
        parameter. The DMTF-defined Filter Query Language
        (see :term:`DSP0212`) is specified as "DMTF:FQL".

        `None` means that no such filtering is peformed.

      fs (:term:`string`):
        Filter to apply to objects to be returned. Based on filter query
        language defined by `fl` parameter.

        `None` means that no such filtering is peformed.

      ot (:class:`~pywbem.Uint32`):
        Operation timeout in seconds. This is the minimum time the WBEM server
        must keep the enumeration session open between requests on that
        session.

        A value of 0 indicates that the server should never time out.

        The server may reject the proposed value.

        `None` will cause the server to use its default timeout.

      coe (:class:`py:bool`):
        Continue on error flag.

        `None` will cause the server to use its default of `False`.

      moc (:class:`~pywbem.Uint32`):
        Maximum number of objects to return for this operation.

        `None` will cause the server to use its default of 0.

    Returns:

      A :func:`~py:collections.namedtuple` object containing the following
      named items:

      * **paths** (list of :class:`~pywbem.CIMInstanceName`):
        The retrieved instance paths.

      * **eos** (:class:`py:bool`):
        `True` if the enumeration session is exhausted after this operation.
        Otherwise `eos` is `False` and the `context` item is the context
        object for the next operation on the enumeration session.

      * **context** (:func:`py:tuple` of server_context, namespace):
        A context object identifying the open enumeration session, including
        its current enumeration state, and the namespace. This object must be
        supplied with the next pull or close operation for this enumeration
        session.
    """
    return CONN.OpenReferenceInstancePaths(ip,
                                           ResultClass=rc,
                                           Role=r,
                                           FilterQueryLanguage=fl,
                                           FilterQuery=fs,
                                           OperationTimeout=ot,
                                           ContinueOnError=coe,
                                           MaxObjectCount=moc)
[ "def", "orip", "(", "ip", ",", "rc", "=", "None", ",", "r", "=", "None", ",", "fl", "=", "None", ",", "fs", "=", "None", ",", "ot", "=", "None", ",", "coe", "=", "None", ",", "moc", "=", "None", ")", ":", "# pylint: disable=too-many-arguments, redefined-outer-name, invalid-name", "return", "CONN", ".", "OpenReferenceInstancePaths", "(", "ip", ",", "ResultClass", "=", "rc", ",", "Role", "=", "r", ",", "FilterQueryLanguage", "=", "fl", ",", "FilterQuery", "=", "fs", ",", "OperationTimeout", "=", "ot", ",", "ContinueOnError", "=", "coe", ",", "MaxObjectCount", "=", "moc", ")" ]
This function is a wrapper for :meth:`~pywbem.WBEMConnection.OpenReferenceInstancePaths`. Open an enumeration session to retrieve the instance paths of the association instances that reference a source instance. Use the :func:`~wbemcli.pip` function to retrieve the next set of instance paths or the :func:`~wbcmeli.ce` function to close the enumeration session before it is complete. Parameters: ip (:class:`~pywbem.CIMInstanceName`): Source instance path. rc (:term:`string`): ResultClass filter: Include only traversals across this association (result) class. `None` means this filter is not applied. r (:term:`string`): Role filter: Include only traversals from this role (= reference name) in source object. `None` means this filter is not applied. fl (:term:`string`): Filter query language to be used for the filter defined in the `fs` parameter. The DMTF-defined Filter Query Language (see :term:`DSP0212`) is specified as "DMTF:FQL". `None` means that no such filtering is peformed. fs (:term:`string`): Filter to apply to objects to be returned. Based on filter query language defined by `fl` parameter. `None` means that no such filtering is peformed. ot (:class:`~pywbem.Uint32`): Operation timeout in seconds. This is the minimum time the WBEM server must keep the enumeration session open between requests on that session. A value of 0 indicates that the server should never time out. The server may reject the proposed value. `None` will cause the server to use its default timeout. coe (:class:`py:bool`): Continue on error flag. `None` will cause the server to use its default of `False`. moc (:class:`~pywbem.Uint32`): Maximum number of objects to return for this operation. `None` will cause the server to use its default of 0. Returns: A :func:`~py:collections.namedtuple` object containing the following named items: * **paths** (list of :class:`~pywbem.CIMInstanceName`): The retrieved instance paths. * **eos** (:class:`py:bool`): `True` if the enumeration session is exhausted after this operation. Otherwise `eos` is `False` and the `context` item is the context object for the next operation on the enumeration session. * **context** (:func:`py:tuple` of server_context, namespace): A context object identifying the open enumeration session, including its current enumeration state, and the namespace. This object must be supplied with the next pull or close operation for this enumeration session.
[ "This", "function", "is", "a", "wrapper", "for", ":", "meth", ":", "~pywbem", ".", "WBEMConnection", ".", "OpenReferenceInstancePaths", "." ]
python
train
37.434783
nens/turn
turn/core.py
https://github.com/nens/turn/blob/98e806a0749ada0ddfd04b3c29fb04c15bf5ac18/turn/core.py#L159-L184
def bump(self):
    """ Fix indicator in case of unnanounced departments. """
    # read client
    values = self.client.mget(self.keys.indicator, self.keys.dispenser)
    indicator, dispenser = map(int, values)

    # determine active users
    numbers = range(indicator, dispenser + 1)
    keys = [self.keys.key(n) for n in numbers]
    pairs = zip(keys, self.client.mget(*keys))

    try:
        # determine number of first active user
        number = next(self.keys.number(key)
                      for key, value in pairs if value is not None)
    except:
        # set number to next result of incr on dispenser
        number = dispenser + 1

    # set indicator to it if necessary
    if number != indicator:
        self.client.set(self.keys.indicator, number)

    # announce and return it anyway
    self.announce(number)
    return number
[ "def", "bump", "(", "self", ")", ":", "# read client", "values", "=", "self", ".", "client", ".", "mget", "(", "self", ".", "keys", ".", "indicator", ",", "self", ".", "keys", ".", "dispenser", ")", "indicator", ",", "dispenser", "=", "map", "(", "int", ",", "values", ")", "# determine active users", "numbers", "=", "range", "(", "indicator", ",", "dispenser", "+", "1", ")", "keys", "=", "[", "self", ".", "keys", ".", "key", "(", "n", ")", "for", "n", "in", "numbers", "]", "pairs", "=", "zip", "(", "keys", ",", "self", ".", "client", ".", "mget", "(", "*", "keys", ")", ")", "try", ":", "# determine number of first active user", "number", "=", "next", "(", "self", ".", "keys", ".", "number", "(", "key", ")", "for", "key", ",", "value", "in", "pairs", "if", "value", "is", "not", "None", ")", "except", ":", "# set number to next result of incr on dispenser", "number", "=", "dispenser", "+", "1", "# set indicator to it if necessary", "if", "number", "!=", "indicator", ":", "self", ".", "client", ".", "set", "(", "self", ".", "keys", ".", "indicator", ",", "number", ")", "# announce and return it anyway", "self", ".", "announce", "(", "number", ")", "return", "number" ]
Fix indicator in case of unnanounced departments.
[ "Fix", "indicator", "in", "case", "of", "unnanounced", "departments", "." ]
python
train
35.076923
rosenbrockc/fortpy
fortpy/isense/evaluator.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/isense/evaluator.py#L300-L318
def _complete_el(self, symbol, attribute, fullsymbol):
    """Suggests a list of completions based on the el_*
    attributes of the user_context."""
    if symbol != fullsymbol:
        #We have a sym%sym%... chain and the completion just needs to
        #be a member variable or method of the type being referenced.
        return self._complete_type_chain(symbol, fullsymbol)

    if self.context.el_section == "params":
        #They are in the process of defining a new executable and are
        #picking the names themselves, return normal word complete.
        return self._complete_word(symbol, attribute)
    elif self.context.el_section == "body":
        if self.context.el_call in ["sub", "fun"]:
            return self._complete_sig(symbol, attribute)
        else:
            return self._complete_word(symbol, attribute)
    else:
        return self._complete_word(symbol, attribute)
[ "def", "_complete_el", "(", "self", ",", "symbol", ",", "attribute", ",", "fullsymbol", ")", ":", "if", "symbol", "!=", "fullsymbol", ":", "#We have a sym%sym%... chain and the completion just needs to", "#be a member variable or method of the type being referenced.", "return", "self", ".", "_complete_type_chain", "(", "symbol", ",", "fullsymbol", ")", "if", "self", ".", "context", ".", "el_section", "==", "\"params\"", ":", "#They are in the process of defining a new executable and are", "#picking the names themselves, return normal word complete.", "return", "self", ".", "_complete_word", "(", "symbol", ",", "attribute", ")", "elif", "self", ".", "context", ".", "el_section", "==", "\"body\"", ":", "if", "self", ".", "context", ".", "el_call", "in", "[", "\"sub\"", ",", "\"fun\"", "]", ":", "return", "self", ".", "_complete_sig", "(", "symbol", ",", "attribute", ")", "else", ":", "return", "self", ".", "_complete_word", "(", "symbol", ",", "attribute", ")", "else", ":", "return", "self", ".", "_complete_word", "(", "symbol", ",", "attribute", ")" ]
Suggests a list of completions based on the el_* attributes of the user_context.
[ "Suggests", "a", "list", "of", "completions", "based", "on", "the", "el_", "*", "attributes", "of", "the", "user_context", "." ]
python
train
50.210526
neo4j-drivers/neotime
neotime/__init__.py
https://github.com/neo4j-drivers/neotime/blob/9f6c1d782178fee5e27345dbf78ac161b3a95cc7/neotime/__init__.py#L568-L585
def parse(cls, s):
    """ Parse a string to produce a :class:`.Date`.

    Accepted formats:
        'YYYY-MM-DD'

    :param s:
    :return:
    """
    try:
        numbers = map(int, s.split("-"))
    except (ValueError, AttributeError):
        raise ValueError("Date string must be in format YYYY-MM-DD")
    else:
        numbers = list(numbers)
        if len(numbers) == 3:
            return cls(*numbers)
        raise ValueError("Date string must be in format YYYY-MM-DD")
[ "def", "parse", "(", "cls", ",", "s", ")", ":", "try", ":", "numbers", "=", "map", "(", "int", ",", "s", ".", "split", "(", "\"-\"", ")", ")", "except", "(", "ValueError", ",", "AttributeError", ")", ":", "raise", "ValueError", "(", "\"Date string must be in format YYYY-MM-DD\"", ")", "else", ":", "numbers", "=", "list", "(", "numbers", ")", "if", "len", "(", "numbers", ")", "==", "3", ":", "return", "cls", "(", "*", "numbers", ")", "raise", "ValueError", "(", "\"Date string must be in format YYYY-MM-DD\"", ")" ]
Parse a string to produce a :class:`.Date`. Accepted formats: 'YYYY-MM-DD' :param s: :return:
[ "Parse", "a", "string", "to", "produce", "a", ":", "class", ":", ".", "Date", "." ]
python
train
29.277778
tk0miya/tk.phpautodoc
src/phply/phpparse.py
https://github.com/tk0miya/tk.phpautodoc/blob/cf789f64abaf76351485cee231a075227e665fb6/src/phply/phpparse.py#L1165-L1174
def p_scalar_namespace_name(p):
    '''scalar : namespace_name
              | NS_SEPARATOR namespace_name
              | NAMESPACE NS_SEPARATOR namespace_name'''
    if len(p) == 2:
        p[0] = ast.Constant(p[1], lineno=p.lineno(1))
    elif len(p) == 3:
        p[0] = ast.Constant(p[1] + p[2], lineno=p.lineno(1))
    else:
        p[0] = ast.Constant(p[1] + p[2] + p[3], lineno=p.lineno(1))
[ "def", "p_scalar_namespace_name", "(", "p", ")", ":", "if", "len", "(", "p", ")", "==", "2", ":", "p", "[", "0", "]", "=", "ast", ".", "Constant", "(", "p", "[", "1", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", "elif", "len", "(", "p", ")", "==", "3", ":", "p", "[", "0", "]", "=", "ast", ".", "Constant", "(", "p", "[", "1", "]", "+", "p", "[", "2", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", "else", ":", "p", "[", "0", "]", "=", "ast", ".", "Constant", "(", "p", "[", "1", "]", "+", "p", "[", "2", "]", "+", "p", "[", "3", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")" ]
scalar : namespace_name | NS_SEPARATOR namespace_name | NAMESPACE NS_SEPARATOR namespace_name
[ "scalar", ":", "namespace_name", "|", "NS_SEPARATOR", "namespace_name", "|", "NAMESPACE", "NS_SEPARATOR", "namespace_name" ]
python
train
38.9
tensorflow/mesh
mesh_tensorflow/simd_mesh_impl.py
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/simd_mesh_impl.py#L460-L483
def slicewise(self, fn, *inputs):
  """Execute a function in parallel on all slices.

  Args:
    fn: a function from tf.Tensors to tf.Tensor or a tuple of tf.Tensors.
    *inputs: a list of inputs.  Each input is either a LaidOutTensor or
      is convertible to a tf.Tensor.

  Returns:
    a LaidOutTensor, or a tuple of LaidOutTensors if fn returns a tuple.
  """
  if fn == tf.add:
    assert len(inputs) == 2
    if isinstance(inputs[0], mtf.LazyAllreduceSum):
      # sum of LazyAllreduceSum (keep delaying the allreduce)
      return inputs[0] + inputs[1]
  # convert all inputs to LaidOutTensor where possible
  inputs = mtf.convert_args_to_laid_out_tensors(inputs)
  ret = fn(*[
      x.one_slice if isinstance(x, self.LaidOutTensor) else x
      for x in inputs])
  if isinstance(ret, tuple):
    return tuple([self.LaidOutTensor([t]) for t in ret])
  else:
    return self.LaidOutTensor([ret])
[ "def", "slicewise", "(", "self", ",", "fn", ",", "*", "inputs", ")", ":", "if", "fn", "==", "tf", ".", "add", ":", "assert", "len", "(", "inputs", ")", "==", "2", "if", "isinstance", "(", "inputs", "[", "0", "]", ",", "mtf", ".", "LazyAllreduceSum", ")", ":", "# sum of LazyAllreduceSum (keep delaying the allreduce)", "return", "inputs", "[", "0", "]", "+", "inputs", "[", "1", "]", "# convert all inputs to LaidOutTensor where possible", "inputs", "=", "mtf", ".", "convert_args_to_laid_out_tensors", "(", "inputs", ")", "ret", "=", "fn", "(", "*", "[", "x", ".", "one_slice", "if", "isinstance", "(", "x", ",", "self", ".", "LaidOutTensor", ")", "else", "x", "for", "x", "in", "inputs", "]", ")", "if", "isinstance", "(", "ret", ",", "tuple", ")", ":", "return", "tuple", "(", "[", "self", ".", "LaidOutTensor", "(", "[", "t", "]", ")", "for", "t", "in", "ret", "]", ")", "else", ":", "return", "self", ".", "LaidOutTensor", "(", "[", "ret", "]", ")" ]
Execute a function in parallel on all slices. Args: fn: a function from tf.Tensors to tf.Tensor or a tuple of tf.Tensors. *inputs: a list of inputs. Each input is either a LaidOutTensor or is convertible to a tf.Tensor. Returns: a LaidOutTensor, or a tuple of LaidOutTensors if fn returns a tuple.
[ "Execute", "a", "function", "in", "parallel", "on", "all", "slices", "." ]
python
train
38.541667
dadadel/pyment
pyment/docstring.py
https://github.com/dadadel/pyment/blob/3d1bdf87d083ff56230bd0bf7c5252e20552b7b6/pyment/docstring.py#L1601-L1610
def _extract_docs_other(self):
    """Extract other specific sections"""
    if self.dst.style['in'] == 'numpydoc':
        data = '\n'.join([d.rstrip().replace(self.docs['out']['spaces'], '', 1)
                          for d in self.docs['in']['raw'].splitlines()])
        lst = self.dst.numpydoc.get_list_key(data, 'also')
        lst = self.dst.numpydoc.get_list_key(data, 'ref')
        lst = self.dst.numpydoc.get_list_key(data, 'note')
        lst = self.dst.numpydoc.get_list_key(data, 'other')
        lst = self.dst.numpydoc.get_list_key(data, 'example')
        lst = self.dst.numpydoc.get_list_key(data, 'attr')
[ "def", "_extract_docs_other", "(", "self", ")", ":", "if", "self", ".", "dst", ".", "style", "[", "'in'", "]", "==", "'numpydoc'", ":", "data", "=", "'\\n'", ".", "join", "(", "[", "d", ".", "rstrip", "(", ")", ".", "replace", "(", "self", ".", "docs", "[", "'out'", "]", "[", "'spaces'", "]", ",", "''", ",", "1", ")", "for", "d", "in", "self", ".", "docs", "[", "'in'", "]", "[", "'raw'", "]", ".", "splitlines", "(", ")", "]", ")", "lst", "=", "self", ".", "dst", ".", "numpydoc", ".", "get_list_key", "(", "data", ",", "'also'", ")", "lst", "=", "self", ".", "dst", ".", "numpydoc", ".", "get_list_key", "(", "data", ",", "'ref'", ")", "lst", "=", "self", ".", "dst", ".", "numpydoc", ".", "get_list_key", "(", "data", ",", "'note'", ")", "lst", "=", "self", ".", "dst", ".", "numpydoc", ".", "get_list_key", "(", "data", ",", "'other'", ")", "lst", "=", "self", ".", "dst", ".", "numpydoc", ".", "get_list_key", "(", "data", ",", "'example'", ")", "lst", "=", "self", ".", "dst", ".", "numpydoc", ".", "get_list_key", "(", "data", ",", "'attr'", ")" ]
Extract other specific sections
[ "Extract", "other", "specific", "sections" ]
python
train
62.6
azraq27/neural
neural/decon.py
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/decon.py#L378-L419
def partial(self,start=0,end=None,run=0):
    '''chops the stimulus by only including time points ``start`` through ``end`` (in reps, inclusive; ``None``=until the end)
    if using stim_times-style simulus, will change the ``run``'th run. If a column, will just chop the column'''
    self.read_file()
    decon_stim = copy.copy(self)
    if start<0:
        start = 0
    if self.type()=="column":
        decon_stim.column_file = None
        if end>=len(decon_stim.column):
            end = None
        if end==None:
            decon_stim.column = decon_stim.column[start:]
        else:
            decon_stim.column = decon_stim.column[start:end+1]
        if len(decon_stim.column)==0:
            return None
    if self.type()=="times":
        if self.TR==None:
            nl.notify('Error: cannot get partial segment of a stim_times stimulus without a TR',level=nl.level.error)
            return None
        def time_in(a):
            first_number = r'^(\d+(\.\d+)?)'
            if isinstance(a,basestring):
                m = re.match(first_number,a)
                if m:
                    a = m.group(1)
                else:
                    nl.notify('Warning: cannot intepret a number from the stim_time: "%s"' % a,level=nl.level.warning)
                    return False
            a = float(a)/self.TR
            if a>=start and (end==None or a<=end):
                return True
            return False
        decon_stim.times_file = None
        if len(decon_stim.times)==0 or '__iter__' not in dir(decon_stim.times[0]):
            decon_stim.times = [decon_stim.times]
        decon_stim.times[run] = [x for x in decon_stim.times[run] if time_in(x)]
        if len(nl.flatten(decon_stim.times))==0:
            return None
    return decon_stim
[ "def", "partial", "(", "self", ",", "start", "=", "0", ",", "end", "=", "None", ",", "run", "=", "0", ")", ":", "self", ".", "read_file", "(", ")", "decon_stim", "=", "copy", ".", "copy", "(", "self", ")", "if", "start", "<", "0", ":", "start", "=", "0", "if", "self", ".", "type", "(", ")", "==", "\"column\"", ":", "decon_stim", ".", "column_file", "=", "None", "if", "end", ">=", "len", "(", "decon_stim", ".", "column", ")", ":", "end", "=", "None", "if", "end", "==", "None", ":", "decon_stim", ".", "column", "=", "decon_stim", ".", "column", "[", "start", ":", "]", "else", ":", "decon_stim", ".", "column", "=", "decon_stim", ".", "column", "[", "start", ":", "end", "+", "1", "]", "if", "len", "(", "decon_stim", ".", "column", ")", "==", "0", ":", "return", "None", "if", "self", ".", "type", "(", ")", "==", "\"times\"", ":", "if", "self", ".", "TR", "==", "None", ":", "nl", ".", "notify", "(", "'Error: cannot get partial segment of a stim_times stimulus without a TR'", ",", "level", "=", "nl", ".", "level", ".", "error", ")", "return", "None", "def", "time_in", "(", "a", ")", ":", "first_number", "=", "r'^(\\d+(\\.\\d+)?)'", "if", "isinstance", "(", "a", ",", "basestring", ")", ":", "m", "=", "re", ".", "match", "(", "first_number", ",", "a", ")", "if", "m", ":", "a", "=", "m", ".", "group", "(", "1", ")", "else", ":", "nl", ".", "notify", "(", "'Warning: cannot intepret a number from the stim_time: \"%s\"'", "%", "a", ",", "level", "=", "nl", ".", "level", ".", "warning", ")", "return", "False", "a", "=", "float", "(", "a", ")", "/", "self", ".", "TR", "if", "a", ">=", "start", "and", "(", "end", "==", "None", "or", "a", "<=", "end", ")", ":", "return", "True", "return", "False", "decon_stim", ".", "times_file", "=", "None", "if", "len", "(", "decon_stim", ".", "times", ")", "==", "0", "or", "'__iter__'", "not", "in", "dir", "(", "decon_stim", ".", "times", "[", "0", "]", ")", ":", "decon_stim", ".", "times", "=", "[", "decon_stim", ".", "times", "]", "decon_stim", ".", "times", "[", "run", "]", "=", "[", "x", "for", "x", "in", "decon_stim", ".", "times", "[", "run", "]", "if", "time_in", "(", "x", ")", "]", "if", "len", "(", "nl", ".", "flatten", "(", "decon_stim", ".", "times", ")", ")", "==", "0", ":", "return", "None", "return", "decon_stim" ]
chops the stimulus by only including time points ``start`` through ``end`` (in reps, inclusive; ``None``=until the end) if using stim_times-style simulus, will change the ``run``'th run. If a column, will just chop the column
[ "chops", "the", "stimulus", "by", "only", "including", "time", "points", "start", "through", "end", "(", "in", "reps", "inclusive", ";", "None", "=", "until", "the", "end", ")", "if", "using", "stim_times", "-", "style", "simulus", "will", "change", "the", "run", "th", "run", ".", "If", "a", "column", "will", "just", "chop", "the", "column" ]
python
train
45.357143
openstack/networking-arista
networking_arista/common/db_lib.py
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/common/db_lib.py#L59-L68
def filter_unnecessary_segments(query):
    """Filter segments are not needed on CVX"""
    segment_model = segment_models.NetworkSegment
    network_model = models_v2.Network
    query = (query
             .join_if_necessary(network_model)
             .join_if_necessary(segment_model)
             .filter(network_model.project_id != '')
             .filter_network_type())
    return query
[ "def", "filter_unnecessary_segments", "(", "query", ")", ":", "segment_model", "=", "segment_models", ".", "NetworkSegment", "network_model", "=", "models_v2", ".", "Network", "query", "=", "(", "query", ".", "join_if_necessary", "(", "network_model", ")", ".", "join_if_necessary", "(", "segment_model", ")", ".", "filter", "(", "network_model", ".", "project_id", "!=", "''", ")", ".", "filter_network_type", "(", ")", ")", "return", "query" ]
Filter segments are not needed on CVX
[ "Filter", "segments", "are", "not", "needed", "on", "CVX" ]
python
train
38.6
manns/pyspread
pyspread/src/lib/charts.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/charts.py#L113-L126
def fig2x(figure, format):
    """Returns svg from matplotlib chart"""

    # Save svg to file like object svg_io
    io = StringIO()
    figure.savefig(io, format=format)

    # Rewind the file like object
    io.seek(0)

    data = io.getvalue()
    io.close()

    return data
[ "def", "fig2x", "(", "figure", ",", "format", ")", ":", "# Save svg to file like object svg_io", "io", "=", "StringIO", "(", ")", "figure", ".", "savefig", "(", "io", ",", "format", "=", "format", ")", "# Rewind the file like object", "io", ".", "seek", "(", "0", ")", "data", "=", "io", ".", "getvalue", "(", ")", "io", ".", "close", "(", ")", "return", "data" ]
Returns svg from matplotlib chart
[ "Returns", "svg", "from", "matplotlib", "chart" ]
python
train
19
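A minimal usage sketch for the fig2x record above, assuming the function is imported from pyspread's charts module and matplotlib is available; the figure contents and variable names are purely illustrative.

    import matplotlib
    matplotlib.use('Agg')            # headless backend, no display needed
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 4])
    svg_text = fig2x(fig, 'svg')     # SVG markup is returned as a string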
pokerregion/poker
poker/commands.py
https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/commands.py#L59-L95
def twoplustwo_player(username): """Get profile information about a Two plus Two Forum member given the username.""" from .website.twoplustwo import ForumMember, AmbiguousUserNameError, UserNotFoundError try: member = ForumMember(username) except UserNotFoundError: raise click.ClickException('User "%s" not found!' % username) except AmbiguousUserNameError as e: click.echo('Got multiple users with similar names!', err=True) for ind, user in enumerate(e.users): click.echo('{}. {}'.format(ind + 1, user.name), err=True) number = click.prompt('Which would you like to see [{}-{}]'.format(1, len(e.users)), prompt_suffix='? ', type=click.IntRange(1, len(e.users)), err=True) userid = e.users[int(number) - 1].id member = ForumMember.from_userid(userid) click.echo(err=True) # empty line after input _print_header('Two plus two forum member') _print_values( ('Username', member.username), ('Forum id', member.id), ('Location', member.location), ('Total posts', member.total_posts), ('Posts per day', member.posts_per_day), ('Rank', member.rank), ('Last activity', member.last_activity), ('Join date', member.join_date), ('Usergroups', member.public_usergroups), ('Profile picture', member.profile_picture), ('Avatar', member.avatar), )
[ "def", "twoplustwo_player", "(", "username", ")", ":", "from", ".", "website", ".", "twoplustwo", "import", "ForumMember", ",", "AmbiguousUserNameError", ",", "UserNotFoundError", "try", ":", "member", "=", "ForumMember", "(", "username", ")", "except", "UserNotFoundError", ":", "raise", "click", ".", "ClickException", "(", "'User \"%s\" not found!'", "%", "username", ")", "except", "AmbiguousUserNameError", "as", "e", ":", "click", ".", "echo", "(", "'Got multiple users with similar names!'", ",", "err", "=", "True", ")", "for", "ind", ",", "user", "in", "enumerate", "(", "e", ".", "users", ")", ":", "click", ".", "echo", "(", "'{}. {}'", ".", "format", "(", "ind", "+", "1", ",", "user", ".", "name", ")", ",", "err", "=", "True", ")", "number", "=", "click", ".", "prompt", "(", "'Which would you like to see [{}-{}]'", ".", "format", "(", "1", ",", "len", "(", "e", ".", "users", ")", ")", ",", "prompt_suffix", "=", "'? '", ",", "type", "=", "click", ".", "IntRange", "(", "1", ",", "len", "(", "e", ".", "users", ")", ")", ",", "err", "=", "True", ")", "userid", "=", "e", ".", "users", "[", "int", "(", "number", ")", "-", "1", "]", ".", "id", "member", "=", "ForumMember", ".", "from_userid", "(", "userid", ")", "click", ".", "echo", "(", "err", "=", "True", ")", "# empty line after input", "_print_header", "(", "'Two plus two forum member'", ")", "_print_values", "(", "(", "'Username'", ",", "member", ".", "username", ")", ",", "(", "'Forum id'", ",", "member", ".", "id", ")", ",", "(", "'Location'", ",", "member", ".", "location", ")", ",", "(", "'Total posts'", ",", "member", ".", "total_posts", ")", ",", "(", "'Posts per day'", ",", "member", ".", "posts_per_day", ")", ",", "(", "'Rank'", ",", "member", ".", "rank", ")", ",", "(", "'Last activity'", ",", "member", ".", "last_activity", ")", ",", "(", "'Join date'", ",", "member", ".", "join_date", ")", ",", "(", "'Usergroups'", ",", "member", ".", "public_usergroups", ")", ",", "(", "'Profile picture'", ",", "member", ".", "profile_picture", ")", ",", "(", "'Avatar'", ",", "member", ".", "avatar", ")", ",", ")" ]
Get profile information about a Two plus Two Forum member given the username.
[ "Get", "profile", "information", "about", "a", "Two", "plus", "Two", "Forum", "member", "given", "the", "username", "." ]
python
train
38.702703
fastai/fastai
fastai/vision/transform.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/transform.py#L175-L189
def _crop_pad_default(x, size, padding_mode='reflection', row_pct:uniform = 0.5, col_pct:uniform = 0.5): "Crop and pad tfm - `row_pct`,`col_pct` sets focal point." padding_mode = _pad_mode_convert[padding_mode] size = tis2hw(size) if x.shape[1:] == torch.Size(size): return x rows,cols = size row_pct,col_pct = _minus_epsilon(row_pct,col_pct) if x.size(1)<rows or x.size(2)<cols: row_pad = max((rows-x.size(1)+1)//2, 0) col_pad = max((cols-x.size(2)+1)//2, 0) x = F.pad(x[None], (col_pad,col_pad,row_pad,row_pad), mode=padding_mode)[0] row = int((x.size(1)-rows+1)*row_pct) col = int((x.size(2)-cols+1)*col_pct) x = x[:, row:row+rows, col:col+cols] return x.contiguous()
[ "def", "_crop_pad_default", "(", "x", ",", "size", ",", "padding_mode", "=", "'reflection'", ",", "row_pct", ":", "uniform", "=", "0.5", ",", "col_pct", ":", "uniform", "=", "0.5", ")", ":", "padding_mode", "=", "_pad_mode_convert", "[", "padding_mode", "]", "size", "=", "tis2hw", "(", "size", ")", "if", "x", ".", "shape", "[", "1", ":", "]", "==", "torch", ".", "Size", "(", "size", ")", ":", "return", "x", "rows", ",", "cols", "=", "size", "row_pct", ",", "col_pct", "=", "_minus_epsilon", "(", "row_pct", ",", "col_pct", ")", "if", "x", ".", "size", "(", "1", ")", "<", "rows", "or", "x", ".", "size", "(", "2", ")", "<", "cols", ":", "row_pad", "=", "max", "(", "(", "rows", "-", "x", ".", "size", "(", "1", ")", "+", "1", ")", "//", "2", ",", "0", ")", "col_pad", "=", "max", "(", "(", "cols", "-", "x", ".", "size", "(", "2", ")", "+", "1", ")", "//", "2", ",", "0", ")", "x", "=", "F", ".", "pad", "(", "x", "[", "None", "]", ",", "(", "col_pad", ",", "col_pad", ",", "row_pad", ",", "row_pad", ")", ",", "mode", "=", "padding_mode", ")", "[", "0", "]", "row", "=", "int", "(", "(", "x", ".", "size", "(", "1", ")", "-", "rows", "+", "1", ")", "*", "row_pct", ")", "col", "=", "int", "(", "(", "x", ".", "size", "(", "2", ")", "-", "cols", "+", "1", ")", "*", "col_pct", ")", "x", "=", "x", "[", ":", ",", "row", ":", "row", "+", "rows", ",", "col", ":", "col", "+", "cols", "]", "return", "x", ".", "contiguous", "(", ")" ]
Crop and pad tfm - `row_pct`,`col_pct` sets focal point.
[ "Crop", "and", "pad", "tfm", "-", "row_pct", "col_pct", "sets", "focal", "point", "." ]
python
train
48.266667
stanfordnlp/stanza
stanza/text/vocab.py
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/text/vocab.py#L93-L108
def add(self, word, count=1): """Add a word to the vocabulary and return its index. :param word: word to add to the dictionary. :param count: how many times to add the word. :return: index of the added word. WARNING: this function assumes that if the Vocab currently has N words, then there is a perfect bijection between these N words and the integers 0 through N-1. """ if word not in self: super(Vocab, self).__setitem__(word, len(self)) self._counts[word] += count return self[word]
[ "def", "add", "(", "self", ",", "word", ",", "count", "=", "1", ")", ":", "if", "word", "not", "in", "self", ":", "super", "(", "Vocab", ",", "self", ")", ".", "__setitem__", "(", "word", ",", "len", "(", "self", ")", ")", "self", ".", "_counts", "[", "word", "]", "+=", "count", "return", "self", "[", "word", "]" ]
Add a word to the vocabulary and return its index. :param word: word to add to the dictionary. :param count: how many times to add the word. :return: index of the added word. WARNING: this function assumes that if the Vocab currently has N words, then there is a perfect bijection between these N words and the integers 0 through N-1.
[ "Add", "a", "word", "to", "the", "vocabulary", "and", "return", "its", "index", "." ]
python
train
35.4375
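A hypothetical interactive sketch of Vocab.add; the bare constructor call below is an assumption (the real class may require an unknown-token argument), so treat the setup as illustrative only.

    vocab = Vocab()            # illustrative construction
    i = vocab.add('cat')       # new word: assigned the next free index
    j = vocab.add('cat', 3)    # existing word: same index, count bumped by 3
    assert i == j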
markovmodel/msmtools
msmtools/analysis/dense/sensitivity.py
https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/analysis/dense/sensitivity.py#L387-L407
def expectation_sensitivity(T, a): r"""Sensitivity of expectation value of observable A=(a_i). Parameters ---------- T : (M, M) ndarray Transition matrix a : (M,) ndarray Observable, a[i] is the value of the observable at state i. Returns ------- S : (M, M) ndarray Sensitivity matrix of the expectation value. """ M = T.shape[0] S = numpy.zeros((M, M)) for i in range(M): S += a[i] * stationary_distribution_sensitivity(T, i) return S
[ "def", "expectation_sensitivity", "(", "T", ",", "a", ")", ":", "M", "=", "T", ".", "shape", "[", "0", "]", "S", "=", "numpy", ".", "zeros", "(", "(", "M", ",", "M", ")", ")", "for", "i", "in", "range", "(", "M", ")", ":", "S", "+=", "a", "[", "i", "]", "*", "stationary_distribution_sensitivity", "(", "T", ",", "i", ")", "return", "S" ]
r"""Sensitivity of expectation value of observable A=(a_i). Parameters ---------- T : (M, M) ndarray Transition matrix a : (M,) ndarray Observable, a[i] is the value of the observable at state i. Returns ------- S : (M, M) ndarray Sensitivity matrix of the expectation value.
[ "r", "Sensitivity", "of", "expectation", "value", "of", "observable", "A", "=", "(", "a_i", ")", "." ]
python
train
23.904762
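A small numerical sketch for expectation_sensitivity; the import path is taken from the record's module path, and the 2-state transition matrix and observable are chosen only for illustration.

    import numpy
    from msmtools.analysis.dense.sensitivity import expectation_sensitivity

    T = numpy.array([[0.9, 0.1],
                     [0.2, 0.8]])      # row-stochastic transition matrix
    a = numpy.array([1.0, 0.0])        # observable: indicator of state 0
    S = expectation_sensitivity(T, a)  # (2, 2) sensitivity of the expectation
    print(S.shape)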
pandas-dev/pandas
pandas/core/frame.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L3459-L3547
def assign(self, **kwargs): r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. For Python 3.6 and above, later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. For Python 3.5 and below, the order of keyword arguments is not specified, you cannot refer to newly created or modified columns. All items are computed first, and then assigned in alphabetical order. .. versionchanged :: 0.23.0 Keyword argument order is maintained for Python 3.6 and later. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 In Python 3.6+, you can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy() # >= 3.6 preserve order of kwargs if PY36: for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) else: # <= 3.5: do all calculations first... results = OrderedDict() for k, v in kwargs.items(): results[k] = com.apply_if_callable(v, data) # <= 3.5 and earlier results = sorted(results.items()) # ... and then assign for k, v in results: data[k] = v return data
[ "def", "assign", "(", "self", ",", "*", "*", "kwargs", ")", ":", "data", "=", "self", ".", "copy", "(", ")", "# >= 3.6 preserve order of kwargs", "if", "PY36", ":", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "data", "[", "k", "]", "=", "com", ".", "apply_if_callable", "(", "v", ",", "data", ")", "else", ":", "# <= 3.5: do all calculations first...", "results", "=", "OrderedDict", "(", ")", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "results", "[", "k", "]", "=", "com", ".", "apply_if_callable", "(", "v", ",", "data", ")", "# <= 3.5 and earlier", "results", "=", "sorted", "(", "results", ".", "items", "(", ")", ")", "# ... and then assign", "for", "k", ",", "v", "in", "results", ":", "data", "[", "k", "]", "=", "v", "return", "data" ]
r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. For Python 3.6 and above, later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. For Python 3.5 and below, the order of keyword arguments is not specified, you cannot refer to newly created or modified columns. All items are computed first, and then assigned in alphabetical order. .. versionchanged :: 0.23.0 Keyword argument order is maintained for Python 3.6 and later. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 In Python 3.6+, you can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15
[ "r", "Assign", "new", "columns", "to", "a", "DataFrame", "." ]
python
train
35.483146
KelSolaar/Umbra
umbra/components/factory/script_editor/search_in_files.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/factory/script_editor/search_in_files.py#L519-L532
def filters_in_format(self, value): """ Setter for **self.__filters_in_format** attribute. :param value: Attribute value. :type value: unicode """ if value is not None: assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format( "filters_in_format", value) assert os.path.exists(value), "'{0}' attribute: '{1}' file doesn't exists!".format( "filters_in_format", value) self.__filters_in_format = value
[ "def", "filters_in_format", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "assert", "type", "(", "value", ")", "is", "unicode", ",", "\"'{0}' attribute: '{1}' type is not 'unicode'!\"", ".", "format", "(", "\"filters_in_format\"", ",", "value", ")", "assert", "os", ".", "path", ".", "exists", "(", "value", ")", ",", "\"'{0}' attribute: '{1}' file doesn't exists!\"", ".", "format", "(", "\"filters_in_format\"", ",", "value", ")", "self", ".", "__filters_in_format", "=", "value" ]
Setter for **self.__filters_in_format** attribute. :param value: Attribute value. :type value: unicode
[ "Setter", "for", "**", "self", ".", "__filters_in_format", "**", "attribute", "." ]
python
train
37.785714
ThreatConnect-Inc/tcex
tcex/tcex_playbook.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_playbook.py#L909-L928
def create_string_array(self, key, value): """Create method of CRUD operation for string array data. Args: key (string): The variable to write to the DB. value (any): The data to write to the DB. Returns: (string): Result of DB write. """ data = None if key is not None and value is not None: if isinstance(value, (list)): data = self.db.create(key.strip(), json.dumps(value)) else: # used to save raw value with embedded variables data = self.db.create(key.strip(), value) else: self.tcex.log.warning(u'The key or value field was None.') return data
[ "def", "create_string_array", "(", "self", ",", "key", ",", "value", ")", ":", "data", "=", "None", "if", "key", "is", "not", "None", "and", "value", "is", "not", "None", ":", "if", "isinstance", "(", "value", ",", "(", "list", ")", ")", ":", "data", "=", "self", ".", "db", ".", "create", "(", "key", ".", "strip", "(", ")", ",", "json", ".", "dumps", "(", "value", ")", ")", "else", ":", "# used to save raw value with embedded variables", "data", "=", "self", ".", "db", ".", "create", "(", "key", ".", "strip", "(", ")", ",", "value", ")", "else", ":", "self", ".", "tcex", ".", "log", ".", "warning", "(", "u'The key or value field was None.'", ")", "return", "data" ]
Create method of CRUD operation for string array data. Args: key (string): The variable to write to the DB. value (any): The data to write to the DB. Returns: (string): Result of DB write.
[ "Create", "method", "of", "CRUD", "operation", "for", "string", "array", "data", "." ]
python
train
35.85
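A hedged usage sketch for create_string_array, assuming tcex is an initialized TcEx instance exposing this playbook module; the '#App:1234:my_output!StringArray' variable name is illustrative, not a real output variable.

    # Persist a list of strings under a StringArray playbook variable.
    tcex.playbook.create_string_array('#App:1234:my_output!StringArray', ['alpha', 'beta'])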
SMTG-UCL/sumo
sumo/cli/dosplot.py
https://github.com/SMTG-UCL/sumo/blob/47aec6bbfa033a624435a65bd4edabd18bfb437f/sumo/cli/dosplot.py#L187-L210
def _el_orb(string): """Parse the element and orbital argument strings. The presence of an element without any orbitals means that we want to plot all of its orbitals. Args: string (str): The element and orbitals as a string, in the form ``"C.s.p,O"``. Returns: dict: The elements and orbitals as a :obj:`dict`. For example:: {'Bi': ['s', 'px', 'py', 'd']}. If an element symbol is included with an empty list, then all orbitals for that species are considered. """ el_orbs = {} for split in string.split(','): orbs = split.split('.') orbs = [orbs[0], 's', 'p', 'd', 'f'] if len(orbs) == 1 else orbs el_orbs[orbs.pop(0)] = orbs return el_orbs
[ "def", "_el_orb", "(", "string", ")", ":", "el_orbs", "=", "{", "}", "for", "split", "in", "string", ".", "split", "(", "','", ")", ":", "orbs", "=", "split", ".", "split", "(", "'.'", ")", "orbs", "=", "[", "orbs", "[", "0", "]", ",", "'s'", ",", "'p'", ",", "'d'", ",", "'f'", "]", "if", "len", "(", "orbs", ")", "==", "1", "else", "orbs", "el_orbs", "[", "orbs", ".", "pop", "(", "0", ")", "]", "=", "orbs", "return", "el_orbs" ]
Parse the element and orbital argument strings. The presence of an element without any orbitals means that we want to plot all of its orbitals. Args: string (str): The element and orbitals as a string, in the form ``"C.s.p,O"``. Returns: dict: The elements and orbitals as a :obj:`dict`. For example:: {'Bi': ['s', 'px', 'py', 'd']}. If an element symbol is included with an empty list, then all orbitals for that species are considered.
[ "Parse", "the", "element", "and", "orbital", "argument", "strings", "." ]
python
train
30.833333
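The parsing behaviour follows directly from the code in the record; a quick trace of the docstring's own example string:

    print(_el_orb('C.s.p,O'))
    # {'C': ['s', 'p'], 'O': ['s', 'p', 'd', 'f']}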
cpenv/cpenv
cpenv/utils.py
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/utils.py#L384-L397
def set_env_from_file(env_file): '''Restore the current environment from an environment stored in a yaml yaml file. :param env_file: Path to environment yaml file. ''' with open(env_file, 'r') as f: env_dict = yaml.load(f.read()) if 'environment' in env_dict: env_dict = env_dict['environment'] set_env(env_dict)
[ "def", "set_env_from_file", "(", "env_file", ")", ":", "with", "open", "(", "env_file", ",", "'r'", ")", "as", "f", ":", "env_dict", "=", "yaml", ".", "load", "(", "f", ".", "read", "(", ")", ")", "if", "'environment'", "in", "env_dict", ":", "env_dict", "=", "env_dict", "[", "'environment'", "]", "set_env", "(", "env_dict", ")" ]
Restore the current environment from an environment stored in a yaml file. :param env_file: Path to environment yaml file.
[ "Restore", "the", "current", "environment", "from", "an", "environment", "stored", "in", "a", "yaml", "yaml", "file", "." ]
python
valid
24.785714
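A minimal sketch of set_env_from_file under an assumed YAML layout; the file name and variables are made up for illustration.

    # environment.yml (illustrative contents):
    #   environment:
    #     PROJECT_ROOT: /projects/demo
    #     PYTHONPATH: /projects/demo/lib
    set_env_from_file('environment.yml')   # loads the mapping and applies it via set_env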
bsolomon1124/pyfinance
pyfinance/returns.py
https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L687-L699
def pct_negative(self, threshold=0.0): """Pct. of periods in which `self` is less than `threshold.` Parameters ---------- threshold : {float, TSeries, pd.Series}, default 0. Returns ------- float """ return np.count_nonzero(self[self < threshold]) / self.count()
[ "def", "pct_negative", "(", "self", ",", "threshold", "=", "0.0", ")", ":", "return", "np", ".", "count_nonzero", "(", "self", "[", "self", "<", "threshold", "]", ")", "/", "self", ".", "count", "(", ")" ]
Pct. of periods in which `self` is less than `threshold`. Parameters ---------- threshold : {float, TSeries, pd.Series}, default 0. Returns ------- float
[ "Pct", ".", "of", "periods", "in", "which", "self", "is", "less", "than", "threshold", "." ]
python
train
25
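A hypothetical check of pct_negative, assuming a TSeries can be built from a plain list of period returns the way a pandas Series can:

    r = TSeries([0.02, -0.01, 0.03, -0.04])
    r.pct_negative()                 # 0.5  -> two of four periods below 0.0
    r.pct_negative(threshold=0.025)  # 0.75 -> three of four periods below 2.5%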
bfrog/whizzer
whizzer/rpc/service.py
https://github.com/bfrog/whizzer/blob/a1e43084b3ac8c1f3fb4ada081777cdbf791fd77/whizzer/rpc/service.py#L105-L110
def listen_init(self): """Setup the service to listen for clients.""" self.dispatcher = ObjectDispatch(self) self.factory = MsgPackProtocolFactory(self.dispatcher) self.server = UnixServer(self.loop, self.factory, self.path) self.server.start()
[ "def", "listen_init", "(", "self", ")", ":", "self", ".", "dispatcher", "=", "ObjectDispatch", "(", "self", ")", "self", ".", "factory", "=", "MsgPackProtocolFactory", "(", "self", ".", "dispatcher", ")", "self", ".", "server", "=", "UnixServer", "(", "self", ".", "loop", ",", "self", ".", "factory", ",", "self", ".", "path", ")", "self", ".", "server", ".", "start", "(", ")" ]
Setup the service to listen for clients.
[ "Setup", "the", "service", "to", "listen", "for", "clients", "." ]
python
train
46.5
ctuning/ck
ck/kernel.py
https://github.com/ctuning/ck/blob/7e009814e975f8742790d3106340088a46223714/ck/kernel.py#L4192-L4287
def set_lock(i): """ Input: { path - path to be locked (get_lock) - if 'yes', lock this entry (lock_retries) - number of retries to aquire lock (default=11) (lock_retry_delay) - delay in seconds before trying to aquire lock again (default=3) (lock_expire_time) - number of seconds before lock expires (default=30) (unlock_uid) - UID of the lock to release it } Output: { return - return code = 0, if successful = 32, couldn't acquire lock (still locked after all retries) > 0, if error (error) - error text if return > 0 (lock_uid) - lock UID, if locked successfully } """ p=i['path'] gl=i.get('get_lock','') uuid=i.get('unlock_uid','') exp=float(i.get('lock_expire_time','30')) rr={'return':0} if gl=='yes' or uuid!='': pl=os.path.join(p, cfg['subdir_ck_ext'], cfg['file_for_lock']) luid='' if os.path.isfile(pl): import time # Read lock file try: f=open(pl) luid=f.readline().strip() exp=float(f.readline().strip()) if exp<0: exp=1 f.close() except Exception as e: return {'return':1, 'error':'problem reading lock file'} # Check if lock has expired if gl=='yes' and uuid=='': # Retry if locked retry=int(i.get('lock_retries','11')) retryd=float(i.get('lock_retry_delay','3')) dt=os.path.getmtime(pl)+exp-time.time() if dt>0: while retry>0 and os.path.isfile(pl) and dt>0: retry-=1 time.sleep(retryd) if os.path.isfile(pl): dt=os.path.getmtime(pl)+exp-time.time() if retry==0 and dt>0 and os.path.isfile(pl): return {'return':32, 'error':'entry is still locked'} luid='' if os.path.isfile(pl): os.remove(pl) # Release lock if requested (and if not locked by another UID) if luid!='' and uuid!='': if luid!=uuid: return {'return':32, 'error': 'entry is locked with another UID'} luid='' os.remove(pl) # Finish acquiring lock if gl=='yes': # (Re)acquire lock if uuid=='': r=gen_uid({}) if r['return']>0: return r luid=r['data_uid'] else: luid=uuid # Write lock file try: f=open(pl,'w') f.write(luid+'\n') f.write(str(exp)+'\n') f.close() except Exception as e: return {'return':1, 'error':'problem writing lock file'} rr['lock_uid']=luid return rr
[ "def", "set_lock", "(", "i", ")", ":", "p", "=", "i", "[", "'path'", "]", "gl", "=", "i", ".", "get", "(", "'get_lock'", ",", "''", ")", "uuid", "=", "i", ".", "get", "(", "'unlock_uid'", ",", "''", ")", "exp", "=", "float", "(", "i", ".", "get", "(", "'lock_expire_time'", ",", "'30'", ")", ")", "rr", "=", "{", "'return'", ":", "0", "}", "if", "gl", "==", "'yes'", "or", "uuid", "!=", "''", ":", "pl", "=", "os", ".", "path", ".", "join", "(", "p", ",", "cfg", "[", "'subdir_ck_ext'", "]", ",", "cfg", "[", "'file_for_lock'", "]", ")", "luid", "=", "''", "if", "os", ".", "path", ".", "isfile", "(", "pl", ")", ":", "import", "time", "# Read lock file", "try", ":", "f", "=", "open", "(", "pl", ")", "luid", "=", "f", ".", "readline", "(", ")", ".", "strip", "(", ")", "exp", "=", "float", "(", "f", ".", "readline", "(", ")", ".", "strip", "(", ")", ")", "if", "exp", "<", "0", ":", "exp", "=", "1", "f", ".", "close", "(", ")", "except", "Exception", "as", "e", ":", "return", "{", "'return'", ":", "1", ",", "'error'", ":", "'problem reading lock file'", "}", "# Check if lock has expired", "if", "gl", "==", "'yes'", "and", "uuid", "==", "''", ":", "# Retry if locked", "retry", "=", "int", "(", "i", ".", "get", "(", "'lock_retries'", ",", "'11'", ")", ")", "retryd", "=", "float", "(", "i", ".", "get", "(", "'lock_retry_delay'", ",", "'3'", ")", ")", "dt", "=", "os", ".", "path", ".", "getmtime", "(", "pl", ")", "+", "exp", "-", "time", ".", "time", "(", ")", "if", "dt", ">", "0", ":", "while", "retry", ">", "0", "and", "os", ".", "path", ".", "isfile", "(", "pl", ")", "and", "dt", ">", "0", ":", "retry", "-=", "1", "time", ".", "sleep", "(", "retryd", ")", "if", "os", ".", "path", ".", "isfile", "(", "pl", ")", ":", "dt", "=", "os", ".", "path", ".", "getmtime", "(", "pl", ")", "+", "exp", "-", "time", ".", "time", "(", ")", "if", "retry", "==", "0", "and", "dt", ">", "0", "and", "os", ".", "path", ".", "isfile", "(", "pl", ")", ":", "return", "{", "'return'", ":", "32", ",", "'error'", ":", "'entry is still locked'", "}", "luid", "=", "''", "if", "os", ".", "path", ".", "isfile", "(", "pl", ")", ":", "os", ".", "remove", "(", "pl", ")", "# Release lock if requested (and if not locked by another UID)", "if", "luid", "!=", "''", "and", "uuid", "!=", "''", ":", "if", "luid", "!=", "uuid", ":", "return", "{", "'return'", ":", "32", ",", "'error'", ":", "'entry is locked with another UID'", "}", "luid", "=", "''", "os", ".", "remove", "(", "pl", ")", "# Finish acquiring lock", "if", "gl", "==", "'yes'", ":", "# (Re)acquire lock", "if", "uuid", "==", "''", ":", "r", "=", "gen_uid", "(", "{", "}", ")", "if", "r", "[", "'return'", "]", ">", "0", ":", "return", "r", "luid", "=", "r", "[", "'data_uid'", "]", "else", ":", "luid", "=", "uuid", "# Write lock file", "try", ":", "f", "=", "open", "(", "pl", ",", "'w'", ")", "f", ".", "write", "(", "luid", "+", "'\\n'", ")", "f", ".", "write", "(", "str", "(", "exp", ")", "+", "'\\n'", ")", "f", ".", "close", "(", ")", "except", "Exception", "as", "e", ":", "return", "{", "'return'", ":", "1", ",", "'error'", ":", "'problem writing lock file'", "}", "rr", "[", "'lock_uid'", "]", "=", "luid", "return", "rr" ]
Input: { path - path to be locked (get_lock) - if 'yes', lock this entry (lock_retries) - number of retries to acquire lock (default=11) (lock_retry_delay) - delay in seconds before trying to acquire lock again (default=3) (lock_expire_time) - number of seconds before lock expires (default=30) (unlock_uid) - UID of the lock to release it } Output: { return - return code = 0, if successful = 32, couldn't acquire lock (still locked after all retries) > 0, if error (error) - error text if return > 0 (lock_uid) - lock UID, if locked successfully }
[ "Input", ":", "{", "path", "-", "path", "to", "be", "locked" ]
python
train
30.5625
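A hedged sketch of the lock/unlock round trip described by the Input/Output contract above; it assumes ck.kernel is importable and that the path refers to an existing CK entry.

    import ck.kernel as ck

    r = ck.set_lock({'path': '/path/to/ck/entry', 'get_lock': 'yes'})
    if r['return'] == 0:
        uid = r['lock_uid']
        # ... work with the entry, then release the lock with the same UID ...
        ck.set_lock({'path': '/path/to/ck/entry', 'unlock_uid': uid})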
quiltdata/quilt
compiler/quilt/tools/util.py
https://github.com/quiltdata/quilt/blob/651853e7e89a8af86e0ff26167e752efa5878c12/compiler/quilt/tools/util.py#L138-L146
def sub_dirs(path, invisible=False): """ Child directories (non-recursive) """ dirs = [x for x in os.listdir(path) if os.path.isdir(os.path.join(path, x))] if not invisible: dirs = [x for x in dirs if not x.startswith('.')] return dirs
[ "def", "sub_dirs", "(", "path", ",", "invisible", "=", "False", ")", ":", "dirs", "=", "[", "x", "for", "x", "in", "os", ".", "listdir", "(", "path", ")", "if", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "path", ",", "x", ")", ")", "]", "if", "not", "invisible", ":", "dirs", "=", "[", "x", "for", "x", "in", "dirs", "if", "not", "x", ".", "startswith", "(", "'.'", ")", "]", "return", "dirs" ]
Child directories (non-recursive)
[ "Child", "directories", "(", "non", "-", "recursive", ")" ]
python
train
28.888889
metacloud/gilt
gilt/util.py
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/util.py#L62-L70
def build_sh_cmd(cmd, cwd=None): """Build a `sh.Command` from a string. :param cmd: String with the command to convert. :param cwd: Optional path to use as working directory. :return: `sh.Command` """ args = cmd.split() return getattr(sh, args[0]).bake(_cwd=cwd, *args[1:])
[ "def", "build_sh_cmd", "(", "cmd", ",", "cwd", "=", "None", ")", ":", "args", "=", "cmd", ".", "split", "(", ")", "return", "getattr", "(", "sh", ",", "args", "[", "0", "]", ")", ".", "bake", "(", "_cwd", "=", "cwd", ",", "*", "args", "[", "1", ":", "]", ")" ]
Build a `sh.Command` from a string. :param cmd: String with the command to convert. :param cwd: Optional path to use as working directory. :return: `sh.Command`
[ "Build", "a", "sh", ".", "Command", "from", "a", "string", "." ]
python
train
32.666667
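A short illustration of build_sh_cmd, assuming the sh package is installed; the command string and working directory are arbitrary.

    cmd = build_sh_cmd('git status', cwd='/tmp')  # binds the working dir via sh's _cwd
    cmd()                                         # runs `git status` inside /tmp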
sparklingpandas/sparklingpandas
sparklingpandas/pcontext.py
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/pcontext.py#L68-L155
def read_csv(self, file_path, use_whole_file=False, names=None, skiprows=0, *args, **kwargs): """Read a CSV file in and parse it into Pandas DataFrames. By default, the first row from the first partition of that data is parsed and used as the column names for the data from. If no 'names' param is provided we parse the first row of the first partition of data and use it for column names. Parameters ---------- file_path: string Path to input. Any valid file path in Spark works here, eg: 'file:///my/path/in/local/file/system' or 'hdfs:/user/juliet/' use_whole_file: boolean Whether of not to use the whole file. names: list of strings, optional skiprows: integer, optional indicates how many rows of input to skip. This will only be applied to the first partition of the data (so if #skiprows > #row in first partition this will not work). Generally this shouldn't be an issue for small values of skiprows. No other value of header is supported. All additional parameters available in pandas.read_csv() are usable here. Returns ------- A SparklingPandas DataFrame that contains the data from the specified file. """ def csv_file(partition_number, files): # pylint: disable=unexpected-keyword-arg file_count = 0 for _, contents in files: # Only skip lines on the first file if partition_number == 0 and file_count == 0 and _skiprows > 0: yield pandas.read_csv( sio(contents), *args, header=None, names=mynames, skiprows=_skiprows, **kwargs) else: file_count += 1 yield pandas.read_csv( sio(contents), *args, header=None, names=mynames, **kwargs) def csv_rows(partition_number, rows): # pylint: disable=unexpected-keyword-arg in_str = "\n".join(rows) if partition_number == 0: return iter([ pandas.read_csv( sio(in_str), *args, header=None, names=mynames, skiprows=_skiprows, **kwargs)]) else: # could use .iterows instead? return iter([pandas.read_csv(sio(in_str), *args, header=None, names=mynames, **kwargs)]) # If we need to peak at the first partition and determine the column # names mynames = None _skiprows = skiprows if names: mynames = names else: # In the future we could avoid this expensive call. first_line = self.spark_ctx.textFile(file_path).first() frame = pandas.read_csv(sio(first_line), **kwargs) # pylint sees frame as a tuple despite it being a DataFrame mynames = list(frame.columns) _skiprows += 1 # Do the actual load if use_whole_file: return self.from_pandas_rdd( self.spark_ctx.wholeTextFiles(file_path) .mapPartitionsWithIndex(csv_file)) else: return self.from_pandas_rdd( self.spark_ctx.textFile(file_path) .mapPartitionsWithIndex(csv_rows))
[ "def", "read_csv", "(", "self", ",", "file_path", ",", "use_whole_file", "=", "False", ",", "names", "=", "None", ",", "skiprows", "=", "0", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "csv_file", "(", "partition_number", ",", "files", ")", ":", "# pylint: disable=unexpected-keyword-arg", "file_count", "=", "0", "for", "_", ",", "contents", "in", "files", ":", "# Only skip lines on the first file", "if", "partition_number", "==", "0", "and", "file_count", "==", "0", "and", "_skiprows", ">", "0", ":", "yield", "pandas", ".", "read_csv", "(", "sio", "(", "contents", ")", ",", "*", "args", ",", "header", "=", "None", ",", "names", "=", "mynames", ",", "skiprows", "=", "_skiprows", ",", "*", "*", "kwargs", ")", "else", ":", "file_count", "+=", "1", "yield", "pandas", ".", "read_csv", "(", "sio", "(", "contents", ")", ",", "*", "args", ",", "header", "=", "None", ",", "names", "=", "mynames", ",", "*", "*", "kwargs", ")", "def", "csv_rows", "(", "partition_number", ",", "rows", ")", ":", "# pylint: disable=unexpected-keyword-arg", "in_str", "=", "\"\\n\"", ".", "join", "(", "rows", ")", "if", "partition_number", "==", "0", ":", "return", "iter", "(", "[", "pandas", ".", "read_csv", "(", "sio", "(", "in_str", ")", ",", "*", "args", ",", "header", "=", "None", ",", "names", "=", "mynames", ",", "skiprows", "=", "_skiprows", ",", "*", "*", "kwargs", ")", "]", ")", "else", ":", "# could use .iterows instead?", "return", "iter", "(", "[", "pandas", ".", "read_csv", "(", "sio", "(", "in_str", ")", ",", "*", "args", ",", "header", "=", "None", ",", "names", "=", "mynames", ",", "*", "*", "kwargs", ")", "]", ")", "# If we need to peak at the first partition and determine the column", "# names", "mynames", "=", "None", "_skiprows", "=", "skiprows", "if", "names", ":", "mynames", "=", "names", "else", ":", "# In the future we could avoid this expensive call.", "first_line", "=", "self", ".", "spark_ctx", ".", "textFile", "(", "file_path", ")", ".", "first", "(", ")", "frame", "=", "pandas", ".", "read_csv", "(", "sio", "(", "first_line", ")", ",", "*", "*", "kwargs", ")", "# pylint sees frame as a tuple despite it being a DataFrame", "mynames", "=", "list", "(", "frame", ".", "columns", ")", "_skiprows", "+=", "1", "# Do the actual load", "if", "use_whole_file", ":", "return", "self", ".", "from_pandas_rdd", "(", "self", ".", "spark_ctx", ".", "wholeTextFiles", "(", "file_path", ")", ".", "mapPartitionsWithIndex", "(", "csv_file", ")", ")", "else", ":", "return", "self", ".", "from_pandas_rdd", "(", "self", ".", "spark_ctx", ".", "textFile", "(", "file_path", ")", ".", "mapPartitionsWithIndex", "(", "csv_rows", ")", ")" ]
Read a CSV file in and parse it into Pandas DataFrames. By default, the first row from the first partition of that data is parsed and used as the column names for the data frame. If no 'names' param is provided we parse the first row of the first partition of data and use it for column names. Parameters ---------- file_path: string Path to input. Any valid file path in Spark works here, eg: 'file:///my/path/in/local/file/system' or 'hdfs:/user/juliet/' use_whole_file: boolean Whether or not to use the whole file. names: list of strings, optional skiprows: integer, optional indicates how many rows of input to skip. This will only be applied to the first partition of the data (so if #skiprows > #row in first partition this will not work). Generally this shouldn't be an issue for small values of skiprows. No other value of header is supported. All additional parameters available in pandas.read_csv() are usable here. Returns ------- A SparklingPandas DataFrame that contains the data from the specified file.
[ "Read", "a", "CSV", "file", "in", "and", "parse", "it", "into", "Pandas", "DataFrames", ".", "By", "default", "the", "first", "row", "from", "the", "first", "partition", "of", "that", "data", "is", "parsed", "and", "used", "as", "the", "column", "names", "for", "the", "data", "from", ".", "If", "no", "names", "param", "is", "provided", "we", "parse", "the", "first", "row", "of", "the", "first", "partition", "of", "data", "and", "use", "it", "for", "column", "names", "." ]
python
train
41.181818
ossobv/dutree
dutree/dutree.py
https://github.com/ossobv/dutree/blob/adceeeb17f9fd70a7ed9c674850d7015d820eb2a/dutree/dutree.py#L153-L166
def _prune_all_if_small(self, small_size, a_or_u): "Return True and delete children if small enough." if self._nodes is None: return True total_size = (self.app_size() if a_or_u else self.use_size()) if total_size < small_size: if a_or_u: self._set_size(total_size, self.use_size()) else: self._set_size(self.app_size(), total_size) return True return False
[ "def", "_prune_all_if_small", "(", "self", ",", "small_size", ",", "a_or_u", ")", ":", "if", "self", ".", "_nodes", "is", "None", ":", "return", "True", "total_size", "=", "(", "self", ".", "app_size", "(", ")", "if", "a_or_u", "else", "self", ".", "use_size", "(", ")", ")", "if", "total_size", "<", "small_size", ":", "if", "a_or_u", ":", "self", ".", "_set_size", "(", "total_size", ",", "self", ".", "use_size", "(", ")", ")", "else", ":", "self", ".", "_set_size", "(", "self", ".", "app_size", "(", ")", ",", "total_size", ")", "return", "True", "return", "False" ]
Return True and delete children if small enough.
[ "Return", "True", "and", "delete", "children", "if", "small", "enough", "." ]
python
train
33.285714
numenta/htmresearch
htmresearch/frameworks/layers/l2_l4_inference.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/l2_l4_inference.py#L518-L573
def plotInferenceStats(self, fields, plotDir="plots", experimentID=0, onePlot=True): """ Plots and saves the desired inference statistics. Parameters: ---------------------------- @param fields (list(str)) List of fields to include in the plots @param experimentID (int) ID of the experiment (usually 0 if only one was conducted) @param onePlot (bool) If true, all cortical columns will be merged in one plot. """ if not os.path.exists(plotDir): os.makedirs(plotDir) plt.figure() stats = self.statistics[experimentID] objectName = stats["object"] for i in xrange(self.numColumns): if not onePlot: plt.figure() # plot request stats for field in fields: fieldKey = field + " C" + str(i) plt.plot(stats[fieldKey], marker='+', label=fieldKey) # format plt.legend(loc="upper right") plt.xlabel("Sensation #") plt.xticks(range(stats["numSteps"])) plt.ylabel("Number of active bits") plt.ylim(plt.ylim()[0] - 5, plt.ylim()[1] + 5) plt.title("Object inference for object {}".format(objectName)) # save if not onePlot: relPath = "{}_exp_{}_C{}.png".format(self.name, experimentID, i) path = os.path.join(plotDir, relPath) plt.savefig(path) plt.close() if onePlot: relPath = "{}_exp_{}.png".format(self.name, experimentID) path = os.path.join(plotDir, relPath) plt.savefig(path) plt.close()
[ "def", "plotInferenceStats", "(", "self", ",", "fields", ",", "plotDir", "=", "\"plots\"", ",", "experimentID", "=", "0", ",", "onePlot", "=", "True", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "plotDir", ")", ":", "os", ".", "makedirs", "(", "plotDir", ")", "plt", ".", "figure", "(", ")", "stats", "=", "self", ".", "statistics", "[", "experimentID", "]", "objectName", "=", "stats", "[", "\"object\"", "]", "for", "i", "in", "xrange", "(", "self", ".", "numColumns", ")", ":", "if", "not", "onePlot", ":", "plt", ".", "figure", "(", ")", "# plot request stats", "for", "field", "in", "fields", ":", "fieldKey", "=", "field", "+", "\" C\"", "+", "str", "(", "i", ")", "plt", ".", "plot", "(", "stats", "[", "fieldKey", "]", ",", "marker", "=", "'+'", ",", "label", "=", "fieldKey", ")", "# format", "plt", ".", "legend", "(", "loc", "=", "\"upper right\"", ")", "plt", ".", "xlabel", "(", "\"Sensation #\"", ")", "plt", ".", "xticks", "(", "range", "(", "stats", "[", "\"numSteps\"", "]", ")", ")", "plt", ".", "ylabel", "(", "\"Number of active bits\"", ")", "plt", ".", "ylim", "(", "plt", ".", "ylim", "(", ")", "[", "0", "]", "-", "5", ",", "plt", ".", "ylim", "(", ")", "[", "1", "]", "+", "5", ")", "plt", ".", "title", "(", "\"Object inference for object {}\"", ".", "format", "(", "objectName", ")", ")", "# save", "if", "not", "onePlot", ":", "relPath", "=", "\"{}_exp_{}_C{}.png\"", ".", "format", "(", "self", ".", "name", ",", "experimentID", ",", "i", ")", "path", "=", "os", ".", "path", ".", "join", "(", "plotDir", ",", "relPath", ")", "plt", ".", "savefig", "(", "path", ")", "plt", ".", "close", "(", ")", "if", "onePlot", ":", "relPath", "=", "\"{}_exp_{}.png\"", ".", "format", "(", "self", ".", "name", ",", "experimentID", ")", "path", "=", "os", ".", "path", ".", "join", "(", "plotDir", ",", "relPath", ")", "plt", ".", "savefig", "(", "path", ")", "plt", ".", "close", "(", ")" ]
Plots and saves the desired inference statistics. Parameters: ---------------------------- @param fields (list(str)) List of fields to include in the plots @param experimentID (int) ID of the experiment (usually 0 if only one was conducted) @param onePlot (bool) If true, all cortical columns will be merged in one plot.
[ "Plots", "and", "saves", "the", "desired", "inference", "statistics", "." ]
python
train
28.464286
EventTeam/beliefs
src/beliefs/beliefstate.py
https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/beliefstate.py#L67-L77
def add_deferred_effect(self, effect, pos): """ Pushes an (pos, effect) tuple onto a stack to later be executed if the state reaches the 'pos'.""" if not isinstance(pos, (unicode, str)): raise Exception("Invalid POS tag. Must be string not %d" % (type(pos))) if self['speaker_model']['is_syntax_stacked'] == True: self.__dict__['deferred_effects'].insert(0,(pos, effect,)) elif self['speaker_model']['is_syntax_stacked'] == False: self.__dict__['deferred_effects'].append((pos, effect,)) else: raise Contradiction("Speaker Model undefined")
[ "def", "add_deferred_effect", "(", "self", ",", "effect", ",", "pos", ")", ":", "if", "not", "isinstance", "(", "pos", ",", "(", "unicode", ",", "str", ")", ")", ":", "raise", "Exception", "(", "\"Invalid POS tag. Must be string not %d\"", "%", "(", "type", "(", "pos", ")", ")", ")", "if", "self", "[", "'speaker_model'", "]", "[", "'is_syntax_stacked'", "]", "==", "True", ":", "self", ".", "__dict__", "[", "'deferred_effects'", "]", ".", "insert", "(", "0", ",", "(", "pos", ",", "effect", ",", ")", ")", "elif", "self", "[", "'speaker_model'", "]", "[", "'is_syntax_stacked'", "]", "==", "False", ":", "self", ".", "__dict__", "[", "'deferred_effects'", "]", ".", "append", "(", "(", "pos", ",", "effect", ",", ")", ")", "else", ":", "raise", "Contradiction", "(", "\"Speaker Model undefined\"", ")" ]
Pushes a (pos, effect) tuple onto a stack to later be executed if the state reaches the 'pos'.
[ "Pushes", "an", "(", "pos", "effect", ")", "tuple", "onto", "a", "stack", "to", "later", "be", "executed", "if", "the", "state", "reaches", "the", "pos", "." ]
python
train
56.909091
LonamiWebs/Telethon
telethon_examples/interactive_telegram_client.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon_examples/interactive_telegram_client.py#L329-L347
async def download_media_by_id(self, media_id): """Given a message ID, finds the media this message contained and downloads it. """ try: msg = self.found_media[int(media_id)] except (ValueError, KeyError): # ValueError when parsing, KeyError when accessing dictionary print('Invalid media ID given or message not found!') return print('Downloading media to usermedia/...') os.makedirs('usermedia', exist_ok=True) output = await self.download_media( msg.media, file='usermedia/', progress_callback=self.download_progress_callback ) print('Media downloaded to {}!'.format(output))
[ "async", "def", "download_media_by_id", "(", "self", ",", "media_id", ")", ":", "try", ":", "msg", "=", "self", ".", "found_media", "[", "int", "(", "media_id", ")", "]", "except", "(", "ValueError", ",", "KeyError", ")", ":", "# ValueError when parsing, KeyError when accessing dictionary", "print", "(", "'Invalid media ID given or message not found!'", ")", "return", "print", "(", "'Downloading media to usermedia/...'", ")", "os", ".", "makedirs", "(", "'usermedia'", ",", "exist_ok", "=", "True", ")", "output", "=", "await", "self", ".", "download_media", "(", "msg", ".", "media", ",", "file", "=", "'usermedia/'", ",", "progress_callback", "=", "self", ".", "download_progress_callback", ")", "print", "(", "'Media downloaded to {}!'", ".", "format", "(", "output", ")", ")" ]
Given a message ID, finds the media this message contained and downloads it.
[ "Given", "a", "message", "ID", "finds", "the", "media", "this", "message", "contained", "and", "downloads", "it", "." ]
python
train
38.315789
mozilla-releng/scriptworker
scriptworker/cot/verify.py
https://github.com/mozilla-releng/scriptworker/blob/8e97bbd83b9b578565ec57904c966dd6ae4ef0ae/scriptworker/cot/verify.py#L1841-L1862
def check_num_tasks(chain, task_count): """Make sure there are a specific number of specific task types. Currently we only check decision tasks. Args: chain (ChainOfTrust): the chain we're operating on task_count (dict): mapping task type to the number of links. Raises: CoTError: on failure. """ errors = [] # hardcode for now. If we need a different set of constraints, either # go by cot_product settings or by task_count['docker-image'] + 1 min_decision_tasks = 1 if task_count['decision'] < min_decision_tasks: errors.append("{} decision tasks; we must have at least {}!".format( task_count['decision'], min_decision_tasks )) raise_on_errors(errors)
[ "def", "check_num_tasks", "(", "chain", ",", "task_count", ")", ":", "errors", "=", "[", "]", "# hardcode for now. If we need a different set of constraints, either", "# go by cot_product settings or by task_count['docker-image'] + 1", "min_decision_tasks", "=", "1", "if", "task_count", "[", "'decision'", "]", "<", "min_decision_tasks", ":", "errors", ".", "append", "(", "\"{} decision tasks; we must have at least {}!\"", ".", "format", "(", "task_count", "[", "'decision'", "]", ",", "min_decision_tasks", ")", ")", "raise_on_errors", "(", "errors", ")" ]
Make sure there are a specific number of specific task types. Currently we only check decision tasks. Args: chain (ChainOfTrust): the chain we're operating on task_count (dict): mapping task type to the number of links. Raises: CoTError: on failure.
[ "Make", "sure", "there", "are", "a", "specific", "number", "of", "specific", "task", "types", "." ]
python
train
33.363636
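A small behavioural sketch of check_num_tasks using an illustrative task-count mapping; per the code above, only the 'decision' count is inspected, and chain is assumed to be an existing ChainOfTrust object (unused by this check).

    # Passes: one decision task satisfies the minimum.
    check_num_tasks(chain, {'decision': 1, 'build': 4})

    # Raises CoTError via raise_on_errors: no decision task present.
    check_num_tasks(chain, {'decision': 0})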
Damgaard/PyImgur
pyimgur/__init__.py
https://github.com/Damgaard/PyImgur/blob/606f17078d24158632f807430f8d0b9b3cd8b312/pyimgur/__init__.py#L487-L491
def get_comments(self): """Get a list of the top-level comments.""" url = self._imgur._base_url + "/3/gallery/{0}/comments".format(self.id) resp = self._imgur._send_request(url) return [Comment(com, self._imgur) for com in resp]
[ "def", "get_comments", "(", "self", ")", ":", "url", "=", "self", ".", "_imgur", ".", "_base_url", "+", "\"/3/gallery/{0}/comments\"", ".", "format", "(", "self", ".", "id", ")", "resp", "=", "self", ".", "_imgur", ".", "_send_request", "(", "url", ")", "return", "[", "Comment", "(", "com", ",", "self", ".", "_imgur", ")", "for", "com", "in", "resp", "]" ]
Get a list of the top-level comments.
[ "Get", "a", "list", "of", "the", "top", "-", "level", "comments", "." ]
python
train
51.2
sorgerlab/indra
indra/sources/reach/api.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/reach/api.py#L77-L115
def process_pubmed_abstract(pubmed_id, offline=False, output_fname=default_output_fname, **kwargs): """Return a ReachProcessor by processing an abstract with a given Pubmed id. Uses the Pubmed client to get the abstract. If that fails, None is returned. Parameters ---------- pubmed_id : str The ID of a Pubmed article. The string may start with PMID but passing just the ID also works. Examples: 27168024, PMID27168024 https://www.ncbi.nlm.nih.gov/pubmed/ offline : Optional[bool] If set to True, the REACH system is ran offline. Otherwise (by default) the web service is called. Default: False output_fname : Optional[str] The file to output the REACH JSON output to. Defaults to reach_output.json in current working directory. **kwargs : keyword arguments All other keyword arguments are passed directly to `process_text`. Returns ------- rp : ReachProcessor A ReachProcessor containing the extracted INDRA Statements in rp.statements. """ abs_txt = pubmed_client.get_abstract(pubmed_id) if abs_txt is None: return None rp = process_text(abs_txt, citation=pubmed_id, offline=offline, output_fname=output_fname, **kwargs) if rp and rp.statements: for st in rp.statements: for ev in st.evidence: ev.epistemics['section_type'] = 'abstract' return rp
[ "def", "process_pubmed_abstract", "(", "pubmed_id", ",", "offline", "=", "False", ",", "output_fname", "=", "default_output_fname", ",", "*", "*", "kwargs", ")", ":", "abs_txt", "=", "pubmed_client", ".", "get_abstract", "(", "pubmed_id", ")", "if", "abs_txt", "is", "None", ":", "return", "None", "rp", "=", "process_text", "(", "abs_txt", ",", "citation", "=", "pubmed_id", ",", "offline", "=", "offline", ",", "output_fname", "=", "output_fname", ",", "*", "*", "kwargs", ")", "if", "rp", "and", "rp", ".", "statements", ":", "for", "st", "in", "rp", ".", "statements", ":", "for", "ev", "in", "st", ".", "evidence", ":", "ev", ".", "epistemics", "[", "'section_type'", "]", "=", "'abstract'", "return", "rp" ]
Return a ReachProcessor by processing an abstract with a given Pubmed id. Uses the Pubmed client to get the abstract. If that fails, None is returned. Parameters ---------- pubmed_id : str The ID of a Pubmed article. The string may start with PMID but passing just the ID also works. Examples: 27168024, PMID27168024 https://www.ncbi.nlm.nih.gov/pubmed/ offline : Optional[bool] If set to True, the REACH system is run offline. Otherwise (by default) the web service is called. Default: False output_fname : Optional[str] The file to output the REACH JSON output to. Defaults to reach_output.json in current working directory. **kwargs : keyword arguments All other keyword arguments are passed directly to `process_text`. Returns ------- rp : ReachProcessor A ReachProcessor containing the extracted INDRA Statements in rp.statements.
[ "Return", "a", "ReachProcessor", "by", "processing", "an", "abstract", "with", "a", "given", "Pubmed", "id", "." ]
python
train
37.641026
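A hedged usage sketch for process_pubmed_abstract; it assumes network access to PubMed and to the REACH web service, and reuses the PMID from the docstring example.

    from indra.sources.reach import api

    rp = api.process_pubmed_abstract('27168024')
    if rp is not None:
        for stmt in rp.statements:
            print(stmt)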
ZELLMECHANIK-DRESDEN/dclab
dclab/rtdc_dataset/core.py
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/core.py#L273-L351
def get_kde_contour(self, xax="area_um", yax="deform", xacc=None, yacc=None, kde_type="histogram", kde_kwargs={}, xscale="linear", yscale="linear"): """Evaluate the kernel density estimate for contour plots Parameters ---------- xax: str Identifier for X axis (e.g. "area_um", "aspect", "deform") yax: str Identifier for Y axis xacc: float Contour accuracy in x direction yacc: float Contour accuracy in y direction kde_type: str The KDE method to use kde_kwargs: dict Additional keyword arguments to the KDE method xscale: str If set to "log", take the logarithm of the x-values before computing the KDE. This is useful when data are are displayed on a log-scale. Defaults to "linear". yscale: str See `xscale`. Returns ------- X, Y, Z : coordinates The kernel density Z evaluated on a rectangular grid (X,Y). """ xax = xax.lower() yax = yax.lower() kde_type = kde_type.lower() if kde_type not in kde_methods.methods: raise ValueError("Not a valid kde type: {}!".format(kde_type)) # Get data x = self[xax][self.filter.all] y = self[yax][self.filter.all] # Apply scale (no change for linear scale) xs = self._apply_scale(x, xscale, xax) ys = self._apply_scale(y, yscale, yax) # accuracy (bin width) of KDE estimator if xacc is None: xacc = kde_methods.bin_width_doane(xs) / 5 if yacc is None: yacc = kde_methods.bin_width_doane(ys) / 5 # Ignore infs and nans bad = kde_methods.get_bad_vals(xs, ys) xc = xs[~bad] yc = ys[~bad] xnum = int(np.ceil((xc.max() - xc.min()) / xacc)) ynum = int(np.ceil((yc.max() - yc.min()) / yacc)) xlin = np.linspace(xc.min(), xc.max(), xnum, endpoint=True) ylin = np.linspace(yc.min(), yc.max(), ynum, endpoint=True) xmesh, ymesh = np.meshgrid(xlin, ylin, indexing="ij") kde_fct = kde_methods.methods[kde_type] if len(x): density = kde_fct(events_x=xs, events_y=ys, xout=xmesh, yout=ymesh, **kde_kwargs) else: density = [] # Convert mesh back to linear scale if applicable if xscale == "log": xmesh = np.exp(xmesh) if yscale == "log": ymesh = np.exp(ymesh) return xmesh, ymesh, density
[ "def", "get_kde_contour", "(", "self", ",", "xax", "=", "\"area_um\"", ",", "yax", "=", "\"deform\"", ",", "xacc", "=", "None", ",", "yacc", "=", "None", ",", "kde_type", "=", "\"histogram\"", ",", "kde_kwargs", "=", "{", "}", ",", "xscale", "=", "\"linear\"", ",", "yscale", "=", "\"linear\"", ")", ":", "xax", "=", "xax", ".", "lower", "(", ")", "yax", "=", "yax", ".", "lower", "(", ")", "kde_type", "=", "kde_type", ".", "lower", "(", ")", "if", "kde_type", "not", "in", "kde_methods", ".", "methods", ":", "raise", "ValueError", "(", "\"Not a valid kde type: {}!\"", ".", "format", "(", "kde_type", ")", ")", "# Get data", "x", "=", "self", "[", "xax", "]", "[", "self", ".", "filter", ".", "all", "]", "y", "=", "self", "[", "yax", "]", "[", "self", ".", "filter", ".", "all", "]", "# Apply scale (no change for linear scale)", "xs", "=", "self", ".", "_apply_scale", "(", "x", ",", "xscale", ",", "xax", ")", "ys", "=", "self", ".", "_apply_scale", "(", "y", ",", "yscale", ",", "yax", ")", "# accuracy (bin width) of KDE estimator", "if", "xacc", "is", "None", ":", "xacc", "=", "kde_methods", ".", "bin_width_doane", "(", "xs", ")", "/", "5", "if", "yacc", "is", "None", ":", "yacc", "=", "kde_methods", ".", "bin_width_doane", "(", "ys", ")", "/", "5", "# Ignore infs and nans", "bad", "=", "kde_methods", ".", "get_bad_vals", "(", "xs", ",", "ys", ")", "xc", "=", "xs", "[", "~", "bad", "]", "yc", "=", "ys", "[", "~", "bad", "]", "xnum", "=", "int", "(", "np", ".", "ceil", "(", "(", "xc", ".", "max", "(", ")", "-", "xc", ".", "min", "(", ")", ")", "/", "xacc", ")", ")", "ynum", "=", "int", "(", "np", ".", "ceil", "(", "(", "yc", ".", "max", "(", ")", "-", "yc", ".", "min", "(", ")", ")", "/", "yacc", ")", ")", "xlin", "=", "np", ".", "linspace", "(", "xc", ".", "min", "(", ")", ",", "xc", ".", "max", "(", ")", ",", "xnum", ",", "endpoint", "=", "True", ")", "ylin", "=", "np", ".", "linspace", "(", "yc", ".", "min", "(", ")", ",", "yc", ".", "max", "(", ")", ",", "ynum", ",", "endpoint", "=", "True", ")", "xmesh", ",", "ymesh", "=", "np", ".", "meshgrid", "(", "xlin", ",", "ylin", ",", "indexing", "=", "\"ij\"", ")", "kde_fct", "=", "kde_methods", ".", "methods", "[", "kde_type", "]", "if", "len", "(", "x", ")", ":", "density", "=", "kde_fct", "(", "events_x", "=", "xs", ",", "events_y", "=", "ys", ",", "xout", "=", "xmesh", ",", "yout", "=", "ymesh", ",", "*", "*", "kde_kwargs", ")", "else", ":", "density", "=", "[", "]", "# Convert mesh back to linear scale if applicable", "if", "xscale", "==", "\"log\"", ":", "xmesh", "=", "np", ".", "exp", "(", "xmesh", ")", "if", "yscale", "==", "\"log\"", ":", "ymesh", "=", "np", ".", "exp", "(", "ymesh", ")", "return", "xmesh", ",", "ymesh", ",", "density" ]
Evaluate the kernel density estimate for contour plots Parameters ---------- xax: str Identifier for X axis (e.g. "area_um", "aspect", "deform") yax: str Identifier for Y axis xacc: float Contour accuracy in x direction yacc: float Contour accuracy in y direction kde_type: str The KDE method to use kde_kwargs: dict Additional keyword arguments to the KDE method xscale: str If set to "log", take the logarithm of the x-values before computing the KDE. This is useful when data are displayed on a log-scale. Defaults to "linear". yscale: str See `xscale`. Returns ------- X, Y, Z : coordinates The kernel density Z evaluated on a rectangular grid (X,Y).
[ "Evaluate", "the", "kernel", "density", "estimate", "for", "contour", "plots" ]
python
train
33.21519
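A small stand-alone illustration of the grid construction in get_kde_contour above may be helpful. The following is a hedged, numpy-only sketch with made-up data and accuracies; it substitutes a plain normalized 2D histogram for dclab's actual kde_methods backend.

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(50, 10, 1000)      # stand-in for "area_um"
y = rng.normal(0.1, 0.02, 1000)   # stand-in for "deform"

# accuracy (bin width) of the estimator; the real method derives this
# from a Doane bin width divided by 5 when not supplied
xacc = (x.max() - x.min()) / 50
yacc = (y.max() - y.min()) / 50

xnum = int(np.ceil((x.max() - x.min()) / xacc))
ynum = int(np.ceil((y.max() - y.min()) / yacc))
xlin = np.linspace(x.min(), x.max(), xnum, endpoint=True)
ylin = np.linspace(y.min(), y.max(), ynum, endpoint=True)
xmesh, ymesh = np.meshgrid(xlin, ylin, indexing="ij")

# crude stand-in for the KDE: normalized cell counts on the same grid
density, _, _ = np.histogram2d(x, y, bins=[xnum, ynum])
density /= density.sum()
print(xmesh.shape, ymesh.shape, density.shape)   # all (50, 50)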
igorcoding/asynctnt-queue
asynctnt_queue/tube.py
https://github.com/igorcoding/asynctnt-queue/blob/75719b2dd27e8314ae924aea6a7a85be8f48ecc5/asynctnt_queue/tube.py#L190-L201
async def kick(self, count): """ Kick `count` tasks from queue :param count: Tasks count to kick :return: Number of tasks actually kicked """ args = (count,) res = await self.conn.call(self.__funcs['kick'], args) if self.conn.version < (1, 7): return res.body[0][0] return res.body[0]
[ "async", "def", "kick", "(", "self", ",", "count", ")", ":", "args", "=", "(", "count", ",", ")", "res", "=", "await", "self", ".", "conn", ".", "call", "(", "self", ".", "__funcs", "[", "'kick'", "]", ",", "args", ")", "if", "self", ".", "conn", ".", "version", "<", "(", "1", ",", "7", ")", ":", "return", "res", ".", "body", "[", "0", "]", "[", "0", "]", "return", "res", ".", "body", "[", "0", "]" ]
Kick `count` tasks from queue :param count: Tasks count to kick :return: Number of tasks actually kicked
[ "Kick", "count", "tasks", "from", "queue" ]
python
train
30.833333
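As a toy illustration of the version-dependent unpacking in kick() above (purely illustrative, no Tarantool connection involved): responses from servers older than 1.7 are nested one level deeper.

def unpack_kick_result(body, server_version):
    # mirrors the branch in kick(): older servers wrap the scalar twice
    if server_version < (1, 7):
        return body[0][0]
    return body[0]

print(unpack_kick_result([[3]], (1, 6)))   # 3
print(unpack_kick_result([3], (1, 7)))     # 3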
aws/sagemaker-containers
src/sagemaker_containers/entry_point.py
https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/entry_point.py#L22-L89
def run(uri, user_entry_point, args, env_vars=None, wait=True, capture_error=False, runner=_runner.ProcessRunnerType, extra_opts=None): # type: (str, str, List[str], Dict[str, str], bool, bool, _runner.RunnerType, Dict[str, str]) -> None """Download, prepare and executes a compressed tar file from S3 or provided directory as an user entrypoint. Runs the user entry point, passing env_vars as environment variables and args as command arguments. If the entry point is: - A Python package: executes the packages as >>> env_vars python -m module_name + args - A Python script: executes the script as >>> env_vars python module_name + args - Any other: executes the command as >>> env_vars /bin/sh -c ./module_name + args Example: >>>import sagemaker_containers >>>from sagemaker_containers.beta.framework import entry_point >>>env = sagemaker_containers.training_env() {'channel-input-dirs': {'training': '/opt/ml/input/training'}, 'model_dir': '/opt/ml/model', ...} >>>hyperparameters = env.hyperparameters {'batch-size': 128, 'model_dir': '/opt/ml/model'} >>>args = mapping.to_cmd_args(hyperparameters) ['--batch-size', '128', '--model_dir', '/opt/ml/model'] >>>env_vars = mapping.to_env_vars() ['SAGEMAKER_CHANNELS':'training', 'SAGEMAKER_CHANNEL_TRAINING':'/opt/ml/input/training', 'MODEL_DIR':'/opt/ml/model', ...} >>>entry_point.run('user_script', args, env_vars) SAGEMAKER_CHANNELS=training SAGEMAKER_CHANNEL_TRAINING=/opt/ml/input/training \ SAGEMAKER_MODEL_DIR=/opt/ml/model python -m user_script --batch-size 128 --model_dir /opt/ml/model Args: uri (str): the location of the module. user_entry_point (str): name of the user provided entry point args (list): A list of program arguments. env_vars (dict): A map containing the environment variables to be written (default: None). wait (bool): If the user entry point should be run to completion before this method returns (default: True). capture_error (bool): Default false. If True, the running process captures the stderr, and appends it to the returned Exception message in case of errors. runner (sagemaker_containers.beta.framework.runner.RunnerType): the type of runner object to be created (default: sagemaker_containers.beta.framework.runner.ProcessRunnerType). extra_opts (dict): Additional options for running the entry point (default: None). Currently, this only applies for MPI. Returns: sagemaker_containers.beta.framework.process.ProcessRunner: the runner object responsible for executing the entry point. """ env_vars = env_vars or {} env_vars = env_vars.copy() _files.download_and_extract(uri, user_entry_point, _env.code_dir) install(user_entry_point, _env.code_dir, capture_error) _env.write_env_vars(env_vars) return _runner.get(runner, user_entry_point, args, env_vars, extra_opts).run(wait, capture_error)
[ "def", "run", "(", "uri", ",", "user_entry_point", ",", "args", ",", "env_vars", "=", "None", ",", "wait", "=", "True", ",", "capture_error", "=", "False", ",", "runner", "=", "_runner", ".", "ProcessRunnerType", ",", "extra_opts", "=", "None", ")", ":", "# type: (str, str, List[str], Dict[str, str], bool, bool, _runner.RunnerType, Dict[str, str]) -> None", "env_vars", "=", "env_vars", "or", "{", "}", "env_vars", "=", "env_vars", ".", "copy", "(", ")", "_files", ".", "download_and_extract", "(", "uri", ",", "user_entry_point", ",", "_env", ".", "code_dir", ")", "install", "(", "user_entry_point", ",", "_env", ".", "code_dir", ",", "capture_error", ")", "_env", ".", "write_env_vars", "(", "env_vars", ")", "return", "_runner", ".", "get", "(", "runner", ",", "user_entry_point", ",", "args", ",", "env_vars", ",", "extra_opts", ")", ".", "run", "(", "wait", ",", "capture_error", ")" ]
Download, prepare and executes a compressed tar file from S3 or provided directory as an user entrypoint. Runs the user entry point, passing env_vars as environment variables and args as command arguments. If the entry point is: - A Python package: executes the packages as >>> env_vars python -m module_name + args - A Python script: executes the script as >>> env_vars python module_name + args - Any other: executes the command as >>> env_vars /bin/sh -c ./module_name + args Example: >>>import sagemaker_containers >>>from sagemaker_containers.beta.framework import entry_point >>>env = sagemaker_containers.training_env() {'channel-input-dirs': {'training': '/opt/ml/input/training'}, 'model_dir': '/opt/ml/model', ...} >>>hyperparameters = env.hyperparameters {'batch-size': 128, 'model_dir': '/opt/ml/model'} >>>args = mapping.to_cmd_args(hyperparameters) ['--batch-size', '128', '--model_dir', '/opt/ml/model'] >>>env_vars = mapping.to_env_vars() ['SAGEMAKER_CHANNELS':'training', 'SAGEMAKER_CHANNEL_TRAINING':'/opt/ml/input/training', 'MODEL_DIR':'/opt/ml/model', ...} >>>entry_point.run('user_script', args, env_vars) SAGEMAKER_CHANNELS=training SAGEMAKER_CHANNEL_TRAINING=/opt/ml/input/training \ SAGEMAKER_MODEL_DIR=/opt/ml/model python -m user_script --batch-size 128 --model_dir /opt/ml/model Args: uri (str): the location of the module. user_entry_point (str): name of the user provided entry point args (list): A list of program arguments. env_vars (dict): A map containing the environment variables to be written (default: None). wait (bool): If the user entry point should be run to completion before this method returns (default: True). capture_error (bool): Default false. If True, the running process captures the stderr, and appends it to the returned Exception message in case of errors. runner (sagemaker_containers.beta.framework.runner.RunnerType): the type of runner object to be created (default: sagemaker_containers.beta.framework.runner.ProcessRunnerType). extra_opts (dict): Additional options for running the entry point (default: None). Currently, this only applies for MPI. Returns: sagemaker_containers.beta.framework.process.ProcessRunner: the runner object responsible for executing the entry point.
[ "Download", "prepare", "and", "executes", "a", "compressed", "tar", "file", "from", "S3", "or", "provided", "directory", "as", "an", "user", "entrypoint", ".", "Runs", "the", "user", "entry", "point", "passing", "env_vars", "as", "environment", "variables", "and", "args", "as", "command", "arguments", "." ]
python
train
46.044118
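The docstring above relies on mapping.to_cmd_args to turn hyperparameters into CLI arguments. A minimal stand-in, inferred only from the docstring's example and not taken from the library's actual implementation, could look like this:

def to_cmd_args(hyperparameters):
    # {"batch-size": 128} -> ["--batch-size", "128"]
    args = []
    for key, value in hyperparameters.items():
        args.extend(["--" + str(key), str(value)])
    return args

print(to_cmd_args({"batch-size": 128, "model_dir": "/opt/ml/model"}))
# ['--batch-size', '128', '--model_dir', '/opt/ml/model']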
OpenKMIP/PyKMIP
kmip/core/objects.py
https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/core/objects.py#L5082-L5260
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_3): """ Read the data encoding the ValidationInformation structure and decode it into its constituent parts. Args: input_buffer (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 2.0. Raises: InvalidKmipEncoding: Raised if the validation authority type, validation version major, validation type, and/or validation level are missing from the encoding. VersionNotSupported: Raised when a KMIP version is provided that does not support the ValidationInformation structure. """ if kmip_version < enums.KMIPVersion.KMIP_1_3: raise exceptions.VersionNotSupported( "KMIP {} does not support the ValidationInformation " "object.".format( kmip_version.value ) ) super(ValidationInformation, self).read( input_buffer, kmip_version=kmip_version ) local_buffer = utils.BytearrayStream(input_buffer.read(self.length)) if self.is_tag_next( enums.Tags.VALIDATION_AUTHORITY_TYPE, local_buffer ): validation_authority_type = primitives.Enumeration( enums.ValidationAuthorityType, tag=enums.Tags.VALIDATION_AUTHORITY_TYPE ) validation_authority_type.read( local_buffer, kmip_version=kmip_version ) self._validation_authority_type = validation_authority_type else: raise exceptions.InvalidKmipEncoding( "The ValidationInformation encoding is missing the " "validation authority type." ) if self.is_tag_next( enums.Tags.VALIDATION_AUTHORITY_COUNTRY, local_buffer ): validation_authority_country = primitives.TextString( tag=enums.Tags.VALIDATION_AUTHORITY_COUNTRY ) validation_authority_country.read( local_buffer, kmip_version=kmip_version ) self._validation_authority_country = validation_authority_country if self.is_tag_next(enums.Tags.VALIDATION_AUTHORITY_URI, local_buffer): validation_authority_uri = primitives.TextString( tag=enums.Tags.VALIDATION_AUTHORITY_URI ) validation_authority_uri.read( local_buffer, kmip_version=kmip_version ) self._validation_authority_uri = validation_authority_uri if self.is_tag_next( enums.Tags.VALIDATION_VERSION_MAJOR, local_buffer ): validation_version_major = primitives.Integer( tag=enums.Tags.VALIDATION_VERSION_MAJOR ) validation_version_major.read( local_buffer, kmip_version=kmip_version ) self._validation_version_major = validation_version_major else: raise exceptions.InvalidKmipEncoding( "The ValidationInformation encoding is missing the " "validation version major." ) if self.is_tag_next( enums.Tags.VALIDATION_VERSION_MINOR, local_buffer ): validation_version_minor = primitives.Integer( tag=enums.Tags.VALIDATION_VERSION_MINOR ) validation_version_minor.read( local_buffer, kmip_version=kmip_version ) self._validation_version_minor = validation_version_minor if self.is_tag_next(enums.Tags.VALIDATION_TYPE, local_buffer): validation_type = primitives.Enumeration( enums.ValidationType, tag=enums.Tags.VALIDATION_TYPE ) validation_type.read( local_buffer, kmip_version=kmip_version ) self._validation_type = validation_type else: raise exceptions.InvalidKmipEncoding( "The ValidationInformation encoding is missing the " "validation type." 
) if self.is_tag_next(enums.Tags.VALIDATION_LEVEL, local_buffer): validation_level = primitives.Integer( tag=enums.Tags.VALIDATION_LEVEL ) validation_level.read(local_buffer, kmip_version=kmip_version) self._validation_level = validation_level else: raise exceptions.InvalidKmipEncoding( "The ValidationInformation encoding is missing the " "validation level." ) if self.is_tag_next( enums.Tags.VALIDATION_CERTIFICATE_IDENTIFIER, local_buffer ): validation_certificate_identifier = primitives.TextString( tag=enums.Tags.VALIDATION_CERTIFICATE_IDENTIFIER ) validation_certificate_identifier.read( local_buffer, kmip_version=kmip_version ) self._validation_certificate_identifier = \ validation_certificate_identifier if self.is_tag_next( enums.Tags.VALIDATION_CERTIFICATE_URI, local_buffer ): validation_certificate_uri = primitives.TextString( tag=enums.Tags.VALIDATION_CERTIFICATE_URI ) validation_certificate_uri.read( local_buffer, kmip_version=kmip_version ) self._validation_certificate_uri = validation_certificate_uri if self.is_tag_next(enums.Tags.VALIDATION_VENDOR_URI, local_buffer): validation_vendor_uri = primitives.TextString( tag=enums.Tags.VALIDATION_VENDOR_URI ) validation_vendor_uri.read(local_buffer, kmip_version=kmip_version) self._validation_vendor_uri = validation_vendor_uri validation_profiles = [] while self.is_tag_next(enums.Tags.VALIDATION_PROFILE, local_buffer): validation_profile = primitives.TextString( tag=enums.Tags.VALIDATION_PROFILE ) validation_profile.read(local_buffer, kmip_version=kmip_version) validation_profiles.append(validation_profile) self._validation_profiles = validation_profiles self.is_oversized(local_buffer)
[ "def", "read", "(", "self", ",", "input_buffer", ",", "kmip_version", "=", "enums", ".", "KMIPVersion", ".", "KMIP_1_3", ")", ":", "if", "kmip_version", "<", "enums", ".", "KMIPVersion", ".", "KMIP_1_3", ":", "raise", "exceptions", ".", "VersionNotSupported", "(", "\"KMIP {} does not support the ValidationInformation \"", "\"object.\"", ".", "format", "(", "kmip_version", ".", "value", ")", ")", "super", "(", "ValidationInformation", ",", "self", ")", ".", "read", "(", "input_buffer", ",", "kmip_version", "=", "kmip_version", ")", "local_buffer", "=", "utils", ".", "BytearrayStream", "(", "input_buffer", ".", "read", "(", "self", ".", "length", ")", ")", "if", "self", ".", "is_tag_next", "(", "enums", ".", "Tags", ".", "VALIDATION_AUTHORITY_TYPE", ",", "local_buffer", ")", ":", "validation_authority_type", "=", "primitives", ".", "Enumeration", "(", "enums", ".", "ValidationAuthorityType", ",", "tag", "=", "enums", ".", "Tags", ".", "VALIDATION_AUTHORITY_TYPE", ")", "validation_authority_type", ".", "read", "(", "local_buffer", ",", "kmip_version", "=", "kmip_version", ")", "self", ".", "_validation_authority_type", "=", "validation_authority_type", "else", ":", "raise", "exceptions", ".", "InvalidKmipEncoding", "(", "\"The ValidationInformation encoding is missing the \"", "\"validation authority type.\"", ")", "if", "self", ".", "is_tag_next", "(", "enums", ".", "Tags", ".", "VALIDATION_AUTHORITY_COUNTRY", ",", "local_buffer", ")", ":", "validation_authority_country", "=", "primitives", ".", "TextString", "(", "tag", "=", "enums", ".", "Tags", ".", "VALIDATION_AUTHORITY_COUNTRY", ")", "validation_authority_country", ".", "read", "(", "local_buffer", ",", "kmip_version", "=", "kmip_version", ")", "self", ".", "_validation_authority_country", "=", "validation_authority_country", "if", "self", ".", "is_tag_next", "(", "enums", ".", "Tags", ".", "VALIDATION_AUTHORITY_URI", ",", "local_buffer", ")", ":", "validation_authority_uri", "=", "primitives", ".", "TextString", "(", "tag", "=", "enums", ".", "Tags", ".", "VALIDATION_AUTHORITY_URI", ")", "validation_authority_uri", ".", "read", "(", "local_buffer", ",", "kmip_version", "=", "kmip_version", ")", "self", ".", "_validation_authority_uri", "=", "validation_authority_uri", "if", "self", ".", "is_tag_next", "(", "enums", ".", "Tags", ".", "VALIDATION_VERSION_MAJOR", ",", "local_buffer", ")", ":", "validation_version_major", "=", "primitives", ".", "Integer", "(", "tag", "=", "enums", ".", "Tags", ".", "VALIDATION_VERSION_MAJOR", ")", "validation_version_major", ".", "read", "(", "local_buffer", ",", "kmip_version", "=", "kmip_version", ")", "self", ".", "_validation_version_major", "=", "validation_version_major", "else", ":", "raise", "exceptions", ".", "InvalidKmipEncoding", "(", "\"The ValidationInformation encoding is missing the \"", "\"validation version major.\"", ")", "if", "self", ".", "is_tag_next", "(", "enums", ".", "Tags", ".", "VALIDATION_VERSION_MINOR", ",", "local_buffer", ")", ":", "validation_version_minor", "=", "primitives", ".", "Integer", "(", "tag", "=", "enums", ".", "Tags", ".", "VALIDATION_VERSION_MINOR", ")", "validation_version_minor", ".", "read", "(", "local_buffer", ",", "kmip_version", "=", "kmip_version", ")", "self", ".", "_validation_version_minor", "=", "validation_version_minor", "if", "self", ".", "is_tag_next", "(", "enums", ".", "Tags", ".", "VALIDATION_TYPE", ",", "local_buffer", ")", ":", "validation_type", "=", "primitives", ".", "Enumeration", "(", "enums", ".", "ValidationType", ",", "tag", "=", 
"enums", ".", "Tags", ".", "VALIDATION_TYPE", ")", "validation_type", ".", "read", "(", "local_buffer", ",", "kmip_version", "=", "kmip_version", ")", "self", ".", "_validation_type", "=", "validation_type", "else", ":", "raise", "exceptions", ".", "InvalidKmipEncoding", "(", "\"The ValidationInformation encoding is missing the \"", "\"validation type.\"", ")", "if", "self", ".", "is_tag_next", "(", "enums", ".", "Tags", ".", "VALIDATION_LEVEL", ",", "local_buffer", ")", ":", "validation_level", "=", "primitives", ".", "Integer", "(", "tag", "=", "enums", ".", "Tags", ".", "VALIDATION_LEVEL", ")", "validation_level", ".", "read", "(", "local_buffer", ",", "kmip_version", "=", "kmip_version", ")", "self", ".", "_validation_level", "=", "validation_level", "else", ":", "raise", "exceptions", ".", "InvalidKmipEncoding", "(", "\"The ValidationInformation encoding is missing the \"", "\"validation level.\"", ")", "if", "self", ".", "is_tag_next", "(", "enums", ".", "Tags", ".", "VALIDATION_CERTIFICATE_IDENTIFIER", ",", "local_buffer", ")", ":", "validation_certificate_identifier", "=", "primitives", ".", "TextString", "(", "tag", "=", "enums", ".", "Tags", ".", "VALIDATION_CERTIFICATE_IDENTIFIER", ")", "validation_certificate_identifier", ".", "read", "(", "local_buffer", ",", "kmip_version", "=", "kmip_version", ")", "self", ".", "_validation_certificate_identifier", "=", "validation_certificate_identifier", "if", "self", ".", "is_tag_next", "(", "enums", ".", "Tags", ".", "VALIDATION_CERTIFICATE_URI", ",", "local_buffer", ")", ":", "validation_certificate_uri", "=", "primitives", ".", "TextString", "(", "tag", "=", "enums", ".", "Tags", ".", "VALIDATION_CERTIFICATE_URI", ")", "validation_certificate_uri", ".", "read", "(", "local_buffer", ",", "kmip_version", "=", "kmip_version", ")", "self", ".", "_validation_certificate_uri", "=", "validation_certificate_uri", "if", "self", ".", "is_tag_next", "(", "enums", ".", "Tags", ".", "VALIDATION_VENDOR_URI", ",", "local_buffer", ")", ":", "validation_vendor_uri", "=", "primitives", ".", "TextString", "(", "tag", "=", "enums", ".", "Tags", ".", "VALIDATION_VENDOR_URI", ")", "validation_vendor_uri", ".", "read", "(", "local_buffer", ",", "kmip_version", "=", "kmip_version", ")", "self", ".", "_validation_vendor_uri", "=", "validation_vendor_uri", "validation_profiles", "=", "[", "]", "while", "self", ".", "is_tag_next", "(", "enums", ".", "Tags", ".", "VALIDATION_PROFILE", ",", "local_buffer", ")", ":", "validation_profile", "=", "primitives", ".", "TextString", "(", "tag", "=", "enums", ".", "Tags", ".", "VALIDATION_PROFILE", ")", "validation_profile", ".", "read", "(", "local_buffer", ",", "kmip_version", "=", "kmip_version", ")", "validation_profiles", ".", "append", "(", "validation_profile", ")", "self", ".", "_validation_profiles", "=", "validation_profiles", "self", ".", "is_oversized", "(", "local_buffer", ")" ]
Read the data encoding the ValidationInformation structure and decode it into its constituent parts. Args: input_buffer (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 2.0. Raises: InvalidKmipEncoding: Raised if the validation authority type, validation version major, validation type, and/or validation level are missing from the encoding. VersionNotSupported: Raised when a KMIP version is provided that does not support the ValidationInformation structure.
[ "Read", "the", "data", "encoding", "the", "ValidationInformation", "structure", "and", "decode", "it", "into", "its", "constituent", "parts", "." ]
python
test
37.837989
numenta/nupic
src/nupic/swarming/hypersearch/permutation_helpers.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch/permutation_helpers.py#L212-L236
def newPosition(self, globalBestPosition, rng): """See comments in base class.""" # First, update the velocity. The new velocity is given as: # v = (inertia * v) + (cogRate * r1 * (localBest-pos)) # + (socRate * r2 * (globalBest-pos)) # # where r1 and r2 are random numbers between 0 and 1.0 lb=float(Configuration.get("nupic.hypersearch.randomLowerBound")) ub=float(Configuration.get("nupic.hypersearch.randomUpperBound")) self._velocity = (self._velocity * self._inertia + rng.uniform(lb, ub) * self._cogRate * (self._bestPosition - self.getPosition())) if globalBestPosition is not None: self._velocity += rng.uniform(lb, ub) * self._socRate * ( globalBestPosition - self.getPosition()) # update position based on velocity self._position += self._velocity # Clip it self._position = max(self.min, self._position) self._position = min(self.max, self._position) # Return it return self.getPosition()
[ "def", "newPosition", "(", "self", ",", "globalBestPosition", ",", "rng", ")", ":", "# First, update the velocity. The new velocity is given as:", "# v = (inertia * v) + (cogRate * r1 * (localBest-pos))", "# + (socRate * r2 * (globalBest-pos))", "#", "# where r1 and r2 are random numbers between 0 and 1.0", "lb", "=", "float", "(", "Configuration", ".", "get", "(", "\"nupic.hypersearch.randomLowerBound\"", ")", ")", "ub", "=", "float", "(", "Configuration", ".", "get", "(", "\"nupic.hypersearch.randomUpperBound\"", ")", ")", "self", ".", "_velocity", "=", "(", "self", ".", "_velocity", "*", "self", ".", "_inertia", "+", "rng", ".", "uniform", "(", "lb", ",", "ub", ")", "*", "self", ".", "_cogRate", "*", "(", "self", ".", "_bestPosition", "-", "self", ".", "getPosition", "(", ")", ")", ")", "if", "globalBestPosition", "is", "not", "None", ":", "self", ".", "_velocity", "+=", "rng", ".", "uniform", "(", "lb", ",", "ub", ")", "*", "self", ".", "_socRate", "*", "(", "globalBestPosition", "-", "self", ".", "getPosition", "(", ")", ")", "# update position based on velocity", "self", ".", "_position", "+=", "self", ".", "_velocity", "# Clip it", "self", ".", "_position", "=", "max", "(", "self", ".", "min", ",", "self", ".", "_position", ")", "self", ".", "_position", "=", "min", "(", "self", ".", "max", ",", "self", ".", "_position", ")", "# Return it", "return", "self", ".", "getPosition", "(", ")" ]
See comments in base class.
[ "See", "comments", "in", "base", "class", "." ]
python
valid
40.24
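newPosition() above is a standard particle-swarm update. The following self-contained sketch restates it with illustrative rates and bounds; the real values come from nupic's Configuration and the permutation variable's min/max, so treat every default below as an assumption.

import random

def new_position(pos, vel, local_best, global_best,
                 inertia=0.5, cog_rate=2.0, soc_rate=2.0,
                 rand_lo=0.0, rand_hi=1.0, pos_min=-10.0, pos_max=10.0,
                 rng=random.Random(42)):
    # v = inertia*v + r1*cogRate*(localBest - pos) + r2*socRate*(globalBest - pos)
    vel = inertia * vel + rng.uniform(rand_lo, rand_hi) * cog_rate * (local_best - pos)
    if global_best is not None:
        vel += rng.uniform(rand_lo, rand_hi) * soc_rate * (global_best - pos)
    pos = min(max(pos + vel, pos_min), pos_max)   # clip to the allowed range
    return pos, vel

print(new_position(pos=1.0, vel=0.1, local_best=2.0, global_best=3.0))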
The-Politico/politico-civic-election-night
electionnight/serializers/election.py
https://github.com/The-Politico/politico-civic-election-night/blob/a8aaf5be43872a7b84d2b0d7c2b6151d32d4d8b6/electionnight/serializers/election.py#L102-L104
def get_images(self, obj): """Object of images serialized by tag name.""" return {str(i.tag): i.image.url for i in obj.images.all()}
[ "def", "get_images", "(", "self", ",", "obj", ")", ":", "return", "{", "str", "(", "i", ".", "tag", ")", ":", "i", ".", "image", ".", "url", "for", "i", "in", "obj", ".", "images", ".", "all", "(", ")", "}" ]
Object of images serialized by tag name.
[ "Object", "of", "images", "serialized", "by", "tag", "name", "." ]
python
train
48.666667
ejeschke/ginga
ginga/rv/plugins/Thumbs.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/Thumbs.py#L630-L651
def delete_channel_cb(self, viewer, channel): """Called when a channel is deleted from the main interface. Parameter is channel (a bunch).""" chname_del = channel.name # TODO: delete thumbs for this channel! self.logger.debug("deleting thumbs for channel '%s'" % (chname_del)) with self.thmblock: new_thumb_list = [] un_hilite_set = set([]) for thumbkey in self.thumb_list: chname = thumbkey[0] if chname != chname_del: new_thumb_list.append(thumbkey) else: if thumbkey in self.thumb_dict: del self.thumb_dict[thumbkey] un_hilite_set.add(thumbkey) self.thumb_list = new_thumb_list self._tkf_highlight -= un_hilite_set # Unhighlight self.fv.gui_do_oneshot('thumbs-reorder', self.reorder_thumbs)
[ "def", "delete_channel_cb", "(", "self", ",", "viewer", ",", "channel", ")", ":", "chname_del", "=", "channel", ".", "name", "# TODO: delete thumbs for this channel!", "self", ".", "logger", ".", "debug", "(", "\"deleting thumbs for channel '%s'\"", "%", "(", "chname_del", ")", ")", "with", "self", ".", "thmblock", ":", "new_thumb_list", "=", "[", "]", "un_hilite_set", "=", "set", "(", "[", "]", ")", "for", "thumbkey", "in", "self", ".", "thumb_list", ":", "chname", "=", "thumbkey", "[", "0", "]", "if", "chname", "!=", "chname_del", ":", "new_thumb_list", ".", "append", "(", "thumbkey", ")", "else", ":", "if", "thumbkey", "in", "self", ".", "thumb_dict", ":", "del", "self", ".", "thumb_dict", "[", "thumbkey", "]", "un_hilite_set", ".", "add", "(", "thumbkey", ")", "self", ".", "thumb_list", "=", "new_thumb_list", "self", ".", "_tkf_highlight", "-=", "un_hilite_set", "# Unhighlight", "self", ".", "fv", ".", "gui_do_oneshot", "(", "'thumbs-reorder'", ",", "self", ".", "reorder_thumbs", ")" ]
Called when a channel is deleted from the main interface. Parameter is channel (a bunch).
[ "Called", "when", "a", "channel", "is", "deleted", "from", "the", "main", "interface", ".", "Parameter", "is", "channel", "(", "a", "bunch", ")", "." ]
python
train
41.954545
jrigden/pyPodcastParser
pyPodcastParser/Podcast.py
https://github.com/jrigden/pyPodcastParser/blob/b21e027bb56ec77986d76fc1990f4e420c6de869/pyPodcastParser/Podcast.py#L394-L401
def set_pubsubhubbub(self): """Parses pubsubhubbub and email then sets value""" self.pubsubhubbub = None atom_links = self.soup.findAll('atom:link') for atom_link in atom_links: rel = atom_link.get('rel') if rel == "hub": self.pubsubhubbub = atom_link.get('href')
[ "def", "set_pubsubhubbub", "(", "self", ")", ":", "self", ".", "pubsubhubbub", "=", "None", "atom_links", "=", "self", ".", "soup", ".", "findAll", "(", "'atom:link'", ")", "for", "atom_link", "in", "atom_links", ":", "rel", "=", "atom_link", ".", "get", "(", "'rel'", ")", "if", "rel", "==", "\"hub\"", ":", "self", ".", "pubsubhubbub", "=", "atom_link", ".", "get", "(", "'href'", ")" ]
Parses pubsubhubbub and email then sets value
[ "Parses", "pubsubhubbub", "and", "email", "then", "sets", "value" ]
python
train
41
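set_pubsubhubbub() above scans atom:link elements for rel="hub". A standard-library sketch of the same lookup, using xml.etree instead of BeautifulSoup on a made-up feed snippet:

import xml.etree.ElementTree as ET

feed = """<rss xmlns:atom="http://www.w3.org/2005/Atom"><channel>
  <atom:link rel="hub" href="https://pubsubhubbub.example.org/"/>
  <atom:link rel="self" href="https://example.org/feed.rss"/>
</channel></rss>"""

hub = None
for link in ET.fromstring(feed).iter("{http://www.w3.org/2005/Atom}link"):
    if link.get("rel") == "hub":
        hub = link.get("href")
print(hub)   # https://pubsubhubbub.example.org/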
obulpathi/cdn-fastly-python
fastly/__init__.py
https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L709-L712
def delete_service(self, service_id): """Delete a service.""" content = self._fetch("/service/%s" % service_id, method="DELETE") return self._status(content)
[ "def", "delete_service", "(", "self", ",", "service_id", ")", ":", "content", "=", "self", ".", "_fetch", "(", "\"/service/%s\"", "%", "service_id", ",", "method", "=", "\"DELETE\"", ")", "return", "self", ".", "_status", "(", "content", ")" ]
Delete a service.
[ "Delete", "a", "service", "." ]
python
train
40
treycucco/pyebnf
pyebnf/compiler.py
https://github.com/treycucco/pyebnf/blob/3634ddabbe5d73508bcc20f4a591f86a46634e1d/pyebnf/compiler.py#L324-L346
def _ast_optree_node_to_code(self, node, **kwargs): """Convert an abstract syntax operator tree to python source code.""" opnode = node.opnode if opnode is None: return self._ast_to_code(node.operands[0]) else: operator = opnode.operator if operator is OP_ALTERNATE: return self._ast_op_alternate_to_code(node, **kwargs) elif operator is OP_WS_CONCAT: kwargs["ignore_whitespace"] = False return self._ast_op_concat_to_code(node, **kwargs) elif operator is OP_CONCAT: kwargs["ignore_whitespace"] = True return self._ast_op_concat_to_code(node, **kwargs) elif operator is OP_EXCLUDE: return self._ast_op_exclude_to_code(node, **kwargs) elif operator is OP_MULTIPLY: return self._ast_op_multiply_to_code(node, **kwargs) elif operator is OP_REPEAT: return self._ast_op_repeat_to_code(node, **kwargs) else: raise Exception("Unhandled optree node: {0}".format(node))
[ "def", "_ast_optree_node_to_code", "(", "self", ",", "node", ",", "*", "*", "kwargs", ")", ":", "opnode", "=", "node", ".", "opnode", "if", "opnode", "is", "None", ":", "return", "self", ".", "_ast_to_code", "(", "node", ".", "operands", "[", "0", "]", ")", "else", ":", "operator", "=", "opnode", ".", "operator", "if", "operator", "is", "OP_ALTERNATE", ":", "return", "self", ".", "_ast_op_alternate_to_code", "(", "node", ",", "*", "*", "kwargs", ")", "elif", "operator", "is", "OP_WS_CONCAT", ":", "kwargs", "[", "\"ignore_whitespace\"", "]", "=", "False", "return", "self", ".", "_ast_op_concat_to_code", "(", "node", ",", "*", "*", "kwargs", ")", "elif", "operator", "is", "OP_CONCAT", ":", "kwargs", "[", "\"ignore_whitespace\"", "]", "=", "True", "return", "self", ".", "_ast_op_concat_to_code", "(", "node", ",", "*", "*", "kwargs", ")", "elif", "operator", "is", "OP_EXCLUDE", ":", "return", "self", ".", "_ast_op_exclude_to_code", "(", "node", ",", "*", "*", "kwargs", ")", "elif", "operator", "is", "OP_MULTIPLY", ":", "return", "self", ".", "_ast_op_multiply_to_code", "(", "node", ",", "*", "*", "kwargs", ")", "elif", "operator", "is", "OP_REPEAT", ":", "return", "self", ".", "_ast_op_repeat_to_code", "(", "node", ",", "*", "*", "kwargs", ")", "else", ":", "raise", "Exception", "(", "\"Unhandled optree node: {0}\"", ".", "format", "(", "node", ")", ")" ]
Convert an abstract syntax operator tree to python source code.
[ "Convert", "an", "abstract", "syntax", "operator", "tree", "to", "python", "source", "code", "." ]
python
test
42.608696
locationlabs/mockredis
mockredis/script.py
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/script.py#L29-L51
def _execute_lua(self, keys, args, client): """ Sets KEYS and ARGV alongwith redis.call() function in lua globals and executes the lua redis script """ lua, lua_globals = Script._import_lua(self.load_dependencies) lua_globals.KEYS = self._python_to_lua(keys) lua_globals.ARGV = self._python_to_lua(args) def _call(*call_args): # redis-py and native redis commands are mostly compatible argument # wise, but some exceptions need to be handled here: if str(call_args[0]).lower() == 'lrem': response = client.call( call_args[0], call_args[1], call_args[3], # "count", default is 0 call_args[2]) else: response = client.call(*call_args) return self._python_to_lua(response) lua_globals.redis = {"call": _call} return self._lua_to_python(lua.execute(self.script), return_status=True)
[ "def", "_execute_lua", "(", "self", ",", "keys", ",", "args", ",", "client", ")", ":", "lua", ",", "lua_globals", "=", "Script", ".", "_import_lua", "(", "self", ".", "load_dependencies", ")", "lua_globals", ".", "KEYS", "=", "self", ".", "_python_to_lua", "(", "keys", ")", "lua_globals", ".", "ARGV", "=", "self", ".", "_python_to_lua", "(", "args", ")", "def", "_call", "(", "*", "call_args", ")", ":", "# redis-py and native redis commands are mostly compatible argument", "# wise, but some exceptions need to be handled here:", "if", "str", "(", "call_args", "[", "0", "]", ")", ".", "lower", "(", ")", "==", "'lrem'", ":", "response", "=", "client", ".", "call", "(", "call_args", "[", "0", "]", ",", "call_args", "[", "1", "]", ",", "call_args", "[", "3", "]", ",", "# \"count\", default is 0", "call_args", "[", "2", "]", ")", "else", ":", "response", "=", "client", ".", "call", "(", "*", "call_args", ")", "return", "self", ".", "_python_to_lua", "(", "response", ")", "lua_globals", ".", "redis", "=", "{", "\"call\"", ":", "_call", "}", "return", "self", ".", "_lua_to_python", "(", "lua", ".", "execute", "(", "self", ".", "script", ")", ",", "return_status", "=", "True", ")" ]
Sets KEYS and ARGV along with redis.call() function in lua globals
        and executes the lua redis script
[ "Sets", "KEYS", "and", "ARGV", "alongwith", "redis", ".", "call", "()", "function", "in", "lua", "globals", "and", "executes", "the", "lua", "redis", "script" ]
python
train
43.086957
JensRantil/rewind
rewind/server/eventstores.py
https://github.com/JensRantil/rewind/blob/7f645d20186c1db55cfe53a0310c9fd6292f91ea/rewind/server/eventstores.py#L981-L997
def add_event(self, key, event): """Add an event and its corresponding key to the store.""" if self.key_exists(key): # This check might actually also be done further up in the chain # (read: SQLiteEventStore). Could potentially be removed if it # requires a lot of processor cycles. msg = "The key already existed: {0}".format(key) raise EventStore.EventKeyAlreadyExistError(msg) self._rotate_files_if_needed() # Since I guess LogEventStore is less mature codewise than # SQLiteEventStore I am writing to that log file first. If something # fails we are not writing to SQLiteEventStore. for store in self.stores: store.add_event(key, event) self.count += 1
[ "def", "add_event", "(", "self", ",", "key", ",", "event", ")", ":", "if", "self", ".", "key_exists", "(", "key", ")", ":", "# This check might actually also be done further up in the chain", "# (read: SQLiteEventStore). Could potentially be removed if it", "# requires a lot of processor cycles.", "msg", "=", "\"The key already existed: {0}\"", ".", "format", "(", "key", ")", "raise", "EventStore", ".", "EventKeyAlreadyExistError", "(", "msg", ")", "self", ".", "_rotate_files_if_needed", "(", ")", "# Since I guess LogEventStore is less mature codewise than", "# SQLiteEventStore I am writing to that log file first. If something", "# fails we are not writing to SQLiteEventStore.", "for", "store", "in", "self", ".", "stores", ":", "store", ".", "add_event", "(", "key", ",", "event", ")", "self", ".", "count", "+=", "1" ]
Add an event and its corresponding key to the store.
[ "Add", "an", "event", "and", "its", "corresponding", "key", "to", "the", "store", "." ]
python
train
45.764706
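The fan-out-with-duplicate-check pattern in add_event() above can be shown in isolation. This is a toy in-memory version; MemoryStore and its attributes are invented for the example and do not exist in rewind.

class MemoryStore:
    def __init__(self):
        self.events = {}
    def add_event(self, key, event):
        self.events[key] = event

class CompositeStore:
    """Fan writes out to several stores, rejecting duplicate keys."""
    def __init__(self, stores):
        self.stores = stores
        self.count = 0
    def key_exists(self, key):
        return any(key in s.events for s in self.stores)
    def add_event(self, key, event):
        if self.key_exists(key):
            raise KeyError(f"The key already existed: {key}")
        for store in self.stores:
            store.add_event(key, event)
        self.count += 1

cs = CompositeStore([MemoryStore(), MemoryStore()])
cs.add_event("a:1", b"payload")
print(cs.count)   # 1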
wal-e/wal-e
wal_e/log_help.py
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/log_help.py#L118-L129
def get_syslog_facility(): """Get syslog facility from ENV var""" facil = os.getenv('WALE_SYSLOG_FACILITY', 'user') valid_facility = True try: facility = handlers.SysLogHandler.facility_names[facil.lower()] except KeyError: valid_facility = False facility = handlers.SysLogHandler.LOG_USER return facility, valid_facility
[ "def", "get_syslog_facility", "(", ")", ":", "facil", "=", "os", ".", "getenv", "(", "'WALE_SYSLOG_FACILITY'", ",", "'user'", ")", "valid_facility", "=", "True", "try", ":", "facility", "=", "handlers", ".", "SysLogHandler", ".", "facility_names", "[", "facil", ".", "lower", "(", ")", "]", "except", "KeyError", ":", "valid_facility", "=", "False", "facility", "=", "handlers", ".", "SysLogHandler", ".", "LOG_USER", "return", "facility", ",", "valid_facility" ]
Get syslog facility from ENV var
[ "Get", "syslog", "facility", "from", "ENV", "var" ]
python
train
30
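get_syslog_facility() above leans on the standard library's SysLogHandler.facility_names mapping. A brief usage sketch; the environment value is illustrative.

import os
from logging import handlers

os.environ["WALE_SYSLOG_FACILITY"] = "local0"   # illustrative setting
facil = os.getenv("WALE_SYSLOG_FACILITY", "user")
try:
    facility = handlers.SysLogHandler.facility_names[facil.lower()]
    valid = True
except KeyError:
    facility, valid = handlers.SysLogHandler.LOG_USER, False
print(facility == handlers.SysLogHandler.LOG_LOCAL0, valid)   # True True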
HEPData/hepdata-converter
hepdata_converter/writers/csv_writer.py
https://github.com/HEPData/hepdata-converter/blob/354271448980efba86f2f3d27b99d818e75fd90d/hepdata_converter/writers/csv_writer.py#L82-L102
def _write_packed_data(self, data_out, table): """This is kind of legacy function - this functionality may be useful for some people, so even though now the default of writing CSV is writing unpacked data (divided by independent variable) this method is still available and accessible if ```pack``` flag is specified in Writer's options :param output: output file like object to which data will be written :param table: input table :type table: hepdata_converter.parsers.Table """ headers = [] data = [] qualifiers_marks = [] qualifiers = {} self._extract_independent_variables(table, headers, data, qualifiers_marks) for dependent_variable in table.dependent_variables: self._parse_dependent_variable(dependent_variable, headers, qualifiers, qualifiers_marks, data) self._write_metadata(data_out, table) self._write_csv_data(data_out, qualifiers, qualifiers_marks, headers, data)
[ "def", "_write_packed_data", "(", "self", ",", "data_out", ",", "table", ")", ":", "headers", "=", "[", "]", "data", "=", "[", "]", "qualifiers_marks", "=", "[", "]", "qualifiers", "=", "{", "}", "self", ".", "_extract_independent_variables", "(", "table", ",", "headers", ",", "data", ",", "qualifiers_marks", ")", "for", "dependent_variable", "in", "table", ".", "dependent_variables", ":", "self", ".", "_parse_dependent_variable", "(", "dependent_variable", ",", "headers", ",", "qualifiers", ",", "qualifiers_marks", ",", "data", ")", "self", ".", "_write_metadata", "(", "data_out", ",", "table", ")", "self", ".", "_write_csv_data", "(", "data_out", ",", "qualifiers", ",", "qualifiers_marks", ",", "headers", ",", "data", ")" ]
This is kind of legacy function - this functionality may be useful for some people, so even though now the default of writing CSV is writing unpacked data (divided by independent variable) this method is still available and accessible if ```pack``` flag is specified in Writer's options :param output: output file like object to which data will be written :param table: input table :type table: hepdata_converter.parsers.Table
[ "This", "is", "kind", "of", "legacy", "function", "-", "this", "functionality", "may", "be", "useful", "for", "some", "people", "so", "even", "though", "now", "the", "default", "of", "writing", "CSV", "is", "writing", "unpacked", "data", "(", "divided", "by", "independent", "variable", ")", "this", "method", "is", "still", "available", "and", "accessible", "if", "pack", "flag", "is", "specified", "in", "Writer", "s", "options" ]
python
train
47.428571
materialsproject/pymatgen
pymatgen/analysis/graphs.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/graphs.py#L467-L508
def break_edge(self, from_index, to_index, to_jimage=None, allow_reverse=False): """ Remove an edge from the StructureGraph. If no image is given, this method will fail. :param from_index: int :param to_index: int :param to_jimage: tuple :param allow_reverse: If allow_reverse is True, then break_edge will attempt to break both (from_index, to_index) and, failing that, will attempt to break (to_index, from_index). :return: """ # ensure that edge exists before attempting to remove it existing_edges = self.graph.get_edge_data(from_index, to_index) existing_reverse = None if to_jimage is None: raise ValueError("Image must be supplied, to avoid ambiguity.") if existing_edges: for i, properties in existing_edges.items(): if properties["to_jimage"] == to_jimage: edge_index = i self.graph.remove_edge(from_index, to_index, edge_index) else: if allow_reverse: existing_reverse = self.graph.get_edge_data(to_index, from_index) if existing_reverse: for i, properties in existing_reverse.items(): if properties["to_jimage"] == to_jimage: edge_index = i self.graph.remove_edge(to_index, from_index, edge_index) else: raise ValueError("Edge cannot be broken between {} and {};\ no edge exists between those sites.".format( from_index, to_index ))
[ "def", "break_edge", "(", "self", ",", "from_index", ",", "to_index", ",", "to_jimage", "=", "None", ",", "allow_reverse", "=", "False", ")", ":", "# ensure that edge exists before attempting to remove it", "existing_edges", "=", "self", ".", "graph", ".", "get_edge_data", "(", "from_index", ",", "to_index", ")", "existing_reverse", "=", "None", "if", "to_jimage", "is", "None", ":", "raise", "ValueError", "(", "\"Image must be supplied, to avoid ambiguity.\"", ")", "if", "existing_edges", ":", "for", "i", ",", "properties", "in", "existing_edges", ".", "items", "(", ")", ":", "if", "properties", "[", "\"to_jimage\"", "]", "==", "to_jimage", ":", "edge_index", "=", "i", "self", ".", "graph", ".", "remove_edge", "(", "from_index", ",", "to_index", ",", "edge_index", ")", "else", ":", "if", "allow_reverse", ":", "existing_reverse", "=", "self", ".", "graph", ".", "get_edge_data", "(", "to_index", ",", "from_index", ")", "if", "existing_reverse", ":", "for", "i", ",", "properties", "in", "existing_reverse", ".", "items", "(", ")", ":", "if", "properties", "[", "\"to_jimage\"", "]", "==", "to_jimage", ":", "edge_index", "=", "i", "self", ".", "graph", ".", "remove_edge", "(", "to_index", ",", "from_index", ",", "edge_index", ")", "else", ":", "raise", "ValueError", "(", "\"Edge cannot be broken between {} and {};\\\n no edge exists between those sites.\"", ".", "format", "(", "from_index", ",", "to_index", ")", ")" ]
Remove an edge from the StructureGraph. If no image is given, this method will fail. :param from_index: int :param to_index: int :param to_jimage: tuple :param allow_reverse: If allow_reverse is True, then break_edge will attempt to break both (from_index, to_index) and, failing that, will attempt to break (to_index, from_index). :return:
[ "Remove", "an", "edge", "from", "the", "StructureGraph", ".", "If", "no", "image", "is", "given", "this", "method", "will", "fail", "." ]
python
train
39.285714
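Underneath break_edge() is a networkx MultiDiGraph in which parallel edges are distinguished by their to_jimage attribute. The reduced sketch below (requires networkx; names and the reverse-lookup handling are simplified) removes exactly the matching parallel edge.

import networkx as nx

g = nx.MultiDiGraph()
g.add_edge(0, 1, to_jimage=(0, 0, 0))
g.add_edge(0, 1, to_jimage=(1, 0, 0))

def break_edge(graph, u, v, to_jimage):
    edges = graph.get_edge_data(u, v) or {}
    for key, props in list(edges.items()):
        if props.get("to_jimage") == to_jimage:
            graph.remove_edge(u, v, key)
            return
    raise ValueError("no edge between {} and {} with image {}".format(u, v, to_jimage))

break_edge(g, 0, 1, (1, 0, 0))
print(g.number_of_edges(0, 1))   # 1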
koszullab/metaTOR
metator/scripts/hicstuff.py
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L1514-L1533
def split_matrix(M, contigs):
    """Split multiple chromosome matrix

    Split a labeled matrix with multiple chromosomes
    into unlabeled single-chromosome matrices. Inter chromosomal
    contacts are discarded.

    Parameters
    ----------
    M : array_like
        The multiple chromosome matrix to be split
    contigs : list or array_like
        The list of contig labels
    """

    index = 0
    for _, chunk in itertools.groupby(contigs):
        l = len(list(chunk))
        yield M[index : index + l, index : index + l]
        index += l
[ "def", "split_matrix", "(", "M", ",", "contigs", ")", ":", "index", "=", "0", "for", "_", ",", "chunk", "in", "itertools", ".", "groubpy", "(", "contigs", ")", ":", "l", "=", "len", "(", "chunk", ")", "yield", "M", "[", "index", ":", "index", "+", "l", ",", "index", ":", "index", "+", "l", "]", "index", "+=", "l" ]
Split multiple chromosome matrix Split a labeled matrix with multiple chromosomes into unlabeled single-chromosome matrices. Inter chromosomal contacts are discarded. Parameters ---------- M : array_like The multiple chromosome matrix to be split contigs : list or array_like The list of contig labels
[ "Split", "multiple", "chromosome", "matrix" ]
python
train
26.65
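A quick usage sketch for split_matrix above on a toy labelled matrix. Note that itertools.groupby yields iterators, hence the list() before len().

import itertools
import numpy as np

M = np.arange(25).reshape(5, 5)
contigs = ["c1", "c1", "c1", "c2", "c2"]

index = 0
for _, chunk in itertools.groupby(contigs):
    l = len(list(chunk))
    block = M[index:index + l, index:index + l]
    print(block.shape)   # (3, 3) then (2, 2)
    index += l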
jic-dtool/dtoolcore
dtoolcore/__init__.py
https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/__init__.py#L43-L47
def _is_dataset(uri, config_path): """Helper function for determining if a URI is a dataset.""" uri = dtoolcore.utils.sanitise_uri(uri) storage_broker = _get_storage_broker(uri, config_path) return storage_broker.has_admin_metadata()
[ "def", "_is_dataset", "(", "uri", ",", "config_path", ")", ":", "uri", "=", "dtoolcore", ".", "utils", ".", "sanitise_uri", "(", "uri", ")", "storage_broker", "=", "_get_storage_broker", "(", "uri", ",", "config_path", ")", "return", "storage_broker", ".", "has_admin_metadata", "(", ")" ]
Helper function for determining if a URI is a dataset.
[ "Helper", "function", "for", "determining", "if", "a", "URI", "is", "a", "dataset", "." ]
python
train
49
craffel/mir_eval
mir_eval/util.py
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L758-L780
def validate_intervals(intervals): """Checks that an (n, 2) interval ndarray is well-formed, and raises errors if not. Parameters ---------- intervals : np.ndarray, shape=(n, 2) Array of interval start/end locations. """ # Validate interval shape if intervals.ndim != 2 or intervals.shape[1] != 2: raise ValueError('Intervals should be n-by-2 numpy ndarray, ' 'but shape={}'.format(intervals.shape)) # Make sure no times are negative if (intervals < 0).any(): raise ValueError('Negative interval times found') # Make sure all intervals have strictly positive duration if (intervals[:, 1] <= intervals[:, 0]).any(): raise ValueError('All interval durations must be strictly positive')
[ "def", "validate_intervals", "(", "intervals", ")", ":", "# Validate interval shape", "if", "intervals", ".", "ndim", "!=", "2", "or", "intervals", ".", "shape", "[", "1", "]", "!=", "2", ":", "raise", "ValueError", "(", "'Intervals should be n-by-2 numpy ndarray, '", "'but shape={}'", ".", "format", "(", "intervals", ".", "shape", ")", ")", "# Make sure no times are negative", "if", "(", "intervals", "<", "0", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "'Negative interval times found'", ")", "# Make sure all intervals have strictly positive duration", "if", "(", "intervals", "[", ":", ",", "1", "]", "<=", "intervals", "[", ":", ",", "0", "]", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "'All interval durations must be strictly positive'", ")" ]
Checks that an (n, 2) interval ndarray is well-formed, and raises errors if not. Parameters ---------- intervals : np.ndarray, shape=(n, 2) Array of interval start/end locations.
[ "Checks", "that", "an", "(", "n", "2", ")", "interval", "ndarray", "is", "well", "-", "formed", "and", "raises", "errors", "if", "not", "." ]
python
train
33.521739
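Typical use of validate_intervals above; this assumes mir_eval is installed, and the interval arrays are made up.

import numpy as np
from mir_eval.util import validate_intervals

validate_intervals(np.array([[0.0, 1.0], [1.0, 2.5]]))       # passes silently
try:
    validate_intervals(np.array([[0.0, 1.0], [2.0, 2.0]]))   # zero duration
except ValueError as err:
    print(err)   # All interval durations must be strictly positive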
IdentityPython/pysaml2
src/saml2/sigver.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/sigver.py#L1855-L1870
def pre_encrypt_assertion(response): """ Move the assertion to within a encrypted_assertion :param response: The response with one assertion :return: The response but now with the assertion within an encrypted_assertion. """ assertion = response.assertion response.assertion = None response.encrypted_assertion = EncryptedAssertion() if assertion is not None: if isinstance(assertion, list): response.encrypted_assertion.add_extension_elements(assertion) else: response.encrypted_assertion.add_extension_element(assertion) return response
[ "def", "pre_encrypt_assertion", "(", "response", ")", ":", "assertion", "=", "response", ".", "assertion", "response", ".", "assertion", "=", "None", "response", ".", "encrypted_assertion", "=", "EncryptedAssertion", "(", ")", "if", "assertion", "is", "not", "None", ":", "if", "isinstance", "(", "assertion", ",", "list", ")", ":", "response", ".", "encrypted_assertion", ".", "add_extension_elements", "(", "assertion", ")", "else", ":", "response", ".", "encrypted_assertion", ".", "add_extension_element", "(", "assertion", ")", "return", "response" ]
Move the assertion to within a encrypted_assertion :param response: The response with one assertion :return: The response but now with the assertion within an encrypted_assertion.
[ "Move", "the", "assertion", "to", "within", "a", "encrypted_assertion", ":", "param", "response", ":", "The", "response", "with", "one", "assertion", ":", "return", ":", "The", "response", "but", "now", "with", "the", "assertion", "within", "an", "encrypted_assertion", "." ]
python
train
38.1875
mitsei/dlkit
dlkit/json_/commenting/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/commenting/managers.py#L386-L400
def get_comment_book_session(self): """Gets the session for retrieving comment to book mappings. return: (osid.commenting.CommentBookSession) - a ``CommentBookSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_comment_book()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_comment_book()`` is ``true``.* """ if not self.supports_comment_book(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.CommentBookSession(runtime=self._runtime)
[ "def", "get_comment_book_session", "(", "self", ")", ":", "if", "not", "self", ".", "supports_comment_book", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "# pylint: disable=no-member", "return", "sessions", ".", "CommentBookSession", "(", "runtime", "=", "self", ".", "_runtime", ")" ]
Gets the session for retrieving comment to book mappings. return: (osid.commenting.CommentBookSession) - a ``CommentBookSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_comment_book()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_comment_book()`` is ``true``.*
[ "Gets", "the", "session", "for", "retrieving", "comment", "to", "book", "mappings", "." ]
python
train
42.666667
OldhamMade/PySO8601
PySO8601/utility.py
https://github.com/OldhamMade/PySO8601/blob/b7d3b3cb3ed3e12eb2a21caa26a3abeab3c96fe4/PySO8601/utility.py#L28-L52
def _timedelta_from_elements(elements): """ Return a timedelta from a dict of date elements. Accepts a dict containing any of the following: - years - months - days - hours - minutes - seconds If years and/or months are provided, it will use a naive calcuation of 365 days in a year and 30 days in a month. """ days = sum(( elements['days'], _months_to_days(elements.get('months', 0)), _years_to_days(elements.get('years', 0)) )) return datetime.timedelta(days=days, hours=elements.get('hours', 0), minutes=elements.get('minutes', 0), seconds=elements.get('seconds', 0))
[ "def", "_timedelta_from_elements", "(", "elements", ")", ":", "days", "=", "sum", "(", "(", "elements", "[", "'days'", "]", ",", "_months_to_days", "(", "elements", ".", "get", "(", "'months'", ",", "0", ")", ")", ",", "_years_to_days", "(", "elements", ".", "get", "(", "'years'", ",", "0", ")", ")", ")", ")", "return", "datetime", ".", "timedelta", "(", "days", "=", "days", ",", "hours", "=", "elements", ".", "get", "(", "'hours'", ",", "0", ")", ",", "minutes", "=", "elements", ".", "get", "(", "'minutes'", ",", "0", ")", ",", "seconds", "=", "elements", ".", "get", "(", "'seconds'", ",", "0", ")", ")" ]
Return a timedelta from a dict of date elements.

    Accepts a dict containing any of the following:

    - years
    - months
    - days
    - hours
    - minutes
    - seconds

    If years and/or months are provided, it will use a naive calculation
    of 365 days in a year and 30 days in a month.
[ "Return", "a", "timedelta", "from", "a", "dict", "of", "date", "elements", "." ]
python
train
29.4
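The naive year/month arithmetic described above, restated as a self-contained sketch with the helpers folded inline (365 days per year, 30 per month); the .get() defaults are an assumption for the example.

import datetime

def timedelta_from_elements(elements):
    days = (elements.get("days", 0)
            + 30 * elements.get("months", 0)
            + 365 * elements.get("years", 0))
    return datetime.timedelta(days=days,
                              hours=elements.get("hours", 0),
                              minutes=elements.get("minutes", 0),
                              seconds=elements.get("seconds", 0))

print(timedelta_from_elements({"years": 1, "months": 2, "days": 3, "hours": 4}))
# 428 days, 4:00:00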
softlayer/softlayer-python
SoftLayer/managers/user.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/user.py#L171-L197
def get_events(self, user_id, start_date=None): """Gets the event log for a specific user, default start_date is 30 days ago :param int id: User id to view :param string start_date: "%Y-%m-%dT%H:%M:%s.0000-06:00" is the full formatted string. The Timezone part has to be HH:MM, notice the : there. :returns: https://softlayer.github.io/reference/datatypes/SoftLayer_Event_Log/ """ if start_date is None: date_object = datetime.datetime.today() - datetime.timedelta(days=30) start_date = date_object.strftime("%Y-%m-%dT00:00:00") object_filter = { 'userId': { 'operation': user_id }, 'eventCreateDate': { 'operation': 'greaterThanDate', 'options': [{'name': 'date', 'value': [start_date]}] } } events = self.client.call('Event_Log', 'getAllObjects', filter=object_filter) if events is None: events = [{'eventName': 'No Events Found'}] return events
[ "def", "get_events", "(", "self", ",", "user_id", ",", "start_date", "=", "None", ")", ":", "if", "start_date", "is", "None", ":", "date_object", "=", "datetime", ".", "datetime", ".", "today", "(", ")", "-", "datetime", ".", "timedelta", "(", "days", "=", "30", ")", "start_date", "=", "date_object", ".", "strftime", "(", "\"%Y-%m-%dT00:00:00\"", ")", "object_filter", "=", "{", "'userId'", ":", "{", "'operation'", ":", "user_id", "}", ",", "'eventCreateDate'", ":", "{", "'operation'", ":", "'greaterThanDate'", ",", "'options'", ":", "[", "{", "'name'", ":", "'date'", ",", "'value'", ":", "[", "start_date", "]", "}", "]", "}", "}", "events", "=", "self", ".", "client", ".", "call", "(", "'Event_Log'", ",", "'getAllObjects'", ",", "filter", "=", "object_filter", ")", "if", "events", "is", "None", ":", "events", "=", "[", "{", "'eventName'", ":", "'No Events Found'", "}", "]", "return", "events" ]
Gets the event log for a specific user, default start_date is 30 days ago :param int id: User id to view :param string start_date: "%Y-%m-%dT%H:%M:%s.0000-06:00" is the full formatted string. The Timezone part has to be HH:MM, notice the : there. :returns: https://softlayer.github.io/reference/datatypes/SoftLayer_Event_Log/
[ "Gets", "the", "event", "log", "for", "a", "specific", "user", "default", "start_date", "is", "30", "days", "ago" ]
python
train
39.962963
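The default-date and object-filter construction inside get_events() above can be exercised without a SoftLayer account; a standard-library-only sketch with an illustrative user id:

import datetime

user_id = 12345   # illustrative id
date_object = datetime.datetime.today() - datetime.timedelta(days=30)
start_date = date_object.strftime("%Y-%m-%dT00:00:00")

object_filter = {
    "userId": {"operation": user_id},
    "eventCreateDate": {
        "operation": "greaterThanDate",
        "options": [{"name": "date", "value": [start_date]}],
    },
}
print(object_filter["eventCreateDate"]["options"][0]["value"][0])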
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/frontend/html/notebook/kernelmanager.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/html/notebook/kernelmanager.py#L329-L332
def create_shell_stream(self, kernel_id): """Create a new shell stream.""" self._check_kernel_id(kernel_id) return super(MappingKernelManager, self).create_shell_stream(kernel_id)
[ "def", "create_shell_stream", "(", "self", ",", "kernel_id", ")", ":", "self", ".", "_check_kernel_id", "(", "kernel_id", ")", "return", "super", "(", "MappingKernelManager", ",", "self", ")", ".", "create_shell_stream", "(", "kernel_id", ")" ]
Create a new shell stream.
[ "Create", "a", "new", "shell", "stream", "." ]
python
test
50
d0c-s4vage/pfp
pfp/bitwrap.py
https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/bitwrap.py#L125-L143
def read_bits(self, num): """Read ``num`` number of bits from the stream :num: number of bits to read :returns: a list of ``num`` bits, or an empty list if EOF has been reached """ if num > len(self._bits): needed = num - len(self._bits) num_bytes = int(math.ceil(needed / 8.0)) read_bytes = self._stream.read(num_bytes) for bit in bytes_to_bits(read_bytes): self._bits.append(bit) res = [] while len(res) < num and len(self._bits) > 0: res.append(self._bits.popleft()) return res
[ "def", "read_bits", "(", "self", ",", "num", ")", ":", "if", "num", ">", "len", "(", "self", ".", "_bits", ")", ":", "needed", "=", "num", "-", "len", "(", "self", ".", "_bits", ")", "num_bytes", "=", "int", "(", "math", ".", "ceil", "(", "needed", "/", "8.0", ")", ")", "read_bytes", "=", "self", ".", "_stream", ".", "read", "(", "num_bytes", ")", "for", "bit", "in", "bytes_to_bits", "(", "read_bytes", ")", ":", "self", ".", "_bits", ".", "append", "(", "bit", ")", "res", "=", "[", "]", "while", "len", "(", "res", ")", "<", "num", "and", "len", "(", "self", ".", "_bits", ")", ">", "0", ":", "res", ".", "append", "(", "self", ".", "_bits", ".", "popleft", "(", ")", ")", "return", "res" ]
Read ``num`` number of bits from the stream :num: number of bits to read :returns: a list of ``num`` bits, or an empty list if EOF has been reached
[ "Read", "num", "number", "of", "bits", "from", "the", "stream" ]
python
train
31.894737
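A self-contained toy bit reader in the spirit of read_bits() above, pulling whole bytes from an io.BytesIO stream on demand and handing back individual bits MSB-first; pfp's bytes_to_bits helper is replaced by a simple shift loop.

import io
import math
from collections import deque

class BitReader:
    def __init__(self, stream):
        self._stream = stream
        self._bits = deque()

    def read_bits(self, num):
        if num > len(self._bits):
            needed = num - len(self._bits)
            for byte in self._stream.read(int(math.ceil(needed / 8.0))):
                for shift in range(7, -1, -1):
                    self._bits.append((byte >> shift) & 1)
        return [self._bits.popleft() for _ in range(min(num, len(self._bits)))]

reader = BitReader(io.BytesIO(b"\xf0"))
print(reader.read_bits(4), reader.read_bits(4))   # [1, 1, 1, 1] [0, 0, 0, 0]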
twisted/mantissa
xmantissa/ampserver.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/ampserver.py#L64-L74
def generateOneTimePad(self, userStore): """ Generate a pad which can be used to authenticate via AMP. This pad will expire in L{ONE_TIME_PAD_DURATION} seconds. """ pad = secureRandom(16).encode('hex') self._oneTimePads[pad] = userStore.idInParent def expirePad(): self._oneTimePads.pop(pad, None) self.callLater(self.ONE_TIME_PAD_DURATION, expirePad) return pad
[ "def", "generateOneTimePad", "(", "self", ",", "userStore", ")", ":", "pad", "=", "secureRandom", "(", "16", ")", ".", "encode", "(", "'hex'", ")", "self", ".", "_oneTimePads", "[", "pad", "]", "=", "userStore", ".", "idInParent", "def", "expirePad", "(", ")", ":", "self", ".", "_oneTimePads", ".", "pop", "(", "pad", ",", "None", ")", "self", ".", "callLater", "(", "self", ".", "ONE_TIME_PAD_DURATION", ",", "expirePad", ")", "return", "pad" ]
Generate a pad which can be used to authenticate via AMP. This pad will expire in L{ONE_TIME_PAD_DURATION} seconds.
[ "Generate", "a", "pad", "which", "can", "be", "used", "to", "authenticate", "via", "AMP", ".", "This", "pad", "will", "expire", "in", "L", "{", "ONE_TIME_PAD_DURATION", "}", "seconds", "." ]
python
train
39.727273
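Outside of Twisted, the same one-time-pad idea can be sketched with the standard library alone. Names, the duration, and the expiry-on-redeem policy below are assumptions for the example; the original instead schedules expiry with callLater.

import os
import time

ONE_TIME_PAD_DURATION = 60 * 2
_pads = {}   # pad -> (user_id, issued_at)

def generate_one_time_pad(user_id):
    pad = os.urandom(16).hex()
    _pads[pad] = (user_id, time.time())
    return pad

def redeem(pad):
    entry = _pads.pop(pad, None)
    if entry is None or time.time() - entry[1] > ONE_TIME_PAD_DURATION:
        return None          # unknown or expired
    return entry[0]

pad = generate_one_time_pad(user_id=42)
print(redeem(pad))   # 42
print(redeem(pad))   # None -- single use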
mitsei/dlkit
dlkit/json_/repository/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/sessions.py#L2765-L2786
def get_compositions_by_asset(self, asset_id): """Gets a list of compositions including the given asset. arg: asset_id (osid.id.Id): ``Id`` of the ``Asset`` return: (osid.repository.CompositionList) - the returned ``Composition list`` raise: NotFound - ``asset_id`` is not found raise: NullArgument - ``asset_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.repository.AssetCompositionSession.get_compositions_by_asset collection = JSONClientValidated('repository', collection='Composition', runtime=self._runtime) result = collection.find( dict({'assetIds': {'$in': [str(asset_id)]}}, **self._view_filter())).sort('_id', DESCENDING) return objects.CompositionList(result, runtime=self._runtime)
[ "def", "get_compositions_by_asset", "(", "self", ",", "asset_id", ")", ":", "# Implemented from template for", "# osid.repository.AssetCompositionSession.get_compositions_by_asset", "collection", "=", "JSONClientValidated", "(", "'repository'", ",", "collection", "=", "'Composition'", ",", "runtime", "=", "self", ".", "_runtime", ")", "result", "=", "collection", ".", "find", "(", "dict", "(", "{", "'assetIds'", ":", "{", "'$in'", ":", "[", "str", "(", "asset_id", ")", "]", "}", "}", ",", "*", "*", "self", ".", "_view_filter", "(", ")", ")", ")", ".", "sort", "(", "'_id'", ",", "DESCENDING", ")", "return", "objects", ".", "CompositionList", "(", "result", ",", "runtime", "=", "self", ".", "_runtime", ")" ]
Gets a list of compositions including the given asset. arg: asset_id (osid.id.Id): ``Id`` of the ``Asset`` return: (osid.repository.CompositionList) - the returned ``Composition list`` raise: NotFound - ``asset_id`` is not found raise: NullArgument - ``asset_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Gets", "a", "list", "of", "compositions", "including", "the", "given", "asset", "." ]
python
train
49.681818
coinbase/coinbase-python
coinbase/wallet/client.py
https://github.com/coinbase/coinbase-python/blob/497c28158f529e8c7d0228521b4386a890baf088/coinbase/wallet/client.py#L244-L247
def get_time(self, **params): """https://developers.coinbase.com/api/v2#time""" response = self._get('v2', 'time', params=params) return self._make_api_object(response, APIObject)
[ "def", "get_time", "(", "self", ",", "*", "*", "params", ")", ":", "response", "=", "self", ".", "_get", "(", "'v2'", ",", "'time'", ",", "params", "=", "params", ")", "return", "self", ".", "_make_api_object", "(", "response", ",", "APIObject", ")" ]
https://developers.coinbase.com/api/v2#time
[ "https", ":", "//", "developers", ".", "coinbase", ".", "com", "/", "api", "/", "v2#time" ]
python
train
50
StackStorm/pybind
pybind/nos/v6_0_2f/rbridge_id/secpolicy/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/rbridge_id/secpolicy/__init__.py#L133-L156
def _set_active_policy(self, v, load=False): """ Setter method for active_policy, mapped from YANG variable /rbridge_id/secpolicy/active_policy (container) If this variable is read-only (config: false) in the source YANG file, then _set_active_policy is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_active_policy() directly. YANG Description: Set the Active policy """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=active_policy.active_policy, is_container='container', presence=False, yang_name="active-policy", rest_name="active-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Active policy set', u'cli-incomplete-no': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fc-auth', defining_module='brocade-fc-auth', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """active_policy must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=active_policy.active_policy, is_container='container', presence=False, yang_name="active-policy", rest_name="active-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Active policy set', u'cli-incomplete-no': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fc-auth', defining_module='brocade-fc-auth', yang_type='container', is_config=True)""", }) self.__active_policy = t if hasattr(self, '_set'): self._set()
[ "def", "_set_active_policy", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "active_policy", ".", "active_policy", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"active-policy\"", ",", "rest_name", "=", "\"active-policy\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Active policy set'", ",", "u'cli-incomplete-no'", ":", "None", ",", "u'cli-suppress-no'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-fc-auth'", ",", "defining_module", "=", "'brocade-fc-auth'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"active_policy must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=active_policy.active_policy, is_container='container', presence=False, yang_name=\"active-policy\", rest_name=\"active-policy\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Active policy set', u'cli-incomplete-no': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fc-auth', defining_module='brocade-fc-auth', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__active_policy", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for active_policy, mapped from YANG variable /rbridge_id/secpolicy/active_policy (container) If this variable is read-only (config: false) in the source YANG file, then _set_active_policy is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_active_policy() directly. YANG Description: Set the Active policy
[ "Setter", "method", "for", "active_policy", "mapped", "from", "YANG", "variable", "/", "rbridge_id", "/", "secpolicy", "/", "active_policy", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_active_policy", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_active_policy", "()", "directly", "." ]
python
train
74.541667
charnley/rmsd
rmsd/calculate_rmsd.py
https://github.com/charnley/rmsd/blob/cd8af499fb63529a1b5b1f880fdb2dab2731544a/rmsd/calculate_rmsd.py#L222-L247
def quaternion_rotate(X, Y): """ Calculate the rotation Parameters ---------- X : array (N,D) matrix, where N is points and D is dimension. Y: array (N,D) matrix, where N is points and D is dimension. Returns ------- rot : matrix Rotation matrix (D,D) """ N = X.shape[0] W = np.asarray([makeW(*Y[k]) for k in range(N)]) Q = np.asarray([makeQ(*X[k]) for k in range(N)]) Qt_dot_W = np.asarray([np.dot(Q[k].T, W[k]) for k in range(N)]) W_minus_Q = np.asarray([W[k] - Q[k] for k in range(N)]) A = np.sum(Qt_dot_W, axis=0) eigen = np.linalg.eigh(A) r = eigen[1][:, eigen[0].argmax()] rot = quaternion_transform(r) return rot
[ "def", "quaternion_rotate", "(", "X", ",", "Y", ")", ":", "N", "=", "X", ".", "shape", "[", "0", "]", "W", "=", "np", ".", "asarray", "(", "[", "makeW", "(", "*", "Y", "[", "k", "]", ")", "for", "k", "in", "range", "(", "N", ")", "]", ")", "Q", "=", "np", ".", "asarray", "(", "[", "makeQ", "(", "*", "X", "[", "k", "]", ")", "for", "k", "in", "range", "(", "N", ")", "]", ")", "Qt_dot_W", "=", "np", ".", "asarray", "(", "[", "np", ".", "dot", "(", "Q", "[", "k", "]", ".", "T", ",", "W", "[", "k", "]", ")", "for", "k", "in", "range", "(", "N", ")", "]", ")", "W_minus_Q", "=", "np", ".", "asarray", "(", "[", "W", "[", "k", "]", "-", "Q", "[", "k", "]", "for", "k", "in", "range", "(", "N", ")", "]", ")", "A", "=", "np", ".", "sum", "(", "Qt_dot_W", ",", "axis", "=", "0", ")", "eigen", "=", "np", ".", "linalg", ".", "eigh", "(", "A", ")", "r", "=", "eigen", "[", "1", "]", "[", ":", ",", "eigen", "[", "0", "]", ".", "argmax", "(", ")", "]", "rot", "=", "quaternion_transform", "(", "r", ")", "return", "rot" ]
Calculate the rotation Parameters ---------- X : array (N,D) matrix, where N is points and D is dimension. Y: array (N,D) matrix, where N is points and D is dimension. Returns ------- rot : matrix Rotation matrix (D,D)
[ "Calculate", "the", "rotation" ]
python
train
26.923077
gautammishra/lyft-rides-python-sdk
lyft_rides/request.py
https://github.com/gautammishra/lyft-rides-python-sdk/blob/b6d96a0fceaf7dc3425153c418a8e25c57803431/lyft_rides/request.py#L131-L160
def _build_headers(self, method, auth_session): """Create headers for the request. Parameters method (str) HTTP method (e.g. 'POST'). auth_session (Session) The Session object containing OAuth 2.0 credentials. Returns headers (dict) Dictionary of access headers to attach to request. Raises LyftIllegalState (ApiError) Raised if headers are invalid. """ token_type = auth_session.token_type token = auth_session.oauth2credential.access_token if not self._authorization_headers_valid(token_type, token): message = 'Invalid token_type or token.' raise LyftIllegalState(message) headers = { 'Authorization': ' '.join([token_type, token]), } if method in http.BODY_METHODS: headers.update(http.DEFAULT_CONTENT_HEADERS) return headers
[ "def", "_build_headers", "(", "self", ",", "method", ",", "auth_session", ")", ":", "token_type", "=", "auth_session", ".", "token_type", "token", "=", "auth_session", ".", "oauth2credential", ".", "access_token", "if", "not", "self", ".", "_authorization_headers_valid", "(", "token_type", ",", "token", ")", ":", "message", "=", "'Invalid token_type or token.'", "raise", "LyftIllegalState", "(", "message", ")", "headers", "=", "{", "'Authorization'", ":", "' '", ".", "join", "(", "[", "token_type", ",", "token", "]", ")", ",", "}", "if", "method", "in", "http", ".", "BODY_METHODS", ":", "headers", ".", "update", "(", "http", ".", "DEFAULT_CONTENT_HEADERS", ")", "return", "headers" ]
Create headers for the request. Parameters method (str) HTTP method (e.g. 'POST'). auth_session (Session) The Session object containing OAuth 2.0 credentials. Returns headers (dict) Dictionary of access headers to attach to request. Raises LyftIllegalState (ApiError) Raised if headers are invalid.
[ "Create", "headers", "for", "the", "request", ".", "Parameters", "method", "(", "str", ")", "HTTP", "method", "(", "e", ".", "g", ".", "POST", ")", ".", "auth_session", "(", "Session", ")", "The", "Session", "object", "containing", "OAuth", "2", ".", "0", "credentials", ".", "Returns", "headers", "(", "dict", ")", "Dictionary", "of", "access", "headers", "to", "attach", "to", "request", ".", "Raises", "LyftIllegalState", "(", "ApiError", ")", "Raised", "if", "headers", "are", "invalid", "." ]
python
train
32.033333
NickMonzillo/SmartCloud
SmartCloud/__init__.py
https://github.com/NickMonzillo/SmartCloud/blob/481d1ef428427b452a8a787999c1d4a8868a3824/SmartCloud/__init__.py#L23-L27
def plot_word(self,position): '''Blits a rendered word on to the main display surface''' posrectangle = pygame.Rect(position,self.word_size) self.used_pos.append(posrectangle) self.cloud.blit(self.rendered_word,position)
[ "def", "plot_word", "(", "self", ",", "position", ")", ":", "posrectangle", "=", "pygame", ".", "Rect", "(", "position", ",", "self", ".", "word_size", ")", "self", ".", "used_pos", ".", "append", "(", "posrectangle", ")", "self", ".", "cloud", ".", "blit", "(", "self", ".", "rendered_word", ",", "position", ")" ]
Blits a rendered word on to the main display surface
[ "Blits", "a", "rendered", "word", "on", "to", "the", "main", "display", "surface" ]
python
train
49.6
pymupdf/PyMuPDF
fitz/fitz.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L4147-L4154
def rect(self): """rect(self) -> PyObject *""" CheckParent(self) val = _fitz.Link_rect(self) val = Rect(val) return val
[ "def", "rect", "(", "self", ")", ":", "CheckParent", "(", "self", ")", "val", "=", "_fitz", ".", "Link_rect", "(", "self", ")", "val", "=", "Rect", "(", "val", ")", "return", "val" ]
rect(self) -> PyObject *
[ "rect", "(", "self", ")", "-", ">", "PyObject", "*" ]
python
train
19.25
BerkeleyAutomation/perception
perception/image.py
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/image.py#L1306-L1346
def segment_kmeans(self, rgb_weight, num_clusters, hue_weight=0.0): """ Segment a color image using KMeans based on spatial and color distances. Black pixels will automatically be assigned to their own 'background' cluster. Parameters ---------- rgb_weight : float weighting of RGB distance relative to spatial and hue distance num_clusters : int number of clusters to use hue_weight : float weighting of hue from hsv relative to spatial and RGB distance Returns ------- :obj:`SegmentationImage` image containing the segment labels """ # form features array label_offset = 1 nonzero_px = np.where(self.data != 0.0) nonzero_px = np.c_[nonzero_px[0], nonzero_px[1]] # get hsv data if specified color_vals = rgb_weight * \ self._data[nonzero_px[:, 0], nonzero_px[:, 1], :] if hue_weight > 0.0: hsv_data = cv2.cvtColor(self.data, cv2.COLOR_BGR2HSV) color_vals = np.c_[color_vals, hue_weight * hsv_data[nonzero_px[:, 0], nonzero_px[:, 1], :1]] features = np.c_[nonzero_px, color_vals.astype(np.float32)] # perform KMeans clustering kmeans = sc.KMeans(n_clusters=num_clusters) labels = kmeans.fit_predict(features) # create output label array label_im = np.zeros([self.height, self.width]).astype(np.uint8) label_im[nonzero_px[:, 0], nonzero_px[:, 1]] = labels + label_offset return SegmentationImage(label_im, frame=self.frame)
[ "def", "segment_kmeans", "(", "self", ",", "rgb_weight", ",", "num_clusters", ",", "hue_weight", "=", "0.0", ")", ":", "# form features array", "label_offset", "=", "1", "nonzero_px", "=", "np", ".", "where", "(", "self", ".", "data", "!=", "0.0", ")", "nonzero_px", "=", "np", ".", "c_", "[", "nonzero_px", "[", "0", "]", ",", "nonzero_px", "[", "1", "]", "]", "# get hsv data if specified", "color_vals", "=", "rgb_weight", "*", "self", ".", "_data", "[", "nonzero_px", "[", ":", ",", "0", "]", ",", "nonzero_px", "[", ":", ",", "1", "]", ",", ":", "]", "if", "hue_weight", ">", "0.0", ":", "hsv_data", "=", "cv2", ".", "cvtColor", "(", "self", ".", "data", ",", "cv2", ".", "COLOR_BGR2HSV", ")", "color_vals", "=", "np", ".", "c_", "[", "color_vals", ",", "hue_weight", "*", "hsv_data", "[", "nonzero_px", "[", ":", ",", "0", "]", ",", "nonzero_px", "[", ":", ",", "1", "]", ",", ":", "1", "]", "]", "features", "=", "np", ".", "c_", "[", "nonzero_px", ",", "color_vals", ".", "astype", "(", "np", ".", "float32", ")", "]", "# perform KMeans clustering", "kmeans", "=", "sc", ".", "KMeans", "(", "n_clusters", "=", "num_clusters", ")", "labels", "=", "kmeans", ".", "fit_predict", "(", "features", ")", "# create output label array", "label_im", "=", "np", ".", "zeros", "(", "[", "self", ".", "height", ",", "self", ".", "width", "]", ")", ".", "astype", "(", "np", ".", "uint8", ")", "label_im", "[", "nonzero_px", "[", ":", ",", "0", "]", ",", "nonzero_px", "[", ":", ",", "1", "]", "]", "=", "labels", "+", "label_offset", "return", "SegmentationImage", "(", "label_im", ",", "frame", "=", "self", ".", "frame", ")" ]
Segment a color image using KMeans based on spatial and color distances. Black pixels will automatically be assigned to their own 'background' cluster. Parameters ---------- rgb_weight : float weighting of RGB distance relative to spatial and hue distance num_clusters : int number of clusters to use hue_weight : float weighting of hue from hsv relative to spatial and RGB distance Returns ------- :obj:`SegmentationImage` image containing the segment labels
[ "Segment", "a", "color", "image", "using", "KMeans", "based", "on", "spatial", "and", "color", "distances", ".", "Black", "pixels", "will", "automatically", "be", "assigned", "to", "their", "own", "background", "cluster", "." ]
python
train
39.463415
eirannejad/Revit-Journal-Maker
rjm/__init__.py
https://github.com/eirannejad/Revit-Journal-Maker/blob/09a4f27da6d183f63a2c93ed99dca8a8590d5241/rjm/__init__.py#L326-L335
def import_family(self, rfa_file): """Append a import family entry to the journal. This instructs Revit to import a family into the opened model. Args: rfa_file (str): full path of the family file """ self._add_entry(templates.IMPORT_FAMILY .format(family_file=rfa_file))
[ "def", "import_family", "(", "self", ",", "rfa_file", ")", ":", "self", ".", "_add_entry", "(", "templates", ".", "IMPORT_FAMILY", ".", "format", "(", "family_file", "=", "rfa_file", ")", ")" ]
Append a import family entry to the journal. This instructs Revit to import a family into the opened model. Args: rfa_file (str): full path of the family file
[ "Append", "a", "import", "family", "entry", "to", "the", "journal", "." ]
python
train
34.9
naphatkrit/click-extensions
click_extensions/utils.py
https://github.com/naphatkrit/click-extensions/blob/80fc1a70419fdaf00833649677a9031bdbf8c47b/click_extensions/utils.py#L4-L27
def echo_with_markers(text, marker='=', marker_color='blue', text_color=None): """Print a text to the screen with markers surrounding it. The output looks like: ======== text ======== with marker='=' right now. In the event that the terminal window is too small, the text is printed without markers. :param str text: the text to echo :param str marker: the marker to surround the text :param str marker_color: one of ('black' | 'red' | 'green' | 'yellow' | 'blue' | 'magenta' | 'cyan' | 'white') :param str text_color: one of ('black' | 'red' | 'green' | 'yellow' | 'blue' | 'magenta' | 'cyan' | 'white') """ text = ' ' + text + ' ' width, _ = click.get_terminal_size() if len(text) >= width: click.echo(text) # this is probably never the case else: leftovers = width - len(text) click.secho(marker * (leftovers / 2), fg=marker_color, nl=False) click.secho(text, nl=False, fg=text_color) click.secho(marker * (leftovers / 2 + leftovers % 2), fg=marker_color)
[ "def", "echo_with_markers", "(", "text", ",", "marker", "=", "'='", ",", "marker_color", "=", "'blue'", ",", "text_color", "=", "None", ")", ":", "text", "=", "' '", "+", "text", "+", "' '", "width", ",", "_", "=", "click", ".", "get_terminal_size", "(", ")", "if", "len", "(", "text", ")", ">=", "width", ":", "click", ".", "echo", "(", "text", ")", "# this is probably never the case", "else", ":", "leftovers", "=", "width", "-", "len", "(", "text", ")", "click", ".", "secho", "(", "marker", "*", "(", "leftovers", "/", "2", ")", ",", "fg", "=", "marker_color", ",", "nl", "=", "False", ")", "click", ".", "secho", "(", "text", ",", "nl", "=", "False", ",", "fg", "=", "text_color", ")", "click", ".", "secho", "(", "marker", "*", "(", "leftovers", "/", "2", "+", "leftovers", "%", "2", ")", ",", "fg", "=", "marker_color", ")" ]
Print a text to the screen with markers surrounding it. The output looks like: ======== text ======== with marker='=' right now. In the event that the terminal window is too small, the text is printed without markers. :param str text: the text to echo :param str marker: the marker to surround the text :param str marker_color: one of ('black' | 'red' | 'green' | 'yellow' | 'blue' | 'magenta' | 'cyan' | 'white') :param str text_color: one of ('black' | 'red' | 'green' | 'yellow' | 'blue' | 'magenta' | 'cyan' | 'white')
[ "Print", "a", "text", "to", "the", "screen", "with", "markers", "surrounding", "it", "." ]
python
train
43.291667
zhanglab/psamm
psamm/importer.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/importer.py#L703-L813
def main_bigg(args=None, urlopen=urlopen): """Entry point for BiGG import program. If the ``args`` are provided, these should be a list of strings that will be used instead of ``sys.argv[1:]``. This is mostly useful for testing. """ parser = argparse.ArgumentParser( description='Import from BiGG database') parser.add_argument('--dest', metavar='path', default='.', help='Destination directory (default is ".")') parser.add_argument('--no-exchange', action='store_true', help=('Disable importing exchange reactions as' ' exchange compound file.')) parser.add_argument('--split-subsystem', action='store_true', help='Enable splitting reaction files by subsystem') parser.add_argument('--merge-compounds', action='store_true', help=('Merge identical compounds occuring in various' ' compartments.')) parser.add_argument('--force', action='store_true', help='Enable overwriting model files') parser.add_argument('id', help='BiGG model to import ("list" to see all)') args = parser.parse_args(args) # Set up logging for the command line interface if 'PSAMM_DEBUG' in os.environ: level = getattr(logging, os.environ['PSAMM_DEBUG'].upper(), None) if level is not None: logging.basicConfig(level=level) else: logging.basicConfig( level=logging.INFO, format='%(levelname)s: %(message)s') # Print list of available models if args.id == 'list': print('Available models:') f = urlopen('http://bigg.ucsd.edu/api/v2/models') doc = json.loads(f.read().decode('utf-8')) results = doc['results'] id_width = min(max(len(result['bigg_id']) for result in results), 16) for result in sorted(results, key=lambda x: x.get('organism')): print('{} {}'.format( result.get('bigg_id').ljust(id_width), result.get('organism'))) return 0 importer_entry = None try: importer_entry = next( pkg_resources.iter_entry_points('psamm.importer', 'JSON')) except StopIteration: logger.error('Failed to locate the COBRA JSON model importer!') sys.exit(-1) importer_class = importer_entry.load() importer = importer_class() try: f = urlopen( 'http://bigg.ucsd.edu/api/v2/models/{}/download'.format( url_quote(args.id))) model = importer.import_model(codecs.getreader('utf-8')(f)) except ModelLoadError as e: logger.error('Failed to load model!', exc_info=True) importer.help() parser.error(text_type(e)) except ParseError as e: logger.error('Failed to parse model!', exc_info=True) logger.error(text_type(e)) sys.exit(-1) if args.merge_compounds: compounds_before = len(model.compounds) sbml.merge_equivalent_compounds(model) if len(model.compounds) < compounds_before: logger.info( 'Merged {} compound entries into {} entries by' ' removing duplicates in various compartments'.format( compounds_before, len(model.compounds))) print('Model: {}'.format(model.name)) print('- Biomass reaction: {}'.format(model.biomass_reaction)) print('- Compartments: {}'.format(len(model.compartments))) print('- Compounds: {}'.format(len(model.compounds))) print('- Reactions: {}'.format(len(model.reactions))) print('- Genes: {}'.format(count_genes(model))) # Check if dest directory is empty. If we get an error assume that the # directory does not exist. dest_is_empty = False try: dest_is_empty = len(os.listdir(args.dest)) == 0 except OSError: dest_is_empty = True if not dest_is_empty: if not args.force: logger.error('Destination directory is not empty. Use --force' ' option to proceed anyway, overwriting any existing' ' files in {}'.format(args.dest)) return 1 else: logger.warning('Destination directory is not empty, overwriting' ' existing files in {}'.format(args.dest)) # Create destination directory if not exists dest = args.dest mkdir_p(dest) convert_exchange = not args.no_exchange write_yaml_model(model, dest, convert_exchange=convert_exchange, split_subsystem=args.split_subsystem)
[ "def", "main_bigg", "(", "args", "=", "None", ",", "urlopen", "=", "urlopen", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Import from BiGG database'", ")", "parser", ".", "add_argument", "(", "'--dest'", ",", "metavar", "=", "'path'", ",", "default", "=", "'.'", ",", "help", "=", "'Destination directory (default is \".\")'", ")", "parser", ".", "add_argument", "(", "'--no-exchange'", ",", "action", "=", "'store_true'", ",", "help", "=", "(", "'Disable importing exchange reactions as'", "' exchange compound file.'", ")", ")", "parser", ".", "add_argument", "(", "'--split-subsystem'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Enable splitting reaction files by subsystem'", ")", "parser", ".", "add_argument", "(", "'--merge-compounds'", ",", "action", "=", "'store_true'", ",", "help", "=", "(", "'Merge identical compounds occuring in various'", "' compartments.'", ")", ")", "parser", ".", "add_argument", "(", "'--force'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Enable overwriting model files'", ")", "parser", ".", "add_argument", "(", "'id'", ",", "help", "=", "'BiGG model to import (\"list\" to see all)'", ")", "args", "=", "parser", ".", "parse_args", "(", "args", ")", "# Set up logging for the command line interface", "if", "'PSAMM_DEBUG'", "in", "os", ".", "environ", ":", "level", "=", "getattr", "(", "logging", ",", "os", ".", "environ", "[", "'PSAMM_DEBUG'", "]", ".", "upper", "(", ")", ",", "None", ")", "if", "level", "is", "not", "None", ":", "logging", ".", "basicConfig", "(", "level", "=", "level", ")", "else", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ",", "format", "=", "'%(levelname)s: %(message)s'", ")", "# Print list of available models", "if", "args", ".", "id", "==", "'list'", ":", "print", "(", "'Available models:'", ")", "f", "=", "urlopen", "(", "'http://bigg.ucsd.edu/api/v2/models'", ")", "doc", "=", "json", ".", "loads", "(", "f", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", ")", "results", "=", "doc", "[", "'results'", "]", "id_width", "=", "min", "(", "max", "(", "len", "(", "result", "[", "'bigg_id'", "]", ")", "for", "result", "in", "results", ")", ",", "16", ")", "for", "result", "in", "sorted", "(", "results", ",", "key", "=", "lambda", "x", ":", "x", ".", "get", "(", "'organism'", ")", ")", ":", "print", "(", "'{} {}'", ".", "format", "(", "result", ".", "get", "(", "'bigg_id'", ")", ".", "ljust", "(", "id_width", ")", ",", "result", ".", "get", "(", "'organism'", ")", ")", ")", "return", "0", "importer_entry", "=", "None", "try", ":", "importer_entry", "=", "next", "(", "pkg_resources", ".", "iter_entry_points", "(", "'psamm.importer'", ",", "'JSON'", ")", ")", "except", "StopIteration", ":", "logger", ".", "error", "(", "'Failed to locate the COBRA JSON model importer!'", ")", "sys", ".", "exit", "(", "-", "1", ")", "importer_class", "=", "importer_entry", ".", "load", "(", ")", "importer", "=", "importer_class", "(", ")", "try", ":", "f", "=", "urlopen", "(", "'http://bigg.ucsd.edu/api/v2/models/{}/download'", ".", "format", "(", "url_quote", "(", "args", ".", "id", ")", ")", ")", "model", "=", "importer", ".", "import_model", "(", "codecs", ".", "getreader", "(", "'utf-8'", ")", "(", "f", ")", ")", "except", "ModelLoadError", "as", "e", ":", "logger", ".", "error", "(", "'Failed to load model!'", ",", "exc_info", "=", "True", ")", "importer", ".", "help", "(", ")", "parser", ".", "error", "(", "text_type", "(", "e", ")", ")", "except", 
"ParseError", "as", "e", ":", "logger", ".", "error", "(", "'Failed to parse model!'", ",", "exc_info", "=", "True", ")", "logger", ".", "error", "(", "text_type", "(", "e", ")", ")", "sys", ".", "exit", "(", "-", "1", ")", "if", "args", ".", "merge_compounds", ":", "compounds_before", "=", "len", "(", "model", ".", "compounds", ")", "sbml", ".", "merge_equivalent_compounds", "(", "model", ")", "if", "len", "(", "model", ".", "compounds", ")", "<", "compounds_before", ":", "logger", ".", "info", "(", "'Merged {} compound entries into {} entries by'", "' removing duplicates in various compartments'", ".", "format", "(", "compounds_before", ",", "len", "(", "model", ".", "compounds", ")", ")", ")", "print", "(", "'Model: {}'", ".", "format", "(", "model", ".", "name", ")", ")", "print", "(", "'- Biomass reaction: {}'", ".", "format", "(", "model", ".", "biomass_reaction", ")", ")", "print", "(", "'- Compartments: {}'", ".", "format", "(", "len", "(", "model", ".", "compartments", ")", ")", ")", "print", "(", "'- Compounds: {}'", ".", "format", "(", "len", "(", "model", ".", "compounds", ")", ")", ")", "print", "(", "'- Reactions: {}'", ".", "format", "(", "len", "(", "model", ".", "reactions", ")", ")", ")", "print", "(", "'- Genes: {}'", ".", "format", "(", "count_genes", "(", "model", ")", ")", ")", "# Check if dest directory is empty. If we get an error assume that the", "# directory does not exist.", "dest_is_empty", "=", "False", "try", ":", "dest_is_empty", "=", "len", "(", "os", ".", "listdir", "(", "args", ".", "dest", ")", ")", "==", "0", "except", "OSError", ":", "dest_is_empty", "=", "True", "if", "not", "dest_is_empty", ":", "if", "not", "args", ".", "force", ":", "logger", ".", "error", "(", "'Destination directory is not empty. Use --force'", "' option to proceed anyway, overwriting any existing'", "' files in {}'", ".", "format", "(", "args", ".", "dest", ")", ")", "return", "1", "else", ":", "logger", ".", "warning", "(", "'Destination directory is not empty, overwriting'", "' existing files in {}'", ".", "format", "(", "args", ".", "dest", ")", ")", "# Create destination directory if not exists", "dest", "=", "args", ".", "dest", "mkdir_p", "(", "dest", ")", "convert_exchange", "=", "not", "args", ".", "no_exchange", "write_yaml_model", "(", "model", ",", "dest", ",", "convert_exchange", "=", "convert_exchange", ",", "split_subsystem", "=", "args", ".", "split_subsystem", ")" ]
Entry point for BiGG import program. If the ``args`` are provided, these should be a list of strings that will be used instead of ``sys.argv[1:]``. This is mostly useful for testing.
[ "Entry", "point", "for", "BiGG", "import", "program", "." ]
python
train
40.774775
spacetelescope/stsci.tools
lib/stsci/tools/capable.py
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/capable.py#L89-L106
def get_dc_owner(raises, mask_if_self): """ Convenience function to return owner of /dev/console. If raises is True, this raises an exception on any error. If not, it returns any error string as the owner name. If owner is self, and if mask_if_self, returns "<self>".""" try: from pwd import getpwuid owner_uid = os.stat('/dev/console').st_uid self_uid = os.getuid() if mask_if_self and owner_uid == self_uid: return "<self>" owner_name = getpwuid(owner_uid).pw_name return owner_name except Exception as e: if raises: raise e else: return str(e)
[ "def", "get_dc_owner", "(", "raises", ",", "mask_if_self", ")", ":", "try", ":", "from", "pwd", "import", "getpwuid", "owner_uid", "=", "os", ".", "stat", "(", "'/dev/console'", ")", ".", "st_uid", "self_uid", "=", "os", ".", "getuid", "(", ")", "if", "mask_if_self", "and", "owner_uid", "==", "self_uid", ":", "return", "\"<self>\"", "owner_name", "=", "getpwuid", "(", "owner_uid", ")", ".", "pw_name", "return", "owner_name", "except", "Exception", "as", "e", ":", "if", "raises", ":", "raise", "e", "else", ":", "return", "str", "(", "e", ")" ]
Convenience function to return owner of /dev/console. If raises is True, this raises an exception on any error. If not, it returns any error string as the owner name. If owner is self, and if mask_if_self, returns "<self>".
[ "Convenience", "function", "to", "return", "owner", "of", "/", "dev", "/", "console", ".", "If", "raises", "is", "True", "this", "raises", "an", "exception", "on", "any", "error", ".", "If", "not", "it", "returns", "any", "error", "string", "as", "the", "owner", "name", ".", "If", "owner", "is", "self", "and", "if", "mask_if_self", "returns", "<self", ">", "." ]
python
train
36.333333
BD2KGenomics/toil-scripts
src/toil_scripts/bwa_alignment/old_alignment_script/batch_align.py
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/bwa_alignment/old_alignment_script/batch_align.py#L164-L177
def spawn_batch_jobs(job, shared_ids, input_args): """ Spawns an alignment job for every sample in the input configuration file """ samples = [] config = input_args['config'] with open(config, 'r') as f_in: for line in f_in: line = line.strip().split(',') uuid = line[0] urls = line[1:] samples.append((uuid, urls)) for sample in samples: job.addChildJobFn(alignment, shared_ids, input_args, sample, cores=32, memory='20 G', disk='100 G')
[ "def", "spawn_batch_jobs", "(", "job", ",", "shared_ids", ",", "input_args", ")", ":", "samples", "=", "[", "]", "config", "=", "input_args", "[", "'config'", "]", "with", "open", "(", "config", ",", "'r'", ")", "as", "f_in", ":", "for", "line", "in", "f_in", ":", "line", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "','", ")", "uuid", "=", "line", "[", "0", "]", "urls", "=", "line", "[", "1", ":", "]", "samples", ".", "append", "(", "(", "uuid", ",", "urls", ")", ")", "for", "sample", "in", "samples", ":", "job", ".", "addChildJobFn", "(", "alignment", ",", "shared_ids", ",", "input_args", ",", "sample", ",", "cores", "=", "32", ",", "memory", "=", "'20 G'", ",", "disk", "=", "'100 G'", ")" ]
Spawns an alignment job for every sample in the input configuration file
[ "Spawns", "an", "alignment", "job", "for", "every", "sample", "in", "the", "input", "configuration", "file" ]
python
train
36.928571
senaite/senaite.core
bika/lims/browser/fields/aranalysesfield.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/fields/aranalysesfield.py#L255-L285
def _to_service(self, thing): """Convert to Analysis Service :param thing: UID/Catalog Brain/Object/Something :returns: Analysis Service object or None """ # Convert UIDs to objects if api.is_uid(thing): thing = api.get_object_by_uid(thing, None) # Bail out if the thing is not a valid object if not api.is_object(thing): logger.warn("'{}' is not a valid object!".format(repr(thing))) return None # Ensure we have an object here and not a brain obj = api.get_object(thing) if IAnalysisService.providedBy(obj): return obj if IAnalysis.providedBy(obj): return obj.getAnalysisService() # An object, but neither an Analysis nor AnalysisService? # This should never happen. portal_type = api.get_portal_type(obj) logger.error("ARAnalysesField doesn't accept objects from {} type. " "The object will be dismissed.".format(portal_type)) return None
[ "def", "_to_service", "(", "self", ",", "thing", ")", ":", "# Convert UIDs to objects", "if", "api", ".", "is_uid", "(", "thing", ")", ":", "thing", "=", "api", ".", "get_object_by_uid", "(", "thing", ",", "None", ")", "# Bail out if the thing is not a valid object", "if", "not", "api", ".", "is_object", "(", "thing", ")", ":", "logger", ".", "warn", "(", "\"'{}' is not a valid object!\"", ".", "format", "(", "repr", "(", "thing", ")", ")", ")", "return", "None", "# Ensure we have an object here and not a brain", "obj", "=", "api", ".", "get_object", "(", "thing", ")", "if", "IAnalysisService", ".", "providedBy", "(", "obj", ")", ":", "return", "obj", "if", "IAnalysis", ".", "providedBy", "(", "obj", ")", ":", "return", "obj", ".", "getAnalysisService", "(", ")", "# An object, but neither an Analysis nor AnalysisService?", "# This should never happen.", "portal_type", "=", "api", ".", "get_portal_type", "(", "obj", ")", "logger", ".", "error", "(", "\"ARAnalysesField doesn't accept objects from {} type. \"", "\"The object will be dismissed.\"", ".", "format", "(", "portal_type", ")", ")", "return", "None" ]
Convert to Analysis Service :param thing: UID/Catalog Brain/Object/Something :returns: Analysis Service object or None
[ "Convert", "to", "Analysis", "Service" ]
python
train
33.387097
decryptus/sonicprobe
sonicprobe/libs/xys.py
https://github.com/decryptus/sonicprobe/blob/72f73f3a40d2982d79ad68686e36aa31d94b76f8/sonicprobe/libs/xys.py#L350-L374
def add_parameterized_validator(param_validator, base_tag, tag_prefix=None): """ Add a parameterized validator for the given tag prefix. If tag_prefix is None, it is automatically constructed as u'!~%s(' % param_validator.__name__ A parametrized validator is a function that accepts a document node (in the form of a Python object), a schema node (also a Python object), and other parameters (integer or string) that directly come from its complete YAML name in the schema. It returns True if the document node is valid according to the schema node. Note that the validator function does not have to recurse in sub-nodes, because XYS already does that. """ # pylint: disable-msg=C0111,W0621 if not tag_prefix: tag_prefix = u'!~%s(' % param_validator.__name__ def multi_constructor(loader, tag_suffix, node): def temp_validator(node, schema): return param_validator(node, schema, *_split_params(tag_prefix, tag_suffix)) temp_validator.__name__ = str(tag_prefix + tag_suffix) return ContructorValidatorNode(base_tag, base_tag, temp_validator)(loader, node) yaml.add_multi_constructor(tag_prefix, multi_constructor)
[ "def", "add_parameterized_validator", "(", "param_validator", ",", "base_tag", ",", "tag_prefix", "=", "None", ")", ":", "# pylint: disable-msg=C0111,W0621", "if", "not", "tag_prefix", ":", "tag_prefix", "=", "u'!~%s('", "%", "param_validator", ".", "__name__", "def", "multi_constructor", "(", "loader", ",", "tag_suffix", ",", "node", ")", ":", "def", "temp_validator", "(", "node", ",", "schema", ")", ":", "return", "param_validator", "(", "node", ",", "schema", ",", "*", "_split_params", "(", "tag_prefix", ",", "tag_suffix", ")", ")", "temp_validator", ".", "__name__", "=", "str", "(", "tag_prefix", "+", "tag_suffix", ")", "return", "ContructorValidatorNode", "(", "base_tag", ",", "base_tag", ",", "temp_validator", ")", "(", "loader", ",", "node", ")", "yaml", ".", "add_multi_constructor", "(", "tag_prefix", ",", "multi_constructor", ")" ]
Add a parameterized validator for the given tag prefix. If tag_prefix is None, it is automatically constructed as u'!~%s(' % param_validator.__name__ A parametrized validator is a function that accepts a document node (in the form of a Python object), a schema node (also a Python object), and other parameters (integer or string) that directly come from its complete YAML name in the schema. It returns True if the document node is valid according to the schema node. Note that the validator function does not have to recurse in sub-nodes, because XYS already does that.
[ "Add", "a", "parameterized", "validator", "for", "the", "given", "tag", "prefix", ".", "If", "tag_prefix", "is", "None", "it", "is", "automatically", "constructed", "as", "u", "!~%s", "(", "%", "param_validator", ".", "__name__", "A", "parametrized", "validator", "is", "a", "function", "that", "accepts", "a", "document", "node", "(", "in", "the", "form", "of", "a", "Python", "object", ")", "a", "schema", "node", "(", "also", "a", "Python", "object", ")", "and", "other", "parameters", "(", "integer", "or", "string", ")", "that", "directly", "come", "from", "its", "complete", "YAML", "name", "in", "the", "schema", ".", "It", "returns", "True", "if", "the", "document", "node", "is", "valid", "according", "to", "the", "schema", "node", ".", "Note", "that", "the", "validator", "function", "does", "not", "have", "to", "recurse", "in", "sub", "-", "nodes", "because", "XYS", "already", "does", "that", "." ]
python
train
51.08
bram85/topydo
topydo/lib/RelativeDate.py
https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/lib/RelativeDate.py#L106-L152
def relative_date_to_date(p_date, p_offset=None): """ Transforms a relative date into a date object. The following formats are understood: * [0-9][dwmy] * 'yesterday', 'today' or 'tomorrow' * days of the week (in full or abbreviated) """ result = None p_date = p_date.lower() p_offset = p_offset or date.today() relative = re.match('(?P<length>-?[0-9]+)(?P<period>[dwmyb])$', p_date, re.I) monday = 'mo(n(day)?)?$' tuesday = 'tu(e(sday)?)?$' wednesday = 'we(d(nesday)?)?$' thursday = 'th(u(rsday)?)?$' friday = 'fr(i(day)?)?$' saturday = 'sa(t(urday)?)?$' sunday = 'su(n(day)?)?$' weekday = re.match('|'.join( [monday, tuesday, wednesday, thursday, friday, saturday, sunday]), p_date) if relative: length = relative.group('length') period = relative.group('period') result = _convert_pattern(length, period, p_offset) elif weekday: result = _convert_weekday_pattern(weekday.group(0)) elif re.match('tod(ay)?$', p_date): result = _convert_pattern('0', 'd') elif re.match('tom(orrow)?$', p_date): result = _convert_pattern('1', 'd') elif re.match('yes(terday)?$', p_date): result = _convert_pattern('-1', 'd') return result
[ "def", "relative_date_to_date", "(", "p_date", ",", "p_offset", "=", "None", ")", ":", "result", "=", "None", "p_date", "=", "p_date", ".", "lower", "(", ")", "p_offset", "=", "p_offset", "or", "date", ".", "today", "(", ")", "relative", "=", "re", ".", "match", "(", "'(?P<length>-?[0-9]+)(?P<period>[dwmyb])$'", ",", "p_date", ",", "re", ".", "I", ")", "monday", "=", "'mo(n(day)?)?$'", "tuesday", "=", "'tu(e(sday)?)?$'", "wednesday", "=", "'we(d(nesday)?)?$'", "thursday", "=", "'th(u(rsday)?)?$'", "friday", "=", "'fr(i(day)?)?$'", "saturday", "=", "'sa(t(urday)?)?$'", "sunday", "=", "'su(n(day)?)?$'", "weekday", "=", "re", ".", "match", "(", "'|'", ".", "join", "(", "[", "monday", ",", "tuesday", ",", "wednesday", ",", "thursday", ",", "friday", ",", "saturday", ",", "sunday", "]", ")", ",", "p_date", ")", "if", "relative", ":", "length", "=", "relative", ".", "group", "(", "'length'", ")", "period", "=", "relative", ".", "group", "(", "'period'", ")", "result", "=", "_convert_pattern", "(", "length", ",", "period", ",", "p_offset", ")", "elif", "weekday", ":", "result", "=", "_convert_weekday_pattern", "(", "weekday", ".", "group", "(", "0", ")", ")", "elif", "re", ".", "match", "(", "'tod(ay)?$'", ",", "p_date", ")", ":", "result", "=", "_convert_pattern", "(", "'0'", ",", "'d'", ")", "elif", "re", ".", "match", "(", "'tom(orrow)?$'", ",", "p_date", ")", ":", "result", "=", "_convert_pattern", "(", "'1'", ",", "'d'", ")", "elif", "re", ".", "match", "(", "'yes(terday)?$'", ",", "p_date", ")", ":", "result", "=", "_convert_pattern", "(", "'-1'", ",", "'d'", ")", "return", "result" ]
Transforms a relative date into a date object. The following formats are understood: * [0-9][dwmy] * 'yesterday', 'today' or 'tomorrow' * days of the week (in full or abbreviated)
[ "Transforms", "a", "relative", "date", "into", "a", "date", "object", "." ]
python
train
27.276596
allanlei/django-argparse-command
argcmd/management/base.py
https://github.com/allanlei/django-argparse-command/blob/27ea77e1dd0cf2f0567223735762a5ebd14fdaef/argcmd/management/base.py#L26-L36
def run_from_argv(self, argv): """ Set up any environment changes requested (e.g., Python path and Django settings), then run this command. """ parser = self.create_parser(argv[0], argv[1]) self.arguments = parser.parse_args(argv[2:]) handle_default_options(self.arguments) options = vars(self.arguments) self.execute(**options)
[ "def", "run_from_argv", "(", "self", ",", "argv", ")", ":", "parser", "=", "self", ".", "create_parser", "(", "argv", "[", "0", "]", ",", "argv", "[", "1", "]", ")", "self", ".", "arguments", "=", "parser", ".", "parse_args", "(", "argv", "[", "2", ":", "]", ")", "handle_default_options", "(", "self", ".", "arguments", ")", "options", "=", "vars", "(", "self", ".", "arguments", ")", "self", ".", "execute", "(", "*", "*", "options", ")" ]
Set up any environment changes requested (e.g., Python path and Django settings), then run this command.
[ "Set", "up", "any", "environment", "changes", "requested", "(", "e", ".", "g", ".", "Python", "path", "and", "Django", "settings", ")", "then", "run", "this", "command", "." ]
python
test
35.545455
bjmorgan/lattice_mc
lattice_mc/simulation.py
https://github.com/bjmorgan/lattice_mc/blob/7fa7be85f2f23a2d8dfd0830ecdb89d0dbf2bfd5/lattice_mc/simulation.py#L182-L201
def old_tracer_correlation( self ): """ Deprecated tracer correlation factor for this simulation. Args: None Returns: (Float): The tracer correlation factor, f. Notes: This function assumes that the jump distance between sites has been normalised to a=1. If the jump distance is not equal to 1 then the value returned by this function should be divided by a^2. Even better, use `self.tracer_correlation`. """ if self.has_run: return self.atoms.sum_dr_squared() / float( self.number_of_jumps ) else: return None
[ "def", "old_tracer_correlation", "(", "self", ")", ":", "if", "self", ".", "has_run", ":", "return", "self", ".", "atoms", ".", "sum_dr_squared", "(", ")", "/", "float", "(", "self", ".", "number_of_jumps", ")", "else", ":", "return", "None" ]
Deprecated tracer correlation factor for this simulation. Args: None Returns: (Float): The tracer correlation factor, f. Notes: This function assumes that the jump distance between sites has been normalised to a=1. If the jump distance is not equal to 1 then the value returned by this function should be divided by a^2. Even better, use `self.tracer_correlation`.
[ "Deprecated", "tracer", "correlation", "factor", "for", "this", "simulation", ".", "Args", ":", "None" ]
python
train
33.05
brunato/lograptor
lograptor/timedate.py
https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/timedate.py#L181-L203
def strftimegen(start_dt, end_dt): """ Return a generator function for datetime format strings. The generator produce a day-by-day sequence starting from the first datetime to the second datetime argument. """ if start_dt > end_dt: raise ValueError("the start datetime is after the end datetime: (%r,%r)" % (start_dt, end_dt)) def iterftime(string): date_subs = [i for i in DATE_FORMATS if i[1].search(string) is not None] if not date_subs: yield string else: dt = start_dt date_path = string while end_dt >= dt: for item in date_subs: date_path = item[1].sub(dt.strftime(item[0]), date_path) yield date_path dt = dt + datetime.timedelta(days=1) return iterftime
[ "def", "strftimegen", "(", "start_dt", ",", "end_dt", ")", ":", "if", "start_dt", ">", "end_dt", ":", "raise", "ValueError", "(", "\"the start datetime is after the end datetime: (%r,%r)\"", "%", "(", "start_dt", ",", "end_dt", ")", ")", "def", "iterftime", "(", "string", ")", ":", "date_subs", "=", "[", "i", "for", "i", "in", "DATE_FORMATS", "if", "i", "[", "1", "]", ".", "search", "(", "string", ")", "is", "not", "None", "]", "if", "not", "date_subs", ":", "yield", "string", "else", ":", "dt", "=", "start_dt", "date_path", "=", "string", "while", "end_dt", ">=", "dt", ":", "for", "item", "in", "date_subs", ":", "date_path", "=", "item", "[", "1", "]", ".", "sub", "(", "dt", ".", "strftime", "(", "item", "[", "0", "]", ")", ",", "date_path", ")", "yield", "date_path", "dt", "=", "dt", "+", "datetime", ".", "timedelta", "(", "days", "=", "1", ")", "return", "iterftime" ]
Return a generator function for datetime format strings. The generator produce a day-by-day sequence starting from the first datetime to the second datetime argument.
[ "Return", "a", "generator", "function", "for", "datetime", "format", "strings", ".", "The", "generator", "produce", "a", "day", "-", "by", "-", "day", "sequence", "starting", "from", "the", "first", "datetime", "to", "the", "second", "datetime", "argument", "." ]
python
train
35.73913
mitsei/dlkit
dlkit/json_/assessment/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/sessions.py#L4804-L4821
def get_assessments_by_banks(self, bank_ids): """Gets the list of ``Assessments`` corresponding to a list of ``Banks``. arg: bank_ids (osid.id.IdList): list of bank ``Ids`` return: (osid.assessment.AssessmentList) - list of assessments raise: NullArgument - ``bank_ids`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinSession.get_resources_by_bins assessment_list = [] for bank_id in bank_ids: assessment_list += list( self.get_assessments_by_bank(bank_id)) return objects.AssessmentList(assessment_list)
[ "def", "get_assessments_by_banks", "(", "self", ",", "bank_ids", ")", ":", "# Implemented from template for", "# osid.resource.ResourceBinSession.get_resources_by_bins", "assessment_list", "=", "[", "]", "for", "bank_id", "in", "bank_ids", ":", "assessment_list", "+=", "list", "(", "self", ".", "get_assessments_by_bank", "(", "bank_id", ")", ")", "return", "objects", ".", "AssessmentList", "(", "assessment_list", ")" ]
Gets the list of ``Assessments`` corresponding to a list of ``Banks``. arg: bank_ids (osid.id.IdList): list of bank ``Ids`` return: (osid.assessment.AssessmentList) - list of assessments raise: NullArgument - ``bank_ids`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "list", "of", "Assessments", "corresponding", "to", "a", "list", "of", "Banks", "." ]
python
train
45.777778
gabstopper/smc-python
smc/core/node.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/core/node.py#L347-L360
def go_standby(self, comment=None): """ Executes a Go-Standby operation on the specified node. To get the status of the current node/s, run :func:`status` :param str comment: optional comment to audit :raises NodeCommandFailed: engine cannot go standby :return: None """ self.make_request( NodeCommandFailed, method='update', resource='go_standby', params={'comment': comment})
[ "def", "go_standby", "(", "self", ",", "comment", "=", "None", ")", ":", "self", ".", "make_request", "(", "NodeCommandFailed", ",", "method", "=", "'update'", ",", "resource", "=", "'go_standby'", ",", "params", "=", "{", "'comment'", ":", "comment", "}", ")" ]
Executes a Go-Standby operation on the specified node. To get the status of the current node/s, run :func:`status` :param str comment: optional comment to audit :raises NodeCommandFailed: engine cannot go standby :return: None
[ "Executes", "a", "Go", "-", "Standby", "operation", "on", "the", "specified", "node", ".", "To", "get", "the", "status", "of", "the", "current", "node", "/", "s", "run", ":", "func", ":", "status" ]
python
train
34.071429
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/history.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/history.py#L203-L228
def get_session_info(self, session=0): """get info about a session Parameters ---------- session : int Session number to retrieve. The current session is 0, and negative numbers count back from current session, so -1 is previous session. Returns ------- (session_id [int], start [datetime], end [datetime], num_cmds [int], remark [unicode]) Sessions that are running or did not exit cleanly will have `end=None` and `num_cmds=None`. """ if session <= 0: session += self.session_number query = "SELECT * from sessions where session == ?" return self.db.execute(query, (session,)).fetchone()
[ "def", "get_session_info", "(", "self", ",", "session", "=", "0", ")", ":", "if", "session", "<=", "0", ":", "session", "+=", "self", ".", "session_number", "query", "=", "\"SELECT * from sessions where session == ?\"", "return", "self", ".", "db", ".", "execute", "(", "query", ",", "(", "session", ",", ")", ")", ".", "fetchone", "(", ")" ]
get info about a session Parameters ---------- session : int Session number to retrieve. The current session is 0, and negative numbers count back from current session, so -1 is previous session. Returns ------- (session_id [int], start [datetime], end [datetime], num_cmds [int], remark [unicode]) Sessions that are running or did not exit cleanly will have `end=None` and `num_cmds=None`.
[ "get", "info", "about", "a", "session" ]
python
test
27.692308
basecrm/basecrm-python
basecrm/services.py
https://github.com/basecrm/basecrm-python/blob/7c1cf97dbaba8aeb9ff89f8a54f945a8702349f6/basecrm/services.py#L186-L200
def retrieve(self, id) : """ Retrieve a single contact Returns a single contact available to the user, according to the unique contact ID provided If the specified contact does not exist, the request will return an error :calls: ``get /contacts/{id}`` :param int id: Unique identifier of a Contact. :return: Dictionary that support attriubte-style access and represent Contact resource. :rtype: dict """ _, _, contact = self.http_client.get("/contacts/{id}".format(id=id)) return contact
[ "def", "retrieve", "(", "self", ",", "id", ")", ":", "_", ",", "_", ",", "contact", "=", "self", ".", "http_client", ".", "get", "(", "\"/contacts/{id}\"", ".", "format", "(", "id", "=", "id", ")", ")", "return", "contact" ]
Retrieve a single contact Returns a single contact available to the user, according to the unique contact ID provided If the specified contact does not exist, the request will return an error :calls: ``get /contacts/{id}`` :param int id: Unique identifier of a Contact. :return: Dictionary that support attriubte-style access and represent Contact resource. :rtype: dict
[ "Retrieve", "a", "single", "contact" ]
python
train
37.6
PyAr/fades
fades/cache.py
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/cache.py#L148-L163
def _select(self, current_venvs, requirements=None, interpreter='', uuid='', options=None): """Select which venv satisfy the received requirements.""" if uuid: logger.debug("Searching a venv by uuid: %s", uuid) venv = self._match_by_uuid(current_venvs, uuid) else: logger.debug("Searching a venv for: reqs=%s interpreter=%s options=%s", requirements, interpreter, options) venv = self._match_by_requirements(current_venvs, requirements, interpreter, options) if venv is None: logger.debug("No matching venv found :(") return logger.debug("Found a matching venv! %s", venv) return venv['metadata']
[ "def", "_select", "(", "self", ",", "current_venvs", ",", "requirements", "=", "None", ",", "interpreter", "=", "''", ",", "uuid", "=", "''", ",", "options", "=", "None", ")", ":", "if", "uuid", ":", "logger", ".", "debug", "(", "\"Searching a venv by uuid: %s\"", ",", "uuid", ")", "venv", "=", "self", ".", "_match_by_uuid", "(", "current_venvs", ",", "uuid", ")", "else", ":", "logger", ".", "debug", "(", "\"Searching a venv for: reqs=%s interpreter=%s options=%s\"", ",", "requirements", ",", "interpreter", ",", "options", ")", "venv", "=", "self", ".", "_match_by_requirements", "(", "current_venvs", ",", "requirements", ",", "interpreter", ",", "options", ")", "if", "venv", "is", "None", ":", "logger", ".", "debug", "(", "\"No matching venv found :(\"", ")", "return", "logger", ".", "debug", "(", "\"Found a matching venv! %s\"", ",", "venv", ")", "return", "venv", "[", "'metadata'", "]" ]
Select which venv satisfy the received requirements.
[ "Select", "which", "venv", "satisfy", "the", "received", "requirements", "." ]
python
train
45.5
scidash/sciunit
sciunit/models/backends.py
https://github.com/scidash/sciunit/blob/41b2e38c45c0776727ab1f281a572b65be19cea1/sciunit/models/backends.py#L95-L102
def set_disk_cache(self, results, key=None): """Store result in disk cache with key matching model state.""" if not getattr(self, 'disk_cache_location', False): self.init_disk_cache() disk_cache = shelve.open(self.disk_cache_location) key = self.model.hash if key is None else key disk_cache[key] = results disk_cache.close()
[ "def", "set_disk_cache", "(", "self", ",", "results", ",", "key", "=", "None", ")", ":", "if", "not", "getattr", "(", "self", ",", "'disk_cache_location'", ",", "False", ")", ":", "self", ".", "init_disk_cache", "(", ")", "disk_cache", "=", "shelve", ".", "open", "(", "self", ".", "disk_cache_location", ")", "key", "=", "self", ".", "model", ".", "hash", "if", "key", "is", "None", "else", "key", "disk_cache", "[", "key", "]", "=", "results", "disk_cache", ".", "close", "(", ")" ]
Store result in disk cache with key matching model state.
[ "Store", "result", "in", "disk", "cache", "with", "key", "matching", "model", "state", "." ]
python
train
47.25
wbond/oscrypto
oscrypto/_openssl/tls.py
https://github.com/wbond/oscrypto/blob/af778bf1c88bf6c4a7342f5353b130686a5bbe1c/oscrypto/_openssl/tls.py#L1125-L1137
def intermediates(self): """ A list of asn1crypto.x509.Certificate objects that were presented as intermediates by the server """ if self._ssl is None: self._raise_closed() if self._certificate is None: self._read_certificates() return self._intermediates
[ "def", "intermediates", "(", "self", ")", ":", "if", "self", ".", "_ssl", "is", "None", ":", "self", ".", "_raise_closed", "(", ")", "if", "self", ".", "_certificate", "is", "None", ":", "self", ".", "_read_certificates", "(", ")", "return", "self", ".", "_intermediates" ]
A list of asn1crypto.x509.Certificate objects that were presented as intermediates by the server
[ "A", "list", "of", "asn1crypto", ".", "x509", ".", "Certificate", "objects", "that", "were", "presented", "as", "intermediates", "by", "the", "server" ]
python
valid
25.076923
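The ``intermediates`` property is a lazy-load guard: refuse when the connection is closed, parse the peer's chain on first access, then return the cached list. A generic sketch of that shape follows; the class and its fields are invented, not oscrypto's API.

# Generic lazy-loading property guard, mirroring the shape of the accessor above.
class Connection:
    def __init__(self):
        self._ssl = object()              # pretend handle; None would mean "closed"
        self._certificate = None
        self._intermediates = None

    def _read_certificates(self):
        # Stub for the expensive parse of the peer's certificate chain.
        self._certificate = "leaf-cert"
        self._intermediates = ["intermediate-1", "intermediate-2"]

    @property
    def intermediates(self):
        if self._ssl is None:
            raise ConnectionError("connection is closed")
        if self._certificate is None:
            self._read_certificates()     # populate leaf + intermediates once
        return self._intermediates

conn = Connection()
print(conn.intermediates)                 # first access triggers the one-time load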
pacificclimate/cfmeta
cfmeta/cmipfile.py
https://github.com/pacificclimate/cfmeta/blob/a6eef78d0bce523bb44920ba96233f034b60316a/cfmeta/cmipfile.py#L176-L195
def get_var_name(nc):
    """Guesses the variable_name of an open NetCDF file
    """

    non_variable_names = [
        'lat',
        'lat_bnds',
        'lon',
        'lon_bnds',
        'time',
        'latitude',
        'longitude',
        'bnds'
    ]

    _vars = set(nc.variables.keys())
    _vars.difference_update(set(non_variable_names))

    if len(_vars) == 1:
        return _vars.pop()

    return None
[ "def", "get_var_name", "(", "nc", ")", ":", "non_variable_names", "=", "[", "'lat'", ",", "'lat_bnds'", ",", "'lon'", ",", "'lon_bnds'", ",", "'time'", ",", "'latitude'", ",", "'longitude'", ",", "'bnds'", "]", "_vars", "=", "set", "(", "nc", ".", "variables", ".", "keys", "(", ")", ")", "_vars", ".", "difference_update", "(", "set", "(", "non_variable_names", ")", ")", "if", "len", "(", "_vars", ")", "==", "1", ":", "return", "_vars", ".", "pop", "(", ")", "return", "None" ]
Guesses the variable_name of an open NetCDF file
[ "Guesses", "the", "variable_name", "of", "an", "open", "NetCDF", "file" ]
python
train
20
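The guess in ``get_var_name`` is simply "whatever variable is left after discarding coordinate names". A quick check of that logic against a faked handle exposing ``.variables`` (no real NetCDF file needed; the variable names are made up):

# Exercise get_var_name-style logic against a fake object exposing .variables.
class FakeNC:
    def __init__(self, variables):
        self.variables = variables

def guess_var_name(nc):
    non_variable_names = {'lat', 'lat_bnds', 'lon', 'lon_bnds', 'time',
                          'latitude', 'longitude', 'bnds'}
    candidates = set(nc.variables.keys()) - non_variable_names
    return candidates.pop() if len(candidates) == 1 else None

print(guess_var_name(FakeNC({'lat': 0, 'lon': 0, 'time': 0, 'tasmax': 0})))  # -> 'tasmax'
print(guess_var_name(FakeNC({'lat': 0, 'tasmax': 0, 'pr': 0})))              # -> None (ambiguous)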
timothydmorton/isochrones
isochrones/grid.py
https://github.com/timothydmorton/isochrones/blob/d84495573044c66db2fd6b959fe69e370757ea14/isochrones/grid.py#L101-L122
def _get_df(self):
        """Returns stellar model grid with desired bandpasses and with standard column names

        bands must be iterable, and are parsed according to :func:``get_band``
        """
        grids = {}
        df = pd.DataFrame()

        for bnd in self.bands:
            s,b = self.get_band(bnd, **self.kwargs)
            logging.debug('loading {} band from {}'.format(b,s))
            if s not in grids:
                grids[s] = self.get_hdf(s)
            if self.common_columns[0] not in df:
                df[list(self.common_columns)] = grids[s][list(self.common_columns)]
            col = grids[s][b]
            n_nan = np.isnan(col).sum()
            if n_nan > 0:
                logging.debug('{} NANs in {} column'.format(n_nan, b))
            df.loc[:, bnd] = col.values  #dunno why it has to be this way; something
                                         # funny with indexing.

        return df
[ "def", "_get_df", "(", "self", ")", ":", "grids", "=", "{", "}", "df", "=", "pd", ".", "DataFrame", "(", ")", "for", "bnd", "in", "self", ".", "bands", ":", "s", ",", "b", "=", "self", ".", "get_band", "(", "bnd", ",", "*", "*", "self", ".", "kwargs", ")", "logging", ".", "debug", "(", "'loading {} band from {}'", ".", "format", "(", "b", ",", "s", ")", ")", "if", "s", "not", "in", "grids", ":", "grids", "[", "s", "]", "=", "self", ".", "get_hdf", "(", "s", ")", "if", "self", ".", "common_columns", "[", "0", "]", "not", "in", "df", ":", "df", "[", "list", "(", "self", ".", "common_columns", ")", "]", "=", "grids", "[", "s", "]", "[", "list", "(", "self", ".", "common_columns", ")", "]", "col", "=", "grids", "[", "s", "]", "[", "b", "]", "n_nan", "=", "np", ".", "isnan", "(", "col", ")", ".", "sum", "(", ")", "if", "n_nan", ">", "0", ":", "logging", ".", "debug", "(", "'{} NANs in {} column'", ".", "format", "(", "n_nan", ",", "b", ")", ")", "df", ".", "loc", "[", ":", ",", "bnd", "]", "=", "col", ".", "values", "#dunno why it has to be this way; something", "# funny with indexing.", "return", "df" ]
Returns stellar model grid with desired bandpasses and with standard column names

bands must be iterable, and are parsed according to :func:``get_band``
[ "Returns", "stellar", "model", "grid", "with", "desired", "bandpasses", "and", "with", "standard", "column", "names" ]
python
train
41.681818
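``_get_df`` stitches per-band columns from several HDF-backed grids into one DataFrame, copying the shared columns once and assigning each band via ``.values`` to avoid index alignment. A toy version with in-memory tables standing in for ``get_hdf`` results (band and system names are invented):

# Toy reconstruction of the column-stitching loop with fake grid tables (pandas/numpy only).
import numpy as np
import pandas as pd

common_columns = ['mass', 'age']
grids = {
    'systemA': pd.DataFrame({'mass': [1.0, 1.2], 'age': [9.0, 9.1], 'G': [5.1, 4.8]}),
    'systemB': pd.DataFrame({'mass': [1.0, 1.2], 'age': [9.0, 9.1], 'K': [3.9, np.nan]}),
}
bands = {'G': 'systemA', 'K': 'systemB'}             # band -> which fake grid provides it

df = pd.DataFrame()
for band, system in bands.items():
    grid = grids[system]
    if common_columns[0] not in df:                  # copy the shared columns only once
        for name in common_columns:
            df[name] = grid[name]
    col = grid[band]
    if np.isnan(col).sum() > 0:
        print('NaNs found in band', band)
    df.loc[:, band] = col.values                     # .values sidesteps index alignment
print(df)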
tensorflow/mesh
mesh_tensorflow/optimize.py
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/optimize.py#L286-L312
def adafactor_optimizer_from_hparams(hparams, lr):
  """Create an Adafactor optimizer based on model hparams.

  Args:
    hparams: model hyperparameters
    lr: learning rate scalar.
  Returns:
    an AdafactorOptimizer
  Raises:
    ValueError: on illegal values
  """
  if hparams.optimizer_adafactor_decay_type == "Adam":
    decay_rate = adafactor_decay_rate_adam(
        hparams.optimizer_adafactor_beta2)
  elif hparams.optimizer_adafactor_decay_type == "pow":
    decay_rate = adafactor_decay_rate_pow(
        hparams.optimizer_adafactor_memory_exponent)
  else:
    raise ValueError("unknown optimizer_adafactor_decay_type")
  return AdafactorOptimizer(
      multiply_by_parameter_scale=(
          hparams.optimizer_adafactor_multiply_by_parameter_scale),
      learning_rate=lr,
      decay_rate=decay_rate,
      beta1=hparams.optimizer_adafactor_beta1,
      clipping_threshold=hparams.optimizer_adafactor_clipping_threshold,
      factored=hparams.optimizer_adafactor_factored)
[ "def", "adafactor_optimizer_from_hparams", "(", "hparams", ",", "lr", ")", ":", "if", "hparams", ".", "optimizer_adafactor_decay_type", "==", "\"Adam\"", ":", "decay_rate", "=", "adafactor_decay_rate_adam", "(", "hparams", ".", "optimizer_adafactor_beta2", ")", "elif", "hparams", ".", "optimizer_adafactor_decay_type", "==", "\"pow\"", ":", "decay_rate", "=", "adafactor_decay_rate_pow", "(", "hparams", ".", "optimizer_adafactor_memory_exponent", ")", "else", ":", "raise", "ValueError", "(", "\"unknown optimizer_adafactor_decay_type\"", ")", "return", "AdafactorOptimizer", "(", "multiply_by_parameter_scale", "=", "(", "hparams", ".", "optimizer_adafactor_multiply_by_parameter_scale", ")", ",", "learning_rate", "=", "lr", ",", "decay_rate", "=", "decay_rate", ",", "beta1", "=", "hparams", ".", "optimizer_adafactor_beta1", ",", "clipping_threshold", "=", "hparams", ".", "optimizer_adafactor_clipping_threshold", ",", "factored", "=", "hparams", ".", "optimizer_adafactor_factored", ")" ]
Create an Adafactor optimizer based on model hparams.

Args:
  hparams: model hyperparameters
  lr: learning rate scalar.

Returns:
  an AdafactorOptimizer

Raises:
  ValueError: on illegal values
[ "Create", "an", "Adafactor", "optimizer", "based", "on", "model", "hparams", "." ]
python
train
35.851852
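The function above is a dispatch on ``hparams``: pick a decay schedule by name, fail loudly on anything unknown, then forward the remaining hyperparameters to the optimizer constructor. A skeletal illustration of that pattern follows; the hparams fields, decay helpers, and the dict standing in for ``AdafactorOptimizer`` are placeholders, not the mesh-tensorflow API.

# Stand-in objects only; mirrors the hparams-driven construction, not the real optimizer math.
from types import SimpleNamespace

def decay_rate_adam(beta2):
    return ("adam", beta2)

def decay_rate_pow(exponent):
    return ("pow", exponent)

def build_optimizer(hparams, lr):
    if hparams.decay_type == "Adam":
        decay = decay_rate_adam(hparams.beta2)
    elif hparams.decay_type == "pow":
        decay = decay_rate_pow(hparams.memory_exponent)
    else:
        raise ValueError("unknown decay_type: %r" % hparams.decay_type)
    # A plain dict stands in for the AdafactorOptimizer constructor call.
    return {"learning_rate": lr, "decay_rate": decay, "beta1": hparams.beta1}

hparams = SimpleNamespace(decay_type="pow", memory_exponent=0.8, beta1=0.0)
print(build_optimizer(hparams, lr=0.01))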