Column schema (name: type, observed value lengths):

nwo: string (5-106 chars)
sha: string (40 chars)
path: string (4-174 chars)
language: string (1 distinct value)
identifier: string (1-140 chars)
parameters: string (0-87.7k chars)
argument_list: string (1 distinct value)
return_statement: string (0-426k chars)
docstring: string (0-64.3k chars)
docstring_summary: string (0-26.3k chars)
docstring_tokens: list
function: string (18 chars-4.83M chars)
function_tokens: list
url: string (83-304 chars)
SanPen/GridCal
d3f4566d2d72c11c7e910c9d162538ef0e60df31
src/research/grid_reduction/minimal_graph_reduction_example.py
python
Graph.merge_nodes
(self, node_to_delete, node_to_keep)
Merge the information about two nodes :param node_to_delete: :param node_to_keep: :return:
def merge_nodes(self, node_to_delete, node_to_keep):
    """
    Merge the information about two nodes
    :param node_to_delete:
    :param node_to_keep:
    :return:
    """
    # self.graph[node_to_keep] += self.graph[node_to_delete]
    lst = self.graph[node_to_delete]
    for x in lst:
        if x != node_to_keep:
            self.graph[x] += node_to_keep
    del self.graph[node_to_delete]

    # for key, values in self.graph.items():
    #     val = values.copy()
    #     for i in range(len(val)):
    #         if val[i] == node_to_delete:
    #             val[i] = node_to_keep
    #             print('\tnode updated', key, ':', node_to_delete, '->', node_to_keep, ' remove(', node_to_delete, ')')
    #     self.graph[key] = val
    pass
[ "def", "merge_nodes", "(", "self", ",", "node_to_delete", ",", "node_to_keep", ")", ":", "# self.graph[node_to_keep] += self.graph[node_to_delete]", "lst", "=", "self", ".", "graph", "[", "node_to_delete", "]", "for", "x", "in", "lst", ":", "if", "x", "!=", "node_to_keep", ":", "self", ".", "graph", "[", "x", "]", "+=", "node_to_keep", "del", "self", ".", "graph", "[", "node_to_delete", "]", "# for key, values in self.graph.items():", "# val = values.copy()", "# for i in range(len(val)):", "# if val[i] == node_to_delete:", "# val[i] = node_to_keep", "# print('\\tnode updated', key, ':', node_to_delete, '->', node_to_keep, ' remove(', node_to_delete, ')')", "# self.graph[key] = val", "pass" ]
https://github.com/SanPen/GridCal/blob/d3f4566d2d72c11c7e910c9d162538ef0e60df31/src/research/grid_reduction/minimal_graph_reduction_example.py#L81-L106
cltk/cltk
1a8c2f5ef72389e2579dfce1fa5af8e59ebc9ec1
src/cltk/wordnet/wordnet.py
python
_WordNetObject.verb_groups
(self)
return self.related("$")
def verb_groups(self):
    """"""
    return self.related("$")
[ "def", "verb_groups", "(", "self", ")", ":", "return", "self", ".", "related", "(", "\"$\"", ")" ]
https://github.com/cltk/cltk/blob/1a8c2f5ef72389e2579dfce1fa5af8e59ebc9ec1/src/cltk/wordnet/wordnet.py#L199-L201
emposha/Shell-Detector
5ac8ab2bf514bea737ddff16a75d85d887478f85
shelldetect.py
python
ShellDetector.alert
(self, _content, _color='', _class='info', _html=False, _flag=False)
def alert(self, _content, _color='', _class='info', _html=False, _flag=False):
    _color_result = {
        'red': '\033[91m',
        'green': '\033[92m',
        'yellow': '\033[93m',
        'purple': '\033[95m',
        'blue': '\033[94m',
        '': ''
    }[_color]
    if self.supports_color() is True:
        print _color_result + _content + '\033[0m'
    else:
        print _content
    if _flag is True:
        self.output(_content, _class, _html)
[ "def", "alert", "(", "self", ",", "_content", ",", "_color", "=", "''", ",", "_class", "=", "'info'", ",", "_html", "=", "False", ",", "_flag", "=", "False", ")", ":", "_color_result", "=", "{", "'red'", ":", "'\\033[91m'", ",", "'green'", ":", "'\\033[92m'", ",", "'yellow'", ":", "'\\033[93m'", ",", "'purple'", ":", "'\\033[95m'", ",", "'blue'", ":", "'\\033[94m'", ",", "''", ":", "''", "}", "[", "_color", "]", "if", "self", ".", "supports_color", "(", ")", "is", "True", ":", "print", "_color_result", "+", "_content", "+", "'\\033[0m'", "else", ":", "print", "_content", "if", "_flag", "is", "True", ":", "self", ".", "output", "(", "_content", ",", "_class", ",", "_html", ")" ]
https://github.com/emposha/Shell-Detector/blob/5ac8ab2bf514bea737ddff16a75d85d887478f85/shelldetect.py#L293-L309
hhannine/superpaper
357bcc9dd6f47e3e2ce9b21a2b8f9e1be30ba145
superpaper/configuration_dialogs.py
python
HelpPanel.onClose
(self, event)
Closes help dialog. Saves checkbox state as needed.
def onClose(self, event):
    """Closes help dialog. Saves checkbox state as needed."""
    if self.cb_show_at_start.GetValue() is True:
        current_settings = GeneralSettingsData()
        if current_settings.show_help is False:
            current_settings.show_help = True
            current_settings.save_settings()
    else:
        # Save that the help at start is not wanted.
        current_settings = GeneralSettingsData()
        show_help = current_settings.show_help
        if show_help:
            current_settings.show_help = False
            current_settings.save_settings()
    self.frame.Close(True)
[ "def", "onClose", "(", "self", ",", "event", ")", ":", "if", "self", ".", "cb_show_at_start", ".", "GetValue", "(", ")", "is", "True", ":", "current_settings", "=", "GeneralSettingsData", "(", ")", "if", "current_settings", ".", "show_help", "is", "False", ":", "current_settings", ".", "show_help", "=", "True", "current_settings", ".", "save_settings", "(", ")", "else", ":", "# Save that the help at start is not wanted.", "current_settings", "=", "GeneralSettingsData", "(", ")", "show_help", "=", "current_settings", ".", "show_help", "if", "show_help", ":", "current_settings", ".", "show_help", "=", "False", "current_settings", ".", "save_settings", "(", ")", "self", ".", "frame", ".", "Close", "(", "True", ")" ]
https://github.com/hhannine/superpaper/blob/357bcc9dd6f47e3e2ce9b21a2b8f9e1be30ba145/superpaper/configuration_dialogs.py#L1439-L1453
IntelLabs/nlp-architect
60afd0dd1bfd74f01b4ac8f613cb484777b80284
examples/sparse_gnmt/nmt.py
python
validate_arguments
(args)
Validate input arguments
def validate_arguments(args):
    """Validate input arguments"""
    io.validate((args.num_units, int, 1, None))
    io.validate((args.num_layers, int, 1, None))
    io.validate((args.num_encoder_layers, (int, type(None)), 0, None))
    io.validate((args.num_decoder_layers, (int, type(None)), 0, None))
    io.validate((args.num_embeddings_partitions, int, 0, None))
    io.validate((args.learning_rate, float, 0.0, None))
    io.validate((args.num_train_steps, int, 1, None))
    io.validate((args.warmup_steps, int, 0, args.num_train_steps))
    io.validate((args.init_weight, float))
    io.validate((args.src, (str, type(None)), 1, 256))
    io.validate((args.tgt, (str, type(None)), 1, 256))
    io.validate((args.sos, str, 1, 256))
    io.validate((args.eos, str, 1, 256))
    io.validate((args.src_max_len, int, 1, None))
    io.validate((args.tgt_max_len, int, 1, None))
    io.validate((args.src_max_len_infer, (int, type(None)), 1, None))
    io.validate((args.tgt_max_len_infer, (int, type(None)), 1, None))
    io.validate((args.forget_bias, float, 0.0, None))
    io.validate((args.dropout, float, 0.0, 1.0))
    io.validate((args.max_gradient_norm, float, 0.000000001, None))
    io.validate((args.batch_size, int, 1, None))
    io.validate((args.steps_per_stats, int, 1, None))
    io.validate((args.max_train, int, 0, None))
    io.validate((args.num_buckets, int, 1, None))
    io.validate((args.num_sampled_softmax, int, 0, None))
    io.validate((args.num_gpus, int, 0, None))
    io.validate((args.metrics, str, 1, 256))
    io.validate((args.inference_list, (str, type(None)), 0, 256))
    io.validate((args.steps_per_external_eval, (int, type(None)), 1, None))
    io.validate((args.scope, (str, type(None)), 1, 256))
    io.validate((args.random_seed, (int, type(None))))
    io.validate((args.num_keep_ckpts, int, 0, None))
    io.validate((args.infer_batch_size, int, 1, None))
    io.validate((args.beam_width, int, 0, None))
    io.validate((args.length_penalty_weight, float, 0.0, None))
    io.validate((args.sampling_temperature, float, 0.0, None))
    io.validate((args.num_translations_per_input, int, 1, None))
    io.validate((args.jobid, int, 0, None))
    io.validate((args.num_workers, int, 1, None))
    io.validate((args.num_inter_threads, int, 0, None))
    io.validate((args.num_intra_threads, int, 0, None))
    io.validate((args.pruning_hparams, (str, type(None)), 1, 256))

    suffixes = [args.src]
    if not args.language_model:
        suffixes.append(args.tgt)
    for suffix in suffixes:
        validate_existing_filepath(args.train_prefix, suffix)
        validate_existing_filepath(args.dev_prefix, suffix)
        validate_existing_filepath(args.test_prefix, suffix)
        validate_existing_filepath(args.vocab_prefix, suffix)
        validate_existing_filepath(args.embed_prefix, suffix)
    validate_existing_filepath(args.inference_ref_file)
    validate_existing_filepath(args.inference_input_file)
    validate_existing_filepath(args.hparams_path)
    validate_parent_exists(args.ckpt)
    validate_parent_exists(args.inference_output_file)
    validate_parent_exists(args.out_dir)
[ "def", "validate_arguments", "(", "args", ")", ":", "io", ".", "validate", "(", "(", "args", ".", "num_units", ",", "int", ",", "1", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "num_layers", ",", "int", ",", "1", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "num_encoder_layers", ",", "(", "int", ",", "type", "(", "None", ")", ")", ",", "0", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "num_decoder_layers", ",", "(", "int", ",", "type", "(", "None", ")", ")", ",", "0", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "num_embeddings_partitions", ",", "int", ",", "0", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "learning_rate", ",", "float", ",", "0.0", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "num_train_steps", ",", "int", ",", "1", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "warmup_steps", ",", "int", ",", "0", ",", "args", ".", "num_train_steps", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "init_weight", ",", "float", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "src", ",", "(", "str", ",", "type", "(", "None", ")", ")", ",", "1", ",", "256", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "tgt", ",", "(", "str", ",", "type", "(", "None", ")", ")", ",", "1", ",", "256", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "sos", ",", "str", ",", "1", ",", "256", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "eos", ",", "str", ",", "1", ",", "256", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "src_max_len", ",", "int", ",", "1", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "tgt_max_len", ",", "int", ",", "1", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "src_max_len_infer", ",", "(", "int", ",", "type", "(", "None", ")", ")", ",", "1", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "tgt_max_len_infer", ",", "(", "int", ",", "type", "(", "None", ")", ")", ",", "1", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "forget_bias", ",", "float", ",", "0.0", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "dropout", ",", "float", ",", "0.0", ",", "1.0", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "max_gradient_norm", ",", "float", ",", "0.000000001", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "batch_size", ",", "int", ",", "1", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "steps_per_stats", ",", "int", ",", "1", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "max_train", ",", "int", ",", "0", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "num_buckets", ",", "int", ",", "1", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "num_sampled_softmax", ",", "int", ",", "0", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "num_gpus", ",", "int", ",", "0", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "metrics", ",", "str", ",", "1", ",", "256", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "inference_list", ",", "(", "str", ",", "type", "(", "None", ")", ")", ",", "0", ",", "256", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "steps_per_external_eval", ",", "(", "int", ",", "type", "(", "None", ")", ")", ",", "1", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", 
"scope", ",", "(", "str", ",", "type", "(", "None", ")", ")", ",", "1", ",", "256", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "random_seed", ",", "(", "int", ",", "type", "(", "None", ")", ")", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "num_keep_ckpts", ",", "int", ",", "0", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "infer_batch_size", ",", "int", ",", "1", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "beam_width", ",", "int", ",", "0", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "length_penalty_weight", ",", "float", ",", "0.0", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "sampling_temperature", ",", "float", ",", "0.0", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "num_translations_per_input", ",", "int", ",", "1", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "jobid", ",", "int", ",", "0", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "num_workers", ",", "int", ",", "1", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "num_inter_threads", ",", "int", ",", "0", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "num_intra_threads", ",", "int", ",", "0", ",", "None", ")", ")", "io", ".", "validate", "(", "(", "args", ".", "pruning_hparams", ",", "(", "str", ",", "type", "(", "None", ")", ")", ",", "1", ",", "256", ")", ")", "suffixes", "=", "[", "args", ".", "src", "]", "if", "not", "args", ".", "language_model", ":", "suffixes", ".", "append", "(", "args", ".", "tgt", ")", "for", "suffix", "in", "suffixes", ":", "validate_existing_filepath", "(", "args", ".", "train_prefix", ",", "suffix", ")", "validate_existing_filepath", "(", "args", ".", "dev_prefix", ",", "suffix", ")", "validate_existing_filepath", "(", "args", ".", "test_prefix", ",", "suffix", ")", "validate_existing_filepath", "(", "args", ".", "vocab_prefix", ",", "suffix", ")", "validate_existing_filepath", "(", "args", ".", "embed_prefix", ",", "suffix", ")", "validate_existing_filepath", "(", "args", ".", "inference_ref_file", ")", "validate_existing_filepath", "(", "args", ".", "inference_input_file", ")", "validate_existing_filepath", "(", "args", ".", "hparams_path", ")", "validate_parent_exists", "(", "args", ".", "ckpt", ")", "validate_parent_exists", "(", "args", ".", "inference_output_file", ")", "validate_parent_exists", "(", "args", ".", "out_dir", ")" ]
https://github.com/IntelLabs/nlp-architect/blob/60afd0dd1bfd74f01b4ac8f613cb484777b80284/examples/sparse_gnmt/nmt.py#L684-L744
nucleic/enaml
65c2a2a2d765e88f2e1103046680571894bb41ed
enaml/qt/docking/q_dock_title_bar.py
python
IDockTitleBar.setLinked
(self, linked)
Set whether or not the link button is checked. Parameters ---------- linked : bool True if the link button should be checked, False otherwise.
Set whether or not the link button is checked.
[ "Set", "whether", "or", "not", "the", "link", "button", "is", "checked", "." ]
def setLinked(self, linked):
    """ Set whether or not the link button is checked.

    Parameters
    ----------
    linked : bool
        True if the link button should be checked, False otherwise.

    """
    raise NotImplementedError
[ "def", "setLinked", "(", "self", ",", "linked", ")", ":", "raise", "NotImplementedError" ]
https://github.com/nucleic/enaml/blob/65c2a2a2d765e88f2e1103046680571894bb41ed/enaml/qt/docking/q_dock_title_bar.py#L185-L194
twilio/twilio-python
6e1e811ea57a1edfadd5161ace87397c563f6915
twilio/rest/trusthub/v1/trust_products/trust_products_evaluations.py
python
TrustProductsEvaluationsInstance.date_created
(self)
return self._properties['date_created']
:returns: The date_created :rtype: datetime
def date_created(self):
    """
    :returns: The date_created
    :rtype: datetime
    """
    return self._properties['date_created']
[ "def", "date_created", "(", "self", ")", ":", "return", "self", ".", "_properties", "[", "'date_created'", "]" ]
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/trusthub/v1/trust_products/trust_products_evaluations.py#L357-L362
numba/numba
bf480b9e0da858a65508c2b17759a72ee6a44c51
numba/misc/llvm_pass_timings.py
python
ProcessedPassTimings._process
(self)
return _adjust_timings(records)
Parses the raw string data from LLVM timing report and attempts to improve the data by recomputing the times (See `_adjust_timings()``).
def _process(self):
    """Parses the raw string data from LLVM timing report and attempts
    to improve the data by recomputing the times
    (See `_adjust_timings()``).
    """

    def parse(raw_data):
        """A generator that parses the raw_data line-by-line to extract
        timing information for each pass.
        """
        lines = raw_data.splitlines()
        colheader = r"[a-zA-Z+ ]+"
        # Take at least one column header.
        multicolheaders = fr"(?:\s*-+{colheader}-+)+"

        line_iter = iter(lines)
        # find column headers
        header_map = {
            "User Time": "user",
            "System Time": "system",
            "User+System": "user_system",
            "Wall Time": "wall",
            "Name": "pass_name",
        }
        for ln in line_iter:
            m = re.match(multicolheaders, ln)
            if m:
                # Get all the column headers
                raw_headers = re.findall(r"[a-zA-Z][a-zA-Z+ ]+", ln)
                headers = [header_map[k.strip()] for k in raw_headers]
                break
        assert headers[-1] == 'pass_name'
        # compute the list of available attributes from the column headers
        attrs = []
        for k in headers[:-1]:
            attrs.append(f"{k}_time")
            attrs.append(f"{k}_percent")
        # put default value 0.0 to all missing attributes
        missing = {}
        for k in PassTimingRecord._fields:
            if k not in attrs and k != 'pass_name':
                missing[k] = 0.0
        # parse timings
        n = r"\s*((?:[0-9]+\.)?[0-9]+)"
        pat = f"\\s+(?:{n}\\s*\\({n}%\\)|-+)" * (len(headers) - 1)
        pat += r"\s*(.*)"
        for ln in line_iter:
            m = re.match(pat, ln)
            if m is not None:
                raw_data = list(m.groups())
                data = {k: float(v) if v is not None else 0.0
                        for k, v in zip(attrs, raw_data)}
                data.update(missing)
                pass_name = raw_data[-1]
                rec = PassTimingRecord(
                    pass_name=pass_name, **data,
                )
                yield rec
                if rec.pass_name == "Total":
                    # "Total" means the report has ended
                    break
        # Check that we have reach the end of the report
        remaining = '\n'.join(line_iter)
        if remaining:
            raise ValueError(
                f"unexpected text after parser finished:\n{remaining}"
            )

    # Parse raw data
    records = list(parse(self._raw_data))
    return _adjust_timings(records)
[ "def", "_process", "(", "self", ")", ":", "def", "parse", "(", "raw_data", ")", ":", "\"\"\"A generator that parses the raw_data line-by-line to extract\n timing information for each pass.\n \"\"\"", "lines", "=", "raw_data", ".", "splitlines", "(", ")", "colheader", "=", "r\"[a-zA-Z+ ]+\"", "# Take at least one column header.", "multicolheaders", "=", "fr\"(?:\\s*-+{colheader}-+)+\"", "line_iter", "=", "iter", "(", "lines", ")", "# find column headers", "header_map", "=", "{", "\"User Time\"", ":", "\"user\"", ",", "\"System Time\"", ":", "\"system\"", ",", "\"User+System\"", ":", "\"user_system\"", ",", "\"Wall Time\"", ":", "\"wall\"", ",", "\"Name\"", ":", "\"pass_name\"", ",", "}", "for", "ln", "in", "line_iter", ":", "m", "=", "re", ".", "match", "(", "multicolheaders", ",", "ln", ")", "if", "m", ":", "# Get all the column headers", "raw_headers", "=", "re", ".", "findall", "(", "r\"[a-zA-Z][a-zA-Z+ ]+\"", ",", "ln", ")", "headers", "=", "[", "header_map", "[", "k", ".", "strip", "(", ")", "]", "for", "k", "in", "raw_headers", "]", "break", "assert", "headers", "[", "-", "1", "]", "==", "'pass_name'", "# compute the list of available attributes from the column headers", "attrs", "=", "[", "]", "for", "k", "in", "headers", "[", ":", "-", "1", "]", ":", "attrs", ".", "append", "(", "f\"{k}_time\"", ")", "attrs", ".", "append", "(", "f\"{k}_percent\"", ")", "# put default value 0.0 to all missing attributes", "missing", "=", "{", "}", "for", "k", "in", "PassTimingRecord", ".", "_fields", ":", "if", "k", "not", "in", "attrs", "and", "k", "!=", "'pass_name'", ":", "missing", "[", "k", "]", "=", "0.0", "# parse timings", "n", "=", "r\"\\s*((?:[0-9]+\\.)?[0-9]+)\"", "pat", "=", "f\"\\\\s+(?:{n}\\\\s*\\\\({n}%\\\\)|-+)\"", "*", "(", "len", "(", "headers", ")", "-", "1", ")", "pat", "+=", "r\"\\s*(.*)\"", "for", "ln", "in", "line_iter", ":", "m", "=", "re", ".", "match", "(", "pat", ",", "ln", ")", "if", "m", "is", "not", "None", ":", "raw_data", "=", "list", "(", "m", ".", "groups", "(", ")", ")", "data", "=", "{", "k", ":", "float", "(", "v", ")", "if", "v", "is", "not", "None", "else", "0.0", "for", "k", ",", "v", "in", "zip", "(", "attrs", ",", "raw_data", ")", "}", "data", ".", "update", "(", "missing", ")", "pass_name", "=", "raw_data", "[", "-", "1", "]", "rec", "=", "PassTimingRecord", "(", "pass_name", "=", "pass_name", ",", "*", "*", "data", ",", ")", "yield", "rec", "if", "rec", ".", "pass_name", "==", "\"Total\"", ":", "# \"Total\" means the report has ended", "break", "# Check that we have reach the end of the report", "remaining", "=", "'\\n'", ".", "join", "(", "line_iter", ")", "if", "remaining", ":", "raise", "ValueError", "(", "f\"unexpected text after parser finished:\\n{remaining}\"", ")", "# Parse raw data", "records", "=", "list", "(", "parse", "(", "self", ".", "_raw_data", ")", ")", "return", "_adjust_timings", "(", "records", ")" ]
https://github.com/numba/numba/blob/bf480b9e0da858a65508c2b17759a72ee6a44c51/numba/misc/llvm_pass_timings.py#L197-L268
gem/oq-engine
1bdb88f3914e390abcbd285600bfd39477aae47c
openquake/hazardlib/source/non_parametric.py
python
NonParametricSeismicSource.is_gridded
(self)
return True
:returns: True if containing only GriddedRuptures, False otherwise
def is_gridded(self):
    """
    :returns: True if containing only GriddedRuptures, False otherwise
    """
    for rup, _ in self.data:
        if not isinstance(rup.surface, GriddedSurface):
            return False
    return True
[ "def", "is_gridded", "(", "self", ")", ":", "for", "rup", ",", "_", "in", "self", ".", "data", ":", "if", "not", "isinstance", "(", "rup", ".", "surface", ",", "GriddedSurface", ")", ":", "return", "False", "return", "True" ]
https://github.com/gem/oq-engine/blob/1bdb88f3914e390abcbd285600bfd39477aae47c/openquake/hazardlib/source/non_parametric.py#L135-L142
albertz/music-player
d23586f5bf657cbaea8147223be7814d117ae73d
mac/pyobjc-framework-Cocoa/Examples/AppKit/DragItemAround/DragItemAround.py
python
DraggableItemView.itemColor
(self)
return self._itemColor
.
def itemColor(self):
    """."""
    return self._itemColor
[ "def", "itemColor", "(", "self", ")", ":", "return", "self", ".", "_itemColor" ]
https://github.com/albertz/music-player/blob/d23586f5bf657cbaea8147223be7814d117ae73d/mac/pyobjc-framework-Cocoa/Examples/AppKit/DragItemAround/DragItemAround.py#L164-L166
uber-archive/plato-research-dialogue-system
1db30be390df6903be89fdf5a515debc7d7defb4
plato/domain/database.py
python
DataBase.get_table_name
(self)
Return the database table's name :return: the table's name
def get_table_name(self):
    """
    Return the database table's name

    :return: the table's name
    """
    pass
[ "def", "get_table_name", "(", "self", ")", ":", "pass" ]
https://github.com/uber-archive/plato-research-dialogue-system/blob/1db30be390df6903be89fdf5a515debc7d7defb4/plato/domain/database.py#L69-L75
rembo10/headphones
b3199605be1ebc83a7a8feab6b1e99b64014187c
lib/mutagen/_tools/_util.py
python
SignalHandler.init
(self)
def init(self):
    signal.signal(signal.SIGINT, self._handler)
    signal.signal(signal.SIGTERM, self._handler)
    if os.name != "nt":
        signal.signal(signal.SIGHUP, self._handler)
[ "def", "init", "(", "self", ")", ":", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "self", ".", "_handler", ")", "signal", ".", "signal", "(", "signal", ".", "SIGTERM", ",", "self", ".", "_handler", ")", "if", "os", ".", "name", "!=", "\"nt\"", ":", "signal", ".", "signal", "(", "signal", ".", "SIGHUP", ",", "self", ".", "_handler", ")" ]
https://github.com/rembo10/headphones/blob/b3199605be1ebc83a7a8feab6b1e99b64014187c/lib/mutagen/_tools/_util.py#L66-L70
transferwise/pipelinewise
6934b3851512dbdd4280790bf253a0a13ab65684
pipelinewise/fastsync/commons/target_snowflake.py
python
FastSyncTargetSnowflake.obfuscate_columns
(self, target_schema: str, table_name: str)
Apply any configured transformations to the given table Args: target_schema: target schema name table_name: table name
def obfuscate_columns(self, target_schema: str, table_name: str):
    """
    Apply any configured transformations to the given table
    Args:
        target_schema: target schema name
        table_name: table name
    """
    LOGGER.info('Starting obfuscation rules...')

    table_dict = utils.tablename_to_dict(table_name)
    temp_table = table_dict.get('temp_table_name')
    transformations = self.transformation_config.get('transformations', [])

    # Input table_name is formatted as {{schema}}.{{table}}
    # Stream name in taps transformation.json is formatted as {{schema}}-{{table}}
    #
    # We need to convert to the same format to find the transformation
    # has that has to be applied
    tap_stream_name_by_table_name = (
        '{}-{}'.format(table_dict['schema_name'], table_dict['table_name'])
        if table_dict['schema_name'] is not None
        else table_dict['table_name']
    )

    # Find obfuscation rules for the current table
    # trans_map = self.__get_stream_transformation_map(tap_stream_name_by_table_name, transformations)
    trans_map = TransformationHelper.get_trans_in_sql_flavor(
        tap_stream_name_by_table_name, transformations, SQLFlavor('snowflake')
    )

    self.__apply_transformations(trans_map, target_schema, temp_table)

    LOGGER.info('Obfuscation rules applied.')
[ "def", "obfuscate_columns", "(", "self", ",", "target_schema", ":", "str", ",", "table_name", ":", "str", ")", ":", "LOGGER", ".", "info", "(", "'Starting obfuscation rules...'", ")", "table_dict", "=", "utils", ".", "tablename_to_dict", "(", "table_name", ")", "temp_table", "=", "table_dict", ".", "get", "(", "'temp_table_name'", ")", "transformations", "=", "self", ".", "transformation_config", ".", "get", "(", "'transformations'", ",", "[", "]", ")", "# Input table_name is formatted as {{schema}}.{{table}}", "# Stream name in taps transformation.json is formatted as {{schema}}-{{table}}", "#", "# We need to convert to the same format to find the transformation", "# has that has to be applied", "tap_stream_name_by_table_name", "=", "(", "'{}-{}'", ".", "format", "(", "table_dict", "[", "'schema_name'", "]", ",", "table_dict", "[", "'table_name'", "]", ")", "if", "table_dict", "[", "'schema_name'", "]", "is", "not", "None", "else", "table_dict", "[", "'table_name'", "]", ")", "# Find obfuscation rules for the current table", "# trans_map = self.__get_stream_transformation_map(tap_stream_name_by_table_name, transformations)", "trans_map", "=", "TransformationHelper", ".", "get_trans_in_sql_flavor", "(", "tap_stream_name_by_table_name", ",", "transformations", ",", "SQLFlavor", "(", "'snowflake'", ")", ")", "self", ".", "__apply_transformations", "(", "trans_map", ",", "target_schema", ",", "temp_table", ")", "LOGGER", ".", "info", "(", "'Obfuscation rules applied.'", ")" ]
https://github.com/transferwise/pipelinewise/blob/6934b3851512dbdd4280790bf253a0a13ab65684/pipelinewise/fastsync/commons/target_snowflake.py#L353-L385
littlecodersh/MyPlatform
6f9a946605466f580205f6e9e96e533720fce578
vendor/requests/packages/urllib3/util/retry.py
python
Retry.sleep
(self)
Sleep between retry attempts using an exponential backoff. By default, the backoff factor is 0 and this method will return immediately.
Sleep between retry attempts using an exponential backoff.
[ "Sleep", "between", "retry", "attempts", "using", "an", "exponential", "backoff", "." ]
def sleep(self):
    """ Sleep between retry attempts using an exponential backoff.

    By default, the backoff factor is 0 and this method will return
    immediately.
    """
    backoff = self.get_backoff_time()
    if backoff <= 0:
        return
    time.sleep(backoff)
[ "def", "sleep", "(", "self", ")", ":", "backoff", "=", "self", ".", "get_backoff_time", "(", ")", "if", "backoff", "<=", "0", ":", "return", "time", ".", "sleep", "(", "backoff", ")" ]
https://github.com/littlecodersh/MyPlatform/blob/6f9a946605466f580205f6e9e96e533720fce578/vendor/requests/packages/urllib3/util/retry.py#L170-L179
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/pandora/media_player.py
python
PandoraMediaPlayer._send_station_list_command
(self)
Send a station list command.
def _send_station_list_command(self):
    """Send a station list command."""
    self._pianobar.send("s")
    try:
        self._pianobar.expect("Select station:", timeout=1)
    except pexpect.exceptions.TIMEOUT:
        # try again. Buffer was contaminated.
        self._clear_buffer()
        self._pianobar.send("s")
        self._pianobar.expect("Select station:")
[ "def", "_send_station_list_command", "(", "self", ")", ":", "self", ".", "_pianobar", ".", "send", "(", "\"s\"", ")", "try", ":", "self", ".", "_pianobar", ".", "expect", "(", "\"Select station:\"", ",", "timeout", "=", "1", ")", "except", "pexpect", ".", "exceptions", ".", "TIMEOUT", ":", "# try again. Buffer was contaminated.", "self", ".", "_clear_buffer", "(", ")", "self", ".", "_pianobar", ".", "send", "(", "\"s\"", ")", "self", ".", "_pianobar", ".", "expect", "(", "\"Select station:\"", ")" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/pandora/media_player.py#L229-L238
facebookresearch/mobile-vision
f40401a44e86bb3ba9c1b66e7700e15f96b880cb
mobile_cv/model_zoo/tools/jit_speed_benchmark.py
python
parse_input_file
(args)
return ret
Parse input from input_file
def parse_input_file(args):
    """Parse input from input_file"""
    if args.input_file is None:
        return None
    ret = torch.load(args.input_file, map_location="cpu")
    return ret
[ "def", "parse_input_file", "(", "args", ")", ":", "if", "args", ".", "input_file", "is", "None", ":", "return", "None", "ret", "=", "torch", ".", "load", "(", "args", ".", "input_file", ",", "map_location", "=", "\"cpu\"", ")", "return", "ret" ]
https://github.com/facebookresearch/mobile-vision/blob/f40401a44e86bb3ba9c1b66e7700e15f96b880cb/mobile_cv/model_zoo/tools/jit_speed_benchmark.py#L74-L80
taokong/RON
c62d0edbf6bfe4913d044693463bed199687cdb8
lib/roi_data_layer/minibatch.py
python
_vis_minibatch
(im_blob, rois_blob, overlaps)
Visualize a mini-batch for debugging.
def _vis_minibatch(im_blob, rois_blob, overlaps):
    """Visualize a mini-batch for debugging."""
    import matplotlib.pyplot as plt
    for i in xrange(im_blob.shape[0]):
        rois_inds = rois_blob[:, -1]
        inds = np.where(rois_inds == i)[0]
        rois = rois_blob[inds, :]
        im = im_blob[i, :, :, :].transpose((1, 2, 0)).copy()
        im += cfg.PIXEL_MEANS
        im = im[:, :, (2, 1, 0)]
        im = im.astype(np.uint8)
        cls = rois[-1]
        print rois
        plt.imshow(im)
        for j in xrange(rois.shape[0]):
            roi = rois[j]
            plt.gca().add_patch(
                plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0],
                              roi[3] - roi[1], fill=False,
                              edgecolor='r', linewidth=3)
                )
        plt.show()
[ "def", "_vis_minibatch", "(", "im_blob", ",", "rois_blob", ",", "overlaps", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "for", "i", "in", "xrange", "(", "im_blob", ".", "shape", "[", "0", "]", ")", ":", "rois_inds", "=", "rois_blob", "[", ":", ",", "-", "1", "]", "inds", "=", "np", ".", "where", "(", "rois_inds", "==", "i", ")", "[", "0", "]", "rois", "=", "rois_blob", "[", "inds", ",", ":", "]", "im", "=", "im_blob", "[", "i", ",", ":", ",", ":", ",", ":", "]", ".", "transpose", "(", "(", "1", ",", "2", ",", "0", ")", ")", ".", "copy", "(", ")", "im", "+=", "cfg", ".", "PIXEL_MEANS", "im", "=", "im", "[", ":", ",", ":", ",", "(", "2", ",", "1", ",", "0", ")", "]", "im", "=", "im", ".", "astype", "(", "np", ".", "uint8", ")", "cls", "=", "rois", "[", "-", "1", "]", "print", "rois", "plt", ".", "imshow", "(", "im", ")", "for", "j", "in", "xrange", "(", "rois", ".", "shape", "[", "0", "]", ")", ":", "roi", "=", "rois", "[", "j", "]", "plt", ".", "gca", "(", ")", ".", "add_patch", "(", "plt", ".", "Rectangle", "(", "(", "roi", "[", "0", "]", ",", "roi", "[", "1", "]", ")", ",", "roi", "[", "2", "]", "-", "roi", "[", "0", "]", ",", "roi", "[", "3", "]", "-", "roi", "[", "1", "]", ",", "fill", "=", "False", ",", "edgecolor", "=", "'r'", ",", "linewidth", "=", "3", ")", ")", "plt", ".", "show", "(", ")" ]
https://github.com/taokong/RON/blob/c62d0edbf6bfe4913d044693463bed199687cdb8/lib/roi_data_layer/minibatch.py#L207-L233
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.9.14-1/roles/lib_vendored_deps/library/oc_adm_ca_server_cert.py
python
Yedit.valid_key
(key, sep='.')
return True
validate the incoming key
def valid_key(key, sep='.'):
    '''validate the incoming key'''
    common_separators = list(Yedit.com_sep - set([sep]))
    if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
        return False

    return True
[ "def", "valid_key", "(", "key", ",", "sep", "=", "'.'", ")", ":", "common_separators", "=", "list", "(", "Yedit", ".", "com_sep", "-", "set", "(", "[", "sep", "]", ")", ")", "if", "not", "re", ".", "match", "(", "Yedit", ".", "re_valid_key", ".", "format", "(", "''", ".", "join", "(", "common_separators", ")", ")", ",", "key", ")", ":", "return", "False", "return", "True" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.9.14-1/roles/lib_vendored_deps/library/oc_adm_ca_server_cert.py#L217-L223
fedspendingtransparency/usaspending-api
b13bd5bcba0369ff8512f61a34745626c3969391
usaspending_api/common/etl/primatives.py
python
make_change_detector_conditional
(columns: List[str], left_alias: str, right_alias: str)
return SQL(" or ").join(composed_conditionals)
Turn a list of columns in a SQL safe string containing an ORed together list of conditionals for detecting changes between tables. s.name is distinct from d.name or s.description is distinct from d.description
Turn a list of columns in a SQL safe string containing an ORed together list of conditionals for detecting changes between tables.
[ "Turn", "a", "list", "of", "columns", "in", "a", "SQL", "safe", "string", "containing", "an", "ORed", "together", "list", "of", "conditionals", "for", "detecting", "changes", "between", "tables", "." ]
def make_change_detector_conditional(columns: List[str], left_alias: str, right_alias: str) -> Composed:
    """ Turn a list of columns in a SQL safe string containing an ORed together list of
    conditionals for detecting changes between tables.

        s.name is distinct from d.name or s.description is distinct from d.description

    """
    composed_aliases = {"left_alias": Identifier(left_alias), "right_alias": Identifier(right_alias)}
    template = "{left_alias}.{column} is distinct from {right_alias}.{column}"
    composed_conditionals = [SQL(template).format(column=Identifier(c), **composed_aliases) for c in columns]
    return SQL(" or ").join(composed_conditionals)
[ "def", "make_change_detector_conditional", "(", "columns", ":", "List", "[", "str", "]", ",", "left_alias", ":", "str", ",", "right_alias", ":", "str", ")", "->", "Composed", ":", "composed_aliases", "=", "{", "\"left_alias\"", ":", "Identifier", "(", "left_alias", ")", ",", "\"right_alias\"", ":", "Identifier", "(", "right_alias", ")", "}", "template", "=", "\"{left_alias}.{column} is distinct from {right_alias}.{column}\"", "composed_conditionals", "=", "[", "SQL", "(", "template", ")", ".", "format", "(", "column", "=", "Identifier", "(", "c", ")", ",", "*", "*", "composed_aliases", ")", "for", "c", "in", "columns", "]", "return", "SQL", "(", "\" or \"", ")", ".", "join", "(", "composed_conditionals", ")" ]
https://github.com/fedspendingtransparency/usaspending-api/blob/b13bd5bcba0369ff8512f61a34745626c3969391/usaspending_api/common/etl/primatives.py#L42-L54
securesystemslab/zippy
ff0e84ac99442c2c55fe1d285332cfd4e185e089
zippy/benchmarks/src/benchmarks/sympy/sympy/matrices/matrices.py
python
MatrixBase.diagonal_solve
(self, rhs)
return self._diagonal_solve(rhs)
Solves Ax = B efficiently, where A is a diagonal Matrix, with non-zero diagonal entries. Examples ======== >>> from sympy.matrices import Matrix, eye >>> A = eye(2)*2 >>> B = Matrix([[1, 2], [3, 4]]) >>> A.diagonal_solve(B) == B/2 True See Also ======== lower_triangular_solve upper_triangular_solve cholesky_solve LDLsolve LUsolve QRsolve pinv_solve
Solves Ax = B efficiently, where A is a diagonal Matrix, with non-zero diagonal entries.
[ "Solves", "Ax", "=", "B", "efficiently", "where", "A", "is", "a", "diagonal", "Matrix", "with", "non", "-", "zero", "diagonal", "entries", "." ]
def diagonal_solve(self, rhs):
    """Solves Ax = B efficiently, where A is a diagonal Matrix,
    with non-zero diagonal entries.

    Examples
    ========

    >>> from sympy.matrices import Matrix, eye
    >>> A = eye(2)*2
    >>> B = Matrix([[1, 2], [3, 4]])
    >>> A.diagonal_solve(B) == B/2
    True

    See Also
    ========

    lower_triangular_solve
    upper_triangular_solve
    cholesky_solve
    LDLsolve
    LUsolve
    QRsolve
    pinv_solve
    """
    if not self.is_diagonal:
        raise TypeError("Matrix should be diagonal")
    if rhs.rows != self.rows:
        raise TypeError("Size mis-match")
    return self._diagonal_solve(rhs)
[ "def", "diagonal_solve", "(", "self", ",", "rhs", ")", ":", "if", "not", "self", ".", "is_diagonal", ":", "raise", "TypeError", "(", "\"Matrix should be diagonal\"", ")", "if", "rhs", ".", "rows", "!=", "self", ".", "rows", ":", "raise", "TypeError", "(", "\"Size mis-match\"", ")", "return", "self", ".", "_diagonal_solve", "(", "rhs", ")" ]
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/benchmarks/src/benchmarks/sympy/sympy/matrices/matrices.py#L847-L875
jeanharb/option_critic
5d6c81a650a8f452bc8ad3250f1f211d317fde8c
train_agent.py
python
Trainer.save_model
(self, total_reward, skip_best=False)
def save_model(self, total_reward, skip_best=False):
    if total_reward >= self.best_reward and not skip_best:
        self.best_reward = total_reward
        pkl.dump(self.model.save_params(), open(os.path.join(self.mydir, 'best_model.pkl'), "w"), protocol=pkl.HIGHEST_PROTOCOL)
    pkl.dump(self.model.save_params(), open(os.path.join(self.mydir, 'last_model.pkl'), "w"), protocol=pkl.HIGHEST_PROTOCOL)
    print "Saved model"
[ "def", "save_model", "(", "self", ",", "total_reward", ",", "skip_best", "=", "False", ")", ":", "if", "total_reward", ">=", "self", ".", "best_reward", "and", "not", "skip_best", ":", "self", ".", "best_reward", "=", "total_reward", "pkl", ".", "dump", "(", "self", ".", "model", ".", "save_params", "(", ")", ",", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "mydir", ",", "'best_model.pkl'", ")", ",", "\"w\"", ")", ",", "protocol", "=", "pkl", ".", "HIGHEST_PROTOCOL", ")", "pkl", ".", "dump", "(", "self", ".", "model", ".", "save_params", "(", ")", ",", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "mydir", ",", "'last_model.pkl'", ")", ",", "\"w\"", ")", ",", "protocol", "=", "pkl", ".", "HIGHEST_PROTOCOL", ")", "print", "\"Saved model\"" ]
https://github.com/jeanharb/option_critic/blob/5d6c81a650a8f452bc8ad3250f1f211d317fde8c/train_agent.py#L158-L163
nosmokingbandit/Watcher3
0217e75158b563bdefc8e01c3be7620008cf3977
lib/mako/runtime.py
python
TemplateNamespace.uri
(self)
return self.template.uri
The URI for this :class:`.Namespace`'s template. I.e. whatever was sent to :meth:`.TemplateLookup.get_template()`. This is the equivalent of :attr:`.Template.uri`.
The URI for this :class:`.Namespace`'s template.
[ "The", "URI", "for", "this", ":", "class", ":", ".", "Namespace", "s", "template", "." ]
def uri(self):
    """The URI for this :class:`.Namespace`'s template.

    I.e. whatever was sent to :meth:`.TemplateLookup.get_template()`.

    This is the equivalent of :attr:`.Template.uri`.

    """
    return self.template.uri
[ "def", "uri", "(", "self", ")", ":", "return", "self", ".", "template", ".", "uri" ]
https://github.com/nosmokingbandit/Watcher3/blob/0217e75158b563bdefc8e01c3be7620008cf3977/lib/mako/runtime.py#L592-L600
CvvT/dumpDex
92ab3b7e996194a06bf1dd5538a4954e8a5ee9c1
python/idc.py
python
FirstFuncFchunk
(funcea)
Get the first function chunk of the specified function @param funcea: any address in the function @return: the function entry point or BADADDR @note: This function returns the first (main) chunk of the specified function
Get the first function chunk of the specified function
[ "Get", "the", "first", "function", "chunk", "of", "the", "specified", "function" ]
def FirstFuncFchunk(funcea):
    """
    Get the first function chunk of the specified function

    @param funcea: any address in the function

    @return: the function entry point or BADADDR

    @note: This function returns the first (main) chunk of the specified function
    """
    func = idaapi.get_func(funcea)
    fci = idaapi.func_tail_iterator_t(func, funcea)
    if fci.main():
        return fci.chunk().startEA
    else:
        return BADADDR
[ "def", "FirstFuncFchunk", "(", "funcea", ")", ":", "func", "=", "idaapi", ".", "get_func", "(", "funcea", ")", "fci", "=", "idaapi", ".", "func_tail_iterator_t", "(", "func", ",", "funcea", ")", "if", "fci", ".", "main", "(", ")", ":", "return", "fci", ".", "chunk", "(", ")", ".", "startEA", "else", ":", "return", "BADADDR" ]
https://github.com/CvvT/dumpDex/blob/92ab3b7e996194a06bf1dd5538a4954e8a5ee9c1/python/idc.py#L5680-L5695
w568w/GitHubFollow
a56ec730ae434c99ff95e6132984c38113ed061d
main.py
python
Gitstar.update_gitstar
(self)
def update_gitstar(self):
    url = "http://gitstar.top:88/follow_update"
    res = requests.get(url, headers={'Accept': 'application/json', 'Cookie': self.cookie})
    print "update:" + str(res.status_code == 200)
[ "def", "update_gitstar", "(", "self", ")", ":", "url", "=", "\"http://gitstar.top:88/follow_update\"", "res", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "{", "'Accept'", ":", "'application/json'", ",", "'Cookie'", ":", "self", ".", "cookie", "}", ")", "print", "\"update:\"", "+", "str", "(", "res", ".", "status_code", "==", "200", ")" ]
https://github.com/w568w/GitHubFollow/blob/a56ec730ae434c99ff95e6132984c38113ed061d/main.py#L33-L36
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
cb692f527e4e819b6c228187c5702d990a180043
external/Scripting Engine/Xenotix Python Scripting Engine/Lib/nntplib.py
python
NNTP.stat
(self, id)
return self.statcmd('STAT ' + id)
Process a STAT command. Argument: - id: article number or message id Returns: - resp: server response if successful - nr: the article number - id: the message id
def stat(self, id):
    """Process a STAT command.  Argument:
    - id: article number or message id
    Returns:
    - resp: server response if successful
    - nr:   the article number
    - id:   the message id"""
    return self.statcmd('STAT ' + id)
[ "def", "stat", "(", "self", ",", "id", ")", ":", "return", "self", ".", "statcmd", "(", "'STAT '", "+", "id", ")" ]
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/external/Scripting Engine/Xenotix Python Scripting Engine/Lib/nntplib.py#L387-L395
LabPy/lantz
3e878e3f765a4295b0089d04e241d4beb7b8a65b
lantz/drivers/legacy/ni/daqmx/base.py
python
Task.regeneration_enabled
(self)
return value
Generating the same data more than once is allowed. Set to None to reset.
Generating the same data more than once is allowed.
[ "Generating", "the", "same", "data", "more", "than", "once", "is", "allowed", "." ]
def regeneration_enabled(self):
    """Generating the same data more than once is allowed.

    Set to None to reset.
    """
    err, value = self.lib.GetWriteRegenMode(RetValue('i32'))
    return value
[ "def", "regeneration_enabled", "(", "self", ")", ":", "err", ",", "value", "=", "self", ".", "lib", ".", "GetWriteRegenMode", "(", "RetValue", "(", "'i32'", ")", ")", "return", "value" ]
https://github.com/LabPy/lantz/blob/3e878e3f765a4295b0089d04e241d4beb7b8a65b/lantz/drivers/legacy/ni/daqmx/base.py#L1274-L1280
Kautenja/gym-super-mario-bros
4c89cf601929733800f70833c7fe62973aecdb08
gym_super_mario_bros/smb_env.py
python
SuperMarioBrosEnv._y_position
(self)
return 255 - self._y_pixel
Return the current vertical position.
def _y_position(self):
    """Return the current vertical position."""
    # check if Mario is above the viewport (the score board area)
    if self._y_viewport < 1:
        # y position overflows so we start from 255 and add the offset
        return 255 + (255 - self._y_pixel)
    # invert the y pixel into the distance from the bottom of the screen
    return 255 - self._y_pixel
[ "def", "_y_position", "(", "self", ")", ":", "# check if Mario is above the viewport (the score board area)", "if", "self", ".", "_y_viewport", "<", "1", ":", "# y position overflows so we start from 255 and add the offset", "return", "255", "+", "(", "255", "-", "self", ".", "_y_pixel", ")", "# invert the y pixel into the distance from the bottom of the screen", "return", "255", "-", "self", ".", "_y_pixel" ]
https://github.com/Kautenja/gym-super-mario-bros/blob/4c89cf601929733800f70833c7fe62973aecdb08/gym_super_mario_bros/smb_env.py#L172-L179
JaniceWuo/MovieRecommend
4c86db64ca45598917d304f535413df3bc9fea65
movierecommend/venv1/Lib/site-packages/pip-9.0.1-py3.6.egg/pip/_vendor/distlib/_backport/shutil.py
python
unregister_archive_format
(name)
def unregister_archive_format(name):
    del _ARCHIVE_FORMATS[name]
[ "def", "unregister_archive_format", "(", "name", ")", ":", "del", "_ARCHIVE_FORMATS", "[", "name", "]" ]
https://github.com/JaniceWuo/MovieRecommend/blob/4c86db64ca45598917d304f535413df3bc9fea65/movierecommend/venv1/Lib/site-packages/pip-9.0.1-py3.6.egg/pip/_vendor/distlib/_backport/shutil.py#L541-L542
micropython/micropython-lib
cdd260f0792d04a1ded99171b4c7a2582b7856b4
python-stdlib/email.message/email/message.py
python
Message.__getitem__
(self, name)
return self.get(name)
Get a header value. Return None if the header is missing instead of raising an exception. Note that if the header appeared multiple times, exactly which occurrence gets returned is undefined. Use get_all() to get all the values matching a header field name.
Get a header value.
[ "Get", "a", "header", "value", "." ]
def __getitem__(self, name):
    """Get a header value.

    Return None if the header is missing instead of raising an exception.

    Note that if the header appeared multiple times, exactly which
    occurrence gets returned is undefined.  Use get_all() to get all
    the values matching a header field name.
    """
    return self.get(name)
[ "def", "__getitem__", "(", "self", ",", "name", ")", ":", "return", "self", ".", "get", "(", "name", ")" ]
https://github.com/micropython/micropython-lib/blob/cdd260f0792d04a1ded99171b4c7a2582b7856b4/python-stdlib/email.message/email/message.py#L336-L345
ucsb-seclab/karonte
427ac313e596f723e40768b95d13bd7a9fc92fd8
eval/multi_bin/network-facing/binary_dependency_graph/plugins/environment.py
python
Environment._search_str_in_bb
(self, p, b, caller_node, string)
return found
def _search_str_in_bb(self, p, b, caller_node, string):
    found = False
    c = caller_node
    block = p.factory.block(c.addr)

    # consider the constants in the calling block
    for con in block.vex.all_constants:
        val = con.value
        # check if a string
        c_string = get_string(p, val)
        self._binaries_strings[b].append(c_string)
        if c_string and c_string == string:
            found = True

    return found
[ "def", "_search_str_in_bb", "(", "self", ",", "p", ",", "b", ",", "caller_node", ",", "string", ")", ":", "found", "=", "False", "c", "=", "caller_node", "block", "=", "p", ".", "factory", ".", "block", "(", "c", ".", "addr", ")", "# consider the constants in the calling block", "for", "con", "in", "block", ".", "vex", ".", "all_constants", ":", "val", "=", "con", ".", "value", "# check if a string", "c_string", "=", "get_string", "(", "p", ",", "val", ")", "self", ".", "_binaries_strings", "[", "b", "]", ".", "append", "(", "c_string", ")", "if", "c_string", "and", "c_string", "==", "string", ":", "found", "=", "True", "return", "found" ]
https://github.com/ucsb-seclab/karonte/blob/427ac313e596f723e40768b95d13bd7a9fc92fd8/eval/multi_bin/network-facing/binary_dependency_graph/plugins/environment.py#L105-L120
kanzure/nanoengineer
874e4c9f8a9190f093625b267f9767e19f82e6c4
cad/src/simulation/runSim.py
python
SimRunner.verifyGromacsPlugin
(self)
return True
Verify GROMACS plugin. @return: True if GROMACS is properly enabled. @rtype: boolean
Verify GROMACS plugin.
[ "Verify", "GROMACS", "plugin", "." ]
def verifyGromacsPlugin(self):
    """
    Verify GROMACS plugin.

    @return: True if GROMACS is properly enabled.
    @rtype: boolean
    """
    plugin_name = "GROMACS"
    plugin_prefs_keys = (gromacs_enabled_prefs_key, gromacs_path_prefs_key)
    errorcode, errortext_or_path = \
        checkPluginPreferences(plugin_name, plugin_prefs_keys,
                               extra_check = _verifyGromppAndMdrunExecutables)
    if errorcode:
        msg = redmsg("Verify Plugin: %s (code %d)" % (errortext_or_path, errorcode))
        env.history.message(msg)
        return False
    program_path = errortext_or_path

    self.gromacs_bin_dir, junk_exe = os.path.split(program_path)

    plugin_name = "CPP"
    plugin_prefs_keys = (cpp_enabled_prefs_key, cpp_path_prefs_key)
    errorcode, errortext_or_path = \
        checkPluginPreferences(plugin_name, plugin_prefs_keys,
                               insure_executable = True)
    if errorcode:
        msg = redmsg("Verify Plugin: %s (code %d)" % (errortext_or_path, errorcode))
        env.history.message(msg)
        return False
    self.cpp_executable_path = errortext_or_path

    return True
[ "def", "verifyGromacsPlugin", "(", "self", ")", ":", "plugin_name", "=", "\"GROMACS\"", "plugin_prefs_keys", "=", "(", "gromacs_enabled_prefs_key", ",", "gromacs_path_prefs_key", ")", "errorcode", ",", "errortext_or_path", "=", "checkPluginPreferences", "(", "plugin_name", ",", "plugin_prefs_keys", ",", "extra_check", "=", "_verifyGromppAndMdrunExecutables", ")", "if", "errorcode", ":", "msg", "=", "redmsg", "(", "\"Verify Plugin: %s (code %d)\"", "%", "(", "errortext_or_path", ",", "errorcode", ")", ")", "env", ".", "history", ".", "message", "(", "msg", ")", "return", "False", "program_path", "=", "errortext_or_path", "self", ".", "gromacs_bin_dir", ",", "junk_exe", "=", "os", ".", "path", ".", "split", "(", "program_path", ")", "plugin_name", "=", "\"CPP\"", "plugin_prefs_keys", "=", "(", "cpp_enabled_prefs_key", ",", "cpp_path_prefs_key", ")", "errorcode", ",", "errortext_or_path", "=", "checkPluginPreferences", "(", "plugin_name", ",", "plugin_prefs_keys", ",", "insure_executable", "=", "True", ")", "if", "errorcode", ":", "msg", "=", "redmsg", "(", "\"Verify Plugin: %s (code %d)\"", "%", "(", "errortext_or_path", ",", "errorcode", ")", ")", "env", ".", "history", ".", "message", "(", "msg", ")", "return", "False", "self", ".", "cpp_executable_path", "=", "errortext_or_path", "return", "True" ]
https://github.com/kanzure/nanoengineer/blob/874e4c9f8a9190f093625b267f9767e19f82e6c4/cad/src/simulation/runSim.py#L275-L311
DLTK/DLTK
f94d3bb509eb0741164149acbef0788769a869e4
dltk/networks/autoencoder/convolutional_autoencoder.py
python
convolutional_autoencoder_3d
(inputs, num_convolutions=1, num_hidden_units=128, filters=(16, 32, 64), strides=((2, 2, 2), (2, 2, 2), (2, 2, 2)), mode=tf.estimator.ModeKeys.TRAIN, use_bias=False, activation=tf.nn.relu6, kernel_initializer=tf.initializers.variance_scaling(distribution='uniform'), bias_initializer=tf.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None)
return outputs
Convolutional autoencoder with num_convolutions on len(filters) resolution scales. The downsampling of features is done via strided convolutions and upsampling via strided transpose convolutions. On each resolution scale s are num_convolutions with filter size = filters[s]. strides[s] determine the downsampling factor at each resolution scale. Args: inputs (tf.Tensor): Input tensor to the network, required to be of rank 5. num_convolutions (int, optional): Number of convolutions per resolution scale. num_hidden_units (int, optional): Number of hidden units. filters (tuple or list, optional): Number of filters for all convolutions at each resolution scale. strides (tuple or list, optional): Stride of the first convolution on a resolution scale. mode (str, optional): One of the tf.estimator.ModeKeys strings: TRAIN, EVAL or PREDICT use_bias (bool, optional): Boolean, whether the layer uses a bias. activation (optional): A function to use as activation function. kernel_initializer (TYPE, optional): An initializer for the convolution kernel. bias_initializer (TYPE, optional): An initializer for the bias vector. If None, no bias will be applied. kernel_regularizer (None, optional): Optional regularizer for the convolution kernel. bias_regularizer (None, optional): Optional regularizer for the bias vector. Returns: dict: dictionary of output tensors
Convolutional autoencoder with num_convolutions on len(filters) resolution scales. The downsampling of features is done via strided convolutions and upsampling via strided transpose convolutions. On each resolution scale s are num_convolutions with filter size = filters[s]. strides[s] determine the downsampling factor at each resolution scale.
[ "Convolutional", "autoencoder", "with", "num_convolutions", "on", "len", "(", "filters", ")", "resolution", "scales", ".", "The", "downsampling", "of", "features", "is", "done", "via", "strided", "convolutions", "and", "upsampling", "via", "strided", "transpose", "convolutions", ".", "On", "each", "resolution", "scale", "s", "are", "num_convolutions", "with", "filter", "size", "=", "filters", "[", "s", "]", ".", "strides", "[", "s", "]", "determine", "the", "downsampling", "factor", "at", "each", "resolution", "scale", "." ]
def convolutional_autoencoder_3d(inputs,
                                 num_convolutions=1,
                                 num_hidden_units=128,
                                 filters=(16, 32, 64),
                                 strides=((2, 2, 2), (2, 2, 2), (2, 2, 2)),
                                 mode=tf.estimator.ModeKeys.TRAIN,
                                 use_bias=False,
                                 activation=tf.nn.relu6,
                                 kernel_initializer=tf.initializers.variance_scaling(distribution='uniform'),
                                 bias_initializer=tf.zeros_initializer(),
                                 kernel_regularizer=None,
                                 bias_regularizer=None):
    """Convolutional autoencoder with num_convolutions on len(filters)
    resolution scales. The downsampling of features is done via strided
    convolutions and upsampling via strided transpose convolutions. On each
    resolution scale s there are num_convolutions with filter size =
    filters[s]. strides[s] determines the downsampling factor at each
    resolution scale.

    Args:
        inputs (tf.Tensor): Input tensor to the network, required to be of
            rank 5.
        num_convolutions (int, optional): Number of convolutions per
            resolution scale.
        num_hidden_units (int, optional): Number of hidden units.
        filters (tuple or list, optional): Number of filters for all
            convolutions at each resolution scale.
        strides (tuple or list, optional): Stride of the first convolution on
            a resolution scale.
        mode (str, optional): One of the tf.estimator.ModeKeys strings:
            TRAIN, EVAL or PREDICT.
        use_bias (bool, optional): Boolean, whether the layer uses a bias.
        activation (optional): A function to use as activation function.
        kernel_initializer (TYPE, optional): An initializer for the
            convolution kernel.
        bias_initializer (TYPE, optional): An initializer for the bias
            vector. If None, no bias will be applied.
        kernel_regularizer (None, optional): Optional regularizer for the
            convolution kernel.
        bias_regularizer (None, optional): Optional regularizer for the bias
            vector.

    Returns:
        dict: dictionary of output tensors
    """
    outputs = {}
    assert len(strides) == len(filters)
    assert len(inputs.get_shape().as_list()) == 5, \
        'inputs are required to have a rank of 5.'

    conv_op = tf.layers.conv3d
    tp_conv_op = tf.layers.conv3d_transpose

    conv_params = {'padding': 'same',
                   'use_bias': use_bias,
                   'kernel_initializer': kernel_initializer,
                   'bias_initializer': bias_initializer,
                   'kernel_regularizer': kernel_regularizer,
                   'bias_regularizer': bias_regularizer}

    x = inputs
    tf.logging.info('Input tensor shape {}'.format(x.get_shape()))

    # Convolutional feature encoding blocks with num_convolutions at different
    # resolution scales res_scales
    for res_scale in range(0, len(filters)):
        for i in range(0, num_convolutions - 1):
            with tf.variable_scope('enc_unit_{}_{}'.format(res_scale, i)):
                x = conv_op(inputs=x,
                            filters=filters[res_scale],
                            kernel_size=(3, 3, 3),
                            strides=(1, 1, 1),
                            **conv_params)
                x = tf.layers.batch_normalization(
                    inputs=x, training=mode == tf.estimator.ModeKeys.TRAIN)
                x = activation(x)
                tf.logging.info('Encoder at res_scale {} shape: {}'.format(
                    res_scale, x.get_shape()))

        # Employ strided convolutions to downsample
        with tf.variable_scope('enc_unit_{}_{}'.format(
                res_scale, num_convolutions)):
            # Adjust the strided conv kernel size to prevent losing information
            k_size = [s * 2 if s > 1 else 3 for s in strides[res_scale]]
            x = conv_op(inputs=x,
                        filters=filters[res_scale],
                        kernel_size=k_size,
                        strides=strides[res_scale],
                        **conv_params)
            x = tf.layers.batch_normalization(
                x, training=mode == tf.estimator.ModeKeys.TRAIN)
            x = activation(x)
            tf.logging.info('Encoder at res_scale {} tensor shape: {}'.format(
                res_scale, x.get_shape()))

    # Densely connected layer of hidden units
    x_shape = x.get_shape().as_list()
    x = tf.reshape(x, (tf.shape(x)[0], np.prod(x_shape[1:])))

    x = tf.layers.dense(inputs=x,
                        units=num_hidden_units,
                        use_bias=conv_params['use_bias'],
                        kernel_initializer=conv_params['kernel_initializer'],
                        bias_initializer=conv_params['bias_initializer'],
                        kernel_regularizer=conv_params['kernel_regularizer'],
                        bias_regularizer=conv_params['bias_regularizer'],
                        name='hidden_units')
    outputs['hidden_units'] = x
    tf.logging.info('Hidden units tensor shape: {}'.format(x.get_shape()))

    x = tf.layers.dense(inputs=x,
                        units=np.prod(x_shape[1:]),
                        activation=activation,
                        use_bias=conv_params['use_bias'],
                        kernel_initializer=conv_params['kernel_initializer'],
                        bias_initializer=conv_params['bias_initializer'],
                        kernel_regularizer=conv_params['kernel_regularizer'],
                        bias_regularizer=conv_params['bias_regularizer'])

    x = tf.reshape(x, [tf.shape(x)[0]] + list(x_shape)[1:])
    tf.logging.info('Decoder input tensor shape: {}'.format(x.get_shape()))

    # Decoding blocks with num_convolutions at different resolution scales
    # res_scales
    for res_scale in reversed(range(0, len(filters))):
        # Employ strided transpose convolutions to upsample
        with tf.variable_scope('dec_unit_{}_0'.format(res_scale)):
            # Adjust the strided tp conv kernel size to prevent losing
            # information
            k_size = [s * 2 if s > 1 else 3 for s in strides[res_scale]]
            x = tp_conv_op(inputs=x,
                           filters=filters[res_scale],
                           kernel_size=k_size,
                           strides=strides[res_scale],
                           **conv_params)
            x = tf.layers.batch_normalization(
                x, training=mode == tf.estimator.ModeKeys.TRAIN)
            x = activation(x)
            tf.logging.info('Decoder at res_scale {} tensor shape: {}'.format(
                res_scale, x.get_shape()))

        for i in range(1, num_convolutions):
            with tf.variable_scope('dec_unit_{}_{}'.format(res_scale, i)):
                x = conv_op(inputs=x,
                            filters=filters[res_scale],
                            kernel_size=(3, 3, 3),
                            strides=(1, 1, 1),
                            **conv_params)
                x = tf.layers.batch_normalization(
                    x, training=mode == tf.estimator.ModeKeys.TRAIN)
                x = activation(x)
                tf.logging.info('Decoder at res_scale {} tensor shape: {}'.format(
                    res_scale, x.get_shape()))

    # A final convolution reduces the number of output features to those of
    # the inputs
    x = conv_op(inputs=x,
                filters=inputs.get_shape().as_list()[-1],
                kernel_size=(1, 1, 1),
                strides=(1, 1, 1),
                **conv_params)

    tf.logging.info('Output tensor shape: {}'.format(x.get_shape()))
    outputs['x_'] = x

    return outputs
[ "def", "convolutional_autoencoder_3d", "(", "inputs", ",", "num_convolutions", "=", "1", ",", "num_hidden_units", "=", "128", ",", "filters", "=", "(", "16", ",", "32", ",", "64", ")", ",", "strides", "=", "(", "(", "2", ",", "2", ",", "2", ")", ",", "(", "2", ",", "2", ",", "2", ")", ",", "(", "2", ",", "2", ",", "2", ")", ")", ",", "mode", "=", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", ",", "use_bias", "=", "False", ",", "activation", "=", "tf", ".", "nn", ".", "relu6", ",", "kernel_initializer", "=", "tf", ".", "initializers", ".", "variance_scaling", "(", "distribution", "=", "'uniform'", ")", ",", "bias_initializer", "=", "tf", ".", "zeros_initializer", "(", ")", ",", "kernel_regularizer", "=", "None", ",", "bias_regularizer", "=", "None", ")", ":", "outputs", "=", "{", "}", "assert", "len", "(", "strides", ")", "==", "len", "(", "filters", ")", "assert", "len", "(", "inputs", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", ")", "==", "5", ",", "'inputs are required to have a rank of 5.'", "conv_op", "=", "tf", ".", "layers", ".", "conv3d", "tp_conv_op", "=", "tf", ".", "layers", ".", "conv3d_transpose", "conv_params", "=", "{", "'padding'", ":", "'same'", ",", "'use_bias'", ":", "use_bias", ",", "'kernel_initializer'", ":", "kernel_initializer", ",", "'bias_initializer'", ":", "bias_initializer", ",", "'kernel_regularizer'", ":", "kernel_regularizer", ",", "'bias_regularizer'", ":", "bias_regularizer", "}", "x", "=", "inputs", "tf", ".", "logging", ".", "info", "(", "'Input tensor shape {}'", ".", "format", "(", "x", ".", "get_shape", "(", ")", ")", ")", "# Convolutional feature encoding blocks with num_convolutions at different", "# resolution scales res_scales", "for", "res_scale", "in", "range", "(", "0", ",", "len", "(", "filters", ")", ")", ":", "for", "i", "in", "range", "(", "0", ",", "num_convolutions", "-", "1", ")", ":", "with", "tf", ".", "variable_scope", "(", "'enc_unit_{}_{}'", ".", "format", "(", "res_scale", ",", "i", ")", ")", ":", "x", "=", "conv_op", "(", "inputs", "=", "x", ",", "filters", "=", "filters", "[", "res_scale", "]", ",", "kernel_size", "=", "(", "3", ",", "3", ",", "3", ")", ",", "strides", "=", "(", "1", ",", "1", ",", "1", ")", ",", "*", "*", "conv_params", ")", "x", "=", "tf", ".", "layers", ".", "batch_normalization", "(", "inputs", "=", "x", ",", "training", "=", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", ")", "x", "=", "activation", "(", "x", ")", "tf", ".", "logging", ".", "info", "(", "'Encoder at res_scale {} shape: {}'", ".", "format", "(", "res_scale", ",", "x", ".", "get_shape", "(", ")", ")", ")", "# Employ strided convolutions to downsample", "with", "tf", ".", "variable_scope", "(", "'enc_unit_{}_{}'", ".", "format", "(", "res_scale", ",", "num_convolutions", ")", ")", ":", "# Adjust the strided conv kernel size to prevent losing information", "k_size", "=", "[", "s", "*", "2", "if", "s", ">", "1", "else", "3", "for", "s", "in", "strides", "[", "res_scale", "]", "]", "x", "=", "conv_op", "(", "inputs", "=", "x", ",", "filters", "=", "filters", "[", "res_scale", "]", ",", "kernel_size", "=", "k_size", ",", "strides", "=", "strides", "[", "res_scale", "]", ",", "*", "*", "conv_params", ")", "x", "=", "tf", ".", "layers", ".", "batch_normalization", "(", "x", ",", "training", "=", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", ")", "x", "=", "activation", "(", "x", ")", "tf", ".", "logging", ".", "info", "(", "'Encoder at res_scale {} tensor shape: {}'", ".", "format", 
"(", "res_scale", ",", "x", ".", "get_shape", "(", ")", ")", ")", "# Densely connected layer of hidden units", "x_shape", "=", "x", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "x", "=", "tf", ".", "reshape", "(", "x", ",", "(", "tf", ".", "shape", "(", "x", ")", "[", "0", "]", ",", "np", ".", "prod", "(", "x_shape", "[", "1", ":", "]", ")", ")", ")", "x", "=", "tf", ".", "layers", ".", "dense", "(", "inputs", "=", "x", ",", "units", "=", "num_hidden_units", ",", "use_bias", "=", "conv_params", "[", "'use_bias'", "]", ",", "kernel_initializer", "=", "conv_params", "[", "'kernel_initializer'", "]", ",", "bias_initializer", "=", "conv_params", "[", "'bias_initializer'", "]", ",", "kernel_regularizer", "=", "conv_params", "[", "'kernel_regularizer'", "]", ",", "bias_regularizer", "=", "conv_params", "[", "'bias_regularizer'", "]", ",", "name", "=", "'hidden_units'", ")", "outputs", "[", "'hidden_units'", "]", "=", "x", "tf", ".", "logging", ".", "info", "(", "'Hidden units tensor shape: {}'", ".", "format", "(", "x", ".", "get_shape", "(", ")", ")", ")", "x", "=", "tf", ".", "layers", ".", "dense", "(", "inputs", "=", "x", ",", "units", "=", "np", ".", "prod", "(", "x_shape", "[", "1", ":", "]", ")", ",", "activation", "=", "activation", ",", "use_bias", "=", "conv_params", "[", "'use_bias'", "]", ",", "kernel_initializer", "=", "conv_params", "[", "'kernel_initializer'", "]", ",", "bias_initializer", "=", "conv_params", "[", "'bias_initializer'", "]", ",", "kernel_regularizer", "=", "conv_params", "[", "'kernel_regularizer'", "]", ",", "bias_regularizer", "=", "conv_params", "[", "'bias_regularizer'", "]", ")", "x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "tf", ".", "shape", "(", "x", ")", "[", "0", "]", "]", "+", "list", "(", "x_shape", ")", "[", "1", ":", "]", ")", "tf", ".", "logging", ".", "info", "(", "'Decoder input tensor shape: {}'", ".", "format", "(", "x", ".", "get_shape", "(", ")", ")", ")", "# Decoding blocks with num_convolutions at different resolution scales", "# res_scales", "for", "res_scale", "in", "reversed", "(", "range", "(", "0", ",", "len", "(", "filters", ")", ")", ")", ":", "# Employ strided transpose convolutions to upsample", "with", "tf", ".", "variable_scope", "(", "'dec_unit_{}_0'", ".", "format", "(", "res_scale", ")", ")", ":", "# Adjust the strided tp conv kernel size to prevent losing", "# information", "k_size", "=", "[", "s", "*", "2", "if", "s", ">", "1", "else", "3", "for", "s", "in", "strides", "[", "res_scale", "]", "]", "x", "=", "tp_conv_op", "(", "inputs", "=", "x", ",", "filters", "=", "filters", "[", "res_scale", "]", ",", "kernel_size", "=", "k_size", ",", "strides", "=", "strides", "[", "res_scale", "]", ",", "*", "*", "conv_params", ")", "x", "=", "tf", ".", "layers", ".", "batch_normalization", "(", "x", ",", "training", "=", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", ")", "x", "=", "activation", "(", "x", ")", "tf", ".", "logging", ".", "info", "(", "'Decoder at res_scale {} tensor shape: {}'", ".", "format", "(", "res_scale", ",", "x", ".", "get_shape", "(", ")", ")", ")", "for", "i", "in", "range", "(", "1", ",", "num_convolutions", ")", ":", "with", "tf", ".", "variable_scope", "(", "'dec_unit_{}_{}'", ".", "format", "(", "res_scale", ",", "i", ")", ")", ":", "x", "=", "conv_op", "(", "inputs", "=", "x", ",", "filters", "=", "filters", "[", "res_scale", "]", ",", "kernel_size", "=", "(", "3", ",", "3", ",", "3", ")", ",", "strides", "=", "(", "1", ",", "1", ",", "1", ")", ",", "*", "*", 
"conv_params", ")", "x", "=", "tf", ".", "layers", ".", "batch_normalization", "(", "x", ",", "training", "=", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", ")", "x", "=", "activation", "(", "x", ")", "tf", ".", "logging", ".", "info", "(", "'Decoder at res_scale {} tensor shape: {}'", ".", "format", "(", "res_scale", ",", "x", ".", "get_shape", "(", ")", ")", ")", "# A final convolution reduces the number of output features to those of", "# the inputs", "x", "=", "conv_op", "(", "inputs", "=", "x", ",", "filters", "=", "inputs", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "[", "-", "1", "]", ",", "kernel_size", "=", "(", "1", ",", "1", ",", "1", ")", ",", "strides", "=", "(", "1", ",", "1", ",", "1", ")", ",", "*", "*", "conv_params", ")", "tf", ".", "logging", ".", "info", "(", "'Output tensor shape: {}'", ".", "format", "(", "x", ".", "get_shape", "(", ")", ")", ")", "outputs", "[", "'x_'", "]", "=", "x", "return", "outputs" ]
https://github.com/DLTK/DLTK/blob/f94d3bb509eb0741164149acbef0788769a869e4/dltk/networks/autoencoder/convolutional_autoencoder.py#L10-L187
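A quick worked note on the kernel-size rule used in both the encoder and decoder above: for a stride s > 1 the (transpose) convolution kernel is widened to 2*s so that consecutive windows overlap and no voxels are skipped, while stride-1 axes keep a kernel of 3. A minimal standalone sketch (plain Python, no TensorFlow required):

strides = (2, 2, 1)
k_size = [s * 2 if s > 1 else 3 for s in strides]
assert k_size == [4, 4, 3]   # 4-wide kernels on strided axes, 3 elsewhere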
numba/llvmlite
aeddf447d4befc336b26bde8c46042553a13cf75
llvmlite/ir/builder.py
python
IRBuilder.load
(self, ptr, name='', align=None)
return ld
Load value from pointer, with optional guaranteed alignment: name = *ptr
Load value from pointer, with optional guaranteed alignment: name = *ptr
[ "Load", "value", "from", "pointer", "with", "optional", "guaranteed", "alignment", ":", "name", "=", "*", "ptr" ]
def load(self, ptr, name='', align=None):
    """
    Load value from pointer, with optional guaranteed alignment:
        name = *ptr
    """
    if not isinstance(ptr.type, types.PointerType):
        msg = "cannot load from value of type %s (%r): not a pointer"
        raise TypeError(msg % (ptr.type, str(ptr)))
    ld = instructions.LoadInstr(self.block, ptr, name)
    ld.align = align
    self._insert(ld)
    return ld
[ "def", "load", "(", "self", ",", "ptr", ",", "name", "=", "''", ",", "align", "=", "None", ")", ":", "if", "not", "isinstance", "(", "ptr", ".", "type", ",", "types", ".", "PointerType", ")", ":", "msg", "=", "\"cannot load from value of type %s (%r): not a pointer\"", "raise", "TypeError", "(", "msg", "%", "(", "ptr", ".", "type", ",", "str", "(", "ptr", ")", ")", ")", "ld", "=", "instructions", ".", "LoadInstr", "(", "self", ".", "block", ",", "ptr", ",", "name", ")", "ld", ".", "align", "=", "align", "self", ".", "_insert", "(", "ld", ")", "return", "ld" ]
https://github.com/numba/llvmlite/blob/aeddf447d4befc336b26bde8c46042553a13cf75/llvmlite/ir/builder.py#L755-L766
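A small usage sketch for the method above (not from the repo; the module and value names "demo", "f", "slot" and "v" are illustrative). It round-trips a constant through a stack slot so the aligned load shows up in the emitted IR:

import llvmlite.ir as ir

i32 = ir.IntType(32)
mod = ir.Module(name="demo")                       # hypothetical module
fn = ir.Function(mod, ir.FunctionType(i32, []), name="f")
builder = ir.IRBuilder(fn.append_basic_block(name="entry"))
slot = builder.alloca(i32, name="slot")            # i32* on the stack
builder.store(ir.Constant(i32, 7), slot)
val = builder.load(slot, name="v", align=4)        # the method shown above
builder.ret(val)
print(mod)                                         # IR contains an 'align 4' load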
pykickstart/pykickstart
c16b64667ea0ba9697f3a0fc5bfe28a5dd970dcf
pykickstart/base.py
python
BaseHandler.__str__
(self)
return retval
Return a string formatted for output to a kickstart file.
Return a string formatted for output to a kickstart file.
[ "Return", "a", "string", "formatted", "for", "output", "to", "a", "kickstart", "file", "." ]
def __str__(self):
    """Return a string formatted for output to a kickstart file."""
    retval = "# Generated by pykickstart v%s\n" % __version__

    if self.platform:
        retval += "#platform=%s\n" % self.platform

    retval += "#version=%s\n" % versionToString(self.version)

    retval += KickstartHandler.__str__(self)

    for script in self.scripts:
        retval += script.__str__()

    if self._null_section_strings:
        retval += "\n"
        for s in self._null_section_strings:
            retval += s

    retval += self.packages.__str__()

    return retval
[ "def", "__str__", "(", "self", ")", ":", "retval", "=", "\"# Generated by pykickstart v%s\\n\"", "%", "__version__", "if", "self", ".", "platform", ":", "retval", "+=", "\"#platform=%s\\n\"", "%", "self", ".", "platform", "retval", "+=", "\"#version=%s\\n\"", "%", "versionToString", "(", "self", ".", "version", ")", "retval", "+=", "KickstartHandler", ".", "__str__", "(", "self", ")", "for", "script", "in", "self", ".", "scripts", ":", "retval", "+=", "script", ".", "__str__", "(", ")", "if", "self", ".", "_null_section_strings", ":", "retval", "+=", "\"\\n\"", "for", "s", "in", "self", ".", "_null_section_strings", ":", "retval", "+=", "s", "retval", "+=", "self", ".", "packages", ".", "__str__", "(", ")", "return", "retval" ]
https://github.com/pykickstart/pykickstart/blob/c16b64667ea0ba9697f3a0fc5bfe28a5dd970dcf/pykickstart/base.py#L489-L511
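A hedged usage sketch (assumes an installed pykickstart; makeVersion() with no argument returns a handler for the default syntax version):

from pykickstart.version import makeVersion

handler = makeVersion()      # a BaseHandler subclass instance
text = str(handler)          # invokes the __str__ shown above
assert text.startswith("# Generated by pykickstart v")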
knownsec/VxPwn
6555f49675f0317d4a48568a89d0ec4332658402
sulley/examples/fuzz_trend_server_protect_5168.py
python
do_fuzz
()
[]
def do_fuzz ():
    sess = sessions.session(session_filename="audits/trend_server_protect_5168.session")
    target = sessions.target("192.168.181.133", 5168)

    target.netmon = pedrpc.client("192.168.181.133", 26001)
    target.procmon = pedrpc.client("192.168.181.133", 26002)
    target.vmcontrol = pedrpc.client("127.0.0.1", 26003)

    target.procmon_options = \
        {
            "proc_name"      : "SpntSvc.exe",
            "stop_commands"  : ['net stop "trend serverprotect"'],
            "start_commands" : ['net start "trend serverprotect"'],
        }

    # start up the target.
    target.vmcontrol.restart_target()
    print "virtual machine up and running"

    sess.add_target(target)
    sess.pre_send = rpc_bind
    sess.connect(s_get("5168: op-1"))
    sess.connect(s_get("5168: op-2"))
    sess.connect(s_get("5168: op-3"))
    sess.connect(s_get("5168: op-5"))
    sess.connect(s_get("5168: op-a"))
    sess.connect(s_get("5168: op-1f"))
    sess.fuzz()

    print "done fuzzing. web interface still running."
[ "def", "do_fuzz", "(", ")", ":", "sess", "=", "sessions", ".", "session", "(", "session_filename", "=", "\"audits/trend_server_protect_5168.session\"", ")", "target", "=", "sessions", ".", "target", "(", "\"192.168.181.133\"", ",", "5168", ")", "target", ".", "netmon", "=", "pedrpc", ".", "client", "(", "\"192.168.181.133\"", ",", "26001", ")", "target", ".", "procmon", "=", "pedrpc", ".", "client", "(", "\"192.168.181.133\"", ",", "26002", ")", "target", ".", "vmcontrol", "=", "pedrpc", ".", "client", "(", "\"127.0.0.1\"", ",", "26003", ")", "target", ".", "procmon_options", "=", "{", "\"proc_name\"", ":", "\"SpntSvc.exe\"", ",", "\"stop_commands\"", ":", "[", "'net stop \"trend serverprotect\"'", "]", ",", "\"start_commands\"", ":", "[", "'net start \"trend serverprotect\"'", "]", ",", "}", "# start up the target.", "target", ".", "vmcontrol", ".", "restart_target", "(", ")", "print", "\"virtual machine up and running\"", "sess", ".", "add_target", "(", "target", ")", "sess", ".", "pre_send", "=", "rpc_bind", "sess", ".", "connect", "(", "s_get", "(", "\"5168: op-1\"", ")", ")", "sess", ".", "connect", "(", "s_get", "(", "\"5168: op-2\"", ")", ")", "sess", ".", "connect", "(", "s_get", "(", "\"5168: op-3\"", ")", ")", "sess", ".", "connect", "(", "s_get", "(", "\"5168: op-5\"", ")", ")", "sess", ".", "connect", "(", "s_get", "(", "\"5168: op-a\"", ")", ")", "sess", ".", "connect", "(", "s_get", "(", "\"5168: op-1f\"", ")", ")", "sess", ".", "fuzz", "(", ")", "print", "\"done fuzzing. web interface still running.\"" ]
https://github.com/knownsec/VxPwn/blob/6555f49675f0317d4a48568a89d0ec4332658402/sulley/examples/fuzz_trend_server_protect_5168.py#L59-L89
kudkudak/word-embeddings-benchmarks
c78272b8c1374e5e518915a240ab2b348b59f44e
web/datasets/analogy.py
python
fetch_google_analogy
()
return Bunch(X=np.vstack(questions).astype("object"), y=np.hstack(answers).astype("object"), category=np.hstack(category).astype("object"), category_high_level=np.hstack(category_high_level).astype("object"))
Fetch Google dataset for testing both semantic and syntactic analogies. Returns ------- data : sklearn.datasets.base.Bunch dictionary-like object. Keys of interest: 'X': matrix of word questions 'y': vector of answers 'category': name of category 'category_high_level': name of high level category (semantic/syntactic) References ---------- Mikolov, Tomas and Sutskever, Ilya and Chen, Kai and Corrado, Greg S and Dean, Jeff, "Distributed representations of words and phrases and their compositionality", 2013 Notes ----- This dataset is a subset of WordRep dataset.
Fetch Google dataset for testing both semantic and syntactic analogies.
[ "Fetch", "Google", "dataset", "for", "testing", "both", "semantic", "and", "syntactic", "analogies", "." ]
def fetch_google_analogy():
    """
    Fetch Google dataset for testing both semantic and syntactic analogies.

    Returns
    -------
    data : sklearn.datasets.base.Bunch
        dictionary-like object. Keys of interest:
        'X': matrix of word questions
        'y': vector of answers
        'category': name of category
        'category_high_level': name of high level category (semantic/syntactic)

    References
    ----------
    Mikolov, Tomas and Sutskever, Ilya and Chen, Kai and Corrado, Greg S and Dean, Jeff,
    "Distributed representations of words and phrases and their compositionality", 2013

    Notes
    -----
    This dataset is a subset of WordRep dataset.
    """
    url = "https://www.dropbox.com/s/eujtyfb5zem1mim/EN-GOOGLE.txt?dl=1"
    with open(_fetch_file(url, "analogy/EN-GOOGLE", verbose=0), "r") as f:
        L = f.read().splitlines()

    # Simple 4 word analogy questions with categories
    questions = []
    answers = []
    category = []
    cat = None
    for l in L:
        if l.startswith(":"):
            cat = l.lower().split()[1]
        else:
            words = standardize_string(l).split()
            questions.append(words[0:3])
            answers.append(words[3])
            category.append(cat)

    assert set(category) == set(['gram3-comparative', 'gram8-plural',
                                 'capital-common-countries', 'city-in-state',
                                 'family', 'gram9-plural-verbs', 'gram2-opposite',
                                 'currency', 'gram4-superlative',
                                 'gram6-nationality-adjective', 'gram7-past-tense',
                                 'gram5-present-participle', 'capital-world',
                                 'gram1-adjective-to-adverb'])

    syntactic = set([c for c in set(category) if c.startswith("gram")])
    category_high_level = []
    for cat in category:
        category_high_level.append("syntactic" if cat in syntactic else "semantic")

    # dtype=object for memory efficiency
    return Bunch(X=np.vstack(questions).astype("object"),
                 y=np.hstack(answers).astype("object"),
                 category=np.hstack(category).astype("object"),
                 category_high_level=np.hstack(category_high_level).astype("object"))
[ "def", "fetch_google_analogy", "(", ")", ":", "url", "=", "\"https://www.dropbox.com/s/eujtyfb5zem1mim/EN-GOOGLE.txt?dl=1\"", "with", "open", "(", "_fetch_file", "(", "url", ",", "\"analogy/EN-GOOGLE\"", ",", "verbose", "=", "0", ")", ",", "\"r\"", ")", "as", "f", ":", "L", "=", "f", ".", "read", "(", ")", ".", "splitlines", "(", ")", "# Simple 4 word analogy questions with categories", "questions", "=", "[", "]", "answers", "=", "[", "]", "category", "=", "[", "]", "cat", "=", "None", "for", "l", "in", "L", ":", "if", "l", ".", "startswith", "(", "\":\"", ")", ":", "cat", "=", "l", ".", "lower", "(", ")", ".", "split", "(", ")", "[", "1", "]", "else", ":", "words", "=", "standardize_string", "(", "l", ")", ".", "split", "(", ")", "questions", ".", "append", "(", "words", "[", "0", ":", "3", "]", ")", "answers", ".", "append", "(", "words", "[", "3", "]", ")", "category", ".", "append", "(", "cat", ")", "assert", "set", "(", "category", ")", "==", "set", "(", "[", "'gram3-comparative'", ",", "'gram8-plural'", ",", "'capital-common-countries'", ",", "'city-in-state'", ",", "'family'", ",", "'gram9-plural-verbs'", ",", "'gram2-opposite'", ",", "'currency'", ",", "'gram4-superlative'", ",", "'gram6-nationality-adjective'", ",", "'gram7-past-tense'", ",", "'gram5-present-participle'", ",", "'capital-world'", ",", "'gram1-adjective-to-adverb'", "]", ")", "syntactic", "=", "set", "(", "[", "c", "for", "c", "in", "set", "(", "category", ")", "if", "c", ".", "startswith", "(", "\"gram\"", ")", "]", ")", "category_high_level", "=", "[", "]", "for", "cat", "in", "category", ":", "category_high_level", ".", "append", "(", "\"syntactic\"", "if", "cat", "in", "syntactic", "else", "\"semantic\"", ")", "# dtype=object for memory efficiency", "return", "Bunch", "(", "X", "=", "np", ".", "vstack", "(", "questions", ")", ".", "astype", "(", "\"object\"", ")", ",", "y", "=", "np", ".", "hstack", "(", "answers", ")", ".", "astype", "(", "\"object\"", ")", ",", "category", "=", "np", ".", "hstack", "(", "category", ")", ".", "astype", "(", "\"object\"", ")", ",", "category_high_level", "=", "np", ".", "hstack", "(", "category_high_level", ")", ".", "astype", "(", "\"object\"", ")", ")" ]
https://github.com/kudkudak/word-embeddings-benchmarks/blob/c78272b8c1374e5e518915a240ab2b348b59f44e/web/datasets/analogy.py#L111-L169
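A hedged usage sketch (assumes the `web` package from this repo is importable; the first call downloads EN-GOOGLE to the local data cache):

data = fetch_google_analogy()
print(data.X[0], "->", data.y[0])              # a 3-word question and its answer
print(sorted(set(data.category_high_level)))   # ['semantic', 'syntactic']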
modflowpy/flopy
eecd1ad193c5972093c9712e5c4b7a83284f0688
flopy/modflow/mfdis.py
python
ModflowDis.get_cell_volumes
(self)
return vol
Get an array of cell volumes. Returns ------- vol : array of floats (nlay, nrow, ncol)
Get an array of cell volumes.
[ "Get", "an", "array", "of", "cell", "volumes", "." ]
def get_cell_volumes(self):
    """
    Get an array of cell volumes.

    Returns
    -------
    vol : array of floats (nlay, nrow, ncol)
    """
    vol = np.empty((self.nlay, self.nrow, self.ncol))
    for l in range(self.nlay):
        vol[l, :, :] = self.parent.modelgrid.thick[l]
    for r in range(self.nrow):
        vol[:, r, :] *= self.delc[r]
    for c in range(self.ncol):
        vol[:, :, c] *= self.delr[c]
    return vol
[ "def", "get_cell_volumes", "(", "self", ")", ":", "vol", "=", "np", ".", "empty", "(", "(", "self", ".", "nlay", ",", "self", ".", "nrow", ",", "self", ".", "ncol", ")", ")", "for", "l", "in", "range", "(", "self", ".", "nlay", ")", ":", "vol", "[", "l", ",", ":", ",", ":", "]", "=", "self", ".", "parent", ".", "modelgrid", ".", "thick", "[", "l", "]", "for", "r", "in", "range", "(", "self", ".", "nrow", ")", ":", "vol", "[", ":", ",", "r", ",", ":", "]", "*=", "self", ".", "delc", "[", "r", "]", "for", "c", "in", "range", "(", "self", ".", "ncol", ")", ":", "vol", "[", ":", ",", ":", ",", "c", "]", "*=", "self", ".", "delr", "[", "c", "]", "return", "vol" ]
https://github.com/modflowpy/flopy/blob/eecd1ad193c5972093c9712e5c4b7a83284f0688/flopy/modflow/mfdis.py#L427-L443
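The three loops above amount to an outer product of thickness, row spacing and column spacing. A minimal vectorised sketch with hypothetical stand-ins for modelgrid.thick, delc and delr:

import numpy as np

thick = np.full((2, 3, 4), 1.5)        # (nlay, nrow, ncol) cell thicknesses
delc = np.array([10., 10., 10.])       # row spacings, length nrow
delr = np.array([5., 5., 5., 5.])      # column spacings, length ncol
vol = thick * delc[None, :, None] * delr[None, None, :]
assert vol.shape == (2, 3, 4) and vol[0, 0, 0] == 75.0   # 1.5 * 10 * 5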
pypa/pipenv
b21baade71a86ab3ee1429f71fbc14d4f95fb75d
pipenv/patched/notpip/_vendor/pkg_resources/__init__.py
python
WorkingSet.iter_entry_points
(self, group, name=None)
return ( entry for dist in self for entry in dist.get_entry_map(group).values() if name is None or name == entry.name )
Yield entry point objects from `group` matching `name` If `name` is None, yields all entry points in `group` from all distributions in the working set, otherwise only ones matching both `group` and `name` are yielded (in distribution order).
Yield entry point objects from `group` matching `name`
[ "Yield", "entry", "point", "objects", "from", "group", "matching", "name" ]
def iter_entry_points(self, group, name=None):
    """Yield entry point objects from `group` matching `name`

    If `name` is None, yields all entry points in `group` from all
    distributions in the working set, otherwise only ones matching
    both `group` and `name` are yielded (in distribution order).
    """
    return (
        entry
        for dist in self
        for entry in dist.get_entry_map(group).values()
        if name is None or name == entry.name
    )
[ "def", "iter_entry_points", "(", "self", ",", "group", ",", "name", "=", "None", ")", ":", "return", "(", "entry", "for", "dist", "in", "self", "for", "entry", "in", "dist", ".", "get_entry_map", "(", "group", ")", ".", "values", "(", ")", "if", "name", "is", "None", "or", "name", "==", "entry", ".", "name", ")" ]
https://github.com/pypa/pipenv/blob/b21baade71a86ab3ee1429f71fbc14d4f95fb75d/pipenv/patched/notpip/_vendor/pkg_resources/__init__.py#L646-L658
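This is pipenv's vendored copy; the same method is available on the public pkg_resources API. A small usage sketch listing every installed console script:

import pkg_resources

for ep in pkg_resources.working_set.iter_entry_points("console_scripts"):
    print(ep.name, "->", ep.module_name)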
wvangansbeke/LaneDetection_End2End
93c42a488f9d909328eaa54cd6cd3dd43f81c45f
Birds_Eye_View_Loss/Networks/utils.py
python
homogenous_transformation
(x,y)
return x_vals, y_vals
Helper function to transform coordinates defined by transformation matrix Args: Matrix (multi dim - array): Transformation matrix x (array): original x coordinates y (array): original y coordinates
Helper function to transform coordinates defined by transformation matrix Args: Matrix (multi dim - array): Transformation matrix x (array): original x coordinates y (array): original y coordinates
[ "Helper", "function", "to", "transform", "coordionates", "defined", "by", "transformation", "matrix", "Args", ":", "Matrix", "(", "multi", "dim", "-", "array", ")", ":", "Transformation", "matrix", "x", "(", "array", ")", ":", "original", "x", "coordinates", "y", "(", "array", ")", ":", "original", "y", "coordinates" ]
def homogenous_transformation(x, y):
    """
    Helper function to transform coordinates defined by transformation matrix

    Args:
        Matrix (multi dim - array): Transformation matrix
        x (array): original x coordinates
        y (array): original y coordinates
    """
    y_start = 0.3
    y_stop = 1
    src = np.float32([[0.45, y_start], [0.55, y_start], [0.1, y_stop], [0.9, y_stop]])
    dst = np.float32([[0.45, y_start], [0.55, y_start], [0.45, y_stop], [0.55, y_stop]])

    M_inv = cv2.getPerspectiveTransform(dst, src)

    ones = np.ones((1, len(y)))
    coordinates = np.vstack((x, y, ones))
    trans = np.matmul(M_inv, coordinates)

    x_vals = trans[0, :] / trans[2, :]
    y_vals = trans[1, :] / trans[2, :]
    return x_vals, y_vals
[ "def", "homogenous_transformation", "(", "x", ",", "y", ")", ":", "y_start", "=", "0.3", "y_stop", "=", "1", "src", "=", "np", ".", "float32", "(", "[", "[", "0.45", ",", "y_start", "]", ",", "[", "0.55", ",", "y_start", "]", ",", "[", "0.1", ",", "y_stop", "]", ",", "[", "0.9", ",", "y_stop", "]", "]", ")", "dst", "=", "np", ".", "float32", "(", "[", "[", "0.45", ",", "y_start", "]", ",", "[", "0.55", ",", "y_start", "]", ",", "[", "0.45", ",", "y_stop", "]", ",", "[", "0.55", ",", "y_stop", "]", "]", ")", "M_inv", "=", "cv2", ".", "getPerspectiveTransform", "(", "dst", ",", "src", ")", "ones", "=", "np", ".", "ones", "(", "(", "1", ",", "len", "(", "y", ")", ")", ")", "coordinates", "=", "np", ".", "vstack", "(", "(", "x", ",", "y", ",", "ones", ")", ")", "trans", "=", "np", ".", "matmul", "(", "M_inv", ",", "coordinates", ")", "x_vals", "=", "trans", "[", "0", ",", ":", "]", "/", "trans", "[", "2", ",", ":", "]", "y_vals", "=", "trans", "[", "1", ",", ":", "]", "/", "trans", "[", "2", ",", ":", "]", "return", "x_vals", ",", "y_vals" ]
https://github.com/wvangansbeke/LaneDetection_End2End/blob/93c42a488f9d909328eaa54cd6cd3dd43f81c45f/Birds_Eye_View_Loss/Networks/utils.py#L299-L320
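A hedged usage sketch (requires OpenCV; the sample points are hypothetical lane pixels in normalised image coordinates):

import numpy as np

x = np.array([0.40, 0.50, 0.60])
y = np.array([0.50, 0.75, 1.00])
x_bev, y_bev = homogenous_transformation(x, y)
# x_bev/y_bev are the same points after the inverse perspective mapping,
# i.e. projected into the bird's-eye-view frame used by the loss.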
IngoScholtes/pathpy
857f97d1b5a7eec8466775adfed29c28f32f87aa
pathpy/MarkovSequence.py
python
MarkovSequence.getAIC
(self, k=1, m=1)
return aic
Returns the Akaike Information Criterion assuming a k-th order Markov model
Returns the Akaike Information Criterion assuming a k-th order Markov model
[ "Returns", "the", "Aikake", "Information", "Criterion", "assuming", "a", "k", "-", "th", "order", "Markov", "model" ]
def getAIC(self, k=1, m=1):
    """
    Returns the Akaike Information Criterion assuming a k-th order Markov model
    """
    if k not in self.P:
        self.fitMarkovModel(k)
    if m not in self.P:
        self.fitMarkovModel(m)

    L_k = self.getLikelihood(k, log=True)
    L_m = self.getLikelihood(m, log=True)

    s = len(self.states[1])
    n = len(self.sequence)

    aic = 2 * (s**k - s**m) * (s-1) - 2.0 * (L_k - L_m)

    return aic
[ "def", "getAIC", "(", "self", ",", "k", "=", "1", ",", "m", "=", "1", ")", ":", "if", "k", "not", "in", "self", ".", "P", ":", "self", ".", "fitMarkovModel", "(", "k", ")", "if", "m", "not", "in", "self", ".", "P", ":", "self", ".", "fitMarkovModel", "(", "m", ")", "L_k", "=", "self", ".", "getLikelihood", "(", "k", ",", "log", "=", "True", ")", "L_m", "=", "self", ".", "getLikelihood", "(", "m", ",", "log", "=", "True", ")", "s", "=", "len", "(", "self", ".", "states", "[", "1", "]", ")", "n", "=", "len", "(", "self", ".", "sequence", ")", "aic", "=", "2", "*", "(", "s", "**", "k", "-", "s", "**", "m", ")", "*", "(", "s", "-", "1", ")", "-", "2.0", "*", "(", "L_k", "-", "L_m", ")", "return", "aic" ]
https://github.com/IngoScholtes/pathpy/blob/857f97d1b5a7eec8466775adfed29c28f32f87aa/pathpy/MarkovSequence.py#L153-L171
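A worked note on the formula above: the (s**k - s**m) * (s - 1) term counts the difference in free parameters, since a k-th order model over s states has s**k contexts with s - 1 free transition probabilities each.

s, k, m = 3, 2, 1
extra_params = (s ** k - s ** m) * (s - 1)
assert extra_params == 12   # (9 - 3) * 2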
Cloud-CV/EvalAI
1884811e7759e0d095f7afb68188a7f010fa65dc
apps/participants/utils.py
python
get_list_of_challenges_for_participant_team
(participant_teams=[])
return Challenge.objects.filter(participant_teams__in=participant_teams)
Returns list of challenges participated in by a team
Returns list of challenges participated in by a team
[ "Returns", "list", "of", "challenges", "participated", "by", "a", "team" ]
def get_list_of_challenges_for_participant_team(participant_teams=[]):
    """Returns list of challenges participated in by a team"""
    return Challenge.objects.filter(participant_teams__in=participant_teams)
[ "def", "get_list_of_challenges_for_participant_team", "(", "participant_teams", "=", "[", "]", ")", ":", "return", "Challenge", ".", "objects", ".", "filter", "(", "participant_teams__in", "=", "participant_teams", ")" ]
https://github.com/Cloud-CV/EvalAI/blob/1884811e7759e0d095f7afb68188a7f010fa65dc/apps/participants/utils.py#L65-L67
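A hypothetical ORM usage sketch (`team` stands in for a ParticipantTeam instance; nothing here is from the repo):

challenges = get_list_of_challenges_for_participant_team([team])
print(challenges.count())   # a QuerySet, so it can be filtered further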
xonsh/xonsh
b76d6f994f22a4078f602f8b386f4ec280c8461f
xonsh/jobs.py
python
clean_jobs
()
return jobs_clean
Clean up jobs for exiting shell In non-interactive mode, kill all jobs. In interactive mode, check for suspended or background jobs, print a warning if any exist, and return False. Otherwise, return True.
Clean up jobs for exiting shell
[ "Clean", "up", "jobs", "for", "exiting", "shell" ]
def clean_jobs():
    """Clean up jobs for exiting shell

    In non-interactive mode, kill all jobs.

    In interactive mode, check for suspended or background jobs, print a
    warning if any exist, and return False. Otherwise, return True.
    """
    jobs_clean = True
    if XSH.env["XONSH_INTERACTIVE"]:
        _clear_dead_jobs()

        if XSH.all_jobs:
            global _last_exit_time
            hist = XSH.history
            if hist is not None and len(hist.tss) > 0:
                last_cmd_start = hist.tss[-1][0]
            else:
                last_cmd_start = None

            if _last_exit_time and last_cmd_start and _last_exit_time > last_cmd_start:
                # Exit occurred after last command started, so it was called as
                # part of the last command and is now being called again
                # immediately. Kill jobs and exit without reminder about
                # unfinished jobs in this case.
                kill_all_jobs()
            else:
                if len(XSH.all_jobs) > 1:
                    msg = "there are unfinished jobs"
                else:
                    msg = "there is an unfinished job"

                if XSH.env["SHELL_TYPE"] != "prompt_toolkit":
                    # The Ctrl+D binding for prompt_toolkit already inserts a
                    # newline
                    print()
                print(f"xonsh: {msg}", file=sys.stderr)
                print("-" * 5, file=sys.stderr)
                jobs([], stdout=sys.stderr)
                print("-" * 5, file=sys.stderr)
                print(
                    'Type "exit" or press "ctrl-d" again to force quit.',
                    file=sys.stderr,
                )
                jobs_clean = False
                _last_exit_time = time.time()
    else:
        kill_all_jobs()

    return jobs_clean
[ "def", "clean_jobs", "(", ")", ":", "jobs_clean", "=", "True", "if", "XSH", ".", "env", "[", "\"XONSH_INTERACTIVE\"", "]", ":", "_clear_dead_jobs", "(", ")", "if", "XSH", ".", "all_jobs", ":", "global", "_last_exit_time", "hist", "=", "XSH", ".", "history", "if", "hist", "is", "not", "None", "and", "len", "(", "hist", ".", "tss", ")", ">", "0", ":", "last_cmd_start", "=", "hist", ".", "tss", "[", "-", "1", "]", "[", "0", "]", "else", ":", "last_cmd_start", "=", "None", "if", "_last_exit_time", "and", "last_cmd_start", "and", "_last_exit_time", ">", "last_cmd_start", ":", "# Exit occurred after last command started, so it was called as", "# part of the last command and is now being called again", "# immediately. Kill jobs and exit without reminder about", "# unfinished jobs in this case.", "kill_all_jobs", "(", ")", "else", ":", "if", "len", "(", "XSH", ".", "all_jobs", ")", ">", "1", ":", "msg", "=", "\"there are unfinished jobs\"", "else", ":", "msg", "=", "\"there is an unfinished job\"", "if", "XSH", ".", "env", "[", "\"SHELL_TYPE\"", "]", "!=", "\"prompt_toolkit\"", ":", "# The Ctrl+D binding for prompt_toolkit already inserts a", "# newline", "print", "(", ")", "print", "(", "f\"xonsh: {msg}\"", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "\"-\"", "*", "5", ",", "file", "=", "sys", ".", "stderr", ")", "jobs", "(", "[", "]", ",", "stdout", "=", "sys", ".", "stderr", ")", "print", "(", "\"-\"", "*", "5", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "'Type \"exit\" or press \"ctrl-d\" again to force quit.'", ",", "file", "=", "sys", ".", "stderr", ",", ")", "jobs_clean", "=", "False", "_last_exit_time", "=", "time", ".", "time", "(", ")", "else", ":", "kill_all_jobs", "(", ")", "return", "jobs_clean" ]
https://github.com/xonsh/xonsh/blob/b76d6f994f22a4078f602f8b386f4ec280c8461f/xonsh/jobs.py#L313-L362
firedrakeproject/firedrake
06ab4975c14c0d4dcb79be55821f8b9e41554125
firedrake/utility_meshes.py
python
UnitTriangleMesh
(comm=COMM_WORLD)
return mesh.Mesh(plex, reorder=False)
Generate a mesh of the reference triangle :kwarg comm: Optional communicator to build the mesh on (defaults to COMM_WORLD).
Generate a mesh of the reference triangle
[ "Generate", "a", "mesh", "of", "the", "reference", "triangle" ]
def UnitTriangleMesh(comm=COMM_WORLD):
    """Generate a mesh of the reference triangle

    :kwarg comm: Optional communicator to build the mesh on (defaults to
        COMM_WORLD).
    """
    coords = [[0., 0.], [1., 0.], [0., 1.]]
    cells = [[0, 1, 2]]
    plex = mesh._from_cell_list(2, cells, coords, comm)
    return mesh.Mesh(plex, reorder=False)
[ "def", "UnitTriangleMesh", "(", "comm", "=", "COMM_WORLD", ")", ":", "coords", "=", "[", "[", "0.", ",", "0.", "]", ",", "[", "1.", ",", "0.", "]", ",", "[", "0.", ",", "1.", "]", "]", "cells", "=", "[", "[", "0", ",", "1", ",", "2", "]", "]", "plex", "=", "mesh", ".", "_from_cell_list", "(", "2", ",", "cells", ",", "coords", ",", "comm", ")", "return", "mesh", ".", "Mesh", "(", "plex", ",", "reorder", "=", "False", ")" ]
https://github.com/firedrakeproject/firedrake/blob/06ab4975c14c0d4dcb79be55821f8b9e41554125/firedrake/utility_meshes.py#L319-L328
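A hedged usage sketch (requires a working Firedrake installation):

m = UnitTriangleMesh()
print(m.num_cells(), m.num_vertices())   # expected: 1 3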
sahana/eden
1696fa50e90ce967df69f66b571af45356cc18da
modules/templates/UCCE/controllers.py
python
dc_TargetL10n.apply_method
(self, r, **attr)
return output
Entry point for REST API Args: r: the S3Request attr: controller arguments
Entry point for REST API
[ "Entry", "point", "for", "REST", "API" ]
def apply_method(self, r, **attr):
    """
    Entry point for REST API

    Args:
        r: the S3Request
        attr: controller arguments
    """
    if r.name == "target":
        if r.http == "POST" and r.representation == "json":
            # AJAX method
            # Action the request
            table = r.table
            target_id = r.id
            if not current.auth.s3_has_permission("update", table, record_id=target_id):
                r.unauthorised()
            # Update Language
            l10n = r.post_vars.get("l10n")
            if l10n is not None:
                db = current.db
                s3db = current.s3db

                # Update Target
                ltable = s3db.dc_target_l10n
                exists = db(ltable.target_id == target_id).select(ltable.id,
                                                                  ltable.language,
                                                                  limitby = (0, 1)
                                                                  ).first()
                if exists:
                    if exists.language != l10n:
                        exists.update_record(language = l10n)
                else:
                    ltable.insert(target_id = target_id,
                                  language = l10n,
                                  )

                # Update Template
                template_id = r.record.template_id
                ltable = s3db.dc_template_l10n
                exists = db(ltable.template_id == template_id).select(ltable.id,
                                                                      ltable.language,
                                                                      limitby = (0, 1)
                                                                      ).first()
                if exists:
                    if exists.language != l10n:
                        exists.update_record(language = l10n)
                else:
                    ltable.insert(template_id = template_id,
                                  language = l10n,
                                  )

                if l10n:
                    # Update Questions
                    qtable = s3db.dc_question
                    questions = db(qtable.template_id == template_id).select(qtable.id)
                    question_ids = [q.id for q in questions]
                    qltable = s3db.dc_question_l10n
                    db(qltable.question_id.belongs(question_ids)).update(language = l10n)

                # Results (Empty Message so we don't get it shown to User)
                current.response.headers["Content-Type"] = "application/json"
                output = current.xml.json_message(True, 200, "")
            else:
                r.error(400, current.T("Invalid Parameters"))
        else:
            r.error(415, current.ERROR.BAD_FORMAT)
    else:
        r.error(404, current.ERROR.BAD_RESOURCE)

    return output
[ "def", "apply_method", "(", "self", ",", "r", ",", "*", "*", "attr", ")", ":", "if", "r", ".", "name", "==", "\"target\"", ":", "if", "r", ".", "http", "==", "\"POST\"", "and", "r", ".", "representation", "==", "\"json\"", ":", "# AJAX method", "# Action the request", "table", "=", "r", ".", "table", "target_id", "=", "r", ".", "id", "if", "not", "current", ".", "auth", ".", "s3_has_permission", "(", "\"update\"", ",", "table", ",", "record_id", "=", "target_id", ")", ":", "r", ".", "unauthorised", "(", ")", "# Update Language", "l10n", "=", "r", ".", "post_vars", ".", "get", "(", "\"l10n\"", ")", "if", "l10n", "is", "not", "None", ":", "db", "=", "current", ".", "db", "s3db", "=", "current", ".", "s3db", "# Update Target", "ltable", "=", "s3db", ".", "dc_target_l10n", "exists", "=", "db", "(", "ltable", ".", "target_id", "==", "target_id", ")", ".", "select", "(", "ltable", ".", "id", ",", "ltable", ".", "language", ",", "limitby", "=", "(", "0", ",", "1", ")", ")", ".", "first", "(", ")", "if", "exists", ":", "if", "exists", ".", "language", "!=", "l10n", ":", "exists", ".", "update_record", "(", "language", "=", "l10n", ")", "else", ":", "ltable", ".", "insert", "(", "target_id", "=", "target_id", ",", "language", "=", "l10n", ",", ")", "# Update Template", "template_id", "=", "r", ".", "record", ".", "template_id", "ltable", "=", "s3db", ".", "dc_template_l10n", "exists", "=", "db", "(", "ltable", ".", "template_id", "==", "template_id", ")", ".", "select", "(", "ltable", ".", "id", ",", "ltable", ".", "language", ",", "limitby", "=", "(", "0", ",", "1", ")", ")", ".", "first", "(", ")", "if", "exists", ":", "if", "exists", ".", "language", "!=", "l10n", ":", "exists", ".", "update_record", "(", "language", "=", "l10n", ")", "else", ":", "ltable", ".", "insert", "(", "template_id", "=", "template_id", ",", "language", "=", "l10n", ",", ")", "if", "l10n", ":", "# Update Questions", "qtable", "=", "s3db", ".", "dc_question", "questions", "=", "db", "(", "qtable", ".", "template_id", "==", "template_id", ")", ".", "select", "(", "qtable", ".", "id", ")", "question_ids", "=", "[", "q", ".", "id", "for", "q", "in", "questions", "]", "qltable", "=", "s3db", ".", "dc_question_l10n", "db", "(", "qltable", ".", "question_id", ".", "belongs", "(", "question_ids", ")", ")", ".", "update", "(", "language", "=", "l10n", ")", "# Results (Empty Message so we don't get it shown to User)", "current", ".", "response", ".", "headers", "[", "\"Content-Type\"", "]", "=", "\"application/json\"", "output", "=", "current", ".", "xml", ".", "json_message", "(", "True", ",", "200", ",", "\"\"", ")", "else", ":", "r", ".", "error", "(", "400", ",", "current", ".", "T", "(", "\"Invalid Parameters\"", ")", ")", "else", ":", "r", ".", "error", "(", "415", ",", "current", ".", "ERROR", ".", "BAD_FORMAT", ")", "else", ":", "r", ".", "error", "(", "404", ",", "current", ".", "ERROR", ".", "BAD_RESOURCE", ")", "return", "output" ]
https://github.com/sahana/eden/blob/1696fa50e90ce967df69f66b571af45356cc18da/modules/templates/UCCE/controllers.py#L1259-L1330
pycom/pycom-libraries
75d0e67cb421e0576a3a9677bb0d9d81f27ebdb7
pymesh/pymesh_frozen/lib/mesh_internal.py
python
MeshInternal.receive_all_data
(self, arg)
receives all packets on socket
receives all packets on socket
[ "receives", "all", "packages", "on", "socket" ]
def receive_all_data(self, arg):
    """ receives all packets on socket """

    while True:
        rcv_data, rcv_addr = self.sock.recvfrom(1024)
        if len(rcv_data) == 0:
            break  # out of while, no packet

        rcv_ip = rcv_addr[0]
        rcv_port = rcv_addr[1]
        print_debug(4, 'Incoming %d bytes from %s (port %d):' %
                    (len(rcv_data), rcv_ip, rcv_port))
        # print_debug(3, rcv_data)
        print_debug(5, str(self.mesh.lora.stats()))

        # check if Node is BR
        if self.br_handler:
            # check if data is for the external of the Pymesh (for Pybytes)
            if rcv_data[0] == self.BR_MAGIC_BYTE and len(rcv_data) >= calcsize(self.BR_HEADER_FMT):
                br_header = unpack(self.BR_HEADER_FMT, rcv_data)
                print_debug(3, "BR pack, IP dest: %x:%x:%x:%x:%x:%x:%x:%x (port %d)" % (
                    br_header[1], br_header[2], br_header[3], br_header[4],
                    br_header[5], br_header[6], br_header[7], br_header[8],
                    br_header[9]))
                rcv_data = rcv_data[calcsize(self.BR_HEADER_FMT):]

                dest_ip = "%x:%x:%x:%x:%x:%x:%x:%x" % (
                    br_header[1], br_header[2], br_header[3], br_header[4],
                    br_header[5], br_header[6], br_header[7], br_header[8])
                dest_port = br_header[9]
                print_debug(3, rcv_data)
                (type, rcv_data) = self.get_type(rcv_data)
                print_debug(3, rcv_data)
                self.br_handler(rcv_ip, rcv_port, rcv_data, dest_ip, dest_port)
                return  # done, no more parsing as this pack was for BR

        # check packet type
        (type, rcv_data) = self.get_type(rcv_data)

        # LEADER
        if type == self.PACK_ROUTER_NEIGHBORS:
            print_debug(3, "PACK_ROUTER_NEIGHBORS received")
            self.mesh.routers_neigh_update(rcv_data)
            # no answer
        # elif type == self.PACK_ROUTER_ASK_LEADER_DATA:
        #     print_debug(3, "PACK_ROUTER_ASK_LEADER_DATA received")
        #     # send answer with Leader data
        #     pack = self.mesh.leader_data_pack()
        #     self.send_pack(self.PACK_LEADER_DATA, pack, rcv_ip)

        # ROUTER
        elif type == self.PACK_LEADER_ASK_NEIGH:
            print_debug(3, "PACK_LEADER_ASK_NEIGH received")
            payload = self.mesh.neighbors_pack()
            # time.sleep(.2)
            self.send_pack(self.PACK_ROUTER_NEIGHBORS, payload, rcv_ip)
        # elif type == self.PACK_LEADER_DATA:
        #     print_debug(3, "PACK_LEADER_DATA received")
        #     if self.mesh.leader_data_unpack(rcv_data):
        #         self.interrogate_leader_ts = time.time()

        # ALL NODES
        elif type == self.PACK_MESSAGE:
            print_debug(3, "PACK_MESSAGE received")
            # add new pack received
            message = Message(rcv_data)
            # print_debug(3, message.payload)
            message.ip = rcv_ip
            self.messages.add_rcv_message(message)

            # send back ACK
            self.send_pack(self.PACK_MESSAGE_ACK, message.pack_ack(self.MAC), rcv_ip)

            # forward message to user-application layer
            if self.message_cb:
                self.message_cb(rcv_ip, rcv_port, message.payload)

        elif type == self.PACK_MESSAGE_ACK:
            print_debug(3, "PACK_MESSAGE_ACK received")
            # mark message as received
            self.messages.rcv_ack(rcv_data)

        elif type == self.PACK_ROUTER_ASK_MACS:
            print_debug(3, "PACK_ROUTER_ASK_MACS received")
            payload = self.mesh.leader_data.get_macs_pack()
            self.send_pack(self.PACK_LEADER_MACS, payload, rcv_ip)

        elif type == self.PACK_LEADER_MACS:
            print_debug(3, "PACK_LEADER_MACS received")
            self.mesh.macs_set(rcv_data)

        elif type == self.PACK_ROUTER_ASK_CONNECTIONS:
            print_debug(3, "PACK_ROUTER_ASK_CONNECTIONS received")
            payload = self.mesh.leader_data.get_connections_pack()
            self.send_pack(self.PACK_LEADER_CONNECTIONS, payload, rcv_ip)

        elif type == self.PACK_LEADER_CONNECTIONS:
            print_debug(3, "PACK_LEADER_CONNECTIONS received")
            self.mesh.connections_set(rcv_data)

        elif type == self.PACK_ROUTER_ASK_MAC_DETAILS:
            print_debug(3, "PACK_ROUTER_ASK_MAC_DETAILS received")
            (mac_req, ) = unpack('!H', rcv_data)
            print_debug(3, str(mac_req))
            payload = self.mesh.leader_data.node_info_mac_pack(mac_req)
            if len(payload) > 0:
                self.send_pack(self.PACK_LEADER_MAC_DETAILS, payload, rcv_ip)
            else:
                print_debug(3, "No info found about MAC %d" % mac_req)

        elif type == self.PACK_LEADER_MAC_DETAILS:
            print_debug(3, "PACK_LEADER_MAC_DETAILS received")
            self.mesh.node_info_set(rcv_data)

        # elif type == self.PACK_FILE_SEND:
        #     print_debug(3, "PACK_FILE_SEND received")
        #     payload = pack("!Q", self.MAC)
        #     self.send_pack(self.PACK_FILE_SEND_ACK, payload, rcv_ip)
        #     # rcv data contains '!QHH' as header
        #     chunk = len(rcv_data) -12
        #     self.file_size += chunk
        #     print_debug(3, "size: %d, chunk %d" % (self.file_size, chunk))
        #     file_handler = "ab"  # append, by default
        #     if chunk > self.file_packsize:
        #         # started receiving a new file
        #         print_debug(3, "started receiving a new image")
        #         file_handler = "wb"  # write/create new file
        #         self.file_packsize = chunk
        #     elif chunk < self.file_packsize:
        #         print_debug(3, "DONE receiving the image")
        #         # done receiving the file
        #         self.file_packsize = 0
        #         self.file_size = 0
        #         self.messages.file_transfer_done(rcv_data[:12])
        #     # else:
        #     #     # middle of the file, just write data
        #     #     self.file.write(rcv_data)
        #     with open('/flash/dog_rcv.jpg', file_handler) as file:
        #         file.write(rcv_data[12:])
        #     print_debug(3, "writing the image")

        # elif type == self.PACK_FILE_SEND_ACK:
        #     mac_rcv = unpack("!Q", rcv_data)
        #     print_debug(3, "PACK_FILE_SEND_ACK received from MAC %d" % mac_rcv)
        #     mac_rcv = 6
        #     message = self.messages.dict.get(mac_rcv, None)
        #     if message:
        #         print_debug(3, "message found")
        #         self.send_message(message, rcv_data)
        #     else:
        #         print_debug(3, "message NOT found ", mac_rcv, self.messages.dict)

        else:
            print_debug(3, "Unknown packet, type: 0x%X" % (type))
            print_debug(3, str(rcv_data))

    pass
[ "def", "receive_all_data", "(", "self", ",", "arg", ")", ":", "while", "True", ":", "rcv_data", ",", "rcv_addr", "=", "self", ".", "sock", ".", "recvfrom", "(", "1024", ")", "if", "len", "(", "rcv_data", ")", "==", "0", ":", "break", "# out of while, no packet", "rcv_ip", "=", "rcv_addr", "[", "0", "]", "rcv_port", "=", "rcv_addr", "[", "1", "]", "print_debug", "(", "4", ",", "'Incoming %d bytes from %s (port %d):'", "%", "(", "len", "(", "rcv_data", ")", ",", "rcv_ip", ",", "rcv_port", ")", ")", "# print_debug(3, rcv_data)", "print_debug", "(", "5", ",", "str", "(", "self", ".", "mesh", ".", "lora", ".", "stats", "(", ")", ")", ")", "# check if Node is BR", "if", "self", ".", "br_handler", ":", "#check if data is for the external of the Pymesh (for Pybytes)", "if", "rcv_data", "[", "0", "]", "==", "self", ".", "BR_MAGIC_BYTE", "and", "len", "(", "rcv_data", ")", ">=", "calcsize", "(", "self", ".", "BR_HEADER_FMT", ")", ":", "br_header", "=", "unpack", "(", "self", ".", "BR_HEADER_FMT", ",", "rcv_data", ")", "print_debug", "(", "3", ",", "\"BR pack, IP dest: %x:%x:%x:%x:%x:%x:%x:%x (port %d)\"", "%", "(", "br_header", "[", "1", "]", ",", "br_header", "[", "2", "]", ",", "br_header", "[", "3", "]", ",", "br_header", "[", "4", "]", ",", "br_header", "[", "5", "]", ",", "br_header", "[", "6", "]", ",", "br_header", "[", "7", "]", ",", "br_header", "[", "8", "]", ",", "br_header", "[", "9", "]", ")", ")", "rcv_data", "=", "rcv_data", "[", "calcsize", "(", "self", ".", "BR_HEADER_FMT", ")", ":", "]", "dest_ip", "=", "\"%x:%x:%x:%x:%x:%x:%x:%x\"", "%", "(", "br_header", "[", "1", "]", ",", "br_header", "[", "2", "]", ",", "br_header", "[", "3", "]", ",", "br_header", "[", "4", "]", ",", "br_header", "[", "5", "]", ",", "br_header", "[", "6", "]", ",", "br_header", "[", "7", "]", ",", "br_header", "[", "8", "]", ")", "dest_port", "=", "br_header", "[", "9", "]", "print_debug", "(", "3", ",", "rcv_data", ")", "(", "type", ",", "rcv_data", ")", "=", "self", ".", "get_type", "(", "rcv_data", ")", "print_debug", "(", "3", ",", "rcv_data", ")", "self", ".", "br_handler", "(", "rcv_ip", ",", "rcv_port", ",", "rcv_data", ",", "dest_ip", ",", "dest_port", ")", "return", "# done, no more parsing as this pack was for BR", "# check packet type", "(", "type", ",", "rcv_data", ")", "=", "self", ".", "get_type", "(", "rcv_data", ")", "# LEADER", "if", "type", "==", "self", ".", "PACK_ROUTER_NEIGHBORS", ":", "print_debug", "(", "3", ",", "\"PACK_ROUTER_NEIGHBORS received\"", ")", "self", ".", "mesh", ".", "routers_neigh_update", "(", "rcv_data", ")", "# no answer", "# elif type == self.PACK_ROUTER_ASK_LEADER_DATA:", "# print_debug(3, \"PACK_ROUTER_ASK_LEADER_DATA received\")", "# # send answer with Leader data", "# pack = self.mesh.leader_data_pack()", "# self.send_pack(self.PACK_LEADER_DATA, pack, rcv_ip)", "# ROUTER", "elif", "type", "==", "self", ".", "PACK_LEADER_ASK_NEIGH", ":", "print_debug", "(", "3", ",", "\"PACK_LEADER_ASK_NEIGH received\"", ")", "payload", "=", "self", ".", "mesh", ".", "neighbors_pack", "(", ")", "#time.sleep(.2)", "self", ".", "send_pack", "(", "self", ".", "PACK_ROUTER_NEIGHBORS", ",", "payload", ",", "rcv_ip", ")", "# elif type == self.PACK_LEADER_DATA:", "# print_debug(3, \"PACK_LEADER_DATA received\")", "# if self.mesh.leader_data_unpack(rcv_data):", "# self.interrogate_leader_ts = time.time()", "# ALL NODES", "elif", "type", "==", "self", ".", "PACK_MESSAGE", ":", "print_debug", "(", "3", ",", "\"PACK_MESSAGE received\"", ")", "# add new pack received", "message", "=", 
"Message", "(", "rcv_data", ")", "# print_debug(3, message.payload)", "message", ".", "ip", "=", "rcv_ip", "self", ".", "messages", ".", "add_rcv_message", "(", "message", ")", "# send back ACK", "self", ".", "send_pack", "(", "self", ".", "PACK_MESSAGE_ACK", ",", "message", ".", "pack_ack", "(", "self", ".", "MAC", ")", ",", "rcv_ip", ")", "# forward message to user-application layer", "if", "self", ".", "message_cb", ":", "self", ".", "message_cb", "(", "rcv_ip", ",", "rcv_port", ",", "message", ".", "payload", ")", "elif", "type", "==", "self", ".", "PACK_MESSAGE_ACK", ":", "print_debug", "(", "3", ",", "\"PACK_MESSAGE_ACK received\"", ")", "# mark message as received", "self", ".", "messages", ".", "rcv_ack", "(", "rcv_data", ")", "elif", "type", "==", "self", ".", "PACK_ROUTER_ASK_MACS", ":", "print_debug", "(", "3", ",", "\"PACK_ROUTER_ASK_MACS received\"", ")", "payload", "=", "self", ".", "mesh", ".", "leader_data", ".", "get_macs_pack", "(", ")", "self", ".", "send_pack", "(", "self", ".", "PACK_LEADER_MACS", ",", "payload", ",", "rcv_ip", ")", "elif", "type", "==", "self", ".", "PACK_LEADER_MACS", ":", "print_debug", "(", "3", ",", "\"PACK_LEADER_MACS received\"", ")", "self", ".", "mesh", ".", "macs_set", "(", "rcv_data", ")", "elif", "type", "==", "self", ".", "PACK_ROUTER_ASK_CONNECTIONS", ":", "print_debug", "(", "3", ",", "\"PACK_ROUTER_ASK_CONNECTIONS received\"", ")", "payload", "=", "self", ".", "mesh", ".", "leader_data", ".", "get_connections_pack", "(", ")", "self", ".", "send_pack", "(", "self", ".", "PACK_LEADER_CONNECTIONS", ",", "payload", ",", "rcv_ip", ")", "elif", "type", "==", "self", ".", "PACK_LEADER_CONNECTIONS", ":", "print_debug", "(", "3", ",", "\"PACK_LEADER_CONNECTIONS received\"", ")", "self", ".", "mesh", ".", "connections_set", "(", "rcv_data", ")", "elif", "type", "==", "self", ".", "PACK_ROUTER_ASK_MAC_DETAILS", ":", "print_debug", "(", "3", ",", "\"PACK_ROUTER_ASK_MAC_DETAILS received\"", ")", "(", "mac_req", ",", ")", "=", "unpack", "(", "'!H'", ",", "rcv_data", ")", "print_debug", "(", "3", ",", "str", "(", "mac_req", ")", ")", "payload", "=", "self", ".", "mesh", ".", "leader_data", ".", "node_info_mac_pack", "(", "mac_req", ")", "if", "len", "(", "payload", ")", ">", "0", ":", "self", ".", "send_pack", "(", "self", ".", "PACK_LEADER_MAC_DETAILS", ",", "payload", ",", "rcv_ip", ")", "else", ":", "print_debug", "(", "3", ",", "\"No info found about MAC %d\"", "%", "mac_req", ")", "elif", "type", "==", "self", ".", "PACK_LEADER_MAC_DETAILS", ":", "print_debug", "(", "3", ",", "\"PACK_LEADER_MAC_DETAILS received\"", ")", "self", ".", "mesh", ".", "node_info_set", "(", "rcv_data", ")", "# elif type == self.PACK_FILE_SEND:", "# print_debug(3, \"PACK_FILE_SEND received\")", "# payload = pack(\"!Q\", self.MAC)", "# self.send_pack(self.PACK_FILE_SEND_ACK, payload, rcv_ip)", "# # rcv data contains '!QHH' as header", "# chunk = len(rcv_data) -12", "# self.file_size += chunk", "# print_debug(3, \"size: %d, chunk %d\" % (self.file_size, chunk))", "# file_handler = \"ab\" # append, by default", "# if chunk > self.file_packsize:", "# # started receiving a new file", "# print_debug(3, \"started receiving a new image\")", "# file_handler = \"wb\" # write/create new file", "# self.file_packsize = chunk", "# elif chunk < self.file_packsize:", "# print_debug(3, \"DONE receiving the image\")", "# # done receiving the file", "# self.file_packsize = 0", "# self.file_size = 0", "# self.messages.file_transfer_done(rcv_data[:12])", "# # else:", "# # #middle of the 
file, just write data", "# # self.file.write(rcv_data)", "# with open('/flash/dog_rcv.jpg', file_handler) as file:", "# file.write(rcv_data[12:])", "# print_debug(3, \"writing the image\")", "# elif type == self.PACK_FILE_SEND_ACK:", "# mac_rcv = unpack(\"!Q\", rcv_data)", "# print_debug(3, \"PACK_FILE_SEND_ACK received from MAC %d\"%mac_rcv)", "# mac_rcv = 6", "# message = self.messages.dict.get(mac_rcv, None)", "# if message:", "# print_debug(3, \"message found\")", "# self.send_message(message, rcv_data)", "# else:", "# print_debug(3, \"message NOT found \", mac_rcv, self.messages.dict)", "else", ":", "print_debug", "(", "3", ",", "\"Unknown packet, type: 0x%X\"", "%", "(", "type", ")", ")", "print_debug", "(", "3", ",", "str", "(", "rcv_data", ")", ")", "pass" ]
https://github.com/pycom/pycom-libraries/blob/75d0e67cb421e0576a3a9677bb0d9d81f27ebdb7/pymesh/pymesh_frozen/lib/mesh_internal.py#L450-L609
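Illustrative only: how the BR branch above rebuilds the destination IPv6 address from the eight 16-bit words carried in the header (the sample address is hypothetical):

words = (0xfd00, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1)
dest_ip = "%x:%x:%x:%x:%x:%x:%x:%x" % words
assert dest_ip == "fd00:0:0:0:0:0:0:1"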
Speedml/speedml
c78effcdf745b723a7b558e1ee4639f7ba173d22
speedml/feature.py
python
Feature.fillna
(self, a, new)
return message.format(start)
Fills empty or null values in ``a`` feature name with ``new`` string value.
Fills empty or null values in ``a`` feature name with ``new`` string value.
[ "Fills", "empty", "or", "null", "values", "in", "a", "feature", "name", "with", "new", "string", "value", "." ]
def fillna(self, a, new):
    """
    Fills empty or null values in ``a`` feature name with ``new`` string value.
    """
    start = Base.train[a].isnull().sum() + Base.test[a].isnull().sum()

    Base.train[a] = Base.train[a].fillna(new)
    Base.test[a] = Base.test[a].fillna(new)

    message = 'Filled {} null values across test and train datasets.'
    return message.format(start)
[ "def", "fillna", "(", "self", ",", "a", ",", "new", ")", ":", "start", "=", "Base", ".", "train", "[", "a", "]", ".", "isnull", "(", ")", ".", "sum", "(", ")", "+", "Base", ".", "test", "[", "a", "]", ".", "isnull", "(", ")", ".", "sum", "(", ")", "Base", ".", "train", "[", "a", "]", "=", "Base", ".", "train", "[", "a", "]", ".", "fillna", "(", "new", ")", "Base", ".", "test", "[", "a", "]", "=", "Base", ".", "test", "[", "a", "]", ".", "fillna", "(", "new", ")", "message", "=", "'Filled {} null values across test and train datasets.'", "return", "message", ".", "format", "(", "start", ")" ]
https://github.com/Speedml/speedml/blob/c78effcdf745b723a7b558e1ee4639f7ba173d22/speedml/feature.py#L54-L64
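A hedged usage sketch with the Speedml front-end (the CSV paths and column names are Titanic-style assumptions, not from the repo):

from speedml import Speedml

sml = Speedml("train.csv", "test.csv", target="Survived", uid="PassengerId")
print(sml.feature.fillna(a="Cabin", new="Z"))   # 'Filled N null values ...'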
CGATOxford/cgat
326aad4694bdfae8ddc194171bb5d73911243947
obsolete/pipeline_species_conservation.py
python
plotFigure1cGCContent
( infiles, outfiles)
Figure 1c: density plots of GC content
Figure 1c: density plots of GC content
[ "Figure", "1c", ":", "density", "plots", "of", "GC", "content" ]
def plotFigure1cGCContent(infiles, outfiles):
    '''Figure 1c: density plots of GC content'''
    capseq_out, control_out = outfiles
    indir = os.path.dirname(infiles[0])
    scriptsdir = PARAMS["scriptsdir"]
    R('''source("%(scriptsdir)s/R/proj007/proj007.R") ''' % locals())
    R('''speciesPlot(dir="%(indir)s", pattern="*testes-cap.replicated.gc.export", main="Testes CAPseq", xlab="GC Content", filename="%(capseq_out)s", plotcol=2, xlimit=c(0,1), ylimit=c(0,15))''' % locals())
    R('''speciesPlot(dir="%(indir)s", pattern="*testes-cap.replicated.gc.export", main="Testes Control", xlab="GC Content", filename="%(control_out)s", plotcol=3, xlimit=c(0,1), ylimit=c(0,15))''' % locals())
[ "def", "plotFigure1cGCContent", "(", "infiles", ",", "outfiles", ")", ":", "capseq_out", ",", "control_out", "=", "outfiles", "indir", "=", "os", ".", "path", ".", "dirname", "(", "infiles", "[", "0", "]", ")", "scriptsdir", "=", "PARAMS", "[", "\"scriptsdir\"", "]", "R", "(", "'''source(\"%(scriptsdir)s/R/proj007/proj007.R\") '''", "%", "locals", "(", ")", ")", "R", "(", "'''speciesPlot(dir=\"%(indir)s\", pattern=\"*testes-cap.replicated.gc.export\", main=\"Testes CAPseq\", xlab=\"GC Content\", filename=\"%(capseq_out)s\", plotcol=2, xlimit=c(0,1), ylimit=c(0,15))'''", "%", "locals", "(", ")", ")", "R", "(", "'''speciesPlot(dir=\"%(indir)s\", pattern=\"*testes-cap.replicated.gc.export\", main=\"Testes Control\", xlab=\"GC Content\", filename=\"%(control_out)s\", plotcol=3, xlimit=c(0,1), ylimit=c(0,15))'''", "%", "locals", "(", ")", ")" ]
https://github.com/CGATOxford/cgat/blob/326aad4694bdfae8ddc194171bb5d73911243947/obsolete/pipeline_species_conservation.py#L939-L946
frappe/frappe
b64cab6867dfd860f10ccaf41a4ec04bc890b583
frappe/database/database.py
python
Database.table_exists
(self, doctype, cached=True)
return ("tab" + doctype) in self.get_tables(cached=cached)
Returns True if table for given doctype exists.
Returns True if table for given doctype exists.
[ "Returns", "True", "if", "table", "for", "given", "doctype", "exists", "." ]
def table_exists(self, doctype, cached=True): """Returns True if table for given doctype exists.""" return ("tab" + doctype) in self.get_tables(cached=cached)
[ "def", "table_exists", "(", "self", ",", "doctype", ",", "cached", "=", "True", ")", ":", "return", "(", "\"tab\"", "+", "doctype", ")", "in", "self", ".", "get_tables", "(", "cached", "=", "cached", ")" ]
https://github.com/frappe/frappe/blob/b64cab6867dfd860f10ccaf41a4ec04bc890b583/frappe/database/database.py#L844-L846
lovelylain/pyctp
fd304de4b50c4ddc31a4190b1caaeb5dec66bc5d
example/pyctp/my/sticks.py
python
dma_opener.check
(self,cur_trade)
return False
[]
def check(self,cur_trade): mcur = sum(self.wbuffer.data[:-5])/(self.length-4) #mhigh = max(self.wbuffer.data[:-4]) #mlow = min(self.wbuffer.data[:-4]) umcur = mcur + self.cur_tick.dopen/300 #umcur = mcur + (mhigh-mlow) if self.spre == False and self.cur_tick.price > umcur: self.spre = True return True else: self.spre = False return False
[ "def", "check", "(", "self", ",", "cur_trade", ")", ":", "mcur", "=", "sum", "(", "self", ".", "wbuffer", ".", "data", "[", ":", "-", "5", "]", ")", "/", "(", "self", ".", "length", "-", "4", ")", "#mhigh = max(self.wbuffer.data[:-4])", "#mlow = min(self.wbuffer.data[:-4])", "umcur", "=", "mcur", "+", "self", ".", "cur_tick", ".", "dopen", "/", "300", "#umcur = mcur + (mhigh-mlow)", "if", "self", ".", "spre", "==", "False", "and", "self", ".", "cur_tick", ".", "price", ">", "umcur", ":", "self", ".", "spre", "=", "True", "return", "True", "else", ":", "self", ".", "spre", "=", "False", "return", "False" ]
https://github.com/lovelylain/pyctp/blob/fd304de4b50c4ddc31a4190b1caaeb5dec66bc5d/example/pyctp/my/sticks.py#L754-L765
google/pytype
fa43edc95dd42ade6e3147d6580d63e778c9d506
pytype/vm.py
python
VirtualMachine.byte_BUILD_MAP
(self, state, op)
return state.push(the_map)
Build a dictionary.
Build a dictionary.
[ "Build", "a", "dictionary", "." ]
def byte_BUILD_MAP(self, state, op): """Build a dictionary.""" the_map = self.ctx.convert.build_map(state.node) state, args = state.popn(2 * op.arg) for i in range(op.arg): key, val = args[2*i], args[2*i+1] state = self.store_subscr(state, the_map, key, val) return state.push(the_map)
[ "def", "byte_BUILD_MAP", "(", "self", ",", "state", ",", "op", ")", ":", "the_map", "=", "self", ".", "ctx", ".", "convert", ".", "build_map", "(", "state", ".", "node", ")", "state", ",", "args", "=", "state", ".", "popn", "(", "2", "*", "op", ".", "arg", ")", "for", "i", "in", "range", "(", "op", ".", "arg", ")", ":", "key", ",", "val", "=", "args", "[", "2", "*", "i", "]", ",", "args", "[", "2", "*", "i", "+", "1", "]", "state", "=", "self", ".", "store_subscr", "(", "state", ",", "the_map", ",", "key", ",", "val", ")", "return", "state", ".", "push", "(", "the_map", ")" ]
https://github.com/google/pytype/blob/fa43edc95dd42ade6e3147d6580d63e778c9d506/pytype/vm.py#L1710-L1717
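byte_BUILD_MAP above pops 2*op.arg stack values, pairing args[2*i] with args[2*i+1]. CPython's dis module makes that operand count visible; a quick sketch (no pytype needed, and note that recent CPython emits BUILD_CONST_KEY_MAP instead when every key is a constant):

import dis

# Keys are names here, so the compiler emits BUILD_MAP 2: the stack holds
# key1, value1, key2, value2 in exactly the order the loop above indexes them.
dis.dis(compile("{k: 1, v: 2}", "<demo>", "eval"))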
boston-dynamics/spot-sdk
5ffa12e6943a47323c7279d86e30346868755f52
python/examples/mission_recorder/mission_recorder.py
python
RecorderInterface._download_and_write_waypoint_snapshots
(self, waypoints)
Download the waypoint snapshots from robot to the specified, local filepath location.
Download the waypoint snapshots from robot to the specified, local filepath location.
[ "Download", "the", "waypoint", "snapshots", "from", "robot", "to", "the", "specified", "local", "filepath", "location", "." ]
def _download_and_write_waypoint_snapshots(self, waypoints): """Download the waypoint snapshots from robot to the specified, local filepath location.""" num_waypoint_snapshots_downloaded = 0 for waypoint in waypoints: try: waypoint_snapshot = self._graph_nav_client.download_waypoint_snapshot( waypoint.snapshot_id) except Exception: # Failure in downloading waypoint snapshot. Continue to next snapshot. self.add_message("Failed to download waypoint snapshot: " + waypoint.snapshot_id) continue write_bytes(os.path.join(self._download_filepath, 'waypoint_snapshots'), waypoint.snapshot_id, waypoint_snapshot.SerializeToString()) num_waypoint_snapshots_downloaded += 1 self.add_message("Downloaded {} of the total {} waypoint snapshots.".format( num_waypoint_snapshots_downloaded, len(waypoints)))
[ "def", "_download_and_write_waypoint_snapshots", "(", "self", ",", "waypoints", ")", ":", "num_waypoint_snapshots_downloaded", "=", "0", "for", "waypoint", "in", "waypoints", ":", "try", ":", "waypoint_snapshot", "=", "self", ".", "_graph_nav_client", ".", "download_waypoint_snapshot", "(", "waypoint", ".", "snapshot_id", ")", "except", "Exception", ":", "# Failure in downloading waypoint snapshot. Continue to next snapshot.", "self", ".", "add_message", "(", "\"Failed to download waypoint snapshot: \"", "+", "waypoint", ".", "snapshot_id", ")", "continue", "write_bytes", "(", "os", ".", "path", ".", "join", "(", "self", ".", "_download_filepath", ",", "'waypoint_snapshots'", ")", ",", "waypoint", ".", "snapshot_id", ",", "waypoint_snapshot", ".", "SerializeToString", "(", ")", ")", "num_waypoint_snapshots_downloaded", "+=", "1", "self", ".", "add_message", "(", "\"Downloaded {} of the total {} waypoint snapshots.\"", ".", "format", "(", "num_waypoint_snapshots_downloaded", ",", "len", "(", "waypoints", ")", ")", ")" ]
https://github.com/boston-dynamics/spot-sdk/blob/5ffa12e6943a47323c7279d86e30346868755f52/python/examples/mission_recorder/mission_recorder.py#L713-L728
raveberry/raveberry
df0186c94b238b57de86d3fd5c595dcd08a7c708
backend/core/musiq/localdrive.py
python
LocalPlaylistProvider.get_id_from_external_url
(url: str)
return url[len("local_library/") :]
[]
def get_id_from_external_url(url: str) -> str: return url[len("local_library/") :]
[ "def", "get_id_from_external_url", "(", "url", ":", "str", ")", "->", "str", ":", "return", "url", "[", "len", "(", "\"local_library/\"", ")", ":", "]" ]
https://github.com/raveberry/raveberry/blob/df0186c94b238b57de86d3fd5c595dcd08a7c708/backend/core/musiq/localdrive.py#L127-L128
IronLanguages/ironpython3
7a7bb2a872eeab0d1009fc8a6e24dca43f65b693
Src/StdLib/Lib/tkinter/__init__.py
python
BaseWidget._setup
(self, master, cnf)
Internal function. Sets up information about children.
Internal function. Sets up information about children.
[ "Internal", "function", ".", "Sets", "up", "information", "about", "children", "." ]
def _setup(self, master, cnf): """Internal function. Sets up information about children.""" if _support_default_root: global _default_root if not master: if not _default_root: _default_root = Tk() master = _default_root self.master = master self.tk = master.tk name = None if 'name' in cnf: name = cnf['name'] del cnf['name'] if not name: name = repr(id(self)) self._name = name if master._w=='.': self._w = '.' + name else: self._w = master._w + '.' + name self.children = {} if self._name in self.master.children: self.master.children[self._name].destroy() self.master.children[self._name] = self
[ "def", "_setup", "(", "self", ",", "master", ",", "cnf", ")", ":", "if", "_support_default_root", ":", "global", "_default_root", "if", "not", "master", ":", "if", "not", "_default_root", ":", "_default_root", "=", "Tk", "(", ")", "master", "=", "_default_root", "self", ".", "master", "=", "master", "self", ".", "tk", "=", "master", ".", "tk", "name", "=", "None", "if", "'name'", "in", "cnf", ":", "name", "=", "cnf", "[", "'name'", "]", "del", "cnf", "[", "'name'", "]", "if", "not", "name", ":", "name", "=", "repr", "(", "id", "(", "self", ")", ")", "self", ".", "_name", "=", "name", "if", "master", ".", "_w", "==", "'.'", ":", "self", ".", "_w", "=", "'.'", "+", "name", "else", ":", "self", ".", "_w", "=", "master", ".", "_w", "+", "'.'", "+", "name", "self", ".", "children", "=", "{", "}", "if", "self", ".", "_name", "in", "self", ".", "master", ".", "children", ":", "self", ".", "master", ".", "children", "[", "self", ".", "_name", "]", ".", "destroy", "(", ")", "self", ".", "master", ".", "children", "[", "self", ".", "_name", "]", "=", "self" ]
https://github.com/IronLanguages/ironpython3/blob/7a7bb2a872eeab0d1009fc8a6e24dca43f65b693/Src/StdLib/Lib/tkinter/__init__.py#L2089-L2113
learningequality/ka-lite
571918ea668013dcf022286ea85eff1c5333fb8b
kalite/packages/bundled/django/db/backends/sqlite3/introspection.py
python
DatabaseIntrospection.get_table_list
(self, cursor)
return [row[0] for row in cursor.fetchall()]
Returns a list of table names in the current database.
Returns a list of table names in the current database.
[ "Returns", "a", "list", "of", "table", "names", "in", "the", "current", "database", "." ]
def get_table_list(self, cursor): "Returns a list of table names in the current database." # Skip the sqlite_sequence system table used for autoincrement key # generation. cursor.execute(""" SELECT name FROM sqlite_master WHERE type='table' AND NOT name='sqlite_sequence' ORDER BY name""") return [row[0] for row in cursor.fetchall()]
[ "def", "get_table_list", "(", "self", ",", "cursor", ")", ":", "# Skip the sqlite_sequence system table used for autoincrement key", "# generation.", "cursor", ".", "execute", "(", "\"\"\"\n SELECT name FROM sqlite_master\n WHERE type='table' AND NOT name='sqlite_sequence'\n ORDER BY name\"\"\"", ")", "return", "[", "row", "[", "0", "]", "for", "row", "in", "cursor", ".", "fetchall", "(", ")", "]" ]
https://github.com/learningequality/ka-lite/blob/571918ea668013dcf022286ea85eff1c5333fb8b/kalite/packages/bundled/django/db/backends/sqlite3/introspection.py#L51-L59
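The introspection above is plain SQLite, so the same sqlite_master query runs against any connection. A self-contained sketch with the standard-library sqlite3 module:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE app_item (id INTEGER PRIMARY KEY AUTOINCREMENT)")

cursor = conn.cursor()
# Same query as the method above: list user tables, skipping sqlite_sequence,
# which SQLite maintains internally for AUTOINCREMENT columns.
cursor.execute("""
    SELECT name FROM sqlite_master
    WHERE type='table' AND NOT name='sqlite_sequence'
    ORDER BY name""")
print([row[0] for row in cursor.fetchall()])  # ['app_item']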
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
cb692f527e4e819b6c228187c5702d990a180043
external/Scripting Engine/Xenotix Python Scripting Engine/Lib/email/message.py
python
Message.get_content_maintype
(self)
return ctype.split('/')[0]
Return the message's main content type. This is the `maintype' part of the string returned by get_content_type().
Return the message's main content type.
[ "Return", "the", "message", "s", "main", "content", "type", "." ]
def get_content_maintype(self): """Return the message's main content type. This is the `maintype' part of the string returned by get_content_type(). """ ctype = self.get_content_type() return ctype.split('/')[0]
[ "def", "get_content_maintype", "(", "self", ")", ":", "ctype", "=", "self", ".", "get_content_type", "(", ")", "return", "ctype", ".", "split", "(", "'/'", ")", "[", "0", "]" ]
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/external/Scripting Engine/Xenotix Python Scripting Engine/Lib/email/message.py#L456-L463
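get_content_maintype is just the text before the slash of get_content_type. A short sketch with the stdlib email package:

from email.message import Message

msg = Message()
msg.set_type("text/plain")
print(msg.get_content_type())      # text/plain
print(msg.get_content_maintype())  # text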
yaysummeriscoming/DALI_pytorch_demo
8f62ebd3b30892023616451673678be8e33a56d1
dali.py
python
DaliIteratorCPUNoPrefetch.__next__
(self)
return input, target
[]
def __next__(self): data = next(self._dali_iterator) # Decode the data output input = data[0]['data'] target = data[0]['label'].squeeze().long() # DALI should already output target on device # Copy to GPU & apply final processing in seperate CUDA stream input = input.cuda(non_blocking=True) input = input.permute(0, 3, 1, 2) # Input tensor is transferred to GPU as 8 bit, to save bandwidth if self.fp16: input = input.half() else: input = input.float() input = input.sub_(self.mean).div_(self.std) return input, target
[ "def", "__next__", "(", "self", ")", ":", "data", "=", "next", "(", "self", ".", "_dali_iterator", ")", "# Decode the data output", "input", "=", "data", "[", "0", "]", "[", "'data'", "]", "target", "=", "data", "[", "0", "]", "[", "'label'", "]", ".", "squeeze", "(", ")", ".", "long", "(", ")", "# DALI should already output target on device", "# Copy to GPU & apply final processing in seperate CUDA stream", "input", "=", "input", ".", "cuda", "(", "non_blocking", "=", "True", ")", "input", "=", "input", ".", "permute", "(", "0", ",", "3", ",", "1", ",", "2", ")", "# Input tensor is transferred to GPU as 8 bit, to save bandwidth", "if", "self", ".", "fp16", ":", "input", "=", "input", ".", "half", "(", ")", "else", ":", "input", "=", "input", ".", "float", "(", ")", "input", "=", "input", ".", "sub_", "(", "self", ".", "mean", ")", ".", "div_", "(", "self", ".", "std", ")", "return", "input", ",", "target" ]
https://github.com/yaysummeriscoming/DALI_pytorch_demo/blob/8f62ebd3b30892023616451673678be8e33a56d1/dali.py#L357-L376
collective/icalendar
90c059350698efa3da7486f22edcb59757f33df9
src/icalendar/cli.py
python
_format_attendees
(attendees)
return _format_name(attendees)
Format the list of attendees. :arg any attendees: Either a list, a string or a vCalAddress object. :returns str: Formatted list of attendees.
Format the list of attendees.
[ "Format", "the", "list", "of", "attendees", "." ]
def _format_attendees(attendees): """Format the list of attendees. :arg any attendees: Either a list, a string or a vCalAddress object. :returns str: Formatted list of attendees. """ if type(attendees) == list: return '\n '.join(map(_format_name, attendees)) return _format_name(attendees)
[ "def", "_format_attendees", "(", "attendees", ")", ":", "if", "type", "(", "attendees", ")", "==", "list", ":", "return", "'\\n '", ".", "join", "(", "map", "(", "_format_name", ",", "attendees", ")", ")", "return", "_format_name", "(", "attendees", ")" ]
https://github.com/collective/icalendar/blob/90c059350698efa3da7486f22edcb59757f33df9/src/icalendar/cli.py#L43-L52
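_format_attendees only branches on whether the value arrived as a list; the per-entry work lives in _format_name, which is not shown in this record. A standalone sketch of the same dispatch with a hypothetical stand-in formatter:

def _format_name(attendee):
    # Hypothetical stand-in for icalendar's _format_name: drop the mailto: prefix.
    return str(attendee).replace("mailto:", "")

def format_attendees(attendees):
    # A lone attendee may be a string or vCalAddress; several arrive as a list.
    if isinstance(attendees, list):
        return "\n    ".join(map(_format_name, attendees))
    return _format_name(attendees)

print(format_attendees(["mailto:a@example.com", "mailto:b@example.com"]))
print(format_attendees("mailto:solo@example.com"))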
xjsender/haoide
717dd706db1169bfc41e818ac6fc6cd9a0aef12d
util.py
python
load_metadata_cache
(reload_cache=False, username=None)
return globals()["components"]
Reload component cache in globals()
Reload component cache in globals()
[ "Reload", "component", "cache", "in", "globals", "()" ]
def load_metadata_cache(reload_cache=False, username=None): """ Reload component cache in globals() """ if reload_cache or "components" not in globals(): component_metadata = sublime.load_settings(context.COMPONENT_METADATA_SETTINGS) if not username: username = context.get_setting("username") globals()["components"] = component_metadata.get(username, {}) return globals()["components"]
[ "def", "load_metadata_cache", "(", "reload_cache", "=", "False", ",", "username", "=", "None", ")", ":", "if", "reload_cache", "or", "\"components\"", "not", "in", "globals", "(", ")", ":", "component_metadata", "=", "sublime", ".", "load_settings", "(", "context", ".", "COMPONENT_METADATA_SETTINGS", ")", "if", "not", "username", ":", "username", "=", "context", ".", "get_setting", "(", "\"username\"", ")", "globals", "(", ")", "[", "\"components\"", "]", "=", "component_metadata", ".", "get", "(", "username", ",", "{", "}", ")", "return", "globals", "(", ")", "[", "\"components\"", "]" ]
https://github.com/xjsender/haoide/blob/717dd706db1169bfc41e818ac6fc6cd9a0aef12d/util.py#L3150-L3159
dimagi/commcare-hq
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
corehq/ex-submodules/phonelog/utils.py
python
_process_user_error_subreport
(domain, xform)
[]
def _process_user_error_subreport(domain, xform): if UserErrorEntry.objects.filter(xform_id=xform.form_id).exists(): return errors = _get_logs(xform.form_data, 'user_error_subreport', 'user_error') to_save = [] for i, error in enumerate(errors): # beta versions have 'version', but the name should now be 'app_build'. # Probably fine to remove after June 2016. version = error['app_build'] if 'app_build' in error else error['version'] entry = UserErrorEntry( domain=domain, xform_id=xform.form_id, i=i, app_id=error['app_id'], version_number=int(version), date=error["@date"], server_date=xform.received_on, user_id=error['user_id'], expr=error['expr'], msg=error['msg'], session=error['session'], type=error['type'], context_node=error.get('context_node', ''), ) to_save.append(entry) UserErrorEntry.objects.bulk_create(to_save)
[ "def", "_process_user_error_subreport", "(", "domain", ",", "xform", ")", ":", "if", "UserErrorEntry", ".", "objects", ".", "filter", "(", "xform_id", "=", "xform", ".", "form_id", ")", ".", "exists", "(", ")", ":", "return", "errors", "=", "_get_logs", "(", "xform", ".", "form_data", ",", "'user_error_subreport'", ",", "'user_error'", ")", "to_save", "=", "[", "]", "for", "i", ",", "error", "in", "enumerate", "(", "errors", ")", ":", "# beta versions have 'version', but the name should now be 'app_build'.", "# Probably fine to remove after June 2016.", "version", "=", "error", "[", "'app_build'", "]", "if", "'app_build'", "in", "error", "else", "error", "[", "'version'", "]", "entry", "=", "UserErrorEntry", "(", "domain", "=", "domain", ",", "xform_id", "=", "xform", ".", "form_id", ",", "i", "=", "i", ",", "app_id", "=", "error", "[", "'app_id'", "]", ",", "version_number", "=", "int", "(", "version", ")", ",", "date", "=", "error", "[", "\"@date\"", "]", ",", "server_date", "=", "xform", ".", "received_on", ",", "user_id", "=", "error", "[", "'user_id'", "]", ",", "expr", "=", "error", "[", "'expr'", "]", ",", "msg", "=", "error", "[", "'msg'", "]", ",", "session", "=", "error", "[", "'session'", "]", ",", "type", "=", "error", "[", "'type'", "]", ",", "context_node", "=", "error", ".", "get", "(", "'context_node'", ",", "''", ")", ",", ")", "to_save", ".", "append", "(", "entry", ")", "UserErrorEntry", ".", "objects", ".", "bulk_create", "(", "to_save", ")" ]
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/ex-submodules/phonelog/utils.py#L124-L149
bikalims/bika.lims
35e4bbdb5a3912cae0b5eb13e51097c8b0486349
bika/lims/jsonapi/request.py
python
get_children
(default=None)
return is_true("children", default)
returns the 'children' from the request
returns the 'children' from the request
[ "returns", "the", "children", "from", "the", "request" ]
def get_children(default=None): """ returns the 'children' from the request """ return is_true("children", default)
[ "def", "get_children", "(", "default", "=", "None", ")", ":", "return", "is_true", "(", "\"children\"", ",", "default", ")" ]
https://github.com/bikalims/bika.lims/blob/35e4bbdb5a3912cae0b5eb13e51097c8b0486349/bika/lims/jsonapi/request.py#L92-L95
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/setuptools/wheel.py
python
Wheel._install_as_egg
(self, destination_eggdir, zf)
[]
def _install_as_egg(self, destination_eggdir, zf): dist_basename = '%s-%s' % (self.project_name, self.version) dist_info = self.get_dist_info(zf) dist_data = '%s.data' % dist_basename egg_info = os.path.join(destination_eggdir, 'EGG-INFO') self._convert_metadata(zf, destination_eggdir, dist_info, egg_info) self._move_data_entries(destination_eggdir, dist_data) self._fix_namespace_packages(egg_info, destination_eggdir)
[ "def", "_install_as_egg", "(", "self", ",", "destination_eggdir", ",", "zf", ")", ":", "dist_basename", "=", "'%s-%s'", "%", "(", "self", ".", "project_name", ",", "self", ".", "version", ")", "dist_info", "=", "self", ".", "get_dist_info", "(", "zf", ")", "dist_data", "=", "'%s.data'", "%", "dist_basename", "egg_info", "=", "os", ".", "path", ".", "join", "(", "destination_eggdir", ",", "'EGG-INFO'", ")", "self", ".", "_convert_metadata", "(", "zf", ",", "destination_eggdir", ",", "dist_info", ",", "egg_info", ")", "self", ".", "_move_data_entries", "(", "destination_eggdir", ",", "dist_data", ")", "self", ".", "_fix_namespace_packages", "(", "egg_info", ",", "destination_eggdir", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/setuptools/wheel.py#L102-L110
mozilla/pontoon
d26999eea57902a30b5c15e9b77277fe7e76a60f
pontoon/teams/views.py
python
ajax_insights
(request, locale)
return render(request, "teams/includes/insights.html", insights)
Insights tab.
Insights tab.
[ "Insights", "tab", "." ]
def ajax_insights(request, locale): """Insights tab.""" if not settings.ENABLE_INSIGHTS_TAB: raise ImproperlyConfigured("ENABLE_INSIGHTS_TAB variable not set in settings.") locale = get_object_or_404(Locale, code=locale) insights = get_locale_insights(Q(locale=locale)) return render(request, "teams/includes/insights.html", insights)
[ "def", "ajax_insights", "(", "request", ",", "locale", ")", ":", "if", "not", "settings", ".", "ENABLE_INSIGHTS_TAB", ":", "raise", "ImproperlyConfigured", "(", "\"ENABLE_INSIGHTS_TAB variable not set in settings.\"", ")", "locale", "=", "get_object_or_404", "(", "Locale", ",", "code", "=", "locale", ")", "insights", "=", "get_locale_insights", "(", "Q", "(", "locale", "=", "locale", ")", ")", "return", "render", "(", "request", ",", "\"teams/includes/insights.html\"", ",", "insights", ")" ]
https://github.com/mozilla/pontoon/blob/d26999eea57902a30b5c15e9b77277fe7e76a60f/pontoon/teams/views.py#L101-L109
tendenci/tendenci
0f2c348cc0e7d41bc56f50b00ce05544b083bf1d
tendenci/apps/base/models.py
python
BaseImport.__str__
(self)
return self.get_file().file.name
[]
def __str__(self): return self.get_file().file.name
[ "def", "__str__", "(", "self", ")", ":", "return", "self", ".", "get_file", "(", ")", ".", "file", ".", "name" ]
https://github.com/tendenci/tendenci/blob/0f2c348cc0e7d41bc56f50b00ce05544b083bf1d/tendenci/apps/base/models.py#L96-L97
perone/medicaltorch
34ea15075a57271940d26684c34767a8a9e8fb58
medicaltorch/models.py
python
UpConv.forward
(self, x, y)
return x
[]
def forward(self, x, y): x = self.up1(x, scale_factor=2, mode='bilinear', align_corners=True) x = torch.cat([x, y], dim=1) x = self.downconv(x) return x
[ "def", "forward", "(", "self", ",", "x", ",", "y", ")", ":", "x", "=", "self", ".", "up1", "(", "x", ",", "scale_factor", "=", "2", ",", "mode", "=", "'bilinear'", ",", "align_corners", "=", "True", ")", "x", "=", "torch", ".", "cat", "(", "[", "x", ",", "y", "]", ",", "dim", "=", "1", ")", "x", "=", "self", ".", "downconv", "(", "x", ")", "return", "x" ]
https://github.com/perone/medicaltorch/blob/34ea15075a57271940d26684c34767a8a9e8fb58/medicaltorch/models.py#L204-L208
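UpConv.forward is the usual U-Net decoder step: upsample, concatenate the encoder skip along channels, convolve. A minimal PyTorch sketch, assuming self.up1 wraps F.interpolate and self.downconv is an ordinary Conv2d (neither is defined in this record):

import torch
import torch.nn.functional as F

x = torch.randn(1, 64, 16, 16)   # decoder feature map
y = torch.randn(1, 64, 32, 32)   # skip connection from the encoder
conv = torch.nn.Conv2d(128, 64, kernel_size=3, padding=1)

x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
x = torch.cat([x, y], dim=1)     # channels: 64 + 64 = 128
x = conv(x)
print(x.shape)                   # torch.Size([1, 64, 32, 32])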
Rayhane-mamah/Tacotron-2
ab5cb08a931fc842d3892ebeb27c8b8734ddd4b8
wavenet_vocoder/models/modules.py
python
ConvTranspose1D._init_kernel
(self, kernel_size, strides, filters)
return init_kernel * (self.NN_scaler)**(1/self.up_layers)
Nearest Neighbor Upsample (Checkerboard free) init kernel size
Nearest Neighbor Upsample (Checkerboard free) init kernel size
[ "Nearest", "Neighbor", "Upsample", "(", "Checkerboard", "free", ")", "init", "kernel", "size" ]
def _init_kernel(self, kernel_size, strides, filters): '''Nearest Neighbor Upsample (Checkerboard free) init kernel size ''' overlap = float(kernel_size[1] // strides[1]) init_kernel = np.arange(filters) init_kernel = np_utils.to_categorical(init_kernel, num_classes=len(init_kernel)).reshape(1, 1, -1, filters).astype(np.float32) init_kernel = np.tile(init_kernel, [kernel_size[0], kernel_size[1], 1, 1]) init_kernel = init_kernel / max(overlap, 1.) if kernel_size[1] % 2 == 0 else init_kernel return init_kernel * (self.NN_scaler)**(1/self.up_layers)
[ "def", "_init_kernel", "(", "self", ",", "kernel_size", ",", "strides", ",", "filters", ")", ":", "overlap", "=", "float", "(", "kernel_size", "[", "1", "]", "//", "strides", "[", "1", "]", ")", "init_kernel", "=", "np", ".", "arange", "(", "filters", ")", "init_kernel", "=", "np_utils", ".", "to_categorical", "(", "init_kernel", ",", "num_classes", "=", "len", "(", "init_kernel", ")", ")", ".", "reshape", "(", "1", ",", "1", ",", "-", "1", ",", "filters", ")", ".", "astype", "(", "np", ".", "float32", ")", "init_kernel", "=", "np", ".", "tile", "(", "init_kernel", ",", "[", "kernel_size", "[", "0", "]", ",", "kernel_size", "[", "1", "]", ",", "1", ",", "1", "]", ")", "init_kernel", "=", "init_kernel", "/", "max", "(", "overlap", ",", "1.", ")", "if", "kernel_size", "[", "1", "]", "%", "2", "==", "0", "else", "init_kernel", "return", "init_kernel", "*", "(", "self", ".", "NN_scaler", ")", "**", "(", "1", "/", "self", ".", "up_layers", ")" ]
https://github.com/Rayhane-mamah/Tacotron-2/blob/ab5cb08a931fc842d3892ebeb27c8b8734ddd4b8/wavenet_vocoder/models/modules.py#L724-L733
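_init_kernel builds a nearest-neighbour (checkerboard-free) kernel: a one-hot identity over filters, tiled across the kernel window, rescaled where strided taps overlap. A numpy sketch with np.eye in place of Keras' to_categorical (equivalent here), leaving out the final NN_scaler**(1/up_layers) factor:

import numpy as np

kernel_size, strides, filters = (1, 4), (1, 2), 3
overlap = float(kernel_size[1] // strides[1])

# to_categorical(np.arange(filters)) is just the identity matrix.
init = np.eye(filters, dtype=np.float32).reshape(1, 1, filters, filters)
init = np.tile(init, [kernel_size[0], kernel_size[1], 1, 1])
if kernel_size[1] % 2 == 0:
    init /= max(overlap, 1.0)   # keep overlapping taps summing to one

print(init.shape)  # (1, 4, 3, 3)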
trezor/trezor-core
18c3a6a5bd45923380312b064be96155f5a7377d
src/apps/monero/signing/step_06_set_output.py
python
_range_proof
(state, rsig_data)
return rsig_data_new, mask
Computes rangeproof and handles range proof offloading logic. Since HF10 the commitments are deterministic. The range proof is incrementally hashed to the final_message.
Computes rangeproof and handles range proof offloading logic.
[ "Computes", "rangeproof", "and", "handles", "range", "proof", "offloading", "logic", "." ]
def _range_proof(state, rsig_data): """ Computes rangeproof and handles range proof offloading logic. Since HF10 the commitments are deterministic. The range proof is incrementally hashed to the final_message. """ provided_rsig = None if rsig_data and rsig_data.rsig and len(rsig_data.rsig) > 0: provided_rsig = rsig_data.rsig if not state.rsig_offload and provided_rsig: raise signing.Error("Provided unexpected rsig") # Batching & validation bidx = _get_rsig_batch(state, state.current_output_index) last_in_batch = _is_last_in_batch(state, state.current_output_index, bidx) if state.rsig_offload and provided_rsig and not last_in_batch: raise signing.Error("Provided rsig too early") if ( state.rsig_offload and last_in_batch and not provided_rsig and (not state.is_det_mask() or state.is_processing_offloaded) ): raise signing.Error("Rsig expected, not provided") # Batch not finished, skip range sig generation now mask = state.output_masks[-1] if not state.is_processing_offloaded else None offload_mask = mask and state.is_det_mask() and state.rsig_offload # If not last, do not proceed to the BP processing. if not last_in_batch: rsig_data_new = ( _return_rsig_data(mask=crypto.encodeint(mask)) if offload_mask else None ) return rsig_data_new, mask # Rangeproof # Pedersen commitment on the value, mask from the commitment, range signature. rsig = None state.mem_trace("pre-rproof" if __debug__ else None, collect=True) if not state.rsig_offload: """Bulletproof calculation in Trezor""" rsig = _rsig_bp(state) elif state.is_det_mask() and not state.is_processing_offloaded: """Bulletproof offloaded to the host, deterministic masks. Nothing here, waiting for offloaded BP.""" pass elif state.is_det_mask() and state.is_processing_offloaded: """Bulletproof offloaded to the host, check BP, hash it.""" _rsig_process_bp(state, rsig_data) else: """Bulletproof calculated on host, verify in Trezor""" _rsig_process_bp(state, rsig_data) state.mem_trace("rproof" if __debug__ else None, collect=True) # Construct new rsig data to send back to the host. rsig_data_new = _return_rsig_data( rsig, crypto.encodeint(mask) if offload_mask else None ) if state.current_output_index + 1 == state.output_count and ( not state.rsig_offload or state.is_processing_offloaded ): # output masks and amounts are not needed anymore state.output_amounts = None state.output_masks = None return rsig_data_new, mask
[ "def", "_range_proof", "(", "state", ",", "rsig_data", ")", ":", "provided_rsig", "=", "None", "if", "rsig_data", "and", "rsig_data", ".", "rsig", "and", "len", "(", "rsig_data", ".", "rsig", ")", ">", "0", ":", "provided_rsig", "=", "rsig_data", ".", "rsig", "if", "not", "state", ".", "rsig_offload", "and", "provided_rsig", ":", "raise", "signing", ".", "Error", "(", "\"Provided unexpected rsig\"", ")", "# Batching & validation", "bidx", "=", "_get_rsig_batch", "(", "state", ",", "state", ".", "current_output_index", ")", "last_in_batch", "=", "_is_last_in_batch", "(", "state", ",", "state", ".", "current_output_index", ",", "bidx", ")", "if", "state", ".", "rsig_offload", "and", "provided_rsig", "and", "not", "last_in_batch", ":", "raise", "signing", ".", "Error", "(", "\"Provided rsig too early\"", ")", "if", "(", "state", ".", "rsig_offload", "and", "last_in_batch", "and", "not", "provided_rsig", "and", "(", "not", "state", ".", "is_det_mask", "(", ")", "or", "state", ".", "is_processing_offloaded", ")", ")", ":", "raise", "signing", ".", "Error", "(", "\"Rsig expected, not provided\"", ")", "# Batch not finished, skip range sig generation now", "mask", "=", "state", ".", "output_masks", "[", "-", "1", "]", "if", "not", "state", ".", "is_processing_offloaded", "else", "None", "offload_mask", "=", "mask", "and", "state", ".", "is_det_mask", "(", ")", "and", "state", ".", "rsig_offload", "# If not last, do not proceed to the BP processing.", "if", "not", "last_in_batch", ":", "rsig_data_new", "=", "(", "_return_rsig_data", "(", "mask", "=", "crypto", ".", "encodeint", "(", "mask", ")", ")", "if", "offload_mask", "else", "None", ")", "return", "rsig_data_new", ",", "mask", "# Rangeproof", "# Pedersen commitment on the value, mask from the commitment, range signature.", "rsig", "=", "None", "state", ".", "mem_trace", "(", "\"pre-rproof\"", "if", "__debug__", "else", "None", ",", "collect", "=", "True", ")", "if", "not", "state", ".", "rsig_offload", ":", "\"\"\"Bulletproof calculation in Trezor\"\"\"", "rsig", "=", "_rsig_bp", "(", "state", ")", "elif", "state", ".", "is_det_mask", "(", ")", "and", "not", "state", ".", "is_processing_offloaded", ":", "\"\"\"Bulletproof offloaded to the host, deterministic masks. Nothing here, waiting for offloaded BP.\"\"\"", "pass", "elif", "state", ".", "is_det_mask", "(", ")", "and", "state", ".", "is_processing_offloaded", ":", "\"\"\"Bulletproof offloaded to the host, check BP, hash it.\"\"\"", "_rsig_process_bp", "(", "state", ",", "rsig_data", ")", "else", ":", "\"\"\"Bulletproof calculated on host, verify in Trezor\"\"\"", "_rsig_process_bp", "(", "state", ",", "rsig_data", ")", "state", ".", "mem_trace", "(", "\"rproof\"", "if", "__debug__", "else", "None", ",", "collect", "=", "True", ")", "# Construct new rsig data to send back to the host.", "rsig_data_new", "=", "_return_rsig_data", "(", "rsig", ",", "crypto", ".", "encodeint", "(", "mask", ")", "if", "offload_mask", "else", "None", ")", "if", "state", ".", "current_output_index", "+", "1", "==", "state", ".", "output_count", "and", "(", "not", "state", ".", "rsig_offload", "or", "state", ".", "is_processing_offloaded", ")", ":", "# output masks and amounts are not needed anymore", "state", ".", "output_amounts", "=", "None", "state", ".", "output_masks", "=", "None", "return", "rsig_data_new", ",", "mask" ]
https://github.com/trezor/trezor-core/blob/18c3a6a5bd45923380312b064be96155f5a7377d/src/apps/monero/signing/step_06_set_output.py#L222-L295
jython/frozen-mirror
b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99
lib-python/2.7/plat-riscos/riscospath.py
python
normpath
(p)
return fs+drive+rhs
Normalize path, eliminating up-directory ^s.
Normalize path, eliminating up-directory ^s.
[ "Normalize", "path", "eliminating", "up", "-", "directory", "^s", "." ]
def normpath(p): """ Normalize path, eliminating up-directory ^s. """ (fs, drive, path)= _split(p) rhs= '' ups= 0 while path!='': (path, el)= split(path) if el=='^': ups= ups+1 else: if ups>0: ups= ups-1 else: if rhs=='': rhs= el else: rhs= el+'.'+rhs while ups>0: ups= ups-1 rhs= '^.'+rhs return fs+drive+rhs
[ "def", "normpath", "(", "p", ")", ":", "(", "fs", ",", "drive", ",", "path", ")", "=", "_split", "(", "p", ")", "rhs", "=", "''", "ups", "=", "0", "while", "path", "!=", "''", ":", "(", "path", ",", "el", ")", "=", "split", "(", "path", ")", "if", "el", "==", "'^'", ":", "ups", "=", "ups", "+", "1", "else", ":", "if", "ups", ">", "0", ":", "ups", "=", "ups", "-", "1", "else", ":", "if", "rhs", "==", "''", ":", "rhs", "=", "el", "else", ":", "rhs", "=", "el", "+", "'.'", "+", "rhs", "while", "ups", ">", "0", ":", "ups", "=", "ups", "-", "1", "rhs", "=", "'^.'", "+", "rhs", "return", "fs", "+", "drive", "+", "rhs" ]
https://github.com/jython/frozen-mirror/blob/b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99/lib-python/2.7/plat-riscos/riscospath.py#L327-L349
qutip/qutip
52d01da181a21b810c3407812c670f35fdc647e8
qutip/fastsparse.py
python
fast_csr_matrix.adjoint
(self)
return zcsr_adjoint(self)
Same as getH
Same as getH
[ "Same", "as", "getH" ]
def adjoint(self): """ Same as getH """ return zcsr_adjoint(self)
[ "def", "adjoint", "(", "self", ")", ":", "return", "zcsr_adjoint", "(", "self", ")" ]
https://github.com/qutip/qutip/blob/52d01da181a21b810c3407812c670f35fdc647e8/qutip/fastsparse.py#L349-L353
borgbase/vorta
23c47673c009bdef8baebb0b9cdf5e78c07fe373
src/vorta/application.py
python
VortaApp.react_to_log
(self, mgs, context)
Trigger Vorta actions based on Borg logs. E.g. repo lock.
Trigger Vorta actions based on Borg logs. E.g. repo lock.
[ "Trigger", "Vorta", "actions", "based", "on", "Borg", "logs", ".", "E", ".", "g", ".", "repo", "lock", "." ]
def react_to_log(self, mgs, context): """ Trigger Vorta actions based on Borg logs. E.g. repo lock. """ msgid = context.get('msgid') if msgid == 'LockTimeout': profile = BackupProfileModel.get(name=context['profile_name']) repo_url = context.get('repo_url') msg = QMessageBox() msg.setWindowTitle(self.tr("Repository In Use")) msg.setIcon(QMessageBox.Critical) abortButton = msg.addButton(self.tr("Abort"), QMessageBox.RejectRole) msg.addButton(self.tr("Continue"), QMessageBox.AcceptRole) msg.setDefaultButton(abortButton) msg.setText(self.tr(f"The repository at {repo_url} might be in use elsewhere.")) msg.setInformativeText(self.tr("Only break the lock if you are certain no other Borg process " "on any machine is accessing the repository. Abort or break the lock?")) msg.accepted.connect(lambda: self.break_lock(profile)) self._msg = msg msg.show() elif msgid == 'LockFailed': repo_url = context.get('repo_url') msg = QMessageBox() msg.setText( self.tr( f"You do not have permission to access the repository at {repo_url}. Gain access and try again.")) # noqa: E501 msg.setWindowTitle(self.tr("No Repository Permissions")) self._msg = msg msg.show()
[ "def", "react_to_log", "(", "self", ",", "mgs", ",", "context", ")", ":", "msgid", "=", "context", ".", "get", "(", "'msgid'", ")", "if", "msgid", "==", "'LockTimeout'", ":", "profile", "=", "BackupProfileModel", ".", "get", "(", "name", "=", "context", "[", "'profile_name'", "]", ")", "repo_url", "=", "context", ".", "get", "(", "'repo_url'", ")", "msg", "=", "QMessageBox", "(", ")", "msg", ".", "setWindowTitle", "(", "self", ".", "tr", "(", "\"Repository In Use\"", ")", ")", "msg", ".", "setIcon", "(", "QMessageBox", ".", "Critical", ")", "abortButton", "=", "msg", ".", "addButton", "(", "self", ".", "tr", "(", "\"Abort\"", ")", ",", "QMessageBox", ".", "RejectRole", ")", "msg", ".", "addButton", "(", "self", ".", "tr", "(", "\"Continue\"", ")", ",", "QMessageBox", ".", "AcceptRole", ")", "msg", ".", "setDefaultButton", "(", "abortButton", ")", "msg", ".", "setText", "(", "self", ".", "tr", "(", "f\"The repository at {repo_url} might be in use elsewhere.\"", ")", ")", "msg", ".", "setInformativeText", "(", "self", ".", "tr", "(", "\"Only break the lock if you are certain no other Borg process \"", "\"on any machine is accessing the repository. Abort or break the lock?\"", ")", ")", "msg", ".", "accepted", ".", "connect", "(", "lambda", ":", "self", ".", "break_lock", "(", "profile", ")", ")", "self", ".", "_msg", "=", "msg", "msg", ".", "show", "(", ")", "elif", "msgid", "==", "'LockFailed'", ":", "repo_url", "=", "context", ".", "get", "(", "'repo_url'", ")", "msg", "=", "QMessageBox", "(", ")", "msg", ".", "setText", "(", "self", ".", "tr", "(", "f\"You do not have permission to access the repository at {repo_url}. Gain access and try again.\"", ")", ")", "# noqa: E501", "msg", ".", "setWindowTitle", "(", "self", ".", "tr", "(", "\"No Repository Permissions\"", ")", ")", "self", ".", "_msg", "=", "msg", "msg", ".", "show", "(", ")" ]
https://github.com/borgbase/vorta/blob/23c47673c009bdef8baebb0b9cdf5e78c07fe373/src/vorta/application.py#L221-L249
pymedusa/Medusa
1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38
lib/pkg_resources/_vendor/pyparsing.py
python
ParserElement.__sub__
(self, other)
return self + And._ErrorStop() + other
Implementation of - operator, returns C{L{And}} with error stop
Implementation of - operator, returns C{L{And}} with error stop
[ "Implementation", "of", "-", "operator", "returns", "C", "{", "L", "{", "And", "}}", "with", "error", "stop" ]
def __sub__(self, other): """ Implementation of - operator, returns C{L{And}} with error stop """ if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return self + And._ErrorStop() + other
[ "def", "__sub__", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "basestring", ")", ":", "other", "=", "ParserElement", ".", "_literalStringClass", "(", "other", ")", "if", "not", "isinstance", "(", "other", ",", "ParserElement", ")", ":", "warnings", ".", "warn", "(", "\"Cannot combine element of type %s with ParserElement\"", "%", "type", "(", "other", ")", ",", "SyntaxWarning", ",", "stacklevel", "=", "2", ")", "return", "None", "return", "self", "+", "And", ".", "_ErrorStop", "(", ")", "+", "other" ]
https://github.com/pymedusa/Medusa/blob/1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38/lib/pkg_resources/_vendor/pyparsing.py#L1853-L1863
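The - operator builds an And with an _ErrorStop, so once the left side matches, a right-side failure raises immediately instead of letting an enclosing alternation backtrack. A sketch against pyparsing itself (behaviour as read from this method; exception spelling may vary across versions):

from pyparsing import Literal, Word, nums, ParseSyntaxException

loose = Literal("port") + Word(nums) | Literal("port")
strict = Literal("port") - Word(nums) | Literal("port")

print(loose.parseString("port abc"))    # backtracks to the bare 'port' branch
try:
    strict.parseString("port abc")
except ParseSyntaxException as err:
    print("no backtracking:", err)      # the error stop fires at Word(nums)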
bamtercelboo/pytorch_NER_BiLSTM_CNN_CRF
375d00155baa2e0937c050562926bd0f2cd0a0f5
DataUtils/Batch_Iterator_torch.py
python
Iterators._Create_Each_Batch
(self, insts, batch_size, operator, device)
return features
:param insts: :param batch_size: :param operator: :return:
:param insts: :param batch_size: :param operator: :return:
[ ":", "param", "insts", ":", ":", "param", "batch_size", ":", ":", "param", "operator", ":", ":", "return", ":" ]
def _Create_Each_Batch(self, insts, batch_size, operator, device): """ :param insts: :param batch_size: :param operator: :return: """ # print("create one batch......") batch_length = len(insts) # copy with the max length for padding max_word_size = -1 max_label_size = -1 sentence_length = [] for inst in insts: sentence_length.append(inst.words_size) word_size = inst.words_size if word_size > max_word_size: max_word_size = word_size if len(inst.labels) > max_label_size: max_label_size = len(inst.labels) assert max_word_size == max_label_size # create with the Tensor/Variable # word features batch_word_features = torch.zeros(batch_length, max_word_size, device=cpu_device, requires_grad=True).long() batch_char_features = torch.zeros(batch_length, max_word_size, self.max_char_len, device=cpu_device, requires_grad=True).long() batch_label_features = torch.zeros(batch_length * max_word_size, device=cpu_device, requires_grad=True).long() for id_inst in range(batch_length): inst = insts[id_inst] # copy with the word features for id_word_index in range(max_word_size): if id_word_index < inst.words_size: batch_word_features.data[id_inst][id_word_index] = inst.words_index[id_word_index] else: batch_word_features.data[id_inst][id_word_index] = operator.word_paddingId if id_word_index < len(inst.label_index): batch_label_features.data[id_inst * max_word_size + id_word_index] = inst.label_index[id_word_index] else: batch_label_features.data[id_inst * max_word_size + id_word_index] = operator.label_paddingId # batch_label_features.data[id_inst * max_word_size + id_word_index] = 0 # char max_char_size = len(inst.chars_index[id_word_index]) if id_word_index < inst.words_size else 0 for id_word_c in range(self.max_char_len): if id_word_c < max_char_size: batch_char_features.data[id_inst][id_word_index][id_word_c] = inst.chars_index[id_word_index][id_word_c] else: batch_char_features.data[id_inst][id_word_index][id_word_c] = operator.char_paddingId # batch features = Batch_Features() features.batch_length = batch_length features.inst = insts features.word_features = batch_word_features features.char_features = batch_char_features features.label_features = batch_label_features features.sentence_length = sentence_length features.desorted_indices = None if device != cpu_device: features.cuda(features) return features
[ "def", "_Create_Each_Batch", "(", "self", ",", "insts", ",", "batch_size", ",", "operator", ",", "device", ")", ":", "# print(\"create one batch......\")", "batch_length", "=", "len", "(", "insts", ")", "# copy with the max length for padding", "max_word_size", "=", "-", "1", "max_label_size", "=", "-", "1", "sentence_length", "=", "[", "]", "for", "inst", "in", "insts", ":", "sentence_length", ".", "append", "(", "inst", ".", "words_size", ")", "word_size", "=", "inst", ".", "words_size", "if", "word_size", ">", "max_word_size", ":", "max_word_size", "=", "word_size", "if", "len", "(", "inst", ".", "labels", ")", ">", "max_label_size", ":", "max_label_size", "=", "len", "(", "inst", ".", "labels", ")", "assert", "max_word_size", "==", "max_label_size", "# create with the Tensor/Variable", "# word features", "batch_word_features", "=", "torch", ".", "zeros", "(", "batch_length", ",", "max_word_size", ",", "device", "=", "cpu_device", ",", "requires_grad", "=", "True", ")", ".", "long", "(", ")", "batch_char_features", "=", "torch", ".", "zeros", "(", "batch_length", ",", "max_word_size", ",", "self", ".", "max_char_len", ",", "device", "=", "cpu_device", ",", "requires_grad", "=", "True", ")", ".", "long", "(", ")", "batch_label_features", "=", "torch", ".", "zeros", "(", "batch_length", "*", "max_word_size", ",", "device", "=", "cpu_device", ",", "requires_grad", "=", "True", ")", ".", "long", "(", ")", "for", "id_inst", "in", "range", "(", "batch_length", ")", ":", "inst", "=", "insts", "[", "id_inst", "]", "# copy with the word features", "for", "id_word_index", "in", "range", "(", "max_word_size", ")", ":", "if", "id_word_index", "<", "inst", ".", "words_size", ":", "batch_word_features", ".", "data", "[", "id_inst", "]", "[", "id_word_index", "]", "=", "inst", ".", "words_index", "[", "id_word_index", "]", "else", ":", "batch_word_features", ".", "data", "[", "id_inst", "]", "[", "id_word_index", "]", "=", "operator", ".", "word_paddingId", "if", "id_word_index", "<", "len", "(", "inst", ".", "label_index", ")", ":", "batch_label_features", ".", "data", "[", "id_inst", "*", "max_word_size", "+", "id_word_index", "]", "=", "inst", ".", "label_index", "[", "id_word_index", "]", "else", ":", "batch_label_features", ".", "data", "[", "id_inst", "*", "max_word_size", "+", "id_word_index", "]", "=", "operator", ".", "label_paddingId", "# batch_label_features.data[id_inst * max_word_size + id_word_index] = 0", "# char", "max_char_size", "=", "len", "(", "inst", ".", "chars_index", "[", "id_word_index", "]", ")", "if", "id_word_index", "<", "inst", ".", "words_size", "else", "0", "for", "id_word_c", "in", "range", "(", "self", ".", "max_char_len", ")", ":", "if", "id_word_c", "<", "max_char_size", ":", "batch_char_features", ".", "data", "[", "id_inst", "]", "[", "id_word_index", "]", "[", "id_word_c", "]", "=", "inst", ".", "chars_index", "[", "id_word_index", "]", "[", "id_word_c", "]", "else", ":", "batch_char_features", ".", "data", "[", "id_inst", "]", "[", "id_word_index", "]", "[", "id_word_c", "]", "=", "operator", ".", "char_paddingId", "# batch", "features", "=", "Batch_Features", "(", ")", "features", ".", "batch_length", "=", "batch_length", "features", ".", "inst", "=", "insts", "features", ".", "word_features", "=", "batch_word_features", "features", ".", "char_features", "=", "batch_char_features", "features", ".", "label_features", "=", "batch_label_features", "features", ".", "sentence_length", "=", "sentence_length", "features", ".", "desorted_indices", "=", "None", "if", 
"device", "!=", "cpu_device", ":", "features", ".", "cuda", "(", "features", ")", "return", "features" ]
https://github.com/bamtercelboo/pytorch_NER_BiLSTM_CNN_CRF/blob/375d00155baa2e0937c050562926bd0f2cd0a0f5/DataUtils/Batch_Iterator_torch.py#L136-L200
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/Python-2.7.9/Demo/scripts/fact.py
python
fact
(n)
return res
[]
def fact(n): if n < 1: raise ValueError('fact() argument should be >= 1') if n == 1: return [] # special case res = [] # Treat even factors special, so we can use i += 2 later while n % 2 == 0: res.append(2) n //= 2 # Try odd numbers up to sqrt(n) limit = sqrt(n+1) i = 3 while i <= limit: if n % i == 0: res.append(i) n //= i limit = sqrt(n+1) else: i += 2 if n != 1: res.append(n) return res
[ "def", "fact", "(", "n", ")", ":", "if", "n", "<", "1", ":", "raise", "ValueError", "(", "'fact() argument should be >= 1'", ")", "if", "n", "==", "1", ":", "return", "[", "]", "# special case", "res", "=", "[", "]", "# Treat even factors special, so we can use i += 2 later", "while", "n", "%", "2", "==", "0", ":", "res", ".", "append", "(", "2", ")", "n", "//=", "2", "# Try odd numbers up to sqrt(n)", "limit", "=", "sqrt", "(", "n", "+", "1", ")", "i", "=", "3", "while", "i", "<=", "limit", ":", "if", "n", "%", "i", "==", "0", ":", "res", ".", "append", "(", "i", ")", "n", "//=", "i", "limit", "=", "sqrt", "(", "n", "+", "1", ")", "else", ":", "i", "+=", "2", "if", "n", "!=", "1", ":", "res", ".", "append", "(", "n", ")", "return", "res" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/Python-2.7.9/Demo/scripts/fact.py#L11-L33
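fact above is plain trial division, re-tightening the sqrt bound whenever n shrinks. The same algorithm in a compact Python 3 sketch using math.isqrt (the original is a Python 2 demo script):

import math

def factorize(n):
    if n < 1:
        raise ValueError('argument should be >= 1')
    res = []
    while n % 2 == 0:        # strip factors of 2 so we can step by 2 below
        res.append(2)
        n //= 2
    i = 3
    while i <= math.isqrt(n):
        while n % i == 0:    # divide out each odd factor completely
            res.append(i)
            n //= i
        i += 2
    if n != 1:               # whatever is left is itself prime
        res.append(n)
    return res

print(factorize(360))  # [2, 2, 2, 3, 3, 5]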
gentoo/portage
e5be73709b1a42b40380fd336f9381452b01a723
lib/_emerge/PipeReader.py
python
PipeReader.close
(self)
Free the memory buffer.
Free the memory buffer.
[ "Free", "the", "memory", "buffer", "." ]
def close(self): """Free the memory buffer.""" self._read_data = None
[ "def", "close", "(", "self", ")", ":", "self", ".", "_read_data", "=", "None" ]
https://github.com/gentoo/portage/blob/e5be73709b1a42b40380fd336f9381452b01a723/lib/_emerge/PipeReader.py#L46-L48
adewes/blitzdb
36191579be14fbc2d7a47ede099bcdf31297a9fa
blitzdb/backends/sql/backend.py
python
Backend.connection
(self)
[]
def connection(self): if self._conn: self._conn.close() self._conn = None
[ "def", "connection", "(", "self", ")", ":", "if", "self", ".", "_conn", ":", "self", ".", "_conn", ".", "close", "(", ")", "self", ".", "_conn", "=", "None" ]
https://github.com/adewes/blitzdb/blob/36191579be14fbc2d7a47ede099bcdf31297a9fa/blitzdb/backends/sql/backend.py#L112-L115
huhamhire/huhamhire-hosts
33b9c49e7a4045b00e0c0df06f25e9ce8a037761
gui/qdialog_d.py
python
QDialogDaemon.fetch_update_after_check
(self)
Decide whether to retrieve a new data file from server or not after checking update information from a mirror.
Decide whether to retrieve a new data file from server or not after checking update information from a mirror.
[ "Decide", "whether", "to", "retrieve", "a", "new", "data", "file", "from", "server", "or", "not", "after", "checking", "update", "information", "from", "a", "mirror", "." ]
def fetch_update_after_check(self): """ Decide whether to retrieve a new data file from server or not after checking update information from a mirror. """ if self._update["version"] == \ unicode(_translate("Util", "[Error]", None)): self.finish_fetch(error=1) elif self.new_version(): self.fetch_update() else: self.info_uptodate() self.finish_fetch()
[ "def", "fetch_update_after_check", "(", "self", ")", ":", "if", "self", ".", "_update", "[", "\"version\"", "]", "==", "unicode", "(", "_translate", "(", "\"Util\"", ",", "\"[Error]\"", ",", "None", ")", ")", ":", "self", ".", "finish_fetch", "(", "error", "=", "1", ")", "elif", "self", ".", "new_version", "(", ")", ":", "self", ".", "fetch_update", "(", ")", "else", ":", "self", ".", "info_uptodate", "(", ")", "self", ".", "finish_fetch", "(", ")" ]
https://github.com/huhamhire/huhamhire-hosts/blob/33b9c49e7a4045b00e0c0df06f25e9ce8a037761/gui/qdialog_d.py#L158-L170
bendmorris/static-python
2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473
Lib/tkinter/__init__.py
python
Misc.event_delete
(self, virtual, *sequences)
Unbind a virtual event VIRTUAL from SEQUENCE.
Unbind a virtual event VIRTUAL from SEQUENCE.
[ "Unbind", "a", "virtual", "event", "VIRTUAL", "from", "SEQUENCE", "." ]
def event_delete(self, virtual, *sequences): """Unbind a virtual event VIRTUAL from SEQUENCE.""" args = ('event', 'delete', virtual) + sequences self.tk.call(args)
[ "def", "event_delete", "(", "self", ",", "virtual", ",", "*", "sequences", ")", ":", "args", "=", "(", "'event'", ",", "'delete'", ",", "virtual", ")", "+", "sequences", "self", ".", "tk", ".", "call", "(", "args", ")" ]
https://github.com/bendmorris/static-python/blob/2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473/Lib/tkinter/__init__.py#L1431-L1434
shmilylty/OneForAll
48591142a641e80f8a64ab215d11d06b696702d7
modules/datasets/fullhunt.py
python
FullHuntAPI.query
(self)
Query the API for subdomains and match subdomains
Query the API for subdomains and match subdomains
[ "Query", "the", "API", "for", "subdomains", "and", "match", "subdomains" ]
def query(self): """ Query the API for subdomains and match subdomains """ self.header = self.get_header() self.header.update({'X-API-KEY': self.api}) self.proxy = self.get_proxy(self.source) url = f'https://fullhunt.io/api/v1/domain/{self.domain}/subdomains' resp = self.get(url) self.subdomains = self.collect_subdomains(resp)
[ "def", "query", "(", "self", ")", ":", "self", ".", "header", "=", "self", ".", "get_header", "(", ")", "self", ".", "header", ".", "update", "(", "{", "'X-API-KEY'", ":", "self", ".", "api", "}", ")", "self", ".", "proxy", "=", "self", ".", "get_proxy", "(", "self", ".", "source", ")", "url", "=", "f'https://fullhunt.io/api/v1/domain/{self.domain}/subdomains'", "resp", "=", "self", ".", "get", "(", "url", ")", "self", ".", "subdomains", "=", "self", ".", "collect_subdomains", "(", "resp", ")" ]
https://github.com/shmilylty/OneForAll/blob/48591142a641e80f8a64ab215d11d06b696702d7/modules/datasets/fullhunt.py#L13-L22
nipy/nipy
d16d268938dcd5c15748ca051532c21f57cf8a22
nipy/algorithms/clustering/bgmm.py
python
BGMM.update_precisions
(self, x, z)
Given the allocation vector z, and the corresponding data x, resample the precisions Parameters ---------- x array of shape (nb_samples,self.dim) the data used in the estimation process z array of shape (nb_samples), type = np.int the corresponding classification
Given the allocation vector z, and the corresponding data x, resample the precisions
[ "Given", "the", "allocation", "vector", "z", "and", "the", "corresponding", "data", "x", "resample", "the", "precisions" ]
def update_precisions(self, x, z): """ Given the allocation vector z, and the corresponding data x, resample the precisions Parameters ---------- x array of shape (nb_samples,self.dim) the data used in the estimation process z array of shape (nb_samples), type = np.int the corresponding classification """ pop = self.pop(z) self.dof = self.prior_dof + pop + 1 rpop = pop + (pop == 0) self._detp = np.zeros(self.k) for k in range(self.k): # empirical means empmeans = np.sum(x[z == k], 0) / rpop[k] dm = np.reshape(empmeans - self.prior_means[k], (1, self.dim)) # scatter dx = np.reshape(x[z == k] - empmeans, (pop[k], self.dim)) scatter = np.dot(dx.T, dx) # bias addcov = np.dot(dm.T, dm) * self.prior_shrinkage[k] # covariance = prior term + scatter + bias covariance = self._inv_prior_scale[k] + scatter + addcov #precision scale = inv(covariance) self.precisions[k] = generate_Wishart(self.dof[k], scale) self._detp[k] = detsh(self.precisions[k])
[ "def", "update_precisions", "(", "self", ",", "x", ",", "z", ")", ":", "pop", "=", "self", ".", "pop", "(", "z", ")", "self", ".", "dof", "=", "self", ".", "prior_dof", "+", "pop", "+", "1", "rpop", "=", "pop", "+", "(", "pop", "==", "0", ")", "self", ".", "_detp", "=", "np", ".", "zeros", "(", "self", ".", "k", ")", "for", "k", "in", "range", "(", "self", ".", "k", ")", ":", "# empirical means", "empmeans", "=", "np", ".", "sum", "(", "x", "[", "z", "==", "k", "]", ",", "0", ")", "/", "rpop", "[", "k", "]", "dm", "=", "np", ".", "reshape", "(", "empmeans", "-", "self", ".", "prior_means", "[", "k", "]", ",", "(", "1", ",", "self", ".", "dim", ")", ")", "# scatter", "dx", "=", "np", ".", "reshape", "(", "x", "[", "z", "==", "k", "]", "-", "empmeans", ",", "(", "pop", "[", "k", "]", ",", "self", ".", "dim", ")", ")", "scatter", "=", "np", ".", "dot", "(", "dx", ".", "T", ",", "dx", ")", "# bias", "addcov", "=", "np", ".", "dot", "(", "dm", ".", "T", ",", "dm", ")", "*", "self", ".", "prior_shrinkage", "[", "k", "]", "# covariance = prior term + scatter + bias", "covariance", "=", "self", ".", "_inv_prior_scale", "[", "k", "]", "+", "scatter", "+", "addcov", "#precision", "scale", "=", "inv", "(", "covariance", ")", "self", ".", "precisions", "[", "k", "]", "=", "generate_Wishart", "(", "self", ".", "dof", "[", "k", "]", ",", "scale", ")", "self", ".", "_detp", "[", "k", "]", "=", "detsh", "(", "self", ".", "precisions", "[", "k", "]", ")" ]
https://github.com/nipy/nipy/blob/d16d268938dcd5c15748ca051532c21f57cf8a22/nipy/algorithms/clustering/bgmm.py#L561-L597
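The resampling above draws each cluster precision from a Wishart whose scale is the inverse of prior term + scatter + bias. A sketch of that single draw using scipy.stats.wishart as an assumed stand-in for nipy's generate_Wishart:

import numpy as np
from numpy.linalg import inv
from scipy.stats import wishart

dim, dof = 2, 10
covariance = np.array([[2.0, 0.3],
                       [0.3, 1.0]])      # prior term + scatter + bias
scale = inv(covariance)

# One precision-matrix draw, as in precisions[k] = generate_Wishart(dof[k], scale)
precision = wishart.rvs(df=dof, scale=scale, random_state=0)
print(precision)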
Borda/pyImSegm
7584b40a8d5bba04d3bf46f540f22b5d923e4b03
imsegm/utilities/drawing.py
python
ellipse_perimeter
(r, c, r_radius, c_radius, orientation=0., shape=None)
return rr, cc
see New version scikit-image v0.14 .. note:: Should be solved in skimage v0.14 :param int r: center position in rows :param int c: center position in columns :param int r_radius: ellipse diam in rows :param int c_radius: ellipse diam in columns :param float orientation: ellipse orientation :param tuple(int,int) shape: size of output mask :return tuple(list(int),list(int)): indexes of filled positions >>> img = np.zeros((14, 20), dtype=int) >>> rr, cc = ellipse_perimeter(7, 10, 3, 9, np.deg2rad(30), img.shape) >>> img[rr, cc] = 1 >>> img array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
see New version scikit-image v0.14
[ "see", "New", "version", "scikit", "-", "image", "v0", ".", "14" ]
def ellipse_perimeter(r, c, r_radius, c_radius, orientation=0., shape=None): """ see New version scikit-image v0.14 .. note:: Should be solved in skimage v0.14 :param int r: center position in rows :param int c: center position in columns :param int r_radius: ellipse diam in rows :param int c_radius: ellipse diam in columns :param float orientation: ellipse orientation :param tuple(int,int) shape: size of output mask :return tuple(list(int),list(int)): indexes of filled positions >>> img = np.zeros((14, 20), dtype=int) >>> rr, cc = ellipse_perimeter(7, 10, 3, 9, np.deg2rad(30), img.shape) >>> img[rr, cc] = 1 >>> img array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]) """ rr, cc = draw.ellipse_perimeter(r, c, r_radius, c_radius, orientation=-orientation, shape=shape) return rr, cc
[ "def", "ellipse_perimeter", "(", "r", ",", "c", ",", "r_radius", ",", "c_radius", ",", "orientation", "=", "0.", ",", "shape", "=", "None", ")", ":", "rr", ",", "cc", "=", "draw", ".", "ellipse_perimeter", "(", "r", ",", "c", ",", "r_radius", ",", "c_radius", ",", "orientation", "=", "-", "orientation", ",", "shape", "=", "shape", ")", "return", "rr", ",", "cc" ]
https://github.com/Borda/pyImSegm/blob/7584b40a8d5bba04d3bf46f540f22b5d923e4b03/imsegm/utilities/drawing.py#L154-L187
plotly/plotly.py
cfad7862594b35965c0e000813bd7805e8494a5b
packages/python/plotly/plotly/matplotlylib/mpltools.py
python
get_rect_ymin
(data)
return min(data[0][1], data[1][1], data[2][1], data[3][1])
Find minimum y value from four (x,y) vertices.
Find minimum y value from four (x,y) vertices.
[ "Find", "minimum", "y", "value", "from", "four", "(", "x", "y", ")", "vertices", "." ]
def get_rect_ymin(data): """Find minimum y value from four (x,y) vertices.""" return min(data[0][1], data[1][1], data[2][1], data[3][1])
[ "def", "get_rect_ymin", "(", "data", ")", ":", "return", "min", "(", "data", "[", "0", "]", "[", "1", "]", ",", "data", "[", "1", "]", "[", "1", "]", ",", "data", "[", "2", "]", "[", "1", "]", ",", "data", "[", "3", "]", "[", "1", "]", ")" ]
https://github.com/plotly/plotly.py/blob/cfad7862594b35965c0e000813bd7805e8494a5b/packages/python/plotly/plotly/matplotlylib/mpltools.py#L354-L356
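A minimal sketch of the same computation, with four hypothetical (x, y) vertices (not from the record):

    # Four hypothetical rectangle vertices as (x, y) pairs.
    verts = [(0.0, 2.0), (4.0, 2.0), (4.0, 5.0), (0.0, 5.0)]
    # Equivalent to get_rect_ymin(verts): take the smallest y of the four.
    print(min(v[1] for v in verts))  # 2.0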
markovmodel/PyEMMA
e9d08d715dde17ceaa96480a9ab55d5e87d3a4b3
pyemma/coordinates/transform/_tica_base.py
python
TICABase.dimension
(self)
return d
output dimension
output dimension
[ "output", "dimension" ]
def dimension(self): """ output dimension """ if self.dim > -1: return self.dim d = None if self.dim != -1 and not self._estimated: # fixed parametrization d = self.dim elif self._estimated: # parametrization finished. Dimension is known dim = len(self.eigenvalues) if self.var_cutoff < 1.0: # if subspace_variance, reduce the output dimension if needed dim = min(dim, np.searchsorted(self.cumvar, self.var_cutoff) + 1) d = dim elif self.var_cutoff == 1.0: # We only know that all dimensions are wanted, so return input dim d = self.data_producer.dimension() else: # We know nothing. Give up raise RuntimeError('Requested dimension, but the dimension depends on the cumulative variance and the ' 'transformer has not yet been estimated. Call estimate() before.') return d
[ "def", "dimension", "(", "self", ")", ":", "if", "self", ".", "dim", ">", "-", "1", ":", "return", "self", ".", "dim", "d", "=", "None", "if", "self", ".", "dim", "!=", "-", "1", "and", "not", "self", ".", "_estimated", ":", "# fixed parametrization", "d", "=", "self", ".", "dim", "elif", "self", ".", "_estimated", ":", "# parametrization finished. Dimension is known", "dim", "=", "len", "(", "self", ".", "eigenvalues", ")", "if", "self", ".", "var_cutoff", "<", "1.0", ":", "# if subspace_variance, reduce the output dimension if needed", "dim", "=", "min", "(", "dim", ",", "np", ".", "searchsorted", "(", "self", ".", "cumvar", ",", "self", ".", "var_cutoff", ")", "+", "1", ")", "d", "=", "dim", "elif", "self", ".", "var_cutoff", "==", "1.0", ":", "# We only know that all dimensions are wanted, so return input dim", "d", "=", "self", ".", "data_producer", ".", "dimension", "(", ")", "else", ":", "# We know nothing. Give up", "raise", "RuntimeError", "(", "'Requested dimension, but the dimension depends on the cumulative variance and the '", "'transformer has not yet been estimated. Call estimate() before.'", ")", "return", "d" ]
https://github.com/markovmodel/PyEMMA/blob/e9d08d715dde17ceaa96480a9ab55d5e87d3a4b3/pyemma/coordinates/transform/_tica_base.py#L98-L115
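The variance-cutoff branch can be exercised standalone; the eigenvalue spectrum and the squared-eigenvalue normalization below are illustrative assumptions, only the searchsorted line mirrors the record:

    import numpy as np

    eigenvalues = np.array([0.9, 0.5, 0.2, 0.05])   # hypothetical TICA spectrum
    cumvar = np.cumsum(eigenvalues ** 2)            # one plausible cumulative-variance definition
    cumvar /= cumvar[-1]
    var_cutoff = 0.95
    # Keep the fewest components whose cumulative variance reaches the cutoff.
    dim = min(len(eigenvalues), int(np.searchsorted(cumvar, var_cutoff)) + 1)
    print(dim)  # 2 for this spectrum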
dpgaspar/Flask-AppBuilder
557249f33b66d02a48c1322ef21324b815abe18e
examples/quickactions/app/views.py
python
GroupModelView.myaction
(self, item)
return redirect(self.get_redirect())
do something with the item record
do something with the item record
[ "do", "something", "with", "the", "item", "record" ]
def myaction(self, item): """ do something with the item record """ return redirect(self.get_redirect())
[ "def", "myaction", "(", "self", ",", "item", ")", ":", "return", "redirect", "(", "self", ".", "get_redirect", "(", ")", ")" ]
https://github.com/dpgaspar/Flask-AppBuilder/blob/557249f33b66d02a48c1322ef21324b815abe18e/examples/quickactions/app/views.py#L17-L21
Tygs/0bin
c65d5c4d090d385eb5c40df8e0001af9267f3b4b
zerobin/utils.py
python
SettingsContainer.update_with_dict
(self, mapping)
return self
Update settings with values from the given mapping object. (Taking only variable with uppercased name)
Update settings with values from the given mapping object. (Taking only variable with uppercased name)
[ "Update", "settings", "with", "values", "from", "the", "given", "mapping", "object", ".", "(", "Taking", "only", "variable", "with", "uppercased", "name", ")" ]
def update_with_dict(self, mapping): """ Update settings with values from the given mapping object. (Taking only variable with uppercased name) """ for name, value in mapping.items(): if name.isupper(): setattr(self, name, value) return self
[ "def", "update_with_dict", "(", "self", ",", "mapping", ")", ":", "for", "name", ",", "value", "in", "mapping", ".", "items", "(", ")", ":", "if", "name", ".", "isupper", "(", ")", ":", "setattr", "(", "self", ",", "name", ",", "value", ")", "return", "self" ]
https://github.com/Tygs/0bin/blob/c65d5c4d090d385eb5c40df8e0001af9267f3b4b/zerobin/utils.py#L35-L43
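The uppercase filter is easy to check on a throwaway object; the names below are hypothetical:

    class _Box:
        pass

    mapping = {"SECRET_KEY": "abc", "debug": True}   # only the uppercase key is taken
    box = _Box()
    for name, value in mapping.items():
        if name.isupper():
            setattr(box, name, value)
    print(box.SECRET_KEY)          # 'abc'
    print(hasattr(box, "debug"))   # False -- lowercase names are ignored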
stopstalk/stopstalk-deployment
10c3ab44c4ece33ae515f6888c15033db2004bb1
aws_lambda/spoj_aws_lambda_function/lambda_code/lxml/html/__init__.py
python
InputElement.type
(self)
return self.get('type', 'text').lower()
Return the type of this element (using the type attribute).
Return the type of this element (using the type attribute).
[ "Return", "the", "type", "of", "this", "element", "(", "using", "the", "type", "attribute", ")", "." ]
def type(self): """ Return the type of this element (using the type attribute). """ return self.get('type', 'text').lower()
[ "def", "type", "(", "self", ")", ":", "return", "self", ".", "get", "(", "'type'", ",", "'text'", ")", ".", "lower", "(", ")" ]
https://github.com/stopstalk/stopstalk-deployment/blob/10c3ab44c4ece33ae515f6888c15033db2004bb1/aws_lambda/spoj_aws_lambda_function/lambda_code/lxml/html/__init__.py#L1657-L1661
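A quick sketch of the property's behavior, assuming lxml is installed (the markup is hypothetical):

    from lxml import html

    el = html.fromstring('<input type="CHECKBOX">')
    print(el.type)                          # 'checkbox' -- attribute value lower-cased
    print(html.fromstring('<input>').type)  # 'text' -- default when the attribute is absent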
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
ansible/roles/lib_gcloud/library/gcloud_dm_resource_builder.py
python
GcloudCLI.project
(self)
return self._project
property for project
property for project
[ "property", "for", "project" ]
def project(self): '''property for project''' return self._project
[ "def", "project", "(", "self", ")", ":", "return", "self", ".", "_project" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/ansible/roles/lib_gcloud/library/gcloud_dm_resource_builder.py#L62-L64
saltstack/salt
fae5bc757ad0f1716483ce7ae180b451545c2058
salt/modules/zpool.py
python
status
(zpool=None)
return ret
Return the status of the named zpool zpool : string optional name of storage pool .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' zpool.status myzpool
Return the status of the named zpool
[ "Return", "the", "status", "of", "the", "named", "zpool" ]
def status(zpool=None): """ Return the status of the named zpool zpool : string optional name of storage pool .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' zpool.status myzpool """ ret = OrderedDict() ## collect status output res = __salt__["cmd.run_all"]( __utils__["zfs.zpool_command"]("status", target=zpool), python_shell=False, ) if res["retcode"] != 0: return __utils__["zfs.parse_command_result"](res) # NOTE: command output for reference # ===================================================================== # pool: data # state: ONLINE # scan: scrub repaired 0 in 2h27m with 0 errors on Mon Jan 8 03:27:25 2018 # config: # # NAME STATE READ WRITE CKSUM # data ONLINE 0 0 0 # mirror-0 ONLINE 0 0 0 # c0tXXXXCXXXXXXXXXXXd0 ONLINE 0 0 0 # c0tXXXXCXXXXXXXXXXXd0 ONLINE 0 0 0 # c0tXXXXCXXXXXXXXXXXd0 ONLINE 0 0 0 # # errors: No known data errors # ===================================================================== ## parse status output # NOTE: output is 'key: value' except for the 'config' key. # multiple pools will repeat the output, so if switch pools if # we see 'pool:' current_pool = None current_prop = None for zpd in res["stdout"].splitlines(): if zpd.strip() == "": continue if ":" in zpd and zpd[0] != "\t": # NOTE: line is 'key: value' format, we just update a dict prop = zpd.split(":")[0].strip() value = ":".join(zpd.split(":")[1:]).strip() if prop == "pool" and current_pool != value: current_pool = value ret[current_pool] = OrderedDict() if prop != "pool": ret[current_pool][prop] = value current_prop = prop else: # NOTE: we append the line output to the last property # this should only happens once we hit the config # section ret[current_pool][current_prop] = "{}\n{}".format( ret[current_pool][current_prop], zpd ) ## parse config property for each pool # NOTE: the config property has some structured data # sadly this data is in a different format than # the rest and it needs further processing for pool in ret: if "config" not in ret[pool]: continue header = None root_vdev = None vdev = None dev = None rdev = None config = ret[pool]["config"] config_data = OrderedDict() for line in config.splitlines(): # NOTE: the first line is the header # we grab all the none whitespace values if not header: header = line.strip().lower() header = [x for x in header.split(" ") if x not in [""]] continue # NOTE: data is indented by 1 tab, then multiples of 2 spaces # to differential root vdev, vdev, and dev # # we just strip the initial tab (can't use .strip() here) if line[0] == "\t": line = line[1:] # NOTE: transform data into dict stat_data = OrderedDict( list( zip( header, [x for x in line.strip().split(" ") if x not in [""]], ) ) ) # NOTE: decode the zfs values properly stat_data = __utils__["zfs.from_auto_dict"](stat_data) # NOTE: store stat_data in the proper location if line.startswith(" " * 6): rdev = stat_data["name"] config_data[root_vdev][vdev][dev][rdev] = stat_data elif line.startswith(" " * 4): rdev = None dev = stat_data["name"] config_data[root_vdev][vdev][dev] = stat_data elif line.startswith(" " * 2): rdev = dev = None vdev = stat_data["name"] config_data[root_vdev][vdev] = stat_data else: rdev = dev = vdev = None root_vdev = stat_data["name"] config_data[root_vdev] = stat_data # NOTE: name already used as identifier, drop duplicate data del stat_data["name"] ret[pool]["config"] = config_data return ret
[ "def", "status", "(", "zpool", "=", "None", ")", ":", "ret", "=", "OrderedDict", "(", ")", "## collect status output", "res", "=", "__salt__", "[", "\"cmd.run_all\"", "]", "(", "__utils__", "[", "\"zfs.zpool_command\"", "]", "(", "\"status\"", ",", "target", "=", "zpool", ")", ",", "python_shell", "=", "False", ",", ")", "if", "res", "[", "\"retcode\"", "]", "!=", "0", ":", "return", "__utils__", "[", "\"zfs.parse_command_result\"", "]", "(", "res", ")", "# NOTE: command output for reference", "# =====================================================================", "# pool: data", "# state: ONLINE", "# scan: scrub repaired 0 in 2h27m with 0 errors on Mon Jan 8 03:27:25 2018", "# config:", "#", "# NAME STATE READ WRITE CKSUM", "# data ONLINE 0 0 0", "# mirror-0 ONLINE 0 0 0", "# c0tXXXXCXXXXXXXXXXXd0 ONLINE 0 0 0", "# c0tXXXXCXXXXXXXXXXXd0 ONLINE 0 0 0", "# c0tXXXXCXXXXXXXXXXXd0 ONLINE 0 0 0", "#", "# errors: No known data errors", "# =====================================================================", "## parse status output", "# NOTE: output is 'key: value' except for the 'config' key.", "# multiple pools will repeat the output, so if switch pools if", "# we see 'pool:'", "current_pool", "=", "None", "current_prop", "=", "None", "for", "zpd", "in", "res", "[", "\"stdout\"", "]", ".", "splitlines", "(", ")", ":", "if", "zpd", ".", "strip", "(", ")", "==", "\"\"", ":", "continue", "if", "\":\"", "in", "zpd", "and", "zpd", "[", "0", "]", "!=", "\"\\t\"", ":", "# NOTE: line is 'key: value' format, we just update a dict", "prop", "=", "zpd", ".", "split", "(", "\":\"", ")", "[", "0", "]", ".", "strip", "(", ")", "value", "=", "\":\"", ".", "join", "(", "zpd", ".", "split", "(", "\":\"", ")", "[", "1", ":", "]", ")", ".", "strip", "(", ")", "if", "prop", "==", "\"pool\"", "and", "current_pool", "!=", "value", ":", "current_pool", "=", "value", "ret", "[", "current_pool", "]", "=", "OrderedDict", "(", ")", "if", "prop", "!=", "\"pool\"", ":", "ret", "[", "current_pool", "]", "[", "prop", "]", "=", "value", "current_prop", "=", "prop", "else", ":", "# NOTE: we append the line output to the last property", "# this should only happens once we hit the config", "# section", "ret", "[", "current_pool", "]", "[", "current_prop", "]", "=", "\"{}\\n{}\"", ".", "format", "(", "ret", "[", "current_pool", "]", "[", "current_prop", "]", ",", "zpd", ")", "## parse config property for each pool", "# NOTE: the config property has some structured data", "# sadly this data is in a different format than", "# the rest and it needs further processing", "for", "pool", "in", "ret", ":", "if", "\"config\"", "not", "in", "ret", "[", "pool", "]", ":", "continue", "header", "=", "None", "root_vdev", "=", "None", "vdev", "=", "None", "dev", "=", "None", "rdev", "=", "None", "config", "=", "ret", "[", "pool", "]", "[", "\"config\"", "]", "config_data", "=", "OrderedDict", "(", ")", "for", "line", "in", "config", ".", "splitlines", "(", ")", ":", "# NOTE: the first line is the header", "# we grab all the none whitespace values", "if", "not", "header", ":", "header", "=", "line", ".", "strip", "(", ")", ".", "lower", "(", ")", "header", "=", "[", "x", "for", "x", "in", "header", ".", "split", "(", "\" \"", ")", "if", "x", "not", "in", "[", "\"\"", "]", "]", "continue", "# NOTE: data is indented by 1 tab, then multiples of 2 spaces", "# to differential root vdev, vdev, and dev", "#", "# we just strip the initial tab (can't use .strip() here)", "if", "line", "[", "0", "]", "==", "\"\\t\"", ":", "line", "=", "line", "[", 
"1", ":", "]", "# NOTE: transform data into dict", "stat_data", "=", "OrderedDict", "(", "list", "(", "zip", "(", "header", ",", "[", "x", "for", "x", "in", "line", ".", "strip", "(", ")", ".", "split", "(", "\" \"", ")", "if", "x", "not", "in", "[", "\"\"", "]", "]", ",", ")", ")", ")", "# NOTE: decode the zfs values properly", "stat_data", "=", "__utils__", "[", "\"zfs.from_auto_dict\"", "]", "(", "stat_data", ")", "# NOTE: store stat_data in the proper location", "if", "line", ".", "startswith", "(", "\" \"", "*", "6", ")", ":", "rdev", "=", "stat_data", "[", "\"name\"", "]", "config_data", "[", "root_vdev", "]", "[", "vdev", "]", "[", "dev", "]", "[", "rdev", "]", "=", "stat_data", "elif", "line", ".", "startswith", "(", "\" \"", "*", "4", ")", ":", "rdev", "=", "None", "dev", "=", "stat_data", "[", "\"name\"", "]", "config_data", "[", "root_vdev", "]", "[", "vdev", "]", "[", "dev", "]", "=", "stat_data", "elif", "line", ".", "startswith", "(", "\" \"", "*", "2", ")", ":", "rdev", "=", "dev", "=", "None", "vdev", "=", "stat_data", "[", "\"name\"", "]", "config_data", "[", "root_vdev", "]", "[", "vdev", "]", "=", "stat_data", "else", ":", "rdev", "=", "dev", "=", "vdev", "=", "None", "root_vdev", "=", "stat_data", "[", "\"name\"", "]", "config_data", "[", "root_vdev", "]", "=", "stat_data", "# NOTE: name already used as identifier, drop duplicate data", "del", "stat_data", "[", "\"name\"", "]", "ret", "[", "pool", "]", "[", "\"config\"", "]", "=", "config_data", "return", "ret" ]
https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/modules/zpool.py#L95-L231
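The config-table parsing boils down to zipping a lower-cased header against each row's non-empty fields; a hypothetical row for illustration:

    header = ["name", "state", "read", "write", "cksum"]
    line = "  mirror-0 ONLINE 0 0 0"   # hypothetical config row, indented two spaces
    stat_data = dict(zip(header, [x for x in line.strip().split(" ") if x]))
    print(stat_data)  # {'name': 'mirror-0', 'state': 'ONLINE', 'read': '0', ...}
    # The indentation depth (2, 4, or 6 spaces) then decides whether the row
    # is filed as a vdev, dev, or rdev under the current root vdev.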
quark-engine/quark-engine
0d53b225b0c57bed6d1e2a5ec22efc842d45ed60
quark/core/struct/tableobject.py
python
TableObject.__repr__
(self)
return f"<TableObject-{self.hash_table}>"
[]
def __repr__(self): return f"<TableObject-{self.hash_table}>"
[ "def", "__repr__", "(", "self", ")", ":", "return", "f\"<TableObject-{self.hash_table}>\"" ]
https://github.com/quark-engine/quark-engine/blob/0d53b225b0c57bed6d1e2a5ec22efc842d45ed60/quark/core/struct/tableobject.py#L20-L21
sqall01/alertR
e1d1a83e54f876cc4cd7bd87387e05cb75d4dc13
managerClientDatabase/lib/manager/storage.py
python
_Storage.update_server_information
(self, msg_time: int, options: List[ManagerObjOption], profiles: List[ManagerObjProfile], nodes: List[ManagerObjNode], sensors: List[ManagerObjSensor], alerts: List[ManagerObjAlert], managers: List[ManagerObjManager], alert_levels: List[ManagerObjAlertLevel], sensor_alerts: List[ManagerObjSensorAlert])
Updates the received server information. :param msg_time: :param options: :param profiles: :param nodes: :param sensors: :param alerts: :param managers: :param alert_levels: :param sensor_alerts: :return Success or Failure
Updates the received server information.
[ "Updates", "the", "received", "server", "information", "." ]
def update_server_information(self, msg_time: int, options: List[ManagerObjOption], profiles: List[ManagerObjProfile], nodes: List[ManagerObjNode], sensors: List[ManagerObjSensor], alerts: List[ManagerObjAlert], managers: List[ManagerObjManager], alert_levels: List[ManagerObjAlertLevel], sensor_alerts: List[ManagerObjSensorAlert]) -> bool: """ Updates the received server information. :param msg_time: :param options: :param profiles: :param nodes: :param sensors: :param alerts: :param managers: :param alert_levels: :param sensor_alerts: :return Success or Failure """ raise NotImplemented("Function not implemented yet.")
[ "def", "update_server_information", "(", "self", ",", "msg_time", ":", "int", ",", "options", ":", "List", "[", "ManagerObjOption", "]", ",", "profiles", ":", "List", "[", "ManagerObjProfile", "]", ",", "nodes", ":", "List", "[", "ManagerObjNode", "]", ",", "sensors", ":", "List", "[", "ManagerObjSensor", "]", ",", "alerts", ":", "List", "[", "ManagerObjAlert", "]", ",", "managers", ":", "List", "[", "ManagerObjManager", "]", ",", "alert_levels", ":", "List", "[", "ManagerObjAlertLevel", "]", ",", "sensor_alerts", ":", "List", "[", "ManagerObjSensorAlert", "]", ")", "->", "bool", ":", "raise", "NotImplemented", "(", "\"Function not implemented yet.\"", ")" ]
https://github.com/sqall01/alertR/blob/e1d1a83e54f876cc4cd7bd87387e05cb75d4dc13/managerClientDatabase/lib/manager/storage.py#L46-L70
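Worth noting: the stub raises NotImplemented, which is a sentinel constant rather than an exception class, so calling it raises TypeError at runtime. The conventional form of such a stub would be:

    def update_server_information(self, *args, **kwargs) -> bool:
        # NotImplementedError is the exception intended for abstract/stub methods;
        # NotImplemented is reserved for binary dunder methods like __eq__.
        raise NotImplementedError("Function not implemented yet.")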
pwnieexpress/raspberry_pwn
86f80e781cfb9f130a21a7ff41ef3d5c0340f287
src/pexpect-2.3/pexpect.py
python
spawn.kill
(self, sig)
This sends the given signal to the child application. In keeping with UNIX tradition it has a misleading name. It does not necessarily kill the child unless you send the right signal.
This sends the given signal to the child application. In keeping with UNIX tradition it has a misleading name. It does not necessarily kill the child unless you send the right signal.
[ "This", "sends", "the", "given", "signal", "to", "the", "child", "application", ".", "In", "keeping", "with", "UNIX", "tradition", "it", "has", "a", "misleading", "name", ".", "It", "does", "not", "necessarily", "kill", "the", "child", "unless", "you", "send", "the", "right", "signal", "." ]
def kill(self, sig): """This sends the given signal to the child application. In keeping with UNIX tradition it has a misleading name. It does not necessarily kill the child unless you send the right signal. """ # Same as os.kill, but the pid is given for you. if self.isalive(): os.kill(self.pid, sig)
[ "def", "kill", "(", "self", ",", "sig", ")", ":", "# Same as os.kill, but the pid is given for you.", "if", "self", ".", "isalive", "(", ")", ":", "os", ".", "kill", "(", "self", ".", "pid", ",", "sig", ")" ]
https://github.com/pwnieexpress/raspberry_pwn/blob/86f80e781cfb9f130a21a7ff41ef3d5c0340f287/src/pexpect-2.3/pexpect.py#L1175-L1183
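Under the hood this is just os.kill on the child's pid; a POSIX-only sketch with a hypothetical child process:

    import os
    import signal
    import subprocess

    p = subprocess.Popen(["sleep", "30"])   # assumes a POSIX 'sleep' binary
    os.kill(p.pid, signal.SIGTERM)          # what spawn.kill(signal.SIGTERM) does
    p.wait()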
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/Python-2.7.9/Lib/plat-mac/lib-scriptpackages/Explorer/Microsoft_Internet_Explorer.py
python
Microsoft_Internet_Explorer_Events.do_script
(self, _object, _attributes={}, **_arguments)
do script: Execute script commands Required argument: JavaScript text to execute Keyword argument window: optional Window Identifier (as supplied by the ListWindows event) specifying context in which to execute the script Keyword argument _attributes: AppleEvent attribute dictionary Returns: Return value
do script: Execute script commands Required argument: JavaScript text to execute Keyword argument window: optional Window Identifier (as supplied by the ListWindows event) specifying context in which to execute the script Keyword argument _attributes: AppleEvent attribute dictionary Returns: Return value
[ "do", "script", ":", "Execute", "script", "commands", "Required", "argument", ":", "JavaScript", "text", "to", "execute", "Keyword", "argument", "window", ":", "optional", "Window", "Identifier", "(", "as", "supplied", "by", "the", "ListWindows", "event", ")", "specifying", "context", "in", "which", "to", "execute", "the", "script", "Keyword", "argument", "_attributes", ":", "AppleEvent", "attribute", "dictionary", "Returns", ":", "Return", "value" ]
def do_script(self, _object, _attributes={}, **_arguments): """do script: Execute script commands Required argument: JavaScript text to execute Keyword argument window: optional Window Identifier (as supplied by the ListWindows event) specifying context in which to execute the script Keyword argument _attributes: AppleEvent attribute dictionary Returns: Return value """ _code = 'misc' _subcode = 'dosc' aetools.keysubst(_arguments, self._argmap_do_script) _arguments['----'] = _object _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, _attributes) if _arguments.get('errn', 0): raise aetools.Error, aetools.decodeerror(_arguments) # XXXX Optionally decode result if _arguments.has_key('----'): return _arguments['----']
[ "def", "do_script", "(", "self", ",", "_object", ",", "_attributes", "=", "{", "}", ",", "*", "*", "_arguments", ")", ":", "_code", "=", "'misc'", "_subcode", "=", "'dosc'", "aetools", ".", "keysubst", "(", "_arguments", ",", "self", ".", "_argmap_do_script", ")", "_arguments", "[", "'----'", "]", "=", "_object", "_reply", ",", "_arguments", ",", "_attributes", "=", "self", ".", "send", "(", "_code", ",", "_subcode", ",", "_arguments", ",", "_attributes", ")", "if", "_arguments", ".", "get", "(", "'errn'", ",", "0", ")", ":", "raise", "aetools", ".", "Error", ",", "aetools", ".", "decodeerror", "(", "_arguments", ")", "# XXXX Optionally decode result", "if", "_arguments", ".", "has_key", "(", "'----'", ")", ":", "return", "_arguments", "[", "'----'", "]" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/Python-2.7.9/Lib/plat-mac/lib-scriptpackages/Explorer/Microsoft_Internet_Explorer.py#L60-L80
krintoxi/NoobSec-Toolkit
38738541cbc03cedb9a3b3ed13b629f781ad64f6
NoobSecToolkit - MAC OSX/tools/harvester/discovery/exaleadsearch.py
python
search_exalead.get_files
(self)
return rawres.fileurls(self.files)
[]
def get_files(self): rawres = myparser.parser(self.totalresults, self.word) return rawres.fileurls(self.files)
[ "def", "get_files", "(", "self", ")", ":", "rawres", "=", "myparser", ".", "parser", "(", "self", ".", "totalresults", ",", "self", ".", "word", ")", "return", "rawres", ".", "fileurls", "(", "self", ".", "files", ")" ]
https://github.com/krintoxi/NoobSec-Toolkit/blob/38738541cbc03cedb9a3b3ed13b629f781ad64f6/NoobSecToolkit - MAC OSX/tools/harvester/discovery/exaleadsearch.py#L74-L76
rwth-i6/returnn
f2d718a197a280b0d5f0fd91a7fcb8658560dddb
returnn/datasets/lm.py
python
TranslationDataset._get_data
(self, key, line_nr)
:param str key: "data" or "classes" :param int line_nr: :return: 1D array :rtype: numpy.ndarray
:param str key: "data" or "classes" :param int line_nr: :return: 1D array :rtype: numpy.ndarray
[ ":", "param", "str", "key", ":", "data", "or", "classes", ":", "param", "int", "line_nr", ":", ":", "return", ":", "1D", "array", ":", "rtype", ":", "numpy", ".", "ndarray" ]
def _get_data(self, key, line_nr): """ :param str key: "data" or "classes" :param int line_nr: :return: 1D array :rtype: numpy.ndarray """ import time last_print_time = 0 last_print_len = None while True: with self._lock: if self._data_len is not None: assert line_nr <= self._data_len cur_len = len(self._data[key]) if line_nr < cur_len: return self._data[key][line_nr] if cur_len != last_print_len and time.time() - last_print_time > 10: print("%r: waiting for %r, line %i (%i loaded so far)..." % (self, key, line_nr, cur_len), file=log.v3) last_print_len = cur_len last_print_time = time.time() time.sleep(1)
[ "def", "_get_data", "(", "self", ",", "key", ",", "line_nr", ")", ":", "import", "time", "last_print_time", "=", "0", "last_print_len", "=", "None", "while", "True", ":", "with", "self", ".", "_lock", ":", "if", "self", ".", "_data_len", "is", "not", "None", ":", "assert", "line_nr", "<=", "self", ".", "_data_len", "cur_len", "=", "len", "(", "self", ".", "_data", "[", "key", "]", ")", "if", "line_nr", "<", "cur_len", ":", "return", "self", ".", "_data", "[", "key", "]", "[", "line_nr", "]", "if", "cur_len", "!=", "last_print_len", "and", "time", ".", "time", "(", ")", "-", "last_print_time", ">", "10", ":", "print", "(", "\"%r: waiting for %r, line %i (%i loaded so far)...\"", "%", "(", "self", ",", "key", ",", "line_nr", ",", "cur_len", ")", ",", "file", "=", "log", ".", "v3", ")", "last_print_len", "=", "cur_len", "last_print_time", "=", "time", ".", "time", "(", ")", "time", ".", "sleep", "(", "1", ")" ]
https://github.com/rwth-i6/returnn/blob/f2d718a197a280b0d5f0fd91a7fcb8658560dddb/returnn/datasets/lm.py#L1298-L1319
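The method is a poll-under-lock consumer; the same pattern in miniature, with a hypothetical producer thread:

    import threading
    import time

    data = []
    lock = threading.Lock()

    def wait_for(line_nr):
        # Poll until another thread has appended enough lines, as _get_data does.
        while True:
            with lock:
                if line_nr < len(data):
                    return data[line_nr]
            time.sleep(0.01)

    def produce():
        with lock:
            data.append("hello")

    threading.Timer(0.1, produce).start()   # hypothetical producer
    print(wait_for(0))                      # 'hello'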
googleads/google-ads-python
2a1d6062221f6aad1992a6bcca0e7e4a93d2db86
google/ads/googleads/v8/services/services/user_interest_service/client.py
python
UserInterestServiceClient.parse_common_location_path
(path: str)
return m.groupdict() if m else {}
Parse a location path into its component segments.
Parse a location path into its component segments.
[ "Parse", "a", "location", "path", "into", "its", "component", "segments", "." ]
def parse_common_location_path(path: str) -> Dict[str, str]: """Parse a location path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path ) return m.groupdict() if m else {}
[ "def", "parse_common_location_path", "(", "path", ":", "str", ")", "->", "Dict", "[", "str", ",", "str", "]", ":", "m", "=", "re", ".", "match", "(", "r\"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$\"", ",", "path", ")", "return", "m", ".", "groupdict", "(", ")", "if", "m", "else", "{", "}" ]
https://github.com/googleads/google-ads-python/blob/2a1d6062221f6aad1992a6bcca0e7e4a93d2db86/google/ads/googleads/v8/services/services/user_interest_service/client.py#L228-L233
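The named groups make the parse trivial to check; the path below is hypothetical:

    import re

    path = "projects/my-project/locations/us-east1"   # hypothetical resource path
    m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
    print(m.groupdict() if m else {})  # {'project': 'my-project', 'location': 'us-east1'}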
Pymol-Scripts/Pymol-script-repo
bcd7bb7812dc6db1595953dfa4471fa15fb68c77
modules/pdb2pqr/contrib/numpy-1.1.0/numpy/f2py/lib/main.py
python
get_values
(sys_argv, prefix='', suffix='', strip_prefix=False, strip_suffix=False)
return ret
Return a list of values with pattern <prefix><value><suffix>. The corresponding items will be removed from sys_argv.
Return a list of values with pattern <prefix><value><suffix>. The corresponding items will be removed from sys_argv.
[ "Return", "a", "list", "of", "values", "with", "pattern", "<prefix", ">", "<value", ">", "<suffix", ">", ".", "The", "corresponding", "items", "will", "be", "removed", "from", "sys_argv", "." ]
def get_values(sys_argv, prefix='', suffix='', strip_prefix=False, strip_suffix=False): """ Return a list of values with pattern <prefix><value><suffix>. The corresponding items will be removed from sys_argv. """ match = re.compile(prefix + r'.*' + suffix + '\Z').match ret = [item for item in sys_argv if match(item)] [sys_argv.remove(item) for item in ret] if strip_prefix and prefix: i = len(prefix) ret = [item[i:] for item in ret] if strip_suffix and suffix: i = len(suffix) ret = [item[:-i] for item in ret] return ret
[ "def", "get_values", "(", "sys_argv", ",", "prefix", "=", "''", ",", "suffix", "=", "''", ",", "strip_prefix", "=", "False", ",", "strip_suffix", "=", "False", ")", ":", "match", "=", "re", ".", "compile", "(", "prefix", "+", "r'.*'", "+", "suffix", "+", "'\\Z'", ")", ".", "match", "ret", "=", "[", "item", "for", "item", "in", "sys_argv", "if", "match", "(", "item", ")", "]", "[", "sys_argv", ".", "remove", "(", "item", ")", "for", "item", "in", "ret", "]", "if", "strip_prefix", "and", "prefix", ":", "i", "=", "len", "(", "prefix", ")", "ret", "=", "[", "item", "[", "i", ":", "]", "for", "item", "in", "ret", "]", "if", "strip_suffix", "and", "suffix", ":", "i", "=", "len", "(", "suffix", ")", "ret", "=", "[", "item", "[", ":", "-", "i", "]", "for", "item", "in", "ret", "]", "return", "ret" ]
https://github.com/Pymol-Scripts/Pymol-script-repo/blob/bcd7bb7812dc6db1595953dfa4471fa15fb68c77/modules/pdb2pqr/contrib/numpy-1.1.0/numpy/f2py/lib/main.py#L106-L121
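A quick exercise of the matching-and-stripping behavior, with a hypothetical argv and prefix:

    import re

    sys_argv = ["--build-dir=/tmp/x", "--verbose", "input.f90"]   # hypothetical
    prefix, suffix = "--build-dir=", ""
    match = re.compile(prefix + r".*" + suffix + r"\Z").match
    ret = [item for item in sys_argv if match(item)]
    for item in ret:
        sys_argv.remove(item)                 # matched items are consumed from argv
    ret = [item[len(prefix):] for item in ret]  # strip_prefix=True
    print(ret, sys_argv)  # ['/tmp/x'] ['--verbose', 'input.f90']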
spywhere/Javatar
e273ec40c209658247a71b109bb90cd126984a29
core/thread_progress.py
python
MultiThreadProgress.add
(self, thread, message)
Add thread to the list @param thread: thread to be added @param message: message to be show
Add thread to the list
[ "Add", "thread", "to", "the", "list" ]
def add(self, thread, message): """ Add thread to the list @param thread: thread to be added @param message: message to be show """ self.thread_list.append([thread, message])
[ "def", "add", "(", "self", ",", "thread", ",", "message", ")", ":", "self", ".", "thread_list", ".", "append", "(", "[", "thread", ",", "message", "]", ")" ]
https://github.com/spywhere/Javatar/blob/e273ec40c209658247a71b109bb90cd126984a29/core/thread_progress.py#L30-L37
wkcn/MobulaOP
ae693a6a55824e9a0785f5b9befde6bd8ecccfd6
mobula/glue/backend.py
python
op_gen
(glue_mod, op, name)
return glue_mod.gen_cache[name]
Get operator generator of glue module. Parameters ---------- glue_mod: Glue Module op: object The object of custom operator. name: str The name of custom operator. Returns ------- The operator generator of glue module.
Get operator generator of glue module.
[ "Get", "operator", "generator", "of", "glue", "module", "." ]
def op_gen(glue_mod, op, name): """ Get operator generator of glue module. Parameters ---------- glue_mod: Glue Module op: object The object of custom operator. name: str The name of custom operator. Returns ------- The operator generator of glue module. """ if name not in glue_mod.gen_cache: glue_mod.gen_cache[name] = glue_mod.OpGen(op=op, name=name) return glue_mod.gen_cache[name]
[ "def", "op_gen", "(", "glue_mod", ",", "op", ",", "name", ")", ":", "if", "name", "not", "in", "glue_mod", ".", "gen_cache", ":", "glue_mod", ".", "gen_cache", "[", "name", "]", "=", "glue_mod", ".", "OpGen", "(", "op", "=", "op", ",", "name", "=", "name", ")", "return", "glue_mod", ".", "gen_cache", "[", "name", "]" ]
https://github.com/wkcn/MobulaOP/blob/ae693a6a55824e9a0785f5b9befde6bd8ecccfd6/mobula/glue/backend.py#L142-L159
jcmgray/quimb
a54b22c61534be8acbc9efe4da97fb5c7f12057d
quimb/tensor/tensor_core.py
python
reindex
(self, index_map, inplace=False)
return new
Rename the indices of this tensor, optionally in-place. Parameters ---------- index_map : dict-like Mapping of pairs ``{old_ind: new_ind, ...}``. inplace : bool, optional If ``False`` (the default), a copy of this tensor with the changed inds will be returned.
Rename the indices of this tensor, optionally in-place.
[ "Rename", "the", "indices", "of", "this", "tensor", "optionally", "in", "-", "place", "." ]
def reindex(self, index_map, inplace=False): """Rename the indices of this tensor, optionally in-place. Parameters ---------- index_map : dict-like Mapping of pairs ``{old_ind: new_ind, ...}``. inplace : bool, optional If ``False`` (the default), a copy of this tensor with the changed inds will be returned. """ new = self if inplace else self.copy() new_inds = tuple(index_map.get(ind, ind) for ind in new.inds) if self.left_inds: new_left_inds = (index_map.get(ind, ind) for ind in self.left_inds) else: new_left_inds = self.left_inds new.modify(inds=new_inds, left_inds=new_left_inds) return new
[ "def", "reindex", "(", "self", ",", "index_map", ",", "inplace", "=", "False", ")", ":", "new", "=", "self", "if", "inplace", "else", "self", ".", "copy", "(", ")", "new_inds", "=", "tuple", "(", "index_map", ".", "get", "(", "ind", ",", "ind", ")", "for", "ind", "in", "new", ".", "inds", ")", "if", "self", ".", "left_inds", ":", "new_left_inds", "=", "(", "index_map", ".", "get", "(", "ind", ",", "ind", ")", "for", "ind", "in", "self", ".", "left_inds", ")", "else", ":", "new_left_inds", "=", "self", ".", "left_inds", "new", ".", "modify", "(", "inds", "=", "new_inds", ",", "left_inds", "=", "new_left_inds", ")", "return", "new" ]
https://github.com/jcmgray/quimb/blob/a54b22c61534be8acbc9efe4da97fb5c7f12057d/quimb/tensor/tensor_core.py#L2412-L2434
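The core of the rename is a dict lookup with identity fallback; the index names below are hypothetical:

    inds = ("k0", "k1", "b")                  # hypothetical tensor indices
    index_map = {"k0": "x", "b": "bond"}
    new_inds = tuple(index_map.get(ind, ind) for ind in inds)
    print(new_inds)  # ('x', 'k1', 'bond') -- unmapped indices pass through unchanged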
sqlalchemy/sqlalchemy
eb716884a4abcabae84a6aaba105568e925b7d27
lib/sqlalchemy/engine/interfaces.py
python
ExecutionContext.get_result_cursor_strategy
(self, result)
Return a result cursor strategy for a given result object. This method is implemented by the :class:`.DefaultDialect` and is only needed by implementing dialects in the case where some special steps regarding the cursor must be taken, such as manufacturing fake results from some other element of the cursor, or pre-buffering the cursor's results. A simplified version of the default implementation is:: from sqlalchemy.engine.result import DefaultCursorFetchStrategy class MyExecutionContext(DefaultExecutionContext): def get_result_cursor_strategy(self, result): return DefaultCursorFetchStrategy.create(result) Above, the :class:`.DefaultCursorFetchStrategy` will be applied to the result object. For results that are pre-buffered from a cursor that might be closed, an implementation might be:: from sqlalchemy.engine.result import ( FullyBufferedCursorFetchStrategy ) class MyExecutionContext(DefaultExecutionContext): _pre_buffered_result = None def pre_exec(self): if self.special_condition_prebuffer_cursor(): self._pre_buffered_result = ( self.cursor.description, self.cursor.fetchall() ) def get_result_cursor_strategy(self, result): if self._pre_buffered_result: description, cursor_buffer = self._pre_buffered_result return ( FullyBufferedCursorFetchStrategy. create_from_buffer( result, description, cursor_buffer ) ) else: return DefaultCursorFetchStrategy.create(result) This method replaces the previous not-quite-documented ``get_result_proxy()`` method. .. versionadded:: 1.4 - result objects now interpret cursor results based on a pluggable "strategy" object, which is delivered by the :class:`.ExecutionContext` via the :meth:`.ExecutionContext.get_result_cursor_strategy` method. .. seealso:: :meth:`.ExecutionContext.get_out_parameter_values`
Return a result cursor strategy for a given result object.
[ "Return", "a", "result", "cursor", "strategy", "for", "a", "given", "result", "object", "." ]
def get_result_cursor_strategy(self, result): """Return a result cursor strategy for a given result object. This method is implemented by the :class:`.DefaultDialect` and is only needed by implementing dialects in the case where some special steps regarding the cursor must be taken, such as manufacturing fake results from some other element of the cursor, or pre-buffering the cursor's results. A simplified version of the default implementation is:: from sqlalchemy.engine.result import DefaultCursorFetchStrategy class MyExecutionContext(DefaultExecutionContext): def get_result_cursor_strategy(self, result): return DefaultCursorFetchStrategy.create(result) Above, the :class:`.DefaultCursorFetchStrategy` will be applied to the result object. For results that are pre-buffered from a cursor that might be closed, an implementation might be:: from sqlalchemy.engine.result import ( FullyBufferedCursorFetchStrategy ) class MyExecutionContext(DefaultExecutionContext): _pre_buffered_result = None def pre_exec(self): if self.special_condition_prebuffer_cursor(): self._pre_buffered_result = ( self.cursor.description, self.cursor.fetchall() ) def get_result_cursor_strategy(self, result): if self._pre_buffered_result: description, cursor_buffer = self._pre_buffered_result return ( FullyBufferedCursorFetchStrategy. create_from_buffer( result, description, cursor_buffer ) ) else: return DefaultCursorFetchStrategy.create(result) This method replaces the previous not-quite-documented ``get_result_proxy()`` method. .. versionadded:: 1.4 - result objects now interpret cursor results based on a pluggable "strategy" object, which is delivered by the :class:`.ExecutionContext` via the :meth:`.ExecutionContext.get_result_cursor_strategy` method. .. seealso:: :meth:`.ExecutionContext.get_out_parameter_values` """ raise NotImplementedError()
[ "def", "get_result_cursor_strategy", "(", "self", ",", "result", ")", ":", "raise", "NotImplementedError", "(", ")" ]
https://github.com/sqlalchemy/sqlalchemy/blob/eb716884a4abcabae84a6aaba105568e925b7d27/lib/sqlalchemy/engine/interfaces.py#L2051-L2112
jython/jython3
def4f8ec47cb7a9c799ea4c745f12badf92c5769
lib-python/3.5.1/pickletools.py
python
read_bytes1
(f)
r""" >>> import io >>> read_bytes1(io.BytesIO(b"\x00")) b'' >>> read_bytes1(io.BytesIO(b"\x03abcdef")) b'abc'
r""" >>> import io >>> read_bytes1(io.BytesIO(b"\x00")) b'' >>> read_bytes1(io.BytesIO(b"\x03abcdef")) b'abc'
[ "r", ">>>", "import", "io", ">>>", "read_bytes1", "(", "io", ".", "BytesIO", "(", "b", "\\", "x00", "))", "b", ">>>", "read_bytes1", "(", "io", ".", "BytesIO", "(", "b", "\\", "x03abcdef", "))", "b", "abc" ]
def read_bytes1(f): r""" >>> import io >>> read_bytes1(io.BytesIO(b"\x00")) b'' >>> read_bytes1(io.BytesIO(b"\x03abcdef")) b'abc' """ n = read_uint1(f) assert n >= 0 data = f.read(n) if len(data) == n: return data raise ValueError("expected %d bytes in a bytes1, but only %d remain" % (n, len(data)))
[ "def", "read_bytes1", "(", "f", ")", ":", "n", "=", "read_uint1", "(", "f", ")", "assert", "n", ">=", "0", "data", "=", "f", ".", "read", "(", "n", ")", "if", "len", "(", "data", ")", "==", "n", ":", "return", "data", "raise", "ValueError", "(", "\"expected %d bytes in a bytes1, but only %d remain\"", "%", "(", "n", ",", "len", "(", "data", ")", ")", ")" ]
https://github.com/jython/jython3/blob/def4f8ec47cb7a9c799ea4c745f12badf92c5769/lib-python/3.5.1/pickletools.py#L472-L487
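The one-byte length prefix can be decoded inline; the stream below is hypothetical:

    import io

    f = io.BytesIO(b"\x03abcdef")
    n = f.read(1)[0]        # the 1-byte unsigned length (what read_uint1 yields)
    data = f.read(n)
    assert len(data) == n, "truncated bytes1"
    print(data)             # b'abc'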
PaddlePaddle/Research
2da0bd6c72d60e9df403aff23a7802779561c4a1
ST_DM/KDD2021-MSTPAC/code/MST-PAC/frame/core/base_frame.py
python
BaseFrame.parse_args
(self)
return
parse args and load config from conf file
parse args and load config from conf file
[ "parse", "args", "and", "load", "config", "from", "conf", "file" ]
def parse_args(self): """ parse args and load config from conf file """ #init ArgumentParser """ parser = argparse.ArgumentParser() parser.add_argument("--conf_file") args = parser.parse_args() """ """ set the necessary default args. Fault-tolerant in frame, even though the necessary args is not define in user conf file. """ self.set_default_args() #load user defined conf file flags.DEFINE_custom( #'conf_file', args.conf_file, 'load flags from conf file', 'conf_file', './conf/demo_local.conf', 'load flags from conf file', action=LoadConfFile, sec_name="Train") #append additional args self.append_additional_args(FLAGS) if FLAGS.debug_mode: logging.info("base_lr: %f\n" "CUDA_VISIBLE_DEVICES: %s\n" "num_gpus: %d\n" "file_list: %s\n" "dataset_dir: %s\n" % (FLAGS.base_lr, str(FLAGS.cuda_visible_devices), FLAGS.num_gpus, FLAGS.file_list, FLAGS.dataset_dir ) ) return
[ "def", "parse_args", "(", "self", ")", ":", "#init ArgumentParser", "\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--conf_file\")\n args = parser.parse_args()\n \"\"\"", "\"\"\"\n set the necessary default args. \n Fault-tolerant in frame, even though\n the necessary args is not define in user conf file.\n \"\"\"", "self", ".", "set_default_args", "(", ")", "#load user defined conf file", "flags", ".", "DEFINE_custom", "(", "#'conf_file', args.conf_file, 'load flags from conf file', ", "'conf_file'", ",", "'./conf/demo_local.conf'", ",", "'load flags from conf file'", ",", "action", "=", "LoadConfFile", ",", "sec_name", "=", "\"Train\"", ")", "#append additional args ", "self", ".", "append_additional_args", "(", "FLAGS", ")", "if", "FLAGS", ".", "debug_mode", ":", "logging", ".", "info", "(", "\"base_lr: %f\\n\"", "\"CUDA_VISIBLE_DEVICES: %s\\n\"", "\"num_gpus: %d\\n\"", "\"file_list: %s\\n\"", "\"dataset_dir: %s\\n\"", "%", "(", "FLAGS", ".", "base_lr", ",", "str", "(", "FLAGS", ".", "cuda_visible_devices", ")", ",", "FLAGS", ".", "num_gpus", ",", "FLAGS", ".", "file_list", ",", "FLAGS", ".", "dataset_dir", ")", ")", "return" ]
https://github.com/PaddlePaddle/Research/blob/2da0bd6c72d60e9df403aff23a7802779561c4a1/ST_DM/KDD2021-MSTPAC/code/MST-PAC/frame/core/base_frame.py#L52-L92
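The commented-out argparse path in the body corresponds roughly to this sketch; mapping the project's flags.DEFINE_custom onto plain argparse is an analogy, with argv stubbed and the default path taken from the record:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--conf_file", default="./conf/demo_local.conf")
    args = parser.parse_args([])    # empty argv so the default is used
    print(args.conf_file)           # ./conf/demo_local.conf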