Dataset columns (type, observed range):

repo: string, lengths 7 to 55
path: string, lengths 4 to 223
url: string, lengths 87 to 315
code: string, lengths 75 to 104k
code_tokens: list
docstring: string, lengths 1 to 46.9k
docstring_tokens: list
language: string, 1 distinct value
partition: string, 3 distinct values
avg_line_len: float64, 7.91 to 980

repo: mozilla-releng/signtool
path: signtool/util/paths.py
url: https://github.com/mozilla-releng/signtool/blob/0a778778a181cb9cab424b29fa104b70345f53c2/signtool/util/paths.py#L20-L27
code:
import os

def convertPath(srcpath, dstdir):
    """Given `srcpath`, return a corresponding path within `dstdir`"""
    bits = srcpath.split("/")
    bits.pop(0)
    # Strip out leading 'unsigned' from paths like unsigned/update/win32/...
    if bits[0] == 'unsigned':
        bits.pop(0)
    return os.path.join(dstdir, *bits)
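
A minimal usage sketch (the paths are invented, not from the source):

    convertPath("unsigned/update/win32/setup.exe", "/builds/signed")
    # -> '/builds/signed/update/win32/setup.exe' on POSIX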

language: python | partition: train | avg_line_len: 38.625

repo: paulovn/sparql-kernel
path: sparqlkernel/connection.py
url: https://github.com/paulovn/sparql-kernel/blob/1d2d155ff5da72070cb2a98fae33ea8113fac782/sparqlkernel/connection.py#L310-L355
code:
def render_graph(result, cfg, **kwargs):
    """
    Render to output a result that can be parsed as an RDF graph
    """
    # Mapping from MIME types to formats accepted by RDFlib
    rdflib_formats = {'text/rdf+n3': 'n3',
                      'text/turtle': 'turtle',
                      'application/x-turtle': 'turtle',
                      'application/rdf+xml': 'xml',
                      'text/rdf': 'xml'}
    try:
        got = kwargs.get('format', 'text/rdf+n3')
        fmt = rdflib_formats[got]
    except KeyError:
        raise KrnlException('Unsupported format for graph processing: {!s}', got)

    g = ConjunctiveGraph()
    g.load(StringInputSource(result), format=fmt)

    display = cfg.dis[0] if is_collection(cfg.dis) else cfg.dis
    if display in ('png', 'svg'):
        try:
            literal = len(cfg.dis) > 1 and cfg.dis[1].startswith('withlit')
            opt = {'lang': cfg.lan, 'literal': literal, 'graphviz': []}
            data, metadata = draw_graph(g, fmt=display, options=opt)
            return {'data': data, 'metadata': metadata}
        except Exception as e:
            raise KrnlException('Exception while drawing graph: {!r}', e)
    elif display == 'table':
        it = rdf_iterator(g, set(cfg.lan), add_vtype=cfg.typ)
        n, data = html_table(it, limit=cfg.lmt, withtype=cfg.typ)
        data += div('Shown: {}, Total rows: {}', n if cfg.lmt else 'all',
                    len(g), css="tinfo")
        data = {'text/html': div(data)}
    elif len(g) == 0:
        data = {'text/html': div(div('empty graph', css='krn-warn'))}
    else:
        data = {'text/plain': g.serialize(format='nt').decode('utf-8')}

    return {'data': data, 'metadata': {}}
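
The MIME mapping above only selects an rdflib parser name; that step in isolation (assumes rdflib is installed; the triple is invented):

    from rdflib import Graph
    g = Graph()
    g.parse(data="@prefix ex: <http://example.org/> . ex:a ex:b ex:c .",
            format="turtle")
    print(len(g))  # 1 triple parsed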

language: python | partition: train | avg_line_len: 39.152174

repo: biocore/deblur
path: deblur/workflow.py
url: https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L922-L965
code:
import logging
import subprocess

def _system_call(cmd, stdoutfilename=None):
    """Execute the command `cmd`

    Parameters
    ----------
    cmd : str
        The string containing the command to be run.
    stdoutfilename : str
        Name of the file to save stdout to or None (default) to not save
        to file.

    Returns
    -------
    tuple of (str, str, int)
        The standard output, standard error and exit status of the
        executed command.

    Notes
    -----
    This function is ported and modified from QIIME
    (http://www.qiime.org), previously named qiime_system_call. QIIME is
    a GPL project, but we obtained permission from the authors of this
    function to port it to Qiita and keep it under BSD license.
    """
    logger = logging.getLogger(__name__)
    logger.debug('system call: %s' % cmd)

    if stdoutfilename:
        with open(stdoutfilename, 'w') as f:
            proc = subprocess.Popen(cmd, universal_newlines=True,
                                    shell=False, stdout=f,
                                    stderr=subprocess.PIPE)
    else:
        proc = subprocess.Popen(cmd, universal_newlines=True,
                                shell=False, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
    # Communicate pulls all stdout/stderr from the PIPEs
    # This call blocks until the command is done
    stdout, stderr = proc.communicate()
    return_value = proc.returncode
    return stdout, stderr, return_value
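
A call sketch (not from the source; note that with shell=False, Popen expects the command as an argument list rather than a single shell string):

    stdout, stderr, ret = _system_call(['echo', 'hello'])
    assert ret == 0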

language: python | partition: train | avg_line_len: 35.295455

repo: boakley/robotframework-lint
path: rflint/parser/parser.py
url: https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/parser/parser.py#L227-L232
code:
def keywords(self):
    '''Generator which returns all keywords in the suite'''
    for table in self.tables:
        if isinstance(table, KeywordTable):
            for keyword in table.keywords:
                yield keyword
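
A usage sketch (hypothetical: assumes `suite` is a parsed rflint suite object and that the yielded keyword objects expose a `name` attribute):

    for keyword in suite.keywords():
        print(keyword.name)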

language: python | partition: valid | avg_line_len: 40.166667

repo: cs01/pygdbmi
path: pygdbmi/gdbmiparser.py
url: https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/gdbmiparser.py#L270-L285
code:
def _parse_key_val(stream):
    """Parse key, value combination

    return (tuple):
        Parsed key (string)
        Parsed value (either a string, array, or dict)
    """
    logger.debug("parsing key/val")
    key = _parse_key(stream)
    val = _parse_val(stream)

    logger.debug("parsed key/val")
    logger.debug("%s", fmt_green(key))
    logger.debug("%s", fmt_green(val))

    return key, val
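
This private helper is exercised through pygdbmi's public parser; a quick round trip through that entry point (output abbreviated):

    from pygdbmi.gdbmiparser import parse_response
    print(parse_response('^done,value="42"'))
    # {'type': 'result', 'message': 'done', 'payload': {'value': '42'}, ...}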

language: python | partition: valid | avg_line_len: 24.3125

repo: rigetti/pyquil
path: pyquil/device.py
url: https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/device.py#L245-L253
code:
def fCZ_std_errs(self):
    """
    Get a dictionary of the standard errors of the CZ fidelities from the
    specs, keyed by targets (qubit-qubit pairs).

    :return: A dictionary of the standard errors of the CZ fidelities,
        keyed by qubit-qubit pairs.
    :rtype: Dict[tuple(int, int), float]
    """
    return {tuple(es.targets): es.fCZ_std_err for es in self.edges_specs}

language: python | partition: train | avg_line_len: 40.333333

repo: BeyondTheClouds/enoslib
path: enoslib/infra/enos_g5k/g5k_api_utils.py
url: https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_g5k/g5k_api_utils.py#L493-L543
code:
def _do_synchronise_jobs(walltime, machines):
    """ This returns a common reservation date for all the jobs.

    This reservation date is really only a hint and will be supplied to
    each oar server. Without this *common* reservation_date, one oar
    server can decide to postpone the start of the job while the others
    are already running. But this doesn't prevent the start of a job on
    one site from drifting (e.g. because the machines need to be
    restarted), though this shouldn't exceed a few minutes.
    """

    offset = SYNCHRONISATION_OFFSET
    start = time.time() + offset
    _t = time.strptime(walltime, "%H:%M:%S")
    _walltime = _t.tm_hour * 3600 + _t.tm_min * 60 + _t.tm_sec

    # Compute the demand for each cluster
    demands = defaultdict(int)
    for machine in machines:
        cluster = machine["cluster"]
        demands[cluster] += machine["nodes"]

    # Early leave if only one cluster is there
    if len(list(demands.keys())) <= 1:
        logger.debug("Only one cluster detected: no synchronisation needed")
        return None

    clusters = clusters_sites_obj(list(demands.keys()))

    # Early leave if only one site is concerned
    sites = set(list(clusters.values()))
    if len(sites) <= 1:
        logger.debug("Only one site detected: no synchronisation needed")
        return None

    # Test the proposed reservation_date
    ok = True
    for cluster, nodes in demands.items():
        cluster_status = clusters[cluster].status.list()
        ok = ok and can_start_on_cluster(cluster_status.nodes, nodes,
                                         start, _walltime)
        if not ok:
            break
    if ok:
        # The proposed reservation_date fits
        logger.info("Reservation_date=%s (%s)" % (_date2h(start), sites))
        return start

    # No common start date could be found for all the sites
    raise EnosG5kSynchronisationError(sites)
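
The walltime handling above converts "HH:MM:SS" into seconds; that step on its own:

    import time
    _t = time.strptime("02:30:00", "%H:%M:%S")
    print(_t.tm_hour * 3600 + _t.tm_min * 60 + _t.tm_sec)  # 9000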

language: python | partition: train | avg_line_len: 37.352941

repo: fastai/fastai
path: fastai/text/transform.py
url: https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/text/transform.py#L116-L120
code:
def process_all(self, texts:Collection[str]) -> List[List[str]]:
    "Process a list of `texts`."
    if self.n_cpus <= 1: return self._process_all_1(texts)
    with ProcessPoolExecutor(self.n_cpus) as e:
        return sum(e.map(self._process_all_1, partition_by_cores(texts, self.n_cpus)), [])
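
The trailing sum(..., []) flattens the per-core lists of results back into a single list; in isolation:

    per_core = [[['a', 'b']], [['c'], ['d']]]
    print(sum(per_core, []))  # [['a', 'b'], ['c'], ['d']]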

language: python | partition: train | avg_line_len: 61.4

repo: mbedmicro/pyOCD
path: pyocd/coresight/cortex_m.py
url: https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/coresight/cortex_m.py#L996-L1081
code:
def read_core_registers_raw(self, reg_list):
    """
    Read one or more core registers

    Read core registers in reg_list and return a list of values.
    If any register in reg_list is a string, find the number
    associated to this register in the lookup table CORE_REGISTER.
    """
    # convert to index only
    reg_list = [register_name_to_index(reg) for reg in reg_list]

    # Sanity check register values
    for reg in reg_list:
        if reg not in CORE_REGISTER.values():
            raise ValueError("unknown reg: %d" % reg)
        elif is_fpu_register(reg) and (not self.has_fpu):
            raise ValueError("attempt to read FPU register without FPU")

    # Handle doubles.
    doubles = [reg for reg in reg_list if is_double_float_register(reg)]
    hasDoubles = len(doubles) > 0
    if hasDoubles:
        originalRegList = reg_list

        # Strip doubles from reg_list.
        reg_list = [reg for reg in reg_list if not is_double_float_register(reg)]

        # Read float regs required to build doubles.
        singleRegList = []
        for reg in doubles:
            singleRegList += (-reg, -reg + 1)
        singleValues = self.read_core_registers_raw(singleRegList)

    # Begin all reads and writes
    dhcsr_cb_list = []
    reg_cb_list = []
    for reg in reg_list:
        if is_cfbp_subregister(reg):
            reg = CORE_REGISTER['cfbp']
        elif is_psr_subregister(reg):
            reg = CORE_REGISTER['xpsr']

        # write id in DCRSR
        self.write_memory(CortexM.DCRSR, reg)

        # Technically, we need to poll S_REGRDY in DHCSR here before reading
        # DCRDR. But we're running so slow compared to the target that it's
        # not necessary.
        # Read it and assert that S_REGRDY is set
        dhcsr_cb = self.read_memory(CortexM.DHCSR, now=False)
        reg_cb = self.read_memory(CortexM.DCRDR, now=False)

        dhcsr_cb_list.append(dhcsr_cb)
        reg_cb_list.append(reg_cb)

    # Read all results
    reg_vals = []
    for reg, reg_cb, dhcsr_cb in zip(reg_list, reg_cb_list, dhcsr_cb_list):
        dhcsr_val = dhcsr_cb()
        assert dhcsr_val & CortexM.S_REGRDY
        val = reg_cb()

        # Special handling for registers that are combined into a single
        # DCRSR number.
        if is_cfbp_subregister(reg):
            val = (val >> ((-reg - 1) * 8)) & 0xff
        elif is_psr_subregister(reg):
            val &= sysm_to_psr_mask(reg)

        reg_vals.append(val)

    # Merge double regs back into result list.
    if hasDoubles:
        results = []
        for reg in originalRegList:
            # Double
            if is_double_float_register(reg):
                doubleIndex = doubles.index(reg)
                singleLow = singleValues[doubleIndex * 2]
                singleHigh = singleValues[doubleIndex * 2 + 1]
                double = (singleHigh << 32) | singleLow
                results.append(double)
            # Other register
            else:
                results.append(reg_vals[reg_list.index(reg)])
        reg_vals = results

    return reg_vals
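
The double-register merge only concatenates two 32-bit reads into one 64-bit integer; reinterpreting that bit pattern as an IEEE-754 double is a separate step (illustrative value, not from the source):

    import struct
    raw = (0x3FF00000 << 32) | 0x00000000                  # bit pattern of 1.0
    print(struct.unpack('<d', struct.pack('<Q', raw))[0])  # 1.0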

language: python | partition: train | avg_line_len: 38.337209

repo: Dallinger/Dallinger
path: dallinger/models.py
url: https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/models.py#L692-L769
code:
def neighbors(self, type=None, direction="to", failed=None):
    """Get a node's neighbors - nodes that are directly connected to it.

    Type specifies the class of neighbour and must be a subclass of
    Node (default is Node).

    Connection is the direction of the connections and can be "to"
    (default), "from", "either", or "both".
    """
    # get type
    if type is None:
        type = Node
    if not issubclass(type, Node):
        raise ValueError(
            "{} is not a valid neighbor type, "
            "needs to be a subclass of Node.".format(type)
        )

    # get direction
    if direction not in ["both", "either", "from", "to"]:
        raise ValueError(
            "{} not a valid neighbor connection. "
            "Should be both, either, to or from.".format(direction)
        )

    if failed is not None:
        raise ValueError(
            "You should not pass a failed argument to neighbors(). "
            "Neighbors is "
            "unusual in that a failed argument cannot be passed. This is "
            "because there is inherent uncertainty in what it means for a "
            "neighbor to be failed. The neighbors function will only ever "
            "return not-failed nodes connected to you via not-failed "
            "vectors. If you want to do more elaborate queries, for "
            "example, getting not-failed nodes connected to you via failed"
            " vectors, you should do so via sql queries."
        )

    neighbors = []
    # get the neighbours
    if direction == "to":
        outgoing_vectors = (
            Vector.query.with_entities(Vector.destination_id)
            .filter_by(origin_id=self.id, failed=False)
            .all()
        )
        neighbor_ids = [v.destination_id for v in outgoing_vectors]
        if neighbor_ids:
            neighbors = Node.query.filter(Node.id.in_(neighbor_ids)).all()
            neighbors = [n for n in neighbors if isinstance(n, type)]

    if direction == "from":
        incoming_vectors = (
            Vector.query.with_entities(Vector.origin_id)
            .filter_by(destination_id=self.id, failed=False)
            .all()
        )
        neighbor_ids = [v.origin_id for v in incoming_vectors]
        if neighbor_ids:
            neighbors = Node.query.filter(Node.id.in_(neighbor_ids)).all()
            neighbors = [n for n in neighbors if isinstance(n, type)]

    if direction == "either":
        neighbors = list(
            set(
                self.neighbors(type=type, direction="to")
                + self.neighbors(type=type, direction="from")
            )
        )

    if direction == "both":
        neighbors = list(
            set(self.neighbors(type=type, direction="to"))
            & set(self.neighbors(type=type, direction="from"))
        )

    return neighbors
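
The "either" and "both" branches reduce to set union and intersection over the two directed lookups; in miniature:

    to_ids, from_ids = {1, 2}, {2, 3}
    print(to_ids | from_ids)  # 'either' -> {1, 2, 3}
    print(to_ids & from_ids)  # 'both'   -> {2}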

language: python | partition: train | avg_line_len: 38.730769

repo: gbiggs/rtctree
path: rtctree/ports.py
url: https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/ports.py#L149-L163
code:
def get_connections_by_dests(self, dests):
    '''Search for all connections involving this and all other ports.'''
    with self._mutex:
        res = []
        for c in self.connections:
            if not c.has_port(self):
                continue
            has_dest = False
            for d in dests:
                if c.has_port(d):
                    has_dest = True
                    break
            if has_dest:
                res.append(c)
        return res

language: python | partition: train | avg_line_len: 34.666667

repo: SeabornGames/RequestClient
path: example_bindings/account_access.py
url: https://github.com/SeabornGames/RequestClient/blob/21aeb951ddfdb6ee453ad0edc896ff224e06425d/example_bindings/account_access.py#L25-L31
code:
def delete(self, account_id, user_id):
    """ Only the primary on the account can add or remove a user's access
        to an account
    :param account_id: int of the account_id for the account
    :param user_id:    int of the user_id whose access should be removed
    :return:           Access dict
    """
    return self.connection.delete('account/access', account_id=account_id,
                                  user_id=user_id)

language: python | partition: train | avg_line_len: 56.571429

repo: pyamg/pyamg
path: pyamg/aggregation/aggregate.py
url: https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/aggregation/aggregate.py#L180-L272
code:
def lloyd_aggregation(C, ratio=0.03, distance='unit', maxiter=10):
    """Aggregate nodes using Lloyd Clustering.

    Parameters
    ----------
    C : csr_matrix
        strength of connection matrix
    ratio : scalar
        Fraction of the nodes which will be seeds.
    distance : ['unit','abs','inv','same','min']
        Distance assigned to each edge of the graph G used in Lloyd
        clustering

        For each nonzero value C[i,j]:

        ======= ===========================
        'unit'  G[i,j] = 1
        'abs'   G[i,j] = abs(C[i,j])
        'inv'   G[i,j] = 1.0/abs(C[i,j])
        'same'  G[i,j] = C[i,j]
        'min'   G[i,j] = C[i,j] - min(C)
        ======= ===========================

    maxiter : int
        Maximum number of iterations to perform

    Returns
    -------
    AggOp : csr_matrix
        aggregation operator which determines the sparsity pattern
        of the tentative prolongator
    seeds : array
        array of Cpts, i.e., Cpts[i] = root node of aggregate i

    See Also
    --------
    amg_core.standard_aggregation

    Examples
    --------
    >>> from scipy.sparse import csr_matrix
    >>> from pyamg.gallery import poisson
    >>> from pyamg.aggregation.aggregate import lloyd_aggregation
    >>> A = poisson((4,), format='csr')   # 1D mesh with 4 vertices
    >>> A.todense()
    matrix([[ 2., -1.,  0.,  0.],
            [-1.,  2., -1.,  0.],
            [ 0., -1.,  2., -1.],
            [ 0.,  0., -1.,  2.]])
    >>> lloyd_aggregation(A)[0].todense()  # one aggregate
    matrix([[1],
            [1],
            [1],
            [1]], dtype=int8)
    >>> # more seeding for two aggregates
    >>> Agg = lloyd_aggregation(A, ratio=0.5)[0].todense()
    """
    if ratio <= 0 or ratio > 1:
        raise ValueError('ratio must be > 0.0 and <= 1.0')

    if not (isspmatrix_csr(C) or isspmatrix_csc(C)):
        raise TypeError('expected csr_matrix or csc_matrix')

    if distance == 'unit':
        data = np.ones_like(C.data).astype(float)
    elif distance == 'abs':
        data = abs(C.data)
    elif distance == 'inv':
        data = 1.0/abs(C.data)
    elif distance == 'same':
        data = C.data
    elif distance == 'min':
        data = C.data - C.data.min()
    else:
        raise ValueError('unrecognized value distance=%s' % distance)

    if C.dtype == complex:
        data = np.real(data)

    assert(data.min() >= 0)

    G = C.__class__((data, C.indices, C.indptr), shape=C.shape)

    num_seeds = int(min(max(ratio * G.shape[0], 1), G.shape[0]))

    distances, clusters, seeds = lloyd_cluster(G, num_seeds, maxiter=maxiter)

    row = (clusters >= 0).nonzero()[0]
    col = clusters[row]
    data = np.ones(len(row), dtype='int8')
    AggOp = coo_matrix((data, (row, col)),
                       shape=(G.shape[0], num_seeds)).tocsr()
    return AggOp, seeds

language: python | partition: train | avg_line_len: 29.602151

repo: djordon/queueing-tool
path: queueing_tool/graph/graph_wrapper.py
url: https://github.com/djordon/queueing-tool/blob/ccd418cf647ac03a54f78ba5e3725903f541b808/queueing_tool/graph/graph_wrapper.py#L318-L354
code:
def get_edge_type(self, edge_type):
    """Returns all edges with the specified edge type.

    Parameters
    ----------
    edge_type : int
        An integer specifying what type of edges to return.

    Returns
    -------
    out : list of 2-tuples
        A list of 2-tuples representing the edges in the graph
        with the specified edge type.

    Examples
    --------
    Let's get type 2 edges from the following graph

    >>> import queueing_tool as qt
    >>> adjacency = {
    ...     0: {1: {'edge_type': 2}},
    ...     1: {2: {'edge_type': 1},
    ...         3: {'edge_type': 4}},
    ...     2: {0: {'edge_type': 2}},
    ...     3: {3: {'edge_type': 0}}
    ... }
    >>> G = qt.QueueNetworkDiGraph(adjacency)
    >>> ans = G.get_edge_type(2)
    >>> ans.sort()
    >>> ans
    [(0, 1), (2, 0)]
    """
    edges = []
    for e in self.edges():
        if self.adj[e[0]][e[1]].get('edge_type') == edge_type:
            edges.append(e)
    return edges

language: python | partition: valid | avg_line_len: 28.891892

repo: lsbardel/python-stdnet
path: stdnet/apps/searchengine/models.py
url: https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/apps/searchengine/models.py#L36-L44
code:
def object(self, session):
    '''Instance of :attr:`model_type` with id :attr:`object_id`.'''
    if not hasattr(self, '_object'):
        pkname = self.model_type._meta.pkname()
        query = session.query(self.model_type).filter(**{pkname: self.object_id})
        return query.items(callback=self.__set_object)
    else:
        return self._object

language: python | partition: train | avg_line_len: 49.222222

repo: KelSolaar/Umbra
path: umbra/components/factory/script_editor/workers.py
url: https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/factory/script_editor/workers.py#L394-L428
code:
def __search_files(self, files):
    """
    Searches in given files.

    :param files: Files.
    :type files: list
    """

    for file in files:
        if self.__interrupt:
            return

        if not foundations.common.path_exists(file):
            continue

        if foundations.io.is_readable(file):
            if foundations.io.is_binary_file(file):
                continue

        LOGGER.info("{0} | Searching '{1}' file!".format(self.__class__.__name__, file))
        cache_data = self.__container.files_cache.get_content(file)
        if not cache_data:
            reader = foundations.io.File(file)
            content = reader.read()
            if content is None:
                LOGGER.warning("!> Error occurred while reading '{0}' file, proceeding to next one!".format(file))
                continue
            self.__container.files_cache.add_content(**{file: CacheData(content=content, document=None)})
        else:
            content = cache_data.content
        occurrences = self.__search_document(QTextDocument(QString(content)), self.__pattern, self.__settings)
        occurrences and self.__search_results.append(
            SearchResult(file=file,
                         pattern=self.__pattern,
                         settings=self.__settings,
                         occurrences=occurrences))

language: python | partition: train | avg_line_len: 43.942857

repo: wbond/oscrypto
path: oscrypto/_pkcs12.py
url: https://github.com/wbond/oscrypto/blob/af778bf1c88bf6c4a7342f5353b130686a5bbe1c/oscrypto/_pkcs12.py#L26-L198
code:
def pkcs12_kdf(hash_algorithm, password, salt, iterations, key_length, id_):
    """
    KDF from RFC7292 appendix b.2 - https://tools.ietf.org/html/rfc7292#page-19

    :param hash_algorithm:
        The string name of the hash algorithm to use: "md5", "sha1",
        "sha224", "sha256", "sha384", "sha512"

    :param password:
        A byte string of the password to use an input to the KDF

    :param salt:
        A cryptographic random byte string

    :param iterations:
        The numbers of iterations to use when deriving the key

    :param key_length:
        The length of the desired key in bytes

    :param id_:
        The ID of the usage - 1 for key, 2 for iv, 3 for mac

    :return:
        The derived key as a byte string
    """

    if not isinstance(password, byte_cls):
        raise TypeError(pretty_message(
            '''
            password must be a byte string, not %s
            ''',
            type_name(password)
        ))

    if not isinstance(salt, byte_cls):
        raise TypeError(pretty_message(
            '''
            salt must be a byte string, not %s
            ''',
            type_name(salt)
        ))

    if not isinstance(iterations, int_types):
        raise TypeError(pretty_message(
            '''
            iterations must be an integer, not %s
            ''',
            type_name(iterations)
        ))

    if iterations < 1:
        raise ValueError(pretty_message(
            '''
            iterations must be greater than 0 - is %s
            ''',
            repr(iterations)
        ))

    if not isinstance(key_length, int_types):
        raise TypeError(pretty_message(
            '''
            key_length must be an integer, not %s
            ''',
            type_name(key_length)
        ))

    if key_length < 1:
        raise ValueError(pretty_message(
            '''
            key_length must be greater than 0 - is %s
            ''',
            repr(key_length)
        ))

    if hash_algorithm not in set(['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']):
        raise ValueError(pretty_message(
            '''
            hash_algorithm must be one of "md5", "sha1", "sha224", "sha256",
            "sha384", "sha512", not %s
            ''',
            repr(hash_algorithm)
        ))

    if id_ not in set([1, 2, 3]):
        raise ValueError(pretty_message(
            '''
            id_ must be one of 1, 2, 3, not %s
            ''',
            repr(id_)
        ))

    utf16_password = password.decode('utf-8').encode('utf-16be') + b'\x00\x00'

    algo = getattr(hashlib, hash_algorithm)

    # u and v values are bytes (not bits as in the RFC)
    u = {
        'md5': 16,
        'sha1': 20,
        'sha224': 28,
        'sha256': 32,
        'sha384': 48,
        'sha512': 64
    }[hash_algorithm]

    if hash_algorithm in ['sha384', 'sha512']:
        v = 128
    else:
        v = 64

    # Step 1
    d = chr_cls(id_) * v

    # Step 2
    s = b''
    if salt != b'':
        s_len = v * int(math.ceil(float(len(salt)) / v))
        while len(s) < s_len:
            s += salt
        s = s[0:s_len]

    # Step 3
    p = b''
    if utf16_password != b'':
        p_len = v * int(math.ceil(float(len(utf16_password)) / v))
        while len(p) < p_len:
            p += utf16_password
        p = p[0:p_len]

    # Step 4
    i = s + p

    # Step 5
    c = int(math.ceil(float(key_length) / u))

    a = b'\x00' * (c * u)

    for num in range(1, c + 1):
        # Step 6A
        a2 = algo(d + i).digest()
        for _ in range(2, iterations + 1):
            a2 = algo(a2).digest()

        if num < c:
            # Step 6B
            b = b''
            while len(b) < v:
                b += a2
            b = int_from_bytes(b[0:v]) + 1

            # Step 6C
            for num2 in range(0, len(i) // v):
                start = num2 * v
                end = (num2 + 1) * v
                i_num2 = i[start:end]

                i_num2 = int_to_bytes(int_from_bytes(i_num2) + b)

                # Ensure the new slice is the right size
                i_num2_l = len(i_num2)
                if i_num2_l > v:
                    i_num2 = i_num2[i_num2_l - v:]

                i = i[0:start] + i_num2 + i[end:]

        # Step 7 (one piece at a time)
        begin = (num - 1) * u
        to_copy = min(key_length, u)
        a = a[0:begin] + a2[0:to_copy] + a[begin + to_copy:]

    return a[0:key_length]
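
A call sketch (the password, salt, and iteration count are invented; per the docstring, id_=1 derives key material, e.g. 24 bytes for a 3DES key):

    key = pkcs12_kdf('sha1', b'secret', b'\x01' * 8, 2048, 24, 1)
    print(len(key))  # 24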

language: python | partition: valid | avg_line_len: 24.786127

repo: speechinformaticslab/vfclust
path: vfclust/vfclust.py
url: https://github.com/speechinformaticslab/vfclust/blob/7ca733dea4782c828024765726cce65de095d33c/vfclust/vfclust.py#L783-L789
code:
def get_similarity_measures(self):
    """Helper function for computing similarity measures."""
    if not self.quiet:
        print
        print "Computing", self.current_similarity_measure, "similarity..."
    self.compute_similarity_scores()

language: python | partition: train | avg_line_len: 37.142857

repo: burnash/gspread
path: gspread/models.py
url: https://github.com/burnash/gspread/blob/0e8debe208095aeed3e3e7136c2fa5cd74090946/gspread/models.py#L467-L490
code:
def acell(self, label, value_render_option='FORMATTED_VALUE'):
    """Returns an instance of a :class:`gspread.models.Cell`.

    :param label: Cell label in A1 notation
                  Letter case is ignored.
    :type label: str
    :param value_render_option: (optional) Determines how values should be
                                rendered in the output. See
                                `ValueRenderOption`_ in the Sheets API.
    :type value_render_option: str

    .. _ValueRenderOption: https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption

    Example:

    >>> worksheet.acell('A1')
    <Cell R1C1 "I'm cell A1">
    """
    return self.cell(
        *(a1_to_rowcol(label)),
        value_render_option=value_render_option
    )
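
The A1 conversion is handled by gspread's a1_to_rowcol utility, which turns a label into a (row, col) pair:

    from gspread.utils import a1_to_rowcol
    print(a1_to_rowcol('B7'))  # (7, 2)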

language: python | partition: train | avg_line_len: 34.208333

repo: CiscoUcs/UcsPythonSDK
path: src/UcsSdk/UcsBase.py
url: https://github.com/CiscoUcs/UcsPythonSDK/blob/bf6b07d6abeacb922c92b198352eda4eb9e4629b/src/UcsSdk/UcsBase.py#L721-L757
code:
def DownloadFile(hUcs, source, destination):
    """ Method provides the functionality to download a file from the UCS.
    This method is used in BackupUcs and GetTechSupport to download the
    files from the Ucs.
    """
    import urllib2
    from sys import stdout
    from time import sleep

    httpAddress = "%s/%s" % (hUcs.Uri(), source)
    file_name = httpAddress.split('/')[-1]
    req = urllib2.Request(httpAddress)
    # send the new url with the cookie.
    req.add_header('Cookie', 'ucsm-cookie=%s' % (hUcs._cookie))
    res = urllib2.urlopen(req)
    meta = res.info()
    file_size = int(meta.getheaders("Content-Length")[0])
    print "Downloading: %s Bytes: %s" % (file_name, file_size)

    f = open(destination, 'wb')
    file_size_dl = 0
    block_sz = 8192
    while True:
        rBuffer = res.read(block_sz)
        if not rBuffer:
            break

        file_size_dl += len(rBuffer)
        f.write(rBuffer)
        status = r"%10d  [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
        status = status + chr(8) * (len(status) + 1)
        stdout.write("\r%s" % status)
        stdout.flush()
        # print status
    f.close()
[ "def", "DownloadFile", "(", "hUcs", ",", "source", ",", "destination", ")", ":", "import", "urllib2", "from", "sys", "import", "stdout", "from", "time", "import", "sleep", "httpAddress", "=", "\"%s/%s\"", "%", "(", "hUcs", ".", "Uri", "(", ")", ",", "source", ")", "file_name", "=", "httpAddress", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "req", "=", "urllib2", ".", "Request", "(", "httpAddress", ")", "# send the new url with the cookie.", "req", ".", "add_header", "(", "'Cookie'", ",", "'ucsm-cookie=%s'", "%", "(", "hUcs", ".", "_cookie", ")", ")", "res", "=", "urllib2", ".", "urlopen", "(", "req", ")", "meta", "=", "res", ".", "info", "(", ")", "file_size", "=", "int", "(", "meta", ".", "getheaders", "(", "\"Content-Length\"", ")", "[", "0", "]", ")", "print", "\"Downloading: %s Bytes: %s\"", "%", "(", "file_name", ",", "file_size", ")", "f", "=", "open", "(", "destination", ",", "'wb'", ")", "file_size_dl", "=", "0", "block_sz", "=", "8192", "while", "True", ":", "rBuffer", "=", "res", ".", "read", "(", "block_sz", ")", "if", "not", "rBuffer", ":", "break", "file_size_dl", "+=", "len", "(", "rBuffer", ")", "f", ".", "write", "(", "rBuffer", ")", "status", "=", "r\"%10d [%3.2f%%]\"", "%", "(", "file_size_dl", ",", "file_size_dl", "*", "100.", "/", "file_size", ")", "status", "=", "status", "+", "chr", "(", "8", ")", "*", "(", "len", "(", "status", ")", "+", "1", ")", "stdout", ".", "write", "(", "\"\\r%s\"", "%", "status", ")", "stdout", ".", "flush", "(", ")", "# print status", "f", ".", "close", "(", ")" ]
Method provides the functionality to download a file from the UCS. This method is used in BackupUcs and GetTechSupport to download the files from the UCS.
[ "Method", "provides", "the", "functionality", "to", "download", "file", "from", "the", "UCS", ".", "This", "method", "is", "used", "in", "BackupUcs", "and", "GetTechSupport", "to", "download", "the", "files", "from", "the", "Ucs", "." ]
python
train
28.189189
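Usage sketch for the record above (illustrative, not part of the dataset): it assumes a logged-in UcsHandle; the host, credentials, remote source path, and the Login/Logout API shown here are assumptions, not taken from the record.

# Hypothetical usage of DownloadFile; handle setup and paths are assumed.
from UcsSdk import UcsHandle
from UcsSdk.UcsBase import DownloadFile

handle = UcsHandle()
handle.Login("ucs.example.com", "admin", "password")  # placeholder credentials
# Fetch a previously generated backup from the UCS to a local destination.
DownloadFile(handle, "backupfile/config-backup.xml", "/tmp/config-backup.xml")
handle.Logout()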
google/mobly
mobly/controllers/android_device.py
https://github.com/google/mobly/blob/38ba2cf7d29a20e6a2fca1718eecb337df38db26/mobly/controllers/android_device.py#L527-L544
def debug_tag(self, tag): """Setter for the debug tag. By default, the tag is the serial of the device, but sometimes it may be more descriptive to use a different tag of the user's choice. Changing debug tag changes part of the prefix of debug info emitted by this object, like log lines and the message of DeviceError. Example: By default, the device's serial number is used: 'INFO [AndroidDevice|abcdefg12345] One pending call ringing.' The tag can be customized with `ad.debug_tag = 'Caller'`: 'INFO [AndroidDevice|Caller] One pending call ringing.' """ self.log.info('Logging debug tag set to "%s"', tag) self._debug_tag = tag self.log.extra['tag'] = tag
[ "def", "debug_tag", "(", "self", ",", "tag", ")", ":", "self", ".", "log", ".", "info", "(", "'Logging debug tag set to \"%s\"'", ",", "tag", ")", "self", ".", "_debug_tag", "=", "tag", "self", ".", "log", ".", "extra", "[", "'tag'", "]", "=", "tag" ]
Setter for the debug tag.

By default, the tag is the serial of the device, but sometimes it may
be more descriptive to use a different tag of the user's choice.

Changing debug tag changes part of the prefix of debug info emitted by
this object, like log lines and the message of DeviceError.

Example:
    By default, the device's serial number is used:
        'INFO [AndroidDevice|abcdefg12345] One pending call ringing.'
    The tag can be customized with `ad.debug_tag = 'Caller'`:
        'INFO [AndroidDevice|Caller] One pending call ringing.'
[ "Setter", "for", "the", "debug", "tag", "." ]
python
train
43.388889
cirruscluster/cirruscluster
cirruscluster/workstation.py
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/workstation.py#L60-L71
def CirrusIamUserReady(iam_aws_id, iam_aws_secret): """ Returns true if provided IAM credentials are ready to use. """ is_ready = False try: s3 = core.CreateTestedS3Connection(iam_aws_id, iam_aws_secret) if s3: if core.CirrusAccessIdMetadata(s3, iam_aws_id).IsInitialized(): is_ready = True except boto.exception.BotoServerError as e: print e return is_ready
[ "def", "CirrusIamUserReady", "(", "iam_aws_id", ",", "iam_aws_secret", ")", ":", "is_ready", "=", "False", "try", ":", "s3", "=", "core", ".", "CreateTestedS3Connection", "(", "iam_aws_id", ",", "iam_aws_secret", ")", "if", "s3", ":", "if", "core", ".", "CirrusAccessIdMetadata", "(", "s3", ",", "iam_aws_id", ")", ".", "IsInitialized", "(", ")", ":", "is_ready", "=", "True", "except", "boto", ".", "exception", ".", "BotoServerError", "as", "e", ":", "print", "e", "return", "is_ready" ]
Returns true if provided IAM credentials are ready to use.
[ "Returns", "true", "if", "provided", "IAM", "credentials", "are", "ready", "to", "use", "." ]
python
train
33.416667
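A sketch of how the readiness check above might be polled after creating an IAM user (illustrative, not part of the record); the keys are placeholders and the retry cadence is an assumption.

# Poll until freshly created IAM credentials become usable (keys are placeholders).
import time
from cirruscluster.workstation import CirrusIamUserReady

iam_id, iam_secret = "AKIAEXAMPLE", "secret-key-example"
while not CirrusIamUserReady(iam_id, iam_secret):
    time.sleep(5)  # IAM credentials can take a moment to propagate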
acutesoftware/AIKIF
aikif/dataTools/cls_data.py
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/dataTools/cls_data.py#L120-L138
def _calc_size_stats(self): """ get the size in bytes and num records of the content """ self.total_records = 0 self.total_length = 0 self.total_nodes = 0 if type(self.content['data']) is dict: self.total_length += len(str(self.content['data'])) self.total_records += 1 self.total_nodes = sum(len(x) for x in self.content['data'].values()) elif hasattr(self.content['data'], '__iter__') and type(self.content['data']) is not str: self._get_size_recursive(self.content['data']) else: self.total_records += 1 self.total_length += len(str(self.content['data'])) return str(self.total_records) + ' records [or ' + str(self.total_nodes) + ' nodes], taking ' + str(self.total_length) + ' bytes'
[ "def", "_calc_size_stats", "(", "self", ")", ":", "self", ".", "total_records", "=", "0", "self", ".", "total_length", "=", "0", "self", ".", "total_nodes", "=", "0", "if", "type", "(", "self", ".", "content", "[", "'data'", "]", ")", "is", "dict", ":", "self", ".", "total_length", "+=", "len", "(", "str", "(", "self", ".", "content", "[", "'data'", "]", ")", ")", "self", ".", "total_records", "+=", "1", "self", ".", "total_nodes", "=", "sum", "(", "len", "(", "x", ")", "for", "x", "in", "self", ".", "content", "[", "'data'", "]", ".", "values", "(", ")", ")", "elif", "hasattr", "(", "self", ".", "content", "[", "'data'", "]", ",", "'__iter__'", ")", "and", "type", "(", "self", ".", "content", "[", "'data'", "]", ")", "is", "not", "str", ":", "self", ".", "_get_size_recursive", "(", "self", ".", "content", "[", "'data'", "]", ")", "else", ":", "self", ".", "total_records", "+=", "1", "self", ".", "total_length", "+=", "len", "(", "str", "(", "self", ".", "content", "[", "'data'", "]", ")", ")", "return", "str", "(", "self", ".", "total_records", ")", "+", "' records [or '", "+", "str", "(", "self", ".", "total_nodes", ")", "+", "' nodes], taking '", "+", "str", "(", "self", ".", "total_length", ")", "+", "' bytes'" ]
get the size in bytes and num records of the content
[ "get", "the", "size", "in", "bytes", "and", "num", "records", "of", "the", "content" ]
python
train
45.684211
rootpy/rootpy
rootpy/tree/tree.py
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/tree/tree.py#L257-L279
def activate(self, branches, exclusive=False): """ Activate branches Parameters ---------- branches : str or list branch or list of branches to activate exclusive : bool, optional (default=False) if True deactivate the remaining branches """ if exclusive: self.SetBranchStatus('*', 0) if isinstance(branches, string_types): branches = [branches] for branch in branches: if '*' in branch: matched_branches = self.glob(branch) for b in matched_branches: self.SetBranchStatus(b, 1) elif self.has_branch(branch): self.SetBranchStatus(branch, 1)
[ "def", "activate", "(", "self", ",", "branches", ",", "exclusive", "=", "False", ")", ":", "if", "exclusive", ":", "self", ".", "SetBranchStatus", "(", "'*'", ",", "0", ")", "if", "isinstance", "(", "branches", ",", "string_types", ")", ":", "branches", "=", "[", "branches", "]", "for", "branch", "in", "branches", ":", "if", "'*'", "in", "branch", ":", "matched_branches", "=", "self", ".", "glob", "(", "branch", ")", "for", "b", "in", "matched_branches", ":", "self", ".", "SetBranchStatus", "(", "b", ",", "1", ")", "elif", "self", ".", "has_branch", "(", "branch", ")", ":", "self", ".", "SetBranchStatus", "(", "branch", ",", "1", ")" ]
Activate branches

Parameters
----------
branches : str or list
    branch or list of branches to activate
exclusive : bool, optional (default=False)
    if True deactivate the remaining branches
[ "Activate", "branches" ]
python
train
32.173913
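A sketch of the branch-activation pattern the method above supports (illustrative, not part of the record); the file name, tree name, and branch patterns are assumptions.

# Read only the branches needed for the loop; wildcard patterns are expanded
# by the method's glob() call. File/tree/branch names are assumed.
from rootpy.io import root_open

with root_open("events.root") as f:
    tree = f.events
    tree.activate(["jet_*", "weight"], exclusive=True)
    for event in tree:
        pass  # only activated branches are read from disk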
cdgriffith/Reusables
reusables/wrappers.py
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/wrappers.py#L196-L224
def queue_it(queue=g_queue, **put_args): """ Wrapper. Instead of returning the result of the function, add it to a queue. .. code: python import reusables import queue my_queue = queue.Queue() @reusables.queue_it(my_queue) def func(a): return a func(10) print(my_queue.get()) # 10 :param queue: Queue to add result into """ def func_wrapper(func): @wraps(func) def wrapper(*args, **kwargs): queue.put(func(*args, **kwargs), **put_args) return wrapper return func_wrapper
[ "def", "queue_it", "(", "queue", "=", "g_queue", ",", "*", "*", "put_args", ")", ":", "def", "func_wrapper", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "queue", ".", "put", "(", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", ",", "*", "*", "put_args", ")", "return", "wrapper", "return", "func_wrapper" ]
Wrapper. Instead of returning the result of the function, add it to a queue.

.. code: python

    import reusables
    import queue

    my_queue = queue.Queue()

    @reusables.queue_it(my_queue)
    def func(a):
        return a

    func(10)

    print(my_queue.get())
    # 10

:param queue: Queue to add result into
[ "Wrapper", ".", "Instead", "of", "returning", "the", "result", "of", "the", "function", "add", "it", "to", "a", "queue", "." ]
python
train
20.344828
sorgerlab/indra
indra/sources/eidos/processor.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/processor.py#L27-L38
def extract_causal_relations(self): """Extract causal relations as Statements.""" # Get the extractions that are labeled as directed and causal relations = [e for e in self.doc.extractions if 'DirectedRelation' in e['labels'] and 'Causal' in e['labels']] # For each relation, we try to extract an INDRA Statement and # save it if its valid for relation in relations: stmt = self.get_causal_relation(relation) if stmt is not None: self.statements.append(stmt)
[ "def", "extract_causal_relations", "(", "self", ")", ":", "# Get the extractions that are labeled as directed and causal", "relations", "=", "[", "e", "for", "e", "in", "self", ".", "doc", ".", "extractions", "if", "'DirectedRelation'", "in", "e", "[", "'labels'", "]", "and", "'Causal'", "in", "e", "[", "'labels'", "]", "]", "# For each relation, we try to extract an INDRA Statement and", "# save it if its valid", "for", "relation", "in", "relations", ":", "stmt", "=", "self", ".", "get_causal_relation", "(", "relation", ")", "if", "stmt", "is", "not", "None", ":", "self", ".", "statements", ".", "append", "(", "stmt", ")" ]
Extract causal relations as Statements.
[ "Extract", "causal", "relations", "as", "Statements", "." ]
python
train
48.083333
cuihantao/andes
andes/variables/varout.py
https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/variables/varout.py#L169-L197
def dump_np_vars(self, store_format='csv', delimiter=','): """ Dump the TDS simulation data to files by calling subroutines `write_lst` and `write_np_dat`. Parameters ----------- store_format : str dump format in `('csv', 'txt', 'hdf5')` delimiter : str delimiter for the `csv` and `txt` format Returns ------- bool: success flag """ ret = False if self.system.files.no_output is True: logger.debug('no_output is True, thus no TDS dump saved ') return True if self.write_lst() and self.write_np_dat(store_format=store_format, delimiter=delimiter): ret = True return ret
[ "def", "dump_np_vars", "(", "self", ",", "store_format", "=", "'csv'", ",", "delimiter", "=", "','", ")", ":", "ret", "=", "False", "if", "self", ".", "system", ".", "files", ".", "no_output", "is", "True", ":", "logger", ".", "debug", "(", "'no_output is True, thus no TDS dump saved '", ")", "return", "True", "if", "self", ".", "write_lst", "(", ")", "and", "self", ".", "write_np_dat", "(", "store_format", "=", "store_format", ",", "delimiter", "=", "delimiter", ")", ":", "ret", "=", "True", "return", "ret" ]
Dump the TDS simulation data to files by calling subroutines `write_lst`
and `write_np_dat`.

Parameters
-----------
store_format : str
    dump format in `('csv', 'txt', 'hdf5')`
delimiter : str
    delimiter for the `csv` and `txt` format

Returns
-------
bool: success flag
[ "Dump", "the", "TDS", "simulation", "data", "to", "files", "by", "calling", "subroutines", "write_lst", "and", "write_np_dat", "." ]
python
train
25.137931
ekmmetering/ekmmeters
ekmmeters.py
https://github.com/ekmmetering/ekmmeters/blob/b3748bdf30263bfa46ea40157bdf8df2522e1904/ekmmeters.py#L3543-L3616
def calculateFields(self): """Write calculated fields for read buffer.""" pf1 = self.m_blk_b[Field.Cos_Theta_Ln_1][MeterData.StringValue] pf2 = self.m_blk_b[Field.Cos_Theta_Ln_2][MeterData.StringValue] pf3 = self.m_blk_b[Field.Cos_Theta_Ln_3][MeterData.StringValue] pf1_int = self.calcPF(pf1) pf2_int = self.calcPF(pf2) pf3_int = self.calcPF(pf3) self.m_blk_b[Field.Power_Factor_Ln_1][MeterData.StringValue] = str(pf1_int) self.m_blk_b[Field.Power_Factor_Ln_2][MeterData.StringValue] = str(pf2_int) self.m_blk_b[Field.Power_Factor_Ln_3][MeterData.StringValue] = str(pf3_int) self.m_blk_b[Field.Power_Factor_Ln_1][MeterData.NativeValue] = pf1_int self.m_blk_b[Field.Power_Factor_Ln_2][MeterData.NativeValue] = pf2_int self.m_blk_b[Field.Power_Factor_Ln_3][MeterData.NativeValue] = pf2_int rms_watts_1 = self.m_blk_b[Field.RMS_Watts_Ln_1][MeterData.NativeValue] rms_watts_2 = self.m_blk_b[Field.RMS_Watts_Ln_2][MeterData.NativeValue] rms_watts_3 = self.m_blk_b[Field.RMS_Watts_Ln_3][MeterData.NativeValue] sign_rms_watts_1 = 1 sign_rms_watts_2 = 1 sign_rms_watts_3 = 1 direction_byte = self.m_blk_a[Field.State_Watts_Dir][MeterData.NativeValue] if direction_byte == DirectionFlag.ForwardForwardForward: # all good pass if direction_byte == DirectionFlag.ForwardForwardReverse: sign_rms_watts_3 = -1 pass if direction_byte == DirectionFlag.ForwardReverseForward: sign_rms_watts_2 = -1 pass if direction_byte == DirectionFlag.ReverseForwardForward: sign_rms_watts_1 = -1 pass if direction_byte == DirectionFlag.ForwardReverseReverse: sign_rms_watts_2 = -1 sign_rms_watts_3 = -1 pass if direction_byte == DirectionFlag.ReverseForwardReverse: sign_rms_watts_1 = -1 sign_rms_watts_3 = -1 pass if direction_byte == DirectionFlag.ReverseReverseForward: sign_rms_watts_1 = -1 sign_rms_watts_2 = -1 pass if direction_byte == DirectionFlag.ReverseReverseReverse: sign_rms_watts_1 = -1 sign_rms_watts_2 = -1 sign_rms_watts_3 = -1 pass net_watts_1 = rms_watts_1 * sign_rms_watts_1 net_watts_2 = rms_watts_2 * sign_rms_watts_2 net_watts_3 = rms_watts_3 * sign_rms_watts_3 net_watts_tot = net_watts_1 + net_watts_2 + net_watts_3 self.m_blk_b[Field.Net_Calc_Watts_Ln_1][MeterData.NativeValue] = net_watts_1 self.m_blk_b[Field.Net_Calc_Watts_Ln_2][MeterData.NativeValue] = net_watts_2 self.m_blk_b[Field.Net_Calc_Watts_Ln_3][MeterData.NativeValue] = net_watts_3 self.m_blk_b[Field.Net_Calc_Watts_Tot][MeterData.NativeValue] = net_watts_tot self.m_blk_b[Field.Net_Calc_Watts_Ln_1][MeterData.StringValue] = str(net_watts_1) self.m_blk_b[Field.Net_Calc_Watts_Ln_2][MeterData.StringValue] = str(net_watts_2) self.m_blk_b[Field.Net_Calc_Watts_Ln_3][MeterData.StringValue] = str(net_watts_3) self.m_blk_b[Field.Net_Calc_Watts_Tot][MeterData.StringValue] = str(net_watts_tot) pass
[ "def", "calculateFields", "(", "self", ")", ":", "pf1", "=", "self", ".", "m_blk_b", "[", "Field", ".", "Cos_Theta_Ln_1", "]", "[", "MeterData", ".", "StringValue", "]", "pf2", "=", "self", ".", "m_blk_b", "[", "Field", ".", "Cos_Theta_Ln_2", "]", "[", "MeterData", ".", "StringValue", "]", "pf3", "=", "self", ".", "m_blk_b", "[", "Field", ".", "Cos_Theta_Ln_3", "]", "[", "MeterData", ".", "StringValue", "]", "pf1_int", "=", "self", ".", "calcPF", "(", "pf1", ")", "pf2_int", "=", "self", ".", "calcPF", "(", "pf2", ")", "pf3_int", "=", "self", ".", "calcPF", "(", "pf3", ")", "self", ".", "m_blk_b", "[", "Field", ".", "Power_Factor_Ln_1", "]", "[", "MeterData", ".", "StringValue", "]", "=", "str", "(", "pf1_int", ")", "self", ".", "m_blk_b", "[", "Field", ".", "Power_Factor_Ln_2", "]", "[", "MeterData", ".", "StringValue", "]", "=", "str", "(", "pf2_int", ")", "self", ".", "m_blk_b", "[", "Field", ".", "Power_Factor_Ln_3", "]", "[", "MeterData", ".", "StringValue", "]", "=", "str", "(", "pf3_int", ")", "self", ".", "m_blk_b", "[", "Field", ".", "Power_Factor_Ln_1", "]", "[", "MeterData", ".", "NativeValue", "]", "=", "pf1_int", "self", ".", "m_blk_b", "[", "Field", ".", "Power_Factor_Ln_2", "]", "[", "MeterData", ".", "NativeValue", "]", "=", "pf2_int", "self", ".", "m_blk_b", "[", "Field", ".", "Power_Factor_Ln_3", "]", "[", "MeterData", ".", "NativeValue", "]", "=", "pf2_int", "rms_watts_1", "=", "self", ".", "m_blk_b", "[", "Field", ".", "RMS_Watts_Ln_1", "]", "[", "MeterData", ".", "NativeValue", "]", "rms_watts_2", "=", "self", ".", "m_blk_b", "[", "Field", ".", "RMS_Watts_Ln_2", "]", "[", "MeterData", ".", "NativeValue", "]", "rms_watts_3", "=", "self", ".", "m_blk_b", "[", "Field", ".", "RMS_Watts_Ln_3", "]", "[", "MeterData", ".", "NativeValue", "]", "sign_rms_watts_1", "=", "1", "sign_rms_watts_2", "=", "1", "sign_rms_watts_3", "=", "1", "direction_byte", "=", "self", ".", "m_blk_a", "[", "Field", ".", "State_Watts_Dir", "]", "[", "MeterData", ".", "NativeValue", "]", "if", "direction_byte", "==", "DirectionFlag", ".", "ForwardForwardForward", ":", "# all good", "pass", "if", "direction_byte", "==", "DirectionFlag", ".", "ForwardForwardReverse", ":", "sign_rms_watts_3", "=", "-", "1", "pass", "if", "direction_byte", "==", "DirectionFlag", ".", "ForwardReverseForward", ":", "sign_rms_watts_2", "=", "-", "1", "pass", "if", "direction_byte", "==", "DirectionFlag", ".", "ReverseForwardForward", ":", "sign_rms_watts_1", "=", "-", "1", "pass", "if", "direction_byte", "==", "DirectionFlag", ".", "ForwardReverseReverse", ":", "sign_rms_watts_2", "=", "-", "1", "sign_rms_watts_3", "=", "-", "1", "pass", "if", "direction_byte", "==", "DirectionFlag", ".", "ReverseForwardReverse", ":", "sign_rms_watts_1", "=", "-", "1", "sign_rms_watts_3", "=", "-", "1", "pass", "if", "direction_byte", "==", "DirectionFlag", ".", "ReverseReverseForward", ":", "sign_rms_watts_1", "=", "-", "1", "sign_rms_watts_2", "=", "-", "1", "pass", "if", "direction_byte", "==", "DirectionFlag", ".", "ReverseReverseReverse", ":", "sign_rms_watts_1", "=", "-", "1", "sign_rms_watts_2", "=", "-", "1", "sign_rms_watts_3", "=", "-", "1", "pass", "net_watts_1", "=", "rms_watts_1", "*", "sign_rms_watts_1", "net_watts_2", "=", "rms_watts_2", "*", "sign_rms_watts_2", "net_watts_3", "=", "rms_watts_3", "*", "sign_rms_watts_3", "net_watts_tot", "=", "net_watts_1", "+", "net_watts_2", "+", "net_watts_3", "self", ".", "m_blk_b", "[", "Field", ".", "Net_Calc_Watts_Ln_1", "]", "[", "MeterData", ".", "NativeValue", "]", "=", 
"net_watts_1", "self", ".", "m_blk_b", "[", "Field", ".", "Net_Calc_Watts_Ln_2", "]", "[", "MeterData", ".", "NativeValue", "]", "=", "net_watts_2", "self", ".", "m_blk_b", "[", "Field", ".", "Net_Calc_Watts_Ln_3", "]", "[", "MeterData", ".", "NativeValue", "]", "=", "net_watts_3", "self", ".", "m_blk_b", "[", "Field", ".", "Net_Calc_Watts_Tot", "]", "[", "MeterData", ".", "NativeValue", "]", "=", "net_watts_tot", "self", ".", "m_blk_b", "[", "Field", ".", "Net_Calc_Watts_Ln_1", "]", "[", "MeterData", ".", "StringValue", "]", "=", "str", "(", "net_watts_1", ")", "self", ".", "m_blk_b", "[", "Field", ".", "Net_Calc_Watts_Ln_2", "]", "[", "MeterData", ".", "StringValue", "]", "=", "str", "(", "net_watts_2", ")", "self", ".", "m_blk_b", "[", "Field", ".", "Net_Calc_Watts_Ln_3", "]", "[", "MeterData", ".", "StringValue", "]", "=", "str", "(", "net_watts_3", ")", "self", ".", "m_blk_b", "[", "Field", ".", "Net_Calc_Watts_Tot", "]", "[", "MeterData", ".", "StringValue", "]", "=", "str", "(", "net_watts_tot", ")", "pass" ]
Write calculated fields for read buffer.
[ "Write", "calculated", "fields", "for", "read", "buffer", "." ]
python
test
44.22973
darkfeline/animanager
animanager/commands/search.py
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/commands/search.py#L92-L102
def _find_files(dirpath: str) -> 'Iterable[str]': """Find files recursively. Returns a generator that yields paths in no particular order. """ for dirpath, dirnames, filenames in os.walk(dirpath, topdown=True, followlinks=True): if os.path.basename(dirpath).startswith('.'): del dirnames[:] for filename in filenames: yield os.path.join(dirpath, filename)
[ "def", "_find_files", "(", "dirpath", ":", "str", ")", "->", "'Iterable[str]'", ":", "for", "dirpath", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "dirpath", ",", "topdown", "=", "True", ",", "followlinks", "=", "True", ")", ":", "if", "os", ".", "path", ".", "basename", "(", "dirpath", ")", ".", "startswith", "(", "'.'", ")", ":", "del", "dirnames", "[", ":", "]", "for", "filename", "in", "filenames", ":", "yield", "os", ".", "path", ".", "join", "(", "dirpath", ",", "filename", ")" ]
Find files recursively. Returns a generator that yields paths in no particular order.
[ "Find", "files", "recursively", "." ]
python
train
40.909091
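A sketch exercising the generator above (illustrative, not part of the record); the function is module-private, so importing it directly is for demonstration only, and the directory and extension are assumptions.

# Collect matching files, skipping hidden subtrees exactly as _find_files does.
from animanager.commands.search import _find_files

paths = [p for p in _find_files('/home/user/anime') if p.endswith('.mkv')]
print(len(paths), 'matching files')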
wandb/client
wandb/vendor/prompt_toolkit/terminal/vt100_output.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/terminal/vt100_output.py#L290-L341
def _colors_to_code(self, fg_color, bg_color): " Return a tuple with the vt100 values that represent this color. " # When requesting ANSI colors only, and both fg/bg color were converted # to ANSI, ensure that the foreground and background color are not the # same. (Unless they were explicitely defined to be the same color.) fg_ansi = [()] def get(color, bg): table = BG_ANSI_COLORS if bg else FG_ANSI_COLORS if color is None: return () # 16 ANSI colors. (Given by name.) elif color in table: return (table[color], ) # RGB colors. (Defined as 'ffffff'.) else: try: rgb = self._color_name_to_rgb(color) except ValueError: return () # When only 16 colors are supported, use that. if self.ansi_colors_only(): if bg: # Background. if fg_color != bg_color: exclude = (fg_ansi[0], ) else: exclude = () code, name = _16_bg_colors.get_code(rgb, exclude=exclude) return (code, ) else: # Foreground. code, name = _16_fg_colors.get_code(rgb) fg_ansi[0] = name return (code, ) # True colors. (Only when this feature is enabled.) elif self.true_color: r, g, b = rgb return (48 if bg else 38, 2, r, g, b) # 256 RGB colors. else: return (48 if bg else 38, 5, _256_colors[rgb]) result = [] result.extend(get(fg_color, False)) result.extend(get(bg_color, True)) return map(six.text_type, result)
[ "def", "_colors_to_code", "(", "self", ",", "fg_color", ",", "bg_color", ")", ":", "# When requesting ANSI colors only, and both fg/bg color were converted", "# to ANSI, ensure that the foreground and background color are not the", "# same. (Unless they were explicitely defined to be the same color.)", "fg_ansi", "=", "[", "(", ")", "]", "def", "get", "(", "color", ",", "bg", ")", ":", "table", "=", "BG_ANSI_COLORS", "if", "bg", "else", "FG_ANSI_COLORS", "if", "color", "is", "None", ":", "return", "(", ")", "# 16 ANSI colors. (Given by name.)", "elif", "color", "in", "table", ":", "return", "(", "table", "[", "color", "]", ",", ")", "# RGB colors. (Defined as 'ffffff'.)", "else", ":", "try", ":", "rgb", "=", "self", ".", "_color_name_to_rgb", "(", "color", ")", "except", "ValueError", ":", "return", "(", ")", "# When only 16 colors are supported, use that.", "if", "self", ".", "ansi_colors_only", "(", ")", ":", "if", "bg", ":", "# Background.", "if", "fg_color", "!=", "bg_color", ":", "exclude", "=", "(", "fg_ansi", "[", "0", "]", ",", ")", "else", ":", "exclude", "=", "(", ")", "code", ",", "name", "=", "_16_bg_colors", ".", "get_code", "(", "rgb", ",", "exclude", "=", "exclude", ")", "return", "(", "code", ",", ")", "else", ":", "# Foreground.", "code", ",", "name", "=", "_16_fg_colors", ".", "get_code", "(", "rgb", ")", "fg_ansi", "[", "0", "]", "=", "name", "return", "(", "code", ",", ")", "# True colors. (Only when this feature is enabled.)", "elif", "self", ".", "true_color", ":", "r", ",", "g", ",", "b", "=", "rgb", "return", "(", "48", "if", "bg", "else", "38", ",", "2", ",", "r", ",", "g", ",", "b", ")", "# 256 RGB colors.", "else", ":", "return", "(", "48", "if", "bg", "else", "38", ",", "5", ",", "_256_colors", "[", "rgb", "]", ")", "result", "=", "[", "]", "result", ".", "extend", "(", "get", "(", "fg_color", ",", "False", ")", ")", "result", ".", "extend", "(", "get", "(", "bg_color", ",", "True", ")", ")", "return", "map", "(", "six", ".", "text_type", ",", "result", ")" ]
Return a tuple with the vt100 values that represent this color.
[ "Return", "a", "tuple", "with", "the", "vt100", "values", "that", "represent", "this", "color", "." ]
python
train
36.865385
UpCloudLtd/upcloud-python-api
upcloud_api/server.py
https://github.com/UpCloudLtd/upcloud-python-api/blob/954b0ad7c4b932b2be31a95d88975f6b0eeac8ed/upcloud_api/server.py#L195-L201
def add_ip(self, family='IPv4'): """ Allocate a new (random) IP-address to the Server. """ IP = self.cloud_manager.attach_ip(self.uuid, family) self.ip_addresses.append(IP) return IP
[ "def", "add_ip", "(", "self", ",", "family", "=", "'IPv4'", ")", ":", "IP", "=", "self", ".", "cloud_manager", ".", "attach_ip", "(", "self", ".", "uuid", ",", "family", ")", "self", ".", "ip_addresses", ".", "append", "(", "IP", ")", "return", "IP" ]
Allocate a new (random) IP-address to the Server.
[ "Allocate", "a", "new", "(", "random", ")", "IP", "-", "address", "to", "the", "Server", "." ]
python
train
32
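A sketch of attaching a fresh address with the method above (illustrative, not part of the record); the credentials, server UUID, and the CloudManager/get_server entry points are assumptions.

# Attach a new public IPv6 address to an existing server (placeholders throughout).
from upcloud_api import CloudManager

manager = CloudManager('username', 'password')
server = manager.get_server('00000000-0000-0000-0000-000000000000')
ip = server.add_ip(family='IPv6')
print(ip.address)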
djangobot/djangobot
djangobot/client.py
https://github.com/djangobot/djangobot/blob/0ec951891812ea4114c27a08c790f63d0f0fd254/djangobot/client.py#L98-L109
def onMessage(self, payload, isBinary): """ Send the payload onto the {slack.[payload['type]'} channel. The message is transalated from IDs to human-readable identifiers. Note: The slack API only sends JSON, isBinary will always be false. """ msg = self.translate(unpack(payload)) if 'type' in msg: channel_name = 'slack.{}'.format(msg['type']) print('Sending on {}'.format(channel_name)) channels.Channel(channel_name).send({'text': pack(msg)})
[ "def", "onMessage", "(", "self", ",", "payload", ",", "isBinary", ")", ":", "msg", "=", "self", ".", "translate", "(", "unpack", "(", "payload", ")", ")", "if", "'type'", "in", "msg", ":", "channel_name", "=", "'slack.{}'", ".", "format", "(", "msg", "[", "'type'", "]", ")", "print", "(", "'Sending on {}'", ".", "format", "(", "channel_name", ")", ")", "channels", ".", "Channel", "(", "channel_name", ")", ".", "send", "(", "{", "'text'", ":", "pack", "(", "msg", ")", "}", ")" ]
Send the payload onto the slack.{payload['type']} channel.

The message is translated from IDs to human-readable identifiers.

Note: The slack API only sends JSON, isBinary will always be false.
[ "Send", "the", "payload", "onto", "the", "{", "slack", ".", "[", "payload", "[", "type", "]", "}", "channel", ".", "The", "message", "is", "transalated", "from", "IDs", "to", "human", "-", "readable", "identifiers", "." ]
python
test
43.916667
iotile/coretools
iotilecore/iotile/core/hw/transport/adapter/async_wrapper.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/transport/adapter/async_wrapper.py#L164-L171
async def close_interface(self, conn_id, interface): """Close an interface on this IOTile device. See :meth:`AbstractDeviceAdapter.close_interface`. """ resp = await self._execute(self._adapter.close_interface_sync, conn_id, interface) _raise_error(conn_id, 'close_interface', resp)
[ "async", "def", "close_interface", "(", "self", ",", "conn_id", ",", "interface", ")", ":", "resp", "=", "await", "self", ".", "_execute", "(", "self", ".", "_adapter", ".", "close_interface_sync", ",", "conn_id", ",", "interface", ")", "_raise_error", "(", "conn_id", ",", "'close_interface'", ",", "resp", ")" ]
Close an interface on this IOTile device. See :meth:`AbstractDeviceAdapter.close_interface`.
[ "Close", "an", "interface", "on", "this", "IOTile", "device", "." ]
python
train
39.625
jobovy/galpy
galpy/actionAngle/actionAngleTorus.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/actionAngle/actionAngleTorus.py#L197-L244
def hessianFreqs(self,jr,jphi,jz,**kwargs): """ NAME: hessianFreqs PURPOSE: return the Hessian d Omega / d J and frequencies Omega corresponding to a torus INPUT: jr - radial action (scalar) jphi - azimuthal action (scalar) jz - vertical action (scalar) tol= (object-wide value) goal for |dJ|/|J| along the torus dJ= (object-wide value) action difference when computing derivatives (Hessian or Jacobian) nosym= (False) if True, don't explicitly symmetrize the Hessian (good to check errors) OUTPUT: (dO/dJ,Omegar,Omegaphi,Omegaz,Autofit error message) HISTORY: 2016-07-15 - Written - Bovy (UofT) """ out= actionAngleTorus_c.actionAngleTorus_hessian_c(\ self._pot, jr,jphi,jz, tol=kwargs.get('tol',self._tol), dJ=kwargs.get('dJ',self._dJ)) if out[4] != 0: warnings.warn("actionAngleTorus' AutoFit exited with non-zero return status %i: %s" % (out[4],_autofit_errvals[out[4]]), galpyWarning) # Re-arrange frequencies and actions to r,phi,z out[0][:,:]= out[0][:,[0,2,1]] out[0][:,:]= out[0][[0,2,1]] if kwargs.get('nosym',False): return out else :# explicitly symmetrize return (0.5*(out[0]+out[0].T),out[1],out[2],out[3],out[4])
[ "def", "hessianFreqs", "(", "self", ",", "jr", ",", "jphi", ",", "jz", ",", "*", "*", "kwargs", ")", ":", "out", "=", "actionAngleTorus_c", ".", "actionAngleTorus_hessian_c", "(", "self", ".", "_pot", ",", "jr", ",", "jphi", ",", "jz", ",", "tol", "=", "kwargs", ".", "get", "(", "'tol'", ",", "self", ".", "_tol", ")", ",", "dJ", "=", "kwargs", ".", "get", "(", "'dJ'", ",", "self", ".", "_dJ", ")", ")", "if", "out", "[", "4", "]", "!=", "0", ":", "warnings", ".", "warn", "(", "\"actionAngleTorus' AutoFit exited with non-zero return status %i: %s\"", "%", "(", "out", "[", "4", "]", ",", "_autofit_errvals", "[", "out", "[", "4", "]", "]", ")", ",", "galpyWarning", ")", "# Re-arrange frequencies and actions to r,phi,z", "out", "[", "0", "]", "[", ":", ",", ":", "]", "=", "out", "[", "0", "]", "[", ":", ",", "[", "0", ",", "2", ",", "1", "]", "]", "out", "[", "0", "]", "[", ":", ",", ":", "]", "=", "out", "[", "0", "]", "[", "[", "0", ",", "2", ",", "1", "]", "]", "if", "kwargs", ".", "get", "(", "'nosym'", ",", "False", ")", ":", "return", "out", "else", ":", "# explicitly symmetrize", "return", "(", "0.5", "*", "(", "out", "[", "0", "]", "+", "out", "[", "0", "]", ".", "T", ")", ",", "out", "[", "1", "]", ",", "out", "[", "2", "]", ",", "out", "[", "3", "]", ",", "out", "[", "4", "]", ")" ]
NAME:
   hessianFreqs
PURPOSE:
   return the Hessian d Omega / d J and frequencies Omega corresponding to a torus
INPUT:
   jr - radial action (scalar)
   jphi - azimuthal action (scalar)
   jz - vertical action (scalar)
   tol= (object-wide value) goal for |dJ|/|J| along the torus
   dJ= (object-wide value) action difference when computing derivatives (Hessian or Jacobian)
   nosym= (False) if True, don't explicitly symmetrize the Hessian (good to check errors)
OUTPUT:
   (dO/dJ,Omegar,Omegaphi,Omegaz,Autofit error message)
HISTORY:
   2016-07-15 - Written - Bovy (UofT)
[ "NAME", ":" ]
python
train
29.729167
astropy/regions
regions/io/ds9/write.py
https://github.com/astropy/regions/blob/452d962c417e4ff20d1268f99535c6ff89c83437/regions/io/ds9/write.py#L12-L46
def ds9_objects_to_string(regions, coordsys='fk5', fmt='.6f', radunit='deg'): """ Converts a `list` of `~regions.Region` to DS9 region string. Parameters ---------- regions : `list` List of `~regions.Region` objects coordsys : `str`, optional This overrides the coordinate system frame for all regions. Default is 'fk5'. fmt : `str`, optional A python string format defining the output precision. Default is .6f, which is accurate to 0.0036 arcseconds. radunit : `str`, optional This denotes the unit of the radius. Default is 'deg'(degrees) Returns ------- region_string : `str` DS9 region string Examples -------- >>> from astropy import units as u >>> from astropy.coordinates import SkyCoord >>> from regions import CircleSkyRegion, ds9_objects_to_string >>> reg_sky = CircleSkyRegion(SkyCoord(1 * u.deg, 2 * u.deg), 5 * u.deg) >>> print(ds9_objects_to_string([reg_sky])) # Region file format: DS9 astropy/regions fk5 circle(1.000007,2.000002,5.000000) """ shapelist = to_shape_list(regions, coordsys) return shapelist.to_ds9(coordsys, fmt, radunit)
[ "def", "ds9_objects_to_string", "(", "regions", ",", "coordsys", "=", "'fk5'", ",", "fmt", "=", "'.6f'", ",", "radunit", "=", "'deg'", ")", ":", "shapelist", "=", "to_shape_list", "(", "regions", ",", "coordsys", ")", "return", "shapelist", ".", "to_ds9", "(", "coordsys", ",", "fmt", ",", "radunit", ")" ]
Converts a `list` of `~regions.Region` to DS9 region string.

Parameters
----------
regions : `list`
    List of `~regions.Region` objects
coordsys : `str`, optional
    This overrides the coordinate system frame for all regions.
    Default is 'fk5'.
fmt : `str`, optional
    A python string format defining the output precision.
    Default is .6f, which is accurate to 0.0036 arcseconds.
radunit : `str`, optional
    This denotes the unit of the radius. Default is 'deg'(degrees)

Returns
-------
region_string : `str`
    DS9 region string

Examples
--------
>>> from astropy import units as u
>>> from astropy.coordinates import SkyCoord
>>> from regions import CircleSkyRegion, ds9_objects_to_string
>>> reg_sky = CircleSkyRegion(SkyCoord(1 * u.deg, 2 * u.deg), 5 * u.deg)
>>> print(ds9_objects_to_string([reg_sky]))
# Region file format: DS9 astropy/regions
fk5
circle(1.000007,2.000002,5.000000)
[ "Converts", "a", "list", "of", "~regions", ".", "Region", "to", "DS9", "region", "string", "." ]
python
train
33.542857
bitesofcode/projexui
projexui/widgets/xmultitagedit.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xmultitagedit.py#L471-L483
def itemFromTag( self, tag ): """ Returns the item assigned to the given tag. :param tag | <str> :return <XMultiTagItem> || None """ for row in range(self.count() - 1): item = self.item(row) if ( item and item.text() == tag ): return item return None
[ "def", "itemFromTag", "(", "self", ",", "tag", ")", ":", "for", "row", "in", "range", "(", "self", ".", "count", "(", ")", "-", "1", ")", ":", "item", "=", "self", ".", "item", "(", "row", ")", "if", "(", "item", "and", "item", ".", "text", "(", ")", "==", "tag", ")", ":", "return", "item", "return", "None" ]
Returns the item assigned to the given tag.

:param tag | <str>

:return <XMultiTagItem> || None
[ "Returns", "the", "item", "assigned", "to", "the", "given", "tag", ".", ":", "param", "tag", "|", "<str", ">", ":", "return", "<XMultiTagItem", ">", "||", "None" ]
python
train
28.692308
rbarrois/django_xworkflows
django_xworkflows/models.py
https://github.com/rbarrois/django_xworkflows/blob/7f6c3e54e7fd64d39541bffa654c7f2e28685270/django_xworkflows/models.py#L217-L227
def _find_workflows(mcs, attrs): """Find workflow definition(s) in a WorkflowEnabled definition. This method overrides the default behavior from xworkflows in order to use our custom StateField objects. """ workflows = {} for k, v in attrs.items(): if isinstance(v, StateField): workflows[k] = v return workflows
[ "def", "_find_workflows", "(", "mcs", ",", "attrs", ")", ":", "workflows", "=", "{", "}", "for", "k", ",", "v", "in", "attrs", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "StateField", ")", ":", "workflows", "[", "k", "]", "=", "v", "return", "workflows" ]
Find workflow definition(s) in a WorkflowEnabled definition. This method overrides the default behavior from xworkflows in order to use our custom StateField objects.
[ "Find", "workflow", "definition", "(", "s", ")", "in", "a", "WorkflowEnabled", "definition", "." ]
python
train
35.181818
joar/mig
mig/__init__.py
https://github.com/joar/mig/blob/e1a7a8b9ea5941a05a27d5afbb5952965bb20ae5/mig/__init__.py#L75-L85
def sorted_migrations(self): """ Sort migrations if necessary and store in self._sorted_migrations """ if not self._sorted_migrations: self._sorted_migrations = sorted( self.migration_registry.items(), # sort on the key... the migration number key=lambda migration_tuple: migration_tuple[0]) return self._sorted_migrations
[ "def", "sorted_migrations", "(", "self", ")", ":", "if", "not", "self", ".", "_sorted_migrations", ":", "self", ".", "_sorted_migrations", "=", "sorted", "(", "self", ".", "migration_registry", ".", "items", "(", ")", ",", "# sort on the key... the migration number", "key", "=", "lambda", "migration_tuple", ":", "migration_tuple", "[", "0", "]", ")", "return", "self", ".", "_sorted_migrations" ]
Sort migrations if necessary and store in self._sorted_migrations
[ "Sort", "migrations", "if", "necessary", "and", "store", "in", "self", ".", "_sorted_migrations" ]
python
train
37.545455
chemlab/chemlab
chemlab/mviewer/api/display.py
https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/mviewer/api/display.py#L114-L120
def goto_time(timeval): '''Go to a specific time (in nanoseconds) in the current trajectory. ''' i = bisect.bisect(viewer.frame_times, timeval * 1000) goto_frame(i)
[ "def", "goto_time", "(", "timeval", ")", ":", "i", "=", "bisect", ".", "bisect", "(", "viewer", ".", "frame_times", ",", "timeval", "*", "1000", ")", "goto_frame", "(", "i", ")" ]
Go to a specific time (in nanoseconds) in the current trajectory.
[ "Go", "to", "a", "specific", "time", "(", "in", "nanoseconds", ")", "in", "the", "current", "trajectory", "." ]
python
train
25.571429
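In the chemlab viewer's command shell, where the API functions above are in scope, jumping to a time is a one-liner; the value here is illustrative.

# Jump to the frame closest to 1.5 ns in the loaded trajectory.
goto_time(1.5)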
defunkt/pystache
pystache/context.py
https://github.com/defunkt/pystache/blob/17a5dfdcd56eb76af731d141de395a7632a905b8/pystache/context.py#L146-L199
def create(*context, **kwargs): """ Build a ContextStack instance from a sequence of context-like items. This factory-style method is more general than the ContextStack class's constructor in that, unlike the constructor, the argument list can itself contain ContextStack instances. Here is an example illustrating various aspects of this method: >>> obj1 = {'animal': 'cat', 'vegetable': 'carrot', 'mineral': 'copper'} >>> obj2 = ContextStack({'vegetable': 'spinach', 'mineral': 'silver'}) >>> >>> context = ContextStack.create(obj1, None, obj2, mineral='gold') >>> >>> context.get('animal') 'cat' >>> context.get('vegetable') 'spinach' >>> context.get('mineral') 'gold' Arguments: *context: zero or more dictionaries, ContextStack instances, or objects with which to populate the initial context stack. None arguments will be skipped. Items in the *context list are added to the stack in order so that later items in the argument list take precedence over earlier items. This behavior is the same as the constructor's. **kwargs: additional key-value data to add to the context stack. As these arguments appear after all items in the *context list, in the case of key conflicts these values take precedence over all items in the *context list. This behavior is the same as the constructor's. """ items = context context = ContextStack() for item in items: if item is None: continue if isinstance(item, ContextStack): context._stack.extend(item._stack) else: context.push(item) if kwargs: context.push(kwargs) return context
[ "def", "create", "(", "*", "context", ",", "*", "*", "kwargs", ")", ":", "items", "=", "context", "context", "=", "ContextStack", "(", ")", "for", "item", "in", "items", ":", "if", "item", "is", "None", ":", "continue", "if", "isinstance", "(", "item", ",", "ContextStack", ")", ":", "context", ".", "_stack", ".", "extend", "(", "item", ".", "_stack", ")", "else", ":", "context", ".", "push", "(", "item", ")", "if", "kwargs", ":", "context", ".", "push", "(", "kwargs", ")", "return", "context" ]
Build a ContextStack instance from a sequence of context-like items.

This factory-style method is more general than the ContextStack class's
constructor in that, unlike the constructor, the argument list
can itself contain ContextStack instances.

Here is an example illustrating various aspects of this method:

>>> obj1 = {'animal': 'cat', 'vegetable': 'carrot', 'mineral': 'copper'}
>>> obj2 = ContextStack({'vegetable': 'spinach', 'mineral': 'silver'})
>>>
>>> context = ContextStack.create(obj1, None, obj2, mineral='gold')
>>>
>>> context.get('animal')
'cat'
>>> context.get('vegetable')
'spinach'
>>> context.get('mineral')
'gold'

Arguments:

  *context: zero or more dictionaries, ContextStack instances, or objects
    with which to populate the initial context stack.  None arguments
    will be skipped.  Items in the *context list are added to the stack
    in order so that later items in the argument list take precedence
    over earlier items.  This behavior is the same as the constructor's.

  **kwargs: additional key-value data to add to the context stack.
    As these arguments appear after all items in the *context list, in
    the case of key conflicts these values take precedence over all
    items in the *context list.  This behavior is the same as the
    constructor's.
[ "Build", "a", "ContextStack", "instance", "from", "a", "sequence", "of", "context", "-", "like", "items", "." ]
python
train
35.222222
rackerlabs/simpl
simpl/incubator/rest.py
https://github.com/rackerlabs/simpl/blob/60ed3336a931cd6a7a7246e60f26165d9dc7c99c/simpl/incubator/rest.py#L115-L120
def coerce_many(schema=str): """Expect the input to be a sequence of items which conform to `schema`.""" def validate(val): """Apply schema check/version to each item.""" return [volup.Coerce(schema)(x) for x in val] return validate
[ "def", "coerce_many", "(", "schema", "=", "str", ")", ":", "def", "validate", "(", "val", ")", ":", "\"\"\"Apply schema check/version to each item.\"\"\"", "return", "[", "volup", ".", "Coerce", "(", "schema", ")", "(", "x", ")", "for", "x", "in", "val", "]", "return", "validate" ]
Expect the input to be a sequence of items which conform to `schema`.
[ "Expect", "the", "input", "to", "be", "a", "sequence", "of", "items", "which", "conform", "to", "schema", "." ]
python
train
42.5
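A sketch of coerce_many inside a voluptuous schema (illustrative, not part of the record); the field name and input values are assumptions.

# Validate a list-valued field by coercing every element to int.
import voluptuous as volup
from simpl.incubator.rest import coerce_many

schema = volup.Schema({'ports': coerce_many(int)})
print(schema({'ports': ['80', '443']}))  # {'ports': [80, 443]}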
hughsie/python-appstream
appstream/component.py
https://github.com/hughsie/python-appstream/blob/f2606380278c5728ee7f8e7d19914c54fca05e76/appstream/component.py#L260-L265
def get_image_by_kind(self, kind): """ returns a image of a specific kind """ for ss in self.images: if ss.kind == kind: return ss return None
[ "def", "get_image_by_kind", "(", "self", ",", "kind", ")", ":", "for", "ss", "in", "self", ".", "images", ":", "if", "ss", ".", "kind", "==", "kind", ":", "return", "ss", "return", "None" ]
returns an image of a specific kind
[ "returns", "a", "image", "of", "a", "specific", "kind" ]
python
train
31.5
JdeRobot/base
src/drivers/drone/cmdvel.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/drone/cmdvel.py#L230-L241
def sendCMD (self, vel): ''' Sends CMDVel. @param vel: CMDVel to publish @type vel: CMDVel ''' self.lock.acquire() self.vel = vel self.lock.release()
[ "def", "sendCMD", "(", "self", ",", "vel", ")", ":", "self", ".", "lock", ".", "acquire", "(", ")", "self", ".", "vel", "=", "vel", "self", ".", "lock", ".", "release", "(", ")" ]
Sends CMDVel.

@param vel: CMDVel to publish
@type vel: CMDVel
[ "Sends", "CMDVel", "." ]
python
train
17.083333
sibirrer/lenstronomy
lenstronomy/LensModel/Profiles/p_jaffe.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/Profiles/p_jaffe.py#L28-L46
def density_2d(self, x, y, rho0, Ra, Rs, center_x=0, center_y=0): """ projected density :param x: :param y: :param rho0: :param Ra: :param Rs: :param center_x: :param center_y: :return: """ Ra, Rs = self._sort_ra_rs(Ra, Rs) x_ = x - center_x y_ = y - center_y r = np.sqrt(x_**2 + y_**2) sigma0 = self.rho2sigma(rho0, Ra, Rs) sigma = sigma0 * Ra * Rs / (Rs - Ra) * (1 / np.sqrt(Ra ** 2 + r ** 2) - 1 / np.sqrt(Rs ** 2 + r ** 2)) return sigma
[ "def", "density_2d", "(", "self", ",", "x", ",", "y", ",", "rho0", ",", "Ra", ",", "Rs", ",", "center_x", "=", "0", ",", "center_y", "=", "0", ")", ":", "Ra", ",", "Rs", "=", "self", ".", "_sort_ra_rs", "(", "Ra", ",", "Rs", ")", "x_", "=", "x", "-", "center_x", "y_", "=", "y", "-", "center_y", "r", "=", "np", ".", "sqrt", "(", "x_", "**", "2", "+", "y_", "**", "2", ")", "sigma0", "=", "self", ".", "rho2sigma", "(", "rho0", ",", "Ra", ",", "Rs", ")", "sigma", "=", "sigma0", "*", "Ra", "*", "Rs", "/", "(", "Rs", "-", "Ra", ")", "*", "(", "1", "/", "np", ".", "sqrt", "(", "Ra", "**", "2", "+", "r", "**", "2", ")", "-", "1", "/", "np", ".", "sqrt", "(", "Rs", "**", "2", "+", "r", "**", "2", ")", ")", "return", "sigma" ]
projected density

:param x:
:param y:
:param rho0:
:param Ra:
:param Rs:
:param center_x:
:param center_y:
:return:
[ "projected", "density", ":", "param", "x", ":", ":", "param", "y", ":", ":", "param", "rho0", ":", ":", "param", "Ra", ":", ":", "param", "Rs", ":", ":", "param", "center_x", ":", ":", "param", "center_y", ":", ":", "return", ":" ]
python
train
29.789474
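A sketch evaluating the projected density above on a numpy grid (illustrative, not part of the record); the PJaffe class name is an assumption inferred from the module path, and the parameter values are arbitrary.

# Evaluate the projected density on a 5x5 grid (parameter values illustrative).
import numpy as np
from lenstronomy.LensModel.Profiles.p_jaffe import PJaffe

profile = PJaffe()
x, y = np.meshgrid(np.linspace(-2, 2, 5), np.linspace(-2, 2, 5))
sigma = profile.density_2d(x, y, rho0=1.0, Ra=0.2, Rs=2.0)
print(sigma.shape)  # (5, 5)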
twilio/twilio-python
twilio/rest/fax/v1/fax/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/fax/v1/fax/__init__.py#L104-L139
def page(self, from_=values.unset, to=values.unset, date_created_on_or_before=values.unset, date_created_after=values.unset, page_token=values.unset, page_number=values.unset, page_size=values.unset): """ Retrieve a single page of FaxInstance records from the API. Request is executed immediately :param unicode from_: Retrieve only those faxes sent from this phone number :param unicode to: Retrieve only those faxes sent to this phone number :param datetime date_created_on_or_before: Retrieve only faxes created on or before this date :param datetime date_created_after: Retrieve only faxes created after this date :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of FaxInstance :rtype: twilio.rest.fax.v1.fax.FaxPage """ params = values.of({ 'From': from_, 'To': to, 'DateCreatedOnOrBefore': serialize.iso8601_datetime(date_created_on_or_before), 'DateCreatedAfter': serialize.iso8601_datetime(date_created_after), 'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, }) response = self._version.page( 'GET', self._uri, params=params, ) return FaxPage(self._version, response, self._solution)
[ "def", "page", "(", "self", ",", "from_", "=", "values", ".", "unset", ",", "to", "=", "values", ".", "unset", ",", "date_created_on_or_before", "=", "values", ".", "unset", ",", "date_created_after", "=", "values", ".", "unset", ",", "page_token", "=", "values", ".", "unset", ",", "page_number", "=", "values", ".", "unset", ",", "page_size", "=", "values", ".", "unset", ")", ":", "params", "=", "values", ".", "of", "(", "{", "'From'", ":", "from_", ",", "'To'", ":", "to", ",", "'DateCreatedOnOrBefore'", ":", "serialize", ".", "iso8601_datetime", "(", "date_created_on_or_before", ")", ",", "'DateCreatedAfter'", ":", "serialize", ".", "iso8601_datetime", "(", "date_created_after", ")", ",", "'PageToken'", ":", "page_token", ",", "'Page'", ":", "page_number", ",", "'PageSize'", ":", "page_size", ",", "}", ")", "response", "=", "self", ".", "_version", ".", "page", "(", "'GET'", ",", "self", ".", "_uri", ",", "params", "=", "params", ",", ")", "return", "FaxPage", "(", "self", ".", "_version", ",", "response", ",", "self", ".", "_solution", ")" ]
Retrieve a single page of FaxInstance records from the API.
Request is executed immediately

:param unicode from_: Retrieve only those faxes sent from this phone number
:param unicode to: Retrieve only those faxes sent to this phone number
:param datetime date_created_on_or_before: Retrieve only faxes created on or before this date
:param datetime date_created_after: Retrieve only faxes created after this date
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50

:returns: Page of FaxInstance
:rtype: twilio.rest.fax.v1.fax.FaxPage
[ "Retrieve", "a", "single", "page", "of", "FaxInstance", "records", "from", "the", "API", ".", "Request", "is", "executed", "immediately" ]
python
train
42.888889
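A sketch of paging through faxes with the method above (illustrative, not part of the record); the account SID, auth token, and date window are placeholders, and the client attribute path to the fax list is an assumption.

# Fetch one page of faxes created after a given date (placeholders throughout).
from datetime import datetime
from twilio.rest import Client

client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
page = client.fax.faxes.page(
    date_created_after=datetime(2019, 1, 1),
    page_size=20,
)
for fax in page:
    print(fax.sid)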
nyaruka/smartmin
smartmin/views.py
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L881-L896
def lookup_field_label(self, context, field, default=None): """ Figures out what the field label should be for the passed in field name. We overload this so as to use our form to see if there is label set there. If so then we'll pass that as the default instead of having our parent derive the field from the name. """ default = None for form_field in self.form: if form_field.name == field: default = form_field.label break return super(SmartFormMixin, self).lookup_field_label(context, field, default=default)
[ "def", "lookup_field_label", "(", "self", ",", "context", ",", "field", ",", "default", "=", "None", ")", ":", "default", "=", "None", "for", "form_field", "in", "self", ".", "form", ":", "if", "form_field", ".", "name", "==", "field", ":", "default", "=", "form_field", ".", "label", "break", "return", "super", "(", "SmartFormMixin", ",", "self", ")", ".", "lookup_field_label", "(", "context", ",", "field", ",", "default", "=", "default", ")" ]
Figures out what the field label should be for the passed-in field name. We overload this so as to use our form to see if there is a label set there. If so then we'll pass that as the default instead of having our parent derive the field from the name.
[ "Figures", "out", "what", "the", "field", "label", "should", "be", "for", "the", "passed", "in", "field", "name", "." ]
python
train
38.5
markuskiller/textblob-de
textblob_de/blob.py
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/blob.py#L667-L692
def sentiment(self): """Return a tuple of form (polarity, subjectivity ) where polarity is a float within the range [-1.0, 1.0] and subjectivity is a float within the range [0.0, 1.0] where 0.0 is very objective and 1.0 is very subjective. :rtype: named tuple of the form ``Sentiment(polarity=0.0, subjectivity=0.0)`` """ #: Enhancement Issue #2 #: adapted from 'textblob.en.sentiments.py' #: Return type declaration _RETURN_TYPE = namedtuple('Sentiment', ['polarity', 'subjectivity']) _polarity = 0 _subjectivity = 0 for s in self.sentences: _polarity += s.polarity _subjectivity += s.subjectivity try: polarity = _polarity / len(self.sentences) except ZeroDivisionError: polarity = 0.0 try: subjectivity = _subjectivity / len(self.sentences) except ZeroDivisionError: subjectivity = 0.0 return _RETURN_TYPE(polarity, subjectivity)
[ "def", "sentiment", "(", "self", ")", ":", "#: Enhancement Issue #2", "#: adapted from 'textblob.en.sentiments.py'", "#: Return type declaration", "_RETURN_TYPE", "=", "namedtuple", "(", "'Sentiment'", ",", "[", "'polarity'", ",", "'subjectivity'", "]", ")", "_polarity", "=", "0", "_subjectivity", "=", "0", "for", "s", "in", "self", ".", "sentences", ":", "_polarity", "+=", "s", ".", "polarity", "_subjectivity", "+=", "s", ".", "subjectivity", "try", ":", "polarity", "=", "_polarity", "/", "len", "(", "self", ".", "sentences", ")", "except", "ZeroDivisionError", ":", "polarity", "=", "0.0", "try", ":", "subjectivity", "=", "_subjectivity", "/", "len", "(", "self", ".", "sentences", ")", "except", "ZeroDivisionError", ":", "subjectivity", "=", "0.0", "return", "_RETURN_TYPE", "(", "polarity", ",", "subjectivity", ")" ]
Return a tuple of form (polarity, subjectivity) where polarity
is a float within the range [-1.0, 1.0] and subjectivity is a float
within the range [0.0, 1.0] where 0.0 is very objective and 1.0 is
very subjective.

:rtype: named tuple of the form ``Sentiment(polarity=0.0, subjectivity=0.0)``
[ "Return", "a", "tuple", "of", "form", "(", "polarity", "subjectivity", ")", "where", "polarity", "is", "a", "float", "within", "the", "range", "[", "-", "1", ".", "0", "1", ".", "0", "]", "and", "subjectivity", "is", "a", "float", "within", "the", "range", "[", "0", ".", "0", "1", ".", "0", "]", "where", "0", ".", "0", "is", "very", "objective", "and", "1", ".", "0", "is", "very", "subjective", "." ]
python
train
39.384615
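A sketch of the sentence-averaged sentiment above on a short German text (illustrative, not part of the record); the scores depend on the underlying lexicon and are not reproduced here.

# Average polarity/subjectivity over the blob's sentences.
from textblob_de import TextBlobDE

blob = TextBlobDE(u"Das Essen war hervorragend. Der Service war leider langsam.")
print(blob.sentiment)  # Sentiment(polarity=..., subjectivity=...)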
spotify/gordon
gordon/metrics/ffwd.py
https://github.com/spotify/gordon/blob/8dbf54a032cfaa8f003264682456236b6a69c039/gordon/metrics/ffwd.py#L87-L96
async def send(self, metric): """Transform metric to JSON bytestring and send to server. Args: metric (dict): Complete metric to send as JSON. """ message = json.dumps(metric).encode('utf-8') await self.loop.create_datagram_endpoint( lambda: UDPClientProtocol(message), remote_addr=(self.ip, self.port))
[ "async", "def", "send", "(", "self", ",", "metric", ")", ":", "message", "=", "json", ".", "dumps", "(", "metric", ")", ".", "encode", "(", "'utf-8'", ")", "await", "self", ".", "loop", ".", "create_datagram_endpoint", "(", "lambda", ":", "UDPClientProtocol", "(", "message", ")", ",", "remote_addr", "=", "(", "self", ".", "ip", ",", "self", ".", "port", ")", ")" ]
Transform metric to JSON bytestring and send to server.

Args:
    metric (dict): Complete metric to send as JSON.
[ "Transform", "metric", "to", "JSON", "bytestring", "and", "send", "to", "server", "." ]
python
train
37.1
openego/ding0
ding0/grid/mv_grid/util/data_input.py
https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/grid/mv_grid/util/data_input.py#L81-L105
def _parse_depot_section(f): """Parse TSPLIB DEPOT_SECTION data part from file descriptor f Args ---- f : str File descriptor Returns ------- int an array of depots """ depots = [] for line in f: line = strip(line) if line == '-1' or line == 'EOF': # End of section break else: depots.append(line) if len(depots) != 1: raise ParseException('One and only one depot is supported') return int(depots[0])
[ "def", "_parse_depot_section", "(", "f", ")", ":", "depots", "=", "[", "]", "for", "line", "in", "f", ":", "line", "=", "strip", "(", "line", ")", "if", "line", "==", "'-1'", "or", "line", "==", "'EOF'", ":", "# End of section", "break", "else", ":", "depots", ".", "append", "(", "line", ")", "if", "len", "(", "depots", ")", "!=", "1", ":", "raise", "ParseException", "(", "'One and only one depot is supported'", ")", "return", "int", "(", "depots", "[", "0", "]", ")" ]
Parse TSPLIB DEPOT_SECTION data part from file descriptor f

Args
----
f : str
    File descriptor

Returns
-------
int
    an array of depots
[ "Parse", "TSPLIB", "DEPOT_SECTION", "data", "part", "from", "file", "descriptor", "f", "Args", "----", "f", ":", "str", "File", "descriptor", "Returns", "-------", "int", "an", "array", "of", "depots" ]
python
train
20.16
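A sketch feeding a minimal DEPOT_SECTION fragment to the parser above (illustrative, not part of the record); the helper is private, so importing it directly is for demonstration only.

# Parse a one-depot section terminated by '-1' (fragment is illustrative).
from ding0.grid.mv_grid.util.data_input import _parse_depot_section

lines = iter(['1', '-1'])
print(_parse_depot_section(lines))  # 1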
c2nes/javalang
javalang/util.py
https://github.com/c2nes/javalang/blob/f98ffcb31d1daa57fbe5bd6def8ad7c3126d8242/javalang/util.py#L70-L88
def pop_marker(self, reset): """ Pop a marker off of the marker stack. If reset is True then the iterator will be returned to the state it was in before the corresponding call to push_marker(). """ marker = self.markers.pop() if reset: # Make the values available to be read again marker.extend(self.look_ahead) self.look_ahead = marker elif self.markers: # Otherwise, reassign the values to the top marker self.markers[-1].extend(marker) else: # If there are not more markers in the stack then discard the values pass
[ "def", "pop_marker", "(", "self", ",", "reset", ")", ":", "marker", "=", "self", ".", "markers", ".", "pop", "(", ")", "if", "reset", ":", "# Make the values available to be read again", "marker", ".", "extend", "(", "self", ".", "look_ahead", ")", "self", ".", "look_ahead", "=", "marker", "elif", "self", ".", "markers", ":", "# Otherwise, reassign the values to the top marker", "self", ".", "markers", "[", "-", "1", "]", ".", "extend", "(", "marker", ")", "else", ":", "# If there are not more markers in the stack then discard the values", "pass" ]
Pop a marker off of the marker stack. If reset is True then the iterator will be returned to the state it was in before the corresponding call to push_marker().
[ "Pop", "a", "marker", "off", "of", "the", "marker", "stack", ".", "If", "reset", "is", "True", "then", "the", "iterator", "will", "be", "returned", "to", "the", "state", "it", "was", "in", "before", "the", "corresponding", "call", "to", "push_marker", "()", "." ]
python
valid
34.263158
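The record shows pop_marker without its surrounding class, so here is a minimal look-ahead iterator sketch (an assumption about the context, modeled on javalang's tokenizer utility) that makes the rewind semantics concrete:

class Rewindable:
    def __init__(self, iterable):
        self.iterator = iter(iterable)
        self.look_ahead = []   # values queued to be read next
        self.markers = []      # one buffer of consumed values per marker

    def push_marker(self):
        self.markers.append([])

    def next(self):
        value = self.look_ahead.pop(0) if self.look_ahead else next(self.iterator)
        if self.markers:
            self.markers[-1].append(value)  # remember it for a possible rewind
        return value

    def pop_marker(self, reset):
        marker = self.markers.pop()
        if reset:
            # make the values available to be read again
            marker.extend(self.look_ahead)
            self.look_ahead = marker
        elif self.markers:
            self.markers[-1].extend(marker)  # reassign to the enclosing marker

stream = Rewindable('abc')
stream.push_marker()
print(stream.next(), stream.next())  # a b
stream.pop_marker(reset=True)        # rewind
print(stream.next())                 # a again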
jackjackk/gdxpy
gdxpy/gdxpy.py
https://github.com/jackjackk/gdxpy/blob/ffc698b0306a7ee8aa327833f52002f78a5ccf4e/gdxpy/gdxpy.py#L166-L170
def close(self): '''Close Gdx file and free up resources.''' h = self.gdx_handle gdxcc.gdxClose(h) gdxcc.gdxFree(h)
[ "def", "close", "(", "self", ")", ":", "h", "=", "self", ".", "gdx_handle", "gdxcc", ".", "gdxClose", "(", "h", ")", "gdxcc", ".", "gdxFree", "(", "h", ")" ]
Close Gdx file and free up resources.
[ "Close", "Gdx", "file", "and", "free", "up", "resources", "." ]
python
train
28.6
consbio/gis-metadata-parser
gis_metadata/utils.py
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/utils.py#L223-L234
def get_default_for(prop, value): """ Ensures complex property types have the correct default values """ prop = prop.strip('_') # Handle alternate props (leading underscores) val = reduce_value(value) # Filtering of value happens here if prop in _COMPLEX_LISTS: return wrap_value(val) elif prop in _COMPLEX_STRUCTS: return val or {} else: return u'' if val is None else val
[ "def", "get_default_for", "(", "prop", ",", "value", ")", ":", "prop", "=", "prop", ".", "strip", "(", "'_'", ")", "# Handle alternate props (leading underscores)", "val", "=", "reduce_value", "(", "value", ")", "# Filtering of value happens here", "if", "prop", "in", "_COMPLEX_LISTS", ":", "return", "wrap_value", "(", "val", ")", "elif", "prop", "in", "_COMPLEX_STRUCTS", ":", "return", "val", "or", "{", "}", "else", ":", "return", "u''", "if", "val", "is", "None", "else", "val" ]
Ensures complex property types have the correct default values
[ "Ensures", "complex", "property", "types", "have", "the", "correct", "default", "values" ]
python
train
34.75
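The lookup sets and helpers live elsewhere in gis_metadata, so this sketch substitutes tiny stand-ins (everything here except the dispatch shape is an assumption) to show how each class of property defaults:

_COMPLEX_LISTS = {'attributions'}      # stand-in contents
_COMPLEX_STRUCTS = {'contacts'}

def reduce_value(value):
    return value

def wrap_value(value):
    if value is None:
        return []
    return value if isinstance(value, list) else [value]

def get_default_for(prop, value):
    prop = prop.strip('_')
    val = reduce_value(value)
    if prop in _COMPLEX_LISTS:
        return wrap_value(val)
    elif prop in _COMPLEX_STRUCTS:
        return val or {}
    else:
        return u'' if val is None else val

print(get_default_for('_attributions', 'x'))  # ['x'] - list-typed props are wrapped
print(get_default_for('contacts', None))      # {}    - struct-typed props default to a dict
print(get_default_for('title', None))         # ''    - everything else defaults to a string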
jaywink/federation
federation/protocols/activitypub/protocol.py
https://github.com/jaywink/federation/blob/59d31bb37e662891dbea72c1dee05dc53146c78b/federation/protocols/activitypub/protocol.py#L19-L23
def identify_id(id: str) -> bool: """ Try to identify whether this is an ActivityPub ID. """ return re.match(r'^https?://', id, flags=re.IGNORECASE) is not None
[ "def", "identify_id", "(", "id", ":", "str", ")", "->", "bool", ":", "return", "re", ".", "match", "(", "r'^https?://'", ",", "id", ",", "flags", "=", "re", ".", "IGNORECASE", ")", "is", "not", "None" ]
Try to identify whether this is an ActivityPub ID.
[ "Try", "to", "identify", "whether", "this", "is", "an", "ActivityPub", "ID", "." ]
python
train
34.4
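Since the check is a plain regex, it can be verified in isolation; the sample IDs below are made up:

import re

def identify_id(id: str) -> bool:
    return re.match(r'^https?://', id, flags=re.IGNORECASE) is not None

print(identify_id('HTTPS://example.com/u/alice'))  # True  (scheme match is case-insensitive)
print(identify_id('[email protected]'))             # False (handle-style ID, not a URL)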
openstack/python-scciclient
scciclient/irmc/viom/client.py
https://github.com/openstack/python-scciclient/blob/4585ce2f76853b9773fb190ca0cfff0aa04a7cf8/scciclient/irmc/viom/client.py#L375-L401
def set_fc_volume(self, port_id, target_wwn, target_lun=0, boot_prio=1, initiator_wwnn=None, initiator_wwpn=None): """Set FibreChannel volume information to configuration. :param port_id: Physical port ID. :param target_wwn: WWN of target. :param target_lun: LUN number of target. :param boot_prio: Boot priority of the volume. 1 indicates the highest priority. :param initiator_wwnn: Virtual WWNN for initiator if necessary. :param initiator_wwpn: Virtual WWPN for initiator if necessary. """ port_handler = _parse_physical_port_id(port_id) fc_target = elcm.FCTarget(target_wwn, target_lun) fc_boot = elcm.FCBoot(boot_prio=boot_prio, boot_enable=True) fc_boot.add_target(fc_target) port = self._find_port(port_handler) if port: port_handler.set_fc_port(port, fc_boot, wwnn=initiator_wwnn, wwpn=initiator_wwpn) else: port = port_handler.create_fc_port(fc_boot, wwnn=initiator_wwnn, wwpn=initiator_wwpn) self._add_port(port_handler, port)
[ "def", "set_fc_volume", "(", "self", ",", "port_id", ",", "target_wwn", ",", "target_lun", "=", "0", ",", "boot_prio", "=", "1", ",", "initiator_wwnn", "=", "None", ",", "initiator_wwpn", "=", "None", ")", ":", "port_handler", "=", "_parse_physical_port_id", "(", "port_id", ")", "fc_target", "=", "elcm", ".", "FCTarget", "(", "target_wwn", ",", "target_lun", ")", "fc_boot", "=", "elcm", ".", "FCBoot", "(", "boot_prio", "=", "boot_prio", ",", "boot_enable", "=", "True", ")", "fc_boot", ".", "add_target", "(", "fc_target", ")", "port", "=", "self", ".", "_find_port", "(", "port_handler", ")", "if", "port", ":", "port_handler", ".", "set_fc_port", "(", "port", ",", "fc_boot", ",", "wwnn", "=", "initiator_wwnn", ",", "wwpn", "=", "initiator_wwpn", ")", "else", ":", "port", "=", "port_handler", ".", "create_fc_port", "(", "fc_boot", ",", "wwnn", "=", "initiator_wwnn", ",", "wwpn", "=", "initiator_wwpn", ")", "self", ".", "_add_port", "(", "port_handler", ",", "port", ")" ]
Set FibreChannel volume information to configuration. :param port_id: Physical port ID. :param target_wwn: WWN of target. :param target_lun: LUN number of target. :param boot_prio: Boot priority of the volume. 1 indicates the highest priority. :param initiator_wwnn: Virtual WWNN for initiator if necessary. :param initiator_wwpn: Virtual WWPN for initiator if necessary.
[ "Set", "FibreChannel", "volume", "information", "to", "configuration", "." ]
python
train
46.62963
openid/python-openid
openid/extensions/sreg.py
https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/extensions/sreg.py#L330-L351
def requestFields(self, field_names, required=False, strict=False): """Add the given list of fields to the request @param field_names: The simple registration data fields to request @type field_names: [str] @param required: Whether these values should be presented to the user as required @param strict: whether to raise an exception when a field is added to a request more than once @raise ValueError: when a field requested is not a simple registration field or strict is set and a field was requested more than once """ if isinstance(field_names, basestring): raise TypeError('Fields should be passed as a list of ' 'strings (not %r)' % (type(field_names),)) for field_name in field_names: self.requestField(field_name, required, strict=strict)
[ "def", "requestFields", "(", "self", ",", "field_names", ",", "required", "=", "False", ",", "strict", "=", "False", ")", ":", "if", "isinstance", "(", "field_names", ",", "basestring", ")", ":", "raise", "TypeError", "(", "'Fields should be passed as a list of '", "'strings (not %r)'", "%", "(", "type", "(", "field_names", ")", ",", ")", ")", "for", "field_name", "in", "field_names", ":", "self", ".", "requestField", "(", "field_name", ",", "required", ",", "strict", "=", "strict", ")" ]
Add the given list of fields to the request @param field_names: The simple registration data fields to request @type field_names: [str] @param required: Whether these values should be presented to the user as required @param strict: whether to raise an exception when a field is added to a request more than once @raise ValueError: when a field requested is not a simple registration field or strict is set and a field was requested more than once
[ "Add", "the", "given", "list", "of", "fields", "to", "the", "request" ]
python
train
41.045455
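A hedged usage sketch against python-openid's sreg module (constructing a bare SRegRequest like this is an assumption; in real use the request is attached to an OpenID auth request):

from openid.extensions import sreg

req = sreg.SRegRequest()
req.requestFields(['nickname', 'email'], required=True)
print(req.required)   # ['nickname', 'email']

# passing a bare string instead of a list is rejected:
# req.requestFields('nickname')  -> TypeError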
dcramer/quickunit
quickunit/utils.py
https://github.com/dcramer/quickunit/blob/f72b038aaead2c6f2c6013a94a1823724f59a205/quickunit/utils.py#L12-L26
def is_py_script(filename):
    "Returns True if a file is a python executable."
    if not (os.path.exists(filename) and os.path.isfile(filename)):
        # missing paths and non-regular files can never be scripts
        return False
    elif filename.endswith(".py"):
        return True
    elif not os.access(filename, os.X_OK):
        return False
    else:
        try:
            with open(filename, "r") as fp:
                first_line = fp.readline().strip()
            return "#!" in first_line and "python" in first_line
        except (IOError, OSError):
            # unreadable file: treat it as not a script
            return False
[ "def", "is_py_script", "(", "filename", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", "and", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "return", "False", "elif", "filename", ".", "endswith", "(", "\".py\"", ")", ":", "return", "True", "elif", "not", "os", ".", "access", "(", "filename", ",", "os", ".", "X_OK", ")", ":", "return", "False", "else", ":", "try", ":", "with", "open", "(", "filename", ",", "\"r\"", ")", "as", "fp", ":", "first_line", "=", "fp", ".", "readline", "(", ")", ".", "strip", "(", ")", "return", "\"#!\"", "in", "first_line", "and", "\"python\"", "in", "first_line", "except", "StopIteration", ":", "return", "False" ]
Returns True if a file is a python executable.
[ "Returns", "True", "if", "a", "file", "is", "a", "python", "executable", "." ]
python
train
34
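A throwaway demonstration of the shebang branch, assuming the (fixed) function above is in scope; it is POSIX-flavored because of the os.access(..., os.X_OK) check:

import os
import stat
import tempfile

with tempfile.NamedTemporaryFile('w', delete=False) as fp:
    fp.write('#!/usr/bin/env python\nprint("hi")\n')   # no .py suffix on purpose
path = fp.name
os.chmod(path, os.stat(path).st_mode | stat.S_IXUSR)   # make it executable

print(is_py_script(path))   # True: executable with a python shebang
os.unlink(path)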
apache/airflow
airflow/contrib/hooks/slack_webhook_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/slack_webhook_hook.py#L80-L97
def _get_token(self, token, http_conn_id): """ Given either a manually set token or a conn_id, return the webhook_token to use :param token: The manually provided token :type token: str :param http_conn_id: The conn_id provided :type http_conn_id: str :return: webhook_token (str) to use """ if token: return token elif http_conn_id: conn = self.get_connection(http_conn_id) extra = conn.extra_dejson return extra.get('webhook_token', '') else: raise AirflowException('Cannot get token: No valid Slack ' 'webhook token nor conn_id supplied')
[ "def", "_get_token", "(", "self", ",", "token", ",", "http_conn_id", ")", ":", "if", "token", ":", "return", "token", "elif", "http_conn_id", ":", "conn", "=", "self", ".", "get_connection", "(", "http_conn_id", ")", "extra", "=", "conn", ".", "extra_dejson", "return", "extra", ".", "get", "(", "'webhook_token'", ",", "''", ")", "else", ":", "raise", "AirflowException", "(", "'Cannot get token: No valid Slack '", "'webhook token nor conn_id supplied'", ")" ]
Given either a manually set token or a conn_id, return the webhook_token to use :param token: The manually provided token :type token: str :param http_conn_id: The conn_id provided :type http_conn_id: str :return: webhook_token (str) to use
[ "Given", "either", "a", "manually", "set", "token", "or", "a", "conn_id", "return", "the", "webhook_token", "to", "use", ":", "param", "token", ":", "The", "manually", "provided", "token", ":", "type", "token", ":", "str", ":", "param", "http_conn_id", ":", "The", "conn_id", "provided", ":", "type", "http_conn_id", ":", "str", ":", "return", ":", "webhook_token", "(", "str", ")", "to", "use" ]
python
test
39.277778
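A standalone mimic of the precedence logic (explicit token first, then the connection's extra JSON); the Airflow Connection machinery is replaced with a plain dict here:

def get_webhook_token(token, conn_extra):
    if token:
        return token                      # an explicit token always wins
    elif conn_extra is not None:
        return conn_extra.get('webhook_token', '')
    else:
        raise ValueError('Cannot get token: no token nor connection supplied')

print(get_webhook_token(None, {'webhook_token': 'T000/B000/XXXX'}))       # placeholder value
print(get_webhook_token('explicit-token', {'webhook_token': 'ignored'}))  # explicit-token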
HumanCellAtlas/dcp-cli
hca/util/_docs.py
https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/util/_docs.py#L51-L98
def _parse_docstring(docstring):
    """
    Using the sphinx RSTParse to parse __doc__ for argparse `parameters`, `help`, and `description`. The first
    rst paragraph encountered is treated as the argparse help text. Any param fields are treated as argparse
    arguments. Any other text is combined and added to the argparse description.

    example:
        \"""
        this will be the summary

        :param name: describe the parameter called name.

        this will be the descriptions

        * more description
        * more description

        This will also be in the description
        \"""

    :param str docstring:
    :return:
    :rtype: dict
    """
    settings = OptionParser(components=(RSTParser,)).get_default_values()
    rstparser = RSTParser()
    document = utils.new_document(' ', settings)
    rstparser.parse(docstring, document)
    if document.children[0].tagname != 'block_quote':
        logger.warning("The first line of the docstring must be blank.")
    else:
        document = document.children[0]

    def get_params(field_list_node, params):
        for field in field_list_node.children:
            name = field.children[0].rawsource.split(' ')
            if 'param' == name[0]:
                params[name[-1]] = field.children[1].astext()

    method_args = {'summary': '', 'params': dict(), 'description': ''}
    for node in document.children:
        if node.tagname == 'paragraph' and method_args['summary'] == '':
            method_args['summary'] = node.astext()
        elif node.tagname == 'field_list':
            get_params(node, method_args['params'])
        else:
            method_args['description'] += '\n' + node.astext()
    return method_args
[ "def", "_parse_docstring", "(", "docstring", ")", ":", "settings", "=", "OptionParser", "(", "components", "=", "(", "RSTParser", ",", ")", ")", ".", "get_default_values", "(", ")", "rstparser", "=", "RSTParser", "(", ")", "document", "=", "utils", ".", "new_document", "(", "' '", ",", "settings", ")", "rstparser", ".", "parse", "(", "docstring", ",", "document", ")", "if", "document", ".", "children", "[", "0", "]", ".", "tagname", "!=", "'block_quote'", ":", "logger", ".", "warning", "(", "\"The first line of the docstring must be blank.\"", ")", "else", ":", "document", "=", "document", ".", "children", "[", "0", "]", "def", "get_params", "(", "field_list_node", ",", "params", ")", ":", "for", "field", "in", "field_list_node", ".", "children", ":", "name", "=", "field", ".", "children", "[", "0", "]", ".", "rawsource", ".", "split", "(", "' '", ")", "if", "'param'", "==", "name", "[", "0", "]", ":", "params", "[", "name", "[", "-", "1", "]", "]", "=", "field", ".", "children", "[", "1", "]", ".", "astext", "(", ")", "method_args", "=", "{", "'summary'", ":", "''", ",", "'params'", ":", "dict", "(", ")", ",", "'description'", ":", "''", "}", "for", "node", "in", "document", ".", "children", ":", "if", "node", ".", "tagname", "is", "'paragraph'", "and", "method_args", "[", "'summary'", "]", "==", "''", ":", "method_args", "[", "'summary'", "]", "=", "node", ".", "astext", "(", ")", "elif", "node", ".", "tagname", "is", "'field_list'", ":", "get_params", "(", "node", ",", "method_args", "[", "'params'", "]", ")", "else", ":", "method_args", "[", "'description'", "]", "+=", "'\\n'", "+", "node", ".", "astext", "(", ")", "return", "method_args" ]
Using the sphinx RSTParse to parse __doc__ for argparse `parameters`, `help`, and `description`. The first rst paragraph encountered is treated as the argparse help text. Any param fields are treated as argparse arguments. Any other text is combined and added to the argparse description. example: \""" this will be the summary :param name: describe the parameter called name. this will be the descriptions * more description * more description This will also be in the description \""" :param str docstring: :return: :rtype: dict
[ "Using", "the", "sphinx", "RSTParse", "to", "parse", "__doc__", "for", "argparse", "parameters", "help", "and", "description", ".", "The", "first", "rst", "paragraph", "encountered", "it", "treated", "as", "the", "argparse", "help", "text", ".", "Any", "param", "fields", "are", "treated", "as", "argparse", "arguments", ".", "Any", "other", "text", "is", "combined", "and", "added", "to", "the", "argparse", "description", "." ]
python
train
34.979167
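Assuming docutils is installed and _parse_docstring is in scope, a docstring in the format it expects parses roughly as follows (the indentation matters: the body must form an rst block quote, which is why the function warns when the first line is not blank):

doc = '''
    summary line of the command

    :param name: description of the name argument

    extra prose that lands in the description
'''
result = _parse_docstring(doc)
# result is roughly:
# {'summary': 'summary line of the command',
#  'params': {'name': 'description of the name argument'},
#  'description': '\nextra prose that lands in the description'}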
pettarin/ipapy
ipapy/ipastring.py
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipastring.py#L342-L357
def cns_vwl_str_len_wb_sb(self): """ Return a new IPAString, containing only: 1. the consonants, 2. the vowels, and 3. the stress diacritics, 4. the length diacritics, 5. the word breaks, and 6. the syllable breaks in the current string. :rtype: IPAString """ return IPAString(ipa_chars=[c for c in self.ipa_chars if (c.is_letter) or (c.is_suprasegmental and (c.is_stress or c.is_length or c.is_word_break or c.is_syllable_break))])
[ "def", "cns_vwl_str_len_wb_sb", "(", "self", ")", ":", "return", "IPAString", "(", "ipa_chars", "=", "[", "c", "for", "c", "in", "self", ".", "ipa_chars", "if", "(", "c", ".", "is_letter", ")", "or", "(", "c", ".", "is_suprasegmental", "and", "(", "c", ".", "is_stress", "or", "c", ".", "is_length", "or", "c", ".", "is_word_break", "or", "c", ".", "is_syllable_break", ")", ")", "]", ")" ]
Return a new IPAString, containing only: 1. the consonants, 2. the vowels, and 3. the stress diacritics, 4. the length diacritics, 5. the word breaks, and 6. the syllable breaks in the current string. :rtype: IPAString
[ "Return", "a", "new", "IPAString", "containing", "only", ":", "1", ".", "the", "consonants", "2", ".", "the", "vowels", "and", "3", ".", "the", "stress", "diacritics", "4", ".", "the", "length", "diacritics", "5", ".", "the", "word", "breaks", "and", "6", ".", "the", "syllable", "breaks" ]
python
train
32.75
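A hedged usage sketch with ipapy, where this filter is exposed as a property; the IPA sample and the exact output are illustrative assumptions, since filtering depends on how ipapy classifies each character:

from ipapy.ipastring import IPAString

s = IPAString(unicode_string=u'\u02c8p\u02b0\u0254\u02d0t')   # 'ˈpʰɔːt'
# letters plus stress and length marks survive; the aspiration diacritic is dropped
print(s.cns_vwl_str_len_wb_sb)   # expected: 'ˈpɔːt'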
nilp0inter/cpe
cpe/cpeset2_3.py
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpeset2_3.py#L423-L437
def name_match(self, wfn): """ Accepts a set of CPE Names K and a candidate CPE Name X. It returns 'True' if X matches any member of K, and 'False' otherwise. :param CPESet self: A set of m known CPE Names K = {K1, K2, …, Km}. :param CPE cpe: A candidate CPE Name X. :returns: True if X matches K, otherwise False. :rtype: boolean """ for N in self.K: if CPESet2_3.cpe_superset(wfn, N): return True return False
[ "def", "name_match", "(", "self", ",", "wfn", ")", ":", "for", "N", "in", "self", ".", "K", ":", "if", "CPESet2_3", ".", "cpe_superset", "(", "wfn", ",", "N", ")", ":", "return", "True", "return", "False" ]
Accepts a set of CPE Names K and a candidate CPE Name X. It returns 'True' if X matches any member of K, and 'False' otherwise. :param CPESet self: A set of m known CPE Names K = {K1, K2, …, Km}. :param CPE cpe: A candidate CPE Name X. :returns: True if X matches K, otherwise False. :rtype: boolean
[ "Accepts", "a", "set", "of", "CPE", "Names", "K", "and", "a", "candidate", "CPE", "Name", "X", ".", "It", "returns", "True", "if", "X", "matches", "any", "member", "of", "K", "and", "False", "otherwise", "." ]
python
train
33.8
pawel-kow/domainconnect_python
domainconnect/domainconnect.py
https://github.com/pawel-kow/domainconnect_python/blob/2467093cc4e997234e0fb5c55e71f76b856c1ab1/domainconnect/domainconnect.py#L344-L400
def get_domain_connect_template_async_context(self, domain, provider_id, service_id, redirect_uri, params=None, state=None, service_id_in_path=False): """Makes full Domain Connect discovery of a domain and returns full context to request async consent. :param domain: str :param provider_id: str :param service_id: str :param redirect_uri: str :param params: dict :param state: str :param service_id_in_path: bool :return: (DomainConnectAsyncContext, str) asyncConsentUrl field of returned context shall be used to redirect the browser to second field is an indication of error :raises: NoDomainConnectRecordException when no _domainconnect record found :raises: NoDomainConnectSettingsException when settings are not found :raises: TemplateNotSupportedException when template is not found :raises: InvalidDomainConnectSettingsException when parts of the settings are missing :raises: DomainConnectException on other domain connect issues """ if params is None: params = {} config = self.get_domain_config(domain) self.check_template_supported(config, provider_id, service_id) if config.urlAsyncUX is None: raise InvalidDomainConnectSettingsException("No asynch UX URL in config") if service_id_in_path: if type(service_id) is list: raise DomainConnectException("Multiple services are only supported with service_id_in_path=false") async_url_format = '{0}/v2/domainTemplates/providers/{1}/services/{2}' \ '?client_id={1}&scope={2}&domain={3}&host={4}&{5}' else: if type(service_id) is list: service_id = '+'.join(service_id) async_url_format = '{0}/v2/domainTemplates/providers/{1}' \ '?client_id={1}&scope={2}&domain={3}&host={4}&{5}' if redirect_uri is not None: params["redirect_uri"] = redirect_uri if state is not None: params["state"] = state ret = DomainConnectAsyncContext(config, provider_id, service_id, redirect_uri, params) ret.asyncConsentUrl = async_url_format.format(config.urlAsyncUX, provider_id, service_id, config.domain_root, config.host, urllib.parse.urlencode( sorted(params.items(), key=lambda val: val[0]))) return ret
[ "def", "get_domain_connect_template_async_context", "(", "self", ",", "domain", ",", "provider_id", ",", "service_id", ",", "redirect_uri", ",", "params", "=", "None", ",", "state", "=", "None", ",", "service_id_in_path", "=", "False", ")", ":", "if", "params", "is", "None", ":", "params", "=", "{", "}", "config", "=", "self", ".", "get_domain_config", "(", "domain", ")", "self", ".", "check_template_supported", "(", "config", ",", "provider_id", ",", "service_id", ")", "if", "config", ".", "urlAsyncUX", "is", "None", ":", "raise", "InvalidDomainConnectSettingsException", "(", "\"No asynch UX URL in config\"", ")", "if", "service_id_in_path", ":", "if", "type", "(", "service_id", ")", "is", "list", ":", "raise", "DomainConnectException", "(", "\"Multiple services are only supported with service_id_in_path=false\"", ")", "async_url_format", "=", "'{0}/v2/domainTemplates/providers/{1}/services/{2}'", "'?client_id={1}&scope={2}&domain={3}&host={4}&{5}'", "else", ":", "if", "type", "(", "service_id", ")", "is", "list", ":", "service_id", "=", "'+'", ".", "join", "(", "service_id", ")", "async_url_format", "=", "'{0}/v2/domainTemplates/providers/{1}'", "'?client_id={1}&scope={2}&domain={3}&host={4}&{5}'", "if", "redirect_uri", "is", "not", "None", ":", "params", "[", "\"redirect_uri\"", "]", "=", "redirect_uri", "if", "state", "is", "not", "None", ":", "params", "[", "\"state\"", "]", "=", "state", "ret", "=", "DomainConnectAsyncContext", "(", "config", ",", "provider_id", ",", "service_id", ",", "redirect_uri", ",", "params", ")", "ret", ".", "asyncConsentUrl", "=", "async_url_format", ".", "format", "(", "config", ".", "urlAsyncUX", ",", "provider_id", ",", "service_id", ",", "config", ".", "domain_root", ",", "config", ".", "host", ",", "urllib", ".", "parse", ".", "urlencode", "(", "sorted", "(", "params", ".", "items", "(", ")", ",", "key", "=", "lambda", "val", ":", "val", "[", "0", "]", ")", ")", ")", "return", "ret" ]
Makes full Domain Connect discovery of a domain and returns full context to request async consent. :param domain: str :param provider_id: str :param service_id: str :param redirect_uri: str :param params: dict :param state: str :param service_id_in_path: bool :return: DomainConnectAsyncContext the asyncConsentUrl field of the returned context shall be used to redirect the browser to; errors are signalled via the exceptions below :raises: NoDomainConnectRecordException when no _domainconnect record found :raises: NoDomainConnectSettingsException when settings are not found :raises: TemplateNotSupportedException when template is not found :raises: InvalidDomainConnectSettingsException when parts of the settings are missing :raises: DomainConnectException on other domain connect issues
[ "Makes", "full", "Domain", "Connect", "discovery", "of", "a", "domain", "and", "returns", "full", "context", "to", "request", "async", "consent", "." ]
python
train
47.210526
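A usage sketch under heavy assumptions: a default-constructed DomainConnect client, the public Domain Connect example-service identifiers, and a domain that actually publishes a _domainconnect record (discovery performs live DNS/HTTP lookups, so this raises NoDomainConnectRecordException for a domain without one):

from domainconnect import DomainConnect

dc = DomainConnect()
ctx = dc.get_domain_connect_template_async_context(
    domain='yourdomain.example',                       # placeholder domain
    provider_id='exampleservice.domainconnect.org',    # example provider/template ids
    service_id='template2',
    redirect_uri='https://app.example.net/callback',
    state='opaque-state-123')
print(ctx.asyncConsentUrl)   # URL to redirect the user's browser to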
mehmetg/streak_client
streak_client/streak_client.py
https://github.com/mehmetg/streak_client/blob/46575510b4e4163a4a3cc06f7283a1ae377cdce6/streak_client/streak_client.py#L1004-L1016
def get_file(self, file_key): '''Gets file information Args: file_key key for the file to get return (status code, dict of file info) ''' uri = '/'.join([ self.api_uri, self.files_suffix, file_key ]) return self._req('get', uri)
[ "def", "get_file", "(", "self", ",", "file_key", ")", ":", "uri", "=", "'/'", ".", "join", "(", "[", "self", ".", "api_uri", ",", "self", ".", "files_suffix", ",", "file_key", "]", ")", "return", "self", ".", "_req", "(", "'get'", ",", "uri", ")" ]
Gets file information Args: file_key key for the file to get return (status code, dict of file info)
[ "Gets", "file", "information", "Args", ":", "file_key", "key", "for", "the", "file", "to", "get", "return", "(", "status", "code", "dict", "of", "file", "info", ")" ]
python
train
20
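A hedged usage sketch; the client class name and constructor are assumptions about streak_client, and the file key is a placeholder:

from streak_client import StreakClient   # assumed import path

client = StreakClient('MY_STREAK_API_KEY')
status, file_info = client.get_file('FILE_KEY_PLACEHOLDER')
if status == 200:
    print(file_info)   # the docstring promises a (status code, dict) pair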
jplusplus/statscraper
statscraper/base_scraper.py
https://github.com/jplusplus/statscraper/blob/932ec048b23d15b3dbdaf829facc55fd78ec0109/statscraper/base_scraper.py#L192-L196
def tuple(self): """ Tuple conversion to (value, dimensions), e.g.: (123, {dimension_1: "foo", dimension_2: "bar"}) """ return (self.value, {dv.id: dv.value for dv in self.dimensionvalues})
[ "def", "tuple", "(", "self", ")", ":", "return", "(", "self", ".", "value", ",", "{", "dv", ".", "id", ":", "dv", ".", "value", "for", "dv", "in", "self", ".", "dimensionvalues", "}", ")" ]
Tuple conversion to (value, dimensions), e.g.: (123, {dimension_1: "foo", dimension_2: "bar"})
[ "Tuple", "conversion", "to", "(", "value", "dimensions", ")", "e", ".", "g", ".", ":", "(", "123", "{", "dimension_1", ":", "foo", "dimension_2", ":", "bar", "}", ")" ]
python
train
43.6
waqasbhatti/astrobase
astrobase/checkplot/pkl_xmatch.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/checkplot/pkl_xmatch.py#L75-L154
def _parse_xmatch_catalog_header(xc, xk): ''' This parses the header for a catalog file and returns it as a file object. Parameters ---------- xc : str The file name of an xmatch catalog prepared previously. xk : list of str This is a list of column names to extract from the xmatch catalog. Returns ------- tuple The tuple returned is of the form:: (infd: the file object associated with the opened xmatch catalog, catdefdict: a dict describing the catalog column definitions, catcolinds: column number indices of the catalog, catcoldtypes: the numpy dtypes of the catalog columns, catcolnames: the names of each catalog column, catcolunits: the units associated with each catalog column) ''' catdef = [] # read in this catalog and transparently handle gzipped files if xc.endswith('.gz'): infd = gzip.open(xc,'rb') else: infd = open(xc,'rb') # read in the defs for line in infd: if line.decode().startswith('#'): catdef.append( line.decode().replace('#','').strip().rstrip('\n') ) if not line.decode().startswith('#'): break if not len(catdef) > 0: LOGERROR("catalog definition not parseable " "for catalog: %s, skipping..." % xc) return None catdef = ' '.join(catdef) catdefdict = json.loads(catdef) catdefkeys = [x['key'] for x in catdefdict['columns']] catdefdtypes = [x['dtype'] for x in catdefdict['columns']] catdefnames = [x['name'] for x in catdefdict['columns']] catdefunits = [x['unit'] for x in catdefdict['columns']] # get the correct column indices and dtypes for the requested columns # from the catdefdict catcolinds = [] catcoldtypes = [] catcolnames = [] catcolunits = [] for xkcol in xk: if xkcol in catdefkeys: xkcolind = catdefkeys.index(xkcol) catcolinds.append(xkcolind) catcoldtypes.append(catdefdtypes[xkcolind]) catcolnames.append(catdefnames[xkcolind]) catcolunits.append(catdefunits[xkcolind]) return (infd, catdefdict, catcolinds, catcoldtypes, catcolnames, catcolunits)
[ "def", "_parse_xmatch_catalog_header", "(", "xc", ",", "xk", ")", ":", "catdef", "=", "[", "]", "# read in this catalog and transparently handle gzipped files", "if", "xc", ".", "endswith", "(", "'.gz'", ")", ":", "infd", "=", "gzip", ".", "open", "(", "xc", ",", "'rb'", ")", "else", ":", "infd", "=", "open", "(", "xc", ",", "'rb'", ")", "# read in the defs", "for", "line", "in", "infd", ":", "if", "line", ".", "decode", "(", ")", ".", "startswith", "(", "'#'", ")", ":", "catdef", ".", "append", "(", "line", ".", "decode", "(", ")", ".", "replace", "(", "'#'", ",", "''", ")", ".", "strip", "(", ")", ".", "rstrip", "(", "'\\n'", ")", ")", "if", "not", "line", ".", "decode", "(", ")", ".", "startswith", "(", "'#'", ")", ":", "break", "if", "not", "len", "(", "catdef", ")", ">", "0", ":", "LOGERROR", "(", "\"catalog definition not parseable \"", "\"for catalog: %s, skipping...\"", "%", "xc", ")", "return", "None", "catdef", "=", "' '", ".", "join", "(", "catdef", ")", "catdefdict", "=", "json", ".", "loads", "(", "catdef", ")", "catdefkeys", "=", "[", "x", "[", "'key'", "]", "for", "x", "in", "catdefdict", "[", "'columns'", "]", "]", "catdefdtypes", "=", "[", "x", "[", "'dtype'", "]", "for", "x", "in", "catdefdict", "[", "'columns'", "]", "]", "catdefnames", "=", "[", "x", "[", "'name'", "]", "for", "x", "in", "catdefdict", "[", "'columns'", "]", "]", "catdefunits", "=", "[", "x", "[", "'unit'", "]", "for", "x", "in", "catdefdict", "[", "'columns'", "]", "]", "# get the correct column indices and dtypes for the requested columns", "# from the catdefdict", "catcolinds", "=", "[", "]", "catcoldtypes", "=", "[", "]", "catcolnames", "=", "[", "]", "catcolunits", "=", "[", "]", "for", "xkcol", "in", "xk", ":", "if", "xkcol", "in", "catdefkeys", ":", "xkcolind", "=", "catdefkeys", ".", "index", "(", "xkcol", ")", "catcolinds", ".", "append", "(", "xkcolind", ")", "catcoldtypes", ".", "append", "(", "catdefdtypes", "[", "xkcolind", "]", ")", "catcolnames", ".", "append", "(", "catdefnames", "[", "xkcolind", "]", ")", "catcolunits", ".", "append", "(", "catdefunits", "[", "xkcolind", "]", ")", "return", "(", "infd", ",", "catdefdict", ",", "catcolinds", ",", "catcoldtypes", ",", "catcolnames", ",", "catcolunits", ")" ]
This parses the header of an xmatch catalog file and returns the opened file object together with the parsed column metadata. Parameters ---------- xc : str The file name of an xmatch catalog prepared previously. xk : list of str This is a list of column names to extract from the xmatch catalog. Returns ------- tuple The tuple returned is of the form:: (infd: the file object associated with the opened xmatch catalog, catdefdict: a dict describing the catalog column definitions, catcolinds: column number indices of the catalog, catcoldtypes: the numpy dtypes of the catalog columns, catcolnames: the names of each catalog column, catcolunits: the units associated with each catalog column)
[ "This", "parses", "the", "header", "for", "a", "catalog", "file", "and", "returns", "it", "as", "a", "file", "object", "." ]
python
valid
28.3
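The header layout the parser expects is a run of '#'-prefixed lines holding one JSON catalog definition, followed by the data rows. A mock-up (the column definitions are illustrative, and the parser function is assumed to be in scope):

import gzip
import json
import tempfile

catdef = {'columns': [
    {'key': 'objectid', 'dtype': 'U20', 'name': 'object ID', 'unit': None},
    {'key': 'ra', 'dtype': 'f8', 'name': 'right ascension', 'unit': 'deg'},
]}
with tempfile.NamedTemporaryFile(suffix='.gz', delete=False) as tmp:
    path = tmp.name
with gzip.open(path, 'wb') as outf:
    outf.write(b'# ' + json.dumps(catdef).encode('utf-8') + b'\n')
    outf.write(b'obj1 45.0\n')

res = _parse_xmatch_catalog_header(path, ['ra'])
# res -> (open file object, catdefdict, [1], ['f8'], ['right ascension'], ['deg'])
# note: the header scan has already consumed the first data row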
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/parallel/client/magics.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/client/magics.py#L177-L185
def pxconfig(self, line): """configure default targets/blocking for %px magics""" args = magic_arguments.parse_argstring(self.pxconfig, line) if args.targets: self.view.targets = self._eval_target_str(args.targets) if args.block is not None: self.view.block = args.block if args.set_verbose is not None: self.verbose = args.set_verbose
[ "def", "pxconfig", "(", "self", ",", "line", ")", ":", "args", "=", "magic_arguments", ".", "parse_argstring", "(", "self", ".", "pxconfig", ",", "line", ")", "if", "args", ".", "targets", ":", "self", ".", "view", ".", "targets", "=", "self", ".", "_eval_target_str", "(", "args", ".", "targets", ")", "if", "args", ".", "block", "is", "not", "None", ":", "self", ".", "view", ".", "block", "=", "args", ".", "block", "if", "args", ".", "set_verbose", "is", "not", "None", ":", "self", ".", "verbose", "=", "args", ".", "set_verbose" ]
configure default targets/blocking for %px magics
[ "configure", "default", "targets", "/", "blocking", "for", "%px", "magics" ]
python
test
44.777778
lpantano/seqcluster
seqcluster/detect/metacluster.py
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/detect/metacluster.py#L378-L399
def _split_cluster_by_most_vote(c, p): """split cluster by most-vote strategy""" old, new = c[p[0]], c[p[1]] old_size = _get_seqs(old) new_size = _get_seqs(new) logger.debug("_most_vote: size of %s with %s - %s with %s" % (old.id, len(old_size), new.id, len(new_size))) if len(old_size) > len(new_size): keep, remove = old, new else: keep, remove = new, old common = list(set(old_size).intersection(new_size)) logger.debug("_most_vote: keep %s remove %s with common %s" % (keep.id, remove.id, len(common))) for idl in remove.loci2seq: if len(common) > 0: remove.loci2seq[idl] = list(set(remove.loci2seq[idl]) - set(common)) keep.loci2seq = {k: v for k, v in keep.loci2seq.iteritems() if len(v) > 0} remove.loci2seq = {k: v for k, v in remove.loci2seq.iteritems() if len(v) > 0} keep.update() remove.update() c[keep.id] = keep c[remove.id] = remove return c
[ "def", "_split_cluster_by_most_vote", "(", "c", ",", "p", ")", ":", "old", ",", "new", "=", "c", "[", "p", "[", "0", "]", "]", ",", "c", "[", "p", "[", "1", "]", "]", "old_size", "=", "_get_seqs", "(", "old", ")", "new_size", "=", "_get_seqs", "(", "new", ")", "logger", ".", "debug", "(", "\"_most_vote: size of %s with %s - %s with %s\"", "%", "(", "old", ".", "id", ",", "len", "(", "old_size", ")", ",", "new", ".", "id", ",", "len", "(", "new_size", ")", ")", ")", "if", "len", "(", "old_size", ")", ">", "len", "(", "new_size", ")", ":", "keep", ",", "remove", "=", "old", ",", "new", "else", ":", "keep", ",", "remove", "=", "new", ",", "old", "common", "=", "list", "(", "set", "(", "old_size", ")", ".", "intersection", "(", "new_size", ")", ")", "logger", ".", "debug", "(", "\"_most_vote: keep %s remove %s with common %s\"", "%", "(", "keep", ".", "id", ",", "remove", ".", "id", ",", "len", "(", "common", ")", ")", ")", "for", "idl", "in", "remove", ".", "loci2seq", ":", "if", "len", "(", "common", ")", ">", "0", ":", "remove", ".", "loci2seq", "[", "idl", "]", "=", "list", "(", "set", "(", "remove", ".", "loci2seq", "[", "idl", "]", ")", "-", "set", "(", "common", ")", ")", "keep", ".", "loci2seq", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "keep", ".", "loci2seq", ".", "iteritems", "(", ")", "if", "len", "(", "v", ")", ">", "0", "}", "remove", ".", "loci2seq", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "remove", ".", "loci2seq", ".", "iteritems", "(", ")", "if", "len", "(", "v", ")", ">", "0", "}", "keep", ".", "update", "(", ")", "remove", ".", "update", "(", ")", "c", "[", "keep", ".", "id", "]", "=", "keep", "c", "[", "remove", ".", "id", "]", "=", "remove", "return", "c" ]
split cluster by most-vote strategy
[ "split", "cluster", "by", "most", "-", "vote", "strategy" ]
python
train
42.727273
limix/glimix-core
glimix_core/lmm/_lmm_scan.py
https://github.com/limix/glimix-core/blob/cddd0994591d100499cc41c1f480ddd575e7a980/glimix_core/lmm/_lmm_scan.py#L554-L568
def _bstar_set(beta, alpha, yTBy, yTBX, yTBM, XTBX, XTBM, MTBM): """ Compute -2𝐲ᵀBEⱼ𝐛ⱼ + (𝐛ⱼEⱼ)ᵀBEⱼ𝐛ⱼ. For 𝐛ⱼ = [𝜷ⱼᵀ 𝜶ⱼᵀ]ᵀ. """ from numpy_sugar import epsilon r = yTBy r -= 2 * add.reduce([i @ beta for i in yTBX]) r -= 2 * add.reduce([i @ alpha for i in yTBM]) r += add.reduce([beta.T @ i @ beta for i in XTBX]) r += 2 * add.reduce([beta.T @ i @ alpha for i in XTBM]) r += add.reduce([alpha.T @ i @ alpha for i in MTBM]) return clip(r, epsilon.tiny, inf)
[ "def", "_bstar_set", "(", "beta", ",", "alpha", ",", "yTBy", ",", "yTBX", ",", "yTBM", ",", "XTBX", ",", "XTBM", ",", "MTBM", ")", ":", "from", "numpy_sugar", "import", "epsilon", "r", "=", "yTBy", "r", "-=", "2", "*", "add", ".", "reduce", "(", "[", "i", "@", "beta", "for", "i", "in", "yTBX", "]", ")", "r", "-=", "2", "*", "add", ".", "reduce", "(", "[", "i", "@", "alpha", "for", "i", "in", "yTBM", "]", ")", "r", "+=", "add", ".", "reduce", "(", "[", "beta", ".", "T", "@", "i", "@", "beta", "for", "i", "in", "XTBX", "]", ")", "r", "+=", "2", "*", "add", ".", "reduce", "(", "[", "beta", ".", "T", "@", "i", "@", "alpha", "for", "i", "in", "XTBM", "]", ")", "r", "+=", "add", ".", "reduce", "(", "[", "alpha", ".", "T", "@", "i", "@", "alpha", "for", "i", "in", "MTBM", "]", ")", "return", "clip", "(", "r", ",", "epsilon", ".", "tiny", ",", "inf", ")" ]
Compute -2𝐲ᵀBEⱼ𝐛ⱼ + (𝐛ⱼEⱼ)ᵀBEⱼ𝐛ⱼ. For 𝐛ⱼ = [𝜷ⱼᵀ 𝜶ⱼᵀ]ᵀ.
[ "Compute", "-", "2𝐲ᵀBEⱼ𝐛ⱼ", "+", "(", "𝐛ⱼEⱼ", ")", "ᵀBEⱼ𝐛ⱼ", "." ]
python
valid
32.733333
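A numeric sanity check of the identity the helper evaluates: expanding (y - X*beta - M*alpha)^T B (y - X*beta - M*alpha) gives exactly the precomputed-products form above. The single-element lists mirror the record's per-set inputs; the sizes and the weight matrix B are arbitrary.

import numpy as np

rng = np.random.default_rng(0)
y = rng.normal(size=5)
X = rng.normal(size=(5, 2))
M = rng.normal(size=(5, 1))
B = np.diag(rng.uniform(0.5, 2.0, size=5))   # any positive weight matrix
beta = rng.normal(size=2)
alpha = rng.normal(size=1)

resid = y - X @ beta - M @ alpha
r_direct = resid @ B @ resid

# the same value from the precomputed products the helper consumes
yTBy = y @ B @ y
yTBX, yTBM = [y @ B @ X], [y @ B @ M]
XTBX, XTBM, MTBM = [X.T @ B @ X], [X.T @ B @ M], [M.T @ B @ M]
r_pieces = (yTBy
            - 2 * (yTBX[0] @ beta) - 2 * (yTBM[0] @ alpha)
            + beta @ XTBX[0] @ beta
            + 2 * (beta @ XTBM[0] @ alpha)
            + alpha @ MTBM[0] @ alpha)

assert np.isclose(r_direct, r_pieces)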
klahnakoski/pyLibrary
mo_parquet/schema.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/mo_parquet/schema.py#L202-L219
def get_parquet_metadata( self, path='.' ): """ OUTPUT PARQUET METADATA COLUMNS :param path: FOR INTERNAL USE :return: LIST OF SchemaElement """ children = [] for name, child_schema in sort_using_key(self.more.items(), lambda p: p[0]): children.extend(child_schema.get_parquet_metadata(concat_field(path, name))) if path == '.': return children else: self.element.num_children = len(children) return [self.element] + children
[ "def", "get_parquet_metadata", "(", "self", ",", "path", "=", "'.'", ")", ":", "children", "=", "[", "]", "for", "name", ",", "child_schema", "in", "sort_using_key", "(", "self", ".", "more", ".", "items", "(", ")", ",", "lambda", "p", ":", "p", "[", "0", "]", ")", ":", "children", ".", "extend", "(", "child_schema", ".", "get_parquet_metadata", "(", "concat_field", "(", "path", ",", "name", ")", ")", ")", "if", "path", "==", "'.'", ":", "return", "children", "else", ":", "self", ".", "element", ".", "num_children", "=", "len", "(", "children", ")", "return", "[", "self", ".", "element", "]", "+", "children" ]
OUTPUT PARQUET METADATA COLUMNS :param path: FOR INTERNAL USE :return: LIST OF SchemaElement
[ "OUTPUT", "PARQUET", "METADATA", "COLUMNS", ":", "param", "path", ":", "FOR", "INTERNAL", "USE", ":", "return", ":", "LIST", "OF", "SchemaElement" ]
python
train
30.5
williamjameshandley/fgivenx
fgivenx/_utils.py
https://github.com/williamjameshandley/fgivenx/blob/a16790652a3cef3cfacd4b97da62786cb66fec13/fgivenx/_utils.py#L4-L61
def _check_args(logZ, f, x, samples, weights):
    """ Sanity-check the arguments for :func:`fgivenx.drivers.compute_samples`.

    Parameters
    ----------
    f, x, samples, weights:
        see arguments for :func:`fgivenx.drivers.compute_samples`
    """
    # convert to arrays
    if logZ is None:
        logZ = [0]
        f = [f]
        samples = [samples]
        weights = [weights]

    # logZ
    logZ = numpy.array(logZ, dtype='double')
    if len(logZ.shape) != 1:
        raise ValueError("logZ should be a 1D array")

    # x
    x = numpy.array(x, dtype='double')
    if len(x.shape) != 1:
        raise ValueError("x should be a 1D array")

    # f
    if len(logZ) != len(f):
        raise ValueError("len(logZ) = %i != len(f)= %i" % (len(logZ), len(f)))
    for func in f:
        if not callable(func):
            raise ValueError("first argument f must be function"
                             "(or list of functions) of two variables")

    # samples
    if len(logZ) != len(samples):
        raise ValueError("len(logZ) = %i != len(samples)= %i"
                         % (len(logZ), len(samples)))
    samples = [numpy.array(s, dtype='double') for s in samples]
    for s in samples:
        if len(s.shape) != 2:
            raise ValueError("each set of samples should be a 2D array")

    # weights
    if len(logZ) != len(weights):
        raise ValueError("len(logZ) = %i != len(weights)= %i"
                         % (len(logZ), len(weights)))
    weights = [numpy.array(w, dtype='double') if w is not None
               else numpy.ones(len(s), dtype='double')
               for w, s in zip(weights, samples)]
    for w, s in zip(weights, samples):
        if len(w.shape) != 1:
            raise ValueError("each set of weights should be a 1D array")
        if len(w) != len(s):
            raise ValueError("len(w) = %i != len(s) = %i"
                             % (len(s), len(w)))

    return logZ, f, x, samples, weights
[ "def", "_check_args", "(", "logZ", ",", "f", ",", "x", ",", "samples", ",", "weights", ")", ":", "# convert to arrays", "if", "logZ", "is", "None", ":", "logZ", "=", "[", "0", "]", "f", "=", "[", "f", "]", "samples", "=", "[", "samples", "]", "weights", "=", "[", "weights", "]", "# logZ", "logZ", "=", "numpy", ".", "array", "(", "logZ", ",", "dtype", "=", "'double'", ")", "if", "len", "(", "logZ", ".", "shape", ")", "is", "not", "1", ":", "raise", "ValueError", "(", "\"logZ should be a 1D array\"", ")", "# x", "x", "=", "numpy", ".", "array", "(", "x", ",", "dtype", "=", "'double'", ")", "if", "len", "(", "x", ".", "shape", ")", "is", "not", "1", ":", "raise", "ValueError", "(", "\"x should be a 1D array\"", ")", "# f", "if", "len", "(", "logZ", ")", "!=", "len", "(", "f", ")", ":", "raise", "ValueError", "(", "\"len(logZ) = %i != len(f)= %i\"", "%", "(", "len", "(", "logZ", ")", ",", "len", "(", "f", ")", ")", ")", "for", "func", "in", "f", ":", "if", "not", "callable", "(", "func", ")", ":", "raise", "ValueError", "(", "\"first argument f must be function\"", "\"(or list of functions) of two variables\"", ")", "# samples", "if", "len", "(", "logZ", ")", "!=", "len", "(", "samples", ")", ":", "raise", "ValueError", "(", "\"len(logZ) = %i != len(samples)= %i\"", "%", "(", "len", "(", "logZ", ")", ",", "len", "(", "samples", ")", ")", ")", "samples", "=", "[", "numpy", ".", "array", "(", "s", ",", "dtype", "=", "'double'", ")", "for", "s", "in", "samples", "]", "for", "s", "in", "samples", ":", "if", "len", "(", "s", ".", "shape", ")", "is", "not", "2", ":", "raise", "ValueError", "(", "\"each set of samples should be a 2D array\"", ")", "# weights", "if", "len", "(", "logZ", ")", "!=", "len", "(", "weights", ")", ":", "raise", "ValueError", "(", "\"len(logZ) = %i != len(weights)= %i\"", "%", "(", "len", "(", "logZ", ")", ",", "len", "(", "weights", ")", ")", ")", "weights", "=", "[", "numpy", ".", "array", "(", "w", ",", "dtype", "=", "'double'", ")", "if", "w", "is", "not", "None", "else", "numpy", ".", "ones", "(", "len", "(", "s", ")", ",", "dtype", "=", "'double'", ")", "for", "w", ",", "s", "in", "zip", "(", "weights", ",", "samples", ")", "]", "for", "w", ",", "s", "in", "zip", "(", "weights", ",", "samples", ")", ":", "if", "len", "(", "w", ".", "shape", ")", "is", "not", "1", ":", "raise", "ValueError", "(", "\"each set of weights should be a 1D array\"", ")", "if", "len", "(", "w", ")", "!=", "len", "(", "s", ")", ":", "raise", "ValueError", "(", "\"len(w) = %i != len(s) = %i\"", "%", "(", "len", "(", "s", ")", ",", "len", "(", "w", ")", ")", ")", "return", "logZ", ",", "f", ",", "x", ",", "samples", ",", "weights" ]
Sanity-check the arguments for :func:`fgivenx.drivers.compute_samples`. Parameters ---------- f, x, samples, weights: see arguments for :func:`fgivenx.drivers.compute_samples`
[ "Sanity", "-", "check", "the", "arguments", "for", ":", "func", ":", "fgivenx", ".", "drivers", ".", "compute_samples", "." ]
python
train
33.189655
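Assuming _check_args is in scope, the single-set path shows the normalization it performs: logZ=None wraps everything into one-element lists, and missing weights become uniform:

import numpy

f = lambda x, theta: theta[0] * x
x = numpy.linspace(0, 1, 10)
samples = numpy.random.rand(50, 1)   # 50 posterior samples of a 1-parameter model

logZ, fs, x, samps, ws = _check_args(None, f, x, samples, None)
print(logZ)                          # [0.]
print(len(fs), len(samps), len(ws))  # 1 1 1
print(ws[0].shape)                   # (50,) - uniform weights were filled in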
angr/angr
angr/analyses/vfg.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/vfg.py#L446-L511
def _pre_analysis(self): """ Executed before analysis starts. Necessary initializations are performed here. :return: None """ l.debug("Starting from %#x", self._start) # initialize the task stack self._task_stack = [ ] # initialize the execution counter dict self._execution_counter = defaultdict(int) # Generate a CFG if no CFG is provided if not self._cfg: l.debug("Generating a CFG, since none was given...") # TODO: can we use a fast CFG instead? note that fast CFG does not care of context sensitivity at all, but # TODO: for state merging, we also don't really care about context sensitivity. self._cfg = self.project.analyses.CFGEmulated(context_sensitivity_level=self._context_sensitivity_level, starts=(self._start,) ) if not self._cfg.normalized: l.warning("The given CFG is not normalized, which might impact the performance/accuracy of the VFG " "analysis.") # Prepare the state initial_state = self._prepare_initial_state(self._start, self._initial_state) initial_state.ip = self._start if self.project.arch.name.startswith('MIPS'): initial_state.regs.t9 = self._start # clear function merge points cache self._function_merge_points = {} # Create the initial state state = initial_state.copy() if self._start_at_function: # set the return address to an address so we can catch it and terminate the VSA analysis # TODO: Properly pick an address that will not conflict with any existing code and data in the program self._final_address = 0x4fff0000 self._set_return_address(state, self._final_address) call_stack = None if not self._start_at_function: # we should build a custom call stack call_stack = CallStack() call_stack = call_stack.call(None, self._function_start, retn_target=self._final_address) job = VFGJob(state.addr, state, self._context_sensitivity_level, jumpkind='Ijk_Boring', final_return_address=self._final_address, call_stack=call_stack ) block_id = BlockID.new(state.addr, job.get_call_stack_suffix(), job.jumpkind) job._block_id = block_id self._insert_job(job) # create the task function_analysis_task = FunctionAnalysis(self._function_start, self._final_address) function_analysis_task.jobs.append(job) self._task_stack.append(function_analysis_task)
[ "def", "_pre_analysis", "(", "self", ")", ":", "l", ".", "debug", "(", "\"Starting from %#x\"", ",", "self", ".", "_start", ")", "# initialize the task stack", "self", ".", "_task_stack", "=", "[", "]", "# initialize the execution counter dict", "self", ".", "_execution_counter", "=", "defaultdict", "(", "int", ")", "# Generate a CFG if no CFG is provided", "if", "not", "self", ".", "_cfg", ":", "l", ".", "debug", "(", "\"Generating a CFG, since none was given...\"", ")", "# TODO: can we use a fast CFG instead? note that fast CFG does not care of context sensitivity at all, but", "# TODO: for state merging, we also don't really care about context sensitivity.", "self", ".", "_cfg", "=", "self", ".", "project", ".", "analyses", ".", "CFGEmulated", "(", "context_sensitivity_level", "=", "self", ".", "_context_sensitivity_level", ",", "starts", "=", "(", "self", ".", "_start", ",", ")", ")", "if", "not", "self", ".", "_cfg", ".", "normalized", ":", "l", ".", "warning", "(", "\"The given CFG is not normalized, which might impact the performance/accuracy of the VFG \"", "\"analysis.\"", ")", "# Prepare the state", "initial_state", "=", "self", ".", "_prepare_initial_state", "(", "self", ".", "_start", ",", "self", ".", "_initial_state", ")", "initial_state", ".", "ip", "=", "self", ".", "_start", "if", "self", ".", "project", ".", "arch", ".", "name", ".", "startswith", "(", "'MIPS'", ")", ":", "initial_state", ".", "regs", ".", "t9", "=", "self", ".", "_start", "# clear function merge points cache", "self", ".", "_function_merge_points", "=", "{", "}", "# Create the initial state", "state", "=", "initial_state", ".", "copy", "(", ")", "if", "self", ".", "_start_at_function", ":", "# set the return address to an address so we can catch it and terminate the VSA analysis", "# TODO: Properly pick an address that will not conflict with any existing code and data in the program", "self", ".", "_final_address", "=", "0x4fff0000", "self", ".", "_set_return_address", "(", "state", ",", "self", ".", "_final_address", ")", "call_stack", "=", "None", "if", "not", "self", ".", "_start_at_function", ":", "# we should build a custom call stack", "call_stack", "=", "CallStack", "(", ")", "call_stack", "=", "call_stack", ".", "call", "(", "None", ",", "self", ".", "_function_start", ",", "retn_target", "=", "self", ".", "_final_address", ")", "job", "=", "VFGJob", "(", "state", ".", "addr", ",", "state", ",", "self", ".", "_context_sensitivity_level", ",", "jumpkind", "=", "'Ijk_Boring'", ",", "final_return_address", "=", "self", ".", "_final_address", ",", "call_stack", "=", "call_stack", ")", "block_id", "=", "BlockID", ".", "new", "(", "state", ".", "addr", ",", "job", ".", "get_call_stack_suffix", "(", ")", ",", "job", ".", "jumpkind", ")", "job", ".", "_block_id", "=", "block_id", "self", ".", "_insert_job", "(", "job", ")", "# create the task", "function_analysis_task", "=", "FunctionAnalysis", "(", "self", ".", "_function_start", ",", "self", ".", "_final_address", ")", "function_analysis_task", ".", "jobs", ".", "append", "(", "job", ")", "self", ".", "_task_stack", ".", "append", "(", "function_analysis_task", ")" ]
Executed before analysis starts. Necessary initializations are performed here. :return: None
[ "Executed", "before", "analysis", "starts", ".", "Necessary", "initializations", "are", "performed", "here", "." ]
python
train
40.181818
andymccurdy/redis-py
redis/client.py
https://github.com/andymccurdy/redis-py/blob/cdfe2befbe00db4a3c48c9ddd6d64dea15f6f0db/redis/client.py#L1742-L1756
def scan(self, cursor=0, match=None, count=None):
    """
    Incrementally return lists of key names. Also return a cursor
    indicating the scan position.

    ``match`` allows for filtering the keys by pattern

    ``count`` provides a hint to Redis about the number of keys to
    return per batch
    """
    pieces = [cursor]
    if match is not None:
        pieces.extend([Token.get_token('MATCH'), match])
    if count is not None:
        pieces.extend([Token.get_token('COUNT'), count])
    return self.execute_command('SCAN', *pieces)
[ "def", "scan", "(", "self", ",", "cursor", "=", "0", ",", "match", "=", "None", ",", "count", "=", "None", ")", ":", "pieces", "=", "[", "cursor", "]", "if", "match", "is", "not", "None", ":", "pieces", ".", "extend", "(", "[", "Token", ".", "get_token", "(", "'MATCH'", ")", ",", "match", "]", ")", "if", "count", "is", "not", "None", ":", "pieces", ".", "extend", "(", "[", "Token", ".", "get_token", "(", "'COUNT'", ")", ",", "count", "]", ")", "return", "self", ".", "execute_command", "(", "'SCAN'", ",", "*", "pieces", ")" ]
Incrementally return lists of key names. Also return a cursor indicating the scan position. ``match`` allows for filtering the keys by pattern ``count`` provides a hint to Redis about the number of keys to return per batch
[ "Incrementally", "return", "lists", "of", "key", "names", ".", "Also", "return", "a", "cursor", "indicating", "the", "scan", "position", "." ]
python
train
36.866667
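Typical redis-py usage drives the cursor until it wraps back to 0 (a server at localhost:6379 is assumed):

import redis

r = redis.Redis()                     # assumes a local Redis server
cursor, keys = r.scan(cursor=0, match='user:*', count=100)
while cursor != 0:                    # 0 means the full keyspace has been covered
    cursor, batch = r.scan(cursor=cursor, match='user:*', count=100)
    keys.extend(batch)
print(len(keys))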
googleapis/google-cloud-python
storage/google/cloud/storage/blob.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/blob.py#L1474-L1483
def make_public(self, client=None): """Update blob's ACL, granting read access to anonymous users. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the blob's bucket. """ self.acl.all().grant_read() self.acl.save(client=client)
[ "def", "make_public", "(", "self", ",", "client", "=", "None", ")", ":", "self", ".", "acl", ".", "all", "(", ")", ".", "grant_read", "(", ")", "self", ".", "acl", ".", "save", "(", "client", "=", "client", ")" ]
Update blob's ACL, granting read access to anonymous users. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the blob's bucket.
[ "Update", "blob", "s", "ACL", "granting", "read", "access", "to", "anonymous", "users", "." ]
python
train
43.7
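A usage sketch for google-cloud-storage (the bucket and object names are placeholders, and credentials must already be configured for the client):

from google.cloud import storage

client = storage.Client()
blob = client.bucket('my-bucket').blob('reports/2019.pdf')
blob.make_public()                 # grants read access to anonymous users on this object
print(blob.public_url)             # now fetchable without authentication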
manns/pyspread
pyspread/src/gui/_widgets.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_widgets.py#L331-L361
def fold_all(self): """Folds/unfolds all levels in the editor""" line_count = self.GetLineCount() expanding = True # find out if we are folding or unfolding for line_num in range(line_count): if self.GetFoldLevel(line_num) & stc.STC_FOLDLEVELHEADERFLAG: expanding = not self.GetFoldExpanded(line_num) break line_num = 0 while line_num < line_count: level = self.GetFoldLevel(line_num) if level & stc.STC_FOLDLEVELHEADERFLAG and \ (level & stc.STC_FOLDLEVELNUMBERMASK) == stc.STC_FOLDLEVELBASE: if expanding: self.SetFoldExpanded(line_num, True) line_num = self.expand(line_num, True) line_num = line_num - 1 else: last_child = self.GetLastChild(line_num, -1) self.SetFoldExpanded(line_num, False) if last_child > line_num: self.HideLines(line_num + 1, last_child) line_num = line_num + 1
[ "def", "fold_all", "(", "self", ")", ":", "line_count", "=", "self", ".", "GetLineCount", "(", ")", "expanding", "=", "True", "# find out if we are folding or unfolding", "for", "line_num", "in", "range", "(", "line_count", ")", ":", "if", "self", ".", "GetFoldLevel", "(", "line_num", ")", "&", "stc", ".", "STC_FOLDLEVELHEADERFLAG", ":", "expanding", "=", "not", "self", ".", "GetFoldExpanded", "(", "line_num", ")", "break", "line_num", "=", "0", "while", "line_num", "<", "line_count", ":", "level", "=", "self", ".", "GetFoldLevel", "(", "line_num", ")", "if", "level", "&", "stc", ".", "STC_FOLDLEVELHEADERFLAG", "and", "(", "level", "&", "stc", ".", "STC_FOLDLEVELNUMBERMASK", ")", "==", "stc", ".", "STC_FOLDLEVELBASE", ":", "if", "expanding", ":", "self", ".", "SetFoldExpanded", "(", "line_num", ",", "True", ")", "line_num", "=", "self", ".", "expand", "(", "line_num", ",", "True", ")", "line_num", "=", "line_num", "-", "1", "else", ":", "last_child", "=", "self", ".", "GetLastChild", "(", "line_num", ",", "-", "1", ")", "self", ".", "SetFoldExpanded", "(", "line_num", ",", "False", ")", "if", "last_child", ">", "line_num", ":", "self", ".", "HideLines", "(", "line_num", "+", "1", ",", "last_child", ")", "line_num", "=", "line_num", "+", "1" ]
Folds/unfolds all levels in the editor
[ "Folds", "/", "unfolds", "all", "levels", "in", "the", "editor" ]
python
train
35.193548
python-cmd2/cmd2
cmd2/parsing.py
https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/cmd2/parsing.py#L357-L383
def tokenize(self, line: str, expand: bool = True) -> List[str]: """ Lex a string into a list of tokens. Shortcuts and aliases are expanded and comments are removed :param line: the command line being lexed :param expand: If True, then aliases and shortcuts will be expanded. Set this to False if no expansion should occur because the command name is already known. Otherwise the command could be expanded if it matched an alias name. This is for cases where a do_* method was called manually (e.g do_help('alias'). :return: A list of tokens :raises ValueError if there are unclosed quotation marks. """ # expand shortcuts and aliases if expand: line = self._expand(line) # check if this line is a comment if line.lstrip().startswith(constants.COMMENT_CHAR): return [] # split on whitespace tokens = shlex_split(line) # custom lexing tokens = self._split_on_punctuation(tokens) return tokens
[ "def", "tokenize", "(", "self", ",", "line", ":", "str", ",", "expand", ":", "bool", "=", "True", ")", "->", "List", "[", "str", "]", ":", "# expand shortcuts and aliases", "if", "expand", ":", "line", "=", "self", ".", "_expand", "(", "line", ")", "# check if this line is a comment", "if", "line", ".", "lstrip", "(", ")", ".", "startswith", "(", "constants", ".", "COMMENT_CHAR", ")", ":", "return", "[", "]", "# split on whitespace", "tokens", "=", "shlex_split", "(", "line", ")", "# custom lexing", "tokens", "=", "self", ".", "_split_on_punctuation", "(", "tokens", ")", "return", "tokens" ]
Lex a string into a list of tokens. Shortcuts and aliases are expanded and comments are removed :param line: the command line being lexed :param expand: If True, then aliases and shortcuts will be expanded. Set this to False if no expansion should occur because the command name is already known. Otherwise the command could be expanded if it matched an alias name. This is for cases where a do_* method was called manually (e.g do_help('alias'). :return: A list of tokens :raises ValueError if there are unclosed quotation marks.
[ "Lex", "a", "string", "into", "a", "list", "of", "tokens", ".", "Shortcuts", "and", "aliases", "are", "expanded", "and", "comments", "are", "removed" ]
python
train
40.407407
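A hedged sketch with cmd2's parser; the default StatementParser construction and the exact token split are assumptions based on the class shown:

from cmd2.parsing import StatementParser

parser = StatementParser()
print(parser.tokenize('speak "hello world"; history'))
# roughly: ['speak', '"hello world"', ';', 'history']
# quotes survive shlex_split, and the ';' terminator is split out as its own token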
python-diamond/Diamond
src/collectors/postqueue/postqueue.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/postqueue/postqueue.py#L28-L39
def get_default_config(self): """ Returns the default collector settings """ config = super(PostqueueCollector, self).get_default_config() config.update({ 'path': 'postqueue', 'bin': '/usr/bin/postqueue', 'use_sudo': False, 'sudo_cmd': '/usr/bin/sudo', }) return config
[ "def", "get_default_config", "(", "self", ")", ":", "config", "=", "super", "(", "PostqueueCollector", ",", "self", ")", ".", "get_default_config", "(", ")", "config", ".", "update", "(", "{", "'path'", ":", "'postqueue'", ",", "'bin'", ":", "'/usr/bin/postqueue'", ",", "'use_sudo'", ":", "False", ",", "'sudo_cmd'", ":", "'/usr/bin/sudo'", ",", "}", ")", "return", "config" ]
Returns the default collector settings
[ "Returns", "the", "default", "collector", "settings" ]
python
train
33.583333
chrislit/abydos
abydos/distance/_monge_elkan.py
https://github.com/chrislit/abydos/blob/165466b3ff6afd8024a4c8660421b0c4e7773db9/abydos/distance/_monge_elkan.py#L106-L139
def sim_monge_elkan(src, tar, sim_func=sim_levenshtein, symmetric=False):
    """Return the Monge-Elkan similarity of two strings.

    This is a wrapper for :py:meth:`MongeElkan.sim`.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    sim_func : function
        The internal similarity metric to employ
    symmetric : bool
        Return a symmetric similarity measure

    Returns
    -------
    float
        Monge-Elkan similarity

    Examples
    --------
    >>> sim_monge_elkan('cat', 'hat')
    0.75
    >>> round(sim_monge_elkan('Niall', 'Neil'), 12)
    0.666666666667
    >>> round(sim_monge_elkan('aluminum', 'Catalan'), 12)
    0.388888888889
    >>> sim_monge_elkan('ATCG', 'TAGC')
    0.5

    """
    return MongeElkan().sim(src, tar, sim_func, symmetric)
[ "def", "sim_monge_elkan", "(", "src", ",", "tar", ",", "sim_func", "=", "sim_levenshtein", ",", "symmetric", "=", "False", ")", ":", "return", "MongeElkan", "(", ")", ".", "sim", "(", "src", ",", "tar", ",", "sim_func", ",", "symmetric", ")" ]
Return the Monge-Elkan similarity of two strings.

This is a wrapper for :py:meth:`MongeElkan.sim`.

Parameters
----------
src : str
    Source string for comparison
tar : str
    Target string for comparison
sim_func : function
    The internal similarity metric to employ
symmetric : bool
    Return a symmetric similarity measure

Returns
-------
float
    Monge-Elkan similarity

Examples
--------
>>> sim_monge_elkan('cat', 'hat')
0.75
>>> round(sim_monge_elkan('Niall', 'Neil'), 12)
0.666666666667
>>> round(sim_monge_elkan('aluminum', 'Catalan'), 12)
0.388888888889
>>> sim_monge_elkan('ATCG', 'TAGC')
0.5
[ "Return", "the", "Monge", "-", "Elkan", "similarity", "of", "two", "strings", "." ]
python
valid
24.352941
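A hedged call sketch for the wrapper above, assuming the sim_* convenience functions are re-exported from abydos.distance as in the 0.3.x releases:

from abydos.distance import sim_monge_elkan

print(sim_monge_elkan('cat', 'hat'))  # 0.75, per the doctest above
# symmetric=True averages sim(src, tar) and sim(tar, src)
print(sim_monge_elkan('Niall', 'Neil', symmetric=True))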
CivicSpleen/ambry
ambry/library/search_backends/postgres_backend.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/search_backends/postgres_backend.py#L241-L297
def _make_query_from_terms(self, terms, limit=None): """ Creates a query for dataset from decomposed search terms. Args: terms (dict or unicode or string): Returns: tuple of (TextClause, dict): First element is FTS query, second is parameters of the query. Element of the execution of the query is pair: (vid, score). """ expanded_terms = self._expand_terms(terms) if expanded_terms['doc']: # create query with real score. query_parts = ["SELECT vid, ts_rank_cd(setweight(doc,'C'), to_tsquery(:doc)) as score"] if expanded_terms['doc'] and expanded_terms['keywords']: query_parts = ["SELECT vid, ts_rank_cd(setweight(doc,'C'), to_tsquery(:doc)) " " + ts_rank_cd(setweight(to_tsvector(coalesce(keywords::text,'')),'B'), to_tsquery(:keywords))" ' as score'] else: # create query with score = 1 because query will not touch doc field. query_parts = ['SELECT vid, 1 as score'] query_parts.append('FROM dataset_index') query_params = {} where_counter = 0 if expanded_terms['doc']: where_counter += 1 query_parts.append('WHERE doc @@ to_tsquery(:doc)') query_params['doc'] = self.backend._and_join(expanded_terms['doc']) if expanded_terms['keywords']: query_params['keywords'] = self.backend._and_join(expanded_terms['keywords']) kw_q = "to_tsvector(coalesce(keywords::text,'')) @@ to_tsquery(:keywords)" query_parts.append( ("AND " if where_counter else "WHERE ") + kw_q ) query_parts.append('ORDER BY score DESC') if limit: query_parts.append('LIMIT :limit') query_params['limit'] = limit query_parts.append(';') deb_msg = 'Dataset terms conversion: `{}` terms converted to `{}` with `{}` params query.'\ .format(terms, query_parts, query_params) logger.debug(deb_msg) q = text('\n'.join(query_parts)), query_params logger.debug('Dataset search query: {}'.format(q)) return q
[ "def", "_make_query_from_terms", "(", "self", ",", "terms", ",", "limit", "=", "None", ")", ":", "expanded_terms", "=", "self", ".", "_expand_terms", "(", "terms", ")", "if", "expanded_terms", "[", "'doc'", "]", ":", "# create query with real score.", "query_parts", "=", "[", "\"SELECT vid, ts_rank_cd(setweight(doc,'C'), to_tsquery(:doc)) as score\"", "]", "if", "expanded_terms", "[", "'doc'", "]", "and", "expanded_terms", "[", "'keywords'", "]", ":", "query_parts", "=", "[", "\"SELECT vid, ts_rank_cd(setweight(doc,'C'), to_tsquery(:doc)) \"", "\" + ts_rank_cd(setweight(to_tsvector(coalesce(keywords::text,'')),'B'), to_tsquery(:keywords))\"", "' as score'", "]", "else", ":", "# create query with score = 1 because query will not touch doc field.", "query_parts", "=", "[", "'SELECT vid, 1 as score'", "]", "query_parts", ".", "append", "(", "'FROM dataset_index'", ")", "query_params", "=", "{", "}", "where_counter", "=", "0", "if", "expanded_terms", "[", "'doc'", "]", ":", "where_counter", "+=", "1", "query_parts", ".", "append", "(", "'WHERE doc @@ to_tsquery(:doc)'", ")", "query_params", "[", "'doc'", "]", "=", "self", ".", "backend", ".", "_and_join", "(", "expanded_terms", "[", "'doc'", "]", ")", "if", "expanded_terms", "[", "'keywords'", "]", ":", "query_params", "[", "'keywords'", "]", "=", "self", ".", "backend", ".", "_and_join", "(", "expanded_terms", "[", "'keywords'", "]", ")", "kw_q", "=", "\"to_tsvector(coalesce(keywords::text,'')) @@ to_tsquery(:keywords)\"", "query_parts", ".", "append", "(", "(", "\"AND \"", "if", "where_counter", "else", "\"WHERE \"", ")", "+", "kw_q", ")", "query_parts", ".", "append", "(", "'ORDER BY score DESC'", ")", "if", "limit", ":", "query_parts", ".", "append", "(", "'LIMIT :limit'", ")", "query_params", "[", "'limit'", "]", "=", "limit", "query_parts", ".", "append", "(", "';'", ")", "deb_msg", "=", "'Dataset terms conversion: `{}` terms converted to `{}` with `{}` params query.'", ".", "format", "(", "terms", ",", "query_parts", ",", "query_params", ")", "logger", ".", "debug", "(", "deb_msg", ")", "q", "=", "text", "(", "'\\n'", ".", "join", "(", "query_parts", ")", ")", ",", "query_params", "logger", ".", "debug", "(", "'Dataset search query: {}'", ".", "format", "(", "q", ")", ")", "return", "q" ]
Creates a query for dataset from decomposed search terms.

Args:
    terms (dict or unicode or string):

Returns:
    tuple of (TextClause, dict): First element is FTS query, second
        is parameters of the query. Each row produced by executing the query is a pair: (vid, score).
[ "Creates", "a", "query", "for", "dataset", "from", "decomposed", "search", "terms", "." ]
python
train
37.929825
Fantomas42/django-blog-zinnia
zinnia/admin/entry.py
https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/admin/entry.py#L315-L321
def mark_featured(self, request, queryset):
        """
        Mark selected entries as featured posts.
        """
        queryset.update(featured=True)
        self.message_user(
            request, _('Selected entries are now marked as featured.'))
[ "def", "mark_featured", "(", "self", ",", "request", ",", "queryset", ")", ":", "queryset", ".", "update", "(", "featured", "=", "True", ")", "self", ".", "message_user", "(", "request", ",", "_", "(", "'Selected entries are now marked as featured.'", ")", ")" ]
Mark selected entries as featured posts.
[ "Mark", "selected", "as", "featured", "post", "." ]
python
train
34.142857
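The method above is a standard Django admin action; a generic sketch of the same pattern outside Zinnia (model and field names are illustrative, not Zinnia's):

from django.contrib import admin
from django.utils.translation import gettext_lazy as _

class EntryAdmin(admin.ModelAdmin):
    actions = ['mark_featured']  # expose the action in the changelist dropdown

    def mark_featured(self, request, queryset):
        # bulk-update the selected rows, then notify the admin user
        queryset.update(featured=True)
        self.message_user(
            request, _('Selected entries are now marked as featured.'))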
push-things/django-th
th_slack/my_slack.py
https://github.com/push-things/django-th/blob/86c999d16bcf30b6224206e5b40824309834ac8c/th_slack/my_slack.py#L38-L77
def save_data(self, trigger_id, **data): """ get the data from the service :param trigger_id: id of the trigger :params data, dict :rtype: dict """ status = False service = TriggerService.objects.get(id=trigger_id) desc = service.description slack = Slack.objects.get(trigger_id=trigger_id) title = self.set_title(data) if title is None: title = data.get('subject') type_action = data.get('type_action', '') # set the bot username of Slack to the name of the # provider service username = service.provider.name.name.split('Service')[1] # 'build' a link title_link = '' if data.get('permalink'): title_link = ': <' + data.get('permalink') + '|' + title + '>' else: title_link = ': <' + data.get('link') + '|' + title + '>' data = '*' + desc + '*: ' + type_action + title_link payload = {'username': username, 'text': data} r = requests.post(slack.webhook_url, json=payload) if r.status_code == requests.codes.ok: status = True # return the data return status
[ "def", "save_data", "(", "self", ",", "trigger_id", ",", "*", "*", "data", ")", ":", "status", "=", "False", "service", "=", "TriggerService", ".", "objects", ".", "get", "(", "id", "=", "trigger_id", ")", "desc", "=", "service", ".", "description", "slack", "=", "Slack", ".", "objects", ".", "get", "(", "trigger_id", "=", "trigger_id", ")", "title", "=", "self", ".", "set_title", "(", "data", ")", "if", "title", "is", "None", ":", "title", "=", "data", ".", "get", "(", "'subject'", ")", "type_action", "=", "data", ".", "get", "(", "'type_action'", ",", "''", ")", "# set the bot username of Slack to the name of the", "# provider service", "username", "=", "service", ".", "provider", ".", "name", ".", "name", ".", "split", "(", "'Service'", ")", "[", "1", "]", "# 'build' a link", "title_link", "=", "''", "if", "data", ".", "get", "(", "'permalink'", ")", ":", "title_link", "=", "': <'", "+", "data", ".", "get", "(", "'permalink'", ")", "+", "'|'", "+", "title", "+", "'>'", "else", ":", "title_link", "=", "': <'", "+", "data", ".", "get", "(", "'link'", ")", "+", "'|'", "+", "title", "+", "'>'", "data", "=", "'*'", "+", "desc", "+", "'*: '", "+", "type_action", "+", "title_link", "payload", "=", "{", "'username'", ":", "username", ",", "'text'", ":", "data", "}", "r", "=", "requests", ".", "post", "(", "slack", ".", "webhook_url", ",", "json", "=", "payload", ")", "if", "r", ".", "status_code", "==", "requests", ".", "codes", ".", "ok", ":", "status", "=", "True", "# return the data", "return", "status" ]
save the data to the service

:param trigger_id: id of the trigger
:params data, dict
:rtype: dict
[ "get", "the", "data", "from", "the", "service" ]
python
train
30.425
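At its core, save_data is a single incoming-webhook POST; a standalone sketch of that call (the webhook URL and message text are placeholders):

import requests

webhook_url = 'https://hooks.slack.com/services/T000/B000/XXXX'  # placeholder
payload = {'username': 'RssService',
           'text': '*My trigger*: created <https://example.com/item|New item>'}
r = requests.post(webhook_url, json=payload)
print(r.status_code == requests.codes.ok)  # True on success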
project-ncl/pnc-cli
pnc_cli/buildrecords.py
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/buildrecords.py#L33-L39
def list_records_for_build_configuration(id=None, name=None, page_size=200, page_index=0, sort="", q=""): """ List all BuildRecords for a given BuildConfiguration """ data = list_records_for_build_configuration_raw(id, name, page_size, page_index, sort, q) if data: return utils.format_json_list(data)
[ "def", "list_records_for_build_configuration", "(", "id", "=", "None", ",", "name", "=", "None", ",", "page_size", "=", "200", ",", "page_index", "=", "0", ",", "sort", "=", "\"\"", ",", "q", "=", "\"\"", ")", ":", "data", "=", "list_records_for_build_configuration_raw", "(", "id", ",", "name", ",", "page_size", ",", "page_index", ",", "sort", ",", "q", ")", "if", "data", ":", "return", "utils", ".", "format_json_list", "(", "data", ")" ]
List all BuildRecords for a given BuildConfiguration
[ "List", "all", "BuildRecords", "for", "a", "given", "BuildConfiguration" ]
python
train
46.142857
bioidiap/bob.bio.spear
bob/bio/spear/utils/__init__.py
https://github.com/bioidiap/bob.bio.spear/blob/9f5d13d2e52d3b0c818f4abaa07cda15f62a34cd/bob/bio/spear/utils/__init__.py#L137-L198
def smoothing(labels, smoothing_window): """ Applies a smoothing on VAD""" if numpy.sum(labels)< smoothing_window: return labels segments = [] for k in range(1,len(labels)-1): if labels[k]==0 and labels[k-1]==1 and labels[k+1]==1 : labels[k]=1 for k in range(1,len(labels)-1): if labels[k]==1 and labels[k-1]==0 and labels[k+1]==0 : labels[k]=0 seg = numpy.array([0,0,labels[0]]) for k in range(1,len(labels)): if labels[k] != labels[k-1]: seg[1]=k-1 segments.append(seg) seg = numpy.array([k,k,labels[k]]) seg[1]=len(labels)-1 segments.append(seg) if len(segments) < 2: return labels curr = segments[0] next = segments[1] # Look at the first segment. If it's short enough, just change its labels if (curr[1]-curr[0]+1) < smoothing_window and (next[1]-next[0]+1) > smoothing_window: if curr[2]==1: labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1) curr[2]=0 else: #curr[2]==0 labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1) curr[2]=1 for k in range(1,len(segments)-1): prev = segments[k-1] curr = segments[k] next = segments[k+1] if (curr[1]-curr[0]+1) < smoothing_window and (prev[1]-prev[0]+1) > smoothing_window and (next[1]-next[0]+1) > smoothing_window: if curr[2]==1: labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1) curr[2]=0 else: #curr[2]==0 labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1) curr[2]=1 prev = segments[-2] curr = segments[-1] if (curr[1]-curr[0]+1) < smoothing_window and (prev[1]-prev[0]+1) > smoothing_window: if curr[2]==1: labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1) curr[2]=0 else: #if curr[2]==0 labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1) curr[2]=1 return labels
[ "def", "smoothing", "(", "labels", ",", "smoothing_window", ")", ":", "if", "numpy", ".", "sum", "(", "labels", ")", "<", "smoothing_window", ":", "return", "labels", "segments", "=", "[", "]", "for", "k", "in", "range", "(", "1", ",", "len", "(", "labels", ")", "-", "1", ")", ":", "if", "labels", "[", "k", "]", "==", "0", "and", "labels", "[", "k", "-", "1", "]", "==", "1", "and", "labels", "[", "k", "+", "1", "]", "==", "1", ":", "labels", "[", "k", "]", "=", "1", "for", "k", "in", "range", "(", "1", ",", "len", "(", "labels", ")", "-", "1", ")", ":", "if", "labels", "[", "k", "]", "==", "1", "and", "labels", "[", "k", "-", "1", "]", "==", "0", "and", "labels", "[", "k", "+", "1", "]", "==", "0", ":", "labels", "[", "k", "]", "=", "0", "seg", "=", "numpy", ".", "array", "(", "[", "0", ",", "0", ",", "labels", "[", "0", "]", "]", ")", "for", "k", "in", "range", "(", "1", ",", "len", "(", "labels", ")", ")", ":", "if", "labels", "[", "k", "]", "!=", "labels", "[", "k", "-", "1", "]", ":", "seg", "[", "1", "]", "=", "k", "-", "1", "segments", ".", "append", "(", "seg", ")", "seg", "=", "numpy", ".", "array", "(", "[", "k", ",", "k", ",", "labels", "[", "k", "]", "]", ")", "seg", "[", "1", "]", "=", "len", "(", "labels", ")", "-", "1", "segments", ".", "append", "(", "seg", ")", "if", "len", "(", "segments", ")", "<", "2", ":", "return", "labels", "curr", "=", "segments", "[", "0", "]", "next", "=", "segments", "[", "1", "]", "# Look at the first segment. If it's short enough, just change its labels", "if", "(", "curr", "[", "1", "]", "-", "curr", "[", "0", "]", "+", "1", ")", "<", "smoothing_window", "and", "(", "next", "[", "1", "]", "-", "next", "[", "0", "]", "+", "1", ")", ">", "smoothing_window", ":", "if", "curr", "[", "2", "]", "==", "1", ":", "labels", "[", "curr", "[", "0", "]", ":", "(", "curr", "[", "1", "]", "+", "1", ")", "]", "=", "numpy", ".", "zeros", "(", "curr", "[", "1", "]", "-", "curr", "[", "0", "]", "+", "1", ")", "curr", "[", "2", "]", "=", "0", "else", ":", "#curr[2]==0", "labels", "[", "curr", "[", "0", "]", ":", "(", "curr", "[", "1", "]", "+", "1", ")", "]", "=", "numpy", ".", "ones", "(", "curr", "[", "1", "]", "-", "curr", "[", "0", "]", "+", "1", ")", "curr", "[", "2", "]", "=", "1", "for", "k", "in", "range", "(", "1", ",", "len", "(", "segments", ")", "-", "1", ")", ":", "prev", "=", "segments", "[", "k", "-", "1", "]", "curr", "=", "segments", "[", "k", "]", "next", "=", "segments", "[", "k", "+", "1", "]", "if", "(", "curr", "[", "1", "]", "-", "curr", "[", "0", "]", "+", "1", ")", "<", "smoothing_window", "and", "(", "prev", "[", "1", "]", "-", "prev", "[", "0", "]", "+", "1", ")", ">", "smoothing_window", "and", "(", "next", "[", "1", "]", "-", "next", "[", "0", "]", "+", "1", ")", ">", "smoothing_window", ":", "if", "curr", "[", "2", "]", "==", "1", ":", "labels", "[", "curr", "[", "0", "]", ":", "(", "curr", "[", "1", "]", "+", "1", ")", "]", "=", "numpy", ".", "zeros", "(", "curr", "[", "1", "]", "-", "curr", "[", "0", "]", "+", "1", ")", "curr", "[", "2", "]", "=", "0", "else", ":", "#curr[2]==0", "labels", "[", "curr", "[", "0", "]", ":", "(", "curr", "[", "1", "]", "+", "1", ")", "]", "=", "numpy", ".", "ones", "(", "curr", "[", "1", "]", "-", "curr", "[", "0", "]", "+", "1", ")", "curr", "[", "2", "]", "=", "1", "prev", "=", "segments", "[", "-", "2", "]", "curr", "=", "segments", "[", "-", "1", "]", "if", "(", "curr", "[", "1", "]", "-", "curr", "[", "0", "]", "+", "1", ")", "<", "smoothing_window", "and", "(", "prev", "[", "1", "]", "-", 
"prev", "[", "0", "]", "+", "1", ")", ">", "smoothing_window", ":", "if", "curr", "[", "2", "]", "==", "1", ":", "labels", "[", "curr", "[", "0", "]", ":", "(", "curr", "[", "1", "]", "+", "1", ")", "]", "=", "numpy", ".", "zeros", "(", "curr", "[", "1", "]", "-", "curr", "[", "0", "]", "+", "1", ")", "curr", "[", "2", "]", "=", "0", "else", ":", "#if curr[2]==0", "labels", "[", "curr", "[", "0", "]", ":", "(", "curr", "[", "1", "]", "+", "1", ")", "]", "=", "numpy", ".", "ones", "(", "curr", "[", "1", "]", "-", "curr", "[", "0", "]", "+", "1", ")", "curr", "[", "2", "]", "=", "1", "return", "labels" ]
Applies a smoothing on VAD
[ "Applies", "a", "smoothing", "on", "VAD" ]
python
train
30.145161
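To see what the smoothing does, run it on a toy VAD label vector: a short 0-run sandwiched between long 1-runs gets flipped. This assumes smoothing() from the record above is in scope, e.g. imported from bob.bio.spear.utils.

import numpy
# from bob.bio.spear.utils import smoothing  # if the package is installed

labels = numpy.array([1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1])
smoothed = smoothing(labels.copy(), smoothing_window=3)
print(smoothed)  # the 2-frame 0-run is shorter than the window, so it flips to 1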
carpedm20/fbchat
fbchat/_client.py
https://github.com/carpedm20/fbchat/blob/f480d68b5773473e6daba7f66075ee30e8d737a8/fbchat/_client.py#L1630-L1649
def sendLocalVoiceClips( self, clip_paths, message=None, thread_id=None, thread_type=ThreadType.USER ): """ Sends local voice clips to a thread :param clip_paths: Paths of clips to upload and send :param message: Additional message :param thread_id: User/Group ID to send to. See :ref:`intro_threads` :param thread_type: See :ref:`intro_threads` :type thread_type: models.ThreadType :return: :ref:`Message ID <intro_message_ids>` of the sent files :raises: FBchatException if request failed """ clip_paths = require_list(clip_paths) with get_files_from_paths(clip_paths) as x: files = self._upload(x, voice_clip=True) return self._sendFiles( files=files, message=message, thread_id=thread_id, thread_type=thread_type )
[ "def", "sendLocalVoiceClips", "(", "self", ",", "clip_paths", ",", "message", "=", "None", ",", "thread_id", "=", "None", ",", "thread_type", "=", "ThreadType", ".", "USER", ")", ":", "clip_paths", "=", "require_list", "(", "clip_paths", ")", "with", "get_files_from_paths", "(", "clip_paths", ")", "as", "x", ":", "files", "=", "self", ".", "_upload", "(", "x", ",", "voice_clip", "=", "True", ")", "return", "self", ".", "_sendFiles", "(", "files", "=", "files", ",", "message", "=", "message", ",", "thread_id", "=", "thread_id", ",", "thread_type", "=", "thread_type", ")" ]
Sends local voice clips to a thread :param clip_paths: Paths of clips to upload and send :param message: Additional message :param thread_id: User/Group ID to send to. See :ref:`intro_threads` :param thread_type: See :ref:`intro_threads` :type thread_type: models.ThreadType :return: :ref:`Message ID <intro_message_ids>` of the sent files :raises: FBchatException if request failed
[ "Sends", "local", "voice", "clips", "to", "a", "thread" ]
python
train
42.4
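A hedged usage sketch for the method above; credentials, paths, and the thread id are placeholders, and the email/password login reflects the fbchat Client API at this version:

from fbchat import Client
from fbchat.models import ThreadType

client = Client('<email>', '<password>')
client.sendLocalVoiceClips(
    clip_paths=['note1.mp3', 'note2.mp3'],  # local audio files to upload
    message='Voice notes attached',
    thread_id='<thread id>',
    thread_type=ThreadType.USER,
)
client.logout()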
Yelp/threat_intel
threat_intel/virustotal.py
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/virustotal.py#L95-L104
def get_file_network_traffic(self, resources): """Retrieves a report about the network traffic of a md5, sha1, and/or sha2 hash of file, when it is executed. Args: resources: list of string hashes. """ api_name = 'virustotal-file-network-traffic' api_endpoint = 'file/network-traffic' return self._extract_all_responses(resources, api_endpoint, api_name)
[ "def", "get_file_network_traffic", "(", "self", ",", "resources", ")", ":", "api_name", "=", "'virustotal-file-network-traffic'", "api_endpoint", "=", "'file/network-traffic'", "return", "self", ".", "_extract_all_responses", "(", "resources", ",", "api_endpoint", ",", "api_name", ")" ]
Retrieves a report about the network traffic of a md5, sha1, and/or sha2 hash of file, when it is executed. Args: resources: list of string hashes.
[ "Retrieves", "a", "report", "about", "the", "network", "traffic", "of", "a", "md5", "sha1", "and", "/", "or", "sha2", "hash", "of", "file", "when", "it", "is", "executed", "." ]
python
train
41.7
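Call pattern for the helper above, assuming threat_intel's VirusTotalApi constructor takes the API key; the hashes below are placeholders:

from threat_intel.virustotal import VirusTotalApi

vt = VirusTotalApi('<your VirusTotal API key>')
hashes = ['99017f6eebbac24f351415dd410d522d',
          '88817f6eebbac24f351415dd410d522d']
print(vt.get_file_network_traffic(hashes))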
fabioz/PyDev.Debugger
third_party/pep8/lib2to3/lib2to3/refactor.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/lib2to3/lib2to3/refactor.py#L552-L593
def refactor_docstring(self, input, filename): """Refactors a docstring, looking for doctests. This returns a modified version of the input string. It looks for doctests, which start with a ">>>" prompt, and may be continued with "..." prompts, as long as the "..." is indented the same as the ">>>". (Unfortunately we can't use the doctest module's parser, since, like most parsers, it is not geared towards preserving the original source.) """ result = [] block = None block_lineno = None indent = None lineno = 0 for line in input.splitlines(True): lineno += 1 if line.lstrip().startswith(self.PS1): if block is not None: result.extend(self.refactor_doctest(block, block_lineno, indent, filename)) block_lineno = lineno block = [line] i = line.find(self.PS1) indent = line[:i] elif (indent is not None and (line.startswith(indent + self.PS2) or line == indent + self.PS2.rstrip() + u"\n")): block.append(line) else: if block is not None: result.extend(self.refactor_doctest(block, block_lineno, indent, filename)) block = None indent = None result.append(line) if block is not None: result.extend(self.refactor_doctest(block, block_lineno, indent, filename)) return u"".join(result)
[ "def", "refactor_docstring", "(", "self", ",", "input", ",", "filename", ")", ":", "result", "=", "[", "]", "block", "=", "None", "block_lineno", "=", "None", "indent", "=", "None", "lineno", "=", "0", "for", "line", "in", "input", ".", "splitlines", "(", "True", ")", ":", "lineno", "+=", "1", "if", "line", ".", "lstrip", "(", ")", ".", "startswith", "(", "self", ".", "PS1", ")", ":", "if", "block", "is", "not", "None", ":", "result", ".", "extend", "(", "self", ".", "refactor_doctest", "(", "block", ",", "block_lineno", ",", "indent", ",", "filename", ")", ")", "block_lineno", "=", "lineno", "block", "=", "[", "line", "]", "i", "=", "line", ".", "find", "(", "self", ".", "PS1", ")", "indent", "=", "line", "[", ":", "i", "]", "elif", "(", "indent", "is", "not", "None", "and", "(", "line", ".", "startswith", "(", "indent", "+", "self", ".", "PS2", ")", "or", "line", "==", "indent", "+", "self", ".", "PS2", ".", "rstrip", "(", ")", "+", "u\"\\n\"", ")", ")", ":", "block", ".", "append", "(", "line", ")", "else", ":", "if", "block", "is", "not", "None", ":", "result", ".", "extend", "(", "self", ".", "refactor_doctest", "(", "block", ",", "block_lineno", ",", "indent", ",", "filename", ")", ")", "block", "=", "None", "indent", "=", "None", "result", ".", "append", "(", "line", ")", "if", "block", "is", "not", "None", ":", "result", ".", "extend", "(", "self", ".", "refactor_doctest", "(", "block", ",", "block_lineno", ",", "indent", ",", "filename", ")", ")", "return", "u\"\"", ".", "join", "(", "result", ")" ]
Refactors a docstring, looking for doctests. This returns a modified version of the input string. It looks for doctests, which start with a ">>>" prompt, and may be continued with "..." prompts, as long as the "..." is indented the same as the ">>>". (Unfortunately we can't use the doctest module's parser, since, like most parsers, it is not geared towards preserving the original source.)
[ "Refactors", "a", "docstring", "looking", "for", "doctests", "." ]
python
train
41.428571
duguyue100/minesweeper
minesweeper/msboard.py
https://github.com/duguyue100/minesweeper/blob/38b1910f4c34d0275ac10a300285aba6f1d91d61/minesweeper/msboard.py#L46-L74
def init_board(self):
        """Init a valid board by given settings.

        Parameters
        ----------
        mine_map : numpy.ndarray
            the map that defines the mine
            0 is empty, 1 is mine
        info_map : numpy.ndarray
            the map that presents to gamer
            0-8 is number of mines in surrounding.
            9 is flagged field.
            10 is questioned field.
            11 is undiscovered field.
            12 is a mine field.
        """
        self.mine_map = np.zeros((self.board_height, self.board_width),
                                 dtype=np.uint8)
        idx_list = np.random.permutation(self.board_width*self.board_height)
        idx_list = idx_list[:self.num_mines]

        for idx in idx_list:
            idx_x = int(idx % self.board_width)
            idx_y = int(idx / self.board_width)
            self.mine_map[idx_y, idx_x] = 1

        self.info_map = np.ones((self.board_height, self.board_width),
                                dtype=np.uint8)*11
[ "def", "init_board", "(", "self", ")", ":", "self", ".", "mine_map", "=", "np", ".", "zeros", "(", "(", "self", ".", "board_height", ",", "self", ".", "board_width", ")", ",", "dtype", "=", "np", ".", "uint8", ")", "idx_list", "=", "np", ".", "random", ".", "permutation", "(", "self", ".", "board_width", "*", "self", ".", "board_height", ")", "idx_list", "=", "idx_list", "[", ":", "self", ".", "num_mines", "]", "for", "idx", "in", "idx_list", ":", "idx_x", "=", "int", "(", "idx", "%", "self", ".", "board_width", ")", "idx_y", "=", "int", "(", "idx", "/", "self", ".", "board_width", ")", "self", ".", "mine_map", "[", "idx_y", ",", "idx_x", "]", "=", "1", "self", ".", "info_map", "=", "np", ".", "ones", "(", "(", "self", ".", "board_height", ",", "self", ".", "board_width", ")", ",", "dtype", "=", "np", ".", "uint8", ")", "*", "11" ]
Init a valid board by given settings.

Parameters
----------
mine_map : numpy.ndarray
    the map that defines the mine
    0 is empty, 1 is mine
info_map : numpy.ndarray
    the map that presents to gamer
    0-8 is number of mines in surrounding.
    9 is flagged field.
    10 is questioned field.
    11 is undiscovered field.
    12 is a mine field.
[ "Init", "a", "valid", "board", "by", "given", "settings", "." ]
python
train
34.586207
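The interesting step in init_board is sampling distinct mine cells via a permutation of flat indices; a standalone sketch of just that step:

import numpy as np

board_height, board_width, num_mines = 9, 9, 10
mine_map = np.zeros((board_height, board_width), dtype=np.uint8)

# take the first num_mines entries of a shuffled range: distinct by construction
idx_list = np.random.permutation(board_width * board_height)[:num_mines]
for idx in idx_list:
    mine_map[idx // board_width, idx % board_width] = 1

print(int(mine_map.sum()))  # 10 mines, no duplicates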
Azure/msrestazure-for-python
msrestazure/tools.py
https://github.com/Azure/msrestazure-for-python/blob/5f99262305692525d03ca87d2c5356b05c5aa874/msrestazure/tools.py#L85-L104
def _register_rp(session, url_prefix, rp_name):
    """Synchronously register the RP passed as parameter.

    Return False if we have a reason to believe this didn't work
    """
    post_url = "{}providers/{}/register?api-version=2016-02-01".format(url_prefix, rp_name)
    get_url = "{}providers/{}?api-version=2016-02-01".format(url_prefix, rp_name)
    _LOGGER.warning("Resource provider '%s' used by this operation is not "
                    "registered. We are registering for you.", rp_name)
    post_response = session.post(post_url)
    if post_response.status_code != 200:
        _LOGGER.warning("Registration failed. Please register manually.")
        return False
    while True:
        time.sleep(10)
        rp_info = session.get(get_url).json()
        if rp_info['registrationState'] == 'Registered':
            _LOGGER.warning("Registration succeeded.")
            return True
[ "def", "_register_rp", "(", "session", ",", "url_prefix", ",", "rp_name", ")", ":", "post_url", "=", "\"{}providers/{}/register?api-version=2016-02-01\"", ".", "format", "(", "url_prefix", ",", "rp_name", ")", "get_url", "=", "\"{}providers/{}?api-version=2016-02-01\"", ".", "format", "(", "url_prefix", ",", "rp_name", ")", "_LOGGER", ".", "warning", "(", "\"Resource provider '%s' used by this operation is not \"", "\"registered. We are registering for you.\"", ",", "rp_name", ")", "post_response", "=", "session", ".", "post", "(", "post_url", ")", "if", "post_response", ".", "status_code", "!=", "200", ":", "_LOGGER", ".", "warning", "(", "\"Registration failed. Please register manually.\"", ")", "return", "False", "while", "True", ":", "time", ".", "sleep", "(", "10", ")", "rp_info", "=", "session", ".", "get", "(", "get_url", ")", ".", "json", "(", ")", "if", "rp_info", "[", "'registrationState'", "]", "==", "'Registered'", ":", "_LOGGER", ".", "warning", "(", "\"Registration succeeded.\"", ")", "return", "True" ]
Synchronously register the RP passed as parameter.
Return False if we have a reason to believe this didn't work
[ "Synchronously", "register", "the", "RP", "is", "paremeter", ".", "Return", "False", "if", "we", "have", "a", "reason", "to", "believe", "this", "didn", "t", "work" ]
python
train
44
wbond/asn1crypto
asn1crypto/_types.py
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/_types.py#L29-L46
def type_name(value): """ Returns a user-readable name for the type of an object :param value: A value to get the type name of :return: A unicode string of the object's type name """ if inspect.isclass(value): cls = value else: cls = value.__class__ if cls.__module__ in set(['builtins', '__builtin__']): return cls.__name__ return '%s.%s' % (cls.__module__, cls.__name__)
[ "def", "type_name", "(", "value", ")", ":", "if", "inspect", ".", "isclass", "(", "value", ")", ":", "cls", "=", "value", "else", ":", "cls", "=", "value", ".", "__class__", "if", "cls", ".", "__module__", "in", "set", "(", "[", "'builtins'", ",", "'__builtin__'", "]", ")", ":", "return", "cls", ".", "__name__", "return", "'%s.%s'", "%", "(", "cls", ".", "__module__", ",", "cls", ".", "__name__", ")" ]
Returns a user-readable name for the type of an object :param value: A value to get the type name of :return: A unicode string of the object's type name
[ "Returns", "a", "user", "-", "readable", "name", "for", "the", "type", "of", "an", "object" ]
python
train
24.111111
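A quick demonstration of the helper above, assuming type_name() is in scope (asn1crypto._types is a private module, so the import is for illustration only):

import datetime
# from asn1crypto._types import type_name  # private module; illustration only

print(type_name(0))                       # 'int' -- builtins stay unqualified
print(type_name(datetime.date.today()))   # 'datetime.date'
print(type_name(datetime.date))           # classes are accepted as well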
ionelmc/python-manhole
src/manhole/__init__.py
https://github.com/ionelmc/python-manhole/blob/6a519a1f25142b047e814c6d00f4ef404856a15d/src/manhole/__init__.py#L546-L552
def patched_forkpty(self): """Fork a new process with a new pseudo-terminal as controlling tty.""" pid, master_fd = self.original_os_forkpty() if not pid: _LOG('Fork detected. Reinstalling Manhole.') self.reinstall() return pid, master_fd
[ "def", "patched_forkpty", "(", "self", ")", ":", "pid", ",", "master_fd", "=", "self", ".", "original_os_forkpty", "(", ")", "if", "not", "pid", ":", "_LOG", "(", "'Fork detected. Reinstalling Manhole.'", ")", "self", ".", "reinstall", "(", ")", "return", "pid", ",", "master_fd" ]
Fork a new process with a new pseudo-terminal as controlling tty.
[ "Fork", "a", "new", "process", "with", "a", "new", "pseudo", "-", "terminal", "as", "controlling", "tty", "." ]
python
train
41.142857
inspirehep/harvesting-kit
harvestingkit/scoap3utils.py
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/scoap3utils.py#L120-L158
def check_pkgs_integrity(filelist, logger, ftp_connector, timeout=120, sleep_time=10): """ Checks if files are not being uploaded to server. @timeout - time after which the script will register an error. """ ref_1 = [] ref_2 = [] i = 1 print >> sys.stdout, "\nChecking packages integrity." for filename in filelist: # ref_1.append(self.get_remote_file_size(filename)) get_remote_file_size(ftp_connector, filename, ref_1) print >> sys.stdout, "\nGoing to sleep for %i sec." % (sleep_time,) time.sleep(sleep_time) while sleep_time*i < timeout: for filename in filelist: # ref_2.append(self.get_remote_file_size(filename)) get_remote_file_size(ftp_connector, filename, ref_2) if ref_1 == ref_2: print >> sys.stdout, "\nIntegrity OK:)" logger.info("Packages integrity OK.") break else: print >> sys.stdout, "\nWaiting %d time for itegrity..." % (i,) logger.info("\nWaiting %d time for itegrity..." % (i,)) i += 1 ref_1, ref_2 = ref_2, [] time.sleep(sleep_time) else: not_finished_files = [] for count, val1 in enumerate(ref_1): if val1 != ref_2[count]: not_finished_files.append(filelist[count]) print >> sys.stdout, "\nOMG, OMG something wrong with integrity." logger.error("Integrity check faild for files %s" % (not_finished_files,))
[ "def", "check_pkgs_integrity", "(", "filelist", ",", "logger", ",", "ftp_connector", ",", "timeout", "=", "120", ",", "sleep_time", "=", "10", ")", ":", "ref_1", "=", "[", "]", "ref_2", "=", "[", "]", "i", "=", "1", "print", ">>", "sys", ".", "stdout", ",", "\"\\nChecking packages integrity.\"", "for", "filename", "in", "filelist", ":", "# ref_1.append(self.get_remote_file_size(filename))", "get_remote_file_size", "(", "ftp_connector", ",", "filename", ",", "ref_1", ")", "print", ">>", "sys", ".", "stdout", ",", "\"\\nGoing to sleep for %i sec.\"", "%", "(", "sleep_time", ",", ")", "time", ".", "sleep", "(", "sleep_time", ")", "while", "sleep_time", "*", "i", "<", "timeout", ":", "for", "filename", "in", "filelist", ":", "# ref_2.append(self.get_remote_file_size(filename))", "get_remote_file_size", "(", "ftp_connector", ",", "filename", ",", "ref_2", ")", "if", "ref_1", "==", "ref_2", ":", "print", ">>", "sys", ".", "stdout", ",", "\"\\nIntegrity OK:)\"", "logger", ".", "info", "(", "\"Packages integrity OK.\"", ")", "break", "else", ":", "print", ">>", "sys", ".", "stdout", ",", "\"\\nWaiting %d time for itegrity...\"", "%", "(", "i", ",", ")", "logger", ".", "info", "(", "\"\\nWaiting %d time for itegrity...\"", "%", "(", "i", ",", ")", ")", "i", "+=", "1", "ref_1", ",", "ref_2", "=", "ref_2", ",", "[", "]", "time", ".", "sleep", "(", "sleep_time", ")", "else", ":", "not_finished_files", "=", "[", "]", "for", "count", ",", "val1", "in", "enumerate", "(", "ref_1", ")", ":", "if", "val1", "!=", "ref_2", "[", "count", "]", ":", "not_finished_files", ".", "append", "(", "filelist", "[", "count", "]", ")", "print", ">>", "sys", ".", "stdout", ",", "\"\\nOMG, OMG something wrong with integrity.\"", "logger", ".", "error", "(", "\"Integrity check faild for files %s\"", "%", "(", "not_finished_files", ",", ")", ")" ]
Checks that files are no longer being uploaded to the server.

@timeout - time after which the script will register an error.
[ "Checks", "if", "files", "are", "not", "being", "uploaded", "to", "server", "." ]
python
valid
38.820513
bigchaindb/bigchaindb-driver
bigchaindb_driver/connection.py
https://github.com/bigchaindb/bigchaindb-driver/blob/c294a535f0696bd19483ae11a4882b74e6fc061e/bigchaindb_driver/connection.py#L41-L99
def request(self, method, *, path=None, json=None, params=None, headers=None, timeout=None, backoff_cap=None, **kwargs): """Performs an HTTP request with the given parameters. Implements exponential backoff. If `ConnectionError` occurs, a timestamp equal to now + the default delay (`BACKOFF_DELAY`) is assigned to the object. The timestamp is in UTC. Next time the function is called, it either waits till the timestamp is passed or raises `TimeoutError`. If `ConnectionError` occurs two or more times in a row, the retry count is incremented and the new timestamp is calculated as now + the default delay multiplied by two to the power of the number of retries. If a request is successful, the backoff timestamp is removed, the retry count is back to zero. Args: method (str): HTTP method (e.g.: ``'GET'``). path (str): API endpoint path (e.g.: ``'/transactions'``). json (dict): JSON data to send along with the request. params (dict): Dictionary of URL (query) parameters. headers (dict): Optional headers to pass to the request. timeout (int): Optional timeout in seconds. backoff_cap (int): The maximal allowed backoff delay in seconds to be assigned to a node. kwargs: Optional keyword arguments. """ backoff_timedelta = self.get_backoff_timedelta() if timeout is not None and timeout < backoff_timedelta: raise TimeoutError if backoff_timedelta > 0: time.sleep(backoff_timedelta) connExc = None timeout = timeout if timeout is None else timeout - backoff_timedelta try: response = self._request( method=method, timeout=timeout, url=self.node_url + path if path else self.node_url, json=json, params=params, headers=headers, **kwargs, ) except ConnectionError as err: connExc = err raise err finally: self.update_backoff_time(success=connExc is None, backoff_cap=backoff_cap) return response
[ "def", "request", "(", "self", ",", "method", ",", "*", ",", "path", "=", "None", ",", "json", "=", "None", ",", "params", "=", "None", ",", "headers", "=", "None", ",", "timeout", "=", "None", ",", "backoff_cap", "=", "None", ",", "*", "*", "kwargs", ")", ":", "backoff_timedelta", "=", "self", ".", "get_backoff_timedelta", "(", ")", "if", "timeout", "is", "not", "None", "and", "timeout", "<", "backoff_timedelta", ":", "raise", "TimeoutError", "if", "backoff_timedelta", ">", "0", ":", "time", ".", "sleep", "(", "backoff_timedelta", ")", "connExc", "=", "None", "timeout", "=", "timeout", "if", "timeout", "is", "None", "else", "timeout", "-", "backoff_timedelta", "try", ":", "response", "=", "self", ".", "_request", "(", "method", "=", "method", ",", "timeout", "=", "timeout", ",", "url", "=", "self", ".", "node_url", "+", "path", "if", "path", "else", "self", ".", "node_url", ",", "json", "=", "json", ",", "params", "=", "params", ",", "headers", "=", "headers", ",", "*", "*", "kwargs", ",", ")", "except", "ConnectionError", "as", "err", ":", "connExc", "=", "err", "raise", "err", "finally", ":", "self", ".", "update_backoff_time", "(", "success", "=", "connExc", "is", "None", ",", "backoff_cap", "=", "backoff_cap", ")", "return", "response" ]
Performs an HTTP request with the given parameters. Implements exponential backoff. If `ConnectionError` occurs, a timestamp equal to now + the default delay (`BACKOFF_DELAY`) is assigned to the object. The timestamp is in UTC. Next time the function is called, it either waits till the timestamp is passed or raises `TimeoutError`. If `ConnectionError` occurs two or more times in a row, the retry count is incremented and the new timestamp is calculated as now + the default delay multiplied by two to the power of the number of retries. If a request is successful, the backoff timestamp is removed, the retry count is back to zero. Args: method (str): HTTP method (e.g.: ``'GET'``). path (str): API endpoint path (e.g.: ``'/transactions'``). json (dict): JSON data to send along with the request. params (dict): Dictionary of URL (query) parameters. headers (dict): Optional headers to pass to the request. timeout (int): Optional timeout in seconds. backoff_cap (int): The maximal allowed backoff delay in seconds to be assigned to a node. kwargs: Optional keyword arguments.
[ "Performs", "an", "HTTP", "request", "with", "the", "given", "parameters", "." ]
python
train
39.966102
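The delay schedule in the docstring is plain exponential backoff; a small sketch of that arithmetic (the 0.5 s base is an assumption, not necessarily the driver's BACKOFF_DELAY):

BACKOFF_DELAY = 0.5  # seconds; illustrative value

def backoff_seconds(retries):
    # delay = base * 2 ** retries, as described in the docstring above
    return BACKOFF_DELAY * 2 ** retries

print([backoff_seconds(n) for n in range(4)])  # [0.5, 1.0, 2.0, 4.0]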
andreikop/qutepart
qutepart/__init__.py
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/__init__.py#L940-L952
def _setSolidEdgeGeometry(self): """Sets the solid edge line geometry if needed""" if self._lineLengthEdge is not None: cr = self.contentsRect() # contents margin usually gives 1 # cursor rectangle left edge for the very first character usually # gives 4 x = self.fontMetrics().width('9' * self._lineLengthEdge) + \ self._totalMarginWidth + \ self.contentsMargins().left() + \ self.__cursorRect(self.firstVisibleBlock(), 0, offset=0).left() self._solidEdgeLine.setGeometry(QRect(x, cr.top(), 1, cr.bottom()))
[ "def", "_setSolidEdgeGeometry", "(", "self", ")", ":", "if", "self", ".", "_lineLengthEdge", "is", "not", "None", ":", "cr", "=", "self", ".", "contentsRect", "(", ")", "# contents margin usually gives 1", "# cursor rectangle left edge for the very first character usually", "# gives 4", "x", "=", "self", ".", "fontMetrics", "(", ")", ".", "width", "(", "'9'", "*", "self", ".", "_lineLengthEdge", ")", "+", "self", ".", "_totalMarginWidth", "+", "self", ".", "contentsMargins", "(", ")", ".", "left", "(", ")", "+", "self", ".", "__cursorRect", "(", "self", ".", "firstVisibleBlock", "(", ")", ",", "0", ",", "offset", "=", "0", ")", ".", "left", "(", ")", "self", ".", "_solidEdgeLine", ".", "setGeometry", "(", "QRect", "(", "x", ",", "cr", ".", "top", "(", ")", ",", "1", ",", "cr", ".", "bottom", "(", ")", ")", ")" ]
Sets the solid edge line geometry if needed
[ "Sets", "the", "solid", "edge", "line", "geometry", "if", "needed" ]
python
train
48.692308
JohnDoee/thomas
thomas/outputs/http.py
https://github.com/JohnDoee/thomas/blob/51916dd110098b189a1c2fbcb71794fd9ec94832/thomas/outputs/http.py#L278-L311
def makeProducer(self, request, fileForReading): """ Make a L{StaticProducer} that will produce the body of this response. This method will also set the response code and Content-* headers. @param request: The L{Request} object. @param fileForReading: The file object containing the resource. @return: A L{StaticProducer}. Calling C{.start()} on this will begin producing the response. """ byteRange = request.getHeader(b'range') if byteRange is None or not self.getFileSize(): self._setContentHeaders(request) request.setResponseCode(http.OK) return NoRangeStaticProducer(request, fileForReading) try: parsedRanges = self._parseRangeHeader(byteRange) except ValueError: logger.warning("Ignoring malformed Range header %r" % (byteRange,)) self._setContentHeaders(request) request.setResponseCode(http.OK) return NoRangeStaticProducer(request, fileForReading) if len(parsedRanges) == 1: offset, size = self._doSingleRangeRequest( request, parsedRanges[0]) self._setContentHeaders(request, size) return SingleRangeStaticProducer( request, fileForReading, offset, size) else: rangeInfo = self._doMultipleRangeRequest(request, parsedRanges) return MultipleRangeStaticProducer( request, fileForReading, rangeInfo)
[ "def", "makeProducer", "(", "self", ",", "request", ",", "fileForReading", ")", ":", "byteRange", "=", "request", ".", "getHeader", "(", "b'range'", ")", "if", "byteRange", "is", "None", "or", "not", "self", ".", "getFileSize", "(", ")", ":", "self", ".", "_setContentHeaders", "(", "request", ")", "request", ".", "setResponseCode", "(", "http", ".", "OK", ")", "return", "NoRangeStaticProducer", "(", "request", ",", "fileForReading", ")", "try", ":", "parsedRanges", "=", "self", ".", "_parseRangeHeader", "(", "byteRange", ")", "except", "ValueError", ":", "logger", ".", "warning", "(", "\"Ignoring malformed Range header %r\"", "%", "(", "byteRange", ",", ")", ")", "self", ".", "_setContentHeaders", "(", "request", ")", "request", ".", "setResponseCode", "(", "http", ".", "OK", ")", "return", "NoRangeStaticProducer", "(", "request", ",", "fileForReading", ")", "if", "len", "(", "parsedRanges", ")", "==", "1", ":", "offset", ",", "size", "=", "self", ".", "_doSingleRangeRequest", "(", "request", ",", "parsedRanges", "[", "0", "]", ")", "self", ".", "_setContentHeaders", "(", "request", ",", "size", ")", "return", "SingleRangeStaticProducer", "(", "request", ",", "fileForReading", ",", "offset", ",", "size", ")", "else", ":", "rangeInfo", "=", "self", ".", "_doMultipleRangeRequest", "(", "request", ",", "parsedRanges", ")", "return", "MultipleRangeStaticProducer", "(", "request", ",", "fileForReading", ",", "rangeInfo", ")" ]
Make a L{StaticProducer} that will produce the body of this response. This method will also set the response code and Content-* headers. @param request: The L{Request} object. @param fileForReading: The file object containing the resource. @return: A L{StaticProducer}. Calling C{.start()} on this will begin producing the response.
[ "Make", "a", "L", "{", "StaticProducer", "}", "that", "will", "produce", "the", "body", "of", "this", "response", "." ]
python
train
44.088235
orbingol/NURBS-Python
geomdl/exchange.py
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/exchange.py#L266-L299
def import_yaml(file_name, **kwargs): """ Imports curves and surfaces from files in YAML format. .. note:: Requires `ruamel.yaml <https://pypi.org/project/ruamel.yaml/>`_ package. Use ``jinja2=True`` to activate Jinja2 template processing. Please refer to the documentation for details. :param file_name: name of the input file :type file_name: str :return: a list of rational spline geometries :rtype: list :raises GeomdlException: an error occurred reading the file """ def callback(data): yaml = YAML() return yaml.load(data) # Check if it is possible to import 'ruamel.yaml' try: from ruamel.yaml import YAML except ImportError: raise exch.GeomdlException("Please install 'ruamel.yaml' package to use YAML format: pip install ruamel.yaml") # Get keyword arguments delta = kwargs.get('delta', -1.0) use_template = kwargs.get('jinja2', False) # Read file file_src = exch.read_file(file_name) # Import data return exch.import_dict_str(file_src=file_src, delta=delta, callback=callback, tmpl=use_template)
[ "def", "import_yaml", "(", "file_name", ",", "*", "*", "kwargs", ")", ":", "def", "callback", "(", "data", ")", ":", "yaml", "=", "YAML", "(", ")", "return", "yaml", ".", "load", "(", "data", ")", "# Check if it is possible to import 'ruamel.yaml'", "try", ":", "from", "ruamel", ".", "yaml", "import", "YAML", "except", "ImportError", ":", "raise", "exch", ".", "GeomdlException", "(", "\"Please install 'ruamel.yaml' package to use YAML format: pip install ruamel.yaml\"", ")", "# Get keyword arguments", "delta", "=", "kwargs", ".", "get", "(", "'delta'", ",", "-", "1.0", ")", "use_template", "=", "kwargs", ".", "get", "(", "'jinja2'", ",", "False", ")", "# Read file", "file_src", "=", "exch", ".", "read_file", "(", "file_name", ")", "# Import data", "return", "exch", ".", "import_dict_str", "(", "file_src", "=", "file_src", ",", "delta", "=", "delta", ",", "callback", "=", "callback", ",", "tmpl", "=", "use_template", ")" ]
Imports curves and surfaces from files in YAML format. .. note:: Requires `ruamel.yaml <https://pypi.org/project/ruamel.yaml/>`_ package. Use ``jinja2=True`` to activate Jinja2 template processing. Please refer to the documentation for details. :param file_name: name of the input file :type file_name: str :return: a list of rational spline geometries :rtype: list :raises GeomdlException: an error occurred reading the file
[ "Imports", "curves", "and", "surfaces", "from", "files", "in", "YAML", "format", "." ]
python
train
32.352941
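Usage sketch for the importer above (requires geomdl plus ruamel.yaml; the file path is a placeholder):

from geomdl import exchange

# 'shapes.yaml' stands in for a geomdl-format YAML file on disk
geometries = exchange.import_yaml('shapes.yaml', delta=0.01)
for geom in geometries:
    print(type(geom).__name__)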
bitesofcode/projexui
projexui/widgets/xnodewidget/xnode.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xnodewidget/xnode.py#L1670-L1683
def setDisabledAlternateColor(self, color): """ Sets the alternate color used when drawing this node as disabled. :param color | <QColor> """ color = QColor(color) if self._palette is None: self._palette = XNodePalette(self._scenePalette) self._palette.setColor(self._palette.Disabled, self._palette.NodeAlternateBackground, color) self.setDirty()
[ "def", "setDisabledAlternateColor", "(", "self", ",", "color", ")", ":", "color", "=", "QColor", "(", "color", ")", "if", "self", ".", "_palette", "is", "None", ":", "self", ".", "_palette", "=", "XNodePalette", "(", "self", ".", "_scenePalette", ")", "self", ".", "_palette", ".", "setColor", "(", "self", ".", "_palette", ".", "Disabled", ",", "self", ".", "_palette", ".", "NodeAlternateBackground", ",", "color", ")", "self", ".", "setDirty", "(", ")" ]
Sets the alternate color used when drawing this node as disabled. :param color | <QColor>
[ "Sets", "the", "alternate", "color", "used", "when", "drawing", "this", "node", "as", "disabled", ".", ":", "param", "color", "|", "<QColor", ">" ]
python
train
35.357143
foremast/foremast
src/foremast/utils/lookups.py
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/utils/lookups.py#L171-L205
def remote_file(self, branch='master', filename=''): """Read the remote file on Git Server. Args: branch (str): Git Branch to find file. filename (str): Name of file to retrieve relative to root of repository. Returns: str: Contents of remote file. Raises: FileNotFoundError: Requested file missing. """ LOG.info('Retrieving "%s" from "%s".', filename, self.git_short) file_contents = '' try: file_blob = self.project.files.get(file_path=filename, ref=branch) except gitlab.exceptions.GitlabGetError: file_blob = None LOG.debug('GitLab file response:\n%s', file_blob) if not file_blob: msg = 'Project "{0}" is missing file "{1}" in "{2}" branch.'.format(self.git_short, filename, branch) LOG.warning(msg) raise FileNotFoundError(msg) else: file_contents = b64decode(file_blob.content).decode() LOG.debug('Remote file contents:\n%s', file_contents) return file_contents
[ "def", "remote_file", "(", "self", ",", "branch", "=", "'master'", ",", "filename", "=", "''", ")", ":", "LOG", ".", "info", "(", "'Retrieving \"%s\" from \"%s\".'", ",", "filename", ",", "self", ".", "git_short", ")", "file_contents", "=", "''", "try", ":", "file_blob", "=", "self", ".", "project", ".", "files", ".", "get", "(", "file_path", "=", "filename", ",", "ref", "=", "branch", ")", "except", "gitlab", ".", "exceptions", ".", "GitlabGetError", ":", "file_blob", "=", "None", "LOG", ".", "debug", "(", "'GitLab file response:\\n%s'", ",", "file_blob", ")", "if", "not", "file_blob", ":", "msg", "=", "'Project \"{0}\" is missing file \"{1}\" in \"{2}\" branch.'", ".", "format", "(", "self", ".", "git_short", ",", "filename", ",", "branch", ")", "LOG", ".", "warning", "(", "msg", ")", "raise", "FileNotFoundError", "(", "msg", ")", "else", ":", "file_contents", "=", "b64decode", "(", "file_blob", ".", "content", ")", ".", "decode", "(", ")", "LOG", ".", "debug", "(", "'Remote file contents:\\n%s'", ",", "file_contents", ")", "return", "file_contents" ]
Read the remote file on Git Server. Args: branch (str): Git Branch to find file. filename (str): Name of file to retrieve relative to root of repository. Returns: str: Contents of remote file. Raises: FileNotFoundError: Requested file missing.
[ "Read", "the", "remote", "file", "on", "Git", "Server", "." ]
python
train
31.171429
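The python-gitlab calls underneath remote_file can be sketched standalone; server URL, token, project path, and file path are placeholders:

from base64 import b64decode
import gitlab

gl = gitlab.Gitlab('https://gitlab.example.com', private_token='<token>')
project = gl.projects.get('group/repo')
blob = project.files.get(file_path='runway/pipeline.json', ref='master')
print(b64decode(blob.content).decode())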
Patreon/patreon-python
examples/flask/my_site/models/tables/db_wrapper.py
https://github.com/Patreon/patreon-python/blob/80c83f018d6bd93b83c188baff727c5e77e01ce6/examples/flask/my_site/models/tables/db_wrapper.py#L1-L7
def build_if_needed(db): """Little helper method for making tables in SQL-Alchemy with SQLite""" if len(db.engine.table_names()) == 0: # import all classes here from my_site.models.tables.user import User db.create_all()
[ "def", "build_if_needed", "(", "db", ")", ":", "if", "len", "(", "db", ".", "engine", ".", "table_names", "(", ")", ")", "==", "0", ":", "# import all classes here", "from", "my_site", ".", "models", ".", "tables", ".", "user", "import", "User", "db", ".", "create_all", "(", ")" ]
Little helper method for making tables in SQL-Alchemy with SQLite
[ "Little", "helper", "method", "for", "making", "tables", "in", "SQL", "-", "Alchemy", "with", "SQLite" ]
python
train
35.285714
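Where the helper above would sit in a Flask app — a hedged sketch; note that engine.table_names() matches the older SQLAlchemy API the helper targets:

from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///my_site.db'
db = SQLAlchemy(app)

with app.app_context():
    if len(db.engine.table_names()) == 0:
        db.create_all()  # builds tables for every imported model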
tcalmant/ipopo
pelix/ipopo/waiting.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/waiting.py#L123-L136
def _start(self): """ Starts the instantiation queue (called by its bundle activator) """ try: # Try to register to factory events with use_ipopo(self.__context) as ipopo: ipopo.add_listener(self) except BundleException: # Service not yet present pass # Register the iPOPO service listener self.__context.add_service_listener(self, specification=SERVICE_IPOPO)
[ "def", "_start", "(", "self", ")", ":", "try", ":", "# Try to register to factory events", "with", "use_ipopo", "(", "self", ".", "__context", ")", "as", "ipopo", ":", "ipopo", ".", "add_listener", "(", "self", ")", "except", "BundleException", ":", "# Service not yet present", "pass", "# Register the iPOPO service listener", "self", ".", "__context", ".", "add_service_listener", "(", "self", ",", "specification", "=", "SERVICE_IPOPO", ")" ]
Starts the instantiation queue (called by its bundle activator)
[ "Starts", "the", "instantiation", "queue", "(", "called", "by", "its", "bundle", "activator", ")" ]
python
train
33.428571
istresearch/scrapy-cluster
crawler/crawling/distributed_scheduler.py
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/crawler/crawling/distributed_scheduler.py#L550-L561
def parse_cookie(self, string): ''' Parses a cookie string like returned in a Set-Cookie header @param string: The cookie string @return: the cookie dict ''' results = re.findall('([^=]+)=([^\;]+);?\s?', string) my_dict = {} for item in results: my_dict[item[0]] = item[1] return my_dict
[ "def", "parse_cookie", "(", "self", ",", "string", ")", ":", "results", "=", "re", ".", "findall", "(", "'([^=]+)=([^\\;]+);?\\s?'", ",", "string", ")", "my_dict", "=", "{", "}", "for", "item", "in", "results", ":", "my_dict", "[", "item", "[", "0", "]", "]", "=", "item", "[", "1", "]", "return", "my_dict" ]
Parses a cookie string like returned in a Set-Cookie header @param string: The cookie string @return: the cookie dict
[ "Parses", "a", "cookie", "string", "like", "returned", "in", "a", "Set", "-", "Cookie", "header" ]
python
train
30.083333
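The regex above can be exercised on its own; note how the trailing \s? soaks up the space after each semicolon, so the keys come out clean:

import re

header = 'sessionid=abc123; csrftoken=xyz789; path=/'
pairs = re.findall(r'([^=]+)=([^\;]+);?\s?', header)
print(dict(pairs))  # {'sessionid': 'abc123', 'csrftoken': 'xyz789', 'path': '/'}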
spyder-ide/spyder
spyder/plugins/explorer/widgets.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/explorer/widgets.py#L584-L589
def run(self, fnames=None): """Run Python scripts""" if fnames is None: fnames = self.get_selected_filenames() for fname in fnames: self.sig_run.emit(fname)
[ "def", "run", "(", "self", ",", "fnames", "=", "None", ")", ":", "if", "fnames", "is", "None", ":", "fnames", "=", "self", ".", "get_selected_filenames", "(", ")", "for", "fname", "in", "fnames", ":", "self", ".", "sig_run", ".", "emit", "(", "fname", ")" ]
Run Python scripts
[ "Run", "Python", "scripts" ]
python
train
34
olasitarska/django-gulp-rev
gulp_rev/__init__.py
https://github.com/olasitarska/django-gulp-rev/blob/cb6b725d23906ef4451e9d73b91d8e1888a0b954/gulp_rev/__init__.py#L58-L77
def static_rev(path): """ Gets a joined path with the STATIC_URL setting, and applies revisioning depending on DEBUG setting. Usage:: {% load rev %} {% static_rev "css/base.css" %} Example:: {% static_rev "css/base.css" %} On DEBUG=True will return: /static/css/base.css?d9wdjs On DEBUG=False will return: /static/css/base-d9wdjs.css """ static_path = StaticNode.handle_simple(path) if is_debug(): return dev_url(static_path) return production_url(path, static_path)
[ "def", "static_rev", "(", "path", ")", ":", "static_path", "=", "StaticNode", ".", "handle_simple", "(", "path", ")", "if", "is_debug", "(", ")", ":", "return", "dev_url", "(", "static_path", ")", "return", "production_url", "(", "path", ",", "static_path", ")" ]
Gets a joined path with the STATIC_URL setting, and applies revisioning depending on DEBUG setting. Usage:: {% load rev %} {% static_rev "css/base.css" %} Example:: {% static_rev "css/base.css" %} On DEBUG=True will return: /static/css/base.css?d9wdjs On DEBUG=False will return: /static/css/base-d9wdjs.css
[ "Gets", "a", "joined", "path", "with", "the", "STATIC_URL", "setting", "and", "applies", "revisioning", "depending", "on", "DEBUG", "setting", "." ]
python
train
26.75