repo: string (lengths 7 to 54)
path: string (lengths 4 to 192)
url: string (lengths 87 to 284)
code: string (lengths 78 to 104k)
code_tokens: sequence
docstring: string (lengths 1 to 46.9k)
docstring_tokens: sequence
language: string (1 distinct value)
partition: string (3 distinct values)
spyder-ide/spyder
spyder/utils/vcs.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/vcs.py#L100-L116
def get_hg_revision(repopath): """Return Mercurial revision for the repository located at repopath Result is a tuple (global, local, branch), with None values on error For example: >>> get_hg_revision(".") ('eba7273c69df+', '2015+', 'default') """ try: assert osp.isdir(osp.join(repopath, '.hg')) proc = programs.run_program('hg', ['id', '-nib', repopath]) output, _err = proc.communicate() # output is now: ('eba7273c69df+ 2015+ default\n', None) # Split 2 times max to allow spaces in branch names. return tuple(output.decode().strip().split(None, 2)) except (subprocess.CalledProcessError, AssertionError, AttributeError, OSError): return (None, None, None)
[ "def", "get_hg_revision", "(", "repopath", ")", ":", "try", ":", "assert", "osp", ".", "isdir", "(", "osp", ".", "join", "(", "repopath", ",", "'.hg'", ")", ")", "proc", "=", "programs", ".", "run_program", "(", "'hg'", ",", "[", "'id'", ",", "'-nib'", ",", "repopath", "]", ")", "output", ",", "_err", "=", "proc", ".", "communicate", "(", ")", "# output is now: ('eba7273c69df+ 2015+ default\\n', None)\r", "# Split 2 times max to allow spaces in branch names.\r", "return", "tuple", "(", "output", ".", "decode", "(", ")", ".", "strip", "(", ")", ".", "split", "(", "None", ",", "2", ")", ")", "except", "(", "subprocess", ".", "CalledProcessError", ",", "AssertionError", ",", "AttributeError", ",", "OSError", ")", ":", "return", "(", "None", ",", "None", ",", "None", ")" ]
Return Mercurial revision for the repository located at repopath Result is a tuple (global, local, branch), with None values on error For example: >>> get_hg_revision(".") ('eba7273c69df+', '2015+', 'default')
[ "Return", "Mercurial", "revision", "for", "the", "repository", "located", "at", "repopath", "Result", "is", "a", "tuple", "(", "global", "local", "branch", ")", "with", "None", "values", "on", "error", "For", "example", ":", ">>>", "get_hg_revision", "(", ".", ")", "(", "eba7273c69df", "+", "2015", "+", "default", ")" ]
python
train
PythonCharmers/python-future
src/future/backports/urllib/parse.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/urllib/parse.py#L292-L306
def urlparse(url, scheme='', allow_fragments=True): """Parse a URL into 6 components: <scheme>://<netloc>/<path>;<params>?<query>#<fragment> Return a 6-tuple: (scheme, netloc, path, params, query, fragment). Note that we don't break the components up in smaller bits (e.g. netloc is a single string) and we don't expand % escapes.""" url, scheme, _coerce_result = _coerce_args(url, scheme) splitresult = urlsplit(url, scheme, allow_fragments) scheme, netloc, url, query, fragment = splitresult if scheme in uses_params and ';' in url: url, params = _splitparams(url) else: params = '' result = ParseResult(scheme, netloc, url, params, query, fragment) return _coerce_result(result)
[ "def", "urlparse", "(", "url", ",", "scheme", "=", "''", ",", "allow_fragments", "=", "True", ")", ":", "url", ",", "scheme", ",", "_coerce_result", "=", "_coerce_args", "(", "url", ",", "scheme", ")", "splitresult", "=", "urlsplit", "(", "url", ",", "scheme", ",", "allow_fragments", ")", "scheme", ",", "netloc", ",", "url", ",", "query", ",", "fragment", "=", "splitresult", "if", "scheme", "in", "uses_params", "and", "';'", "in", "url", ":", "url", ",", "params", "=", "_splitparams", "(", "url", ")", "else", ":", "params", "=", "''", "result", "=", "ParseResult", "(", "scheme", ",", "netloc", ",", "url", ",", "params", ",", "query", ",", "fragment", ")", "return", "_coerce_result", "(", "result", ")" ]
Parse a URL into 6 components: <scheme>://<netloc>/<path>;<params>?<query>#<fragment> Return a 6-tuple: (scheme, netloc, path, params, query, fragment). Note that we don't break the components up in smaller bits (e.g. netloc is a single string) and we don't expand % escapes.
[ "Parse", "a", "URL", "into", "6", "components", ":", "<scheme", ">", ":", "//", "<netloc", ">", "/", "<path", ">", ";", "<params", ">", "?<query", ">", "#<fragment", ">", "Return", "a", "6", "-", "tuple", ":", "(", "scheme", "netloc", "path", "params", "query", "fragment", ")", ".", "Note", "that", "we", "don", "t", "break", "the", "components", "up", "in", "smaller", "bits", "(", "e", ".", "g", ".", "netloc", "is", "a", "single", "string", ")", "and", "we", "don", "t", "expand", "%", "escapes", "." ]
python
train
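For illustration, a minimal usage sketch of the six-way split described in the record above; it calls the standard-library urllib.parse.urlparse, which this backport mirrors, so running it against the backport module itself is an assumption.

```python
# Illustration only: the stdlib parser behaves like the backported urlparse above.
from urllib.parse import urlparse

parts = urlparse("http://example.com/path;type=a?q=1#frag")
# -> ParseResult(scheme='http', netloc='example.com', path='/path',
#                params='type=a', query='q=1', fragment='frag')
print(parts.scheme, parts.netloc, parts.params)
```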
obriencj/python-javatools
javatools/__init__.py
https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/__init__.py#L2294-L2309
def is_class(data): """ checks that the data (which is a string, buffer, or a stream supporting the read method) has the magic numbers indicating it is a Java class file. Returns False if the magic numbers do not match, or for any errors. """ try: with unpack(data) as up: magic = up.unpack_struct(_BBBB) return magic == JAVA_CLASS_MAGIC except UnpackException: return False
[ "def", "is_class", "(", "data", ")", ":", "try", ":", "with", "unpack", "(", "data", ")", "as", "up", ":", "magic", "=", "up", ".", "unpack_struct", "(", "_BBBB", ")", "return", "magic", "==", "JAVA_CLASS_MAGIC", "except", "UnpackException", ":", "return", "False" ]
checks that the data (which is a string, buffer, or a stream supporting the read method) has the magic numbers indicating it is a Java class file. Returns False if the magic numbers do not match, or for any errors.
[ "checks", "that", "the", "data", "(", "which", "is", "a", "string", "buffer", "or", "a", "stream", "supporting", "the", "read", "method", ")", "has", "the", "magic", "numbers", "indicating", "it", "is", "a", "Java", "class", "file", ".", "Returns", "False", "if", "the", "magic", "numbers", "do", "not", "match", "or", "for", "any", "errors", "." ]
python
train
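A minimal standalone sketch of the same check, assuming only that Java class files begin with the magic bytes 0xCA 0xFE 0xBA 0xBE; it is not the javatools implementation, and the file name is hypothetical.

```python
# Standalone sketch (not javatools): a Java class file begins with CA FE BA BE.
def looks_like_class(data: bytes) -> bool:
    return data[:4] == b"\xca\xfe\xba\xbe"

with open("Example.class", "rb") as fh:   # hypothetical file name
    print(looks_like_class(fh.read()))
```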
SatelliteQE/nailgun
nailgun/entities.py
https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/entities.py#L4180-L4231
def path(self, which=None): """Extend ``nailgun.entity_mixins.Entity.path``. The format of the returned path depends on the value of ``which``: bulk/install_content /api/hosts/:host_id/bulk/install_content errata /api/hosts/:host_id/errata power /api/hosts/:host_id/power errata/apply /api/hosts/:host_id/errata/apply puppetclass_ids /api/hosts/:host_id/puppetclass_ids smart_class_parameters /api/hosts/:host_id/smart_class_parameters smart_variables /api/hosts/:host_id/smart_class_variables module_streams /api/hosts/:host_id/module_streams Otherwise, call ``super``. """ if which in ( 'enc', 'errata', 'errata/apply', 'errata/applicability', 'facts', 'packages', 'power', 'puppetclass_ids', 'smart_class_parameters', 'smart_variables', 'module_streams', ): return '{0}/{1}'.format( super(Host, self).path(which='self'), which ) elif which in ('bulk/install_content',): return '{0}/{1}'.format( super(Host, self).path(which='base'), which ) elif which in ('upload_facts',): return '{0}/{1}'.format( super(Host, self).path(which='base'), 'facts' ) return super(Host, self).path(which)
[ "def", "path", "(", "self", ",", "which", "=", "None", ")", ":", "if", "which", "in", "(", "'enc'", ",", "'errata'", ",", "'errata/apply'", ",", "'errata/applicability'", ",", "'facts'", ",", "'packages'", ",", "'power'", ",", "'puppetclass_ids'", ",", "'smart_class_parameters'", ",", "'smart_variables'", ",", "'module_streams'", ",", ")", ":", "return", "'{0}/{1}'", ".", "format", "(", "super", "(", "Host", ",", "self", ")", ".", "path", "(", "which", "=", "'self'", ")", ",", "which", ")", "elif", "which", "in", "(", "'bulk/install_content'", ",", ")", ":", "return", "'{0}/{1}'", ".", "format", "(", "super", "(", "Host", ",", "self", ")", ".", "path", "(", "which", "=", "'base'", ")", ",", "which", ")", "elif", "which", "in", "(", "'upload_facts'", ",", ")", ":", "return", "'{0}/{1}'", ".", "format", "(", "super", "(", "Host", ",", "self", ")", ".", "path", "(", "which", "=", "'base'", ")", ",", "'facts'", ")", "return", "super", "(", "Host", ",", "self", ")", ".", "path", "(", "which", ")" ]
Extend ``nailgun.entity_mixins.Entity.path``. The format of the returned path depends on the value of ``which``: bulk/install_content /api/hosts/:host_id/bulk/install_content errata /api/hosts/:host_id/errata power /api/hosts/:host_id/power errata/apply /api/hosts/:host_id/errata/apply puppetclass_ids /api/hosts/:host_id/puppetclass_ids smart_class_parameters /api/hosts/:host_id/smart_class_parameters smart_variables /api/hosts/:host_id/smart_class_variables module_streams /api/hosts/:host_id/module_streams Otherwise, call ``super``.
[ "Extend", "nailgun", ".", "entity_mixins", ".", "Entity", ".", "path", ".", "The", "format", "of", "the", "returned", "path", "depends", "on", "the", "value", "of", "which", ":" ]
python
train
wdecoster/nanogui
nanogui/nanoguis.py
https://github.com/wdecoster/nanogui/blob/78e4f8ca511c5ca9fd7ccd6ff03e8edd1a5db54d/nanogui/nanoguis.py#L454-L471
def validate_integer(self, action, index, value_if_allowed, prior_value, text, validation_type, trigger_type, widget_name): """Check if text Entry is valid (number). I have no idea what all these arguments are doing here but took this from https://stackoverflow.com/questions/8959815/restricting-the-value-in-tkinter-entry-widget """ if(action == '1'): if text in '0123456789.-+': try: int(value_if_allowed) return True except ValueError: return False else: return False else: return True
[ "def", "validate_integer", "(", "self", ",", "action", ",", "index", ",", "value_if_allowed", ",", "prior_value", ",", "text", ",", "validation_type", ",", "trigger_type", ",", "widget_name", ")", ":", "if", "(", "action", "==", "'1'", ")", ":", "if", "text", "in", "'0123456789.-+'", ":", "try", ":", "int", "(", "value_if_allowed", ")", "return", "True", "except", "ValueError", ":", "return", "False", "else", ":", "return", "False", "else", ":", "return", "True" ]
Check if text Entry is valid (number). I have no idea what all these arguments are doing here but took this from https://stackoverflow.com/questions/8959815/restricting-the-value-in-tkinter-entry-widget
[ "Check", "if", "text", "Entry", "is", "valid", "(", "number", ")", "." ]
python
test
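A hedged sketch of how a callback with this eight-argument signature is typically registered on a Tkinter Entry; the '%' substitution codes map one-to-one onto the arguments, and the standalone function below mirrors the method's logic rather than reusing nanogui itself.

```python
import tkinter as tk

def validate_integer(action, index, value_if_allowed, prior_value, text,
                     validation_type, trigger_type, widget_name):
    # Mirrors the method above: only vet the text on insertion (action == '1').
    if action == '1':
        if text in '0123456789.-+':
            try:
                int(value_if_allowed)
                return True
            except ValueError:
                return False
        return False
    return True

root = tk.Tk()
vcmd = (root.register(validate_integer),
        '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')
entry = tk.Entry(root, validate='key', validatecommand=vcmd)
entry.pack()
```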
tumblr/pytumblr
pytumblr/request.py
https://github.com/tumblr/pytumblr/blob/4a5cd7c4b8ae78d12811d9fd52620afa1692a415/pytumblr/request.py#L35-L53
def get(self, url, params): """ Issues a GET request against the API, properly formatting the params :param url: a string, the url you are requesting :param params: a dict, the key-value of all the paramaters needed in the request :returns: a dict parsed of the JSON response """ url = self.host + url if params: url = url + "?" + urllib.parse.urlencode(params) try: resp = requests.get(url, allow_redirects=False, headers=self.headers, auth=self.oauth) except TooManyRedirects as e: resp = e.response return self.json_parse(resp)
[ "def", "get", "(", "self", ",", "url", ",", "params", ")", ":", "url", "=", "self", ".", "host", "+", "url", "if", "params", ":", "url", "=", "url", "+", "\"?\"", "+", "urllib", ".", "parse", ".", "urlencode", "(", "params", ")", "try", ":", "resp", "=", "requests", ".", "get", "(", "url", ",", "allow_redirects", "=", "False", ",", "headers", "=", "self", ".", "headers", ",", "auth", "=", "self", ".", "oauth", ")", "except", "TooManyRedirects", "as", "e", ":", "resp", "=", "e", ".", "response", "return", "self", ".", "json_parse", "(", "resp", ")" ]
Issues a GET request against the API, properly formatting the params :param url: a string, the url you are requesting :param params: a dict, the key-value of all the paramaters needed in the request :returns: a dict parsed of the JSON response
[ "Issues", "a", "GET", "request", "against", "the", "API", "properly", "formatting", "the", "params" ]
python
train
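For context, the low-level get() above is what the public client methods ride on; a hedged usage sketch with placeholder credentials follows.

```python
import pytumblr

# Placeholder credentials; a real app supplies its own OAuth keys.
client = pytumblr.TumblrRestClient('consumer_key', 'consumer_secret',
                                   'oauth_token', 'oauth_secret')
posts = client.posts('staff.tumblr.com', limit=5)   # issues a GET under the hood
```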
ctuning/ck
ck/repo/module/repo/module.py
https://github.com/ctuning/ck/blob/7e009814e975f8742790d3106340088a46223714/ck/repo/module/repo/module.py#L1792-L1972
def deps(i): """ Input: { (data_uoa) - repo UOA or (path) - path to .cmr.json (current_repos) - list of repos being updated (to avoid infinite recursion) (how) - 'pull' (default) or 'add' (version) - checkout version (default - stable) (branch) - git branch (checkout) - git checkout } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 } """ import os o=i.get('out','') duoa=i.get('data_uoa','') cr=i.get('current_repos',[]) # Added repos to avoid duplication/recursion how=i.get('how','') if how=='': how='pull' p=i.get('path','') if p=='': r=ck.access({'action':'load', 'module_uoa':work['self_module_uoa'], 'data_uoa':duoa}) if r['return']>0: return r dr=r['dict'] p=dr.get('path','') if p!='': # path to repo description pp=os.path.join(p, ck.cfg['repo_file']) if os.path.isfile(pp): r=ck.load_json_file({'json_file':pp}) if r['return']>0: return r d=r['dict'] # Check checkouts version=i.get('version','') branch=i.get('branch','') checkout=i.get('checkout','') if version!='': cx=d.get('dict',{}).get('checkouts',{}).get(version,{}) branch=cx.get('branch','') checkout=cx.get('checkout','') ppp=os.getcwd() os.chdir(p) if branch!='': if o=='con': ck.out(' ====================================') ck.out(' git checkout '+branch) ck.out('') r=ck.run_and_get_stdout({'cmd':['git','checkout',branch]}) ck.out(r.get('stdout','')) ck.out(r.get('stderr','')) if checkout!='': if o=='con': ck.out(' ====================================') ck.out(' git checkout '+checkout) ck.out('') r=ck.run_and_get_stdout({'cmd':['git','checkout',checkout]}) ck.out(r.get('stdout','')) ck.out(r.get('stderr','')) os.chdir(ppp) rp1=d.get('dict',{}).get('repo_deps',[]) if len(rp1)==0: rp1=d.get('repo_deps',[]) # for backwards compatibility ... rp2=[] rp=[] if len(rp1)>0: for xruoa in rp1: if type(xruoa)!=list: # for backwards compatibility ruoa=xruoa.get('repo_uoa','') if xruoa.get('repo_uid','')!='': ruoa=xruoa['repo_uid'] if ruoa!='' and ruoa not in cr: rp2.append(xruoa) # Add dependencies on other repositories (but avoid duplication) if len(rp2)==0: if o=='con': ck.out(' No dependencies on other repositories found!') else: for xruoa in rp2: ruoa=xruoa.get('repo_uoa','') if xruoa.get('repo_uid','')!='': ruoa=xruoa['repo_uid'] rurl=xruoa.get('repo_url','') if ruoa!='': x=' Dependency on repository '+ruoa+' ' # Check if this repo exists r=ck.access({'action':'load', 'module_uoa':work['self_module_uoa'], 'data_uoa':ruoa}) if r['return']>0: if r['return']!=16: return r rp.append(xruoa) x+=': should be resolved ...' else: # If explicit branch, still add ! branch=xruoa.get('branch','') checkout=xruoa.get('checkout','') stable=xruoa.get('stable','') version=xruoa.get('version','') if branch!='' or checkout!='' or stable!='' or version!='': xruoa['ignore_pull']='yes' rp.append(xruoa) x+=': should be switched to explicit branch ...' 
else: x+=': Ok' if o=='con': ck.out(x) if len(rp)>0: for xruoa in rp: ruoa=xruoa.get('repo_uoa','') ruid=xruoa.get('repo_uid','') rurl=xruoa.get('repo_url','') branch=xruoa.get('branch','') checkout=xruoa.get('checkout','') stable=xruoa.get('stable','') version=xruoa.get('version','') ignore_pull=xruoa.get('ignore_pull','') if o=='con': ck.out('') x='' if ruid!='': x=' ('+ruid+')' ck.out(' Resolving dependency on repo: '+ruoa+x) ck.out('') if ruid!='': cr.append(ruid) else: cr.append(ruoa) ii={'action':how, 'module_uoa':work['self_module_uoa'], 'data_uoa':ruoa, 'current_repos':cr, 'url':rurl, 'ignore_pull':ignore_pull, 'branch':branch, 'checkout':checkout, 'stable':stable, 'version':version, 'out':o} if ruid!='': ii['data_uid']=ruid if how=='add': ii['gitzip']='yes' r=ck.access(ii) if r['return']>0: return r return {'return':0, 'current_repos':cr}
[ "def", "deps", "(", "i", ")", ":", "import", "os", "o", "=", "i", ".", "get", "(", "'out'", ",", "''", ")", "duoa", "=", "i", ".", "get", "(", "'data_uoa'", ",", "''", ")", "cr", "=", "i", ".", "get", "(", "'current_repos'", ",", "[", "]", ")", "# Added repos to avoid duplication/recursion", "how", "=", "i", ".", "get", "(", "'how'", ",", "''", ")", "if", "how", "==", "''", ":", "how", "=", "'pull'", "p", "=", "i", ".", "get", "(", "'path'", ",", "''", ")", "if", "p", "==", "''", ":", "r", "=", "ck", ".", "access", "(", "{", "'action'", ":", "'load'", ",", "'module_uoa'", ":", "work", "[", "'self_module_uoa'", "]", ",", "'data_uoa'", ":", "duoa", "}", ")", "if", "r", "[", "'return'", "]", ">", "0", ":", "return", "r", "dr", "=", "r", "[", "'dict'", "]", "p", "=", "dr", ".", "get", "(", "'path'", ",", "''", ")", "if", "p", "!=", "''", ":", "# path to repo description", "pp", "=", "os", ".", "path", ".", "join", "(", "p", ",", "ck", ".", "cfg", "[", "'repo_file'", "]", ")", "if", "os", ".", "path", ".", "isfile", "(", "pp", ")", ":", "r", "=", "ck", ".", "load_json_file", "(", "{", "'json_file'", ":", "pp", "}", ")", "if", "r", "[", "'return'", "]", ">", "0", ":", "return", "r", "d", "=", "r", "[", "'dict'", "]", "# Check checkouts", "version", "=", "i", ".", "get", "(", "'version'", ",", "''", ")", "branch", "=", "i", ".", "get", "(", "'branch'", ",", "''", ")", "checkout", "=", "i", ".", "get", "(", "'checkout'", ",", "''", ")", "if", "version", "!=", "''", ":", "cx", "=", "d", ".", "get", "(", "'dict'", ",", "{", "}", ")", ".", "get", "(", "'checkouts'", ",", "{", "}", ")", ".", "get", "(", "version", ",", "{", "}", ")", "branch", "=", "cx", ".", "get", "(", "'branch'", ",", "''", ")", "checkout", "=", "cx", ".", "get", "(", "'checkout'", ",", "''", ")", "ppp", "=", "os", ".", "getcwd", "(", ")", "os", ".", "chdir", "(", "p", ")", "if", "branch", "!=", "''", ":", "if", "o", "==", "'con'", ":", "ck", ".", "out", "(", "' ===================================='", ")", "ck", ".", "out", "(", "' git checkout '", "+", "branch", ")", "ck", ".", "out", "(", "''", ")", "r", "=", "ck", ".", "run_and_get_stdout", "(", "{", "'cmd'", ":", "[", "'git'", ",", "'checkout'", ",", "branch", "]", "}", ")", "ck", ".", "out", "(", "r", ".", "get", "(", "'stdout'", ",", "''", ")", ")", "ck", ".", "out", "(", "r", ".", "get", "(", "'stderr'", ",", "''", ")", ")", "if", "checkout", "!=", "''", ":", "if", "o", "==", "'con'", ":", "ck", ".", "out", "(", "' ===================================='", ")", "ck", ".", "out", "(", "' git checkout '", "+", "checkout", ")", "ck", ".", "out", "(", "''", ")", "r", "=", "ck", ".", "run_and_get_stdout", "(", "{", "'cmd'", ":", "[", "'git'", ",", "'checkout'", ",", "checkout", "]", "}", ")", "ck", ".", "out", "(", "r", ".", "get", "(", "'stdout'", ",", "''", ")", ")", "ck", ".", "out", "(", "r", ".", "get", "(", "'stderr'", ",", "''", ")", ")", "os", ".", "chdir", "(", "ppp", ")", "rp1", "=", "d", ".", "get", "(", "'dict'", ",", "{", "}", ")", ".", "get", "(", "'repo_deps'", ",", "[", "]", ")", "if", "len", "(", "rp1", ")", "==", "0", ":", "rp1", "=", "d", ".", "get", "(", "'repo_deps'", ",", "[", "]", ")", "# for backwards compatibility ...", "rp2", "=", "[", "]", "rp", "=", "[", "]", "if", "len", "(", "rp1", ")", ">", "0", ":", "for", "xruoa", "in", "rp1", ":", "if", "type", "(", "xruoa", ")", "!=", "list", ":", "# for backwards compatibility", "ruoa", "=", "xruoa", ".", "get", "(", "'repo_uoa'", ",", "''", ")", "if", "xruoa", ".", "get", "(", "'repo_uid'", ",", "''", ")", 
"!=", "''", ":", "ruoa", "=", "xruoa", "[", "'repo_uid'", "]", "if", "ruoa", "!=", "''", "and", "ruoa", "not", "in", "cr", ":", "rp2", ".", "append", "(", "xruoa", ")", "# Add dependencies on other repositories (but avoid duplication)", "if", "len", "(", "rp2", ")", "==", "0", ":", "if", "o", "==", "'con'", ":", "ck", ".", "out", "(", "' No dependencies on other repositories found!'", ")", "else", ":", "for", "xruoa", "in", "rp2", ":", "ruoa", "=", "xruoa", ".", "get", "(", "'repo_uoa'", ",", "''", ")", "if", "xruoa", ".", "get", "(", "'repo_uid'", ",", "''", ")", "!=", "''", ":", "ruoa", "=", "xruoa", "[", "'repo_uid'", "]", "rurl", "=", "xruoa", ".", "get", "(", "'repo_url'", ",", "''", ")", "if", "ruoa", "!=", "''", ":", "x", "=", "' Dependency on repository '", "+", "ruoa", "+", "' '", "# Check if this repo exists", "r", "=", "ck", ".", "access", "(", "{", "'action'", ":", "'load'", ",", "'module_uoa'", ":", "work", "[", "'self_module_uoa'", "]", ",", "'data_uoa'", ":", "ruoa", "}", ")", "if", "r", "[", "'return'", "]", ">", "0", ":", "if", "r", "[", "'return'", "]", "!=", "16", ":", "return", "r", "rp", ".", "append", "(", "xruoa", ")", "x", "+=", "': should be resolved ...'", "else", ":", "# If explicit branch, still add !", "branch", "=", "xruoa", ".", "get", "(", "'branch'", ",", "''", ")", "checkout", "=", "xruoa", ".", "get", "(", "'checkout'", ",", "''", ")", "stable", "=", "xruoa", ".", "get", "(", "'stable'", ",", "''", ")", "version", "=", "xruoa", ".", "get", "(", "'version'", ",", "''", ")", "if", "branch", "!=", "''", "or", "checkout", "!=", "''", "or", "stable", "!=", "''", "or", "version", "!=", "''", ":", "xruoa", "[", "'ignore_pull'", "]", "=", "'yes'", "rp", ".", "append", "(", "xruoa", ")", "x", "+=", "': should be switched to explicit branch ...'", "else", ":", "x", "+=", "': Ok'", "if", "o", "==", "'con'", ":", "ck", ".", "out", "(", "x", ")", "if", "len", "(", "rp", ")", ">", "0", ":", "for", "xruoa", "in", "rp", ":", "ruoa", "=", "xruoa", ".", "get", "(", "'repo_uoa'", ",", "''", ")", "ruid", "=", "xruoa", ".", "get", "(", "'repo_uid'", ",", "''", ")", "rurl", "=", "xruoa", ".", "get", "(", "'repo_url'", ",", "''", ")", "branch", "=", "xruoa", ".", "get", "(", "'branch'", ",", "''", ")", "checkout", "=", "xruoa", ".", "get", "(", "'checkout'", ",", "''", ")", "stable", "=", "xruoa", ".", "get", "(", "'stable'", ",", "''", ")", "version", "=", "xruoa", ".", "get", "(", "'version'", ",", "''", ")", "ignore_pull", "=", "xruoa", ".", "get", "(", "'ignore_pull'", ",", "''", ")", "if", "o", "==", "'con'", ":", "ck", ".", "out", "(", "''", ")", "x", "=", "''", "if", "ruid", "!=", "''", ":", "x", "=", "' ('", "+", "ruid", "+", "')'", "ck", ".", "out", "(", "' Resolving dependency on repo: '", "+", "ruoa", "+", "x", ")", "ck", ".", "out", "(", "''", ")", "if", "ruid", "!=", "''", ":", "cr", ".", "append", "(", "ruid", ")", "else", ":", "cr", ".", "append", "(", "ruoa", ")", "ii", "=", "{", "'action'", ":", "how", ",", "'module_uoa'", ":", "work", "[", "'self_module_uoa'", "]", ",", "'data_uoa'", ":", "ruoa", ",", "'current_repos'", ":", "cr", ",", "'url'", ":", "rurl", ",", "'ignore_pull'", ":", "ignore_pull", ",", "'branch'", ":", "branch", ",", "'checkout'", ":", "checkout", ",", "'stable'", ":", "stable", ",", "'version'", ":", "version", ",", "'out'", ":", "o", "}", "if", "ruid", "!=", "''", ":", "ii", "[", "'data_uid'", "]", "=", "ruid", "if", "how", "==", "'add'", ":", "ii", "[", "'gitzip'", "]", "=", "'yes'", "r", "=", "ck", ".", "access", "(", "ii", ")", "if", 
"r", "[", "'return'", "]", ">", "0", ":", "return", "r", "return", "{", "'return'", ":", "0", ",", "'current_repos'", ":", "cr", "}" ]
Input: { (data_uoa) - repo UOA or (path) - path to .cmr.json (current_repos) - list of repos being updated (to avoid infinite recursion) (how) - 'pull' (default) or 'add' (version) - checkout version (default - stable) (branch) - git branch (checkout) - git checkout } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 }
[ "Input", ":", "{", "(", "data_uoa", ")", "-", "repo", "UOA", "or", "(", "path", ")", "-", "path", "to", ".", "cmr", ".", "json" ]
python
train
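A hedged sketch of driving this action through the CK kernel, following the input dictionary documented in the record above; the repository name is hypothetical and error handling is reduced to printing the message.

```python
import ck.kernel as ck

r = ck.access({'action': 'deps',
               'module_uoa': 'repo',
               'data_uoa': 'ck-env',   # hypothetical repository UOA
               'how': 'pull',
               'out': 'con'})
if r['return'] > 0:
    print(r['error'])
else:
    print(r.get('current_repos', []))
```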
rosenbrockc/fortpy
fortpy/interop/ftypes.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/interop/ftypes.py#L290-L312
def _check_dir(self): """Makes sure that the working directory for the wrapper modules exists. """ from os import path, mkdir if not path.isdir(self.dirpath): mkdir(self.dirpath) #Copy the ftypes.py module shipped with fortpy to the local directory. ftypes = path.join(get_fortpy_templates(), "ftypes.py") from shutil import copy copy(ftypes, self.dirpath) #Create the __init__.py file so that the library becomes a package for #its module contents. with open(path.join(self.dirpath, "__init__.py"), 'w') as f: f.write("# Auto-generated for package structure by fortpy.") #We also need to make sure that the fortpy deallocator module is present for #compilation in the shared library. if not path.isdir(self.f90path): mkdir(self.f90path) #Copy the ftypes.py module shipped with fortpy to the local directory. ftypes = path.join(get_fortpy_templates(), "ftypes_dealloc.f90") from shutil import copy copy(ftypes, self.f90path)
[ "def", "_check_dir", "(", "self", ")", ":", "from", "os", "import", "path", ",", "mkdir", "if", "not", "path", ".", "isdir", "(", "self", ".", "dirpath", ")", ":", "mkdir", "(", "self", ".", "dirpath", ")", "#Copy the ftypes.py module shipped with fortpy to the local directory.", "ftypes", "=", "path", ".", "join", "(", "get_fortpy_templates", "(", ")", ",", "\"ftypes.py\"", ")", "from", "shutil", "import", "copy", "copy", "(", "ftypes", ",", "self", ".", "dirpath", ")", "#Create the __init__.py file so that the library becomes a package for", "#its module contents.", "with", "open", "(", "path", ".", "join", "(", "self", ".", "dirpath", ",", "\"__init__.py\"", ")", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "\"# Auto-generated for package structure by fortpy.\"", ")", "#We also need to make sure that the fortpy deallocator module is present for", "#compilation in the shared library.", "if", "not", "path", ".", "isdir", "(", "self", ".", "f90path", ")", ":", "mkdir", "(", "self", ".", "f90path", ")", "#Copy the ftypes.py module shipped with fortpy to the local directory.", "ftypes", "=", "path", ".", "join", "(", "get_fortpy_templates", "(", ")", ",", "\"ftypes_dealloc.f90\"", ")", "from", "shutil", "import", "copy", "copy", "(", "ftypes", ",", "self", ".", "f90path", ")" ]
Makes sure that the working directory for the wrapper modules exists.
[ "Makes", "sure", "that", "the", "working", "directory", "for", "the", "wrapper", "modules", "exists", "." ]
python
train
blue-yonder/tsfresh
tsfresh/feature_extraction/feature_calculators.py
https://github.com/blue-yonder/tsfresh/blob/c72c9c574371cf7dd7d54e00a466792792e5d202/tsfresh/feature_extraction/feature_calculators.py#L1135-L1148
def number_cwt_peaks(x, n): """ This feature calculator searches for different peaks in x. To do so, x is smoothed by a ricker wavelet and for widths ranging from 1 to n. This feature calculator returns the number of peaks that occur at enough width scales and with sufficiently high Signal-to-Noise-Ratio (SNR) :param x: the time series to calculate the feature of :type x: numpy.ndarray :param n: maximum width to consider :type n: int :return: the value of this feature :return type: int """ return len(find_peaks_cwt(vector=x, widths=np.array(list(range(1, n + 1))), wavelet=ricker))
[ "def", "number_cwt_peaks", "(", "x", ",", "n", ")", ":", "return", "len", "(", "find_peaks_cwt", "(", "vector", "=", "x", ",", "widths", "=", "np", ".", "array", "(", "list", "(", "range", "(", "1", ",", "n", "+", "1", ")", ")", ")", ",", "wavelet", "=", "ricker", ")", ")" ]
This feature calculator searches for different peaks in x. To do so, x is smoothed by a ricker wavelet and for widths ranging from 1 to n. This feature calculator returns the number of peaks that occur at enough width scales and with sufficiently high Signal-to-Noise-Ratio (SNR) :param x: the time series to calculate the feature of :type x: numpy.ndarray :param n: maximum width to consider :type n: int :return: the value of this feature :return type: int
[ "This", "feature", "calculator", "searches", "for", "different", "peaks", "in", "x", ".", "To", "do", "so", "x", "is", "smoothed", "by", "a", "ricker", "wavelet", "and", "for", "widths", "ranging", "from", "1", "to", "n", ".", "This", "feature", "calculator", "returns", "the", "number", "of", "peaks", "that", "occur", "at", "enough", "width", "scales", "and", "with", "sufficiently", "high", "Signal", "-", "to", "-", "Noise", "-", "Ratio", "(", "SNR", ")" ]
python
train
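A small usage sketch for the feature calculator above, on a synthetic sine-plus-noise series; the import path follows the record's module path.

```python
import numpy as np
from tsfresh.feature_extraction.feature_calculators import number_cwt_peaks

# Synthetic series: two full sine periods with mild noise.
x = np.sin(np.linspace(0, 4 * np.pi, 200)) + 0.1 * np.random.randn(200)
print(number_cwt_peaks(x, n=5))   # peaks supported across CWT widths 1..5
```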
SavinaRoja/OpenAccess_EPUB
src/openaccess_epub/utils/inputs.py
https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/inputs.py#L69-L88
def url_input(url_string, download=True): """ This method expects a direct URL link to an xml file. It will apply no modifications to the received URL string, so ensure good input. """ log.debug('URL Input - {0}'.format(url_string)) try: open_xml = urllib.request.urlopen(url_string) except urllib.error.URLError as err: print('utils.input.url_input received a bad URL, or could not connect') raise err else: #Employ a quick check on the mimetype of the link if not open_xml.headers['Content-Type'] == 'text/xml': sys.exit('URL request does not appear to be XML') filename = open_xml.headers['Content-Disposition'].split('\"')[1] if download: with open(filename, 'wb') as xml_file: xml_file.write(open_xml.read()) return openaccess_epub.utils.file_root_name(filename)
[ "def", "url_input", "(", "url_string", ",", "download", "=", "True", ")", ":", "log", ".", "debug", "(", "'URL Input - {0}'", ".", "format", "(", "url_string", ")", ")", "try", ":", "open_xml", "=", "urllib", ".", "request", ".", "urlopen", "(", "url_string", ")", "except", "urllib", ".", "error", ".", "URLError", "as", "err", ":", "print", "(", "'utils.input.url_input received a bad URL, or could not connect'", ")", "raise", "err", "else", ":", "#Employ a quick check on the mimetype of the link", "if", "not", "open_xml", ".", "headers", "[", "'Content-Type'", "]", "==", "'text/xml'", ":", "sys", ".", "exit", "(", "'URL request does not appear to be XML'", ")", "filename", "=", "open_xml", ".", "headers", "[", "'Content-Disposition'", "]", ".", "split", "(", "'\\\"'", ")", "[", "1", "]", "if", "download", ":", "with", "open", "(", "filename", ",", "'wb'", ")", "as", "xml_file", ":", "xml_file", ".", "write", "(", "open_xml", ".", "read", "(", ")", ")", "return", "openaccess_epub", ".", "utils", ".", "file_root_name", "(", "filename", ")" ]
This method expects a direct URL link to an xml file. It will apply no modifications to the received URL string, so ensure good input.
[ "This", "method", "expects", "a", "direct", "URL", "link", "to", "an", "xml", "file", ".", "It", "will", "apply", "no", "modifications", "to", "the", "received", "URL", "string", "so", "ensure", "good", "input", "." ]
python
train
Microsoft/knack
knack/cli.py
https://github.com/Microsoft/knack/blob/5f1a480a33f103e2688c46eef59fb2d9eaf2baad/knack/cli.py#L164-L170
def exception_handler(self, ex): # pylint: disable=no-self-use """ The default exception handler """ if isinstance(ex, CLIError): logger.error(ex) else: logger.exception(ex) return 1
[ "def", "exception_handler", "(", "self", ",", "ex", ")", ":", "# pylint: disable=no-self-use", "if", "isinstance", "(", "ex", ",", "CLIError", ")", ":", "logger", ".", "error", "(", "ex", ")", "else", ":", "logger", ".", "exception", "(", "ex", ")", "return", "1" ]
The default exception handler
[ "The", "default", "exception", "handler" ]
python
train
basho/riak-python-client
riak/codecs/http.py
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/codecs/http.py#L264-L277
def _parse_content_type(self, value): """ Split the content-type header into two parts: 1) Actual main/sub encoding type 2) charset :param value: Complete MIME content-type string """ content_type, params = parse_header(value) if 'charset' in params: charset = params['charset'] else: charset = None return content_type, charset
[ "def", "_parse_content_type", "(", "self", ",", "value", ")", ":", "content_type", ",", "params", "=", "parse_header", "(", "value", ")", "if", "'charset'", "in", "params", ":", "charset", "=", "params", "[", "'charset'", "]", "else", ":", "charset", "=", "None", "return", "content_type", ",", "charset" ]
Split the content-type header into two parts: 1) Actual main/sub encoding type 2) charset :param value: Complete MIME content-type string
[ "Split", "the", "content", "-", "type", "header", "into", "two", "parts", ":", "1", ")", "Actual", "main", "/", "sub", "encoding", "type", "2", ")", "charset" ]
python
train
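A standalone sketch of the same header split, assuming parse_header refers to the standard-library helper (cgi.parse_header, deprecated in recent Python releases) rather than the riak-internal import.

```python
from cgi import parse_header   # deprecated since Python 3.11, shown for illustration

content_type, params = parse_header('application/json; charset=UTF-8')
charset = params.get('charset')   # None if no charset parameter is present
print(content_type, charset)      # application/json UTF-8
```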
pydot/pydot
pydot.py
https://github.com/pydot/pydot/blob/48ba231b36012c5e13611807c9aac7d8ae8c15c4/pydot.py#L654-L686
def to_string(self): """Return string representation of node in DOT language.""" # RMF: special case defaults for node, edge and graph properties. # node = quote_if_necessary(self.obj_dict['name']) node_attr = list() for attr in sorted(self.obj_dict['attributes']): value = self.obj_dict['attributes'][attr] if value == '': value = '""' if value is not None: node_attr.append( '%s=%s' % (attr, quote_if_necessary(value) ) ) else: node_attr.append( attr ) # No point in having nodes setting any defaults if the don't set # any attributes... # if node in ('graph', 'node', 'edge') and len(node_attr) == 0: return '' node_attr = ', '.join(node_attr) if node_attr: node += ' [' + node_attr + ']' return node + ';'
[ "def", "to_string", "(", "self", ")", ":", "# RMF: special case defaults for node, edge and graph properties.", "#", "node", "=", "quote_if_necessary", "(", "self", ".", "obj_dict", "[", "'name'", "]", ")", "node_attr", "=", "list", "(", ")", "for", "attr", "in", "sorted", "(", "self", ".", "obj_dict", "[", "'attributes'", "]", ")", ":", "value", "=", "self", ".", "obj_dict", "[", "'attributes'", "]", "[", "attr", "]", "if", "value", "==", "''", ":", "value", "=", "'\"\"'", "if", "value", "is", "not", "None", ":", "node_attr", ".", "append", "(", "'%s=%s'", "%", "(", "attr", ",", "quote_if_necessary", "(", "value", ")", ")", ")", "else", ":", "node_attr", ".", "append", "(", "attr", ")", "# No point in having nodes setting any defaults if the don't set", "# any attributes...", "#", "if", "node", "in", "(", "'graph'", ",", "'node'", ",", "'edge'", ")", "and", "len", "(", "node_attr", ")", "==", "0", ":", "return", "''", "node_attr", "=", "', '", ".", "join", "(", "node_attr", ")", "if", "node_attr", ":", "node", "+=", "' ['", "+", "node_attr", "+", "']'", "return", "node", "+", "';'" ]
Return string representation of node in DOT language.
[ "Return", "string", "representation", "of", "node", "in", "DOT", "language", "." ]
python
train
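A brief usage sketch for Node.to_string; the attribute names and values are illustrative, and the rendering (quoting, sorted attribute order) follows the quote_if_necessary behaviour in the code above.

```python
import pydot

node = pydot.Node('box1', label='hello world', shape='record')
print(node.to_string())
# -> box1 [label="hello world", shape=record];
```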
edx/edx-enterprise
enterprise/utils.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/utils.py#L570-L600
def ungettext_min_max(singular, plural, range_text, min_val, max_val): """ Return grammatically correct, translated text based off of a minimum and maximum value. Example: min = 1, max = 1, singular = '{} hour required for this course', plural = '{} hours required for this course' output = '1 hour required for this course' min = 2, max = 2, singular = '{} hour required for this course', plural = '{} hours required for this course' output = '2 hours required for this course' min = 2, max = 4, range_text = '{}-{} hours required for this course' output = '2-4 hours required for this course' min = None, max = 2, plural = '{} hours required for this course' output = '2 hours required for this course' Expects ``range_text`` to already have a translation function called on it. Returns: ``None`` if both of the input values are ``None``. ``singular`` formatted if both are equal or one of the inputs, but not both, are ``None``, and the value is 1. ``plural`` formatted if both are equal or one of its inputs, but not both, are ``None``, and the value is > 1. ``range_text`` formatted if min != max and both are valid values. """ if min_val is None and max_val is None: return None if min_val == max_val or min_val is None or max_val is None: # pylint: disable=translation-of-non-string return ungettext(singular, plural, min_val or max_val).format(min_val or max_val) return range_text.format(min_val, max_val)
[ "def", "ungettext_min_max", "(", "singular", ",", "plural", ",", "range_text", ",", "min_val", ",", "max_val", ")", ":", "if", "min_val", "is", "None", "and", "max_val", "is", "None", ":", "return", "None", "if", "min_val", "==", "max_val", "or", "min_val", "is", "None", "or", "max_val", "is", "None", ":", "# pylint: disable=translation-of-non-string", "return", "ungettext", "(", "singular", ",", "plural", ",", "min_val", "or", "max_val", ")", ".", "format", "(", "min_val", "or", "max_val", ")", "return", "range_text", ".", "format", "(", "min_val", ",", "max_val", ")" ]
Return grammatically correct, translated text based off of a minimum and maximum value. Example: min = 1, max = 1, singular = '{} hour required for this course', plural = '{} hours required for this course' output = '1 hour required for this course' min = 2, max = 2, singular = '{} hour required for this course', plural = '{} hours required for this course' output = '2 hours required for this course' min = 2, max = 4, range_text = '{}-{} hours required for this course' output = '2-4 hours required for this course' min = None, max = 2, plural = '{} hours required for this course' output = '2 hours required for this course' Expects ``range_text`` to already have a translation function called on it. Returns: ``None`` if both of the input values are ``None``. ``singular`` formatted if both are equal or one of the inputs, but not both, are ``None``, and the value is 1. ``plural`` formatted if both are equal or one of its inputs, but not both, are ``None``, and the value is > 1. ``range_text`` formatted if min != max and both are valid values.
[ "Return", "grammatically", "correct", "translated", "text", "based", "off", "of", "a", "minimum", "and", "maximum", "value", "." ]
python
valid
seibert-media/Highton
highton/models/company.py
https://github.com/seibert-media/Highton/blob/1519e4fb105f62882c2e7bc81065d994649558d8/highton/models/company.py#L79-L92
def people(self): """ Retrieve all people of the company :return: list of people objects :rtype: list """ return fields.ListField(name=HightonConstants.PEOPLE, init_class=Person).decode( self.element_from_string( self._get_request( endpoint=self.ENDPOINT + '/' + str(self.id) + '/people', ).text ) )
[ "def", "people", "(", "self", ")", ":", "return", "fields", ".", "ListField", "(", "name", "=", "HightonConstants", ".", "PEOPLE", ",", "init_class", "=", "Person", ")", ".", "decode", "(", "self", ".", "element_from_string", "(", "self", ".", "_get_request", "(", "endpoint", "=", "self", ".", "ENDPOINT", "+", "'/'", "+", "str", "(", "self", ".", "id", ")", "+", "'/people'", ",", ")", ".", "text", ")", ")" ]
Retrieve all people of the company :return: list of people objects :rtype: list
[ "Retrieve", "all", "people", "of", "the", "company" ]
python
train
klmitch/framer
framer/framers.py
https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/framers.py#L700-L743
def to_frame(self, data, state): """ Extract a single frame from the data buffer. The consumed data should be removed from the buffer. If no complete frame can be read, must raise a ``NoFrames`` exception. :param data: A ``bytearray`` instance containing the data so far read. :param state: An instance of ``FramerState``. If the buffer contains a partial frame, this object can be used to store state information to allow the remainder of the frame to be read. :returns: A frame. The frame may be any object. The stock framers always return bytes. """ # Find the next packet start if not state.frame_start: # Find the begin sequence idx = data.find(self.begin) if idx < 0: # Couldn't find one raise exc.NoFrames() # Excise the begin sequence del data[:idx + len(self.begin)] # Now see if we can find the end sequence idx = data.find(self.end) if idx < 0: # We've found the start, but don't have the end yet state.frame_start = True raise exc.NoFrames() # Extract the frame frame = six.binary_type(data[:idx]) del data[:idx + len(self.end)] # Update the state state.frame_start = False # Unstuff the frame and return it return self.prefix.join(frame.split(self.nop))
[ "def", "to_frame", "(", "self", ",", "data", ",", "state", ")", ":", "# Find the next packet start", "if", "not", "state", ".", "frame_start", ":", "# Find the begin sequence", "idx", "=", "data", ".", "find", "(", "self", ".", "begin", ")", "if", "idx", "<", "0", ":", "# Couldn't find one", "raise", "exc", ".", "NoFrames", "(", ")", "# Excise the begin sequence", "del", "data", "[", ":", "idx", "+", "len", "(", "self", ".", "begin", ")", "]", "# Now see if we can find the end sequence", "idx", "=", "data", ".", "find", "(", "self", ".", "end", ")", "if", "idx", "<", "0", ":", "# We've found the start, but don't have the end yet", "state", ".", "frame_start", "=", "True", "raise", "exc", ".", "NoFrames", "(", ")", "# Extract the frame", "frame", "=", "six", ".", "binary_type", "(", "data", "[", ":", "idx", "]", ")", "del", "data", "[", ":", "idx", "+", "len", "(", "self", ".", "end", ")", "]", "# Update the state", "state", ".", "frame_start", "=", "False", "# Unstuff the frame and return it", "return", "self", ".", "prefix", ".", "join", "(", "frame", ".", "split", "(", "self", ".", "nop", ")", ")" ]
Extract a single frame from the data buffer. The consumed data should be removed from the buffer. If no complete frame can be read, must raise a ``NoFrames`` exception. :param data: A ``bytearray`` instance containing the data so far read. :param state: An instance of ``FramerState``. If the buffer contains a partial frame, this object can be used to store state information to allow the remainder of the frame to be read. :returns: A frame. The frame may be any object. The stock framers always return bytes.
[ "Extract", "a", "single", "frame", "from", "the", "data", "buffer", ".", "The", "consumed", "data", "should", "be", "removed", "from", "the", "buffer", ".", "If", "no", "complete", "frame", "can", "be", "read", "must", "raise", "a", "NoFrames", "exception", "." ]
python
train
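A simplified standalone sketch of the begin/end delimited framing idea described above; the delimiters are illustrative, and the partial-frame state tracking and byte-unstuffing (nop/prefix) of the real framer are omitted, so this is not the framer implementation itself.

```python
def extract_frame(buf: bytearray, begin: bytes, end: bytes):
    start = buf.find(begin)
    if start < 0:
        return None                              # no frame start seen yet
    stop = buf.find(end, start + len(begin))
    if stop < 0:
        return None                              # start seen, end not yet received
    frame = bytes(buf[start + len(begin):stop])  # payload between the markers
    del buf[:stop + len(end)]                    # consume garbage, frame, end marker
    return frame

buf = bytearray(b'garbage\x02hello\x03rest')
print(extract_frame(buf, b'\x02', b'\x03'))   # b'hello'
print(bytes(buf))                             # b'rest'
```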
mieubrisse/wunderpy2
wunderpy2/wunderclient.py
https://github.com/mieubrisse/wunderpy2/blob/7106b6c13ca45ef4d56f805753c93258d5b822c2/wunderpy2/wunderclient.py#L67-L69
def get_tasks(self, list_id, completed=False): ''' Gets tasks for the list with the given ID, filtered by the given completion flag ''' return tasks_endpoint.get_tasks(self, list_id, completed=completed)
[ "def", "get_tasks", "(", "self", ",", "list_id", ",", "completed", "=", "False", ")", ":", "return", "tasks_endpoint", ".", "get_tasks", "(", "self", ",", "list_id", ",", "completed", "=", "completed", ")" ]
Gets tasks for the list with the given ID, filtered by the given completion flag
[ "Gets", "tasks", "for", "the", "list", "with", "the", "given", "ID", "filtered", "by", "the", "given", "completion", "flag" ]
python
train
bretth/woven
woven/linux.py
https://github.com/bretth/woven/blob/ec1da7b401a335f43129e7115fe7a4d145649f1e/woven/linux.py#L186-L242
def install_packages(): """ Install a set of baseline packages and configure where necessary """ if env.verbosity: print env.host, "INSTALLING & CONFIGURING NODE PACKAGES:" #Get a list of installed packages p = run("dpkg -l | awk '/ii/ {print $2}'").split('\n') #Remove apparmor - TODO we may enable this later if env.overwrite or not server_state('apparmor-disabled') and 'apparmor' in p: with settings(warn_only=True): sudo('/etc/init.d/apparmor stop') sudo('update-rc.d -f apparmor remove') set_server_state('apparmor-disabled') #The principle we will use is to only install configurations and packages #if they do not already exist (ie not manually installed or other method) env.installed_packages[env.host] = [] role = env.role_lookup[env.host_string] packages = get_packages() for package in packages: if not package in p: install_package(package) if env.verbosity: print ' * installed',package env.installed_packages[env.host].append(package) if env.overwrite or env.installed_packages[env.host]: #always store the latest complete list set_server_state('packages_installed', packages) env.installed_packages[env.host] = packages if env.overwrite and 'apache2' in env.installed_packages[env.host]: #some sensible defaults -might move to putting this config in a template sudo("rm -f /etc/apache2/sites-enabled/000-default") sed('/etc/apache2/apache2.conf',before='KeepAlive On',after='KeepAlive Off',use_sudo=True, backup='') sed('/etc/apache2/apache2.conf',before='StartServers 2', after='StartServers 1', use_sudo=True, backup='') sed('/etc/apache2/apache2.conf',before='MaxClients 150', after='MaxClients 100', use_sudo=True, backup='') for module in env.APACHE_DISABLE_MODULES: sudo('rm -f /etc/apache2/mods-enabled/%s*'% module) #Install base python packages #We'll use easy_install at this stage since it doesn't download if the package #is current whereas pip always downloads. #Once both these packages mature we'll move to using the standard Ubuntu packages if (env.overwrite or not server_state('pip-venv-wrapper-installed')) and 'python-setuptools' in packages: sudo("easy_install virtualenv") sudo("easy_install pip") sudo("easy_install virtualenvwrapper") if env.verbosity: print " * easy installed pip, virtualenv, virtualenvwrapper" set_server_state('pip-venv-wrapper-installed') if not contains("/home/%s/.profile"% env.user,"source /usr/local/bin/virtualenvwrapper.sh"): append("/home/%s/.profile"% env.user, "export WORKON_HOME=$HOME/env") append("/home/%s/.profile"% env.user, "source /usr/local/bin/virtualenvwrapper.sh") #cleanup after easy_install sudo("rm -rf build")
[ "def", "install_packages", "(", ")", ":", "if", "env", ".", "verbosity", ":", "print", "env", ".", "host", ",", "\"INSTALLING & CONFIGURING NODE PACKAGES:\"", "#Get a list of installed packages", "p", "=", "run", "(", "\"dpkg -l | awk '/ii/ {print $2}'\"", ")", ".", "split", "(", "'\\n'", ")", "#Remove apparmor - TODO we may enable this later", "if", "env", ".", "overwrite", "or", "not", "server_state", "(", "'apparmor-disabled'", ")", "and", "'apparmor'", "in", "p", ":", "with", "settings", "(", "warn_only", "=", "True", ")", ":", "sudo", "(", "'/etc/init.d/apparmor stop'", ")", "sudo", "(", "'update-rc.d -f apparmor remove'", ")", "set_server_state", "(", "'apparmor-disabled'", ")", "#The principle we will use is to only install configurations and packages", "#if they do not already exist (ie not manually installed or other method)", "env", ".", "installed_packages", "[", "env", ".", "host", "]", "=", "[", "]", "role", "=", "env", ".", "role_lookup", "[", "env", ".", "host_string", "]", "packages", "=", "get_packages", "(", ")", "for", "package", "in", "packages", ":", "if", "not", "package", "in", "p", ":", "install_package", "(", "package", ")", "if", "env", ".", "verbosity", ":", "print", "' * installed'", ",", "package", "env", ".", "installed_packages", "[", "env", ".", "host", "]", ".", "append", "(", "package", ")", "if", "env", ".", "overwrite", "or", "env", ".", "installed_packages", "[", "env", ".", "host", "]", ":", "#always store the latest complete list", "set_server_state", "(", "'packages_installed'", ",", "packages", ")", "env", ".", "installed_packages", "[", "env", ".", "host", "]", "=", "packages", "if", "env", ".", "overwrite", "and", "'apache2'", "in", "env", ".", "installed_packages", "[", "env", ".", "host", "]", ":", "#some sensible defaults -might move to putting this config in a template", "sudo", "(", "\"rm -f /etc/apache2/sites-enabled/000-default\"", ")", "sed", "(", "'/etc/apache2/apache2.conf'", ",", "before", "=", "'KeepAlive On'", ",", "after", "=", "'KeepAlive Off'", ",", "use_sudo", "=", "True", ",", "backup", "=", "''", ")", "sed", "(", "'/etc/apache2/apache2.conf'", ",", "before", "=", "'StartServers 2'", ",", "after", "=", "'StartServers 1'", ",", "use_sudo", "=", "True", ",", "backup", "=", "''", ")", "sed", "(", "'/etc/apache2/apache2.conf'", ",", "before", "=", "'MaxClients 150'", ",", "after", "=", "'MaxClients 100'", ",", "use_sudo", "=", "True", ",", "backup", "=", "''", ")", "for", "module", "in", "env", ".", "APACHE_DISABLE_MODULES", ":", "sudo", "(", "'rm -f /etc/apache2/mods-enabled/%s*'", "%", "module", ")", "#Install base python packages", "#We'll use easy_install at this stage since it doesn't download if the package", "#is current whereas pip always downloads.", "#Once both these packages mature we'll move to using the standard Ubuntu packages", "if", "(", "env", ".", "overwrite", "or", "not", "server_state", "(", "'pip-venv-wrapper-installed'", ")", ")", "and", "'python-setuptools'", "in", "packages", ":", "sudo", "(", "\"easy_install virtualenv\"", ")", "sudo", "(", "\"easy_install pip\"", ")", "sudo", "(", "\"easy_install virtualenvwrapper\"", ")", "if", "env", ".", "verbosity", ":", "print", "\" * easy installed pip, virtualenv, virtualenvwrapper\"", "set_server_state", "(", "'pip-venv-wrapper-installed'", ")", "if", "not", "contains", "(", "\"/home/%s/.profile\"", "%", "env", ".", "user", ",", "\"source /usr/local/bin/virtualenvwrapper.sh\"", ")", ":", "append", "(", "\"/home/%s/.profile\"", "%", "env", ".", "user", ",", 
"\"export WORKON_HOME=$HOME/env\"", ")", "append", "(", "\"/home/%s/.profile\"", "%", "env", ".", "user", ",", "\"source /usr/local/bin/virtualenvwrapper.sh\"", ")", "#cleanup after easy_install", "sudo", "(", "\"rm -rf build\"", ")" ]
Install a set of baseline packages and configure where necessary
[ "Install", "a", "set", "of", "baseline", "packages", "and", "configure", "where", "necessary" ]
python
train
pypa/pipenv
pipenv/vendor/distlib/_backport/tarfile.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L516-L545
def _init_read_gz(self): """Initialize for reading a gzip compressed fileobj. """ self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS) self.dbuf = b"" # taken from gzip.GzipFile with some alterations if self.__read(2) != b"\037\213": raise ReadError("not a gzip file") if self.__read(1) != b"\010": raise CompressionError("unsupported compression method") flag = ord(self.__read(1)) self.__read(6) if flag & 4: xlen = ord(self.__read(1)) + 256 * ord(self.__read(1)) self.read(xlen) if flag & 8: while True: s = self.__read(1) if not s or s == NUL: break if flag & 16: while True: s = self.__read(1) if not s or s == NUL: break if flag & 2: self.__read(2)
[ "def", "_init_read_gz", "(", "self", ")", ":", "self", ".", "cmp", "=", "self", ".", "zlib", ".", "decompressobj", "(", "-", "self", ".", "zlib", ".", "MAX_WBITS", ")", "self", ".", "dbuf", "=", "b\"\"", "# taken from gzip.GzipFile with some alterations", "if", "self", ".", "__read", "(", "2", ")", "!=", "b\"\\037\\213\"", ":", "raise", "ReadError", "(", "\"not a gzip file\"", ")", "if", "self", ".", "__read", "(", "1", ")", "!=", "b\"\\010\"", ":", "raise", "CompressionError", "(", "\"unsupported compression method\"", ")", "flag", "=", "ord", "(", "self", ".", "__read", "(", "1", ")", ")", "self", ".", "__read", "(", "6", ")", "if", "flag", "&", "4", ":", "xlen", "=", "ord", "(", "self", ".", "__read", "(", "1", ")", ")", "+", "256", "*", "ord", "(", "self", ".", "__read", "(", "1", ")", ")", "self", ".", "read", "(", "xlen", ")", "if", "flag", "&", "8", ":", "while", "True", ":", "s", "=", "self", ".", "__read", "(", "1", ")", "if", "not", "s", "or", "s", "==", "NUL", ":", "break", "if", "flag", "&", "16", ":", "while", "True", ":", "s", "=", "self", ".", "__read", "(", "1", ")", "if", "not", "s", "or", "s", "==", "NUL", ":", "break", "if", "flag", "&", "2", ":", "self", ".", "__read", "(", "2", ")" ]
Initialize for reading a gzip compressed fileobj.
[ "Initialize", "for", "reading", "a", "gzip", "compressed", "fileobj", "." ]
python
train
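A hedged, standalone sketch of the RFC 1952 header fields the method above walks through, using a gzip blob produced in-process; the FCOMMENT and FHCRC branches are elided for brevity.

```python
import gzip
import io
import struct

blob = gzip.compress(b'payload')
f = io.BytesIO(blob)

magic, method, flags = struct.unpack('<2sBB', f.read(4))
assert magic == b'\x1f\x8b' and method == 8   # the same two checks as above
f.read(6)                                     # mtime (4 bytes), XFL, OS
if flags & 4:                                 # FEXTRA: little-endian length, then data
    (xlen,) = struct.unpack('<H', f.read(2))
    f.read(xlen)
if flags & 8:                                 # FNAME: zero-terminated file name
    while f.read(1) not in (b'', b'\x00'):
        pass
print('deflate stream starts at offset', f.tell())
```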
contains-io/typet
typet/validation.py
https://github.com/contains-io/typet/blob/ad5087c567af84db299eca186776e1cee228e442/typet/validation.py#L462-L478
def _get_args(cls, args): # type: (tuple) -> Tuple[type, slice, Callable] """Return the parameters necessary to check type boundaries. Args: args: A slice representing the minimum and maximum lengths allowed for values of that string. Returns: A tuple with three parameters: a type, a slice, and the len function. """ if isinstance(args, tuple): raise TypeError( "{}[...] takes exactly one argument.".format(cls.__name__) ) return super(_StringMeta, cls)._get_args((_STR_TYPE, args))
[ "def", "_get_args", "(", "cls", ",", "args", ")", ":", "# type: (tuple) -> Tuple[type, slice, Callable]", "if", "isinstance", "(", "args", ",", "tuple", ")", ":", "raise", "TypeError", "(", "\"{}[...] takes exactly one argument.\"", ".", "format", "(", "cls", ".", "__name__", ")", ")", "return", "super", "(", "_StringMeta", ",", "cls", ")", ".", "_get_args", "(", "(", "_STR_TYPE", ",", "args", ")", ")" ]
Return the parameters necessary to check type boundaries. Args: args: A slice representing the minimum and maximum lengths allowed for values of that string. Returns: A tuple with three parameters: a type, a slice, and the len function.
[ "Return", "the", "parameters", "necessary", "to", "check", "type", "boundaries", "." ]
python
train
carlthome/python-audio-effects
pysndfx/dsp.py
https://github.com/carlthome/python-audio-effects/blob/b2d85c166720c549c6ef3c382b561edd09229722/pysndfx/dsp.py#L278-L282
def gain(self, db): """gain takes one paramter: gain in dB.""" self.command.append('gain') self.command.append(db) return self
[ "def", "gain", "(", "self", ",", "db", ")", ":", "self", ".", "command", ".", "append", "(", "'gain'", ")", "self", ".", "command", ".", "append", "(", "db", ")", "return", "self" ]
gain takes one paramter: gain in dB.
[ "gain", "takes", "one", "paramter", ":", "gain", "in", "dB", "." ]
python
train
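A short usage sketch for the chained gain effect; it assumes SoX is installed, and the file names are illustrative.

```python
from pysndfx import AudioEffectsChain

fx = AudioEffectsChain().gain(-6)   # attenuate by 6 dB
fx('input.wav', 'output.wav')       # file names are illustrative; requires SoX
```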
opennode/waldur-core
waldur_core/core/tasks.py
https://github.com/opennode/waldur-core/blob/d6c17a9592bb6c49c33567542eef8d099605a46a/waldur_core/core/tasks.py#L402-L413
def apply_async(self, args=None, kwargs=None, **options): """ Checks whether task must be skipped and decreases the counter in that case. """ key = self._get_cache_key(args, kwargs) counter, penalty = cache.get(key, (0, 0)) if not counter: return super(PenalizedBackgroundTask, self).apply_async(args=args, kwargs=kwargs, **options) cache.set(key, (counter - 1, penalty), self.CACHE_LIFETIME) logger.info('The task %s will not be executed due to the penalty.' % self.name) return self.AsyncResult(options.get('task_id') or str(uuid4()))
[ "def", "apply_async", "(", "self", ",", "args", "=", "None", ",", "kwargs", "=", "None", ",", "*", "*", "options", ")", ":", "key", "=", "self", ".", "_get_cache_key", "(", "args", ",", "kwargs", ")", "counter", ",", "penalty", "=", "cache", ".", "get", "(", "key", ",", "(", "0", ",", "0", ")", ")", "if", "not", "counter", ":", "return", "super", "(", "PenalizedBackgroundTask", ",", "self", ")", ".", "apply_async", "(", "args", "=", "args", ",", "kwargs", "=", "kwargs", ",", "*", "*", "options", ")", "cache", ".", "set", "(", "key", ",", "(", "counter", "-", "1", ",", "penalty", ")", ",", "self", ".", "CACHE_LIFETIME", ")", "logger", ".", "info", "(", "'The task %s will not be executed due to the penalty.'", "%", "self", ".", "name", ")", "return", "self", ".", "AsyncResult", "(", "options", ".", "get", "(", "'task_id'", ")", "or", "str", "(", "uuid4", "(", ")", ")", ")" ]
Checks whether task must be skipped and decreases the counter in that case.
[ "Checks", "whether", "task", "must", "be", "skipped", "and", "decreases", "the", "counter", "in", "that", "case", "." ]
python
train
CellProfiler/centrosome
centrosome/neighmovetrack.py
https://github.com/CellProfiler/centrosome/blob/7bd9350a2d4ae1b215b81eabcecfe560bbb1f32a/centrosome/neighmovetrack.py#L363-L386
def solve_assignement(self, costs): """ Solves assignment problem using Hungarian implementation by Brian M. Clapper. @param costs: square cost matrix @return: assignment function @rtype: int->int """ if costs is None or len(costs) == 0: return dict() n = costs.shape[0] pairs = [(i, j) for i in range(0, n) for j in range(0, n) if costs[i, j] < invalid_match] costs_list = [costs[i, j] for (i, j) in pairs] assignment = lapjv.lapjv(list(zip(*pairs))[0], list(zip(*pairs))[1], costs_list) indexes = enumerate(list(assignment[0])) return dict([(row, col) for row, col in indexes])
[ "def", "solve_assignement", "(", "self", ",", "costs", ")", ":", "if", "costs", "is", "None", "or", "len", "(", "costs", ")", "==", "0", ":", "return", "dict", "(", ")", "n", "=", "costs", ".", "shape", "[", "0", "]", "pairs", "=", "[", "(", "i", ",", "j", ")", "for", "i", "in", "range", "(", "0", ",", "n", ")", "for", "j", "in", "range", "(", "0", ",", "n", ")", "if", "costs", "[", "i", ",", "j", "]", "<", "invalid_match", "]", "costs_list", "=", "[", "costs", "[", "i", ",", "j", "]", "for", "(", "i", ",", "j", ")", "in", "pairs", "]", "assignment", "=", "lapjv", ".", "lapjv", "(", "list", "(", "zip", "(", "*", "pairs", ")", ")", "[", "0", "]", ",", "list", "(", "zip", "(", "*", "pairs", ")", ")", "[", "1", "]", ",", "costs_list", ")", "indexes", "=", "enumerate", "(", "list", "(", "assignment", "[", "0", "]", ")", ")", "return", "dict", "(", "[", "(", "row", ",", "col", ")", "for", "row", ",", "col", "in", "indexes", "]", ")" ]
Solves assignment problem using Hungarian implementation by Brian M. Clapper. @param costs: square cost matrix @return: assignment function @rtype: int->int
[ "Solves", "assignment", "problem", "using", "Hungarian", "implementation", "by", "Brian", "M", ".", "Clapper", "." ]
python
train
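A hedged sketch of the row-to-column mapping solve_assignement produces; SciPy's linear_sum_assignment stands in for the lapjv solver and the cost matrix is made up:

import numpy as np
from scipy.optimize import linear_sum_assignment  # stand-in for lapjv

costs = np.array([[4.0, 1.0, 3.0],
                  [2.0, 0.0, 5.0],
                  [3.0, 2.0, 2.0]])

rows, cols = linear_sum_assignment(costs)
assignment = dict(zip(rows.tolist(), cols.tolist()))
print(assignment)  # {0: 1, 1: 0, 2: 2} -- the same row -> column mapping shape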
mikusjelly/apkutils
apkutils/apkfile.py
https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L1588-L1603
def close(self): """Close the file, and for mode 'w', 'x' and 'a' write the ending records.""" if self.fp is None: return try: if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records with self._lock: if self._seekable: self.fp.seek(self.start_dir) self._write_end_record() finally: fp = self.fp self.fp = None self._fpclose(fp)
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "fp", "is", "None", ":", "return", "try", ":", "if", "self", ".", "mode", "in", "(", "'w'", ",", "'x'", ",", "'a'", ")", "and", "self", ".", "_didModify", ":", "# write ending records", "with", "self", ".", "_lock", ":", "if", "self", ".", "_seekable", ":", "self", ".", "fp", ".", "seek", "(", "self", ".", "start_dir", ")", "self", ".", "_write_end_record", "(", ")", "finally", ":", "fp", "=", "self", ".", "fp", "self", ".", "fp", "=", "None", "self", ".", "_fpclose", "(", "fp", ")" ]
Close the file, and for mode 'w', 'x' and 'a' write the ending records.
[ "Close", "the", "file", "and", "for", "mode", "w", "x", "and", "a", "write", "the", "ending", "records", "." ]
python
train
pypa/pipenv
pipenv/vendor/pyparsing.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pyparsing.py#L2253-L2275
def ignore( self, other ): """ Define expression to be ignored (e.g., comments) while doing pattern matching; may be called repeatedly, to define multiple comment or other ignorable patterns. Example:: patt = OneOrMore(Word(alphas)) patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj'] patt.ignore(cStyleComment) patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd'] """ if isinstance(other, basestring): other = Suppress(other) if isinstance( other, Suppress ): if other not in self.ignoreExprs: self.ignoreExprs.append(other) else: self.ignoreExprs.append( Suppress( other.copy() ) ) return self
[ "def", "ignore", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "basestring", ")", ":", "other", "=", "Suppress", "(", "other", ")", "if", "isinstance", "(", "other", ",", "Suppress", ")", ":", "if", "other", "not", "in", "self", ".", "ignoreExprs", ":", "self", ".", "ignoreExprs", ".", "append", "(", "other", ")", "else", ":", "self", ".", "ignoreExprs", ".", "append", "(", "Suppress", "(", "other", ".", "copy", "(", ")", ")", ")", "return", "self" ]
Define expression to be ignored (e.g., comments) while doing pattern matching; may be called repeatedly, to define multiple comment or other ignorable patterns. Example:: patt = OneOrMore(Word(alphas)) patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj'] patt.ignore(cStyleComment) patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
[ "Define", "expression", "to", "be", "ignored", "(", "e", ".", "g", ".", "comments", ")", "while", "doing", "pattern", "matching", ";", "may", "be", "called", "repeatedly", "to", "define", "multiple", "comment", "or", "other", "ignorable", "patterns", "." ]
python
train
Asana/python-asana
asana/resources/gen/tasks.py
https://github.com/Asana/python-asana/blob/6deb7a34495db23f44858e53b6bb2c9eccff7872/asana/resources/gen/tasks.py#L167-L176
def dependents(self, task, params={}, **options): """Returns the compact representations of all of the dependents of a task. Parameters ---------- task : {Id} The task to get dependents on. [params] : {Object} Parameters for the request """ path = "/tasks/%s/dependents" % (task) return self.client.get(path, params, **options)
[ "def", "dependents", "(", "self", ",", "task", ",", "params", "=", "{", "}", ",", "*", "*", "options", ")", ":", "path", "=", "\"/tasks/%s/dependents\"", "%", "(", "task", ")", "return", "self", ".", "client", ".", "get", "(", "path", ",", "params", ",", "*", "*", "options", ")" ]
Returns the compact representations of all of the dependents of a task. Parameters ---------- task : {Id} The task to get dependents on. [params] : {Object} Parameters for the request
[ "Returns", "the", "compact", "representations", "of", "all", "of", "the", "dependents", "of", "a", "task", "." ]
python
train
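A hedged call-shape example for the dependents helper above; the access token and task id are placeholders, and opt_fields is an ordinary Asana request parameter:

import asana

client = asana.Client.access_token('PERSONAL_ACCESS_TOKEN')  # hypothetical token
dependents = client.tasks.dependents(1204, {'opt_fields': 'name'})  # hypothetical task id
for task in dependents:
    print(task['name'])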
barryp/py-amqplib
amqplib/client_0_8/method_framing.py
https://github.com/barryp/py-amqplib/blob/2b3a47de34b4712c111d0a55d7ff109dffc2a7b2/amqplib/client_0_8/method_framing.py#L156-L171
def _process_method_frame(self, channel, payload): """ Process Method frames """ method_sig = unpack('>HH', payload[:4]) args = AMQPReader(payload[4:]) if method_sig in _CONTENT_METHODS: # # Save what we've got so far and wait for the content-header # self.partial_messages[channel] = _PartialMessage(method_sig, args) self.expected_types[channel] = 2 else: self.queue.put((channel, method_sig, args, None))
[ "def", "_process_method_frame", "(", "self", ",", "channel", ",", "payload", ")", ":", "method_sig", "=", "unpack", "(", "'>HH'", ",", "payload", "[", ":", "4", "]", ")", "args", "=", "AMQPReader", "(", "payload", "[", "4", ":", "]", ")", "if", "method_sig", "in", "_CONTENT_METHODS", ":", "#", "# Save what we've got so far and wait for the content-header", "#", "self", ".", "partial_messages", "[", "channel", "]", "=", "_PartialMessage", "(", "method_sig", ",", "args", ")", "self", ".", "expected_types", "[", "channel", "]", "=", "2", "else", ":", "self", ".", "queue", ".", "put", "(", "(", "channel", ",", "method_sig", ",", "args", ",", "None", ")", ")" ]
Process Method frames
[ "Process", "Method", "frames" ]
python
train
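A small illustration of the '>HH' header split performed above, on a fabricated payload whose class-id 60 and method-id 40 correspond to Basic.Publish in AMQP 0-8:

from struct import pack, unpack

payload = pack('>HH', 60, 40) + b'<argument bytes>'  # hypothetical frame payload
method_sig = unpack('>HH', payload[:4])
print(method_sig)   # (60, 40)
print(payload[4:])  # the remainder handed to AMQPReader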
mikedh/trimesh
trimesh/util.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/util.py#L731-L748
def distance_to_end(file_obj): """ For an open file object how far is it to the end Parameters ---------- file_obj: open file- like object Returns ---------- distance: int, bytes to end of file """ position_current = file_obj.tell() file_obj.seek(0, 2) position_end = file_obj.tell() file_obj.seek(position_current) distance = position_end - position_current return distance
[ "def", "distance_to_end", "(", "file_obj", ")", ":", "position_current", "=", "file_obj", ".", "tell", "(", ")", "file_obj", ".", "seek", "(", "0", ",", "2", ")", "position_end", "=", "file_obj", ".", "tell", "(", ")", "file_obj", ".", "seek", "(", "position_current", ")", "distance", "=", "position_end", "-", "position_current", "return", "distance" ]
For an open file object how far is it to the end Parameters ---------- file_obj: open file- like object Returns ---------- distance: int, bytes to end of file
[ "For", "an", "open", "file", "object", "how", "far", "is", "it", "to", "the", "end" ]
python
train
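A quick usage sketch with an in-memory file, assuming distance_to_end is imported from trimesh.util as in the record above:

import io
from trimesh.util import distance_to_end

buf = io.BytesIO(b'hello world')
buf.seek(5)
print(distance_to_end(buf))  # 6 bytes left (' world')
print(buf.tell())            # 5 -- the original position is restored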
OpenKMIP/PyKMIP
kmip/pie/factory.py
https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/pie/factory.py#L36-L72
def convert(self, obj): """ Convert a Pie object into a core secret object and vice versa. Args: obj (various): A Pie or core secret object to convert into the opposite object space. Required. Raises: TypeError: if the object type is unrecognized or unsupported. """ if isinstance(obj, pobjects.SymmetricKey): return self._build_core_key(obj, secrets.SymmetricKey) elif isinstance(obj, secrets.SymmetricKey): return self._build_pie_key(obj, pobjects.SymmetricKey) elif isinstance(obj, pobjects.PublicKey): return self._build_core_key(obj, secrets.PublicKey) elif isinstance(obj, secrets.PublicKey): return self._build_pie_key(obj, pobjects.PublicKey) elif isinstance(obj, pobjects.PrivateKey): return self._build_core_key(obj, secrets.PrivateKey) elif isinstance(obj, secrets.PrivateKey): return self._build_pie_key(obj, pobjects.PrivateKey) elif isinstance(obj, pobjects.Certificate): return self._build_core_certificate(obj) elif isinstance(obj, secrets.Certificate): return self._build_pie_certificate(obj) elif isinstance(obj, pobjects.SecretData): return self._build_core_secret_data(obj) elif isinstance(obj, secrets.SecretData): return self._build_pie_secret_data(obj) elif isinstance(obj, pobjects.OpaqueObject): return self._build_core_opaque_object(obj) elif isinstance(obj, secrets.OpaqueObject): return self._build_pie_opaque_object(obj) else: raise TypeError("object type unsupported and cannot be converted")
[ "def", "convert", "(", "self", ",", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "pobjects", ".", "SymmetricKey", ")", ":", "return", "self", ".", "_build_core_key", "(", "obj", ",", "secrets", ".", "SymmetricKey", ")", "elif", "isinstance", "(", "obj", ",", "secrets", ".", "SymmetricKey", ")", ":", "return", "self", ".", "_build_pie_key", "(", "obj", ",", "pobjects", ".", "SymmetricKey", ")", "elif", "isinstance", "(", "obj", ",", "pobjects", ".", "PublicKey", ")", ":", "return", "self", ".", "_build_core_key", "(", "obj", ",", "secrets", ".", "PublicKey", ")", "elif", "isinstance", "(", "obj", ",", "secrets", ".", "PublicKey", ")", ":", "return", "self", ".", "_build_pie_key", "(", "obj", ",", "pobjects", ".", "PublicKey", ")", "elif", "isinstance", "(", "obj", ",", "pobjects", ".", "PrivateKey", ")", ":", "return", "self", ".", "_build_core_key", "(", "obj", ",", "secrets", ".", "PrivateKey", ")", "elif", "isinstance", "(", "obj", ",", "secrets", ".", "PrivateKey", ")", ":", "return", "self", ".", "_build_pie_key", "(", "obj", ",", "pobjects", ".", "PrivateKey", ")", "elif", "isinstance", "(", "obj", ",", "pobjects", ".", "Certificate", ")", ":", "return", "self", ".", "_build_core_certificate", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "secrets", ".", "Certificate", ")", ":", "return", "self", ".", "_build_pie_certificate", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "pobjects", ".", "SecretData", ")", ":", "return", "self", ".", "_build_core_secret_data", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "secrets", ".", "SecretData", ")", ":", "return", "self", ".", "_build_pie_secret_data", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "pobjects", ".", "OpaqueObject", ")", ":", "return", "self", ".", "_build_core_opaque_object", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "secrets", ".", "OpaqueObject", ")", ":", "return", "self", ".", "_build_pie_opaque_object", "(", "obj", ")", "else", ":", "raise", "TypeError", "(", "\"object type unsupported and cannot be converted\"", ")" ]
Convert a Pie object into a core secret object and vice versa. Args: obj (various): A Pie or core secret object to convert into the opposite object space. Required. Raises: TypeError: if the object type is unrecognized or unsupported.
[ "Convert", "a", "Pie", "object", "into", "a", "core", "secret", "object", "and", "vice", "versa", "." ]
python
test
baruwa-enterprise/BaruwaAPI
BaruwaAPI/resource.py
https://github.com/baruwa-enterprise/BaruwaAPI/blob/53335b377ccfd388e42f4f240f181eed72f51180/BaruwaAPI/resource.py#L94-L99
def set_user_passwd(self, userid, data): """Set user password""" return self.api_call( ENDPOINTS['users']['password'], dict(userid=userid), body=data)
[ "def", "set_user_passwd", "(", "self", ",", "userid", ",", "data", ")", ":", "return", "self", ".", "api_call", "(", "ENDPOINTS", "[", "'users'", "]", "[", "'password'", "]", ",", "dict", "(", "userid", "=", "userid", ")", ",", "body", "=", "data", ")" ]
Set user password
[ "Set", "user", "password" ]
python
train
iotile/typedargs
typedargs/shell.py
https://github.com/iotile/typedargs/blob/0a5091a664b9b4d836e091e9ba583e944f438fd8/typedargs/shell.py#L308-L380
def process_arguments(self, func, args): """Process arguments from the command line into positional and kw args. Arguments are consumed until the argument spec for the function is filled or a -- is found or there are no more arguments. Keyword arguments can be specified using --field=value, -f value or --field value. Positional arguments are specified just on the command line itself. If a keyword argument (`field`) is a boolean, it can be set to True by just passing --field or -f without needing to explicitly pass True unless this would cause ambiguity in parsing since the next expected positional argument is also a boolean or a string. Args: func (callable): A function previously annotated with type information args (list): A list of all of the potential arguments to this function. Returns: (args, kw_args, unused args): A tuple with a list of args, a dict of keyword args and a list of any unused args that were not processed. """ pos_args = [] kw_args = {} while len(args) > 0: if func.metadata.spec_filled(pos_args, kw_args) and not self._is_flag(args[0]): break arg = args.pop(0) if arg == '--': break elif self._is_flag(arg): arg_value = None arg_name = None if len(arg) == 2: arg_name = func.metadata.match_shortname(arg[1:], filled_args=pos_args) else: if not arg.startswith('--'): raise ArgumentError("Invalid method of specifying keyword argument that did not start with --", argument=arg) # Skip the -- arg = arg[2:] # Check if the value is embedded in the parameter # Make sure we allow the value after the equals sign to also # contain an equals sign. if '=' in arg: arg, arg_value = arg.split('=', 1) arg_name = func.metadata.match_shortname(arg, filled_args=pos_args) arg_type = func.metadata.param_type(arg_name) if arg_type is None: raise ArgumentError("Attempting to set a parameter from command line that does not have type information", argument=arg_name) # If we don't have a value yet, attempt to get one from the next parameter on the command line if arg_value is None: arg_value = self._extract_arg_value(arg_name, arg_type, args) kw_args[arg_name] = arg_value else: pos_args.append(arg) # Always check if there is a trailing '--' and chomp so that we always # start on a function name. This can happen if there is a gratuitous # -- for a 0 arg function or after an implicit boolean flag like -f -- if len(args) > 0 and args[0] == '--': args.pop(0) return pos_args, kw_args, args
[ "def", "process_arguments", "(", "self", ",", "func", ",", "args", ")", ":", "pos_args", "=", "[", "]", "kw_args", "=", "{", "}", "while", "len", "(", "args", ")", ">", "0", ":", "if", "func", ".", "metadata", ".", "spec_filled", "(", "pos_args", ",", "kw_args", ")", "and", "not", "self", ".", "_is_flag", "(", "args", "[", "0", "]", ")", ":", "break", "arg", "=", "args", ".", "pop", "(", "0", ")", "if", "arg", "==", "'--'", ":", "break", "elif", "self", ".", "_is_flag", "(", "arg", ")", ":", "arg_value", "=", "None", "arg_name", "=", "None", "if", "len", "(", "arg", ")", "==", "2", ":", "arg_name", "=", "func", ".", "metadata", ".", "match_shortname", "(", "arg", "[", "1", ":", "]", ",", "filled_args", "=", "pos_args", ")", "else", ":", "if", "not", "arg", ".", "startswith", "(", "'--'", ")", ":", "raise", "ArgumentError", "(", "\"Invalid method of specifying keyword argument that did not start with --\"", ",", "argument", "=", "arg", ")", "# Skip the --", "arg", "=", "arg", "[", "2", ":", "]", "# Check if the value is embedded in the parameter", "# Make sure we allow the value after the equals sign to also", "# contain an equals sign.", "if", "'='", "in", "arg", ":", "arg", ",", "arg_value", "=", "arg", ".", "split", "(", "'='", ",", "1", ")", "arg_name", "=", "func", ".", "metadata", ".", "match_shortname", "(", "arg", ",", "filled_args", "=", "pos_args", ")", "arg_type", "=", "func", ".", "metadata", ".", "param_type", "(", "arg_name", ")", "if", "arg_type", "is", "None", ":", "raise", "ArgumentError", "(", "\"Attempting to set a parameter from command line that does not have type information\"", ",", "argument", "=", "arg_name", ")", "# If we don't have a value yet, attempt to get one from the next parameter on the command line", "if", "arg_value", "is", "None", ":", "arg_value", "=", "self", ".", "_extract_arg_value", "(", "arg_name", ",", "arg_type", ",", "args", ")", "kw_args", "[", "arg_name", "]", "=", "arg_value", "else", ":", "pos_args", ".", "append", "(", "arg", ")", "# Always check if there is a trailing '--' and chomp so that we always", "# start on a function name. This can happen if there is a gratuitous", "# -- for a 0 arg function or after an implicit boolean flag like -f --", "if", "len", "(", "args", ")", ">", "0", "and", "args", "[", "0", "]", "==", "'--'", ":", "args", ".", "pop", "(", "0", ")", "return", "pos_args", ",", "kw_args", ",", "args" ]
Process arguments from the command line into positional and kw args. Arguments are consumed until the argument spec for the function is filled or a -- is found or there are no more arguments. Keyword arguments can be specified using --field=value, -f value or --field value. Positional arguments are specified just on the command line itself. If a keyword argument (`field`) is a boolean, it can be set to True by just passing --field or -f without needing to explicitly pass True unless this would cause ambiguity in parsing since the next expected positional argument is also a boolean or a string. Args: func (callable): A function previously annotated with type information args (list): A list of all of the potential arguments to this function. Returns: (args, kw_args, unused args): A tuple with a list of args, a dict of keyword args and a list of any unused args that were not processed.
[ "Process", "arguments", "from", "the", "command", "line", "into", "positional", "and", "kw", "args", "." ]
python
test
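A tiny stand-alone sketch of the '--field=value' handling described above, splitting only on the first '=' so the value may itself contain '='; no typedargs metadata is involved:

arg = '--query=name=widget'   # hypothetical keyword argument
arg = arg[2:]                 # strip the leading '--'
name, value = arg.split('=', 1)
print(name, value)            # query name=widget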
mbedmicro/pyOCD
pyocd/probe/pydapaccess/interface/pyusb_v2_backend.py
https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/pydapaccess/interface/pyusb_v2_backend.py#L168-L188
def get_all_connected_interfaces(): """! @brief Returns all the connected devices with a CMSIS-DAPv2 interface.""" # find all cmsis-dap devices try: all_devices = usb.core.find(find_all=True, custom_match=HasCmsisDapv2Interface()) except usb.core.NoBackendError: common.show_no_libusb_warning() return [] # iterate on all devices found boards = [] for board in all_devices: new_board = PyUSBv2() new_board.vid = board.idVendor new_board.pid = board.idProduct new_board.product_name = board.product new_board.vendor_name = board.manufacturer new_board.serial_number = board.serial_number boards.append(new_board) return boards
[ "def", "get_all_connected_interfaces", "(", ")", ":", "# find all cmsis-dap devices", "try", ":", "all_devices", "=", "usb", ".", "core", ".", "find", "(", "find_all", "=", "True", ",", "custom_match", "=", "HasCmsisDapv2Interface", "(", ")", ")", "except", "usb", ".", "core", ".", "NoBackendError", ":", "common", ".", "show_no_libusb_warning", "(", ")", "return", "[", "]", "# iterate on all devices found", "boards", "=", "[", "]", "for", "board", "in", "all_devices", ":", "new_board", "=", "PyUSBv2", "(", ")", "new_board", ".", "vid", "=", "board", ".", "idVendor", "new_board", ".", "pid", "=", "board", ".", "idProduct", "new_board", ".", "product_name", "=", "board", ".", "product", "new_board", ".", "vendor_name", "=", "board", ".", "manufacturer", "new_board", ".", "serial_number", "=", "board", ".", "serial_number", "boards", ".", "append", "(", "new_board", ")", "return", "boards" ]
! @brief Returns all the connected devices with a CMSIS-DAPv2 interface.
[ "!" ]
python
train
tanghaibao/jcvi
jcvi/variation/deconvolute.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/deconvolute.py#L131-L216
def split(args): """ %prog split barcodefile fastqfile1 .. Deconvolute fastq files into subsets of fastq reads, based on the barcodes in the barcodefile, which is a two-column file like: ID01 AGTCCAG Input fastqfiles can be several files. Output files are ID01.fastq, ID02.fastq, one file per line in barcodefile. When --paired is set, the number of input fastqfiles must be two. Output file (the deconvoluted reads) will be in interleaved format. """ p = OptionParser(split.__doc__) p.set_outdir(outdir="deconv") p.add_option("--nocheckprefix", default=False, action="store_true", help="Don't check shared prefix [default: %default]") p.add_option("--paired", default=False, action="store_true", help="Paired-end data [default: %default]") p.add_option("--append", default=False, action="store_true", help="Append barcode to 2nd read [default: %default]") p.set_cpus() opts, args = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) barcodefile = args[0] fastqfile = args[1:] paired = opts.paired append = opts.append if append: assert paired, "--append only works with --paired" nfiles = len(fastqfile) barcodes = [] fp = open(barcodefile) for row in fp: id, seq = row.split() for s in unpack_ambiguous(seq): barcodes.append(BarcodeLine._make((id, s))) nbc = len(barcodes) logging.debug("Imported {0} barcodes (ambiguous codes expanded).".format(nbc)) checkprefix = not opts.nocheckprefix if checkprefix: # Sanity check of shared prefix excludebarcodes = [] for bc in barcodes: exclude = [] for s in barcodes: if bc.id == s.id: continue assert bc.seq != s.seq if s.seq.startswith(bc.seq) and len(s.seq) > len(bc.seq): logging.error("{0} shares same prefix as {1}.".format(s, bc)) exclude.append(s) excludebarcodes.append(exclude) else: excludebarcodes = nbc * [[]] outdir = opts.outdir mkdir(outdir) cpus = opts.cpus logging.debug("Create a pool of {0} workers.".format(cpus)) pool = Pool(cpus) if paired: assert nfiles == 2, "You asked for --paired, but sent in {0} files".\ format(nfiles) split_fun = append_barcode_paired if append else split_barcode_paired mode = "paired" else: split_fun = split_barcode mode = "single" logging.debug("Mode: {0}".format(mode)) pool.map(split_fun, \ zip(barcodes, excludebarcodes, nbc * [outdir], nbc * [fastqfile]))
[ "def", "split", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "split", ".", "__doc__", ")", "p", ".", "set_outdir", "(", "outdir", "=", "\"deconv\"", ")", "p", ".", "add_option", "(", "\"--nocheckprefix\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Don't check shared prefix [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--paired\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Paired-end data [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--append\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Append barcode to 2nd read [default: %default]\"", ")", "p", ".", "set_cpus", "(", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "<", "2", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "barcodefile", "=", "args", "[", "0", "]", "fastqfile", "=", "args", "[", "1", ":", "]", "paired", "=", "opts", ".", "paired", "append", "=", "opts", ".", "append", "if", "append", ":", "assert", "paired", ",", "\"--append only works with --paired\"", "nfiles", "=", "len", "(", "fastqfile", ")", "barcodes", "=", "[", "]", "fp", "=", "open", "(", "barcodefile", ")", "for", "row", "in", "fp", ":", "id", ",", "seq", "=", "row", ".", "split", "(", ")", "for", "s", "in", "unpack_ambiguous", "(", "seq", ")", ":", "barcodes", ".", "append", "(", "BarcodeLine", ".", "_make", "(", "(", "id", ",", "s", ")", ")", ")", "nbc", "=", "len", "(", "barcodes", ")", "logging", ".", "debug", "(", "\"Imported {0} barcodes (ambiguous codes expanded).\"", ".", "format", "(", "nbc", ")", ")", "checkprefix", "=", "not", "opts", ".", "nocheckprefix", "if", "checkprefix", ":", "# Sanity check of shared prefix", "excludebarcodes", "=", "[", "]", "for", "bc", "in", "barcodes", ":", "exclude", "=", "[", "]", "for", "s", "in", "barcodes", ":", "if", "bc", ".", "id", "==", "s", ".", "id", ":", "continue", "assert", "bc", ".", "seq", "!=", "s", ".", "seq", "if", "s", ".", "seq", ".", "startswith", "(", "bc", ".", "seq", ")", "and", "len", "(", "s", ".", "seq", ")", ">", "len", "(", "bc", ".", "seq", ")", ":", "logging", ".", "error", "(", "\"{0} shares same prefix as {1}.\"", ".", "format", "(", "s", ",", "bc", ")", ")", "exclude", ".", "append", "(", "s", ")", "excludebarcodes", ".", "append", "(", "exclude", ")", "else", ":", "excludebarcodes", "=", "nbc", "*", "[", "[", "]", "]", "outdir", "=", "opts", ".", "outdir", "mkdir", "(", "outdir", ")", "cpus", "=", "opts", ".", "cpus", "logging", ".", "debug", "(", "\"Create a pool of {0} workers.\"", ".", "format", "(", "cpus", ")", ")", "pool", "=", "Pool", "(", "cpus", ")", "if", "paired", ":", "assert", "nfiles", "==", "2", ",", "\"You asked for --paired, but sent in {0} files\"", ".", "format", "(", "nfiles", ")", "split_fun", "=", "append_barcode_paired", "if", "append", "else", "split_barcode_paired", "mode", "=", "\"paired\"", "else", ":", "split_fun", "=", "split_barcode", "mode", "=", "\"single\"", "logging", ".", "debug", "(", "\"Mode: {0}\"", ".", "format", "(", "mode", ")", ")", "pool", ".", "map", "(", "split_fun", ",", "zip", "(", "barcodes", ",", "excludebarcodes", ",", "nbc", "*", "[", "outdir", "]", ",", "nbc", "*", "[", "fastqfile", "]", ")", ")" ]
%prog split barcodefile fastqfile1 .. Deconvolute fastq files into subsets of fastq reads, based on the barcodes in the barcodefile, which is a two-column file like: ID01 AGTCCAG Input fastqfiles can be several files. Output files are ID01.fastq, ID02.fastq, one file per line in barcodefile. When --paired is set, the number of input fastqfiles must be two. Output file (the deconvoluted reads) will be in interleaved format.
[ "%prog", "split", "barcodefile", "fastqfile1", ".." ]
python
train
DataONEorg/d1_python
gmn/src/d1_gmn/app/middleware/session_cert.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/middleware/session_cert.py#L59-L67
def get_authenticated_subjects(cert_pem): """Return primary subject and set of equivalents authenticated by certificate. - ``cert_pem`` can be str or bytes """ if isinstance(cert_pem, str): cert_pem = cert_pem.encode('utf-8') return d1_common.cert.subjects.extract_subjects(cert_pem)
[ "def", "get_authenticated_subjects", "(", "cert_pem", ")", ":", "if", "isinstance", "(", "cert_pem", ",", "str", ")", ":", "cert_pem", "=", "cert_pem", ".", "encode", "(", "'utf-8'", ")", "return", "d1_common", ".", "cert", ".", "subjects", ".", "extract_subjects", "(", "cert_pem", ")" ]
Return primary subject and set of equivalents authenticated by certificate. - ``cert_pem`` can be str or bytes
[ "Return", "primary", "subject", "and", "set", "of", "equivalents", "authenticated", "by", "certificate", "." ]
python
train
alkivi-sas/python-alkivi-logger
alkivi/logger/logger.py
https://github.com/alkivi-sas/python-alkivi-logger/blob/e96d5a987a5c8789c51d4fa7541709e05b1f51e1/alkivi/logger/logger.py#L98-L101
def error(self, message, *args, **kwargs): """Should not happen ... """ self._log(logging.ERROR, message, *args, **kwargs)
[ "def", "error", "(", "self", ",", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_log", "(", "logging", ".", "ERROR", ",", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Should not happen ...
[ "Should", "not", "happen", "..." ]
python
train
OCHA-DAP/hdx-python-api
src/hdx/data/resource.py
https://github.com/OCHA-DAP/hdx-python-api/blob/212440f54f73805826a16db77dbcb6033b18a313/src/hdx/data/resource.py#L295-L314
def get_all_resource_ids_in_datastore(configuration=None): # type: (Optional[Configuration]) -> List[str] """Get list of resources that have a datastore returning their ids. Args: configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. Returns: List[str]: List of resource ids that are in the datastore """ resource = Resource(configuration=configuration) success, result = resource._read_from_hdx('datastore', '_table_metadata', 'resource_id', Resource.actions()['datastore_search'], limit=10000) resource_ids = list() if not success: logger.debug(result) else: for record in result['records']: resource_ids.append(record['name']) return resource_ids
[ "def", "get_all_resource_ids_in_datastore", "(", "configuration", "=", "None", ")", ":", "# type: (Optional[Configuration]) -> List[str]", "resource", "=", "Resource", "(", "configuration", "=", "configuration", ")", "success", ",", "result", "=", "resource", ".", "_read_from_hdx", "(", "'datastore'", ",", "'_table_metadata'", ",", "'resource_id'", ",", "Resource", ".", "actions", "(", ")", "[", "'datastore_search'", "]", ",", "limit", "=", "10000", ")", "resource_ids", "=", "list", "(", ")", "if", "not", "success", ":", "logger", ".", "debug", "(", "result", ")", "else", ":", "for", "record", "in", "result", "[", "'records'", "]", ":", "resource_ids", ".", "append", "(", "record", "[", "'name'", "]", ")", "return", "resource_ids" ]
Get list of resources that have a datastore returning their ids. Args: configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. Returns: List[str]: List of resource ids that are in the datastore
[ "Get", "list", "of", "resources", "that", "have", "a", "datastore", "returning", "their", "ids", "." ]
python
train
googleapis/dialogflow-python-client-v2
samples/knowledge_base_management.py
https://github.com/googleapis/dialogflow-python-client-v2/blob/8c9c8709222efe427b76c9c8fcc04a0c4a0760b5/samples/knowledge_base_management.py#L94-L107
def delete_knowledge_base(project_id, knowledge_base_id):
    """Deletes a specific Knowledge base.

    Args:
        project_id: The GCP project linked with the agent.
        knowledge_base_id: Id of the Knowledge base."""
    import dialogflow_v2beta1 as dialogflow
    client = dialogflow.KnowledgeBasesClient()

    knowledge_base_path = client.knowledge_base_path(
        project_id, knowledge_base_id)

    response = client.delete_knowledge_base(knowledge_base_path)
    print('Knowledge Base deleted. {}'.format(response))
[ "def", "delete_knowledge_base", "(", "project_id", ",", "knowledge_base_id", ")", ":", "import", "dialogflow_v2beta1", "as", "dialogflow", "client", "=", "dialogflow", ".", "KnowledgeBasesClient", "(", ")", "knowledge_base_path", "=", "client", ".", "knowledge_base_path", "(", "project_id", ",", "knowledge_base_id", ")", "response", "=", "client", ".", "delete_knowledge_base", "(", "knowledge_base_path", ")", "print", "(", "'Knowledge Base deleted. {}'", ".", "format", "(", "response", ")", ")" ]
Deletes a specific Knowledge base. Args: project_id: The GCP project linked with the agent. knowledge_base_id: Id of the Knowledge base.
[ "Deletes", "a", "specific", "Knowledge", "base", "." ]
python
train
obriencj/python-javatools
javatools/opcodes.py
https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/opcodes.py#L105-L127
def __op(name, val, fmt=None, const=False, consume=0, produce=0): """ provides sensible defaults for a code, and registers it with the __OPTABLE for lookup. """ name = name.lower() # fmt can either be a str representing the struct to unpack, or a # callable to do more complex unpacking. If it's a str, create a # callable for it. if isinstance(fmt, str): fmt = partial(_unpack, compile_struct(fmt)) operand = (name, val, fmt, consume, produce, const) assert(name not in __OPTABLE) assert(val not in __OPTABLE) __OPTABLE[name] = operand __OPTABLE[val] = operand return val
[ "def", "__op", "(", "name", ",", "val", ",", "fmt", "=", "None", ",", "const", "=", "False", ",", "consume", "=", "0", ",", "produce", "=", "0", ")", ":", "name", "=", "name", ".", "lower", "(", ")", "# fmt can either be a str representing the struct to unpack, or a", "# callable to do more complex unpacking. If it's a str, create a", "# callable for it.", "if", "isinstance", "(", "fmt", ",", "str", ")", ":", "fmt", "=", "partial", "(", "_unpack", ",", "compile_struct", "(", "fmt", ")", ")", "operand", "=", "(", "name", ",", "val", ",", "fmt", ",", "consume", ",", "produce", ",", "const", ")", "assert", "(", "name", "not", "in", "__OPTABLE", ")", "assert", "(", "val", "not", "in", "__OPTABLE", ")", "__OPTABLE", "[", "name", "]", "=", "operand", "__OPTABLE", "[", "val", "]", "=", "operand", "return", "val" ]
provides sensible defaults for a code, and registers it with the __OPTABLE for lookup.
[ "provides", "sensible", "defaults", "for", "a", "code", "and", "registers", "it", "with", "the", "__OPTABLE", "for", "lookup", "." ]
python
train
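A simplified stand-in for the compile_struct/_unpack partial used above, reading one big-endian unsigned-short operand from a fabricated bytecode buffer:

from functools import partial
from struct import Struct

fmt = Struct('>H')
unpack_operand = partial(lambda s, buf, offset: s.unpack_from(buf, offset), fmt)

code = bytes([0x12, 0x00, 0x2A])  # hypothetical: opcode byte followed by operand 42
print(unpack_operand(code, 1))    # (42,)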
brocade/pynos
pynos/versions/base/yang/brocade_ras.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/yang/brocade_ras.py#L217-L227
def system_switch_attributes_chassis_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") system = ET.SubElement(config, "system", xmlns="urn:brocade.com:mgmt:brocade-ras") switch_attributes = ET.SubElement(system, "switch-attributes") chassis_name = ET.SubElement(switch_attributes, "chassis-name") chassis_name.text = kwargs.pop('chassis_name') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "system_switch_attributes_chassis_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "system", "=", "ET", ".", "SubElement", "(", "config", ",", "\"system\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-ras\"", ")", "switch_attributes", "=", "ET", ".", "SubElement", "(", "system", ",", "\"switch-attributes\"", ")", "chassis_name", "=", "ET", ".", "SubElement", "(", "switch_attributes", ",", "\"chassis-name\"", ")", "chassis_name", ".", "text", "=", "kwargs", ".", "pop", "(", "'chassis_name'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
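Rebuilding the same element tree by hand shows the payload handed to the callback; the chassis name is made up, and the xmlns keyword simply becomes an attribute on the system element:

import xml.etree.ElementTree as ET

config = ET.Element("config")
system = ET.SubElement(config, "system", xmlns="urn:brocade.com:mgmt:brocade-ras")
switch_attributes = ET.SubElement(system, "switch-attributes")
chassis_name = ET.SubElement(switch_attributes, "chassis-name")
chassis_name.text = "spine-01"
print(ET.tostring(config).decode())
# <config><system xmlns="urn:brocade.com:mgmt:brocade-ras"><switch-attributes><chassis-name>spine-01</chassis-name></switch-attributes></system></config>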
minio/minio-py
minio/api.py
https://github.com/minio/minio-py/blob/7107c84183cf5fb4deff68c0a16ab9f1c0b4c37e/minio/api.py#L573-L645
def fget_object(self, bucket_name, object_name, file_path, request_headers=None, sse=None): """ Retrieves an object from a bucket and writes at file_path. Examples: minio.fget_object('foo', 'bar', 'localfile') :param bucket_name: Bucket to read object from. :param object_name: Name of the object to read. :param file_path: Local file path to save the object. :param request_headers: Any additional headers to be added with GET request. """ is_valid_bucket_name(bucket_name) is_non_empty_string(object_name) stat = self.stat_object(bucket_name, object_name, sse) if os.path.isdir(file_path): raise OSError("file is a directory.") # Create top level directory if needed. top_level_dir = os.path.dirname(file_path) if top_level_dir: mkdir_p(top_level_dir) # Write to a temporary file "file_path.part.minio" before saving. file_part_path = file_path + stat.etag + '.part.minio' # Open file in 'write+append' mode. with open(file_part_path, 'ab') as file_part_data: # Save current file_part statinfo. file_statinfo = os.stat(file_part_path) # Get partial object. response = self._get_partial_object(bucket_name, object_name, offset=file_statinfo.st_size, length=0, request_headers=request_headers, sse=sse) # Save content_size to verify if we wrote more data. content_size = int(response.headers['content-length']) # Save total_written. total_written = 0 for data in response.stream(amt=1024 * 1024): file_part_data.write(data) total_written += len(data) # Release the connection from the response at this point. response.release_conn() # Verify if we wrote data properly. if total_written < content_size: msg = 'Data written {0} bytes is smaller than the' \ 'specified size {1} bytes'.format(total_written, content_size) raise InvalidSizeError(msg) if total_written > content_size: msg = 'Data written {0} bytes is in excess than the' \ 'specified size {1} bytes'.format(total_written, content_size) raise InvalidSizeError(msg) #Delete existing file to be compatible with Windows if os.path.exists(file_path): os.remove(file_path) #Rename with destination file path os.rename(file_part_path, file_path) # Return the stat return stat
[ "def", "fget_object", "(", "self", ",", "bucket_name", ",", "object_name", ",", "file_path", ",", "request_headers", "=", "None", ",", "sse", "=", "None", ")", ":", "is_valid_bucket_name", "(", "bucket_name", ")", "is_non_empty_string", "(", "object_name", ")", "stat", "=", "self", ".", "stat_object", "(", "bucket_name", ",", "object_name", ",", "sse", ")", "if", "os", ".", "path", ".", "isdir", "(", "file_path", ")", ":", "raise", "OSError", "(", "\"file is a directory.\"", ")", "# Create top level directory if needed.", "top_level_dir", "=", "os", ".", "path", ".", "dirname", "(", "file_path", ")", "if", "top_level_dir", ":", "mkdir_p", "(", "top_level_dir", ")", "# Write to a temporary file \"file_path.part.minio\" before saving.", "file_part_path", "=", "file_path", "+", "stat", ".", "etag", "+", "'.part.minio'", "# Open file in 'write+append' mode.", "with", "open", "(", "file_part_path", ",", "'ab'", ")", "as", "file_part_data", ":", "# Save current file_part statinfo.", "file_statinfo", "=", "os", ".", "stat", "(", "file_part_path", ")", "# Get partial object.", "response", "=", "self", ".", "_get_partial_object", "(", "bucket_name", ",", "object_name", ",", "offset", "=", "file_statinfo", ".", "st_size", ",", "length", "=", "0", ",", "request_headers", "=", "request_headers", ",", "sse", "=", "sse", ")", "# Save content_size to verify if we wrote more data.", "content_size", "=", "int", "(", "response", ".", "headers", "[", "'content-length'", "]", ")", "# Save total_written.", "total_written", "=", "0", "for", "data", "in", "response", ".", "stream", "(", "amt", "=", "1024", "*", "1024", ")", ":", "file_part_data", ".", "write", "(", "data", ")", "total_written", "+=", "len", "(", "data", ")", "# Release the connection from the response at this point.", "response", ".", "release_conn", "(", ")", "# Verify if we wrote data properly.", "if", "total_written", "<", "content_size", ":", "msg", "=", "'Data written {0} bytes is smaller than the'", "'specified size {1} bytes'", ".", "format", "(", "total_written", ",", "content_size", ")", "raise", "InvalidSizeError", "(", "msg", ")", "if", "total_written", ">", "content_size", ":", "msg", "=", "'Data written {0} bytes is in excess than the'", "'specified size {1} bytes'", ".", "format", "(", "total_written", ",", "content_size", ")", "raise", "InvalidSizeError", "(", "msg", ")", "#Delete existing file to be compatible with Windows", "if", "os", ".", "path", ".", "exists", "(", "file_path", ")", ":", "os", ".", "remove", "(", "file_path", ")", "#Rename with destination file path", "os", ".", "rename", "(", "file_part_path", ",", "file_path", ")", "# Return the stat", "return", "stat" ]
Retrieves an object from a bucket and writes at file_path. Examples: minio.fget_object('foo', 'bar', 'localfile') :param bucket_name: Bucket to read object from. :param object_name: Name of the object to read. :param file_path: Local file path to save the object. :param request_headers: Any additional headers to be added with GET request.
[ "Retrieves", "an", "object", "from", "a", "bucket", "and", "writes", "at", "file_path", "." ]
python
train
ClimateImpactLab/DataFS
datafs/datafs.py
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/datafs.py#L544-L554
def search(ctx, tags, prefix=None): ''' List all archives matching tag search criteria ''' _generate_api(ctx) for i, match in enumerate(ctx.obj.api.search(*tags, prefix=prefix)): click.echo(match, nl=False) print('')
[ "def", "search", "(", "ctx", ",", "tags", ",", "prefix", "=", "None", ")", ":", "_generate_api", "(", "ctx", ")", "for", "i", ",", "match", "in", "enumerate", "(", "ctx", ".", "obj", ".", "api", ".", "search", "(", "*", "tags", ",", "prefix", "=", "prefix", ")", ")", ":", "click", ".", "echo", "(", "match", ",", "nl", "=", "False", ")", "print", "(", "''", ")" ]
List all archives matching tag search criteria
[ "List", "all", "archives", "matching", "tag", "search", "criteria" ]
python
train
openstax/cnx-publishing
cnxpublishing/publish.py
https://github.com/openstax/cnx-publishing/blob/f55b4a2c45d8618737288f1b74b4139d5ac74154/cnxpublishing/publish.py#L255-L285
def _insert_resource_file(cursor, module_ident, resource): """Insert a resource into the modules_files table. This will create a new file entry or associates an existing one. """ with resource.open() as file: fileid, _ = _insert_file(cursor, file, resource.media_type) # Is this file legitimately used twice within the same content? cursor.execute("""\ select (fileid = %s) as is_same_file from module_files where module_ident = %s and filename = %s""", (fileid, module_ident, resource.filename,)) try: is_same_file = cursor.fetchone()[0] except TypeError: # NoneType is_same_file = None if is_same_file: # All is good, bail out. return elif is_same_file is not None: # pragma: no cover # This means the file is not the same, but a filename # conflict exists. # FFF At this time, it is impossible to get to this logic. raise Exception("filename conflict") args = (module_ident, fileid, resource.filename,) cursor.execute("""\ INSERT INTO module_files (module_ident, fileid, filename) VALUES (%s, %s, %s)""", args)
[ "def", "_insert_resource_file", "(", "cursor", ",", "module_ident", ",", "resource", ")", ":", "with", "resource", ".", "open", "(", ")", "as", "file", ":", "fileid", ",", "_", "=", "_insert_file", "(", "cursor", ",", "file", ",", "resource", ".", "media_type", ")", "# Is this file legitimately used twice within the same content?", "cursor", ".", "execute", "(", "\"\"\"\\\nselect\n (fileid = %s) as is_same_file\nfrom module_files\nwhere module_ident = %s and filename = %s\"\"\"", ",", "(", "fileid", ",", "module_ident", ",", "resource", ".", "filename", ",", ")", ")", "try", ":", "is_same_file", "=", "cursor", ".", "fetchone", "(", ")", "[", "0", "]", "except", "TypeError", ":", "# NoneType", "is_same_file", "=", "None", "if", "is_same_file", ":", "# All is good, bail out.", "return", "elif", "is_same_file", "is", "not", "None", ":", "# pragma: no cover", "# This means the file is not the same, but a filename", "# conflict exists.", "# FFF At this time, it is impossible to get to this logic.", "raise", "Exception", "(", "\"filename conflict\"", ")", "args", "=", "(", "module_ident", ",", "fileid", ",", "resource", ".", "filename", ",", ")", "cursor", ".", "execute", "(", "\"\"\"\\\nINSERT INTO module_files (module_ident, fileid, filename)\nVALUES (%s, %s, %s)\"\"\"", ",", "args", ")" ]
Insert a resource into the modules_files table. This will create a new file entry or associates an existing one.
[ "Insert", "a", "resource", "into", "the", "modules_files", "table", ".", "This", "will", "create", "a", "new", "file", "entry", "or", "associates", "an", "existing", "one", "." ]
python
valid
fjwCode/cerium
cerium/androiddriver.py
https://github.com/fjwCode/cerium/blob/f6e06e0dcf83a0bc924828e9d6cb81383ed2364f/cerium/androiddriver.py#L548-L551
def send_keyevents(self, keyevent: int) -> None: '''Simulates typing keyevents.''' self._execute('-s', self.device_sn, 'shell', 'input', 'keyevent', str(keyevent))
[ "def", "send_keyevents", "(", "self", ",", "keyevent", ":", "int", ")", "->", "None", ":", "self", ".", "_execute", "(", "'-s'", ",", "self", ".", "device_sn", ",", "'shell'", ",", "'input'", ",", "'keyevent'", ",", "str", "(", "keyevent", ")", ")" ]
Simulates typing keyevents.
[ "Simulates", "typing", "keyevents", "." ]
python
train
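A hedged usage example, assuming driver is an AndroidDriver instance from the module above; 3 and 26 are the standard Android KEYCODE_HOME and KEYCODE_POWER values:

driver.send_keyevents(3)    # KEYCODE_HOME: go to the home screen
driver.send_keyevents(26)   # KEYCODE_POWER: toggle the screen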
google/grr
grr/server/grr_response_server/export.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/export.py#L870-L884
def _SeparateTypes(self, metadata_value_pairs): """Separate files, registry keys, grep matches.""" registry_pairs = [] file_pairs = [] match_pairs = [] for metadata, result in metadata_value_pairs: if (result.stat_entry.pathspec.pathtype == rdf_paths.PathSpec.PathType.REGISTRY): registry_pairs.append((metadata, result.stat_entry)) else: file_pairs.append((metadata, result)) match_pairs.extend([(metadata, match) for match in result.matches]) return registry_pairs, file_pairs, match_pairs
[ "def", "_SeparateTypes", "(", "self", ",", "metadata_value_pairs", ")", ":", "registry_pairs", "=", "[", "]", "file_pairs", "=", "[", "]", "match_pairs", "=", "[", "]", "for", "metadata", ",", "result", "in", "metadata_value_pairs", ":", "if", "(", "result", ".", "stat_entry", ".", "pathspec", ".", "pathtype", "==", "rdf_paths", ".", "PathSpec", ".", "PathType", ".", "REGISTRY", ")", ":", "registry_pairs", ".", "append", "(", "(", "metadata", ",", "result", ".", "stat_entry", ")", ")", "else", ":", "file_pairs", ".", "append", "(", "(", "metadata", ",", "result", ")", ")", "match_pairs", ".", "extend", "(", "[", "(", "metadata", ",", "match", ")", "for", "match", "in", "result", ".", "matches", "]", ")", "return", "registry_pairs", ",", "file_pairs", ",", "match_pairs" ]
Separate files, registry keys, grep matches.
[ "Separate", "files", "registry", "keys", "grep", "matches", "." ]
python
train
seung-lab/cloud-volume
cloudvolume/storage.py
https://github.com/seung-lab/cloud-volume/blob/d2fd4500333f1bc3cd3e3919a8b649cec5d8e214/cloudvolume/storage.py#L287-L311
def put_files(self, files, content_type=None, compress=None, cache_control=None, block=True): """ Put lots of files at once and get a nice progress bar. It'll also wait for the upload to complete, just like get_files. Required: files: [ (filepath, content), .... ] """ def base_uploadfn(path, content, interface): interface.put_file(path, content, content_type, compress, cache_control=cache_control) for path, content in files: content = compression.compress(content, method=compress) uploadfn = partial(base_uploadfn, path, content) if len(self._threads): self.put(uploadfn) else: uploadfn(self._interface) if block: desc = 'Uploading' if self.progress else None self.wait(desc) return self
[ "def", "put_files", "(", "self", ",", "files", ",", "content_type", "=", "None", ",", "compress", "=", "None", ",", "cache_control", "=", "None", ",", "block", "=", "True", ")", ":", "def", "base_uploadfn", "(", "path", ",", "content", ",", "interface", ")", ":", "interface", ".", "put_file", "(", "path", ",", "content", ",", "content_type", ",", "compress", ",", "cache_control", "=", "cache_control", ")", "for", "path", ",", "content", "in", "files", ":", "content", "=", "compression", ".", "compress", "(", "content", ",", "method", "=", "compress", ")", "uploadfn", "=", "partial", "(", "base_uploadfn", ",", "path", ",", "content", ")", "if", "len", "(", "self", ".", "_threads", ")", ":", "self", ".", "put", "(", "uploadfn", ")", "else", ":", "uploadfn", "(", "self", ".", "_interface", ")", "if", "block", ":", "desc", "=", "'Uploading'", "if", "self", ".", "progress", "else", "None", "self", ".", "wait", "(", "desc", ")", "return", "self" ]
Put lots of files at once and get a nice progress bar. It'll also wait for the upload to complete, just like get_files. Required: files: [ (filepath, content), .... ]
[ "Put", "lots", "of", "files", "at", "once", "and", "get", "a", "nice", "progress", "bar", ".", "It", "ll", "also", "wait", "for", "the", "upload", "to", "complete", "just", "like", "get_files", "." ]
python
train
cackharot/suds-py3
suds/xsd/sxbase.py
https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/xsd/sxbase.py#L149-L160
def get_child(self, name): """ Get (find) a I{non-attribute} child by name. @param name: A child name. @type name: str @return: A tuple: the requested (child, ancestry). @rtype: (L{SchemaObject}, [L{SchemaObject},..]) """ for child, ancestry in self.children(): if child.any() or child.name == name: return (child, ancestry) return (None, [])
[ "def", "get_child", "(", "self", ",", "name", ")", ":", "for", "child", ",", "ancestry", "in", "self", ".", "children", "(", ")", ":", "if", "child", ".", "any", "(", ")", "or", "child", ".", "name", "==", "name", ":", "return", "(", "child", ",", "ancestry", ")", "return", "(", "None", ",", "[", "]", ")" ]
Get (find) a I{non-attribute} child by name. @param name: A child name. @type name: str @return: A tuple: the requested (child, ancestry). @rtype: (L{SchemaObject}, [L{SchemaObject},..])
[ "Get", "(", "find", ")", "a", "I", "{", "non", "-", "attribute", "}", "child", "by", "name", "." ]
python
train
benvanwerkhoven/kernel_tuner
kernel_tuner/core.py
https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/core.py#L180-L200
def compile_kernel(self, instance, verbose): """compile the kernel for this specific instance""" logging.debug('compile_kernel ' + instance.name) #compile kernel_string into device func func = None try: func = self.dev.compile(instance.name, instance.kernel_string) except Exception as e: #compiles may fail because certain kernel configurations use too #much shared memory for example, the desired behavior is to simply #skip over this configuration and try the next one if "uses too much shared data" in str(e): logging.debug('compile_kernel failed due to kernel using too much shared memory') if verbose: print("skipping config", instance.name, "reason: too much shared memory used") else: logging.debug('compile_kernel failed due to error: ' + str(e)) print("Error while compiling:", instance.name) raise e return func
[ "def", "compile_kernel", "(", "self", ",", "instance", ",", "verbose", ")", ":", "logging", ".", "debug", "(", "'compile_kernel '", "+", "instance", ".", "name", ")", "#compile kernel_string into device func", "func", "=", "None", "try", ":", "func", "=", "self", ".", "dev", ".", "compile", "(", "instance", ".", "name", ",", "instance", ".", "kernel_string", ")", "except", "Exception", "as", "e", ":", "#compiles may fail because certain kernel configurations use too", "#much shared memory for example, the desired behavior is to simply", "#skip over this configuration and try the next one", "if", "\"uses too much shared data\"", "in", "str", "(", "e", ")", ":", "logging", ".", "debug", "(", "'compile_kernel failed due to kernel using too much shared memory'", ")", "if", "verbose", ":", "print", "(", "\"skipping config\"", ",", "instance", ".", "name", ",", "\"reason: too much shared memory used\"", ")", "else", ":", "logging", ".", "debug", "(", "'compile_kernel failed due to error: '", "+", "str", "(", "e", ")", ")", "print", "(", "\"Error while compiling:\"", ",", "instance", ".", "name", ")", "raise", "e", "return", "func" ]
compile the kernel for this specific instance
[ "compile", "the", "kernel", "for", "this", "specific", "instance" ]
python
train
nicolargo/glances
glances/plugins/glances_ip.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_ip.py#L161-L176
def get(self): """Get the first public IP address returned by one of the online services.""" q = queue.Queue() for u, j, k in urls: t = threading.Thread(target=self._get_ip_public, args=(q, u, j, k)) t.daemon = True t.start() timer = Timer(self.timeout) ip = None while not timer.finished() and ip is None: if q.qsize() > 0: ip = q.get() return ', '.join(set([x.strip() for x in ip.split(',')]))
[ "def", "get", "(", "self", ")", ":", "q", "=", "queue", ".", "Queue", "(", ")", "for", "u", ",", "j", ",", "k", "in", "urls", ":", "t", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_get_ip_public", ",", "args", "=", "(", "q", ",", "u", ",", "j", ",", "k", ")", ")", "t", ".", "daemon", "=", "True", "t", ".", "start", "(", ")", "timer", "=", "Timer", "(", "self", ".", "timeout", ")", "ip", "=", "None", "while", "not", "timer", ".", "finished", "(", ")", "and", "ip", "is", "None", ":", "if", "q", ".", "qsize", "(", ")", ">", "0", ":", "ip", "=", "q", ".", "get", "(", ")", "return", "', '", ".", "join", "(", "set", "(", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "ip", ".", "split", "(", "','", ")", "]", ")", ")" ]
Get the first public IP address returned by one of the online services.
[ "Get", "the", "first", "public", "IP", "address", "returned", "by", "one", "of", "the", "online", "services", "." ]
python
train
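A self-contained sketch of the 'first answer wins' pattern used above, with dummy workers standing in for the online IP services and a plain deadline instead of the Timer helper:

import queue
import threading
import time

q = queue.Queue()

def worker(delay, value):
    time.sleep(delay)
    q.put(value)

for delay, value in [(0.3, 'slow-service'), (0.1, 'fast-service')]:
    t = threading.Thread(target=worker, args=(delay, value))
    t.daemon = True
    t.start()

deadline = time.monotonic() + 1.0
answer = None
while time.monotonic() < deadline and answer is None:
    if q.qsize() > 0:
        answer = q.get()
print(answer)  # 'fast-service'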
inveniosoftware-attic/invenio-utils
invenio_utils/datastructures.py
https://github.com/inveniosoftware-attic/invenio-utils/blob/9a1c6db4e3f1370901f329f510480dd8df188296/invenio_utils/datastructures.py#L418-L421
def flatten_multidict(multidict): """Return flattened dictionary from ``MultiDict``.""" return dict([(key, value if len(value) > 1 else value[0]) for (key, value) in multidict.iterlists()])
[ "def", "flatten_multidict", "(", "multidict", ")", ":", "return", "dict", "(", "[", "(", "key", ",", "value", "if", "len", "(", "value", ")", ">", "1", "else", "value", "[", "0", "]", ")", "for", "(", "key", ",", "value", ")", "in", "multidict", ".", "iterlists", "(", ")", "]", ")" ]
Return flattened dictionary from ``MultiDict``.
[ "Return", "flattened", "dictionary", "from", "MultiDict", "." ]
python
train
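A usage sketch with Werkzeug's MultiDict, which provides the lists()/iterlists() interface the helper above expects (newer Werkzeug spells it lists()):

from werkzeug.datastructures import MultiDict

md = MultiDict([('q', 'cern'), ('tag', 'physics'), ('tag', 'lhc')])
flattened = dict(
    (key, value if len(value) > 1 else value[0])
    for key, value in md.lists()
)
print(flattened)  # {'q': 'cern', 'tag': ['physics', 'lhc']}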
tamasgal/km3pipe
km3pipe/logger.py
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/logger.py#L33-L35
def deprecation(self, message, *args, **kws): """Show a deprecation warning.""" self._log(DEPRECATION, message, args, **kws)
[ "def", "deprecation", "(", "self", ",", "message", ",", "*", "args", ",", "*", "*", "kws", ")", ":", "self", ".", "_log", "(", "DEPRECATION", ",", "message", ",", "args", ",", "*", "*", "kws", ")" ]
Show a deprecation warning.
[ "Show", "a", "deprecation", "warning", "." ]
python
train
rameshg87/pyremotevbox
pyremotevbox/ZSI/wstools/Utility.py
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/wstools/Utility.py#L598-L608
def importNode(self, document, node, deep=0): """Implements (well enough for our purposes) DOM node import.""" nodetype = node.nodeType if nodetype in (node.DOCUMENT_NODE, node.DOCUMENT_TYPE_NODE): raise DOMException('Illegal node type for importNode') if nodetype == node.ENTITY_REFERENCE_NODE: deep = 0 clone = node.cloneNode(deep) self._setOwnerDoc(document, clone) clone.__imported__ = 1 return clone
[ "def", "importNode", "(", "self", ",", "document", ",", "node", ",", "deep", "=", "0", ")", ":", "nodetype", "=", "node", ".", "nodeType", "if", "nodetype", "in", "(", "node", ".", "DOCUMENT_NODE", ",", "node", ".", "DOCUMENT_TYPE_NODE", ")", ":", "raise", "DOMException", "(", "'Illegal node type for importNode'", ")", "if", "nodetype", "==", "node", ".", "ENTITY_REFERENCE_NODE", ":", "deep", "=", "0", "clone", "=", "node", ".", "cloneNode", "(", "deep", ")", "self", ".", "_setOwnerDoc", "(", "document", ",", "clone", ")", "clone", ".", "__imported__", "=", "1", "return", "clone" ]
Implements (well enough for our purposes) DOM node import.
[ "Implements", "(", "well", "enough", "for", "our", "purposes", ")", "DOM", "node", "import", "." ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/breakpoint.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/breakpoint.py#L318-L330
def set_condition(self, condition = True): """ Sets a new condition callback for the breakpoint. @see: L{__init__} @type condition: function @param condition: (Optional) Condition callback function. """ if condition is None: self.__condition = True else: self.__condition = condition
[ "def", "set_condition", "(", "self", ",", "condition", "=", "True", ")", ":", "if", "condition", "is", "None", ":", "self", ".", "__condition", "=", "True", "else", ":", "self", ".", "__condition", "=", "condition" ]
Sets a new condition callback for the breakpoint. @see: L{__init__} @type condition: function @param condition: (Optional) Condition callback function.
[ "Sets", "a", "new", "condition", "callback", "for", "the", "breakpoint", "." ]
python
train
OnroerendErfgoed/crabpy
crabpy/gateway/crab.py
https://github.com/OnroerendErfgoed/crabpy/blob/3a6fd8bc5aca37c2a173e3ea94e4e468b8aa79c1/crabpy/gateway/crab.py#L38-L56
def crab_gateway_request(client, method, *args): ''' Utility function that helps making requests to the CRAB service. This is a specialised version of :func:`crabpy.client.crab_request` that allows adding extra functionality for the calls made by the gateway. :param client: A :class:`suds.client.Client` for the CRAB service. :param string action: Which method to call, eg. `ListGewesten` :returns: Result of the SOAP call. ''' try: return crab_request(client, method, *args) except WebFault as wf: err = GatewayRuntimeException( 'Could not execute request. Message from server:\n%s' % wf.fault['faultstring'], wf ) raise err
[ "def", "crab_gateway_request", "(", "client", ",", "method", ",", "*", "args", ")", ":", "try", ":", "return", "crab_request", "(", "client", ",", "method", ",", "*", "args", ")", "except", "WebFault", "as", "wf", ":", "err", "=", "GatewayRuntimeException", "(", "'Could not execute request. Message from server:\\n%s'", "%", "wf", ".", "fault", "[", "'faultstring'", "]", ",", "wf", ")", "raise", "err" ]
Utility function that helps making requests to the CRAB service.

This is a specialised version of :func:`crabpy.client.crab_request`
that allows adding extra functionality for the calls made
by the gateway.

:param client: A :class:`suds.client.Client` for the CRAB service.
:param string action: Which method to call, eg. `ListGewesten`
:returns: Result of the SOAP call.
[ "Utility", "function", "that", "helps", "making", "requests", "to", "the", "CRAB", "service", "." ]
python
train
rbarrois/mpdlcd
mpdlcd/display_pattern.py
https://github.com/rbarrois/mpdlcd/blob/85f16c8cc0883f8abb4c2cc7f69729c3e2f857da/mpdlcd/display_pattern.py#L42-L59
def parse(self):
    """Parse the lines, and fill self.line_fields accordingly."""
    for line in self.lines:
        # Parse the line
        field_defs = self.parse_line(line)
        fields = []

        # Convert field parameters into Field objects
        for (kind, options) in field_defs:
            logger.debug("Creating field %s(%r)", kind, options)
            fields.append(self.field_registry.create(kind, **options))

        # Add the list of Field objects to the 'fields per line'.
        self.line_fields.append(fields)

        # Pre-fill the list of widgets
        for field in fields:
            self.widgets[field] = None
[ "def", "parse", "(", "self", ")", ":", "for", "line", "in", "self", ".", "lines", ":", "# Parse the line", "field_defs", "=", "self", ".", "parse_line", "(", "line", ")", "fields", "=", "[", "]", "# Convert field parameters into Field objects", "for", "(", "kind", ",", "options", ")", "in", "field_defs", ":", "logger", ".", "debug", "(", "\"Creating field %s(%r)\"", ",", "kind", ",", "options", ")", "fields", ".", "append", "(", "self", ".", "field_registry", ".", "create", "(", "kind", ",", "*", "*", "options", ")", ")", "# Add the list of Field objects to the 'fields per line'.", "self", ".", "line_fields", ".", "append", "(", "fields", ")", "# Pre-fill the list of widgets", "for", "field", "in", "fields", ":", "self", ".", "widgets", "[", "field", "]", "=", "None" ]
Parse the lines, and fill self.line_fields accordingly.
[ "Parse", "the", "lines", "and", "fill", "self", ".", "line_fields", "accordingly", "." ]
python
train
PhracturedBlue/asterisk_mbox
asterisk_mbox/__init__.py
https://github.com/PhracturedBlue/asterisk_mbox/blob/275de1e71ed05c6acff1a5fa87f754f4d385a372/asterisk_mbox/__init__.py#L73-L85
def start(self):
    """Start thread."""
    if not self._thread:
        logging.info("Starting asterisk mbox thread")
        # Ensure signal queue is empty
        try:
            while True:
                self.signal.get(False)
        except queue.Empty:
            pass
        self._thread = threading.Thread(target=self._loop)
        self._thread.setDaemon(True)
        self._thread.start()
[ "def", "start", "(", "self", ")", ":", "if", "not", "self", ".", "_thread", ":", "logging", ".", "info", "(", "\"Starting asterisk mbox thread\"", ")", "# Ensure signal queue is empty", "try", ":", "while", "True", ":", "self", ".", "signal", ".", "get", "(", "False", ")", "except", "queue", ".", "Empty", ":", "pass", "self", ".", "_thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_loop", ")", "self", ".", "_thread", ".", "setDaemon", "(", "True", ")", "self", ".", "_thread", ".", "start", "(", ")" ]
Start thread.
[ "Start", "thread", "." ]
python
train
linkhub-sdk/popbill.py
popbill/messageService.py
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/messageService.py#L210-L261
def sendMMS_Multi(self, CorpNum, Sender, Subject, Contents, Messages, FilePath, reserveDT, adsYN=False,
                  UserID=None, RequestNum=None):
    """ 멀티 문자메시지 다량 전송
        args
            CorpNum : 팝빌회원 사업자번호
            Sender : 발신자번호 (동보전송용)
            Subject : 장문 메시지 제목 (동보전송용)
            Contents : 장문 문자 내용 (동보전송용)
            Messages : 개별전송정보 배열
            FilePath : 전송하고자 하는 파일 경로
            reserveDT : 예약전송시간 (형식. yyyyMMddHHmmss)
            UserID : 팝빌회원 아이디
            RequestNum = 전송요청번호
        return
            접수번호 (receiptNum)
        raise
            PopbillException
    """
    if Messages == None or len(Messages) < 1:
        raise PopbillException(-99999999, "전송할 메시지가 입력되지 않았습니다.")

    req = {}

    if Sender != None or Sender != '':
        req['snd'] = Sender
    if Contents != None or Contents != '':
        req['content'] = Contents
    if Subject != None or Subject != '':
        req['subject'] = Subject
    if reserveDT != None or reserveDT != '':
        req['sndDT'] = reserveDT
    if Messages != None or Messages != '':
        req['msgs'] = Messages
    if RequestNum != None or RequestNum != '':
        req['requestNum'] = RequestNum
    if adsYN:
        req['adsYN'] = True

    postData = self._stringtify(req)

    files = []
    try:
        with open(FilePath, "rb") as F:
            files = [File(fieldName='file',
                          fileName=F.name,
                          fileData=F.read())]
    except IOError:
        raise PopbillException(-99999999, "해당경로에 파일이 없거나 읽을 수 없습니다.")

    result = self._httppost_files('/MMS', postData, files, CorpNum, UserID)

    return result.receiptNum
[ "def", "sendMMS_Multi", "(", "self", ",", "CorpNum", ",", "Sender", ",", "Subject", ",", "Contents", ",", "Messages", ",", "FilePath", ",", "reserveDT", ",", "adsYN", "=", "False", ",", "UserID", "=", "None", ",", "RequestNum", "=", "None", ")", ":", "if", "Messages", "==", "None", "or", "len", "(", "Messages", ")", "<", "1", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"전송할 메시지가 입력되지 않았습니다.\")\r", "", "req", "=", "{", "}", "if", "Sender", "!=", "None", "or", "Sender", "!=", "''", ":", "req", "[", "'snd'", "]", "=", "Sender", "if", "Contents", "!=", "None", "or", "Contents", "!=", "''", ":", "req", "[", "'content'", "]", "=", "Contents", "if", "Subject", "!=", "None", "or", "Subject", "!=", "''", ":", "req", "[", "'subject'", "]", "=", "Subject", "if", "reserveDT", "!=", "None", "or", "reserveDT", "!=", "''", ":", "req", "[", "'sndDT'", "]", "=", "reserveDT", "if", "Messages", "!=", "None", "or", "Messages", "!=", "''", ":", "req", "[", "'msgs'", "]", "=", "Messages", "if", "RequestNum", "!=", "None", "or", "RequestNum", "!=", "''", ":", "req", "[", "'requestNum'", "]", "=", "RequestNum", "if", "adsYN", ":", "req", "[", "'adsYN'", "]", "=", "True", "postData", "=", "self", ".", "_stringtify", "(", "req", ")", "files", "=", "[", "]", "try", ":", "with", "open", "(", "FilePath", ",", "\"rb\"", ")", "as", "F", ":", "files", "=", "[", "File", "(", "fieldName", "=", "'file'", ",", "fileName", "=", "F", ".", "name", ",", "fileData", "=", "F", ".", "read", "(", ")", ")", "]", "except", "IOError", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"해당경로에 파일이 없거나 읽을 수 없습니다.\")\r", "", "result", "=", "self", ".", "_httppost_files", "(", "'/MMS'", ",", "postData", ",", "files", ",", "CorpNum", ",", "UserID", ")", "return", "result", ".", "receiptNum" ]
멀티 문자메시지 다량 전송 args CorpNum : 팝빌회원 사업자번호 Sender : 발신자번호 (동보전송용) Subject : 장문 메시지 제목 (동보전송용) Contents : 장문 문자 내용 (동보전송용) Messages : 개별전송정보 배열 FilePath : 전송하고자 하는 파일 경로 reserveDT : 예약전송시간 (형식. yyyyMMddHHmmss) UserID : 팝빌회원 아이디 RequestNum = 전송요청번호 return 접수번호 (receiptNum) raise PopbillException
[ "멀티", "문자메시지", "다량", "전송", "args", "CorpNum", ":", "팝빌회원", "사업자번호", "Sender", ":", "발신자번호", "(", "동보전송용", ")", "Subject", ":", "장문", "메시지", "제목", "(", "동보전송용", ")", "Contents", ":", "장문", "문자", "내용", "(", "동보전송용", ")", "Messages", ":", "개별전송정보", "배열", "FilePath", ":", "전송하고자", "하는", "파일", "경로", "reserveDT", ":", "예약전송시간", "(", "형식", ".", "yyyyMMddHHmmss", ")", "UserID", ":", "팝빌회원", "아이디", "RequestNum", "=", "전송요청번호", "return", "접수번호", "(", "receiptNum", ")", "raise", "PopbillException" ]
python
train
aws/sagemaker-containers
src/sagemaker_containers/_modules.py
https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_modules.py#L96-L110
def install(path, capture_error=False):  # type: (str, bool) -> None
    """Install a Python module in the executing Python environment.

    Args:
        path (str): Real path location of the Python module.
        capture_error (bool): Default false. If True, the running process captures the
            stderr, and appends it to the returned Exception message in case of errors.
    """
    cmd = '%s -m pip install -U . ' % _process.python_executable()

    if has_requirements(path):
        cmd += '-r requirements.txt'

    logger.info('Installing module with the following command:\n%s', cmd)

    _process.check_error(shlex.split(cmd), _errors.InstallModuleError, cwd=path, capture_error=capture_error)
[ "def", "install", "(", "path", ",", "capture_error", "=", "False", ")", ":", "# type: (str, bool) -> None", "cmd", "=", "'%s -m pip install -U . '", "%", "_process", ".", "python_executable", "(", ")", "if", "has_requirements", "(", "path", ")", ":", "cmd", "+=", "'-r requirements.txt'", "logger", ".", "info", "(", "'Installing module with the following command:\\n%s'", ",", "cmd", ")", "_process", ".", "check_error", "(", "shlex", ".", "split", "(", "cmd", ")", ",", "_errors", ".", "InstallModuleError", ",", "cwd", "=", "path", ",", "capture_error", "=", "capture_error", ")" ]
Install a Python module in the executing Python environment.

Args:
    path (str): Real path location of the Python module.
    capture_error (bool): Default false. If True, the running process captures the
        stderr, and appends it to the returned Exception message in case of errors.
[ "Install", "a", "Python", "module", "in", "the", "executing", "Python", "environment", ".", "Args", ":", "path", "(", "str", ")", ":", "Real", "path", "location", "of", "the", "Python", "module", ".", "capture_error", "(", "bool", ")", ":", "Default", "false", ".", "If", "True", "the", "running", "process", "captures", "the", "stderr", "and", "appends", "it", "to", "the", "returned", "Exception", "message", "in", "case", "of", "errors", "." ]
python
train
wummel/linkchecker
linkcheck/htmlutil/linkparse.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/htmlutil/linkparse.py#L164-L174
def is_meta_url (attr, attrs):
    """Check if the meta attributes contain a URL."""
    res = False
    if attr == "content":
        equiv = attrs.get_true('http-equiv', u'').lower()
        scheme = attrs.get_true('scheme', u'').lower()
        res = equiv in (u'refresh',) or scheme in (u'dcterms.uri',)
    if attr == "href":
        rel = attrs.get_true('rel', u'').lower()
        res = rel in (u'shortcut icon', u'icon')
    return res
[ "def", "is_meta_url", "(", "attr", ",", "attrs", ")", ":", "res", "=", "False", "if", "attr", "==", "\"content\"", ":", "equiv", "=", "attrs", ".", "get_true", "(", "'http-equiv'", ",", "u''", ")", ".", "lower", "(", ")", "scheme", "=", "attrs", ".", "get_true", "(", "'scheme'", ",", "u''", ")", ".", "lower", "(", ")", "res", "=", "equiv", "in", "(", "u'refresh'", ",", ")", "or", "scheme", "in", "(", "u'dcterms.uri'", ",", ")", "if", "attr", "==", "\"href\"", ":", "rel", "=", "attrs", ".", "get_true", "(", "'rel'", ",", "u''", ")", ".", "lower", "(", ")", "res", "=", "rel", "in", "(", "u'shortcut icon'", ",", "u'icon'", ")", "return", "res" ]
Check if the meta attributes contain a URL.
[ "Check", "if", "the", "meta", "attributes", "contain", "a", "URL", "." ]
python
train
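A quick illustrative check of `is_meta_url` above; `FakeAttrs` is an invented stand-in for linkchecker's attribute wrapper (it only mimics the `get_true` lookup the function relies on), and the import path is taken from the record's path field:

from linkcheck.htmlutil.linkparse import is_meta_url  # module path from the record above

class FakeAttrs(dict):
    """Hypothetical stand-in: return the stored value if truthy, otherwise the default."""
    def get_true(self, key, default):
        return self.get(key) or default

# <meta http-equiv="refresh" content="0; url=...">  -> the content attribute carries a URL
print(is_meta_url("content", FakeAttrs({"http-equiv": u"refresh"})))  # True
# <link rel="icon" href="/favicon.ico">             -> the href attribute carries a URL
print(is_meta_url("href", FakeAttrs({"rel": u"icon"})))               # True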
annoviko/pyclustering
pyclustering/cluster/optics.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/optics.py#L143-L182
def calculate_connvectivity_radius(self, amount_clusters, maximum_iterations = 100):
    """!
    @brief Calculates connectivity radius of allocation specified amount of clusters using ordering diagram and marks borders of clusters using indexes of values of ordering diagram.
    @details Parameter 'maximum_iterations' is used to protect from hanging when it is impossible to allocate specified number of clusters.

    @param[in] amount_clusters (uint): amount of clusters that should be allocated by calculated connectivity radius.
    @param[in] maximum_iterations (uint): maximum number of iteration for searching connectivity radius to allocated specified amount of clusters (by default it is restricted by 100 iterations).

    @return (double, list) Value of connectivity radius and borders of clusters like (radius, borders), radius may be 'None' as well as borders may be '[]' if connectivity radius hasn't been found for the specified amount of iterations.

    """

    maximum_distance = max(self.__ordering)

    upper_distance = maximum_distance
    lower_distance = 0.0

    result = None

    amount, borders = self.extract_cluster_amount(maximum_distance)
    if amount <= amount_clusters:
        for _ in range(maximum_iterations):
            radius = (lower_distance + upper_distance) / 2.0

            amount, borders = self.extract_cluster_amount(radius)
            if amount == amount_clusters:
                result = radius
                break
            elif amount == 0:
                break
            elif amount > amount_clusters:
                lower_distance = radius
            elif amount < amount_clusters:
                upper_distance = radius

    return result, borders
[ "def", "calculate_connvectivity_radius", "(", "self", ",", "amount_clusters", ",", "maximum_iterations", "=", "100", ")", ":", "maximum_distance", "=", "max", "(", "self", ".", "__ordering", ")", "upper_distance", "=", "maximum_distance", "lower_distance", "=", "0.0", "result", "=", "None", "amount", ",", "borders", "=", "self", ".", "extract_cluster_amount", "(", "maximum_distance", ")", "if", "amount", "<=", "amount_clusters", ":", "for", "_", "in", "range", "(", "maximum_iterations", ")", ":", "radius", "=", "(", "lower_distance", "+", "upper_distance", ")", "/", "2.0", "amount", ",", "borders", "=", "self", ".", "extract_cluster_amount", "(", "radius", ")", "if", "amount", "==", "amount_clusters", ":", "result", "=", "radius", "break", "elif", "amount", "==", "0", ":", "break", "elif", "amount", ">", "amount_clusters", ":", "lower_distance", "=", "radius", "elif", "amount", "<", "amount_clusters", ":", "upper_distance", "=", "radius", "return", "result", ",", "borders" ]
! @brief Calculates connectivity radius of allocation specified amount of clusters using ordering diagram and marks borders of clusters using indexes of values of ordering diagram. @details Parameter 'maximum_iterations' is used to protect from hanging when it is impossible to allocate specified number of clusters. @param[in] amount_clusters (uint): amount of clusters that should be allocated by calculated connectivity radius. @param[in] maximum_iterations (uint): maximum number of iteration for searching connectivity radius to allocated specified amount of clusters (by default it is restricted by 100 iterations). @return (double, list) Value of connectivity radius and borders of clusters like (radius, borders), radius may be 'None' as well as borders may be '[]' if connectivity radius hasn't been found for the specified amount of iterations.
[ "!" ]
python
valid
IDSIA/sacred
sacred/ingredient.py
https://github.com/IDSIA/sacred/blob/72633776bed9b5bddf93ae7d215188e61970973a/sacred/ingredient.py#L193-L211
def add_config(self, cfg_or_file=None, **kw_conf):
    """
    Add a configuration entry to this ingredient/experiment.

    Can be called with a filename, a dictionary xor with keyword arguments.

    Supported formats for the config-file so far are: ``json``, ``pickle``
    and ``yaml``.

    The resulting dictionary will be converted into a
    :class:`~sacred.config_scope.ConfigDict`.

    :param cfg_or_file: Configuration dictionary of filename of config file
                        to add to this ingredient/experiment.
    :type cfg_or_file: dict or str
    :param kw_conf: Configuration entries to be added to this
                    ingredient/experiment.
    """
    self.configurations.append(self._create_config_dict(cfg_or_file, kw_conf))
[ "def", "add_config", "(", "self", ",", "cfg_or_file", "=", "None", ",", "*", "*", "kw_conf", ")", ":", "self", ".", "configurations", ".", "append", "(", "self", ".", "_create_config_dict", "(", "cfg_or_file", ",", "kw_conf", ")", ")" ]
Add a configuration entry to this ingredient/experiment.

Can be called with a filename, a dictionary xor with keyword arguments.

Supported formats for the config-file so far are: ``json``, ``pickle``
and ``yaml``.

The resulting dictionary will be converted into a
:class:`~sacred.config_scope.ConfigDict`.

:param cfg_or_file: Configuration dictionary of filename of config file
                    to add to this ingredient/experiment.
:type cfg_or_file: dict or str
:param kw_conf: Configuration entries to be added to this
                ingredient/experiment.
[ "Add", "a", "configuration", "entry", "to", "this", "ingredient", "/", "experiment", "." ]
python
train
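A hedged usage sketch for `add_config` above, assuming the sacred package is installed; the experiment name and config values are invented for illustration:

from sacred import Experiment

ex = Experiment('my_experiment')

# Keyword arguments...
ex.add_config(batch_size=32, learning_rate=1e-3)

# ...or a plain dictionary; a filename such as 'config.yaml' would also be accepted.
ex.add_config({'epochs': 10})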
ibis-project/ibis
ibis/pandas/udf.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/pandas/udf.py#L219-L237
def parameter_count(funcsig):
    """Get the number of positional-or-keyword or position-only parameters in
    a function signature.

    Parameters
    ----------
    funcsig : inspect.Signature
        A UDF signature

    Returns
    -------
    int
        The number of parameters
    """
    return sum(
        param.kind in {param.POSITIONAL_OR_KEYWORD, param.POSITIONAL_ONLY}
        for param in funcsig.parameters.values()
        if param.default is Parameter.empty
    )
[ "def", "parameter_count", "(", "funcsig", ")", ":", "return", "sum", "(", "param", ".", "kind", "in", "{", "param", ".", "POSITIONAL_OR_KEYWORD", ",", "param", ".", "POSITIONAL_ONLY", "}", "for", "param", "in", "funcsig", ".", "parameters", ".", "values", "(", ")", "if", "param", ".", "default", "is", "Parameter", ".", "empty", ")" ]
Get the number of positional-or-keyword or position-only parameters in
a function signature.

Parameters
----------
funcsig : inspect.Signature
    A UDF signature

Returns
-------
int
    The number of parameters
[ "Get", "the", "number", "of", "positional", "-", "or", "-", "keyword", "or", "position", "-", "only", "parameters", "in", "a", "function", "signature", "." ]
python
train
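A small usage sketch of `parameter_count` above; the import path follows the record's path field and `add` is a made-up sample function:

import inspect
from ibis.pandas.udf import parameter_count  # module path from the record above

def add(a, b, c=0, *args, **kwargs):
    return a + b + c

# Only positional-or-keyword / positional-only parameters without defaults are counted,
# so 'c', '*args' and '**kwargs' are ignored here.
print(parameter_count(inspect.signature(add)))  # 2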
manicmaniac/headlessvim
headlessvim/arguments.py
https://github.com/manicmaniac/headlessvim/blob/3e4657f95d981ddf21fd285b7e1b9da2154f9cb9/headlessvim/arguments.py#L24-L35
def parse(self, args):
    """
    :param args: arguments
    :type args: None or string or list of string
    :return: formatted arguments if specified else ``self.default_args``
    :rtype: list of string
    """
    if args is None:
        args = self._default_args
    if isinstance(args, six.string_types):
        args = shlex.split(args)
    return args
[ "def", "parse", "(", "self", ",", "args", ")", ":", "if", "args", "is", "None", ":", "args", "=", "self", ".", "_default_args", "if", "isinstance", "(", "args", ",", "six", ".", "string_types", ")", ":", "args", "=", "shlex", ".", "split", "(", "args", ")", "return", "args" ]
:param args: arguments
:type args: None or string or list of string
:return: formatted arguments if specified else ``self.default_args``
:rtype: list of string
[ ":", "param", "args", ":", "arguments", ":", "type", "args", ":", "None", "or", "string", "or", "list", "of", "string", ":", "return", ":", "formatted", "arguments", "if", "specified", "else", "self", ".", "default_args", ":", "rtype", ":", "list", "of", "string" ]
python
valid
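The parser above simply falls back to defaults for `None` and shlex-splits strings; the standalone sketch below reproduces that behaviour outside the class, so `normalize_args` and its default value are illustrative, not part of headlessvim's API:

import shlex

def normalize_args(args, default_args=('-N', '-u', 'NONE')):
    # None -> defaults, a string -> shlex-split, a list -> returned unchanged
    # (the original uses six.string_types for Python 2/3 compatibility).
    if args is None:
        return list(default_args)
    if isinstance(args, str):
        return shlex.split(args)
    return args

print(normalize_args('-N -u NONE'))  # ['-N', '-u', 'NONE']
print(normalize_args(None))          # ['-N', '-u', 'NONE']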
NiklasRosenstein-Python/nr-deprecated
nr/py/bytecode.py
https://github.com/NiklasRosenstein-Python/nr-deprecated/blob/f9f8b89ea1b084841a8ab65784eaf68852686b2a/nr/py/bytecode.py#L175-L233
def get_assigned_name(frame):
    """
    Checks the bytecode of *frame* to find the name of the variable a result is
    being assigned to and returns that name. Returns the full left operand of the
    assignment. Raises a #ValueError if the variable name could not be retrieved
    from the bytecode (eg. if an unpack sequence is on the left side of the
    assignment).

    > **Known Limitations**: The expression in the *frame* from which this
    > function is called must be the first part of that expression. For
    > example, `foo = [get_assigned_name(get_frame())] + [42]` works,
    > but `foo = [42, get_assigned_name(get_frame())]` does not!

    ```python
    >>> var = get_assigned_name(sys._getframe())
    >>> assert var == 'var'
    ```

    __Available in Python 3.4, 3.5__
    """
    SEARCHING, MATCHED = 1, 2
    state = SEARCHING
    result = ''
    stacksize = 0

    for op in dis.get_instructions(frame.f_code):
        if state == SEARCHING and op.offset == frame.f_lasti:
            if not op.opname.startswith('CALL_FUNCTION'):
                raise RuntimeError('get_assigned_name() requires entry at CALL_FUNCTION')
            state = MATCHED

            # For a top-level expression, the stack-size should be 1 after
            # the function at which we entered was executed.
            stacksize = 1
        elif state == MATCHED:
            # Update the would-be size of the stack after this instruction.
            # If we're at zero, we found the last instruction of the expression.
            try:
                stacksize += get_stackdelta(op)
            except KeyError:
                raise RuntimeError('could not determined assigned name, instruction '
                                   '{} is not supported'.format(op.opname))

            if stacksize == 0:
                if op.opname not in ('STORE_NAME', 'STORE_ATTR', 'STORE_GLOBAL', 'STORE_FAST'):
                    raise ValueError('expression is not assigned or branch is not first part of the expression')
                return result + op.argval
            elif stacksize < 0:
                raise ValueError('not a top-level expression')

            if op.opname.startswith('CALL_FUNCTION'):
                # Chained or nested function call.
                raise ValueError('inside a chained or nested function call')
            elif op.opname == 'LOAD_ATTR':
                result += op.argval + '.'

    if not result:
        raise RuntimeError('last frame instruction not found')
    assert False
[ "def", "get_assigned_name", "(", "frame", ")", ":", "SEARCHING", ",", "MATCHED", "=", "1", ",", "2", "state", "=", "SEARCHING", "result", "=", "''", "stacksize", "=", "0", "for", "op", "in", "dis", ".", "get_instructions", "(", "frame", ".", "f_code", ")", ":", "if", "state", "==", "SEARCHING", "and", "op", ".", "offset", "==", "frame", ".", "f_lasti", ":", "if", "not", "op", ".", "opname", ".", "startswith", "(", "'CALL_FUNCTION'", ")", ":", "raise", "RuntimeError", "(", "'get_assigned_name() requires entry at CALL_FUNCTION'", ")", "state", "=", "MATCHED", "# For a top-level expression, the stack-size should be 1 after", "# the function at which we entered was executed.", "stacksize", "=", "1", "elif", "state", "==", "MATCHED", ":", "# Update the would-be size of the stack after this instruction.", "# If we're at zero, we found the last instruction of the expression.", "try", ":", "stacksize", "+=", "get_stackdelta", "(", "op", ")", "except", "KeyError", ":", "raise", "RuntimeError", "(", "'could not determined assigned name, instruction '", "'{} is not supported'", ".", "format", "(", "op", ".", "opname", ")", ")", "if", "stacksize", "==", "0", ":", "if", "op", ".", "opname", "not", "in", "(", "'STORE_NAME'", ",", "'STORE_ATTR'", ",", "'STORE_GLOBAL'", ",", "'STORE_FAST'", ")", ":", "raise", "ValueError", "(", "'expression is not assigned or branch is not first part of the expression'", ")", "return", "result", "+", "op", ".", "argval", "elif", "stacksize", "<", "0", ":", "raise", "ValueError", "(", "'not a top-level expression'", ")", "if", "op", ".", "opname", ".", "startswith", "(", "'CALL_FUNCTION'", ")", ":", "# Chained or nested function call.", "raise", "ValueError", "(", "'inside a chained or nested function call'", ")", "elif", "op", ".", "opname", "==", "'LOAD_ATTR'", ":", "result", "+=", "op", ".", "argval", "+", "'.'", "if", "not", "result", ":", "raise", "RuntimeError", "(", "'last frame instruction not found'", ")", "assert", "False" ]
Checks the bytecode of *frame* to find the name of the variable a result is being assigned to and returns that name. Returns the full left operand of the assignment. Raises a #ValueError if the variable name could not be retrieved from the bytecode (eg. if an unpack sequence is on the left side of the assignment). > **Known Limitations**: The expression in the *frame* from which this > function is called must be the first part of that expression. For > example, `foo = [get_assigned_name(get_frame())] + [42]` works, > but `foo = [42, get_assigned_name(get_frame())]` does not! ```python >>> var = get_assigned_name(sys._getframe()) >>> assert var == 'var' ``` __Available in Python 3.4, 3.5__
[ "Checks", "the", "bytecode", "of", "*", "frame", "*", "to", "find", "the", "name", "of", "the", "variable", "a", "result", "is", "being", "assigned", "to", "and", "returns", "that", "name", ".", "Returns", "the", "full", "left", "operand", "of", "the", "assignment", ".", "Raises", "a", "#ValueError", "if", "the", "variable", "name", "could", "not", "be", "retrieved", "from", "the", "bytecode", "(", "eg", ".", "if", "an", "unpack", "sequence", "is", "on", "the", "left", "side", "of", "the", "assignment", ")", "." ]
python
train
rodluger/everest
everest/missions/k2/k2.py
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/k2.py#L715-L881
def GetNeighbors(EPIC, season=None, model=None, neighbors=10, mag_range=(11., 13.), cdpp_range=None, aperture_name='k2sff_15', cadence='lc', **kwargs): ''' Return `neighbors` random bright stars on the same module as `EPIC`. :param int EPIC: The EPIC ID number :param str model: The :py:obj:`everest` model name. Only used when \ imposing CDPP bounds. Default :py:obj:`None` :param int neighbors: Number of neighbors to return. Default 10 :param str aperture_name: The name of the aperture to use. Select \ `custom` to call \ :py:func:`GetCustomAperture`. Default `k2sff_15` :param str cadence: The light curve cadence. Default `lc` :param tuple mag_range: (`low`, `high`) values for the Kepler magnitude. \ Default (11, 13) :param tuple cdpp_range: (`low`, `high`) values for the de-trended CDPP. \ Default :py:obj:`None` ''' # Zero neighbors? if neighbors == 0: return [] # Get the IDs # Campaign no. if season is None: campaign = Season(EPIC) if hasattr(campaign, '__len__'): raise AttributeError( "Please choose a campaign/season for this target: %s." % campaign) else: campaign = season epics, kepmags, channels, short_cadence = np.array(GetK2Stars()[ campaign]).T short_cadence = np.array(short_cadence, dtype=bool) epics = np.array(epics, dtype=int) c = GetNeighboringChannels(Channel(EPIC, campaign=season)) # Manage kwargs if aperture_name is None: aperture_name = 'k2sff_15' if mag_range is None: mag_lo = -np.inf mag_hi = np.inf else: mag_lo = mag_range[0] mag_hi = mag_range[1] # K2-specific tweak. The short cadence stars are preferentially # really bright ones, so we won't get many neighbors if we # stick to the default magnitude range! I'm # therefore enforcing a lower magnitude cut-off of 8. if cadence == 'sc': mag_lo = 8. if cdpp_range is None: cdpp_lo = -np.inf cdpp_hi = np.inf else: cdpp_lo = cdpp_range[0] cdpp_hi = cdpp_range[1] targets = [] # First look for nearby targets, then relax the constraint # If still no targets, widen magnitude range for n in range(3): if n == 0: nearby = True elif n == 1: nearby = False elif n == 2: mag_lo -= 1 mag_hi += 1 # Loop over all stars for star, kp, channel, sc in zip(epics, kepmags, channels, short_cadence): # Preliminary vetting if not (((channel in c) if nearby else True) and (kp < mag_hi) \ and (kp > mag_lo) and (sc if cadence == 'sc' else True)): continue # Reject if self or if already in list if (star == EPIC) or (star in targets): continue # Ensure raw light curve file exists if not os.path.exists( os.path.join(TargetDirectory(star, campaign), 'data.npz')): continue # Ensure crowding is OK. This is quite conservative, as we # need to prevent potential astrophysical false positive # contamination from crowded planet-hosting neighbors when # doing neighboring PLD. contam = False data = np.load(os.path.join( TargetDirectory(star, campaign), 'data.npz')) aperture = data['apertures'][()][aperture_name] # Check that the aperture exists! if aperture is None: continue fpix = data['fpix'] for source in data['nearby'][()]: # Ignore self if source['ID'] == star: continue # Ignore really dim stars if source['mag'] < kp - 5: continue # Compute source position x = int(np.round(source['x'] - source['x0'])) y = int(np.round(source['y'] - source['y0'])) # If the source is within two pixels of the edge # of the target aperture, reject the target for j in [x - 2, x - 1, x, x + 1, x + 2]: if j < 0: # Outside the postage stamp continue for i in [y - 2, y - 1, y, y + 1, y + 2]: if i < 0: # Outside the postage stamp continue try: if aperture[i][j]: # Oh-oh! 
contam = True except IndexError: # Out of bounds... carry on! pass if contam: continue # HACK: This happens for K2SFF M67 targets in C05. # Let's skip them if aperture.shape != fpix.shape[1:]: continue # Reject if the model is not present if model is not None: if not os.path.exists(os.path.join( TargetDirectory(star, campaign), model + '.npz')): continue # Reject if CDPP out of range if cdpp_range is not None: cdpp = np.load(os.path.join(TargetDirectory( star, campaign), model + '.npz'))['cdpp'] if (cdpp > cdpp_hi) or (cdpp < cdpp_lo): continue # Passed all the tests! targets.append(star) # Do we have enough? If so, return if len(targets) == neighbors: random.shuffle(targets) return targets # If we get to this point, we didn't find enough neighbors... # Return what we have anyway. return targets
[ "def", "GetNeighbors", "(", "EPIC", ",", "season", "=", "None", ",", "model", "=", "None", ",", "neighbors", "=", "10", ",", "mag_range", "=", "(", "11.", ",", "13.", ")", ",", "cdpp_range", "=", "None", ",", "aperture_name", "=", "'k2sff_15'", ",", "cadence", "=", "'lc'", ",", "*", "*", "kwargs", ")", ":", "# Zero neighbors?", "if", "neighbors", "==", "0", ":", "return", "[", "]", "# Get the IDs", "# Campaign no.", "if", "season", "is", "None", ":", "campaign", "=", "Season", "(", "EPIC", ")", "if", "hasattr", "(", "campaign", ",", "'__len__'", ")", ":", "raise", "AttributeError", "(", "\"Please choose a campaign/season for this target: %s.\"", "%", "campaign", ")", "else", ":", "campaign", "=", "season", "epics", ",", "kepmags", ",", "channels", ",", "short_cadence", "=", "np", ".", "array", "(", "GetK2Stars", "(", ")", "[", "campaign", "]", ")", ".", "T", "short_cadence", "=", "np", ".", "array", "(", "short_cadence", ",", "dtype", "=", "bool", ")", "epics", "=", "np", ".", "array", "(", "epics", ",", "dtype", "=", "int", ")", "c", "=", "GetNeighboringChannels", "(", "Channel", "(", "EPIC", ",", "campaign", "=", "season", ")", ")", "# Manage kwargs", "if", "aperture_name", "is", "None", ":", "aperture_name", "=", "'k2sff_15'", "if", "mag_range", "is", "None", ":", "mag_lo", "=", "-", "np", ".", "inf", "mag_hi", "=", "np", ".", "inf", "else", ":", "mag_lo", "=", "mag_range", "[", "0", "]", "mag_hi", "=", "mag_range", "[", "1", "]", "# K2-specific tweak. The short cadence stars are preferentially", "# really bright ones, so we won't get many neighbors if we", "# stick to the default magnitude range! I'm", "# therefore enforcing a lower magnitude cut-off of 8.", "if", "cadence", "==", "'sc'", ":", "mag_lo", "=", "8.", "if", "cdpp_range", "is", "None", ":", "cdpp_lo", "=", "-", "np", ".", "inf", "cdpp_hi", "=", "np", ".", "inf", "else", ":", "cdpp_lo", "=", "cdpp_range", "[", "0", "]", "cdpp_hi", "=", "cdpp_range", "[", "1", "]", "targets", "=", "[", "]", "# First look for nearby targets, then relax the constraint", "# If still no targets, widen magnitude range", "for", "n", "in", "range", "(", "3", ")", ":", "if", "n", "==", "0", ":", "nearby", "=", "True", "elif", "n", "==", "1", ":", "nearby", "=", "False", "elif", "n", "==", "2", ":", "mag_lo", "-=", "1", "mag_hi", "+=", "1", "# Loop over all stars", "for", "star", ",", "kp", ",", "channel", ",", "sc", "in", "zip", "(", "epics", ",", "kepmags", ",", "channels", ",", "short_cadence", ")", ":", "# Preliminary vetting", "if", "not", "(", "(", "(", "channel", "in", "c", ")", "if", "nearby", "else", "True", ")", "and", "(", "kp", "<", "mag_hi", ")", "and", "(", "kp", ">", "mag_lo", ")", "and", "(", "sc", "if", "cadence", "==", "'sc'", "else", "True", ")", ")", ":", "continue", "# Reject if self or if already in list", "if", "(", "star", "==", "EPIC", ")", "or", "(", "star", "in", "targets", ")", ":", "continue", "# Ensure raw light curve file exists", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "TargetDirectory", "(", "star", ",", "campaign", ")", ",", "'data.npz'", ")", ")", ":", "continue", "# Ensure crowding is OK. 
This is quite conservative, as we", "# need to prevent potential astrophysical false positive", "# contamination from crowded planet-hosting neighbors when", "# doing neighboring PLD.", "contam", "=", "False", "data", "=", "np", ".", "load", "(", "os", ".", "path", ".", "join", "(", "TargetDirectory", "(", "star", ",", "campaign", ")", ",", "'data.npz'", ")", ")", "aperture", "=", "data", "[", "'apertures'", "]", "[", "(", ")", "]", "[", "aperture_name", "]", "# Check that the aperture exists!", "if", "aperture", "is", "None", ":", "continue", "fpix", "=", "data", "[", "'fpix'", "]", "for", "source", "in", "data", "[", "'nearby'", "]", "[", "(", ")", "]", ":", "# Ignore self", "if", "source", "[", "'ID'", "]", "==", "star", ":", "continue", "# Ignore really dim stars", "if", "source", "[", "'mag'", "]", "<", "kp", "-", "5", ":", "continue", "# Compute source position", "x", "=", "int", "(", "np", ".", "round", "(", "source", "[", "'x'", "]", "-", "source", "[", "'x0'", "]", ")", ")", "y", "=", "int", "(", "np", ".", "round", "(", "source", "[", "'y'", "]", "-", "source", "[", "'y0'", "]", ")", ")", "# If the source is within two pixels of the edge", "# of the target aperture, reject the target", "for", "j", "in", "[", "x", "-", "2", ",", "x", "-", "1", ",", "x", ",", "x", "+", "1", ",", "x", "+", "2", "]", ":", "if", "j", "<", "0", ":", "# Outside the postage stamp", "continue", "for", "i", "in", "[", "y", "-", "2", ",", "y", "-", "1", ",", "y", ",", "y", "+", "1", ",", "y", "+", "2", "]", ":", "if", "i", "<", "0", ":", "# Outside the postage stamp", "continue", "try", ":", "if", "aperture", "[", "i", "]", "[", "j", "]", ":", "# Oh-oh!", "contam", "=", "True", "except", "IndexError", ":", "# Out of bounds... carry on!", "pass", "if", "contam", ":", "continue", "# HACK: This happens for K2SFF M67 targets in C05.", "# Let's skip them", "if", "aperture", ".", "shape", "!=", "fpix", ".", "shape", "[", "1", ":", "]", ":", "continue", "# Reject if the model is not present", "if", "model", "is", "not", "None", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "TargetDirectory", "(", "star", ",", "campaign", ")", ",", "model", "+", "'.npz'", ")", ")", ":", "continue", "# Reject if CDPP out of range", "if", "cdpp_range", "is", "not", "None", ":", "cdpp", "=", "np", ".", "load", "(", "os", ".", "path", ".", "join", "(", "TargetDirectory", "(", "star", ",", "campaign", ")", ",", "model", "+", "'.npz'", ")", ")", "[", "'cdpp'", "]", "if", "(", "cdpp", ">", "cdpp_hi", ")", "or", "(", "cdpp", "<", "cdpp_lo", ")", ":", "continue", "# Passed all the tests!", "targets", ".", "append", "(", "star", ")", "# Do we have enough? If so, return", "if", "len", "(", "targets", ")", "==", "neighbors", ":", "random", ".", "shuffle", "(", "targets", ")", "return", "targets", "# If we get to this point, we didn't find enough neighbors...", "# Return what we have anyway.", "return", "targets" ]
Return `neighbors` random bright stars on the same module as `EPIC`. :param int EPIC: The EPIC ID number :param str model: The :py:obj:`everest` model name. Only used when \ imposing CDPP bounds. Default :py:obj:`None` :param int neighbors: Number of neighbors to return. Default 10 :param str aperture_name: The name of the aperture to use. Select \ `custom` to call \ :py:func:`GetCustomAperture`. Default `k2sff_15` :param str cadence: The light curve cadence. Default `lc` :param tuple mag_range: (`low`, `high`) values for the Kepler magnitude. \ Default (11, 13) :param tuple cdpp_range: (`low`, `high`) values for the de-trended CDPP. \ Default :py:obj:`None`
[ "Return", "neighbors", "random", "bright", "stars", "on", "the", "same", "module", "as", "EPIC", "." ]
python
train
KennethWilke/PingdomLib
pingdomlib/reports.py
https://github.com/KennethWilke/PingdomLib/blob/3ed1e481f9c9d16b032558d62fb05c2166e162ed/pingdomlib/reports.py#L101-L106
def delete(self):
    """Delete this email report"""

    response = self.pingdom.request('DELETE', 'reports.shared/%s' % self.id)

    return response.json()['message']
[ "def", "delete", "(", "self", ")", ":", "response", "=", "self", ".", "pingdom", ".", "request", "(", "'DELETE'", ",", "'reports.shared/%s'", "%", "self", ".", "id", ")", "return", "response", ".", "json", "(", ")", "[", "'message'", "]" ]
Delete this email report
[ "Delete", "this", "email", "report" ]
python
train
kennethreitz/maya
maya/core.py
https://github.com/kennethreitz/maya/blob/774b141d91a83a5d77cb5351db3d02bf50564b21/maya/core.py#L22-L43
def validate_class_type_arguments(operator):
    """
    Decorator to validate all the arguments to function
    are of the type of calling class for passed operator
    """

    def inner(function):
        def wrapper(self, *args, **kwargs):
            for arg in args + tuple(kwargs.values()):
                if not isinstance(arg, self.__class__):
                    raise TypeError(
                        'unorderable types: {}() {} {}()'.format(
                            type(self).__name__, operator, type(arg).__name__
                        )
                    )
            return function(self, *args, **kwargs)

        return wrapper

    return inner
[ "def", "validate_class_type_arguments", "(", "operator", ")", ":", "def", "inner", "(", "function", ")", ":", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "arg", "in", "args", "+", "tuple", "(", "kwargs", ".", "values", "(", ")", ")", ":", "if", "not", "isinstance", "(", "arg", ",", "self", ".", "__class__", ")", ":", "raise", "TypeError", "(", "'unorderable types: {}() {} {}()'", ".", "format", "(", "type", "(", "self", ")", ".", "__name__", ",", "operator", ",", "type", "(", "arg", ")", ".", "__name__", ")", ")", "return", "function", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper", "return", "inner" ]
Decorator to validate all the arguments to function are of the type of calling class for passed operator
[ "Decorator", "to", "validate", "all", "the", "arguments", "to", "function", "are", "of", "the", "type", "of", "calling", "class", "for", "passed", "operator" ]
python
train
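An illustrative sketch of how a decorator like `validate_class_type_arguments` above can guard comparison operators; the `Money` class is invented for the example, and the import path follows the record's path field:

from maya.core import validate_class_type_arguments  # module path from the record above

class Money(object):
    def __init__(self, amount):
        self.amount = amount

    @validate_class_type_arguments('<')
    def __lt__(self, other):
        return self.amount < other.amount

print(Money(1) < Money(2))  # True
# Money(1) < 2 would raise: TypeError: unorderable types: Money() < int()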
saltstack/salt
salt/modules/mac_portspkg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_portspkg.py#L243-L355
def install(name=None, refresh=False, pkgs=None, **kwargs): ''' Install the passed package(s) with ``port install`` name The name of the formula to be installed. Note that this parameter is ignored if "pkgs" is passed. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> version Specify a version to pkg to install. Ignored if pkgs is specified. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> salt '*' pkg.install git-core version='1.8.5.5' variant Specify a variant to pkg to install. Ignored if pkgs is specified. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> salt '*' pkg.install git-core version='1.8.5.5' variant='+credential_osxkeychain+doc+pcre' Multiple Package Installation Options: pkgs A list of formulas to install. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.install pkgs='["foo","bar"]' salt '*' pkg.install pkgs='["[email protected]","bar"]' salt '*' pkg.install pkgs='["[email protected]+ssl","[email protected]"]' Returns a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.install 'package package package' ''' pkg_params, pkg_type = \ __salt__['pkg_resource.parse_targets'](name, pkgs, {}) if salt.utils.data.is_true(refresh): refresh_db() # Handle version kwarg for a single package target if pkgs is None: version_num = kwargs.get('version') variant_spec = kwargs.get('variant') spec = {} if version_num: spec['version'] = version_num if variant_spec: spec['variant'] = variant_spec pkg_params = {name: spec} if not pkg_params: return {} formulas_array = [] for pname, pparams in six.iteritems(pkg_params): formulas_array.append(pname) if pparams: if 'version' in pparams: formulas_array.append('@' + pparams['version']) if 'variant' in pparams: formulas_array.append(pparams['variant']) old = list_pkgs() cmd = ['port', 'install'] cmd.extend(formulas_array) err_message = '' try: salt.utils.mac_utils.execute_return_success(cmd) except CommandExecutionError as exc: err_message = exc.strerror __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if err_message: raise CommandExecutionError( 'Problem encountered installing package(s)', info={'errors': err_message, 'changes': ret}) return ret
[ "def", "install", "(", "name", "=", "None", ",", "refresh", "=", "False", ",", "pkgs", "=", "None", ",", "*", "*", "kwargs", ")", ":", "pkg_params", ",", "pkg_type", "=", "__salt__", "[", "'pkg_resource.parse_targets'", "]", "(", "name", ",", "pkgs", ",", "{", "}", ")", "if", "salt", ".", "utils", ".", "data", ".", "is_true", "(", "refresh", ")", ":", "refresh_db", "(", ")", "# Handle version kwarg for a single package target", "if", "pkgs", "is", "None", ":", "version_num", "=", "kwargs", ".", "get", "(", "'version'", ")", "variant_spec", "=", "kwargs", ".", "get", "(", "'variant'", ")", "spec", "=", "{", "}", "if", "version_num", ":", "spec", "[", "'version'", "]", "=", "version_num", "if", "variant_spec", ":", "spec", "[", "'variant'", "]", "=", "variant_spec", "pkg_params", "=", "{", "name", ":", "spec", "}", "if", "not", "pkg_params", ":", "return", "{", "}", "formulas_array", "=", "[", "]", "for", "pname", ",", "pparams", "in", "six", ".", "iteritems", "(", "pkg_params", ")", ":", "formulas_array", ".", "append", "(", "pname", ")", "if", "pparams", ":", "if", "'version'", "in", "pparams", ":", "formulas_array", ".", "append", "(", "'@'", "+", "pparams", "[", "'version'", "]", ")", "if", "'variant'", "in", "pparams", ":", "formulas_array", ".", "append", "(", "pparams", "[", "'variant'", "]", ")", "old", "=", "list_pkgs", "(", ")", "cmd", "=", "[", "'port'", ",", "'install'", "]", "cmd", ".", "extend", "(", "formulas_array", ")", "err_message", "=", "''", "try", ":", "salt", ".", "utils", ".", "mac_utils", ".", "execute_return_success", "(", "cmd", ")", "except", "CommandExecutionError", "as", "exc", ":", "err_message", "=", "exc", ".", "strerror", "__context__", ".", "pop", "(", "'pkg.list_pkgs'", ",", "None", ")", "new", "=", "list_pkgs", "(", ")", "ret", "=", "salt", ".", "utils", ".", "data", ".", "compare_dicts", "(", "old", ",", "new", ")", "if", "err_message", ":", "raise", "CommandExecutionError", "(", "'Problem encountered installing package(s)'", ",", "info", "=", "{", "'errors'", ":", "err_message", ",", "'changes'", ":", "ret", "}", ")", "return", "ret" ]
Install the passed package(s) with ``port install`` name The name of the formula to be installed. Note that this parameter is ignored if "pkgs" is passed. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> version Specify a version to pkg to install. Ignored if pkgs is specified. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> salt '*' pkg.install git-core version='1.8.5.5' variant Specify a variant to pkg to install. Ignored if pkgs is specified. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> salt '*' pkg.install git-core version='1.8.5.5' variant='+credential_osxkeychain+doc+pcre' Multiple Package Installation Options: pkgs A list of formulas to install. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.install pkgs='["foo","bar"]' salt '*' pkg.install pkgs='["[email protected]","bar"]' salt '*' pkg.install pkgs='["[email protected]+ssl","[email protected]"]' Returns a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.install 'package package package'
[ "Install", "the", "passed", "package", "(", "s", ")", "with", "port", "install" ]
python
train
BD2KGenomics/toil-scripts
src/toil_scripts/bwa_alignment/old_alignment_script/batch_align.py
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/bwa_alignment/old_alignment_script/batch_align.py#L180-L238
def alignment(job, ids, input_args, sample):
    """
    Runs BWA and then Bamsort on the supplied fastqs for this sample

    Input1: Toil Job instance
    Input2: jobstore id dictionary
    Input3: Input arguments dictionary
    Input4: Sample tuple -- contains uuid and urls for the sample
    """
    uuid, urls = sample
    # ids['bam'] = job.fileStore.getEmptyFileStoreID()
    work_dir = job.fileStore.getLocalTempDir()
    output_dir = input_args['output_dir']
    key_path = input_args['ssec']
    cores = multiprocessing.cpu_count()

    # I/O
    return_input_paths(job, work_dir, ids, 'ref.fa', 'ref.fa.amb', 'ref.fa.ann',
                       'ref.fa.bwt', 'ref.fa.pac', 'ref.fa.sa', 'ref.fa.fai')
    # Get fastqs associated with this sample
    for url in urls:
        download_encrypted_file(work_dir, url, key_path, os.path.basename(url))

    # Parameters for BWA and Bamsort
    docker_cmd = ['docker', 'run', '--rm', '-v', '{}:/data'.format(work_dir)]

    bwa_command = ["jvivian/bwa",
                   "mem",
                   "-R", "@RG\tID:{0}\tPL:Illumina\tSM:{0}\tLB:KapaHyper".format(uuid),
                   "-T", str(0),
                   "-t", str(cores),
                   "/data/ref.fa"] + [os.path.join('/data/', os.path.basename(x)) for x in urls]

    bamsort_command = ["jeltje/biobambam",
                       "/usr/local/bin/bamsort",
                       "inputformat=sam",
                       "level=1",
                       "inputthreads={}".format(cores),
                       "outputthreads={}".format(cores),
                       "calmdnm=1",
                       "calmdnmrecompindetonly=1",
                       "calmdnmreference=/data/ref.fa",
                       "I=/data/{}".format(uuid + '.sam')]

    # Piping the output to a file handle
    with open(os.path.join(work_dir, uuid + '.sam'), 'w') as f_out:
        subprocess.check_call(docker_cmd + bwa_command, stdout=f_out)

    with open(os.path.join(work_dir, uuid + '.bam'), 'w') as f_out:
        subprocess.check_call(docker_cmd + bamsort_command, stdout=f_out)

    # Save in JobStore
    # job.fileStore.updateGlobalFile(ids['bam'], os.path.join(work_dir, uuid + '.bam'))
    ids['bam'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, uuid + '.bam'))

    # Copy file to S3
    if input_args['s3_dir']:
        job.addChildJobFn(upload_bam_to_s3, ids, input_args, sample, cores=32, memory='20 G', disk='30 G')

    # Move file in output_dir
    if input_args['output_dir']:
        move_to_output_dir(work_dir, output_dir, uuid=None, files=[uuid + '.bam'])
[ "def", "alignment", "(", "job", ",", "ids", ",", "input_args", ",", "sample", ")", ":", "uuid", ",", "urls", "=", "sample", "# ids['bam'] = job.fileStore.getEmptyFileStoreID()", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "output_dir", "=", "input_args", "[", "'output_dir'", "]", "key_path", "=", "input_args", "[", "'ssec'", "]", "cores", "=", "multiprocessing", ".", "cpu_count", "(", ")", "# I/O", "return_input_paths", "(", "job", ",", "work_dir", ",", "ids", ",", "'ref.fa'", ",", "'ref.fa.amb'", ",", "'ref.fa.ann'", ",", "'ref.fa.bwt'", ",", "'ref.fa.pac'", ",", "'ref.fa.sa'", ",", "'ref.fa.fai'", ")", "# Get fastqs associated with this sample", "for", "url", "in", "urls", ":", "download_encrypted_file", "(", "work_dir", ",", "url", ",", "key_path", ",", "os", ".", "path", ".", "basename", "(", "url", ")", ")", "# Parameters for BWA and Bamsort", "docker_cmd", "=", "[", "'docker'", ",", "'run'", ",", "'--rm'", ",", "'-v'", ",", "'{}:/data'", ".", "format", "(", "work_dir", ")", "]", "bwa_command", "=", "[", "\"jvivian/bwa\"", ",", "\"mem\"", ",", "\"-R\"", ",", "\"@RG\\tID:{0}\\tPL:Illumina\\tSM:{0}\\tLB:KapaHyper\"", ".", "format", "(", "uuid", ")", ",", "\"-T\"", ",", "str", "(", "0", ")", ",", "\"-t\"", ",", "str", "(", "cores", ")", ",", "\"/data/ref.fa\"", "]", "+", "[", "os", ".", "path", ".", "join", "(", "'/data/'", ",", "os", ".", "path", ".", "basename", "(", "x", ")", ")", "for", "x", "in", "urls", "]", "bamsort_command", "=", "[", "\"jeltje/biobambam\"", ",", "\"/usr/local/bin/bamsort\"", ",", "\"inputformat=sam\"", ",", "\"level=1\"", ",", "\"inputthreads={}\"", ".", "format", "(", "cores", ")", ",", "\"outputthreads={}\"", ".", "format", "(", "cores", ")", ",", "\"calmdnm=1\"", ",", "\"calmdnmrecompindetonly=1\"", ",", "\"calmdnmreference=/data/ref.fa\"", ",", "\"I=/data/{}\"", ".", "format", "(", "uuid", "+", "'.sam'", ")", "]", "# Piping the output to a file handle", "with", "open", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "uuid", "+", "'.sam'", ")", ",", "'w'", ")", "as", "f_out", ":", "subprocess", ".", "check_call", "(", "docker_cmd", "+", "bwa_command", ",", "stdout", "=", "f_out", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "uuid", "+", "'.bam'", ")", ",", "'w'", ")", "as", "f_out", ":", "subprocess", ".", "check_call", "(", "docker_cmd", "+", "bamsort_command", ",", "stdout", "=", "f_out", ")", "# Save in JobStore", "# job.fileStore.updateGlobalFile(ids['bam'], os.path.join(work_dir, uuid + '.bam'))", "ids", "[", "'bam'", "]", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "uuid", "+", "'.bam'", ")", ")", "# Copy file to S3", "if", "input_args", "[", "'s3_dir'", "]", ":", "job", ".", "addChildJobFn", "(", "upload_bam_to_s3", ",", "ids", ",", "input_args", ",", "sample", ",", "cores", "=", "32", ",", "memory", "=", "'20 G'", ",", "disk", "=", "'30 G'", ")", "# Move file in output_dir", "if", "input_args", "[", "'output_dir'", "]", ":", "move_to_output_dir", "(", "work_dir", ",", "output_dir", ",", "uuid", "=", "None", ",", "files", "=", "[", "uuid", "+", "'.bam'", "]", ")" ]
Runs BWA and then Bamsort on the supplied fastqs for this sample

Input1: Toil Job instance
Input2: jobstore id dictionary
Input3: Input arguments dictionary
Input4: Sample tuple -- contains uuid and urls for the sample
[ "Runs", "BWA", "and", "then", "Bamsort", "on", "the", "supplied", "fastqs", "for", "this", "sample" ]
python
train
openstack/networking-cisco
networking_cisco/plugins/cisco/device_manager/plugging_drivers/vif_hotplug_plugging_driver.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/device_manager/plugging_drivers/vif_hotplug_plugging_driver.py#L164-L189
def setup_logical_port_connectivity(self, context, port_db,
                                    hosting_device_id):
    """Establishes connectivity for a logical port.

    This is done by hot plugging the interface(VIF) corresponding to the
    port from the VM.
    """
    hosting_port = port_db.hosting_info.hosting_port
    if hosting_port:
        try:
            self._dev_mgr.svc_vm_mgr.interface_attach(hosting_device_id,
                                                      hosting_port.id)
            LOG.debug("Setup logical port completed for port:%s",
                      port_db.id)
        except nova_exc.Conflict as e:
            # VM is still in vm_state building
            LOG.debug("Failed to attach interface - spawn thread "
                      "error %(error)s", {'error': str(e)})
            self._gt_pool.spawn_n(self._attach_hosting_port,
                                  hosting_device_id, hosting_port.id)
        except Exception as e:
            LOG.error("Failed to attach interface mapped to port:"
                      "%(p_id)s on hosting device:%(hd_id)s due to "
                      "error %(error)s", {'p_id': hosting_port.id,
                                          'hd_id': hosting_device_id,
                                          'error': str(e)})
[ "def", "setup_logical_port_connectivity", "(", "self", ",", "context", ",", "port_db", ",", "hosting_device_id", ")", ":", "hosting_port", "=", "port_db", ".", "hosting_info", ".", "hosting_port", "if", "hosting_port", ":", "try", ":", "self", ".", "_dev_mgr", ".", "svc_vm_mgr", ".", "interface_attach", "(", "hosting_device_id", ",", "hosting_port", ".", "id", ")", "LOG", ".", "debug", "(", "\"Setup logical port completed for port:%s\"", ",", "port_db", ".", "id", ")", "except", "nova_exc", ".", "Conflict", "as", "e", ":", "# VM is still in vm_state building", "LOG", ".", "debug", "(", "\"Failed to attach interface - spawn thread \"", "\"error %(error)s\"", ",", "{", "'error'", ":", "str", "(", "e", ")", "}", ")", "self", ".", "_gt_pool", ".", "spawn_n", "(", "self", ".", "_attach_hosting_port", ",", "hosting_device_id", ",", "hosting_port", ".", "id", ")", "except", "Exception", "as", "e", ":", "LOG", ".", "error", "(", "\"Failed to attach interface mapped to port:\"", "\"%(p_id)s on hosting device:%(hd_id)s due to \"", "\"error %(error)s\"", ",", "{", "'p_id'", ":", "hosting_port", ".", "id", ",", "'hd_id'", ":", "hosting_device_id", ",", "'error'", ":", "str", "(", "e", ")", "}", ")" ]
Establishes connectivity for a logical port. This is done by hot plugging the interface(VIF) corresponding to the port from the VM.
[ "Establishes", "connectivity", "for", "a", "logical", "port", "." ]
python
train
openergy/oplus
oplus/epm/record.py
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/record.py#L85-L130
def _update_value_inert(self, index, value):
    """
    is only called by _update_inert
    """
    # get field descriptor
    field_descriptor = self._table._dev_descriptor.get_field_descriptor(index)

    # prepare value
    value = field_descriptor.deserialize(value, index)

    # unregister previous link if relevant
    if isinstance(value, Link):
        # de-activate current link if any
        current_link = self._data.get(index)
        if current_link is not None:
            current_link.unregister()

    # unregister previous hook if relevant
    if isinstance(value, RecordHook):
        current_record_hook = self._data.get(index)
        if current_record_hook is not None:
            current_record_hook.unregister()

    # unregister previous external file if relevant
    if isinstance(value, ExternalFile):
        current_external_file = self._data.get(index)
        if current_external_file is not None:
            current_external_file._dev_unregister()

    # if None remove and leave
    if value in (None, NONE_RECORD_HOOK, NONE_LINK, NONE_EXTERNAL_FILE):
        # we don't check required, because this method is called by _update_inert which does the job
        self._dev_set_none_without_unregistering(index, check_not_required=False)
        return

    # if relevant, store current pk to signal table
    old_hook = None
    if index == 0 and not self._table._dev_auto_pk:
        old_hook = self._data.get(0)  # we use get, because record may not have a pk yet if it is being created

    # set value
    self._data[index] = value

    # signal pk update if relevant
    if old_hook is not None:
        self._table._dev_record_pk_was_updated(old_hook.target_value)
[ "def", "_update_value_inert", "(", "self", ",", "index", ",", "value", ")", ":", "# get field descriptor", "field_descriptor", "=", "self", ".", "_table", ".", "_dev_descriptor", ".", "get_field_descriptor", "(", "index", ")", "# prepare value", "value", "=", "field_descriptor", ".", "deserialize", "(", "value", ",", "index", ")", "# unregister previous link if relevant", "if", "isinstance", "(", "value", ",", "Link", ")", ":", "# de-activate current link if any", "current_link", "=", "self", ".", "_data", ".", "get", "(", "index", ")", "if", "current_link", "is", "not", "None", ":", "current_link", ".", "unregister", "(", ")", "# unregister previous hook if relevant", "if", "isinstance", "(", "value", ",", "RecordHook", ")", ":", "current_record_hook", "=", "self", ".", "_data", ".", "get", "(", "index", ")", "if", "current_record_hook", "is", "not", "None", ":", "current_record_hook", ".", "unregister", "(", ")", "# unregister previous external file if relevant", "if", "isinstance", "(", "value", ",", "ExternalFile", ")", ":", "current_external_file", "=", "self", ".", "_data", ".", "get", "(", "index", ")", "if", "current_external_file", "is", "not", "None", ":", "current_external_file", ".", "_dev_unregister", "(", ")", "# if None remove and leave", "if", "value", "in", "(", "None", ",", "NONE_RECORD_HOOK", ",", "NONE_LINK", ",", "NONE_EXTERNAL_FILE", ")", ":", "# we don't check required, because this method is called by _update_inert which does the job", "self", ".", "_dev_set_none_without_unregistering", "(", "index", ",", "check_not_required", "=", "False", ")", "return", "# if relevant, store current pk to signal table", "old_hook", "=", "None", "if", "index", "==", "0", "and", "not", "self", ".", "_table", ".", "_dev_auto_pk", ":", "old_hook", "=", "self", ".", "_data", ".", "get", "(", "0", ")", "# we use get, because record may not have a pk yet if it is being created", "# set value", "self", ".", "_data", "[", "index", "]", "=", "value", "# signal pk update if relevant", "if", "old_hook", "is", "not", "None", ":", "self", ".", "_table", ".", "_dev_record_pk_was_updated", "(", "old_hook", ".", "target_value", ")" ]
is only called by _update_inert
[ "is", "only", "called", "by", "_update_inert" ]
python
test
awslabs/serverless-application-model
samtranslator/model/__init__.py
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/model/__init__.py#L230-L255
def validate_properties(self):
    """Validates that the required properties for this Resource have been populated, and that all properties have
    valid values.

    :returns: True if all properties are valid
    :rtype: bool
    :raises TypeError: if any properties are invalid
    """
    for name, property_type in self.property_types.items():
        value = getattr(self, name)

        # If the property value is an intrinsic function, any remaining validation has to be left to CloudFormation
        if property_type.supports_intrinsics and self._is_intrinsic_function(value):
            continue

        # If the property value has not been set, verify that the property is not required.
        if value is None:
            if property_type.required:
                raise InvalidResourceException(
                    self.logical_id,
                    "Missing required property '{property_name}'.".format(property_name=name))

        # Otherwise, validate the value of the property.
        elif not property_type.validate(value, should_raise=False):
            raise InvalidResourceException(
                self.logical_id,
                "Type of property '{property_name}' is invalid.".format(property_name=name))
[ "def", "validate_properties", "(", "self", ")", ":", "for", "name", ",", "property_type", "in", "self", ".", "property_types", ".", "items", "(", ")", ":", "value", "=", "getattr", "(", "self", ",", "name", ")", "# If the property value is an intrinsic function, any remaining validation has to be left to CloudFormation", "if", "property_type", ".", "supports_intrinsics", "and", "self", ".", "_is_intrinsic_function", "(", "value", ")", ":", "continue", "# If the property value has not been set, verify that the property is not required.", "if", "value", "is", "None", ":", "if", "property_type", ".", "required", ":", "raise", "InvalidResourceException", "(", "self", ".", "logical_id", ",", "\"Missing required property '{property_name}'.\"", ".", "format", "(", "property_name", "=", "name", ")", ")", "# Otherwise, validate the value of the property.", "elif", "not", "property_type", ".", "validate", "(", "value", ",", "should_raise", "=", "False", ")", ":", "raise", "InvalidResourceException", "(", "self", ".", "logical_id", ",", "\"Type of property '{property_name}' is invalid.\"", ".", "format", "(", "property_name", "=", "name", ")", ")" ]
Validates that the required properties for this Resource have been populated, and that all properties have valid values. :returns: True if all properties are valid :rtype: bool :raises TypeError: if any properties are invalid
[ "Validates", "that", "the", "required", "properties", "for", "this", "Resource", "have", "been", "populated", "and", "that", "all", "properties", "have", "valid", "values", "." ]
python
train
zhammer/faaspact-verifier
faaspact_verifier/definitions.py
https://github.com/zhammer/faaspact-verifier/blob/f2b7accb869bcadbe4aecbce1ca8e89d47843b44/faaspact_verifier/definitions.py#L101-L112
def _pluck_provider_state(raw_provider_state: Dict) -> ProviderState: """ >>> _pluck_provider_state({'name': 'there is an egg'}) ProviderState(descriptor='there is an egg', params=None) >>> _pluck_provider_state({'name': 'there is an egg called', 'params': {'name': 'humpty'}}) ProviderState(descriptor='there is an egg called', params={'name': 'humpty'}) """ return ProviderState( descriptor=raw_provider_state['name'], params=raw_provider_state.get('params') )
[ "def", "_pluck_provider_state", "(", "raw_provider_state", ":", "Dict", ")", "->", "ProviderState", ":", "return", "ProviderState", "(", "descriptor", "=", "raw_provider_state", "[", "'name'", "]", ",", "params", "=", "raw_provider_state", ".", "get", "(", "'params'", ")", ")" ]
>>> _pluck_provider_state({'name': 'there is an egg'}) ProviderState(descriptor='there is an egg', params=None) >>> _pluck_provider_state({'name': 'there is an egg called', 'params': {'name': 'humpty'}}) ProviderState(descriptor='there is an egg called', params={'name': 'humpty'})
[ ">>>", "_pluck_provider_state", "(", "{", "name", ":", "there", "is", "an", "egg", "}", ")", "ProviderState", "(", "descriptor", "=", "there", "is", "an", "egg", "params", "=", "None", ")" ]
python
train
numenta/nupic
src/nupic/swarming/ModelRunner.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/ModelRunner.py#L292-L391
def __runTaskMainLoop(self, numIters, learningOffAt=None): """ Main loop of the OPF Model Runner. Parameters: ----------------------------------------------------------------------- recordIterator: Iterator for counting number of records (see _runTask) learningOffAt: If not None, learning is turned off when we reach this iteration number """ ## Reset sequence states in the model, so it starts looking for a new ## sequence self._model.resetSequenceStates() self._currentRecordIndex = -1 while True: # If killed by a terminator, stop running if self._isKilled: break # If job stops or hypersearch ends, stop running if self._isCanceled: break # If the process is about to be killed, set as orphaned if self._isInterrupted.isSet(): self.__setAsOrphaned() break # If model is mature, stop running ONLY IF we are not the best model # for the job. Otherwise, keep running so we can keep returning # predictions to the user if self._isMature: if not self._isBestModel: self._cmpReason = self._jobsDAO.CMPL_REASON_STOPPED break else: self._cmpReason = self._jobsDAO.CMPL_REASON_EOF # Turn off learning? if learningOffAt is not None \ and self._currentRecordIndex == learningOffAt: self._model.disableLearning() # Read input record. Note that any failure here is a critical JOB failure # and results in the job being immediately canceled and marked as # failed. The runModelXXX code in hypesearch.utils, if it sees an # exception of type utils.JobFailException, will cancel the job and # copy the error message into the job record. try: inputRecord = self._inputSource.getNextRecordDict() if self._currentRecordIndex < 0: self._inputSource.setTimeout(10) except Exception, e: raise utils.JobFailException(ErrorCodes.streamReading, str(e.args), traceback.format_exc()) if inputRecord is None: # EOF self._cmpReason = self._jobsDAO.CMPL_REASON_EOF break if inputRecord: # Process input record self._currentRecordIndex += 1 result = self._model.run(inputRecord=inputRecord) # Compute metrics. result.metrics = self.__metricMgr.update(result) # If there are None, use defaults. see MetricsManager.getMetrics() # TODO remove this when JAVA API server is gone if not result.metrics: result.metrics = self.__metricMgr.getMetrics() # Write the result to the output cache. Don't write encodings, if they # were computed if InferenceElement.encodings in result.inferences: result.inferences.pop(InferenceElement.encodings) result.sensorInput.dataEncodings = None self._writePrediction(result) # Run periodic activities self._periodic.tick() if numIters >= 0 and self._currentRecordIndex >= numIters-1: break else: # Input source returned an empty record. # # NOTE: This is okay with Stream-based Source (when it times out # waiting for next record), but not okay with FileSource, which should # always return either with a valid record or None for EOF. raise ValueError("Got an empty record from FileSource: %r" % inputRecord)
[ "def", "__runTaskMainLoop", "(", "self", ",", "numIters", ",", "learningOffAt", "=", "None", ")", ":", "## Reset sequence states in the model, so it starts looking for a new", "## sequence", "self", ".", "_model", ".", "resetSequenceStates", "(", ")", "self", ".", "_currentRecordIndex", "=", "-", "1", "while", "True", ":", "# If killed by a terminator, stop running", "if", "self", ".", "_isKilled", ":", "break", "# If job stops or hypersearch ends, stop running", "if", "self", ".", "_isCanceled", ":", "break", "# If the process is about to be killed, set as orphaned", "if", "self", ".", "_isInterrupted", ".", "isSet", "(", ")", ":", "self", ".", "__setAsOrphaned", "(", ")", "break", "# If model is mature, stop running ONLY IF we are not the best model", "# for the job. Otherwise, keep running so we can keep returning", "# predictions to the user", "if", "self", ".", "_isMature", ":", "if", "not", "self", ".", "_isBestModel", ":", "self", ".", "_cmpReason", "=", "self", ".", "_jobsDAO", ".", "CMPL_REASON_STOPPED", "break", "else", ":", "self", ".", "_cmpReason", "=", "self", ".", "_jobsDAO", ".", "CMPL_REASON_EOF", "# Turn off learning?", "if", "learningOffAt", "is", "not", "None", "and", "self", ".", "_currentRecordIndex", "==", "learningOffAt", ":", "self", ".", "_model", ".", "disableLearning", "(", ")", "# Read input record. Note that any failure here is a critical JOB failure", "# and results in the job being immediately canceled and marked as", "# failed. The runModelXXX code in hypesearch.utils, if it sees an", "# exception of type utils.JobFailException, will cancel the job and", "# copy the error message into the job record.", "try", ":", "inputRecord", "=", "self", ".", "_inputSource", ".", "getNextRecordDict", "(", ")", "if", "self", ".", "_currentRecordIndex", "<", "0", ":", "self", ".", "_inputSource", ".", "setTimeout", "(", "10", ")", "except", "Exception", ",", "e", ":", "raise", "utils", ".", "JobFailException", "(", "ErrorCodes", ".", "streamReading", ",", "str", "(", "e", ".", "args", ")", ",", "traceback", ".", "format_exc", "(", ")", ")", "if", "inputRecord", "is", "None", ":", "# EOF", "self", ".", "_cmpReason", "=", "self", ".", "_jobsDAO", ".", "CMPL_REASON_EOF", "break", "if", "inputRecord", ":", "# Process input record", "self", ".", "_currentRecordIndex", "+=", "1", "result", "=", "self", ".", "_model", ".", "run", "(", "inputRecord", "=", "inputRecord", ")", "# Compute metrics.", "result", ".", "metrics", "=", "self", ".", "__metricMgr", ".", "update", "(", "result", ")", "# If there are None, use defaults. see MetricsManager.getMetrics()", "# TODO remove this when JAVA API server is gone", "if", "not", "result", ".", "metrics", ":", "result", ".", "metrics", "=", "self", ".", "__metricMgr", ".", "getMetrics", "(", ")", "# Write the result to the output cache. 
Don't write encodings, if they", "# were computed", "if", "InferenceElement", ".", "encodings", "in", "result", ".", "inferences", ":", "result", ".", "inferences", ".", "pop", "(", "InferenceElement", ".", "encodings", ")", "result", ".", "sensorInput", ".", "dataEncodings", "=", "None", "self", ".", "_writePrediction", "(", "result", ")", "# Run periodic activities", "self", ".", "_periodic", ".", "tick", "(", ")", "if", "numIters", ">=", "0", "and", "self", ".", "_currentRecordIndex", ">=", "numIters", "-", "1", ":", "break", "else", ":", "# Input source returned an empty record.", "#", "# NOTE: This is okay with Stream-based Source (when it times out", "# waiting for next record), but not okay with FileSource, which should", "# always return either with a valid record or None for EOF.", "raise", "ValueError", "(", "\"Got an empty record from FileSource: %r\"", "%", "inputRecord", ")" ]
Main loop of the OPF Model Runner. Parameters: ----------------------------------------------------------------------- recordIterator: Iterator for counting number of records (see _runTask) learningOffAt: If not None, learning is turned off when we reach this iteration number
[ "Main", "loop", "of", "the", "OPF", "Model", "Runner", "." ]
python
valid
molmod/molmod
molmod/molecules.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/molecules.py#L278-L292
def compute_rotsym(self, threshold=1e-3*angstrom): """Compute the rotational symmetry number. Optional argument: | ``threshold`` -- only when a rotation results in an rmsd below the given threshold, the rotation is considered to transform the molecule onto itself. """ # Generate a graph with a more permissive threshold for bond lengths: # (is convenient in case of transition state geometries) graph = MolecularGraph.from_geometry(self, scaling=1.5) try: return compute_rotsym(self, graph, threshold) except ValueError: raise ValueError("The rotational symmetry number can only be computed when the graph is fully connected.")
[ "def", "compute_rotsym", "(", "self", ",", "threshold", "=", "1e-3", "*", "angstrom", ")", ":", "# Generate a graph with a more permissive threshold for bond lengths:", "# (is convenient in case of transition state geometries)", "graph", "=", "MolecularGraph", ".", "from_geometry", "(", "self", ",", "scaling", "=", "1.5", ")", "try", ":", "return", "compute_rotsym", "(", "self", ",", "graph", ",", "threshold", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"The rotational symmetry number can only be computed when the graph is fully connected.\"", ")" ]
Compute the rotational symmetry number. Optional argument: | ``threshold`` -- only when a rotation results in an rmsd below the given threshold, the rotation is considered to transform the molecule onto itself.
[ "Compute", "the", "rotational", "symmetry", "number", "." ]
python
train
NuGrid/NuGridPy
nugridpy/data_plot.py
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/data_plot.py#L1463-L1466
def _xlimrev(self): ''' reverse xrange''' xmax,xmin=pyl.xlim() pyl.xlim(xmin,xmax)
[ "def", "_xlimrev", "(", "self", ")", ":", "xmax", ",", "xmin", "=", "pyl", ".", "xlim", "(", ")", "pyl", ".", "xlim", "(", "xmin", ",", "xmax", ")" ]
reverse xrange
[ "reverse", "xrange" ]
python
train
unbservices/clams
clams/__init__.py
https://github.com/unbservices/clams/blob/2ae0a36eb8f82a153d27f74ef37688f976952789/clams/__init__.py#L414-L438
def parse_args(self, args=None, namespace=None): """Parse the command-line arguments and call the associated handler. The signature is the same as `argparse.ArgumentParser.parse_args <https://docs.python.org/2/library/argparse.html#argparse.ArgumentParser.parse_args>`_. Args ---- args : list A list of argument strings. If ``None`` the list is taken from ``sys.argv``. namespace : argparse.Namespace A Namespace instance. Defaults to a new empty Namespace. Returns ------- The return value of the handler called with the populated Namespace as kwargs. """ assert self.initialized, '`init` must be called before `parse_args`.' namespace = self.parser.parse_args(args, namespace) handler = self._get_handler(namespace, remove_handler=True) if handler: return handler(**vars(namespace))
[ "def", "parse_args", "(", "self", ",", "args", "=", "None", ",", "namespace", "=", "None", ")", ":", "assert", "self", ".", "initialized", ",", "'`init` must be called before `parse_args`.'", "namespace", "=", "self", ".", "parser", ".", "parse_args", "(", "args", ",", "namespace", ")", "handler", "=", "self", ".", "_get_handler", "(", "namespace", ",", "remove_handler", "=", "True", ")", "if", "handler", ":", "return", "handler", "(", "*", "*", "vars", "(", "namespace", ")", ")" ]
Parse the command-line arguments and call the associated handler. The signature is the same as `argparse.ArgumentParser.parse_args <https://docs.python.org/2/library/argparse.html#argparse.ArgumentParser.parse_args>`_. Args ---- args : list A list of argument strings. If ``None`` the list is taken from ``sys.argv``. namespace : argparse.Namespace A Namespace instance. Defaults to a new empty Namespace. Returns ------- The return value of the handler called with the populated Namespace as kwargs.
[ "Parse", "the", "command", "-", "line", "arguments", "and", "call", "the", "associated", "handler", "." ]
python
train
cs01/pygdbmi
pygdbmi/gdbcontroller.py
https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/gdbcontroller.py#L180-L246
def write( self, mi_cmd_to_write, timeout_sec=DEFAULT_GDB_TIMEOUT_SEC, raise_error_on_timeout=True, read_response=True, ): """Write to gdb process. Block while parsing responses from gdb for a maximum of timeout_sec. Args: mi_cmd_to_write (str or list): String to write to gdb. If list, it is joined by newlines. timeout_sec (float): Maximum number of seconds to wait for response before exiting. Must be >= 0. raise_error_on_timeout (bool): If read_response is True, raise error if no response is received read_response (bool): Block and read response. If there is a separate thread running, this can be false, and the reading thread read the output. Returns: List of parsed gdb responses if read_response is True, otherwise [] Raises: NoGdbProcessError if there is no gdb subprocess running TypeError if mi_cmd_to_write is not valid """ self.verify_valid_gdb_subprocess() if timeout_sec < 0: self.logger.warning("timeout_sec was negative, replacing with 0") timeout_sec = 0 # Ensure proper type of the mi command if type(mi_cmd_to_write) in [str, unicode]: pass elif type(mi_cmd_to_write) == list: mi_cmd_to_write = "\n".join(mi_cmd_to_write) else: raise TypeError( "The gdb mi command must a be str or list. Got " + str(type(mi_cmd_to_write)) ) self.logger.debug("writing: %s", mi_cmd_to_write) if not mi_cmd_to_write.endswith("\n"): mi_cmd_to_write_nl = mi_cmd_to_write + "\n" else: mi_cmd_to_write_nl = mi_cmd_to_write if USING_WINDOWS: # select not implemented in windows for pipes # assume it's always ready outputready = [self.stdin_fileno] else: _, outputready, _ = select.select([], self.write_list, [], timeout_sec) for fileno in outputready: if fileno == self.stdin_fileno: # ready to write self.gdb_process.stdin.write(mi_cmd_to_write_nl.encode()) # don't forget to flush for Python3, otherwise gdb won't realize there is data # to evaluate, and we won't get a response self.gdb_process.stdin.flush() else: self.logger.error("got unexpected fileno %d" % fileno) if read_response is True: return self.get_gdb_response( timeout_sec=timeout_sec, raise_error_on_timeout=raise_error_on_timeout ) else: return []
[ "def", "write", "(", "self", ",", "mi_cmd_to_write", ",", "timeout_sec", "=", "DEFAULT_GDB_TIMEOUT_SEC", ",", "raise_error_on_timeout", "=", "True", ",", "read_response", "=", "True", ",", ")", ":", "self", ".", "verify_valid_gdb_subprocess", "(", ")", "if", "timeout_sec", "<", "0", ":", "self", ".", "logger", ".", "warning", "(", "\"timeout_sec was negative, replacing with 0\"", ")", "timeout_sec", "=", "0", "# Ensure proper type of the mi command", "if", "type", "(", "mi_cmd_to_write", ")", "in", "[", "str", ",", "unicode", "]", ":", "pass", "elif", "type", "(", "mi_cmd_to_write", ")", "==", "list", ":", "mi_cmd_to_write", "=", "\"\\n\"", ".", "join", "(", "mi_cmd_to_write", ")", "else", ":", "raise", "TypeError", "(", "\"The gdb mi command must a be str or list. Got \"", "+", "str", "(", "type", "(", "mi_cmd_to_write", ")", ")", ")", "self", ".", "logger", ".", "debug", "(", "\"writing: %s\"", ",", "mi_cmd_to_write", ")", "if", "not", "mi_cmd_to_write", ".", "endswith", "(", "\"\\n\"", ")", ":", "mi_cmd_to_write_nl", "=", "mi_cmd_to_write", "+", "\"\\n\"", "else", ":", "mi_cmd_to_write_nl", "=", "mi_cmd_to_write", "if", "USING_WINDOWS", ":", "# select not implemented in windows for pipes", "# assume it's always ready", "outputready", "=", "[", "self", ".", "stdin_fileno", "]", "else", ":", "_", ",", "outputready", ",", "_", "=", "select", ".", "select", "(", "[", "]", ",", "self", ".", "write_list", ",", "[", "]", ",", "timeout_sec", ")", "for", "fileno", "in", "outputready", ":", "if", "fileno", "==", "self", ".", "stdin_fileno", ":", "# ready to write", "self", ".", "gdb_process", ".", "stdin", ".", "write", "(", "mi_cmd_to_write_nl", ".", "encode", "(", ")", ")", "# don't forget to flush for Python3, otherwise gdb won't realize there is data", "# to evaluate, and we won't get a response", "self", ".", "gdb_process", ".", "stdin", ".", "flush", "(", ")", "else", ":", "self", ".", "logger", ".", "error", "(", "\"got unexpected fileno %d\"", "%", "fileno", ")", "if", "read_response", "is", "True", ":", "return", "self", ".", "get_gdb_response", "(", "timeout_sec", "=", "timeout_sec", ",", "raise_error_on_timeout", "=", "raise_error_on_timeout", ")", "else", ":", "return", "[", "]" ]
Write to gdb process. Block while parsing responses from gdb for a maximum of timeout_sec. Args: mi_cmd_to_write (str or list): String to write to gdb. If list, it is joined by newlines. timeout_sec (float): Maximum number of seconds to wait for response before exiting. Must be >= 0. raise_error_on_timeout (bool): If read_response is True, raise error if no response is received read_response (bool): Block and read response. If there is a separate thread running, this can be false, and the reading thread read the output. Returns: List of parsed gdb responses if read_response is True, otherwise [] Raises: NoGdbProcessError if there is no gdb subprocess running TypeError if mi_cmd_to_write is not valid
[ "Write", "to", "gdb", "process", ".", "Block", "while", "parsing", "responses", "from", "gdb", "for", "a", "maximum", "of", "timeout_sec", "." ]
python
valid
pycontribs/pyrax
pyrax/object_storage.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/object_storage.py#L2389-L2396
def list(self, limit=None, marker=None, end_marker=None, prefix=None): """ List the containers in this account, using the parameters to control the pagination of containers, since by default only the first 10,000 containers are returned. """ return self._manager.list(limit=limit, marker=marker, end_marker=end_marker, prefix=prefix)
[ "def", "list", "(", "self", ",", "limit", "=", "None", ",", "marker", "=", "None", ",", "end_marker", "=", "None", ",", "prefix", "=", "None", ")", ":", "return", "self", ".", "_manager", ".", "list", "(", "limit", "=", "limit", ",", "marker", "=", "marker", ",", "end_marker", "=", "end_marker", ",", "prefix", "=", "prefix", ")" ]
List the containers in this account, using the parameters to control the pagination of containers, since by default only the first 10,000 containers are returned.
[ "List", "the", "containers", "in", "this", "account", "using", "the", "parameters", "to", "control", "the", "pagination", "of", "containers", "since", "by", "default", "only", "the", "first", "10", "000", "containers", "are", "returned", "." ]
python
train
mozilla/configman
configman/def_sources/for_argparse.py
https://github.com/mozilla/configman/blob/83159fed61cc4cbbe5a4a6a00d3acad8a0c39c96/configman/def_sources/for_argparse.py#L618-L640
def parse_known_args(self, args=None, namespace=None): """this method hijacks the normal argparse Namespace generation, shimming configman into the process. The return value will be a configman DotDict rather than an argparse Namespace.""" # load the config_manager within the scope of the method that uses it # so that we avoid circular references in the outer scope from configman.config_manager import ConfigurationManager configuration_manager = ConfigurationManager( definition_source=[self.get_required_config()], values_source_list=self.value_source_list, argv_source=args, app_name=self.prog, app_version=self.version, app_description=self.description, use_auto_help=False, ) conf = configuration_manager.get_config( mapping_class=create_key_translating_dot_dict( "HyphenUnderscoreDict", (('-', '_'),) ) ) return conf
[ "def", "parse_known_args", "(", "self", ",", "args", "=", "None", ",", "namespace", "=", "None", ")", ":", "# load the config_manager within the scope of the method that uses it", "# so that we avoid circular references in the outer scope", "from", "configman", ".", "config_manager", "import", "ConfigurationManager", "configuration_manager", "=", "ConfigurationManager", "(", "definition_source", "=", "[", "self", ".", "get_required_config", "(", ")", "]", ",", "values_source_list", "=", "self", ".", "value_source_list", ",", "argv_source", "=", "args", ",", "app_name", "=", "self", ".", "prog", ",", "app_version", "=", "self", ".", "version", ",", "app_description", "=", "self", ".", "description", ",", "use_auto_help", "=", "False", ",", ")", "conf", "=", "configuration_manager", ".", "get_config", "(", "mapping_class", "=", "create_key_translating_dot_dict", "(", "\"HyphenUnderscoreDict\"", ",", "(", "(", "'-'", ",", "'_'", ")", ",", ")", ")", ")", "return", "conf" ]
this method hijacks the normal argparse Namespace generation, shimming configman into the process. The return value will be a configman DotDict rather than an argparse Namespace.
[ "this", "method", "hijacks", "the", "normal", "argparse", "Namespace", "generation", "shimming", "configman", "into", "the", "process", ".", "The", "return", "value", "will", "be", "a", "configman", "DotDict", "rather", "than", "an", "argparse", "Namespace", "." ]
python
train
ibelie/typy
typy/google/protobuf/text_format.py
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/text_format.py#L566-L611
def _MergeMessageField(self, tokenizer, message, field): """Merges a single scalar field into a message. Args: tokenizer: A tokenizer to parse the field value. message: The message of which field is a member. field: The descriptor of the field to be merged. Raises: ParseError: In case of text parsing problems. """ is_map_entry = _IsMapEntry(field) if tokenizer.TryConsume('<'): end_token = '>' else: tokenizer.Consume('{') end_token = '}' if field.label == descriptor.FieldDescriptor.LABEL_REPEATED: if field.is_extension: sub_message = message.Extensions[field].add() elif is_map_entry: # pylint: disable=protected-access sub_message = field.message_type._concrete_class() else: sub_message = getattr(message, field.name).add() else: if field.is_extension: sub_message = message.Extensions[field] else: sub_message = getattr(message, field.name) sub_message.SetInParent() while not tokenizer.TryConsume(end_token): if tokenizer.AtEnd(): raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token,)) self._MergeField(tokenizer, sub_message) if is_map_entry: value_cpptype = field.message_type.fields_by_name['value'].cpp_type if value_cpptype == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: value = getattr(message, field.name)[sub_message.key] value.MergeFrom(sub_message.value) else: getattr(message, field.name)[sub_message.key] = sub_message.value
[ "def", "_MergeMessageField", "(", "self", ",", "tokenizer", ",", "message", ",", "field", ")", ":", "is_map_entry", "=", "_IsMapEntry", "(", "field", ")", "if", "tokenizer", ".", "TryConsume", "(", "'<'", ")", ":", "end_token", "=", "'>'", "else", ":", "tokenizer", ".", "Consume", "(", "'{'", ")", "end_token", "=", "'}'", "if", "field", ".", "label", "==", "descriptor", ".", "FieldDescriptor", ".", "LABEL_REPEATED", ":", "if", "field", ".", "is_extension", ":", "sub_message", "=", "message", ".", "Extensions", "[", "field", "]", ".", "add", "(", ")", "elif", "is_map_entry", ":", "# pylint: disable=protected-access", "sub_message", "=", "field", ".", "message_type", ".", "_concrete_class", "(", ")", "else", ":", "sub_message", "=", "getattr", "(", "message", ",", "field", ".", "name", ")", ".", "add", "(", ")", "else", ":", "if", "field", ".", "is_extension", ":", "sub_message", "=", "message", ".", "Extensions", "[", "field", "]", "else", ":", "sub_message", "=", "getattr", "(", "message", ",", "field", ".", "name", ")", "sub_message", ".", "SetInParent", "(", ")", "while", "not", "tokenizer", ".", "TryConsume", "(", "end_token", ")", ":", "if", "tokenizer", ".", "AtEnd", "(", ")", ":", "raise", "tokenizer", ".", "ParseErrorPreviousToken", "(", "'Expected \"%s\".'", "%", "(", "end_token", ",", ")", ")", "self", ".", "_MergeField", "(", "tokenizer", ",", "sub_message", ")", "if", "is_map_entry", ":", "value_cpptype", "=", "field", ".", "message_type", ".", "fields_by_name", "[", "'value'", "]", ".", "cpp_type", "if", "value_cpptype", "==", "descriptor", ".", "FieldDescriptor", ".", "CPPTYPE_MESSAGE", ":", "value", "=", "getattr", "(", "message", ",", "field", ".", "name", ")", "[", "sub_message", ".", "key", "]", "value", ".", "MergeFrom", "(", "sub_message", ".", "value", ")", "else", ":", "getattr", "(", "message", ",", "field", ".", "name", ")", "[", "sub_message", ".", "key", "]", "=", "sub_message", ".", "value" ]
Merges a single scalar field into a message. Args: tokenizer: A tokenizer to parse the field value. message: The message of which field is a member. field: The descriptor of the field to be merged. Raises: ParseError: In case of text parsing problems.
[ "Merges", "a", "single", "scalar", "field", "into", "a", "message", "." ]
python
valid
minhhoit/yacms
yacms/blog/management/commands/import_tumblr.py
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/blog/management/commands/import_tumblr.py#L25-L33
def title_from_content(content): """ Try and extract the first sentence from a block of test to use as a title. """ for end in (". ", "?", "!", "<br />", "\n", "</p>"): if end in content: content = content.split(end)[0] + end break return strip_tags(content)
[ "def", "title_from_content", "(", "content", ")", ":", "for", "end", "in", "(", "\". \"", ",", "\"?\"", ",", "\"!\"", ",", "\"<br />\"", ",", "\"\\n\"", ",", "\"</p>\"", ")", ":", "if", "end", "in", "content", ":", "content", "=", "content", ".", "split", "(", "end", ")", "[", "0", "]", "+", "end", "break", "return", "strip_tags", "(", "content", ")" ]
Try and extract the first sentence from a block of test to use as a title.
[ "Try", "and", "extract", "the", "first", "sentence", "from", "a", "block", "of", "test", "to", "use", "as", "a", "title", "." ]
python
train
creare-com/pydem
pydem/dem_processing.py
https://github.com/creare-com/pydem/blob/c2fc8d84cfb411df84f71a6dec9edc4b544f710a/pydem/dem_processing.py#L2100-L2235
def _mk_connectivity_flats(self, i12, j1, j2, mat_data, flats, elev, mag): """ Helper function for _mk_adjacency_matrix. This calcualtes the connectivity for flat regions. Every pixel in the flat will drain to a random pixel in the flat. This accumulates all the area in the flat region to a single pixel. All that area is then drained from that pixel to the surroundings on the flat. If the border of the flat has a single pixel with a much lower elevation, all the area will go towards that pixel. If the border has pixels with similar elevation, then the area will be distributed amongst all the border pixels proportional to their elevation. """ nn, mm = flats.shape NN = np.prod(flats.shape) # Label the flats assigned, n_flats = spndi.label(flats, FLATS_KERNEL3) flat_ids, flat_coords, flat_labelsf = _get_flat_ids(assigned) flat_j = [None] * n_flats flat_prop = [None] * n_flats flat_i = [None] * n_flats # Temporary array to find the flats edges = np.zeros_like(flats) # %% Calcute the flat drainage warn_flats = [] for ii in xrange(n_flats): ids_flats = flat_ids[flat_coords[ii]:flat_coords[ii+1]] edges[:] = 0 j = ids_flats % mm i = ids_flats // mm for iii in [-1, 0, 1]: for jjj in [-1, 0, 1]: i_2 = i + iii j_2 = j + jjj ids_tmp = (i_2 >= 0) & (j_2 >= 0) & (i_2 < nn) & (j_2 < mm) edges[i_2[ids_tmp], j_2[ids_tmp]] += \ FLATS_KERNEL3[iii+1, jjj+1] edges.ravel()[ids_flats] = 0 ids_edge = np.argwhere(edges.ravel()).squeeze() flat_elev_loc = elev.ravel()[ids_flats] # It is possble for the edges to merge 2 flats, so we need to # take the lower elevation to avoid large circular regions flat_elev = flat_elev_loc.min() loc_elev = elev.ravel()[ids_edge] # Filter out any elevations larger than the flat elevation # TODO: Figure out if this should be <= or < I_filt = loc_elev < flat_elev try: loc_elev = loc_elev[I_filt] loc_slope = mag.ravel()[ids_edge][I_filt] except: # If this is fully masked out (i.e. inside a no-data area) loc_elev = np.array([]) loc_slope = np.array([]) loc_dx = self.dX.mean() # Now I have to figure out if I should just use the minimum or # distribute amongst many pixels on the flat boundary n = len(loc_slope) if n == 0: # Flat does not have anywhere to drain # Let's see if the flat goes to the edge. If yes, we'll just # distribute the area along the edge. ids_flat_on_edge = ((ids_flats % mag.shape[1]) == 0) | \ ((ids_flats % mag.shape[1]) == (mag.shape[1] - 1)) | \ (ids_flats <= mag.shape[1]) | \ (ids_flats >= (mag.shape[1] * (mag.shape[0] - 1))) if ids_flat_on_edge.sum() == 0: warn_flats.append(ii) continue drain_ids = ids_flats[ids_flat_on_edge] loc_proportions = mag.ravel()[ids_flats[ids_flat_on_edge]] loc_proportions /= loc_proportions.sum() ids_flats = ids_flats[~ids_flat_on_edge] # This flat is entirely on the edge of the image if len(ids_flats) == 0: # therefore, whatever drains into it is done. continue flat_elev_loc = flat_elev_loc[~ids_flat_on_edge] else: # Flat has a place to drain to min_edges = np.zeros(loc_slope.shape, bool) min_edges[np.argmin(loc_slope)] = True # Add to the min edges any edge that is within an error # tolerance as small as the minimum min_edges = (loc_slope + loc_slope * loc_dx / 2) \ >= loc_slope[min_edges] drain_ids = ids_edge[I_filt][min_edges] loc_proportions = loc_slope[min_edges] loc_proportions /= loc_proportions.sum() # Now distribute the connectivity amongst the chosen elevations # proportional to their slopes # First, let all the the ids in the flats drain to 1 # flat id (for ease) one_id = np.zeros(ids_flats.size, bool) one_id[np.argmin(flat_elev_loc)] = True j1.ravel()[ids_flats[~one_id]] = ids_flats[one_id] mat_data.ravel()[ids_flats[~one_id]] = 1 # Negative indices will be eliminated before making the matix j2.ravel()[ids_flats[~one_id]] = -1 mat_data.ravel()[ids_flats[~one_id] + NN] = 0 # Now drain the 1 flat to the drains j1.ravel()[ids_flats[one_id]] = drain_ids[0] mat_data.ravel()[ids_flats[one_id]] = loc_proportions[0] if len(drain_ids) > 1: j2.ravel()[ids_flats[one_id]] = drain_ids[1] mat_data.ravel()[ids_flats[one_id] + NN] = loc_proportions[1] if len(loc_proportions > 2): flat_j[ii] = drain_ids[2:] flat_prop[ii] = loc_proportions[2:] flat_i[ii] = np.ones(drain_ids[2:].size, 'int64') * ids_flats[one_id] try: flat_j = np.concatenate([fj for fj in flat_j if fj is not None]) flat_prop = \ np.concatenate([fp for fp in flat_prop if fp is not None]) flat_i = np.concatenate([fi for fi in flat_i if fi is not None]) except: flat_j = np.array([], 'int64') flat_prop = np.array([], 'float64') flat_i = np.array([], 'int64') if len(warn_flats) > 0: warnings.warn("Warning %d flats had no place" % len(warn_flats) + " to drain to --> these are pits (check pit-remove" "algorithm).") return j1, j2, mat_data, flat_i, flat_j, flat_prop
[ "def", "_mk_connectivity_flats", "(", "self", ",", "i12", ",", "j1", ",", "j2", ",", "mat_data", ",", "flats", ",", "elev", ",", "mag", ")", ":", "nn", ",", "mm", "=", "flats", ".", "shape", "NN", "=", "np", ".", "prod", "(", "flats", ".", "shape", ")", "# Label the flats", "assigned", ",", "n_flats", "=", "spndi", ".", "label", "(", "flats", ",", "FLATS_KERNEL3", ")", "flat_ids", ",", "flat_coords", ",", "flat_labelsf", "=", "_get_flat_ids", "(", "assigned", ")", "flat_j", "=", "[", "None", "]", "*", "n_flats", "flat_prop", "=", "[", "None", "]", "*", "n_flats", "flat_i", "=", "[", "None", "]", "*", "n_flats", "# Temporary array to find the flats", "edges", "=", "np", ".", "zeros_like", "(", "flats", ")", "# %% Calcute the flat drainage", "warn_flats", "=", "[", "]", "for", "ii", "in", "xrange", "(", "n_flats", ")", ":", "ids_flats", "=", "flat_ids", "[", "flat_coords", "[", "ii", "]", ":", "flat_coords", "[", "ii", "+", "1", "]", "]", "edges", "[", ":", "]", "=", "0", "j", "=", "ids_flats", "%", "mm", "i", "=", "ids_flats", "//", "mm", "for", "iii", "in", "[", "-", "1", ",", "0", ",", "1", "]", ":", "for", "jjj", "in", "[", "-", "1", ",", "0", ",", "1", "]", ":", "i_2", "=", "i", "+", "iii", "j_2", "=", "j", "+", "jjj", "ids_tmp", "=", "(", "i_2", ">=", "0", ")", "&", "(", "j_2", ">=", "0", ")", "&", "(", "i_2", "<", "nn", ")", "&", "(", "j_2", "<", "mm", ")", "edges", "[", "i_2", "[", "ids_tmp", "]", ",", "j_2", "[", "ids_tmp", "]", "]", "+=", "FLATS_KERNEL3", "[", "iii", "+", "1", ",", "jjj", "+", "1", "]", "edges", ".", "ravel", "(", ")", "[", "ids_flats", "]", "=", "0", "ids_edge", "=", "np", ".", "argwhere", "(", "edges", ".", "ravel", "(", ")", ")", ".", "squeeze", "(", ")", "flat_elev_loc", "=", "elev", ".", "ravel", "(", ")", "[", "ids_flats", "]", "# It is possble for the edges to merge 2 flats, so we need to", "# take the lower elevation to avoid large circular regions", "flat_elev", "=", "flat_elev_loc", ".", "min", "(", ")", "loc_elev", "=", "elev", ".", "ravel", "(", ")", "[", "ids_edge", "]", "# Filter out any elevations larger than the flat elevation", "# TODO: Figure out if this should be <= or <", "I_filt", "=", "loc_elev", "<", "flat_elev", "try", ":", "loc_elev", "=", "loc_elev", "[", "I_filt", "]", "loc_slope", "=", "mag", ".", "ravel", "(", ")", "[", "ids_edge", "]", "[", "I_filt", "]", "except", ":", "# If this is fully masked out (i.e. inside a no-data area)", "loc_elev", "=", "np", ".", "array", "(", "[", "]", ")", "loc_slope", "=", "np", ".", "array", "(", "[", "]", ")", "loc_dx", "=", "self", ".", "dX", ".", "mean", "(", ")", "# Now I have to figure out if I should just use the minimum or", "# distribute amongst many pixels on the flat boundary", "n", "=", "len", "(", "loc_slope", ")", "if", "n", "==", "0", ":", "# Flat does not have anywhere to drain", "# Let's see if the flat goes to the edge. 
If yes, we'll just", "# distribute the area along the edge.", "ids_flat_on_edge", "=", "(", "(", "ids_flats", "%", "mag", ".", "shape", "[", "1", "]", ")", "==", "0", ")", "|", "(", "(", "ids_flats", "%", "mag", ".", "shape", "[", "1", "]", ")", "==", "(", "mag", ".", "shape", "[", "1", "]", "-", "1", ")", ")", "|", "(", "ids_flats", "<=", "mag", ".", "shape", "[", "1", "]", ")", "|", "(", "ids_flats", ">=", "(", "mag", ".", "shape", "[", "1", "]", "*", "(", "mag", ".", "shape", "[", "0", "]", "-", "1", ")", ")", ")", "if", "ids_flat_on_edge", ".", "sum", "(", ")", "==", "0", ":", "warn_flats", ".", "append", "(", "ii", ")", "continue", "drain_ids", "=", "ids_flats", "[", "ids_flat_on_edge", "]", "loc_proportions", "=", "mag", ".", "ravel", "(", ")", "[", "ids_flats", "[", "ids_flat_on_edge", "]", "]", "loc_proportions", "/=", "loc_proportions", ".", "sum", "(", ")", "ids_flats", "=", "ids_flats", "[", "~", "ids_flat_on_edge", "]", "# This flat is entirely on the edge of the image", "if", "len", "(", "ids_flats", ")", "==", "0", ":", "# therefore, whatever drains into it is done.", "continue", "flat_elev_loc", "=", "flat_elev_loc", "[", "~", "ids_flat_on_edge", "]", "else", ":", "# Flat has a place to drain to", "min_edges", "=", "np", ".", "zeros", "(", "loc_slope", ".", "shape", ",", "bool", ")", "min_edges", "[", "np", ".", "argmin", "(", "loc_slope", ")", "]", "=", "True", "# Add to the min edges any edge that is within an error", "# tolerance as small as the minimum", "min_edges", "=", "(", "loc_slope", "+", "loc_slope", "*", "loc_dx", "/", "2", ")", ">=", "loc_slope", "[", "min_edges", "]", "drain_ids", "=", "ids_edge", "[", "I_filt", "]", "[", "min_edges", "]", "loc_proportions", "=", "loc_slope", "[", "min_edges", "]", "loc_proportions", "/=", "loc_proportions", ".", "sum", "(", ")", "# Now distribute the connectivity amongst the chosen elevations", "# proportional to their slopes", "# First, let all the the ids in the flats drain to 1", "# flat id (for ease)", "one_id", "=", "np", ".", "zeros", "(", "ids_flats", ".", "size", ",", "bool", ")", "one_id", "[", "np", ".", "argmin", "(", "flat_elev_loc", ")", "]", "=", "True", "j1", ".", "ravel", "(", ")", "[", "ids_flats", "[", "~", "one_id", "]", "]", "=", "ids_flats", "[", "one_id", "]", "mat_data", ".", "ravel", "(", ")", "[", "ids_flats", "[", "~", "one_id", "]", "]", "=", "1", "# Negative indices will be eliminated before making the matix", "j2", ".", "ravel", "(", ")", "[", "ids_flats", "[", "~", "one_id", "]", "]", "=", "-", "1", "mat_data", ".", "ravel", "(", ")", "[", "ids_flats", "[", "~", "one_id", "]", "+", "NN", "]", "=", "0", "# Now drain the 1 flat to the drains", "j1", ".", "ravel", "(", ")", "[", "ids_flats", "[", "one_id", "]", "]", "=", "drain_ids", "[", "0", "]", "mat_data", ".", "ravel", "(", ")", "[", "ids_flats", "[", "one_id", "]", "]", "=", "loc_proportions", "[", "0", "]", "if", "len", "(", "drain_ids", ")", ">", "1", ":", "j2", ".", "ravel", "(", ")", "[", "ids_flats", "[", "one_id", "]", "]", "=", "drain_ids", "[", "1", "]", "mat_data", ".", "ravel", "(", ")", "[", "ids_flats", "[", "one_id", "]", "+", "NN", "]", "=", "loc_proportions", "[", "1", "]", "if", "len", "(", "loc_proportions", ">", "2", ")", ":", "flat_j", "[", "ii", "]", "=", "drain_ids", "[", "2", ":", "]", "flat_prop", "[", "ii", "]", "=", "loc_proportions", "[", "2", ":", "]", "flat_i", "[", "ii", "]", "=", "np", ".", "ones", "(", "drain_ids", "[", "2", ":", "]", ".", "size", ",", "'int64'", ")", "*", "ids_flats", "[", "one_id", "]", "try", 
":", "flat_j", "=", "np", ".", "concatenate", "(", "[", "fj", "for", "fj", "in", "flat_j", "if", "fj", "is", "not", "None", "]", ")", "flat_prop", "=", "np", ".", "concatenate", "(", "[", "fp", "for", "fp", "in", "flat_prop", "if", "fp", "is", "not", "None", "]", ")", "flat_i", "=", "np", ".", "concatenate", "(", "[", "fi", "for", "fi", "in", "flat_i", "if", "fi", "is", "not", "None", "]", ")", "except", ":", "flat_j", "=", "np", ".", "array", "(", "[", "]", ",", "'int64'", ")", "flat_prop", "=", "np", ".", "array", "(", "[", "]", ",", "'float64'", ")", "flat_i", "=", "np", ".", "array", "(", "[", "]", ",", "'int64'", ")", "if", "len", "(", "warn_flats", ")", ">", "0", ":", "warnings", ".", "warn", "(", "\"Warning %d flats had no place\"", "%", "len", "(", "warn_flats", ")", "+", "\" to drain to --> these are pits (check pit-remove\"", "\"algorithm).\"", ")", "return", "j1", ",", "j2", ",", "mat_data", ",", "flat_i", ",", "flat_j", ",", "flat_prop" ]
Helper function for _mk_adjacency_matrix. This calcualtes the connectivity for flat regions. Every pixel in the flat will drain to a random pixel in the flat. This accumulates all the area in the flat region to a single pixel. All that area is then drained from that pixel to the surroundings on the flat. If the border of the flat has a single pixel with a much lower elevation, all the area will go towards that pixel. If the border has pixels with similar elevation, then the area will be distributed amongst all the border pixels proportional to their elevation.
[ "Helper", "function", "for", "_mk_adjacency_matrix", ".", "This", "calcualtes", "the", "connectivity", "for", "flat", "regions", ".", "Every", "pixel", "in", "the", "flat", "will", "drain", "to", "a", "random", "pixel", "in", "the", "flat", ".", "This", "accumulates", "all", "the", "area", "in", "the", "flat", "region", "to", "a", "single", "pixel", ".", "All", "that", "area", "is", "then", "drained", "from", "that", "pixel", "to", "the", "surroundings", "on", "the", "flat", ".", "If", "the", "border", "of", "the", "flat", "has", "a", "single", "pixel", "with", "a", "much", "lower", "elevation", "all", "the", "area", "will", "go", "towards", "that", "pixel", ".", "If", "the", "border", "has", "pixels", "with", "similar", "elevation", "then", "the", "area", "will", "be", "distributed", "amongst", "all", "the", "border", "pixels", "proportional", "to", "their", "elevation", "." ]
python
train
rocky/python-spark
example/gdb-loc/gdbloc/scanner.py
https://github.com/rocky/python-spark/blob/8899954bcf0e166726841a43e87c23790eb3441f/example/gdb-loc/gdbloc/scanner.py#L57-L79
def t_file_or_func(self, s): r'(?:[^*-+,\d\'"\t \n:][^\'"\t \n:,]*)|(?:^""".+""")|(?:\'\'\'.+\'\'\')' maybe_funcname = True if s == 'if': self.add_token('IF', s) return if s[0] in frozenset(('"', "'")): # Pick out text inside of triple-quoted string if ( (s.startswith("'''") and s.endswith("'''") ) or (s.startswith('"""') and s.endswith('"""') ) ): base = s[3:-3] else: # Pick out text inside singly-quote string base = s[1:-1] maybe_funcname = False else: base = s pos = self.pos if maybe_funcname and re.match('[a-zA-Z_][[a-zA-Z_.0-9\[\]]+\(\)', s): self.add_token('FUNCNAME', base) else: self.add_token('FILENAME', base) self.pos = pos + len(s)
[ "def", "t_file_or_func", "(", "self", ",", "s", ")", ":", "maybe_funcname", "=", "True", "if", "s", "==", "'if'", ":", "self", ".", "add_token", "(", "'IF'", ",", "s", ")", "return", "if", "s", "[", "0", "]", "in", "frozenset", "(", "(", "'\"'", ",", "\"'\"", ")", ")", ":", "# Pick out text inside of triple-quoted string", "if", "(", "(", "s", ".", "startswith", "(", "\"'''\"", ")", "and", "s", ".", "endswith", "(", "\"'''\"", ")", ")", "or", "(", "s", ".", "startswith", "(", "'\"\"\"'", ")", "and", "s", ".", "endswith", "(", "'\"\"\"'", ")", ")", ")", ":", "base", "=", "s", "[", "3", ":", "-", "3", "]", "else", ":", "# Pick out text inside singly-quote string", "base", "=", "s", "[", "1", ":", "-", "1", "]", "maybe_funcname", "=", "False", "else", ":", "base", "=", "s", "pos", "=", "self", ".", "pos", "if", "maybe_funcname", "and", "re", ".", "match", "(", "'[a-zA-Z_][[a-zA-Z_.0-9\\[\\]]+\\(\\)'", ",", "s", ")", ":", "self", ".", "add_token", "(", "'FUNCNAME'", ",", "base", ")", "else", ":", "self", ".", "add_token", "(", "'FILENAME'", ",", "base", ")", "self", ".", "pos", "=", "pos", "+", "len", "(", "s", ")" ]
r'(?:[^*-+,\d\'"\t \n:][^\'"\t \n:,]*)|(?:^""".+""")|(?:\'\'\'.+\'\'\')
[ "r", "(", "?", ":", "[", "^", "*", "-", "+", "\\", "d", "\\", "\\", "t", "\\", "n", ":", "]", "[", "^", "\\", "\\", "t", "\\", "n", ":", "]", "*", ")", "|", "(", "?", ":", "^", ".", "+", ")", "|", "(", "?", ":", "\\", "\\", "\\", ".", "+", "\\", "\\", "\\", ")" ]
python
train
boppreh/keyboard
keyboard/__init__.py
https://github.com/boppreh/keyboard/blob/dbb73dfff484f733d5fed8dbc53301af5b6c7f50/keyboard/__init__.py#L1067-L1121
def add_word_listener(word, callback, triggers=['space'], match_suffix=False, timeout=2): """ Invokes a callback every time a sequence of characters is typed (e.g. 'pet') and followed by a trigger key (e.g. space). Modifiers (e.g. alt, ctrl, shift) are ignored. - `word` the typed text to be matched. E.g. 'pet'. - `callback` is an argument-less function to be invoked each time the word is typed. - `triggers` is the list of keys that will cause a match to be checked. If the user presses some key that is not a character (len>1) and not in triggers, the characters so far will be discarded. By default the trigger is only `space`. - `match_suffix` defines if endings of words should also be checked instead of only whole words. E.g. if true, typing 'carpet'+space will trigger the listener for 'pet'. Defaults to false, only whole words are checked. - `timeout` is the maximum number of seconds between typed characters before the current word is discarded. Defaults to 2 seconds. Returns the event handler created. To remove a word listener use `remove_word_listener(word)` or `remove_word_listener(handler)`. Note: all actions are performed on key down. Key up events are ignored. Note: word mathes are **case sensitive**. """ state = _State() state.current = '' state.time = -1 def handler(event): name = event.name if event.event_type == KEY_UP or name in all_modifiers: return if timeout and event.time - state.time > timeout: state.current = '' state.time = event.time matched = state.current == word or (match_suffix and state.current.endswith(word)) if name in triggers and matched: callback() state.current = '' elif len(name) > 1: state.current = '' else: state.current += name hooked = hook(handler) def remove(): hooked() del _word_listeners[word] del _word_listeners[handler] del _word_listeners[remove] _word_listeners[word] = _word_listeners[handler] = _word_listeners[remove] = remove # TODO: allow multiple word listeners and removing them correctly. return remove
[ "def", "add_word_listener", "(", "word", ",", "callback", ",", "triggers", "=", "[", "'space'", "]", ",", "match_suffix", "=", "False", ",", "timeout", "=", "2", ")", ":", "state", "=", "_State", "(", ")", "state", ".", "current", "=", "''", "state", ".", "time", "=", "-", "1", "def", "handler", "(", "event", ")", ":", "name", "=", "event", ".", "name", "if", "event", ".", "event_type", "==", "KEY_UP", "or", "name", "in", "all_modifiers", ":", "return", "if", "timeout", "and", "event", ".", "time", "-", "state", ".", "time", ">", "timeout", ":", "state", ".", "current", "=", "''", "state", ".", "time", "=", "event", ".", "time", "matched", "=", "state", ".", "current", "==", "word", "or", "(", "match_suffix", "and", "state", ".", "current", ".", "endswith", "(", "word", ")", ")", "if", "name", "in", "triggers", "and", "matched", ":", "callback", "(", ")", "state", ".", "current", "=", "''", "elif", "len", "(", "name", ")", ">", "1", ":", "state", ".", "current", "=", "''", "else", ":", "state", ".", "current", "+=", "name", "hooked", "=", "hook", "(", "handler", ")", "def", "remove", "(", ")", ":", "hooked", "(", ")", "del", "_word_listeners", "[", "word", "]", "del", "_word_listeners", "[", "handler", "]", "del", "_word_listeners", "[", "remove", "]", "_word_listeners", "[", "word", "]", "=", "_word_listeners", "[", "handler", "]", "=", "_word_listeners", "[", "remove", "]", "=", "remove", "# TODO: allow multiple word listeners and removing them correctly.", "return", "remove" ]
Invokes a callback every time a sequence of characters is typed (e.g. 'pet') and followed by a trigger key (e.g. space). Modifiers (e.g. alt, ctrl, shift) are ignored. - `word` the typed text to be matched. E.g. 'pet'. - `callback` is an argument-less function to be invoked each time the word is typed. - `triggers` is the list of keys that will cause a match to be checked. If the user presses some key that is not a character (len>1) and not in triggers, the characters so far will be discarded. By default the trigger is only `space`. - `match_suffix` defines if endings of words should also be checked instead of only whole words. E.g. if true, typing 'carpet'+space will trigger the listener for 'pet'. Defaults to false, only whole words are checked. - `timeout` is the maximum number of seconds between typed characters before the current word is discarded. Defaults to 2 seconds. Returns the event handler created. To remove a word listener use `remove_word_listener(word)` or `remove_word_listener(handler)`. Note: all actions are performed on key down. Key up events are ignored. Note: word mathes are **case sensitive**.
[ "Invokes", "a", "callback", "every", "time", "a", "sequence", "of", "characters", "is", "typed", "(", "e", ".", "g", ".", "pet", ")", "and", "followed", "by", "a", "trigger", "key", "(", "e", ".", "g", ".", "space", ")", ".", "Modifiers", "(", "e", ".", "g", ".", "alt", "ctrl", "shift", ")", "are", "ignored", "." ]
python
train
dmwm/DBS
Server/Python/src/dbs/utils/dbsUtils.py
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/utils/dbsUtils.py#L69-L82
def jsonstreamer(func): """JSON streamer decorator""" def wrapper (self, *args, **kwds): gen = func (self, *args, **kwds) yield "[" firstItem = True for item in gen: if not firstItem: yield "," else: firstItem = False yield cjson.encode(item) yield "]" return wrapper
[ "def", "jsonstreamer", "(", "func", ")", ":", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwds", ")", ":", "gen", "=", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwds", ")", "yield", "\"[\"", "firstItem", "=", "True", "for", "item", "in", "gen", ":", "if", "not", "firstItem", ":", "yield", "\",\"", "else", ":", "firstItem", "=", "False", "yield", "cjson", ".", "encode", "(", "item", ")", "yield", "\"]\"", "return", "wrapper" ]
JSON streamer decorator
[ "JSON", "streamer", "decorator" ]
python
train
ethereum/py_ecc
py_ecc/bls/utils.py
https://github.com/ethereum/py_ecc/blob/2088796c59574b256dc8e18f8c9351bc3688ca71/py_ecc/bls/utils.py#L118-L140
def decompress_G1(z: G1Compressed) -> G1Uncompressed: """ Recovers x and y coordinates from the compressed point. """ # b_flag == 1 indicates the infinity point b_flag = (z % POW_2_383) // POW_2_382 if b_flag == 1: return Z1 x = z % POW_2_381 # Try solving y coordinate from the equation Y^2 = X^3 + b # using quadratic residue y = pow((x**3 + b.n) % q, (q + 1) // 4, q) if pow(y, 2, q) != (x**3 + b.n) % q: raise ValueError( "The given point is not on G1: y**2 = x**3 + b" ) # Choose the y whose leftmost bit is equal to the a_flag a_flag = (z % POW_2_382) // POW_2_381 if (y * 2) // q != a_flag: y = q - y return (FQ(x), FQ(y), FQ(1))
[ "def", "decompress_G1", "(", "z", ":", "G1Compressed", ")", "->", "G1Uncompressed", ":", "# b_flag == 1 indicates the infinity point", "b_flag", "=", "(", "z", "%", "POW_2_383", ")", "//", "POW_2_382", "if", "b_flag", "==", "1", ":", "return", "Z1", "x", "=", "z", "%", "POW_2_381", "# Try solving y coordinate from the equation Y^2 = X^3 + b", "# using quadratic residue", "y", "=", "pow", "(", "(", "x", "**", "3", "+", "b", ".", "n", ")", "%", "q", ",", "(", "q", "+", "1", ")", "//", "4", ",", "q", ")", "if", "pow", "(", "y", ",", "2", ",", "q", ")", "!=", "(", "x", "**", "3", "+", "b", ".", "n", ")", "%", "q", ":", "raise", "ValueError", "(", "\"The given point is not on G1: y**2 = x**3 + b\"", ")", "# Choose the y whose leftmost bit is equal to the a_flag", "a_flag", "=", "(", "z", "%", "POW_2_382", ")", "//", "POW_2_381", "if", "(", "y", "*", "2", ")", "//", "q", "!=", "a_flag", ":", "y", "=", "q", "-", "y", "return", "(", "FQ", "(", "x", ")", ",", "FQ", "(", "y", ")", ",", "FQ", "(", "1", ")", ")" ]
Recovers x and y coordinates from the compressed point.
[ "Recovers", "x", "and", "y", "coordinates", "from", "the", "compressed", "point", "." ]
python
test
josuebrunel/myql
myql/contrib/table/table.py
https://github.com/josuebrunel/myql/blob/891bad29cc83a81b3f5ebc4d0401d6f2c22f119e/myql/contrib/table/table.py#L33-L38
def _xml_pretty_print(self, data): """Pretty print xml data """ raw_string = xtree.tostring(data, 'utf-8') parsed_string = minidom.parseString(raw_string) return parsed_string.toprettyxml(indent='\t')
[ "def", "_xml_pretty_print", "(", "self", ",", "data", ")", ":", "raw_string", "=", "xtree", ".", "tostring", "(", "data", ",", "'utf-8'", ")", "parsed_string", "=", "minidom", ".", "parseString", "(", "raw_string", ")", "return", "parsed_string", ".", "toprettyxml", "(", "indent", "=", "'\\t'", ")" ]
Pretty print xml data
[ "Pretty", "print", "xml", "data" ]
python
train
smdabdoub/phylotoast
phylotoast/util.py
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/util.py#L37-L73
def parseFASTA(fastaFNH): """ Parse the records in a FASTA-format file keeping the file open, and reading through one line at a time. :type source: path to FAST file or open file handle :param source: The data source from which to parse the FASTA records. Expects the input to resolve to a collection that can be iterated through, such as an open file handle. :rtype: tuple :return: FASTA records containing entries for id, description and data. """ recs = [] seq = [] seqID = "" descr = "" for line in file_handle(fastaFNH): line = line.strip() if line[0] == ";": continue if line[0] == ">": # conclude previous record if seq: recs.append(FASTARecord(seqID, descr, "".join(seq))) seq = [] # start new record line = line[1:].split(None, 1) seqID, descr = line[0], line[1] else: seq.append(line) # catch last seq in file if seq: recs.append(FASTARecord(seqID, descr, "".join(seq))) return recs
[ "def", "parseFASTA", "(", "fastaFNH", ")", ":", "recs", "=", "[", "]", "seq", "=", "[", "]", "seqID", "=", "\"\"", "descr", "=", "\"\"", "for", "line", "in", "file_handle", "(", "fastaFNH", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "line", "[", "0", "]", "==", "\";\"", ":", "continue", "if", "line", "[", "0", "]", "==", "\">\"", ":", "# conclude previous record", "if", "seq", ":", "recs", ".", "append", "(", "FASTARecord", "(", "seqID", ",", "descr", ",", "\"\"", ".", "join", "(", "seq", ")", ")", ")", "seq", "=", "[", "]", "# start new record", "line", "=", "line", "[", "1", ":", "]", ".", "split", "(", "None", ",", "1", ")", "seqID", ",", "descr", "=", "line", "[", "0", "]", ",", "line", "[", "1", "]", "else", ":", "seq", ".", "append", "(", "line", ")", "# catch last seq in file", "if", "seq", ":", "recs", ".", "append", "(", "FASTARecord", "(", "seqID", ",", "descr", ",", "\"\"", ".", "join", "(", "seq", ")", ")", ")", "return", "recs" ]
Parse the records in a FASTA-format file keeping the file open, and reading through one line at a time. :type source: path to FAST file or open file handle :param source: The data source from which to parse the FASTA records. Expects the input to resolve to a collection that can be iterated through, such as an open file handle. :rtype: tuple :return: FASTA records containing entries for id, description and data.
[ "Parse", "the", "records", "in", "a", "FASTA", "-", "format", "file", "keeping", "the", "file", "open", "and", "reading", "through", "one", "line", "at", "a", "time", "." ]
python
train
AlecAivazis/graphql-over-kafka
nautilus/api/util/graphql_mutation_from_summary.py
https://github.com/AlecAivazis/graphql-over-kafka/blob/70e2acef27a2f87355590be1a6ca60ce3ab4d09c/nautilus/api/util/graphql_mutation_from_summary.py#L6-L38
def graphql_mutation_from_summary(summary): """ This function returns a graphql mutation corresponding to the provided summary. """ # get the name of the mutation from the summary mutation_name = summary['name'] # print(summary) # the treat the "type" string as a gra input_name = mutation_name + "Input" input_fields = build_native_type_dictionary(summary['inputs'], name=input_name, respect_required=True) # the inputs for the mutation are defined by a class record inputs = type('Input', (object,), input_fields) # the outputs for the mutation are attributes to the class record output_name = mutation_name + "Output" outputs = build_native_type_dictionary(summary['outputs'], name=output_name) # a no-op in order to satisfy the introspection query mutate = classmethod(lambda *_, **__ : 'hello') # create the appropriate mutation class record mutation = type(mutation_name, (graphene.Mutation,), { 'Input': inputs, 'mutate': mutate, **outputs }) # return the newly created mutation record return mutation
[ "def", "graphql_mutation_from_summary", "(", "summary", ")", ":", "# get the name of the mutation from the summary", "mutation_name", "=", "summary", "[", "'name'", "]", "# print(summary)", "# the treat the \"type\" string as a gra", "input_name", "=", "mutation_name", "+", "\"Input\"", "input_fields", "=", "build_native_type_dictionary", "(", "summary", "[", "'inputs'", "]", ",", "name", "=", "input_name", ",", "respect_required", "=", "True", ")", "# the inputs for the mutation are defined by a class record", "inputs", "=", "type", "(", "'Input'", ",", "(", "object", ",", ")", ",", "input_fields", ")", "# the outputs for the mutation are attributes to the class record", "output_name", "=", "mutation_name", "+", "\"Output\"", "outputs", "=", "build_native_type_dictionary", "(", "summary", "[", "'outputs'", "]", ",", "name", "=", "output_name", ")", "# a no-op in order to satisfy the introspection query", "mutate", "=", "classmethod", "(", "lambda", "*", "_", ",", "*", "*", "__", ":", "'hello'", ")", "# create the appropriate mutation class record", "mutation", "=", "type", "(", "mutation_name", ",", "(", "graphene", ".", "Mutation", ",", ")", ",", "{", "'Input'", ":", "inputs", ",", "'mutate'", ":", "mutate", ",", "*", "*", "outputs", "}", ")", "# return the newly created mutation record", "return", "mutation" ]
This function returns a graphql mutation corresponding to the provided summary.
[ "This", "function", "returns", "a", "graphql", "mutation", "corresponding", "to", "the", "provided", "summary", "." ]
python
train
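The core move in this helper is building the mutation class at runtime with type(): a nested Input class from the summary's inputs, a no-op classmethod mutate, and one attribute per output. A graphene-free sketch of that construction follows; the placeholder attributes (username, id) only stand in for whatever build_native_type_dictionary would produce, so they are assumptions rather than the real field types.

# Dynamic class construction with type(name, bases, attrs), mirroring the helper above.
inputs = type('Input', (object,), {'username': str})     # stand-in for the built input fields
mutate = classmethod(lambda *_, **__: 'hello')            # same no-op used to satisfy introspection

CreateUser = type('CreateUser', (object,), {'Input': inputs, 'mutate': mutate, 'id': int})

print(CreateUser.__name__)   # CreateUser
print(CreateUser.Input)      # the dynamically nested Input class
print(CreateUser.mutate())   # 'hello'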
Neurita/boyle
boyle/nifti/mask.py
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/nifti/mask.py#L65-L86
def load_mask_data(image, allow_empty=True): """Load a Nifti mask volume and return its data matrix as boolean and affine. Parameters ---------- image: img-like object or boyle.nifti.NeuroImage or str Can either be: - a file path to a Nifti image - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image. If niimg is a string, consider it as a path to Nifti image and call nibabel.load on it. If it is an object, check if get_data() and get_affine() methods are present, raise TypeError otherwise. allow_empty: boolean, optional Allow loading an empty mask (full of 0 values) Returns ------- numpy.ndarray with dtype==bool, numpy.ndarray of affine transformation """ mask = load_mask(image, allow_empty=allow_empty) return get_img_data(mask), mask.get_affine()
[ "def", "load_mask_data", "(", "image", ",", "allow_empty", "=", "True", ")", ":", "mask", "=", "load_mask", "(", "image", ",", "allow_empty", "=", "allow_empty", ")", "return", "get_img_data", "(", "mask", ")", ",", "mask", ".", "get_affine", "(", ")" ]
Load a Nifti mask volume and return its data matrix as boolean and affine. Parameters ---------- image: img-like object or boyle.nifti.NeuroImage or str Can either be: - a file path to a Nifti image - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image. If niimg is a string, consider it as a path to Nifti image and call nibabel.load on it. If it is an object, check if get_data() and get_affine() methods are present, raise TypeError otherwise. allow_empty: boolean, optional Allow loading an empty mask (full of 0 values) Returns ------- numpy.ndarray with dtype==bool, numpy.ndarray of affine transformation
[ "Load", "a", "Nifti", "mask", "volume", "and", "return", "its", "data", "matrix", "as", "boolean", "and", "affine", "." ]
python
valid
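A short usage sketch for load_mask_data above. The file name is a placeholder for any NIfTI volume of 0/1 values on disk, and the call assumes a nibabel version that still provides the legacy get_affine() accessor, which the function relies on.

import numpy as np
from boyle.nifti.mask import load_mask_data  # import path taken from this record

# 'brain_mask.nii.gz' is a placeholder; allow_empty=False raises if the mask is all zeros.
mask_data, affine = load_mask_data('brain_mask.nii.gz', allow_empty=False)

print(mask_data.dtype)                                    # bool, per the docstring
print(int(np.count_nonzero(mask_data)), 'voxels in mask')
print(affine.shape)                                       # (4, 4) affine transformation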
robotools/fontParts
Lib/fontParts/base/info.py
https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/info.py#L288-L301
def _interpolate(self, factor, minInfo, maxInfo, round=True, suppressError=True): """ Subclasses may override this method. """ minInfo = minInfo._toMathInfo() maxInfo = maxInfo._toMathInfo() result = interpolate(minInfo, maxInfo, factor) if result is None and not suppressError: raise FontPartsError(("Info from font '%s' and font '%s' could not be " "interpolated.") % (minInfo.font.name, maxInfo.font.name)) if round: result = result.round() self._fromMathInfo(result)
[ "def", "_interpolate", "(", "self", ",", "factor", ",", "minInfo", ",", "maxInfo", ",", "round", "=", "True", ",", "suppressError", "=", "True", ")", ":", "minInfo", "=", "minInfo", ".", "_toMathInfo", "(", ")", "maxInfo", "=", "maxInfo", ".", "_toMathInfo", "(", ")", "result", "=", "interpolate", "(", "minInfo", ",", "maxInfo", ",", "factor", ")", "if", "result", "is", "None", "and", "not", "suppressError", ":", "raise", "FontPartsError", "(", "(", "\"Info from font '%s' and font '%s' could not be \"", "\"interpolated.\"", ")", "%", "(", "minInfo", ".", "font", ".", "name", ",", "maxInfo", ".", "font", ".", "name", ")", ")", "if", "round", ":", "result", "=", "result", ".", "round", "(", ")", "self", ".", "_fromMathInfo", "(", "result", ")" ]
Subclasses may override this method.
[ "Subclasses", "may", "override", "this", "method", "." ]
python
train
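_interpolate is the private hook; in fontParts the documented entry point is the public interpolate method on a font's info object, which forwards to it. A sketch of interpolating halfway between two masters follows; the UFO file names are placeholders and the example assumes a default fontshell-style environment where OpenFont and NewFont are available.

from fontParts.world import OpenFont, NewFont  # assumes the default fontshell environment

light = OpenFont("Light.ufo")   # placeholder paths to two compatible masters
bold = OpenFont("Bold.ufo")

medium = NewFont()
# Write the 50% interpolation of the two info objects into the new font's info.
medium.info.interpolate(0.5, light.info, bold.info, round=True, suppressError=True)
print(medium.info.unitsPerEm)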
orbingol/NURBS-Python
geomdl/linalg.py
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/linalg.py#L223-L234
def vector_magnitude(vector_in): """ Computes the magnitude of the input vector. :param vector_in: input vector :type vector_in: list, tuple :return: magnitude of the vector :rtype: float """ sq_sum = 0.0 for vin in vector_in: sq_sum += vin**2 return math.sqrt(sq_sum)
[ "def", "vector_magnitude", "(", "vector_in", ")", ":", "sq_sum", "=", "0.0", "for", "vin", "in", "vector_in", ":", "sq_sum", "+=", "vin", "**", "2", "return", "math", ".", "sqrt", "(", "sq_sum", ")" ]
Computes the magnitude of the input vector. :param vector_in: input vector :type vector_in: list, tuple :return: magnitude of the vector :rtype: float
[ "Computes", "the", "magnitude", "of", "the", "input", "vector", "." ]
python
train
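A quick numeric check of vector_magnitude above, using vectors whose magnitudes are exact: the function is simply the Euclidean norm, the square root of the sum of squared components.

from geomdl.linalg import vector_magnitude  # import path taken from this record

print(vector_magnitude([3.0, 4.0]))        # 5.0
print(vector_magnitude((1.0, 2.0, 2.0)))   # 3.0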
PSPC-SPAC-buyandsell/von_anchor
von_anchor/a2a/docutil.py
https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/a2a/docutil.py#L24-L32
def resource(ref: str, delimiter: str = None) -> str: """ Given a (URI) reference, return up to its delimiter (exclusively), or all of it if there is none. :param ref: reference :param delimiter: delimiter character (default None maps to '#', or ';' introduces identifiers) """ return ref.split(delimiter if delimiter else '#')[0]
[ "def", "resource", "(", "ref", ":", "str", ",", "delimiter", ":", "str", "=", "None", ")", "->", "str", ":", "return", "ref", ".", "split", "(", "delimiter", "if", "delimiter", "else", "'#'", ")", "[", "0", "]" ]
Given a (URI) reference, return up to its delimiter (exclusively), or all of it if there is none. :param ref: reference :param delimiter: delimiter character (default None maps to '#', or ';' introduces identifiers)
[ "Given", "a", "(", "URI", ")", "reference", "return", "up", "to", "its", "delimiter", "(", "exclusively", ")", "or", "all", "of", "it", "if", "there", "is", "none", "." ]
python
train
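A few examples of the resource helper above; the URIs are illustrative only. With no delimiter argument it trims at the first '#', passing ';' trims at the first ';' instead, and a reference containing no delimiter comes back unchanged.

from von_anchor.a2a.docutil import resource  # import path taken from this record

print(resource('https://example.com/doc#section-2'))     # https://example.com/doc
print(resource('did:example:1234;service/agent', ';'))   # did:example:1234
print(resource('plain-reference'))                       # plain-reference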
pybel/pybel-tools
src/pybel_tools/definition_utils/summary_independent.py
https://github.com/pybel/pybel-tools/blob/3491adea0ac4ee60f57275ef72f9b73da6dbfe0c/src/pybel_tools/definition_utils/summary_independent.py#L22-L51
def get_merged_namespace_names(locations, check_keywords=True): """Loads many namespaces and combines their names. :param iter[str] locations: An iterable of URLs or file paths pointing to BEL namespaces. :param bool check_keywords: Should all the keywords be the same? Defaults to ``True`` :return: A dictionary of {names: labels} :rtype: dict[str, str] Example Usage >>> from pybel.resources import write_namespace >>> from pybel_tools.definition_utils import export_namespace, get_merged_namespace_names >>> graph = ... >>> original_ns_url = ... >>> export_namespace(graph, 'MBS') # Outputs in current directory to MBS.belns >>> value_dict = get_merged_namespace_names([original_ns_url, 'MBS.belns']) >>> with open('merged_namespace.belns', 'w') as f: >>> ... write_namespace('MyBrokenNamespace', 'MBS', 'Other', 'Charles Hoyt', 'PyBEL Citation', value_dict, file=f) """ resources = {location: get_bel_resource(location) for location in locations} if check_keywords: resource_keywords = set(config['Namespace']['Keyword'] for config in resources.values()) if 1 != len(resource_keywords): raise ValueError('Tried merging namespaces with different keywords: {}'.format(resource_keywords)) result = {} for resource in resources: result.update(resource['Values']) return result
[ "def", "get_merged_namespace_names", "(", "locations", ",", "check_keywords", "=", "True", ")", ":", "resources", "=", "{", "location", ":", "get_bel_resource", "(", "location", ")", "for", "location", "in", "locations", "}", "if", "check_keywords", ":", "resource_keywords", "=", "set", "(", "config", "[", "'Namespace'", "]", "[", "'Keyword'", "]", "for", "config", "in", "resources", ".", "values", "(", ")", ")", "if", "1", "!=", "len", "(", "resource_keywords", ")", ":", "raise", "ValueError", "(", "'Tried merging namespaces with different keywords: {}'", ".", "format", "(", "resource_keywords", ")", ")", "result", "=", "{", "}", "for", "resource", "in", "resources", ":", "result", ".", "update", "(", "resource", "[", "'Values'", "]", ")", "return", "result" ]
Loads many namespaces and combines their names. :param iter[str] locations: An iterable of URLs or file paths pointing to BEL namespaces. :param bool check_keywords: Should all the keywords be the same? Defaults to ``True`` :return: A dictionary of {names: labels} :rtype: dict[str, str] Example Usage >>> from pybel.resources import write_namespace >>> from pybel_tools.definition_utils import export_namespace, get_merged_namespace_names >>> graph = ... >>> original_ns_url = ... >>> export_namespace(graph, 'MBS') # Outputs in current directory to MBS.belns >>> value_dict = get_merged_namespace_names([original_ns_url, 'MBS.belns']) >>> with open('merged_namespace.belns', 'w') as f: >>> ... write_namespace('MyBrokenNamespace', 'MBS', 'Other', 'Charles Hoyt', 'PyBEL Citation', value_dict, file=f)
[ "Loads", "many", "namespaces", "and", "combines", "their", "names", "." ]
python
valid
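The docstring above already shows end-to-end usage, so the sketch below only isolates the keyword-consistency check and the merge, using toy dicts standing in for get_bel_resource output. One hedged observation: the final loop in the record iterates over the resources dict itself, which yields the location strings rather than the parsed resources, so indexing them with 'Values' looks like it would fail; the sketch iterates over .values() instead.

# Toy stand-ins for parsed BEL namespace resources (shape assumed from the record).
resources = {
    'http://example.com/a.belns': {'Namespace': {'Keyword': 'HGNC'}, 'Values': {'A1BG': 'GRP'}},
    'http://example.com/b.belns': {'Namespace': {'Keyword': 'HGNC'}, 'Values': {'NAT2': 'GRP'}},
}

keywords = {config['Namespace']['Keyword'] for config in resources.values()}
if len(keywords) != 1:
    raise ValueError('Tried merging namespaces with different keywords: {}'.format(keywords))

merged = {}
for parsed in resources.values():
    merged.update(parsed['Values'])
print(merged)   # {'A1BG': 'GRP', 'NAT2': 'GRP'}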
stain/forgetSQL
lib/forgetSQL.py
https://github.com/stain/forgetSQL/blob/2e13f983020b121fd75a95fcafce3ea75573fb6b/lib/forgetSQL.py#L451-L604
def _prepareSQL(cls, operation="SELECT", where=None, selectfields=None, orderBy=None): """Return a sql for the given operation. Possible operations: SELECT read data for this id SELECTALL read data for all ids INSERT insert data, create new id UPDATE update data for this id DELETE remove data for this id SQL will be built by data from _sqlFields, and will contain 0 or several %s for you to sprintf-format in later: SELECT --> len(cls._sqlPrimary) SELECTALL --> 0 %s INSERT --> len(cls._sqlFields) %s (including id) UPDATE --> len(cls._sqlFields) %s (including id) DELETE --> len(cls._sqlPrimary) (Note: INSERT and UPDATE will only change values in _sqlTable, so the actual number of fields for substitutions might be lower than len(cls._sqlFields) ) For INSERT you should use cls._nextSequence() to retrieve a new 'id' number. Note that if your sequences are not named tablename_primarykey_seq (ie. for table 'blapp' with primary key 'john_id', sequence name blapp_john_id_seq) you must give the sequence name as an optional argument to _nextSequence) Additional note: cls._nextSequence() MUST be overloaded for multi _sqlPrimary classes. Return a tuple. Return values will always be tuples: SELECT --> (sql, fields) SELECTALL -> sql, fields) INSERT -> (sql, fields) UPDATE -> (sql, fields) DELETE -> (sql,) -- for consistency fields will be object properties as a list, ie. the keys from cls._sqlFields. The purpose of this list is to give the programmer an idea of which order the keys are inserted in the SQL, giving help for retreiving (SELECT, SELECTALL) or inserting for %s (INSERT, DELETE). Why? Well, the keys are stored in a hash, and we cannot be sure about the order of hash.keys() from time to time, not even with the same instance. Optional where-parameter applies to SELECT, SELECTALL and DELETE. where should be a list or string of where clauses. """ # Normalize parameter for later comparissions operation = operation.upper() # Convert where to a list if it is a string if type(where) in (types.StringType, types.UnicodeType): where = (where,) if orderBy is None: orderBy = cls._orderBy if operation in ('SELECT', 'SELECTALL'): # Get the object fields and sql fields in the same # order to be able to reconstruct later. fields = [] sqlfields = [] for (field, sqlfield) in cls._sqlFields.items(): if selectfields is None or field in selectfields: fields.append(field) sqlfields.append(sqlfield) if not fields: # dirrrrrty! raise """ERROR: No fields defined, cannot create SQL. Maybe sqlPrimary is invalid? Fields asked: %s My fields: %s""" % (selectfields, cls._sqlFields) sql = "SELECT\n " sql += ', '.join(sqlfields) sql += "\nFROM\n " tables = cls._tables.keys() if not tables: raise "REALITY ERROR: No tables defined" sql += ', '.join(tables) tempWhere = ["%s=%s" % linkPair for linkPair in cls._sqlLinks] # this MUST be here. if operation <> 'SELECTALL': for key in cls._sqlPrimary: tempWhere.append(cls._sqlFields[key] + "=%s") if where: tempWhere += where if(tempWhere): # Make sure to use paranteses in case someone has used # ORs in the WHERE-list.. sql += "\nWHERE\n (" sql += ') AND\n ('.join(tempWhere) sql += ')' if operation == 'SELECTALL' and orderBy: sql += '\nORDER BY\n ' if type(orderBy) in (types.TupleType, types.ListType): orderBy = [cls._sqlFields[x] for x in orderBy] orderBy = ',\n '.join(orderBy) else: orderBy = cls._sqlFields[orderBy] sql += orderBy return (sql, fields) elif operation in ('INSERT', 'UPDATE'): if operation == 'UPDATE': sql = 'UPDATE %s SET\n ' % cls._sqlTable else: sql = 'INSERT INTO %s (\n ' % cls._sqlTable set = [] fields = [] sqlfields = [] for (field, sqlfield) in cls._sqlFields.items(): if operation == 'UPDATE' and field in cls._sqlPrimary: continue if sqlfield.find(cls._sqlTable + '.') == 0: # It's a local field, chop of the table part sqlfield = sqlfield[len(cls._sqlTable)+1:] fields.append(field) sqlfields.append(sqlfield) set.append(sqlfield + '=%s') if operation == 'UPDATE': sql += ',\n '.join(set) sql += '\nWHERE\n ' tempWhere = [] for key in cls._sqlPrimary: tempWhere.append(cls._sqlFields[key] + "=%s") fields.append(key) sql += ' AND\n '.join(tempWhere) else: sql += ',\n '.join(sqlfields) sql += ')\nVALUES (\n ' sql += ',\n '.join(('%s',) * len(sqlfields)) sql += ')' return (sql, fields) elif operation == 'DELETE': sql = 'DELETE FROM ' + cls._sqlTable + ' WHERE ' if where: sql += " AND\n ".join(where) else: for key in cls._sqlPrimary: tempWhere = [] for key in cls._sqlPrimary: tempWhere.append(cls._sqlFields[key] + "=%s") sql += ' AND\n '.join(tempWhere) return (sql, ) else: raise "Unknown operation", operation
[ "def", "_prepareSQL", "(", "cls", ",", "operation", "=", "\"SELECT\"", ",", "where", "=", "None", ",", "selectfields", "=", "None", ",", "orderBy", "=", "None", ")", ":", "# Normalize parameter for later comparissions", "operation", "=", "operation", ".", "upper", "(", ")", "# Convert where to a list if it is a string", "if", "type", "(", "where", ")", "in", "(", "types", ".", "StringType", ",", "types", ".", "UnicodeType", ")", ":", "where", "=", "(", "where", ",", ")", "if", "orderBy", "is", "None", ":", "orderBy", "=", "cls", ".", "_orderBy", "if", "operation", "in", "(", "'SELECT'", ",", "'SELECTALL'", ")", ":", "# Get the object fields and sql fields in the same", "# order to be able to reconstruct later.", "fields", "=", "[", "]", "sqlfields", "=", "[", "]", "for", "(", "field", ",", "sqlfield", ")", "in", "cls", ".", "_sqlFields", ".", "items", "(", ")", ":", "if", "selectfields", "is", "None", "or", "field", "in", "selectfields", ":", "fields", ".", "append", "(", "field", ")", "sqlfields", ".", "append", "(", "sqlfield", ")", "if", "not", "fields", ":", "# dirrrrrty!", "raise", "\"\"\"ERROR: No fields defined, cannot create SQL.\nMaybe sqlPrimary is invalid?\nFields asked: %s\nMy fields: %s\"\"\"", "%", "(", "selectfields", ",", "cls", ".", "_sqlFields", ")", "sql", "=", "\"SELECT\\n \"", "sql", "+=", "', '", ".", "join", "(", "sqlfields", ")", "sql", "+=", "\"\\nFROM\\n \"", "tables", "=", "cls", ".", "_tables", ".", "keys", "(", ")", "if", "not", "tables", ":", "raise", "\"REALITY ERROR: No tables defined\"", "sql", "+=", "', '", ".", "join", "(", "tables", ")", "tempWhere", "=", "[", "\"%s=%s\"", "%", "linkPair", "for", "linkPair", "in", "cls", ".", "_sqlLinks", "]", "# this MUST be here.", "if", "operation", "<>", "'SELECTALL'", ":", "for", "key", "in", "cls", ".", "_sqlPrimary", ":", "tempWhere", ".", "append", "(", "cls", ".", "_sqlFields", "[", "key", "]", "+", "\"=%s\"", ")", "if", "where", ":", "tempWhere", "+=", "where", "if", "(", "tempWhere", ")", ":", "# Make sure to use paranteses in case someone has used", "# ORs in the WHERE-list..", "sql", "+=", "\"\\nWHERE\\n (\"", "sql", "+=", "') AND\\n ('", ".", "join", "(", "tempWhere", ")", "sql", "+=", "')'", "if", "operation", "==", "'SELECTALL'", "and", "orderBy", ":", "sql", "+=", "'\\nORDER BY\\n '", "if", "type", "(", "orderBy", ")", "in", "(", "types", ".", "TupleType", ",", "types", ".", "ListType", ")", ":", "orderBy", "=", "[", "cls", ".", "_sqlFields", "[", "x", "]", "for", "x", "in", "orderBy", "]", "orderBy", "=", "',\\n '", ".", "join", "(", "orderBy", ")", "else", ":", "orderBy", "=", "cls", ".", "_sqlFields", "[", "orderBy", "]", "sql", "+=", "orderBy", "return", "(", "sql", ",", "fields", ")", "elif", "operation", "in", "(", "'INSERT'", ",", "'UPDATE'", ")", ":", "if", "operation", "==", "'UPDATE'", ":", "sql", "=", "'UPDATE %s SET\\n '", "%", "cls", ".", "_sqlTable", "else", ":", "sql", "=", "'INSERT INTO %s (\\n '", "%", "cls", ".", "_sqlTable", "set", "=", "[", "]", "fields", "=", "[", "]", "sqlfields", "=", "[", "]", "for", "(", "field", ",", "sqlfield", ")", "in", "cls", ".", "_sqlFields", ".", "items", "(", ")", ":", "if", "operation", "==", "'UPDATE'", "and", "field", "in", "cls", ".", "_sqlPrimary", ":", "continue", "if", "sqlfield", ".", "find", "(", "cls", ".", "_sqlTable", "+", "'.'", ")", "==", "0", ":", "# It's a local field, chop of the table part", "sqlfield", "=", "sqlfield", "[", "len", "(", "cls", ".", "_sqlTable", ")", "+", "1", ":", "]", "fields", ".", "append", "(", "field", ")", "sqlfields", ".", "append", "(", "sqlfield", ")", "set", ".", "append", "(", "sqlfield", "+", "'=%s'", ")", "if", "operation", "==", "'UPDATE'", ":", "sql", "+=", "',\\n '", ".", "join", "(", "set", ")", "sql", "+=", "'\\nWHERE\\n '", "tempWhere", "=", "[", "]", "for", "key", "in", "cls", ".", "_sqlPrimary", ":", "tempWhere", ".", "append", "(", "cls", ".", "_sqlFields", "[", "key", "]", "+", "\"=%s\"", ")", "fields", ".", "append", "(", "key", ")", "sql", "+=", "' AND\\n '", ".", "join", "(", "tempWhere", ")", "else", ":", "sql", "+=", "',\\n '", ".", "join", "(", "sqlfields", ")", "sql", "+=", "')\\nVALUES (\\n '", "sql", "+=", "',\\n '", ".", "join", "(", "(", "'%s'", ",", ")", "*", "len", "(", "sqlfields", ")", ")", "sql", "+=", "')'", "return", "(", "sql", ",", "fields", ")", "elif", "operation", "==", "'DELETE'", ":", "sql", "=", "'DELETE FROM '", "+", "cls", ".", "_sqlTable", "+", "' WHERE '", "if", "where", ":", "sql", "+=", "\" AND\\n \"", ".", "join", "(", "where", ")", "else", ":", "for", "key", "in", "cls", ".", "_sqlPrimary", ":", "tempWhere", "=", "[", "]", "for", "key", "in", "cls", ".", "_sqlPrimary", ":", "tempWhere", ".", "append", "(", "cls", ".", "_sqlFields", "[", "key", "]", "+", "\"=%s\"", ")", "sql", "+=", "' AND\\n '", ".", "join", "(", "tempWhere", ")", "return", "(", "sql", ",", ")", "else", ":", "raise", "\"Unknown operation\"", ",", "operation", "]
Return a sql for the given operation. Possible operations: SELECT read data for this id SELECTALL read data for all ids INSERT insert data, create new id UPDATE update data for this id DELETE remove data for this id SQL will be built by data from _sqlFields, and will contain 0 or several %s for you to sprintf-format in later: SELECT --> len(cls._sqlPrimary) SELECTALL --> 0 %s INSERT --> len(cls._sqlFields) %s (including id) UPDATE --> len(cls._sqlFields) %s (including id) DELETE --> len(cls._sqlPrimary) (Note: INSERT and UPDATE will only change values in _sqlTable, so the actual number of fields for substitutions might be lower than len(cls._sqlFields) ) For INSERT you should use cls._nextSequence() to retrieve a new 'id' number. Note that if your sequences are not named tablename_primarykey_seq (ie. for table 'blapp' with primary key 'john_id', sequence name blapp_john_id_seq) you must give the sequence name as an optional argument to _nextSequence) Additional note: cls._nextSequence() MUST be overloaded for multi _sqlPrimary classes. Return a tuple. Return values will always be tuples: SELECT --> (sql, fields) SELECTALL -> sql, fields) INSERT -> (sql, fields) UPDATE -> (sql, fields) DELETE -> (sql,) -- for consistency fields will be object properties as a list, ie. the keys from cls._sqlFields. The purpose of this list is to give the programmer an idea of which order the keys are inserted in the SQL, giving help for retreiving (SELECT, SELECTALL) or inserting for %s (INSERT, DELETE). Why? Well, the keys are stored in a hash, and we cannot be sure about the order of hash.keys() from time to time, not even with the same instance. Optional where-parameter applies to SELECT, SELECTALL and DELETE. where should be a list or string of where clauses.
[ "Return", "a", "sql", "for", "the", "given", "operation", "." ]
python
train
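The record above is Python 2 era code (the <> operator, string exceptions, types.StringType), and running it needs a full Forgetter subclass, so the sketch below only replays the SELECT branch's string assembly with a hypothetical single-primary-key mapping for a person table. The table, the column names and the printed SQL are illustrative, not taken from forgetSQL.

# Hypothetical mapping in the style of _sqlFields / _sqlPrimary / _tables.
sqlFields = {'id': 'person.person_id', 'name': 'person.name', 'email': 'person.email'}
sqlPrimary = ('id',)
tables = ['person']

fields = list(sqlFields)                                    # property order for later unpacking
sql = "SELECT\n  " + ', '.join(sqlFields[f] for f in fields)
sql += "\nFROM\n  " + ', '.join(tables)
where = [sqlFields[key] + "=%s" for key in sqlPrimary]      # one placeholder per primary key
sql += "\nWHERE\n  (" + ') AND\n  ('.join(where) + ')'

print(sql)
# SELECT
#   person.person_id, person.name, person.email
# FROM
#   person
# WHERE
#   (person.person_id=%s)
print(fields)   # ['id', 'name', 'email']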
gwpy/gwpy
gwpy/detector/channel.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/detector/channel.py#L785-L808
def query(cls, name, use_kerberos=None, debug=False): """Query the LIGO Channel Information System a `ChannelList`. Parameters ---------- name : `str` name of channel, or part of it. use_kerberos : `bool`, optional use an existing Kerberos ticket as the authentication credential, default behaviour will check for credentials and request username and password if none are found (`None`) debug : `bool`, optional print verbose HTTP connection status for debugging, default: `False` Returns ------- channels : `ChannelList` a new list containing all `Channels <Channel>` found. """ from .io import cis return cis.query(name, use_kerberos=use_kerberos, debug=debug)
[ "def", "query", "(", "cls", ",", "name", ",", "use_kerberos", "=", "None", ",", "debug", "=", "False", ")", ":", "from", ".", "io", "import", "cis", "return", "cis", ".", "query", "(", "name", ",", "use_kerberos", "=", "use_kerberos", ",", "debug", "=", "debug", ")" ]
Query the LIGO Channel Information System a `ChannelList`. Parameters ---------- name : `str` name of channel, or part of it. use_kerberos : `bool`, optional use an existing Kerberos ticket as the authentication credential, default behaviour will check for credentials and request username and password if none are found (`None`) debug : `bool`, optional print verbose HTTP connection status for debugging, default: `False` Returns ------- channels : `ChannelList` a new list containing all `Channels <Channel>` found.
[ "Query", "the", "LIGO", "Channel", "Information", "System", "a", "ChannelList", "." ]
python
train
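A usage sketch for ChannelList.query above. It needs network access to the LIGO Channel Information System and valid LIGO.ORG credentials, and the channel-name pattern is only an example.

from gwpy.detector import ChannelList  # public import path in gwpy

# Look up channels whose names match the given string; credentials are requested
# interactively unless an existing Kerberos ticket is used.
channels = ChannelList.query('L1:LSC-DARM_ERR', use_kerberos=True)
for chan in channels:
    print(chan.name, chan.sample_rate)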
pandas-dev/pandas
pandas/core/arrays/period.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/period.py#L211-L226
def _from_datetime64(cls, data, freq, tz=None): """ Construct a PeriodArray from a datetime64 array Parameters ---------- data : ndarray[datetime64[ns], datetime64[ns, tz]] freq : str or Tick tz : tzinfo, optional Returns ------- PeriodArray[freq] """ data, freq = dt64arr_to_periodarr(data, freq, tz) return cls(data, freq=freq)
[ "def", "_from_datetime64", "(", "cls", ",", "data", ",", "freq", ",", "tz", "=", "None", ")", ":", "data", ",", "freq", "=", "dt64arr_to_periodarr", "(", "data", ",", "freq", ",", "tz", ")", "return", "cls", "(", "data", ",", "freq", "=", "freq", ")" ]
Construct a PeriodArray from a datetime64 array Parameters ---------- data : ndarray[datetime64[ns], datetime64[ns, tz]] freq : str or Tick tz : tzinfo, optional Returns ------- PeriodArray[freq]
[ "Construct", "a", "PeriodArray", "from", "a", "datetime64", "array" ]
python
train
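_from_datetime64 is an internal constructor, so its import location and behaviour can shift between pandas versions; the sketch below calls it as documented in the record and also shows the public route via to_period, which covers most use cases.

import numpy as np
import pandas as pd
from pandas.arrays import PeriodArray  # public re-export; the constructor itself is private API

dt64 = np.array(['2000-01-01', '2000-01-02', '2000-01-03'], dtype='datetime64[ns]')

# Private constructor, per the record's docstring.
parr = PeriodArray._from_datetime64(dt64, freq='D')
print(parr)

# Public equivalent.
print(pd.DatetimeIndex(dt64).to_period('D'))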
hadrianl/huobi
huobitrade/service.py
https://github.com/hadrianl/huobi/blob/bbfa2036703ee84a76d5d8e9f89c25fc8a55f2c7/huobitrade/service.py#L512-L521
def repay_loan(self, order_id, amount, _async=False): """ 归还借贷 :param order_id: :param amount: :return: """ params = {'order-id': order_id, 'amount': amount} path = f'/v1/margin/orders/{order_id}/repay' return api_key_post(params, path, _async=_async)
[ "def", "repay_loan", "(", "self", ",", "order_id", ",", "amount", ",", "_async", "=", "False", ")", ":", "params", "=", "{", "'order-id'", ":", "order_id", ",", "'amount'", ":", "amount", "}", "path", "=", "f'/v1/margin/orders/{order_id}/repay'", "return", "api_key_post", "(", "params", ",", "path", ",", "_async", "=", "_async", ")" ]
归还借贷 :param order_id: :param amount: :return:
[ "归还借贷", ":", "param", "order_id", ":", ":", "param", "amount", ":", ":", "return", ":" ]
python
train
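The docstring here is Chinese; 归还借贷 means repaying a margin loan, with order-id identifying the loan order and amount the quantity to repay. A hedged usage sketch follows; the client class name (HBRestAPI), the setKey helper and the credentials are assumptions about how huobitrade is typically wired up, and the order id and amount are placeholders.

from huobitrade import setKey              # assumed helper for registering API credentials
from huobitrade.service import HBRestAPI   # assumed REST client exposing repay_loan

setKey('your-access-key', 'your-secret-key')   # placeholder credentials
api = HBRestAPI()

# Repay loan order 12345 with 10.5 units; returns the parsed API response.
print(api.repay_loan('12345', '10.5'))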
Chilipp/psyplot
psyplot/data.py
https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/data.py#L795-L815
def _check_triangular_bounds(self, var, coords=None, axis='x', nans=None): """ Checks whether the bounds in the variable attribute are triangular Parameters ---------- %(CFDecoder.get_cell_node_coord.parameters)s Returns ------- bool or None True, if unstructered, None if it could not be determined xarray.Coordinate or None the bounds corrdinate (if existent)""" # !!! WILL BE REMOVED IN THE NEAR FUTURE! !!! bounds = self.get_cell_node_coord(var, coords, axis=axis, nans=nans) if bounds is not None: return bounds.shape[-1] == 3, bounds else: return None, None
[ "def", "_check_triangular_bounds", "(", "self", ",", "var", ",", "coords", "=", "None", ",", "axis", "=", "'x'", ",", "nans", "=", "None", ")", ":", "# !!! WILL BE REMOVED IN THE NEAR FUTURE! !!!", "bounds", "=", "self", ".", "get_cell_node_coord", "(", "var", ",", "coords", ",", "axis", "=", "axis", ",", "nans", "=", "nans", ")", "if", "bounds", "is", "not", "None", ":", "return", "bounds", ".", "shape", "[", "-", "1", "]", "==", "3", ",", "bounds", "else", ":", "return", "None", ",", "None" ]
Checks whether the bounds in the variable attribute are triangular Parameters ---------- %(CFDecoder.get_cell_node_coord.parameters)s Returns ------- bool or None True, if unstructered, None if it could not be determined xarray.Coordinate or None the bounds corrdinate (if existent)
[ "Checks", "whether", "the", "bounds", "in", "the", "variable", "attribute", "are", "triangular" ]
python
train
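The deciding test in _check_triangular_bounds is simply whether the cell-node bounds coordinate has three corners per cell (shape[-1] == 3); the real bounds come from get_cell_node_coord, so the standalone sketch below only mimics that shape check with toy arrays.

import numpy as np

tri_bounds = np.zeros((10, 3))    # 10 cells with 3 node corners each -> triangular
quad_bounds = np.zeros((10, 4))   # 10 cells with 4 node corners each -> not triangular

for bounds in (tri_bounds, quad_bounds):
    print(bounds.shape, '-> triangular:', bounds.shape[-1] == 3)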
benedictpaten/sonLib
misc.py
https://github.com/benedictpaten/sonLib/blob/1decb75bb439b70721ec776f685ce98e25217d26/misc.py#L38-L42
def close(i, j, tolerance): """ check two float values are within a bound of one another """ return i <= j + tolerance and i >= j - tolerance
[ "def", "close", "(", "i", ",", "j", ",", "tolerance", ")", ":", "return", "i", "<=", "j", "+", "tolerance", "and", "i", ">=", "j", "-", "tolerance" ]
check two float values are within a bound of one another
[ "check", "two", "float", "values", "are", "within", "a", "bound", "of", "one", "another" ]
python
train
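close above is just a tolerance comparison, equivalent to abs(i - j) <= tolerance; a couple of worked values make the boundary behaviour concrete. The function is copied inline here so the snippet runs on its own.

def close(i, j, tolerance):
    """Check two float values are within a bound of one another (copied from the record above)."""
    return i <= j + tolerance and i >= j - tolerance

print(close(1.0, 1.04, 0.05))    # True: the difference 0.04 is within the 0.05 bound
print(close(1.0, 1.06, 0.05))    # False: the difference 0.06 exceeds the bound
print(abs(1.0 - 1.04) <= 0.05)   # equivalent formulation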