text (string, lengths 89 to 104k) | code_tokens (list) | avg_line_len (float64, 7.91 to 980) | score (float64, 0 to 630) |
---|---|---|---|
def incoming_args(self, nodeid):
"""
Return the arguments that target *nodeid*.
Valid arguments include regular variable arguments and scopal
(label-selecting or HCONS) arguments. MOD/EQ
links and intrinsic arguments are not included.
Args:
nodeid: the nodeid of the EP that is the arguments' target
Returns:
dict: `{source_nodeid: {rargname: value}}`
"""
_vars = self._vars
ep = self._eps[nodeid]
lbl = ep[2]
iv = ep[3].get(IVARG_ROLE)
in_args_list = []
# variable args
if iv in _vars:
for role, nids in _vars[iv]['refs'].items():
# ignore intrinsic args, even if shared
if role != IVARG_ROLE:
in_args_list.append((nids, role, iv))
if lbl in _vars:
for role, nids in _vars[lbl]['refs'].items():
# basic label equality isn't "incoming"; ignore
if role != 'LBL':
in_args_list.append((nids, role, lbl))
for nid, role, hi in _vars[lbl].get('hcrefs', []):
in_args_list.append(([nid], role, hi))
in_args = {}
for nids, role, tgt in in_args_list:
for nid in nids:
if nid not in in_args:
in_args[nid] = {}
in_args[nid][role] = tgt
    return in_args
| 36.815789 | 15.131579 |
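A minimal usage sketch for the method above; the object name, nodeids, roles, and variables are invented for illustration, assuming `xmrs` is whatever object exposes `incoming_args`.

# Hypothetical usage; `xmrs` and all ids/roles below are illustrative only.
args = xmrs.incoming_args(10003)
# Expected shape: {source_nodeid: {rargname: value}}, e.g.
# {10001: {'ARG1': 'e2'}, 10002: {'RSTR': 'h5'}}
for source_nodeid, roles in args.items():
    for rargname, value in roles.items():
        print(source_nodeid, rargname, value)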
def process(self):
    """Collect the results of an existing hunt.

    Fetches the hunt matching self.hunt_id and stores the collected results
    in self.state.output.
    """
hunt = self.grr_api.Hunt(self.hunt_id).Get()
    self.state.output = self.collect_hunt_results(hunt)
| 29.875 | 16.375 |
def standard(model, **kwargs):
"""
Create standard model of molecular evolution.
Parameters
----------
model : str
Model to create. See list of available models below
**kwargs:
        Keyword arguments to be passed to the model
**Available models**
- JC69:
Jukes-Cantor 1969 model. This model assumes equal frequencies
of the nucleotides and equal transition rates between nucleotide states.
For more info, see: Jukes and Cantor (1969).
Evolution of Protein Molecules. New York: Academic Press. pp. 21-132.
To create this model, use:
:code:`mygtr = GTR.standard(model='jc69', mu=<my_mu>, alphabet=<my_alph>)`
:code:`my_mu` - substitution rate (float)
:code:`my_alph` - alphabet (str: :code:`'nuc'` or :code:`'nuc_nogap'`)
- K80:
        Kimura 1980 model. Assumes equal concentrations of the nucleotides, but
        allows different rates for transitions and transversions. The ratio
        of the transversion/transition rates is given by the kappa parameter.
For more info, see
Kimura (1980), J. Mol. Evol. 16 (2): 111-120. doi:10.1007/BF01731581.
Current implementation of the model does not account for the gaps.
:code:`mygtr = GTR.standard(model='k80', mu=<my_mu>, kappa=<my_kappa>)`
:code:`mu` - overall substitution rate (float)
:code:`kappa` - ratio of transversion/transition rates (float)
- F81:
Felsenstein 1981 model. Assumes non-equal concentrations across nucleotides,
but the transition rate between all states is assumed to be equal. See
Felsenstein (1981), J. Mol. Evol. 17 (6): 368-376. doi:10.1007/BF01734359
for details.
:code:`mygtr = GTR.standard(model='F81', mu=<mu>, pi=<pi>, alphabet=<alph>)`
:code:`mu` - substitution rate (float)
        :code:`pi` - nucleotide concentrations (numpy.array)
        :code:`alphabet` - alphabet to use (:code:`'nuc'` or :code:`'nuc_nogap'`)
- HKY85:
Hasegawa, Kishino and Yano 1985 model. Allows different concentrations of the
nucleotides (as in F81) + distinguishes between transition/transversion substitutions
(similar to K80). Link:
Hasegawa, Kishino, Yano (1985), J. Mol. Evol. 22 (2): 160-174. doi:10.1007/BF02101694
Current implementation of the model does not account for the gaps
:code:`mygtr = GTR.standard(model='HKY85', mu=<mu>, pi=<pi>, kappa=<kappa>)`
:code:`mu` - substitution rate (float)
        :code:`pi` - nucleotide concentrations (numpy.array)
:code:`kappa` - ratio of transversion/transition rates (float)
- T92:
Tamura 1992 model. Extending Kimura (1980) model for the case where a
G+C-content bias exists. Link:
Tamura K (1992), Mol. Biol. Evol. 9 (4): 678-687. DOI: 10.1093/oxfordjournals.molbev.a040752
Current implementation of the model does not account for the gaps
:code:`mygtr = GTR.standard(model='T92', mu=<mu>, pi_GC=<pi_gc>, kappa=<kappa>)`
:code:`mu` - substitution rate (float)
        :code:`pi_GC` - relative GC content
:code:`kappa` - ratio of transversion/transition rates (float)
- TN93:
Tamura and Nei 1993. The model distinguishes between the two different types of
transition: (A <-> G) is allowed to have a different rate to (C<->T).
Transversions have the same rate. The frequencies of the nucleotides are allowed
to be different. Link: Tamura, Nei (1993), MolBiol Evol. 10 (3): 512-526.
DOI:10.1093/oxfordjournals.molbev.a040023
:code:`mygtr = GTR.standard(model='TN93', mu=<mu>, kappa1=<k1>, kappa2=<k2>)`
:code:`mu` - substitution rate (float)
:code:`kappa1` - relative A<-->C, A<-->T, T<-->G and G<-->C rates (float)
        :code:`kappa2` - relative C<-->T rate (float)
.. Note::
Rate of A<-->G substitution is set to one. All other rates
(kappa1, kappa2) are specified relative to this rate
"""
from .nuc_models import JC69, K80, F81, HKY85, T92, TN93
from .aa_models import JTT92
if model.lower() in ['jc', 'jc69', 'jukes-cantor', 'jukes-cantor69', 'jukescantor', 'jukescantor69']:
return JC69(**kwargs)
elif model.lower() in ['k80', 'kimura80', 'kimura1980']:
return K80(**kwargs)
elif model.lower() in ['f81', 'felsenstein81', 'felsenstein1981']:
return F81(**kwargs)
elif model.lower() in ['hky', 'hky85', 'hky1985']:
return HKY85(**kwargs)
elif model.lower() in ['t92', 'tamura92', 'tamura1992']:
return T92(**kwargs)
elif model.lower() in ['tn93', 'tamura_nei_93', 'tamuranei93']:
return TN93(**kwargs)
elif model.lower() in ['jtt', 'jtt92']:
return JTT92(**kwargs)
else:
raise KeyError("The GTR model '{}' is not in the list of available models."
"".format(model)) | [
"def",
"standard",
"(",
"model",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
".",
"nuc_models",
"import",
"JC69",
",",
"K80",
",",
"F81",
",",
"HKY85",
",",
"T92",
",",
"TN93",
"from",
".",
"aa_models",
"import",
"JTT92",
"if",
"model",
".",
"lower",
"(",
")",
"in",
"[",
"'jc'",
",",
"'jc69'",
",",
"'jukes-cantor'",
",",
"'jukes-cantor69'",
",",
"'jukescantor'",
",",
"'jukescantor69'",
"]",
":",
"return",
"JC69",
"(",
"*",
"*",
"kwargs",
")",
"elif",
"model",
".",
"lower",
"(",
")",
"in",
"[",
"'k80'",
",",
"'kimura80'",
",",
"'kimura1980'",
"]",
":",
"return",
"K80",
"(",
"*",
"*",
"kwargs",
")",
"elif",
"model",
".",
"lower",
"(",
")",
"in",
"[",
"'f81'",
",",
"'felsenstein81'",
",",
"'felsenstein1981'",
"]",
":",
"return",
"F81",
"(",
"*",
"*",
"kwargs",
")",
"elif",
"model",
".",
"lower",
"(",
")",
"in",
"[",
"'hky'",
",",
"'hky85'",
",",
"'hky1985'",
"]",
":",
"return",
"HKY85",
"(",
"*",
"*",
"kwargs",
")",
"elif",
"model",
".",
"lower",
"(",
")",
"in",
"[",
"'t92'",
",",
"'tamura92'",
",",
"'tamura1992'",
"]",
":",
"return",
"T92",
"(",
"*",
"*",
"kwargs",
")",
"elif",
"model",
".",
"lower",
"(",
")",
"in",
"[",
"'tn93'",
",",
"'tamura_nei_93'",
",",
"'tamuranei93'",
"]",
":",
"return",
"TN93",
"(",
"*",
"*",
"kwargs",
")",
"elif",
"model",
".",
"lower",
"(",
")",
"in",
"[",
"'jtt'",
",",
"'jtt92'",
"]",
":",
"return",
"JTT92",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"raise",
"KeyError",
"(",
"\"The GTR model '{}' is not in the list of available models.\"",
"\"\"",
".",
"format",
"(",
"model",
")",
")"
]
| 36.439716 | 31.588652 |
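A minimal sketch of the calls shown in the docstring above. The import location is an assumption (it presumes the containing class is exposed as `GTR`), and the parameter values are arbitrary examples.

# Assumed import location; mu/kappa/alphabet values are arbitrary examples.
from treetime import GTR

jc = GTR.standard(model='jc69', mu=1.0, alphabet='nuc')
k80 = GTR.standard(model='k80', mu=1.0, kappa=0.1)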
def add_document(self, item_uri, name, metadata,
content=None, docurl=None, file=None,
displaydoc=False, preferName=False,
contrib_id=None):
"""Add a document to an existing item
:param item_uri: the URI that references the item
:type item_uri: String
:param name: The document name
:type name: String
:param metadata: a dictionary of metadata values describing the document
:type metadata: Dict
:param content: optional content of the document
:type content: byte array
:param docurl: optional url referencing the document
:type docurl: String
:param file: optional full path to file to be uploaded
:type file: String
:param displaydoc: if True, make this the display document for the item
:type displaydoc: Boolean
    :param preferName: if True, the given document name will be used as the document id rather than the
filename. Useful if you want to upload under a different filename.
:type preferName: Boolean
:param contrib_id: if present, add this document to this contribution as well as
associating it with the item
:type contrib_id: Integer
:rtype: String
:returns: The URL of the newly created document
"""
if not preferName and file is not None:
docid = os.path.basename(file)
else:
docid = name
docmeta = {"metadata": {"@context": self.context,
"@type": "foaf:Document",
"dcterms:identifier": docid,
}
}
# add in metadata we are passed
docmeta["metadata"].update(metadata)
if contrib_id:
docmeta['contribution_id'] = contrib_id
if content is not None:
docmeta['document_content'] = content
elif docurl is not None:
docmeta["metadata"]["dcterms:source"] = { "@id": docurl }
elif file is not None:
# we only pass the metadata part of the dictionary
docmeta = docmeta['metadata']
else:
raise Exception("One of content, docurl or file must be specified in add_document")
if file is not None:
result = self.api_request(item_uri, method='POST', data={'metadata': json.dumps(docmeta)}, file=file)
else:
result = self.api_request(item_uri, method='POST', data=json.dumps(docmeta))
self.__check_success(result)
if displaydoc:
itemmeta = {"http://alveo.edu.org/vocabulary/display_document": docid}
self.modify_item(item_uri, itemmeta)
doc_uri = item_uri + "/document/" + name
    return doc_uri
| 34.725 | 23 |
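A hedged example of calling the method above; the client object, item URI, file path, and metadata values are assumptions made up for illustration.

# Hypothetical usage; `client`, the item URI and the metadata are invented.
doc_url = client.add_document(
    item_uri='https://example.org/catalog/items/item1',
    name='transcript.txt',
    metadata={'dcterms:title': 'Session transcript'},
    file='/tmp/transcript.txt',
    displaydoc=True,
)
print(doc_url)  # e.g. .../items/item1/document/transcript.txt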
def main(self, *directories):
"""
The actual logic that runs the linters
"""
if not self.git and len(directories) == 0:
print ("ERROR: At least one directory must be provided (or the "
"--git-precommit flag must be passed.\n")
self.help()
return
if len(directories) > 0:
find = local['find']
files = []
for directory in directories:
real = os.path.expanduser(directory)
if not os.path.exists(real):
raise ValueError("{0} does not exist".format(directory))
files.extend(find(real, '-name', '*.py').strip().split('\n'))
if len(files) > 0:
print "Linting {0} python files.\n".format(len(files))
lint(files)
else:
print "No python files found to lint.\n"
else:
status = local['git']('status', '--porcelain', '-uno')
root = local['git']('rev-parse', '--show-toplevel').strip()
# get all modified or added python files
modified = re.findall(r"^[AM]\s+\S+\.py$", status, re.MULTILINE)
# now just get the path part, which all should be relative to the
# root
files = [os.path.join(root, line.split(' ', 1)[-1].strip())
for line in modified]
if len(files) > 0:
            lint(files)
| 39 | 20.135135 |
def toplevel_packages():
""" Get package list, without sub-packages.
"""
packages = set(easy.options.setup.packages)
for pkg in list(packages):
packages -= set(p for p in packages if str(p).startswith(pkg + '.'))
    return list(sorted(packages))
| 37.714286 | 10.857143 |
def main_photo(self):
"""Return user's main photo."""
if not self._main_photo:
self._main_photo = self.photos_factory()
    return self._main_photo
| 35 | 9.6 |
def read(self, pin):
""" Read the pin state of an input pin.
    Make sure you put the pin in input mode with the IODIR* register or direction_* attribute first.
:Example:
>>> expander = MCP23017I2C(gw)
>>> # Read the logic level on pin B3
>>> expander.read('B3')
False
>>> # Read the logic level on pin A1
>>> expander.read('A1')
True
:param pin: The label for the pin to read. (Ex. A0)
:return: Boolean representing the input level
"""
port, pin = self.pin_to_port(pin)
self.i2c_write([0x12 + port])
raw = self.i2c_read(1)
value = struct.unpack('>B', raw)[0]
    return (value & (1 << pin)) > 0
| 32.5 | 16.409091 |
def _characteristics_discovered(self, service):
"""Called when GATT characteristics have been discovered."""
# Characteristics for the specified service were discovered. Update
# set of discovered services and signal when all have been discovered.
self._discovered_services.add(service)
if self._discovered_services >= set(self._peripheral.services()):
# Found all the services characteristics, finally time to fire the
# service discovery complete event.
        self._discovered.set()
| 60.777778 | 19 |
def write_haxis_pdb(self, filename='helical_axis.pdb', step_range=False, step=None, write_smooth_axis=True, write_orig_axis=False, write_curv=False, scale_curv=1):
"""To write trajectory of helcial-axis as a PDB format file.
Both local helical axis and global (smoothed) axis can be written to PDB file.
For global axis, curvature could be written in B-factor field of PDB file.
Parameters
----------
filename : str
Name of the output PDB format file.
step_range : bool
* ``step_range = True`` : Make axis smooth for the given range of base-steps
        * ``step_range = False``: Make axis smooth for the entire DNA. If the original helical axis of any
          base-step is not available, an error will be raised.
step : list
List containing lower and higher limit of base-steps range.
* This option only works with ``step_range=True``.
        * This list should not contain more than two numbers.
* First number should be less than second number.
Example for base-step 4 to 15:
``step = [4,15] # step_range = True``
write_smooth_axis : bool
Write coordinates of smoothed helical axis as chain A.
write_orig_axis : bool
Write coordinates of original helical axis (output from do_x3dna) as chain B.
write_curv : bool
Write curvature of smoothed helical axis in B-factor column of PDB file.
scale_curv : int
Scaling of curvature. ``curvature * scale_curv`` is written in B-factor column of PDB file.
"""
    if step_range and step is None:
raise ValueError(
"See, documentation for step and step_range usage!!!")
if not write_orig_axis and not write_smooth_axis:
raise ValueError(
"Nothing to write as both \"write_orig_axis=False\" and \"write_smooth_axis=False\" !!!")
if step_range:
if (len(step) != 2):
raise ValueError("See, documentation for step usage!!!")
if step[0] > step[1]:
raise ValueError("See, documentation for step usage!!!")
# Original helical axis
if (write_orig_axis):
RawX, bp_idx = self.get_parameters(
'helical x-axis', step, bp_range=True)
RawY, dummy = self.get_parameters(
'helical y-axis', step, bp_range=True)
RawZ, dummy = self.get_parameters(
'helical z-axis', step, bp_range=True)
# Smoothed helical axis
if (write_smooth_axis):
SmoothX, bp_idx = self.get_parameters(
'helical x-axis smooth', step, bp_range=True)
SmoothY, bp_idx = self.get_parameters(
'helical y-axis smooth', step, bp_range=True)
SmoothZ, bp_idx = self.get_parameters(
'helical z-axis smooth', step, bp_range=True)
# curvature
if (write_curv):
curvature, bp_idx = self.get_parameters(
'curvature', step, bp_range=True)
else:
# Original helical axis
if (write_orig_axis):
RawX, bp_idx = self.get_parameters(
'helical x-axis', [1, self.num_step], bp_range=True)
RawY, dummy = self.get_parameters(
'helical y-axis', [1, self.num_step], bp_range=True)
RawZ, dummy = self.get_parameters(
'helical z-axis', [1, self.num_step], bp_range=True)
# Smoothed helical axis
if (write_smooth_axis):
SmoothX, bp_idx = self.get_parameters(
'helical x-axis smooth', [1, self.num_step], bp_range=True)
SmoothY, bp_idx = self.get_parameters(
'helical y-axis smooth', [1, self.num_step], bp_range=True)
SmoothZ, bp_idx = self.get_parameters(
'helical z-axis smooth', [1, self.num_step], bp_range=True)
# curvature
if (write_curv):
curvature, bp_idx = self.get_parameters(
'curvature', [1, self.num_step], bp_range=True)
if (write_orig_axis):
RawX = np.array(RawX).T
RawY = np.array(RawY).T
RawZ = np.array(RawZ).T
if (write_smooth_axis):
SmoothX = np.array(SmoothX).T
SmoothY = np.array(SmoothY).T
SmoothZ = np.array(SmoothZ).T
if (write_curv):
curvature = np.array(curvature).T
f = open(filename, 'w')
for i in range(len(self.time)):
f.write('%-6s %4d\n' % ("MODEL", i + 1))
bfactor = 0.00
if (write_smooth_axis):
for j in range(len(SmoothX[i])):
if (write_curv):
bfactor = curvature[i][j] * scale_curv
f.write('%-6s%5d %4s%1s%3s %1s%4d%1s %8.3f%8.3f%8.3f%6.2f%6.2f\n' % ("ATOM", j + 1, "CA",
" ", "AXS", "A", j + 1, " ", SmoothX[i][j], SmoothY[i][j], SmoothZ[i][j], 1.00, bfactor))
for j in range(len(SmoothX[i]) - 1):
f.write('CONECT %4d %4d\n' % (j + 1, j + 2))
f.write("TER\n")
if (write_orig_axis):
atomstart = 0
if (write_smooth_axis):
atomstart = len(SmoothX[i])
for j in range(len(RawX[i])):
f.write('%-6s%5d %4s%1s%3s %1s%4d%1s %8.3f%8.3f%8.3f%6.2f%6.2f\n' % ("ATOM", j + 1 +
atomstart, "O", " ", "AXS", "B", j + 1, " ", RawX[i][j], RawY[i][j], RawZ[i][j], 1.00, 0.00))
for j in range(len(RawX[i]) - 1):
f.write('CONECT %4d %4d\n' %
(j + 1 + atomstart, j + 2 + atomstart))
f.write("TER\n")
f.write("ENDMDL\n")
    f.close()
| 39.928571 | 25.935065 |
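A short, hedged call sketch for the writer above; `dna` stands for an analysis object that already has the smoothed helical-axis and curvature parameters computed, which is an assumption of this example.

# Hypothetical usage; `dna` is assumed to expose this method with smoothed
# axis and curvature data already available.
dna.write_haxis_pdb(
    filename='axis_4_15.pdb',
    step_range=True,
    step=[4, 15],          # base-steps 4 to 15, as in the docstring example
    write_smooth_axis=True,
    write_curv=True,
    scale_curv=10,
)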
def detect_ts(df, max_anoms=0.10, direction='pos',
alpha=0.05, only_last=None, threshold=None,
e_value=False, longterm=False,
piecewise_median_period_weeks=2, plot=False,
y_log=False, xlabel = '', ylabel = 'count',
title=None, verbose=False):
"""
Anomaly Detection Using Seasonal Hybrid ESD Test
A technique for detecting anomalies in seasonal univariate time series where the input is a
series of <timestamp, value> pairs.
Args:
        df: Time series as a two column data frame where the first column consists of the
timestamps and the second column consists of the observations.
max_anoms: Maximum number of anomalies that S-H-ESD will detect as a percentage of the
data.
direction: Directionality of the anomalies to be detected. Options are: ('pos' | 'neg' | 'both').
alpha: The level of statistical significance with which to accept or reject anomalies.
only_last: Find and report anomalies only within the last day or hr in the time series. Options: (None | 'day' | 'hr')
threshold: Only report positive going anoms above the threshold specified. Options are: (None | 'med_max' | 'p95' | 'p99')
e_value: Add an additional column to the anoms output containing the expected value.
longterm: Increase anom detection efficacy for time series that are greater than a month.
See Details below.
piecewise_median_period_weeks: The piecewise median time window as described in Vallis, Hochenbaum, and Kejariwal (2014). Defaults to 2.
plot: (Currently unsupported) A flag indicating if a plot with both the time series and the estimated anoms,
indicated by circles, should also be returned.
y_log: Apply log scaling to the y-axis. This helps with viewing plots that have extremely
large positive anomalies relative to the rest of the data.
xlabel: X-axis label to be added to the output plot.
ylabel: Y-axis label to be added to the output plot.
Details
'longterm' This option should be set when the input time series is longer than a month.
The option enables the approach described in Vallis, Hochenbaum, and Kejariwal (2014).
'threshold' Filter all negative anomalies and those anomalies whose magnitude is smaller
than one of the specified thresholds which include: the median
of the daily max values (med_max), the 95th percentile of the daily max values (p95), and the
99th percentile of the daily max values (p99).
'title' Title for the output plot.
'verbose' Enable debug messages
The returned value is a dictionary with the following components:
anoms: Data frame containing timestamps, values, and optionally expected values.
plot: A graphical object if plotting was requested by the user. The plot contains
the estimated anomalies annotated on the input time series
"""
if not isinstance(df, DataFrame):
raise ValueError("data must be a single data frame.")
else:
if len(df.columns) != 2 or not df.iloc[:,1].map(np.isreal).all():
raise ValueError(("data must be a 2 column data.frame, with the"
"first column being a set of timestamps, and "
"the second coloumn being numeric values."))
if (not (df.dtypes[0].type is np.datetime64)
and not (df.dtypes[0].type is np.int64)):
df = format_timestamp(df)
if list(df.columns.values) != ["timestamp", "value"]:
df.columns = ["timestamp", "value"]
# Sanity check all input parameters
if max_anoms > 0.49:
length = len(df.value)
raise ValueError(
("max_anoms must be less than 50% of "
"the data points (max_anoms =%f data_points =%s).")
% (round(max_anoms * length, 0), length))
if not direction in ['pos', 'neg', 'both']:
raise ValueError("direction options are: pos | neg | both.")
    if not (0.01 <= alpha <= 0.1):
        if verbose:
            import warnings
            warnings.warn(("alpha is the statistical significance, "
                           "and is usually between 0.01 and 0.1"))
if only_last and not only_last in ['day', 'hr']:
raise ValueError("only_last must be either 'day' or 'hr'")
if not threshold in [None,'med_max','p95','p99']:
raise ValueError("threshold options are: None | med_max | p95 | p99")
if not isinstance(e_value, bool):
raise ValueError("e_value must be a boolean")
if not isinstance(longterm, bool):
raise ValueError("longterm must be a boolean")
if piecewise_median_period_weeks < 2:
raise ValueError(
"piecewise_median_period_weeks must be at greater than 2 weeks")
if not isinstance(plot, bool):
raise ValueError("plot must be a boolean")
if not isinstance(y_log, bool):
raise ValueError("y_log must be a boolean")
if not isinstance(xlabel, string_types):
raise ValueError("xlabel must be a string")
if not isinstance(ylabel, string_types):
raise ValueError("ylabel must be a string")
if title and not isinstance(title, string_types):
raise ValueError("title must be a string")
if not title:
title = ''
else:
title = title + " : "
gran = get_gran(df)
if gran == "day":
num_days_per_line = 7
if isinstance(only_last, string_types) and only_last == 'hr':
only_last = 'day'
else:
num_days_per_line = 1
if gran == 'sec':
df.timestamp = date_format(df.timestamp, "%Y-%m-%d %H:%M:00")
df = format_timestamp(df.groupby('timestamp').aggregate(np.sum))
# if the data is daily, then we need to bump
# the period to weekly to get multiple examples
gran_period = {
'min': 1440,
'hr': 24,
'day': 7
}
period = gran_period.get(gran)
if not period:
raise ValueError('%s granularity detected. This is currently not supported.' % gran)
num_obs = len(df.value)
clamp = (1 / float(num_obs))
if max_anoms < clamp:
max_anoms = clamp
if longterm:
if gran == "day":
num_obs_in_period = period * piecewise_median_period_weeks + 1
num_days_in_period = 7 * piecewise_median_period_weeks + 1
else:
num_obs_in_period = period * 7 * piecewise_median_period_weeks
num_days_in_period = 7 * piecewise_median_period_weeks
last_date = df.timestamp.iloc[-1]
all_data = []
for j in range(0, len(df.timestamp), num_obs_in_period):
start_date = df.timestamp.iloc[j]
end_date = min(start_date
+ datetime.timedelta(days=num_days_in_period),
df.timestamp.iloc[-1])
# if there is at least 14 days left, subset it,
# otherwise subset last_date - 14days
if (end_date - start_date).days == num_days_in_period:
sub_df = df[(df.timestamp >= start_date)
& (df.timestamp < end_date)]
else:
sub_df = df[(df.timestamp >
(last_date - datetime.timedelta(days=num_days_in_period)))
& (df.timestamp <= last_date)]
all_data.append(sub_df)
else:
all_data = [df]
all_anoms = DataFrame(columns=['timestamp', 'value'])
seasonal_plus_trend = DataFrame(columns=['timestamp', 'value'])
# Detect anomalies on all data (either entire data in one-pass,
# or in 2 week blocks if longterm=TRUE)
for i in range(len(all_data)):
directions = {
'pos': Direction(True, True),
'neg': Direction(True, False),
'both': Direction(False, True)
}
anomaly_direction = directions[direction]
# detect_anoms actually performs the anomaly detection and
# returns the results in a list containing the anomalies
# as well as the decomposed components of the time series
# for further analysis.
s_h_esd_timestamps = detect_anoms(all_data[i], k=max_anoms, alpha=alpha,
num_obs_per_period=period,
use_decomp=True,
one_tail=anomaly_direction.one_tail,
upper_tail=anomaly_direction.upper_tail,
verbose=verbose)
# store decomposed components in local variable and overwrite
# s_h_esd_timestamps to contain only the anom timestamps
data_decomp = s_h_esd_timestamps['stl']
s_h_esd_timestamps = s_h_esd_timestamps['anoms']
# -- Step 3: Use detected anomaly timestamps to extract the actual
# anomalies (timestamp and value) from the data
if s_h_esd_timestamps:
anoms = all_data[i][all_data[i].timestamp.isin(s_h_esd_timestamps)]
else:
anoms = DataFrame(columns=['timestamp', 'value'])
# Filter the anomalies using one of the thresholding functions if applicable
if threshold:
# Calculate daily max values
periodic_maxes = df.groupby(
df.timestamp.map(Timestamp.date)).aggregate(np.max).value
# Calculate the threshold set by the user
if threshold == 'med_max':
thresh = periodic_maxes.median()
elif threshold == 'p95':
thresh = periodic_maxes.quantile(.95)
elif threshold == 'p99':
thresh = periodic_maxes.quantile(.99)
# Remove any anoms below the threshold
anoms = anoms[anoms.value >= thresh]
all_anoms = all_anoms.append(anoms)
seasonal_plus_trend = seasonal_plus_trend.append(data_decomp)
# Cleanup potential duplicates
try:
all_anoms.drop_duplicates(subset=['timestamp'], inplace=True)
seasonal_plus_trend.drop_duplicates(subset=['timestamp'], inplace=True)
except TypeError:
all_anoms.drop_duplicates(cols=['timestamp'], inplace=True)
seasonal_plus_trend.drop_duplicates(cols=['timestamp'], inplace=True)
# -- If only_last was set by the user,
# create subset of the data that represent the most recent day
if only_last:
start_date = df.timestamp.iloc[-1] - datetime.timedelta(days=7)
start_anoms = df.timestamp.iloc[-1] - datetime.timedelta(days=1)
        if gran == "day":
breaks = 3 * 12
num_days_per_line = 7
else:
if only_last == 'day':
breaks = 12
else:
start_date = df.timestamp.iloc[-1] - datetime.timedelta(days=2)
# truncate to days
start_date = datetime.date(start_date.year,
start_date.month, start_date.day)
start_anoms = (df.timestamp.iloc[-1]
- datetime.timedelta(hours=1))
breaks = 3
# subset the last days worth of data
x_subset_single_day = df[df.timestamp > start_anoms]
# When plotting anoms for the last day only
# we only show the previous weeks data
x_subset_week = df[(df.timestamp <= start_anoms)
& (df.timestamp > start_date)]
if len(all_anoms) > 0:
all_anoms = all_anoms[all_anoms.timestamp >=
x_subset_single_day.timestamp.iloc[0]]
num_obs = len(x_subset_single_day.value)
# Calculate number of anomalies as a percentage
anom_pct = (len(df.value) / float(num_obs)) * 100
if anom_pct == 0:
return {
"anoms": None,
"plot": None
}
# The original R implementation handles plotting here.
# Plotting is currently not implemented in this version.
# if plot:
# plot_something()
all_anoms.index = all_anoms.timestamp
if e_value:
d = {
'timestamp': all_anoms.timestamp,
'anoms': all_anoms.value,
'expected_value': seasonal_plus_trend[
seasonal_plus_trend.timestamp.isin(
all_anoms.timestamp)].value
}
else:
d = {
'timestamp': all_anoms.timestamp,
'anoms': all_anoms.value
}
anoms = DataFrame(d, index=d['timestamp'].index)
return {
'anoms': anoms,
'plot': None
    }
| 38.45625 | 24.4125 |
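A hedged usage sketch for the detector above, assuming `detect_ts` and its helpers (format_timestamp, get_gran, detect_anoms, and so on) are importable from the same module; the input frame is synthetic and the parameter values are arbitrary examples of the documented options.

# Hypothetical usage; the data frame is synthetic, hourly data over two weeks.
import numpy as np
import pandas as pd

ts = pd.DataFrame({
    'timestamp': pd.date_range('2024-01-01', periods=24 * 14, freq='H'),
    'value': np.random.poisson(lam=20, size=24 * 14),
})
result = detect_ts(ts, max_anoms=0.02, direction='both', alpha=0.05)
if result['anoms'] is not None:
    print(result['anoms'].head())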
def get_fields(catalog, filter_in=None, filter_out=None, meta_field=None,
only_time_series=False, distribution_identifier=None):
"""Devuelve lista de campos del catálogo o de uno de sus metadatos.
Args:
catalog (dict, str or DataJson): Representación externa/interna de un
catálogo. Una representación _externa_ es un path local o una
URL remota a un archivo con la metadata de un catálogo, en
formato JSON o XLSX. La representación _interna_ de un catálogo
es un diccionario. Ejemplos: http://datos.gob.ar/data.json,
http://www.ign.gob.ar/descargas/geodatos/catalog.xlsx,
"/energia/catalog.xlsx".
filter_in (dict): Devuelve los campos cuyos atributos
coinciden con los pasados en este diccionario. Ejemplo::
{
"dataset": {
"publisher": {"name": "Ministerio de Ambiente"}
}
}
Sólo se devolverán los campos que pertenezcan a un dataset
de ese publisher_name.
filter_out (dict): Devuelve los campos cuyos atributos no
coinciden con los pasados en este diccionario. Ejemplo::
{
"dataset": {
"publisher": {"name": "Ministerio de Ambiente"}
}
}
Sólo se devolverán los campos que no pertenezcan a un
dataset de ese publisher_name.
meta_field (str): Nombre de un metadato de Field. En lugar de
devolver los objetos completos "Field", devuelve una lista de
valores para ese metadato presentes en el catálogo.
exclude_meta_fields (list): Metadatos de Field que se quieren
excluir de los objetos Field devueltos.
only_time_series (bool): Si es verdadero, sólo devuelve campos
que sean series de tiempo.
"""
filter_in = filter_in or {}
filter_out = filter_out or {}
catalog = read_catalog_obj(catalog)
    # add shortcuts for filters
if distribution_identifier:
if "distribution" not in filter_in:
filter_in["distribution"] = {}
filter_in["distribution"]["identifier"] = distribution_identifier
fields = []
for distribution in get_distributions(catalog, filter_in, filter_out,
only_time_series=only_time_series):
distribution_fields = distribution.get("field", [])
if isinstance(distribution_fields, list):
for field in distribution_fields:
if not only_time_series or field_is_time_series(field,
distribution):
                    # add the dataset id
field["dataset_identifier"] = distribution[
"dataset_identifier"]
                    # add the distribution id
field["distribution_identifier"] = distribution.get(
"identifier")
fields.append(field)
filtered_fields = [field for field in fields if
_filter_dictionary(field, filter_in.get("field"),
filter_out.get("field"))]
if meta_field:
return [field[meta_field] for field in filtered_fields
if meta_field in field]
else:
        return filtered_fields
| 44.907895 | 21.644737 |
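
A small, self-contained illustration of the `filter_in` idea described in the docstring above: keep only the items whose nested attributes match a template dictionary. The `matches` helper below is a hedged sketch, not the library's actual `_filter_dictionary` implementation.

def matches(item, template):
    # Every key in the template must appear in the item with an equal (possibly nested) value.
    if not isinstance(template, dict):
        return item == template
    return all(matches(item.get(k), v) for k, v in template.items())

datasets = [
    {"identifier": "1", "publisher": {"name": "Ministerio de Ambiente"}},
    {"identifier": "2", "publisher": {"name": "Otro organismo"}},
]
filter_in = {"publisher": {"name": "Ministerio de Ambiente"}}
print([d["identifier"] for d in datasets if matches(d, filter_in)])   # ['1']
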
def trades(self, symbol='btcusd', since=0, limit_trades=50,
include_breaks=0):
"""
Send a request to get all public trades, return the response.
Arguments:
symbol -- currency symbol (default 'btcusd')
since -- only return trades after this unix timestamp (default 0)
limit_trades -- maximum number of trades to return (default 50).
include_breaks -- whether to display broken trades (default False)
"""
url = self.base_url + '/v1/trades/' + symbol
params = {
'since': since,
'limit_trades': limit_trades,
'include_breaks': include_breaks
}
return requests.get(url, params) | [
"def",
"trades",
"(",
"self",
",",
"symbol",
"=",
"'btcusd'",
",",
"since",
"=",
"0",
",",
"limit_trades",
"=",
"50",
",",
"include_breaks",
"=",
"0",
")",
":",
"url",
"=",
"self",
".",
"base_url",
"+",
"'/v1/trades/'",
"+",
"symbol",
"params",
"=",
"{",
"'since'",
":",
"since",
",",
"'limit_trades'",
":",
"limit_trades",
",",
"'include_breaks'",
":",
"include_breaks",
"}",
"return",
"requests",
".",
"get",
"(",
"url",
",",
"params",
")"
]
| 37 | 18.473684 |
def create_folder(self, name, parent_folder_id=0):
"""Create a folder
If the folder exists, a BoxError will be raised.
Args:
        name (str): Name of the folder.
parent_folder_id (int): ID of the folder where to create the new one.
Returns:
dict. Response from Box.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
"""
return self.__request("POST", "folders",
data={ "name": name,
"parent": {"id": unicode(parent_folder_id)} }) | [
"def",
"create_folder",
"(",
"self",
",",
"name",
",",
"parent_folder_id",
"=",
"0",
")",
":",
"return",
"self",
".",
"__request",
"(",
"\"POST\"",
",",
"\"folders\"",
",",
"data",
"=",
"{",
"\"name\"",
":",
"name",
",",
"\"parent\"",
":",
"{",
"\"id\"",
":",
"unicode",
"(",
"parent_folder_id",
")",
"}",
"}",
")"
]
| 31.913043 | 25.086957 |
def convert_value_to_standard_unit(value, symbol='i'):
# type: (Text, Text) -> float
"""
Converts between any two standard units of iota.
:param value:
Value (affixed) to convert. For example: '1.618 Mi'.
:param symbol:
Unit symbol of iota to convert to. For example: 'Gi'.
:return:
Float as units of given symbol to convert to.
"""
try:
# Get input value
value_tuple = value.split()
amount = float(value_tuple[0])
except (ValueError, IndexError, AttributeError):
raise with_context(
ValueError('Value to convert is not valid.'),
context={
'value': value,
},
)
try:
# Set unit symbols and find factor/multiplier.
unit_symbol_from = value_tuple[1]
unit_factor_from = float(STANDARD_UNITS[unit_symbol_from])
unit_factor_to = float(STANDARD_UNITS[symbol])
except (KeyError, IndexError):
# Invalid symbol or no factor
raise with_context(
ValueError('Invalid IOTA unit.'),
context={
'value': value,
'symbol': symbol,
},
)
return amount * (unit_factor_from / unit_factor_to) | [
"def",
"convert_value_to_standard_unit",
"(",
"value",
",",
"symbol",
"=",
"'i'",
")",
":",
"# type: (Text, Text) -> float",
"try",
":",
"# Get input value",
"value_tuple",
"=",
"value",
".",
"split",
"(",
")",
"amount",
"=",
"float",
"(",
"value_tuple",
"[",
"0",
"]",
")",
"except",
"(",
"ValueError",
",",
"IndexError",
",",
"AttributeError",
")",
":",
"raise",
"with_context",
"(",
"ValueError",
"(",
"'Value to convert is not valid.'",
")",
",",
"context",
"=",
"{",
"'value'",
":",
"value",
",",
"}",
",",
")",
"try",
":",
"# Set unit symbols and find factor/multiplier.",
"unit_symbol_from",
"=",
"value_tuple",
"[",
"1",
"]",
"unit_factor_from",
"=",
"float",
"(",
"STANDARD_UNITS",
"[",
"unit_symbol_from",
"]",
")",
"unit_factor_to",
"=",
"float",
"(",
"STANDARD_UNITS",
"[",
"symbol",
"]",
")",
"except",
"(",
"KeyError",
",",
"IndexError",
")",
":",
"# Invalid symbol or no factor",
"raise",
"with_context",
"(",
"ValueError",
"(",
"'Invalid IOTA unit.'",
")",
",",
"context",
"=",
"{",
"'value'",
":",
"value",
",",
"'symbol'",
":",
"symbol",
",",
"}",
",",
")",
"return",
"amount",
"*",
"(",
"unit_factor_from",
"/",
"unit_factor_to",
")"
]
| 27.727273 | 19.136364 |
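
The conversion above reduces to a lookup in a unit table plus one multiplication. A runnable sketch, assuming the usual IOTA powers-of-1000 unit symbols (the library's real STANDARD_UNITS mapping may differ in detail):

STANDARD_UNITS = {'i': 1, 'Ki': 1e3, 'Mi': 1e6, 'Gi': 1e9, 'Ti': 1e12, 'Pi': 1e15}  # assumed table

def convert(value, symbol='i'):
    # Parse "<amount> <unit>" and rescale into the requested unit.
    amount, unit_from = value.split()
    return float(amount) * (STANDARD_UNITS[unit_from] / STANDARD_UNITS[symbol])

print(convert('1.618 Mi', 'Gi'))   # 0.001618
print(convert('42 Gi', 'i'))       # 42000000000.0
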
def RecurseKeys(self):
"""Recurses the subkeys starting with the key.
Yields:
WinRegistryKey: Windows Registry key.
"""
yield self
for subkey in self.GetSubkeys():
for key in subkey.RecurseKeys():
yield key | [
"def",
"RecurseKeys",
"(",
"self",
")",
":",
"yield",
"self",
"for",
"subkey",
"in",
"self",
".",
"GetSubkeys",
"(",
")",
":",
"for",
"key",
"in",
"subkey",
".",
"RecurseKeys",
"(",
")",
":",
"yield",
"key"
]
| 23.8 | 14.5 |
def _execute_cell_code(self, row, col, grid):
"""Executes cell code"""
key = row, col, grid.current_table
grid.code_array[key]
grid.ForceRefresh() | [
"def",
"_execute_cell_code",
"(",
"self",
",",
"row",
",",
"col",
",",
"grid",
")",
":",
"key",
"=",
"row",
",",
"col",
",",
"grid",
".",
"current_table",
"grid",
".",
"code_array",
"[",
"key",
"]",
"grid",
".",
"ForceRefresh",
"(",
")"
]
| 24.857143 | 16 |
def check_answer(self, task_input, language):
"""
Verify the answers in task_input. Returns six values
1st: True the input is **currently** valid. (may become invalid after running the code), False else
2nd: True if the input needs to be run in the VM, False else
3rd: Main message, as a list (that can be join with \n or <br/> for example)
    4th: Problem-specific message, as a dictionary (tuple of result/text)
5th: Number of subproblems that (already) contain errors. <= Number of subproblems
6th: Number of errors in MCQ problems. Not linked to the number of subproblems
"""
valid = True
need_launch = False
main_message = []
problem_messages = {}
error_count = 0
multiple_choice_error_count = 0
for problem in self._problems:
problem_is_valid, problem_main_message, problem_s_messages, problem_mc_error_count = problem.check_answer(task_input, language)
if problem_is_valid is None:
need_launch = True
elif problem_is_valid == False:
error_count += 1
valid = False
if problem_main_message is not None:
main_message.append(problem_main_message)
if problem_s_messages is not None:
problem_messages[problem.get_id()] = (("success" if problem_is_valid else "failed"), problem_s_messages)
multiple_choice_error_count += problem_mc_error_count
return valid, need_launch, main_message, problem_messages, error_count, multiple_choice_error_count | [
"def",
"check_answer",
"(",
"self",
",",
"task_input",
",",
"language",
")",
":",
"valid",
"=",
"True",
"need_launch",
"=",
"False",
"main_message",
"=",
"[",
"]",
"problem_messages",
"=",
"{",
"}",
"error_count",
"=",
"0",
"multiple_choice_error_count",
"=",
"0",
"for",
"problem",
"in",
"self",
".",
"_problems",
":",
"problem_is_valid",
",",
"problem_main_message",
",",
"problem_s_messages",
",",
"problem_mc_error_count",
"=",
"problem",
".",
"check_answer",
"(",
"task_input",
",",
"language",
")",
"if",
"problem_is_valid",
"is",
"None",
":",
"need_launch",
"=",
"True",
"elif",
"problem_is_valid",
"==",
"False",
":",
"error_count",
"+=",
"1",
"valid",
"=",
"False",
"if",
"problem_main_message",
"is",
"not",
"None",
":",
"main_message",
".",
"append",
"(",
"problem_main_message",
")",
"if",
"problem_s_messages",
"is",
"not",
"None",
":",
"problem_messages",
"[",
"problem",
".",
"get_id",
"(",
")",
"]",
"=",
"(",
"(",
"\"success\"",
"if",
"problem_is_valid",
"else",
"\"failed\"",
")",
",",
"problem_s_messages",
")",
"multiple_choice_error_count",
"+=",
"problem_mc_error_count",
"return",
"valid",
",",
"need_launch",
",",
"main_message",
",",
"problem_messages",
",",
"error_count",
",",
"multiple_choice_error_count"
]
| 56.172414 | 25.344828 |
def stddev_samples(data, xcol, ycollist, delta=1.0):
    """Create a sample list that contains the mean and standard deviation of the original list. Each element in the returned list contains the following values: [X, MEAN, STDDEV, MEAN - STDDEV*delta, MEAN + STDDEV*delta].
>>> chart_data.stddev_samples([ [1, 10, 15, 12, 15], [2, 5, 10, 5, 10], [3, 32, 33, 35, 36], [4,16,66, 67, 68] ], 0, range(1,5))
[(1, 13.0, 2.1213203435596424, 10.878679656440358, 15.121320343559642), (2, 7.5, 2.5, 5.0, 10.0), (3, 34.0, 1.5811388300841898, 32.418861169915807, 35.581138830084193), (4, 54.25, 22.094965489902897, 32.155034510097103, 76.344965489902904)]
"""
out = []
numcol = len(ycollist)
try:
for elem in data:
total = 0
for col in ycollist:
total += elem[col]
mean = float(total) / numcol
variance = 0
for col in ycollist:
variance += (mean - elem[col]) ** 2
stddev = math.sqrt(variance / numcol) * delta
out.append((elem[xcol], mean, stddev, mean - stddev, mean + stddev))
except IndexError:
raise IndexError("bad data: %s,xcol=%d,ycollist=%s" % (data, xcol, ycollist))
return out | [
"def",
"stddev_samples",
"(",
"data",
",",
"xcol",
",",
"ycollist",
",",
"delta",
"=",
"1.0",
")",
":",
"out",
"=",
"[",
"]",
"numcol",
"=",
"len",
"(",
"ycollist",
")",
"try",
":",
"for",
"elem",
"in",
"data",
":",
"total",
"=",
"0",
"for",
"col",
"in",
"ycollist",
":",
"total",
"+=",
"elem",
"[",
"col",
"]",
"mean",
"=",
"float",
"(",
"total",
")",
"/",
"numcol",
"variance",
"=",
"0",
"for",
"col",
"in",
"ycollist",
":",
"variance",
"+=",
"(",
"mean",
"-",
"elem",
"[",
"col",
"]",
")",
"**",
"2",
"stddev",
"=",
"math",
".",
"sqrt",
"(",
"variance",
"/",
"numcol",
")",
"*",
"delta",
"out",
".",
"append",
"(",
"(",
"elem",
"[",
"xcol",
"]",
",",
"mean",
",",
"stddev",
",",
"mean",
"-",
"stddev",
",",
"mean",
"+",
"stddev",
")",
")",
"except",
"IndexError",
":",
"raise",
"IndexError",
"(",
"\"bad data: %s,xcol=%d,ycollist=%s\"",
"%",
"(",
"data",
",",
"xcol",
",",
"ycollist",
")",
")",
"return",
"out"
]
| 48 | 30.52 |
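
A standalone check of the per-row arithmetic used above (population standard deviation over the selected y columns), reproducing the first tuple of the docstring example:

import math

row = [1, 10, 15, 12, 15]        # x value followed by four y columns
ys = row[1:]
mean = sum(ys) / len(ys)         # 13.0
stddev = math.sqrt(sum((mean - y) ** 2 for y in ys) / len(ys))   # delta = 1.0
print(row[0], mean, stddev, mean - stddev, mean + stddev)
# 1 13.0 2.1213203435596424 10.878679656440358 15.121320343559642
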
def get_merkle_proof(self, tx_hash: str, is_full: bool = False) -> dict:
"""
This interface is used to get the corresponding merkle proof based on the specified hexadecimal hash value.
        :param tx_hash: a hexadecimal transaction hash value.
        :param is_full: whether to return the full JSON-RPC response instead of only its 'result' field.
:return: the merkle proof in dictionary form.
"""
payload = self.generate_json_rpc_payload(RpcMethod.GET_MERKLE_PROOF, [tx_hash, 1])
response = self.__post(self.__url, payload)
if is_full:
return response
return response['result'] | [
"def",
"get_merkle_proof",
"(",
"self",
",",
"tx_hash",
":",
"str",
",",
"is_full",
":",
"bool",
"=",
"False",
")",
"->",
"dict",
":",
"payload",
"=",
"self",
".",
"generate_json_rpc_payload",
"(",
"RpcMethod",
".",
"GET_MERKLE_PROOF",
",",
"[",
"tx_hash",
",",
"1",
"]",
")",
"response",
"=",
"self",
".",
"__post",
"(",
"self",
".",
"__url",
",",
"payload",
")",
"if",
"is_full",
":",
"return",
"response",
"return",
"response",
"[",
"'result'",
"]"
]
| 43.615385 | 23.153846 |
def list(self, product, store_view=None, identifierType=None):
"""
Retrieve product image list
:param product: ID or SKU of product
:param store_view: Code or ID of store view
:param identifierType: Defines whether the product or SKU value is
passed in the "product" parameter.
:return: `list` of `dict`
"""
return self.call('catalog_product_attribute_media.list',
[product, store_view, identifierType]) | [
"def",
"list",
"(",
"self",
",",
"product",
",",
"store_view",
"=",
"None",
",",
"identifierType",
"=",
"None",
")",
":",
"return",
"self",
".",
"call",
"(",
"'catalog_product_attribute_media.list'",
",",
"[",
"product",
",",
"store_view",
",",
"identifierType",
"]",
")"
]
| 38.769231 | 17.384615 |
def _netstat_sunos():
'''
Return netstat information for SunOS flavors
'''
log.warning('User and program not (yet) supported on SunOS')
ret = []
for addr_family in ('inet', 'inet6'):
# Lookup TCP connections
cmd = 'netstat -f {0} -P tcp -an | tail +5'.format(addr_family)
out = __salt__['cmd.run'](cmd, python_shell=True)
for line in out.splitlines():
comps = line.split()
ret.append({
'proto': 'tcp6' if addr_family == 'inet6' else 'tcp',
'recv-q': comps[5],
'send-q': comps[4],
'local-address': comps[0],
'remote-address': comps[1],
'state': comps[6]})
# Lookup UDP connections
cmd = 'netstat -f {0} -P udp -an | tail +5'.format(addr_family)
out = __salt__['cmd.run'](cmd, python_shell=True)
for line in out.splitlines():
comps = line.split()
ret.append({
'proto': 'udp6' if addr_family == 'inet6' else 'udp',
'local-address': comps[0],
'remote-address': comps[1] if len(comps) > 2 else ''})
return ret | [
"def",
"_netstat_sunos",
"(",
")",
":",
"log",
".",
"warning",
"(",
"'User and program not (yet) supported on SunOS'",
")",
"ret",
"=",
"[",
"]",
"for",
"addr_family",
"in",
"(",
"'inet'",
",",
"'inet6'",
")",
":",
"# Lookup TCP connections",
"cmd",
"=",
"'netstat -f {0} -P tcp -an | tail +5'",
".",
"format",
"(",
"addr_family",
")",
"out",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"cmd",
",",
"python_shell",
"=",
"True",
")",
"for",
"line",
"in",
"out",
".",
"splitlines",
"(",
")",
":",
"comps",
"=",
"line",
".",
"split",
"(",
")",
"ret",
".",
"append",
"(",
"{",
"'proto'",
":",
"'tcp6'",
"if",
"addr_family",
"==",
"'inet6'",
"else",
"'tcp'",
",",
"'recv-q'",
":",
"comps",
"[",
"5",
"]",
",",
"'send-q'",
":",
"comps",
"[",
"4",
"]",
",",
"'local-address'",
":",
"comps",
"[",
"0",
"]",
",",
"'remote-address'",
":",
"comps",
"[",
"1",
"]",
",",
"'state'",
":",
"comps",
"[",
"6",
"]",
"}",
")",
"# Lookup UDP connections",
"cmd",
"=",
"'netstat -f {0} -P udp -an | tail +5'",
".",
"format",
"(",
"addr_family",
")",
"out",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"cmd",
",",
"python_shell",
"=",
"True",
")",
"for",
"line",
"in",
"out",
".",
"splitlines",
"(",
")",
":",
"comps",
"=",
"line",
".",
"split",
"(",
")",
"ret",
".",
"append",
"(",
"{",
"'proto'",
":",
"'udp6'",
"if",
"addr_family",
"==",
"'inet6'",
"else",
"'udp'",
",",
"'local-address'",
":",
"comps",
"[",
"0",
"]",
",",
"'remote-address'",
":",
"comps",
"[",
"1",
"]",
"if",
"len",
"(",
"comps",
")",
">",
"2",
"else",
"''",
"}",
")",
"return",
"ret"
]
| 37.419355 | 17.032258 |
def _clean_dict(row):
"""
    Transform empty string values of dict `row` to None.
"""
row_cleaned = {}
for key, val in row.items():
if val is None or val == '':
row_cleaned[key] = None
else:
row_cleaned[key] = val
return row_cleaned | [
"def",
"_clean_dict",
"(",
"row",
")",
":",
"row_cleaned",
"=",
"{",
"}",
"for",
"key",
",",
"val",
"in",
"row",
".",
"items",
"(",
")",
":",
"if",
"val",
"is",
"None",
"or",
"val",
"==",
"''",
":",
"row_cleaned",
"[",
"key",
"]",
"=",
"None",
"else",
":",
"row_cleaned",
"[",
"key",
"]",
"=",
"val",
"return",
"row_cleaned"
]
| 25.818182 | 11.272727 |
def branchlist2branches(data, commdct, branchlist):
"""get branches from the branchlist"""
objkey = 'BranchList'.upper()
theobjects = data.dt[objkey]
fieldlists = []
objnames = [obj[1] for obj in theobjects]
for theobject in theobjects:
fieldlists.append(list(range(2, len(theobject))))
blists = extractfields(data, commdct, objkey, fieldlists)
thebranches = [branches for name, branches in zip(objnames, blists)
if name == branchlist]
return thebranches[0] | [
"def",
"branchlist2branches",
"(",
"data",
",",
"commdct",
",",
"branchlist",
")",
":",
"objkey",
"=",
"'BranchList'",
".",
"upper",
"(",
")",
"theobjects",
"=",
"data",
".",
"dt",
"[",
"objkey",
"]",
"fieldlists",
"=",
"[",
"]",
"objnames",
"=",
"[",
"obj",
"[",
"1",
"]",
"for",
"obj",
"in",
"theobjects",
"]",
"for",
"theobject",
"in",
"theobjects",
":",
"fieldlists",
".",
"append",
"(",
"list",
"(",
"range",
"(",
"2",
",",
"len",
"(",
"theobject",
")",
")",
")",
")",
"blists",
"=",
"extractfields",
"(",
"data",
",",
"commdct",
",",
"objkey",
",",
"fieldlists",
")",
"thebranches",
"=",
"[",
"branches",
"for",
"name",
",",
"branches",
"in",
"zip",
"(",
"objnames",
",",
"blists",
")",
"if",
"name",
"==",
"branchlist",
"]",
"return",
"thebranches",
"[",
"0",
"]"
]
| 42.416667 | 12.083333 |
def stop(self):
"""
The client stop method. If the client is currently connected
stop the thread and disconnect. Publish the disconnected
message if clean shutdown.
"""
# self.loop.call_soon_threadsafe(self.loop.stop)
# self.loop.stop()
# self._longPoll.cancel()
# self._shortPoll.cancel()
if self.connected:
LOGGER.info('Disconnecting from MQTT... {}:{}'.format(self._server, self._port))
self._mqttc.publish(self.topicSelfConnection, json.dumps({'node': self.profileNum, 'connected': False}), retain=True)
self._mqttc.loop_stop()
self._mqttc.disconnect()
try:
for watcher in self.__stopObservers:
watcher()
except KeyError as e:
LOGGER.exception('KeyError in gotConfig: {}'.format(e), exc_info=True) | [
"def",
"stop",
"(",
"self",
")",
":",
"# self.loop.call_soon_threadsafe(self.loop.stop)",
"# self.loop.stop()",
"# self._longPoll.cancel()",
"# self._shortPoll.cancel()",
"if",
"self",
".",
"connected",
":",
"LOGGER",
".",
"info",
"(",
"'Disconnecting from MQTT... {}:{}'",
".",
"format",
"(",
"self",
".",
"_server",
",",
"self",
".",
"_port",
")",
")",
"self",
".",
"_mqttc",
".",
"publish",
"(",
"self",
".",
"topicSelfConnection",
",",
"json",
".",
"dumps",
"(",
"{",
"'node'",
":",
"self",
".",
"profileNum",
",",
"'connected'",
":",
"False",
"}",
")",
",",
"retain",
"=",
"True",
")",
"self",
".",
"_mqttc",
".",
"loop_stop",
"(",
")",
"self",
".",
"_mqttc",
".",
"disconnect",
"(",
")",
"try",
":",
"for",
"watcher",
"in",
"self",
".",
"__stopObservers",
":",
"watcher",
"(",
")",
"except",
"KeyError",
"as",
"e",
":",
"LOGGER",
".",
"exception",
"(",
"'KeyError in gotConfig: {}'",
".",
"format",
"(",
"e",
")",
",",
"exc_info",
"=",
"True",
")"
]
| 43.3 | 19.7 |
def _wrap_data(data: Union[str, bytes]):
"""
Wraps data into the right event.
"""
MsgType = TextMessage if isinstance(data, str) else BytesMessage
return MsgType(data=data, frame_finished=True, message_finished=True) | [
"def",
"_wrap_data",
"(",
"data",
":",
"Union",
"[",
"str",
",",
"bytes",
"]",
")",
":",
"MsgType",
"=",
"TextMessage",
"if",
"isinstance",
"(",
"data",
",",
"str",
")",
"else",
"BytesMessage",
"return",
"MsgType",
"(",
"data",
"=",
"data",
",",
"frame_finished",
"=",
"True",
",",
"message_finished",
"=",
"True",
")"
]
| 41.833333 | 11.5 |
def totext(self) ->str:
"""
    Return the blob content of this StorageBlobModel instance as a string.
"""
sreturn = ''
if self.properties.content_settings.content_encoding is None:
raise AzureStorageWrapException(self, 'can not convert blob {!s} to text because content_encoding is not given'.format(self.name))
else:
sreturn = self.content.decode(self.properties.content_settings.content_encoding, 'ignore')
return sreturn | [
"def",
"totext",
"(",
"self",
")",
"->",
"str",
":",
"sreturn",
"=",
"''",
"if",
"self",
".",
"properties",
".",
"content_settings",
".",
"content_encoding",
"is",
"None",
":",
"raise",
"AzureStorageWrapException",
"(",
"self",
",",
"'can not convert blob {!s} to text because content_encoding is not given'",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"else",
":",
"sreturn",
"=",
"self",
".",
"content",
".",
"decode",
"(",
"self",
".",
"properties",
".",
"content_settings",
".",
"content_encoding",
",",
"'ignore'",
")",
"return",
"sreturn"
]
| 45.545455 | 32.909091 |
def encodeIntoArray(self, x, output):
""" See method description in base.py """
if x is not None and not isinstance(x, numbers.Number):
raise TypeError(
"Expected a scalar input but got input of type %s" % type(x))
# Get the bucket index to use
bucketIdx = self.getBucketIndices(x)[0]
# None is returned for missing value in which case we return all 0's.
output[0:self.n] = 0
if bucketIdx is not None:
output[self.mapBucketIndexToNonZeroBits(bucketIdx)] = 1 | [
"def",
"encodeIntoArray",
"(",
"self",
",",
"x",
",",
"output",
")",
":",
"if",
"x",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"x",
",",
"numbers",
".",
"Number",
")",
":",
"raise",
"TypeError",
"(",
"\"Expected a scalar input but got input of type %s\"",
"%",
"type",
"(",
"x",
")",
")",
"# Get the bucket index to use",
"bucketIdx",
"=",
"self",
".",
"getBucketIndices",
"(",
"x",
")",
"[",
"0",
"]",
"# None is returned for missing value in which case we return all 0's.",
"output",
"[",
"0",
":",
"self",
".",
"n",
"]",
"=",
"0",
"if",
"bucketIdx",
"is",
"not",
"None",
":",
"output",
"[",
"self",
".",
"mapBucketIndexToNonZeroBits",
"(",
"bucketIdx",
")",
"]",
"=",
"1"
]
| 35.5 | 20.142857 |
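
A simplified, standalone illustration of the encoding pattern above: zero the output array, then set the bits that a bucket index maps to. The bucket-to-bit mapping here is a stand-in; a real random distributed scalar encoder derives it very differently.

import numpy as np

n, w = 20, 3                         # output width and active bit count (assumed)

def bucket_bits(bucket_idx):
    # Stand-in mapping from a bucket index to w output bit positions.
    return [(bucket_idx * 7 + k) % n for k in range(w)]

output = np.zeros(n, dtype=np.uint8)
output[:] = 0
output[bucket_bits(4)] = 1
print(output)
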
def backend_fields(self, fields):
    '''Return a two-element tuple containing a list
    of field names and a list of field attribute names.'''
dfields = self.dfields
processed = set()
names = []
atts = []
pkname = self.pkname()
for name in fields:
if name == pkname or name in processed:
continue
elif name in dfields:
processed.add(name)
field = dfields[name]
names.append(field.name)
atts.append(field.attname)
else:
bname = name.split(JSPLITTER)[0]
if bname in dfields:
field = dfields[bname]
if field.type in ('json object', 'related object'):
processed.add(name)
names.append(name)
atts.append(name)
return names, atts | [
"def",
"backend_fields",
"(",
"self",
",",
"fields",
")",
":",
"dfields",
"=",
"self",
".",
"dfields",
"processed",
"=",
"set",
"(",
")",
"names",
"=",
"[",
"]",
"atts",
"=",
"[",
"]",
"pkname",
"=",
"self",
".",
"pkname",
"(",
")",
"for",
"name",
"in",
"fields",
":",
"if",
"name",
"==",
"pkname",
"or",
"name",
"in",
"processed",
":",
"continue",
"elif",
"name",
"in",
"dfields",
":",
"processed",
".",
"add",
"(",
"name",
")",
"field",
"=",
"dfields",
"[",
"name",
"]",
"names",
".",
"append",
"(",
"field",
".",
"name",
")",
"atts",
".",
"append",
"(",
"field",
".",
"attname",
")",
"else",
":",
"bname",
"=",
"name",
".",
"split",
"(",
"JSPLITTER",
")",
"[",
"0",
"]",
"if",
"bname",
"in",
"dfields",
":",
"field",
"=",
"dfields",
"[",
"bname",
"]",
"if",
"field",
".",
"type",
"in",
"(",
"'json object'",
",",
"'related object'",
")",
":",
"processed",
".",
"add",
"(",
"name",
")",
"names",
".",
"append",
"(",
"name",
")",
"atts",
".",
"append",
"(",
"name",
")",
"return",
"names",
",",
"atts"
]
| 37.72 | 10.44 |
def get_verbatim(key, **kwargs):
'''
Gets a verbatim occurrence record without any interpretation
:param key: [int] A GBIF occurrence key
:return: A dictionary, of results
Usage::
from pygbif import occurrences
occurrences.get_verbatim(key = 1258202889)
occurrences.get_verbatim(key = 1227768771)
occurrences.get_verbatim(key = 1227769518)
'''
url = gbif_baseurl + 'occurrence/' + str(key) + '/verbatim'
out = gbif_GET(url, {}, **kwargs)
return out | [
"def",
"get_verbatim",
"(",
"key",
",",
"*",
"*",
"kwargs",
")",
":",
"url",
"=",
"gbif_baseurl",
"+",
"'occurrence/'",
"+",
"str",
"(",
"key",
")",
"+",
"'/verbatim'",
"out",
"=",
"gbif_GET",
"(",
"url",
",",
"{",
"}",
",",
"*",
"*",
"kwargs",
")",
"return",
"out"
]
| 27.944444 | 20.944444 |
def VEXTRACTF128(cpu, dest, src, offset):
"""Extract Packed Floating-Point Values
Extracts 128-bits of packed floating-point values from the source
operand (second operand) at an 128-bit offset from imm8[0] into the
destination operand (first operand). The destination may be either an
XMM register or an 128-bit memory location.
"""
offset = offset.read()
dest.write(Operators.EXTRACT(src.read(), offset * 128, (offset + 1) * 128)) | [
"def",
"VEXTRACTF128",
"(",
"cpu",
",",
"dest",
",",
"src",
",",
"offset",
")",
":",
"offset",
"=",
"offset",
".",
"read",
"(",
")",
"dest",
".",
"write",
"(",
"Operators",
".",
"EXTRACT",
"(",
"src",
".",
"read",
"(",
")",
",",
"offset",
"*",
"128",
",",
"(",
"offset",
"+",
"1",
")",
"*",
"128",
")",
")"
]
| 48.8 | 21 |
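
In plain-integer terms the instruction keeps one 128-bit lane of a 256-bit source; the semantic model above expresses the same thing with its symbolic EXTRACT operator. A concrete sketch:

def extract_lane(src_256, offset):
    # Keep bits [offset*128, (offset+1)*128) of a 256-bit integer.
    return (src_256 >> (offset * 128)) & ((1 << 128) - 1)

src = (0xAAAA << 128) | 0xBBBB     # hypothetical value: high lane 0xAAAA..., low lane ...0xBBBB
print(hex(extract_lane(src, 0)))   # 0xbbbb
print(hex(extract_lane(src, 1)))   # 0xaaaa
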
def auto_stratify(scores, **kwargs):
"""Generate Strata instance automatically
Parameters
----------
scores : array-like, shape=(n_items,)
ordered array of scores which quantify the classifier confidence for
the items in the pool. High scores indicate a high confidence that
the true label is a "1" (and vice versa for label "0").
**kwargs :
optional keyword arguments. May include 'stratification_method',
'stratification_n_strata', 'stratification_n_bins'.
Returns
-------
Strata instance
"""
if 'stratification_method' in kwargs:
method = kwargs['stratification_method']
else:
method = 'cum_sqrt_F'
if 'stratification_n_strata' in kwargs:
n_strata = kwargs['stratification_n_strata']
else:
n_strata = 'auto'
if 'stratification_n_bins' in kwargs:
n_bins = kwargs['stratification_n_bins']
strata = stratify_by_scores(scores, n_strata, method = method, \
n_bins = n_bins)
else:
strata = stratify_by_scores(scores, n_strata, method = method)
return strata | [
"def",
"auto_stratify",
"(",
"scores",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'stratification_method'",
"in",
"kwargs",
":",
"method",
"=",
"kwargs",
"[",
"'stratification_method'",
"]",
"else",
":",
"method",
"=",
"'cum_sqrt_F'",
"if",
"'stratification_n_strata'",
"in",
"kwargs",
":",
"n_strata",
"=",
"kwargs",
"[",
"'stratification_n_strata'",
"]",
"else",
":",
"n_strata",
"=",
"'auto'",
"if",
"'stratification_n_bins'",
"in",
"kwargs",
":",
"n_bins",
"=",
"kwargs",
"[",
"'stratification_n_bins'",
"]",
"strata",
"=",
"stratify_by_scores",
"(",
"scores",
",",
"n_strata",
",",
"method",
"=",
"method",
",",
"n_bins",
"=",
"n_bins",
")",
"else",
":",
"strata",
"=",
"stratify_by_scores",
"(",
"scores",
",",
"n_strata",
",",
"method",
"=",
"method",
")",
"return",
"strata"
]
| 34.121212 | 20.606061 |
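
The keyword handling above is the usual "fall back to defaults, forward optional extras" pattern; a compact standalone equivalent (the stratification call itself is replaced by a stand-in return value):

def auto_stratify_sketch(scores, **kwargs):
    method = kwargs.get('stratification_method', 'cum_sqrt_F')
    n_strata = kwargs.get('stratification_n_strata', 'auto')
    extra = {}
    if 'stratification_n_bins' in kwargs:
        extra['n_bins'] = kwargs['stratification_n_bins']
    # stand-in for: stratify_by_scores(scores, n_strata, method=method, **extra)
    return {'method': method, 'n_strata': n_strata, **extra}

print(auto_stratify_sketch([0.1, 0.9], stratification_n_bins=30))
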
def virt_env(self):
"""
Getter for this instance's virt env, creates it if needed
Returns:
lago.virt.VirtEnv: virt env instance used by this prefix
"""
if self._virt_env is None:
self._virt_env = self._create_virt_env()
return self._virt_env | [
"def",
"virt_env",
"(",
"self",
")",
":",
"if",
"self",
".",
"_virt_env",
"is",
"None",
":",
"self",
".",
"_virt_env",
"=",
"self",
".",
"_create_virt_env",
"(",
")",
"return",
"self",
".",
"_virt_env"
]
| 30.5 | 16.7 |
def remove_destinations(self, server_id, destination_paths):
# pylint: disable=line-too-long
"""
Remove listener destinations from a WBEM server, by deleting the
listener destination instances in the server.
The listener destinations must be owned or permanent (i.e. not static).
This method verifies that there are not currently any subscriptions on
the listener destinations to be removed, in order to handle server
implementations that do not ensure that on the server side as required
by :term:`DSP1054`.
Parameters:
server_id (:term:`string`):
The server ID of the WBEM server, returned by
:meth:`~pywbem.WBEMSubscriptionManager.add_server`.
destination_paths (:class:`~pywbem.CIMInstanceName` or list of :class:`~pywbem.CIMInstanceName`):
Instance path(s) of the listener destination instance(s) in the
WBEM server.
Raises:
Exceptions raised by :class:`~pywbem.WBEMConnection`.
CIMError: CIM_ERR_FAILED, if there are referencing subscriptions.
""" # noqa: E501
# Validate server_id
server = self._get_server(server_id)
conn_id = server.conn.conn_id if server.conn is not None else None
# If list, recursively call this function with each list item.
if isinstance(destination_paths, list):
for dest_path in destination_paths:
self.remove_destinations(server_id, dest_path)
return
# Here, the variable will be a single list item.
dest_path = destination_paths
# Verify referencing subscriptions.
ref_paths = server.conn.ReferenceNames(
dest_path, ResultClass=SUBSCRIPTION_CLASSNAME)
if ref_paths:
# DSP1054 1.2 defines that this CIM error is raised by the server
# in that case, so we simulate that behavior on the client side.
raise CIMError(
CIM_ERR_FAILED,
"The listener destination is referenced by subscriptions.",
conn_id=conn_id)
server.conn.DeleteInstance(dest_path)
inst_list = self._owned_destinations[server_id]
# We iterate backwards because we change the list
for i in six.moves.range(len(inst_list) - 1, -1, -1):
inst = inst_list[i]
if inst.path == dest_path:
del inst_list[i] | [
"def",
"remove_destinations",
"(",
"self",
",",
"server_id",
",",
"destination_paths",
")",
":",
"# pylint: disable=line-too-long",
"# noqa: E501",
"# Validate server_id",
"server",
"=",
"self",
".",
"_get_server",
"(",
"server_id",
")",
"conn_id",
"=",
"server",
".",
"conn",
".",
"conn_id",
"if",
"server",
".",
"conn",
"is",
"not",
"None",
"else",
"None",
"# If list, recursively call this function with each list item.",
"if",
"isinstance",
"(",
"destination_paths",
",",
"list",
")",
":",
"for",
"dest_path",
"in",
"destination_paths",
":",
"self",
".",
"remove_destinations",
"(",
"server_id",
",",
"dest_path",
")",
"return",
"# Here, the variable will be a single list item.",
"dest_path",
"=",
"destination_paths",
"# Verify referencing subscriptions.",
"ref_paths",
"=",
"server",
".",
"conn",
".",
"ReferenceNames",
"(",
"dest_path",
",",
"ResultClass",
"=",
"SUBSCRIPTION_CLASSNAME",
")",
"if",
"ref_paths",
":",
"# DSP1054 1.2 defines that this CIM error is raised by the server",
"# in that case, so we simulate that behavior on the client side.",
"raise",
"CIMError",
"(",
"CIM_ERR_FAILED",
",",
"\"The listener destination is referenced by subscriptions.\"",
",",
"conn_id",
"=",
"conn_id",
")",
"server",
".",
"conn",
".",
"DeleteInstance",
"(",
"dest_path",
")",
"inst_list",
"=",
"self",
".",
"_owned_destinations",
"[",
"server_id",
"]",
"# We iterate backwards because we change the list",
"for",
"i",
"in",
"six",
".",
"moves",
".",
"range",
"(",
"len",
"(",
"inst_list",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"inst",
"=",
"inst_list",
"[",
"i",
"]",
"if",
"inst",
".",
"path",
"==",
"dest_path",
":",
"del",
"inst_list",
"[",
"i",
"]"
]
| 39.737705 | 23.540984 |
def show_linkinfo_output_show_link_info_linkinfo_version(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_linkinfo = ET.Element("show_linkinfo")
config = show_linkinfo
output = ET.SubElement(show_linkinfo, "output")
show_link_info = ET.SubElement(output, "show-link-info")
linkinfo_rbridgeid_key = ET.SubElement(show_link_info, "linkinfo-rbridgeid")
linkinfo_rbridgeid_key.text = kwargs.pop('linkinfo_rbridgeid')
linkinfo_version = ET.SubElement(show_link_info, "linkinfo-version")
linkinfo_version.text = kwargs.pop('linkinfo_version')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"show_linkinfo_output_show_link_info_linkinfo_version",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"show_linkinfo",
"=",
"ET",
".",
"Element",
"(",
"\"show_linkinfo\"",
")",
"config",
"=",
"show_linkinfo",
"output",
"=",
"ET",
".",
"SubElement",
"(",
"show_linkinfo",
",",
"\"output\"",
")",
"show_link_info",
"=",
"ET",
".",
"SubElement",
"(",
"output",
",",
"\"show-link-info\"",
")",
"linkinfo_rbridgeid_key",
"=",
"ET",
".",
"SubElement",
"(",
"show_link_info",
",",
"\"linkinfo-rbridgeid\"",
")",
"linkinfo_rbridgeid_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'linkinfo_rbridgeid'",
")",
"linkinfo_version",
"=",
"ET",
".",
"SubElement",
"(",
"show_link_info",
",",
"\"linkinfo-version\"",
")",
"linkinfo_version",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'linkinfo_version'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
]
| 48.733333 | 19.6 |
def get_all_params(self, session=None):
"""Return the parameters in a list of array."""
_params = []
for p in self.all_params:
if session is None:
_params.append(p.eval())
else:
_params.append(session.run(p))
return _params | [
"def",
"get_all_params",
"(",
"self",
",",
"session",
"=",
"None",
")",
":",
"_params",
"=",
"[",
"]",
"for",
"p",
"in",
"self",
".",
"all_params",
":",
"if",
"session",
"is",
"None",
":",
"_params",
".",
"append",
"(",
"p",
".",
"eval",
"(",
")",
")",
"else",
":",
"_params",
".",
"append",
"(",
"session",
".",
"run",
"(",
"p",
")",
")",
"return",
"_params"
]
| 33.666667 | 9.333333 |
def add_secondary_ip(self, ip_address, interface=1):
"""Adds an IP address as a secondary IP address
:param ip_address: String IP address to add as a secondary IP
        :param interface: Integer associated with the interface/device number
:return: None
:raises: AWSAPIError, EC2UtilError
"""
log = logging.getLogger(self.cls_logger + '.add_secondary_ip')
# Get the ENI ID
eni_id = self.get_eni_id(interface)
# Verify the ENI ID was found
if eni_id is None:
msg = 'Unable to find the corresponding ENI ID for interface: {i}'. \
format(i=interface)
log.error(msg)
raise EC2UtilError(msg)
else:
log.info('Found ENI ID: {e}'.format(e=eni_id))
# Assign the secondary IP address
log.info('Attempting to assign the secondary IP address...')
try:
self.client.assign_private_ip_addresses(
NetworkInterfaceId=eni_id,
PrivateIpAddresses=[
ip_address,
],
AllowReassignment=True
)
except ClientError:
_, ex, trace = sys.exc_info()
msg = 'Unable to assign secondary IP address\n{e}'.format(e=str(ex))
log.error(msg)
raise AWSAPIError, msg, trace
log.info('Successfully added secondary IP address {s} to ENI ID {e} on interface {i}'.format(
s=ip_address, e=eni_id, i=interface)) | [
"def",
"add_secondary_ip",
"(",
"self",
",",
"ip_address",
",",
"interface",
"=",
"1",
")",
":",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"self",
".",
"cls_logger",
"+",
"'.add_secondary_ip'",
")",
"# Get the ENI ID",
"eni_id",
"=",
"self",
".",
"get_eni_id",
"(",
"interface",
")",
"# Verify the ENI ID was found",
"if",
"eni_id",
"is",
"None",
":",
"msg",
"=",
"'Unable to find the corresponding ENI ID for interface: {i}'",
".",
"format",
"(",
"i",
"=",
"interface",
")",
"log",
".",
"error",
"(",
"msg",
")",
"raise",
"EC2UtilError",
"(",
"msg",
")",
"else",
":",
"log",
".",
"info",
"(",
"'Found ENI ID: {e}'",
".",
"format",
"(",
"e",
"=",
"eni_id",
")",
")",
"# Assign the secondary IP address",
"log",
".",
"info",
"(",
"'Attempting to assign the secondary IP address...'",
")",
"try",
":",
"self",
".",
"client",
".",
"assign_private_ip_addresses",
"(",
"NetworkInterfaceId",
"=",
"eni_id",
",",
"PrivateIpAddresses",
"=",
"[",
"ip_address",
",",
"]",
",",
"AllowReassignment",
"=",
"True",
")",
"except",
"ClientError",
":",
"_",
",",
"ex",
",",
"trace",
"=",
"sys",
".",
"exc_info",
"(",
")",
"msg",
"=",
"'Unable to assign secondary IP address\\n{e}'",
".",
"format",
"(",
"e",
"=",
"str",
"(",
"ex",
")",
")",
"log",
".",
"error",
"(",
"msg",
")",
"raise",
"AWSAPIError",
",",
"msg",
",",
"trace",
"log",
".",
"info",
"(",
"'Successfully added secondary IP address {s} to ENI ID {e} on interface {i}'",
".",
"format",
"(",
"s",
"=",
"ip_address",
",",
"e",
"=",
"eni_id",
",",
"i",
"=",
"interface",
")",
")"
]
| 38.794872 | 18.025641 |
def bulk(iterable, index=INDEX_NAME, doc_type=DOC_TYPE, action='index'):
"""
Wrapper of elasticsearch's bulk method
    Converts an iterable of models to document operations and submits them to
Elasticsearch. Returns a count of operations when done.
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.bulk
https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
"""
actions = compact(dict_to_op(
to_dict(model),
index_name=INDEX_NAME,
doc_type=DOC_TYPE,
op_type=action,
) for model in iterable)
# fail fast if there are no actions
if not actions:
return 0
items, _ = es_bulk(es_conn, actions, doc_type=doc_type, index=index)
return items | [
"def",
"bulk",
"(",
"iterable",
",",
"index",
"=",
"INDEX_NAME",
",",
"doc_type",
"=",
"DOC_TYPE",
",",
"action",
"=",
"'index'",
")",
":",
"actions",
"=",
"compact",
"(",
"dict_to_op",
"(",
"to_dict",
"(",
"model",
")",
",",
"index_name",
"=",
"INDEX_NAME",
",",
"doc_type",
"=",
"DOC_TYPE",
",",
"op_type",
"=",
"action",
",",
")",
"for",
"model",
"in",
"iterable",
")",
"# fail fast if there are no actions",
"if",
"not",
"actions",
":",
"return",
"0",
"items",
",",
"_",
"=",
"es_bulk",
"(",
"es_conn",
",",
"actions",
",",
"doc_type",
"=",
"doc_type",
",",
"index",
"=",
"index",
")",
"return",
"items"
]
| 32 | 23.666667 |
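
The helper above turns each model into one bulk operation before handing the list to elasticsearch-py. A hedged sketch of what such per-document actions commonly look like; the exact shape produced by `dict_to_op` is not shown in this row, so the fields below are illustrative:

def to_action(doc, index_name, doc_type, op_type='index'):
    return {
        '_op_type': op_type,
        '_index': index_name,
        '_type': doc_type,
        '_id': doc.get('id'),
        '_source': doc,
    }

docs = [{'id': 1, 'title': 'first'}, {'id': 2, 'title': 'second'}]
actions = [to_action(d, 'my-index', 'my-doc-type') for d in docs]
print(actions)
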
def expand_item(self, item, open_all=True):
"""Display a node as expanded
:param item: The item to show expanded
:param open_all: Whether all child nodes should be recursively
expanded.
"""
self.expand_row(self._view_path_for(item), open_all) | [
"def",
"expand_item",
"(",
"self",
",",
"item",
",",
"open_all",
"=",
"True",
")",
":",
"self",
".",
"expand_row",
"(",
"self",
".",
"_view_path_for",
"(",
"item",
")",
",",
"open_all",
")"
]
| 37.625 | 13.125 |
def generic_div(a, b):
"""Simple function to divide two numbers"""
logger.debug('Called generic_div({}, {})'.format(a, b))
return a / b | [
"def",
"generic_div",
"(",
"a",
",",
"b",
")",
":",
"logger",
".",
"debug",
"(",
"'Called generic_div({}, {})'",
".",
"format",
"(",
"a",
",",
"b",
")",
")",
"return",
"a",
"/",
"b"
]
| 36 | 15.25 |
def reset_socat(use_sudo=False):
"""
Finds and closes all processes of `socat`.
:param use_sudo: Use `sudo` command. As Docker-Fabric does not run `socat` with `sudo`, this is by default set to
``False``. Setting it to ``True`` could unintentionally remove instances from other users.
:type use_sudo: bool
"""
output = stdout_result('ps -o pid -C socat', quiet=True)
pids = output.split('\n')[1:]
puts("Removing process(es) with id(s) {0}.".format(', '.join(pids)))
which = sudo if use_sudo else run
which('kill {0}'.format(' '.join(pids)), quiet=True) | [
"def",
"reset_socat",
"(",
"use_sudo",
"=",
"False",
")",
":",
"output",
"=",
"stdout_result",
"(",
"'ps -o pid -C socat'",
",",
"quiet",
"=",
"True",
")",
"pids",
"=",
"output",
".",
"split",
"(",
"'\\n'",
")",
"[",
"1",
":",
"]",
"puts",
"(",
"\"Removing process(es) with id(s) {0}.\"",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"pids",
")",
")",
")",
"which",
"=",
"sudo",
"if",
"use_sudo",
"else",
"run",
"which",
"(",
"'kill {0}'",
".",
"format",
"(",
"' '",
".",
"join",
"(",
"pids",
")",
")",
",",
"quiet",
"=",
"True",
")"
]
| 45.153846 | 21.615385 |
async def async_enqueue_download(self, resource):
'''
Enqueue the download of the given foreign resource.
'''
worker = self.pick_sticky(resource.url_string)
await worker.enqueue(enums.Task.DOWNLOAD, (resource,)) | [
"async",
"def",
"async_enqueue_download",
"(",
"self",
",",
"resource",
")",
":",
"worker",
"=",
"self",
".",
"pick_sticky",
"(",
"resource",
".",
"url_string",
")",
"await",
"worker",
".",
"enqueue",
"(",
"enums",
".",
"Task",
".",
"DOWNLOAD",
",",
"(",
"resource",
",",
")",
")"
]
| 41 | 20.333333 |
def certify_dict(
value, schema=None, allow_extra=False, required=True, key_certifier=None, value_certifier=None,
include_collections=False,
):
"""
Certifies a dictionary, checking it against an optional schema.
The schema should be a dictionary, with keys corresponding to the expected keys in `value`,
but with the values replaced by functions which will be called to with the corresponding
value in the input.
A simple example:
>>> certifier = certify_dict(schema={
... 'id': certify_key(kind='Model'),
... 'count': certify_int(min=0),
... })
>>> certifier({'id': self.key, 'count': self.count})
:param dict|Mapping|MutableMapping value:
The value to be certified.
:param dict schema:
The schema against which the value should be checked.
:param bool allow_extra:
Set to `True` to ignore extra keys.
:param bool required:
Whether the value can't be `None`. Defaults to True.
:param callable key_certifier:
callable that receives the key to certify (ignoring schema keys).
:param callable value_certifier:
callable that receives the value to certify (ignoring schema values).
:param bool include_collections:
Include types from collections.
:return:
The certified dict.
:rtype:
dict|Mapping|MutableMapping
:raises CertifierTypeError:
The type is invalid
:raises CertifierValueError:
The value is invalid
"""
cls = dict
# Certify our kwargs:
certify_params(
(certify_bool, 'allow_extra', allow_extra),
(certify_bool, 'include_collections', include_collections),
)
if certify_required(
value=value,
required=required,
):
return
# Check the type(s):
types = [cls]
if include_collections:
types.extend([Mapping, MutableMapping])
types = tuple(types)
if not isinstance(value, types):
raise CertifierTypeError(
message="Expected {t} but the type is {cls!r}".format(
cls=cls,
t=value.__class__.__name__,
),
value=value,
required=required,
)
certify_dict_schema(
value=value,
schema=schema,
key_certifier=key_certifier,
value_certifier=value_certifier,
required=required,
allow_extra=allow_extra,
) | [
"def",
"certify_dict",
"(",
"value",
",",
"schema",
"=",
"None",
",",
"allow_extra",
"=",
"False",
",",
"required",
"=",
"True",
",",
"key_certifier",
"=",
"None",
",",
"value_certifier",
"=",
"None",
",",
"include_collections",
"=",
"False",
",",
")",
":",
"cls",
"=",
"dict",
"# Certify our kwargs:",
"certify_params",
"(",
"(",
"certify_bool",
",",
"'allow_extra'",
",",
"allow_extra",
")",
",",
"(",
"certify_bool",
",",
"'include_collections'",
",",
"include_collections",
")",
",",
")",
"if",
"certify_required",
"(",
"value",
"=",
"value",
",",
"required",
"=",
"required",
",",
")",
":",
"return",
"# Check the type(s):",
"types",
"=",
"[",
"cls",
"]",
"if",
"include_collections",
":",
"types",
".",
"extend",
"(",
"[",
"Mapping",
",",
"MutableMapping",
"]",
")",
"types",
"=",
"tuple",
"(",
"types",
")",
"if",
"not",
"isinstance",
"(",
"value",
",",
"types",
")",
":",
"raise",
"CertifierTypeError",
"(",
"message",
"=",
"\"Expected {t} but the type is {cls!r}\"",
".",
"format",
"(",
"cls",
"=",
"cls",
",",
"t",
"=",
"value",
".",
"__class__",
".",
"__name__",
",",
")",
",",
"value",
"=",
"value",
",",
"required",
"=",
"required",
",",
")",
"certify_dict_schema",
"(",
"value",
"=",
"value",
",",
"schema",
"=",
"schema",
",",
"key_certifier",
"=",
"key_certifier",
",",
"value_certifier",
"=",
"value_certifier",
",",
"required",
"=",
"required",
",",
"allow_extra",
"=",
"allow_extra",
",",
")"
]
| 30.948052 | 19.12987 |
def repair_duplicate_names(self):
"""
Prior to 1101.1.1, pmxbot would incorrectly create new karma records
for individuals with multiple names.
This routine corrects those records.
"""
for name in self._all_names():
cur = self.db.find({'names': name})
main_doc = next(cur)
for duplicate in cur:
query = {'_id': main_doc['_id']}
update = {
'$inc': {'value': duplicate['value']},
'$push': {'names': {'$each': duplicate['names']}},
}
self.db.update(query, update)
self.db.remove(duplicate) | [
"def",
"repair_duplicate_names",
"(",
"self",
")",
":",
"for",
"name",
"in",
"self",
".",
"_all_names",
"(",
")",
":",
"cur",
"=",
"self",
".",
"db",
".",
"find",
"(",
"{",
"'names'",
":",
"name",
"}",
")",
"main_doc",
"=",
"next",
"(",
"cur",
")",
"for",
"duplicate",
"in",
"cur",
":",
"query",
"=",
"{",
"'_id'",
":",
"main_doc",
"[",
"'_id'",
"]",
"}",
"update",
"=",
"{",
"'$inc'",
":",
"{",
"'value'",
":",
"duplicate",
"[",
"'value'",
"]",
"}",
",",
"'$push'",
":",
"{",
"'names'",
":",
"{",
"'$each'",
":",
"duplicate",
"[",
"'names'",
"]",
"}",
"}",
",",
"}",
"self",
".",
"db",
".",
"update",
"(",
"query",
",",
"update",
")",
"self",
".",
"db",
".",
"remove",
"(",
"duplicate",
")"
]
| 30.647059 | 10.882353 |
def _straight_line_vertices(adjacency_mat, node_coords, directed=False):
"""
Generate the vertices for straight lines between nodes.
If it is a directed graph, it also generates the vertices which can be
passed to an :class:`ArrowVisual`.
Parameters
----------
adjacency_mat : array
The adjacency matrix of the graph
node_coords : array
The current coordinates of all nodes in the graph
directed : bool
        Whether the graph is directed. If this is true it will also generate
the vertices for arrows which can be passed to :class:`ArrowVisual`.
Returns
-------
vertices : tuple
        Returns a tuple containing (`line_vertices`,
`arrow_vertices`)
"""
if not issparse(adjacency_mat):
adjacency_mat = np.asarray(adjacency_mat, float)
if (adjacency_mat.ndim != 2 or adjacency_mat.shape[0] !=
adjacency_mat.shape[1]):
raise ValueError("Adjacency matrix should be square.")
arrow_vertices = np.array([])
edges = _get_edges(adjacency_mat)
line_vertices = node_coords[edges.ravel()]
if directed:
arrows = np.array(list(_get_directed_edges(adjacency_mat)))
arrow_vertices = node_coords[arrows.ravel()]
arrow_vertices = arrow_vertices.reshape((len(arrow_vertices)/2, 4))
return line_vertices, arrow_vertices | [
"def",
"_straight_line_vertices",
"(",
"adjacency_mat",
",",
"node_coords",
",",
"directed",
"=",
"False",
")",
":",
"if",
"not",
"issparse",
"(",
"adjacency_mat",
")",
":",
"adjacency_mat",
"=",
"np",
".",
"asarray",
"(",
"adjacency_mat",
",",
"float",
")",
"if",
"(",
"adjacency_mat",
".",
"ndim",
"!=",
"2",
"or",
"adjacency_mat",
".",
"shape",
"[",
"0",
"]",
"!=",
"adjacency_mat",
".",
"shape",
"[",
"1",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"Adjacency matrix should be square.\"",
")",
"arrow_vertices",
"=",
"np",
".",
"array",
"(",
"[",
"]",
")",
"edges",
"=",
"_get_edges",
"(",
"adjacency_mat",
")",
"line_vertices",
"=",
"node_coords",
"[",
"edges",
".",
"ravel",
"(",
")",
"]",
"if",
"directed",
":",
"arrows",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"_get_directed_edges",
"(",
"adjacency_mat",
")",
")",
")",
"arrow_vertices",
"=",
"node_coords",
"[",
"arrows",
".",
"ravel",
"(",
")",
"]",
"arrow_vertices",
"=",
"arrow_vertices",
".",
"reshape",
"(",
"(",
"len",
"(",
"arrow_vertices",
")",
"/",
"2",
",",
"4",
")",
")",
"return",
"line_vertices",
",",
"arrow_vertices"
]
| 32.047619 | 22.333333 |
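
The core step above indexes the node coordinates with a flattened edge list to get one pair of endpoints per edge. A small self-contained sketch (here unique undirected edges are taken from the upper triangle, one plausible reading of the `_get_edges` helper):

import numpy as np

adjacency = np.array([[0, 1, 0],
                      [1, 0, 1],
                      [0, 1, 0]], dtype=float)
node_coords = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]])

edges = np.transpose(np.nonzero(np.triu(adjacency)))   # one row per undirected edge
line_vertices = node_coords[edges.ravel()]             # endpoint pairs, ready for a line visual
print(edges)
print(line_vertices)
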
def prepare_data(self):
"""Prepare widget data for template."""
result = {}
for field in self.fields:
data = self.data.get(field.name)
result[field.name] = field.prepare_data(data)
return result | [
"def",
"prepare_data",
"(",
"self",
")",
":",
"result",
"=",
"{",
"}",
"for",
"field",
"in",
"self",
".",
"fields",
":",
"data",
"=",
"self",
".",
"data",
".",
"get",
"(",
"field",
".",
"name",
")",
"result",
"[",
"field",
".",
"name",
"]",
"=",
"field",
".",
"prepare_data",
"(",
"data",
")",
"return",
"result"
]
| 27.111111 | 18.333333 |
def agent_path(cls, project, agent):
"""Return a fully-qualified agent string."""
return google.api_core.path_template.expand(
'projects/{project}/agents/{agent}',
project=project,
agent=agent,
) | [
"def",
"agent_path",
"(",
"cls",
",",
"project",
",",
"agent",
")",
":",
"return",
"google",
".",
"api_core",
".",
"path_template",
".",
"expand",
"(",
"'projects/{project}/agents/{agent}'",
",",
"project",
"=",
"project",
",",
"agent",
"=",
"agent",
",",
")"
]
| 35.571429 | 11.857143 |
def get_fsapi_endpoint(self):
"""Parse the fsapi endpoint from the device url."""
endpoint = yield from self.__session.get(self.fsapi_device_url, timeout = self.timeout)
text = yield from endpoint.text(encoding='utf-8')
doc = objectify.fromstring(text)
return doc.webfsapi.text | [
"def",
"get_fsapi_endpoint",
"(",
"self",
")",
":",
"endpoint",
"=",
"yield",
"from",
"self",
".",
"__session",
".",
"get",
"(",
"self",
".",
"fsapi_device_url",
",",
"timeout",
"=",
"self",
".",
"timeout",
")",
"text",
"=",
"yield",
"from",
"endpoint",
".",
"text",
"(",
"encoding",
"=",
"'utf-8'",
")",
"doc",
"=",
"objectify",
".",
"fromstring",
"(",
"text",
")",
"return",
"doc",
".",
"webfsapi",
".",
"text"
]
| 52 | 15.166667 |
def get_jids():
'''
Return a list of all job ids
'''
serv = _get_serv(ret=None)
jids = _get_list(serv, 'jids')
loads = serv.get_multi(jids) # {jid: load, jid: load, ...}
ret = {}
for jid, load in six.iteritems(loads):
ret[jid] = salt.utils.jid.format_jid_instance(jid, salt.utils.json.loads(load))
return ret | [
"def",
"get_jids",
"(",
")",
":",
"serv",
"=",
"_get_serv",
"(",
"ret",
"=",
"None",
")",
"jids",
"=",
"_get_list",
"(",
"serv",
",",
"'jids'",
")",
"loads",
"=",
"serv",
".",
"get_multi",
"(",
"jids",
")",
"# {jid: load, jid: load, ...}",
"ret",
"=",
"{",
"}",
"for",
"jid",
",",
"load",
"in",
"six",
".",
"iteritems",
"(",
"loads",
")",
":",
"ret",
"[",
"jid",
"]",
"=",
"salt",
".",
"utils",
".",
"jid",
".",
"format_jid_instance",
"(",
"jid",
",",
"salt",
".",
"utils",
".",
"json",
".",
"loads",
"(",
"load",
")",
")",
"return",
"ret"
]
| 31.181818 | 21.909091 |
def parse_token(self, token):
"""
Obtain a user from a signed token.
"""
try:
data = self.unsign(token)
except signing.SignatureExpired:
logger.debug("Expired token: %s", token)
return
except signing.BadSignature:
logger.debug("Bad token: %s", token)
return
except Exception:
logger.exception(
"Valid signature but unexpected token - if you changed "
"django-sesame settings, you must regenerate tokens")
return
user_pk, data = self.packer.unpack_pk(data)
user = self.get_user(user_pk)
if user is None:
logger.debug("Unknown token: %s", token)
return
h = crypto.pbkdf2(
self.get_revocation_key(user),
self.salt,
self.iterations,
digest=self.digest,
)
if not crypto.constant_time_compare(data, h):
logger.debug("Invalid token: %s", token)
return
logger.debug("Valid token for user %s: %s", user, token)
return user | [
"def",
"parse_token",
"(",
"self",
",",
"token",
")",
":",
"try",
":",
"data",
"=",
"self",
".",
"unsign",
"(",
"token",
")",
"except",
"signing",
".",
"SignatureExpired",
":",
"logger",
".",
"debug",
"(",
"\"Expired token: %s\"",
",",
"token",
")",
"return",
"except",
"signing",
".",
"BadSignature",
":",
"logger",
".",
"debug",
"(",
"\"Bad token: %s\"",
",",
"token",
")",
"return",
"except",
"Exception",
":",
"logger",
".",
"exception",
"(",
"\"Valid signature but unexpected token - if you changed \"",
"\"django-sesame settings, you must regenerate tokens\"",
")",
"return",
"user_pk",
",",
"data",
"=",
"self",
".",
"packer",
".",
"unpack_pk",
"(",
"data",
")",
"user",
"=",
"self",
".",
"get_user",
"(",
"user_pk",
")",
"if",
"user",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"\"Unknown token: %s\"",
",",
"token",
")",
"return",
"h",
"=",
"crypto",
".",
"pbkdf2",
"(",
"self",
".",
"get_revocation_key",
"(",
"user",
")",
",",
"self",
".",
"salt",
",",
"self",
".",
"iterations",
",",
"digest",
"=",
"self",
".",
"digest",
",",
")",
"if",
"not",
"crypto",
".",
"constant_time_compare",
"(",
"data",
",",
"h",
")",
":",
"logger",
".",
"debug",
"(",
"\"Invalid token: %s\"",
",",
"token",
")",
"return",
"logger",
".",
"debug",
"(",
"\"Valid token for user %s: %s\"",
",",
"user",
",",
"token",
")",
"return",
"user"
]
| 32.735294 | 14.794118 |
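
The verification above boils down to re-deriving a key with PBKDF2 and comparing it in constant time. A standalone sketch with the standard library; the salt, iteration count and key length here are illustrative, not django-sesame's actual defaults:

import hashlib
import hmac

def derive(revocation_key, salt=b'sesame', iterations=10000):
    return hashlib.pbkdf2_hmac('sha256', revocation_key, salt, iterations, dklen=10)

stored = derive(b'user-password-hash')          # what would be packed into the token
candidate = derive(b'user-password-hash')       # re-derived at verification time
print(hmac.compare_digest(stored, candidate))   # True; a changed revocation key flips this to False
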
def get_inputs_for_state(self, state):
"""Retrieves all input data of a state. If several data flows are connected to an input port the
most current data is used for the specific input port.
:param state: the state of which the input data is determined
:return: the input data of the target state
"""
result_dict = {}
tmp_dict = self.get_default_input_values_for_state(state)
result_dict.update(tmp_dict)
for input_port_key, value in state.input_data_ports.items():
# for all input keys fetch the correct data_flow connection and read data into the result_dict
actual_value = None
actual_value_time = 0
for data_flow_key, data_flow in self.data_flows.items():
if data_flow.to_key == input_port_key:
if data_flow.to_state == state.state_id:
# fetch data from the scoped_data list: the key is the data_port_key + the state_id
key = str(data_flow.from_key) + data_flow.from_state
if key in self.scoped_data:
if actual_value is None or actual_value_time < self.scoped_data[key].timestamp:
actual_value = deepcopy(self.scoped_data[key].value)
actual_value_time = self.scoped_data[key].timestamp
if actual_value is not None:
result_dict[value.name] = actual_value
return result_dict | [
"def",
"get_inputs_for_state",
"(",
"self",
",",
"state",
")",
":",
"result_dict",
"=",
"{",
"}",
"tmp_dict",
"=",
"self",
".",
"get_default_input_values_for_state",
"(",
"state",
")",
"result_dict",
".",
"update",
"(",
"tmp_dict",
")",
"for",
"input_port_key",
",",
"value",
"in",
"state",
".",
"input_data_ports",
".",
"items",
"(",
")",
":",
"# for all input keys fetch the correct data_flow connection and read data into the result_dict",
"actual_value",
"=",
"None",
"actual_value_time",
"=",
"0",
"for",
"data_flow_key",
",",
"data_flow",
"in",
"self",
".",
"data_flows",
".",
"items",
"(",
")",
":",
"if",
"data_flow",
".",
"to_key",
"==",
"input_port_key",
":",
"if",
"data_flow",
".",
"to_state",
"==",
"state",
".",
"state_id",
":",
"# fetch data from the scoped_data list: the key is the data_port_key + the state_id",
"key",
"=",
"str",
"(",
"data_flow",
".",
"from_key",
")",
"+",
"data_flow",
".",
"from_state",
"if",
"key",
"in",
"self",
".",
"scoped_data",
":",
"if",
"actual_value",
"is",
"None",
"or",
"actual_value_time",
"<",
"self",
".",
"scoped_data",
"[",
"key",
"]",
".",
"timestamp",
":",
"actual_value",
"=",
"deepcopy",
"(",
"self",
".",
"scoped_data",
"[",
"key",
"]",
".",
"value",
")",
"actual_value_time",
"=",
"self",
".",
"scoped_data",
"[",
"key",
"]",
".",
"timestamp",
"if",
"actual_value",
"is",
"not",
"None",
":",
"result_dict",
"[",
"value",
".",
"name",
"]",
"=",
"actual_value",
"return",
"result_dict"
]
| 48.645161 | 26.354839 |
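
The inner loops above keep whichever connected value carries the newest timestamp. The same selection in isolation, with a plain tuple standing in for the ScopedData entries:

scoped = {                                  # key -> (value, timestamp)
    'port1.stateA': ('old value', 10),
    'port2.stateB': ('new value', 42),
}
actual_value, actual_time = None, 0
for key in ('port1.stateA', 'port2.stateB'):    # keys reachable via the data flows
    value, ts = scoped[key]
    if actual_value is None or actual_time < ts:
        actual_value, actual_time = value, ts
print(actual_value)                             # 'new value'
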
def set_value(self, name, value, ignore_error=False, block_user_signals=False):
"""
Sets the variable of the supplied name to the supplied value.
Setting block_user_signals=True will temporarily block the widget from
sending any signals when setting the value.
"""
# first clean up the name
name = self._clean_up_name(name)
# If we're supposed to, block the user signals for this parameter
if block_user_signals: self.block_user_signals(name, ignore_error)
# now get the parameter object
x = self._find_parameter(name.split('/'), quiet=ignore_error)
# quit if it pooped.
if x == None: return None
# for lists, make sure the value exists!!
if x.type() in ['list']:
# Make sure it exists before trying to set it
if str(value) in list(x.forward.keys()): x.setValue(str(value))
# Otherwise default to the first key
else: x.setValue(list(x.forward.keys())[0])
# Bail to a hopeful set method for other types
else: x.setValue(eval(x.opts['type'])(value))
# If we're supposed to unblock the user signals for this parameter
if block_user_signals: self.unblock_user_signals(name, ignore_error)
return self | [
"def",
"set_value",
"(",
"self",
",",
"name",
",",
"value",
",",
"ignore_error",
"=",
"False",
",",
"block_user_signals",
"=",
"False",
")",
":",
"# first clean up the name",
"name",
"=",
"self",
".",
"_clean_up_name",
"(",
"name",
")",
"# If we're supposed to, block the user signals for this parameter",
"if",
"block_user_signals",
":",
"self",
".",
"block_user_signals",
"(",
"name",
",",
"ignore_error",
")",
"# now get the parameter object",
"x",
"=",
"self",
".",
"_find_parameter",
"(",
"name",
".",
"split",
"(",
"'/'",
")",
",",
"quiet",
"=",
"ignore_error",
")",
"# quit if it pooped.",
"if",
"x",
"==",
"None",
":",
"return",
"None",
"# for lists, make sure the value exists!!",
"if",
"x",
".",
"type",
"(",
")",
"in",
"[",
"'list'",
"]",
":",
"# Make sure it exists before trying to set it",
"if",
"str",
"(",
"value",
")",
"in",
"list",
"(",
"x",
".",
"forward",
".",
"keys",
"(",
")",
")",
":",
"x",
".",
"setValue",
"(",
"str",
"(",
"value",
")",
")",
"# Otherwise default to the first key",
"else",
":",
"x",
".",
"setValue",
"(",
"list",
"(",
"x",
".",
"forward",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
")",
"# Bail to a hopeful set method for other types",
"else",
":",
"x",
".",
"setValue",
"(",
"eval",
"(",
"x",
".",
"opts",
"[",
"'type'",
"]",
")",
"(",
"value",
")",
")",
"# If we're supposed to unblock the user signals for this parameter",
"if",
"block_user_signals",
":",
"self",
".",
"unblock_user_signals",
"(",
"name",
",",
"ignore_error",
")",
"return",
"self"
]
| 37.914286 | 23.4 |
def connect(self):
""" Connects to the device and starts the read thread """
self.serial = serial.Serial(port=self.port, baudrate=self.baudrate, timeout=self.timeout)
# Start read thread
self.alive = True
self.rxThread = threading.Thread(target=self._readLoop)
self.rxThread.daemon = True
self.rxThread.start() | [
"def",
"connect",
"(",
"self",
")",
":",
"self",
".",
"serial",
"=",
"serial",
".",
"Serial",
"(",
"port",
"=",
"self",
".",
"port",
",",
"baudrate",
"=",
"self",
".",
"baudrate",
",",
"timeout",
"=",
"self",
".",
"timeout",
")",
"# Start read thread",
"self",
".",
"alive",
"=",
"True",
"self",
".",
"rxThread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"_readLoop",
")",
"self",
".",
"rxThread",
".",
"daemon",
"=",
"True",
"self",
".",
"rxThread",
".",
"start",
"(",
")"
]
| 47 | 18.125 |
def _mute(self):
""" mute vlc """
if self.muted:
self._sendCommand("volume {}\n".format(self.actual_volume))
if logger.isEnabledFor(logging.DEBUG):
logger.debug('VLC unmuted: {0} ({1}%)'.format(self.actual_volume, int(100 * self.actual_volume / self.max_volume)))
else:
if self.actual_volume == -1:
self._get_volume()
self._sendCommand("volume 0\n")
if logger.isEnabledFor(logging.DEBUG):
logger.debug('VLC muted: 0 (0%)') | [
"def",
"_mute",
"(",
"self",
")",
":",
"if",
"self",
".",
"muted",
":",
"self",
".",
"_sendCommand",
"(",
"\"volume {}\\n\"",
".",
"format",
"(",
"self",
".",
"actual_volume",
")",
")",
"if",
"logger",
".",
"isEnabledFor",
"(",
"logging",
".",
"DEBUG",
")",
":",
"logger",
".",
"debug",
"(",
"'VLC unmuted: {0} ({1}%)'",
".",
"format",
"(",
"self",
".",
"actual_volume",
",",
"int",
"(",
"100",
"*",
"self",
".",
"actual_volume",
"/",
"self",
".",
"max_volume",
")",
")",
")",
"else",
":",
"if",
"self",
".",
"actual_volume",
"==",
"-",
"1",
":",
"self",
".",
"_get_volume",
"(",
")",
"self",
".",
"_sendCommand",
"(",
"\"volume 0\\n\"",
")",
"if",
"logger",
".",
"isEnabledFor",
"(",
"logging",
".",
"DEBUG",
")",
":",
"logger",
".",
"debug",
"(",
"'VLC muted: 0 (0%)'",
")"
]
| 41.769231 | 20.692308 |
def list_teams(profile="github", ignore_cache=False):
'''
Lists all teams within the organization.
profile
The name of the profile configuration to use. Defaults to ``github``.
ignore_cache
Bypasses the use of cached teams.
CLI Example:
.. code-block:: bash
salt myminion github.list_teams
.. versionadded:: 2016.11.0
'''
key = 'github.{0}:teams'.format(
_get_config_value(profile, 'org_name')
)
if key not in __context__ or ignore_cache:
client = _get_client(profile)
organization = client.get_organization(
_get_config_value(profile, 'org_name')
)
teams_data = organization.get_teams()
teams = {}
for team in teams_data:
# Note that _rawData is used to access some properties here as they
# are not exposed in older versions of PyGithub. It's VERY important
# to use team._rawData instead of team.raw_data, as the latter forces
# an API call to retrieve team details again.
teams[team.name] = {
'id': team.id,
'slug': team.slug,
'description': team._rawData['description'],
'permission': team.permission,
'privacy': team._rawData['privacy']
}
__context__[key] = teams
return __context__[key] | [
"def",
"list_teams",
"(",
"profile",
"=",
"\"github\"",
",",
"ignore_cache",
"=",
"False",
")",
":",
"key",
"=",
"'github.{0}:teams'",
".",
"format",
"(",
"_get_config_value",
"(",
"profile",
",",
"'org_name'",
")",
")",
"if",
"key",
"not",
"in",
"__context__",
"or",
"ignore_cache",
":",
"client",
"=",
"_get_client",
"(",
"profile",
")",
"organization",
"=",
"client",
".",
"get_organization",
"(",
"_get_config_value",
"(",
"profile",
",",
"'org_name'",
")",
")",
"teams_data",
"=",
"organization",
".",
"get_teams",
"(",
")",
"teams",
"=",
"{",
"}",
"for",
"team",
"in",
"teams_data",
":",
"# Note that _rawData is used to access some properties here as they",
"# are not exposed in older versions of PyGithub. It's VERY important",
"# to use team._rawData instead of team.raw_data, as the latter forces",
"# an API call to retrieve team details again.",
"teams",
"[",
"team",
".",
"name",
"]",
"=",
"{",
"'id'",
":",
"team",
".",
"id",
",",
"'slug'",
":",
"team",
".",
"slug",
",",
"'description'",
":",
"team",
".",
"_rawData",
"[",
"'description'",
"]",
",",
"'permission'",
":",
"team",
".",
"permission",
",",
"'privacy'",
":",
"team",
".",
"_rawData",
"[",
"'privacy'",
"]",
"}",
"__context__",
"[",
"key",
"]",
"=",
"teams",
"return",
"__context__",
"[",
"key",
"]"
]
| 30.818182 | 21.045455 |
def model_creation_opt(dicCnfg, aryMdlParams, strPathHrf=None, varRat=None,
lgcPrint=True):
"""
Create or load pRF model time courses.
Parameters
----------
dicCnfg : dict
Dictionary containing config parameters.
aryMdlParams : numpy arrays
x, y and sigma parameters.
strPathHrf : str or None:
Path to npy file with custom hrf parameters. If None, default
parameters will be used.
varRat : float, default None
Ratio of size suppressive surround to size of center pRF
lgcPrint : boolean
Whether print statements should be executed.
Returns
-------
aryPrfTc : np.array
4D numpy array with pRF time course models, with following dimensions:
`aryPrfTc[x-position, y-position, SD, volume]`.
"""
# *************************************************************************
# *** Load parameters from config file
# Load config parameters from dictionary into namespace:
cfg = cls_set_config(dicCnfg)
# *************************************************************************
if cfg.lgcCrteMdl:
# *********************************************************************
# *** Load spatial condition information
arySptExpInf = np.load(cfg.strSptExpInf)
# Here we assume scientific convention and orientation of images where
# the origin should fall in the lower left corner, the x-axis occupies
# the width and the y-axis occupies the height dimension of the screen.
# We also assume that the first dimension that the user provides
# indexes x and the second indexes the y-axis. Since python is column
# major (i.e. first indexes columns, only then rows), we need to rotate
# arySptExpInf by 90 degrees rightward. This will insure that with the
# 0th axis we index the scientific x-axis and higher values move us to
# the right on that x-axis. It will also ensure that the 1st
# python axis indexes the scientific y-axis and higher values will
# move us up.
arySptExpInf = np.rot90(arySptExpInf, k=3)
# *********************************************************************
# *********************************************************************
# *** Load temporal condition information
# load temporal information about presented stimuli
aryTmpExpInf = np.load(cfg.strTmpExpInf)
# add fourth column to make it appropriate for pyprf_feature
if aryTmpExpInf.shape[-1] == 3:
vecNewCol = np.greater(aryTmpExpInf[:, 0], 0).astype(np.float16)
aryTmpExpInf = np.concatenate(
(aryTmpExpInf, np.expand_dims(vecNewCol, axis=1)), axis=1)
# *********************************************************************
# *********************************************************************
# If desired by user, also create model parameters for supp surround
if varRat is not None:
aryMdlParamsSur = np.copy(aryMdlParams)
aryMdlParamsSur[:, 2] = aryMdlParamsSur[:, 2] * varRat
# *********************************************************************
# *********************************************************************
# *** Create 2D Gauss model responses to spatial conditions.
aryMdlRsp = crt_mdl_rsp(arySptExpInf, (int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)),
aryMdlParams, cfg.varPar, lgcPrint=lgcPrint)
# If desired by user, also create model responses for supp surround
if varRat is not None:
aryMdlRspSur = crt_mdl_rsp(arySptExpInf, (int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)),
aryMdlParamsSur, cfg.varPar,
lgcPrint=lgcPrint)
del(arySptExpInf)
# *********************************************************************
# *********************************************************************
# *** Create prf time course models
# Check whether path to npy file with hrf parameters was provided
if strPathHrf is not None:
if lgcPrint:
print('---------Load custom hrf parameters')
aryCstPrm = np.load(strPathHrf)
dctPrm = {}
dctPrm['peak_delay'] = aryCstPrm[0]
dctPrm['under_delay'] = aryCstPrm[1]
dctPrm['peak_disp'] = aryCstPrm[2]
dctPrm['under_disp'] = aryCstPrm[3]
dctPrm['p_u_ratio'] = aryCstPrm[4]
# If not, set dctPrm to None, which will result in default hrf params
else:
if lgcPrint:
print('---------Use default hrf parameters')
dctPrm = None
aryPrfTc = crt_prf_ftr_tc(aryMdlRsp, aryTmpExpInf, cfg.varNumVol,
cfg.varTr, cfg.varTmpOvsmpl,
cfg.switchHrfSet, (int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)),
cfg.varPar, dctPrm=dctPrm, lgcPrint=lgcPrint)
# If desired by user, create prf time course models for supp surround
if varRat is not None:
if lgcPrint:
print('---------Add suppressive surround')
aryPrfTcSur = crt_prf_ftr_tc(aryMdlRspSur, aryTmpExpInf,
cfg.varNumVol, cfg.varTr,
cfg.varTmpOvsmpl, cfg.switchHrfSet,
(int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)),
cfg.varPar, dctPrm=dctPrm,
lgcPrint=lgcPrint)
# Concatenate aryPrfTc and aryPrfTcSur
aryPrfTc = np.concatenate((aryPrfTc, aryPrfTcSur), axis=1)
# *********************************************************************
return aryPrfTc | [
"def",
"model_creation_opt",
"(",
"dicCnfg",
",",
"aryMdlParams",
",",
"strPathHrf",
"=",
"None",
",",
"varRat",
"=",
"None",
",",
"lgcPrint",
"=",
"True",
")",
":",
"# *************************************************************************",
"# *** Load parameters from config file",
"# Load config parameters from dictionary into namespace:",
"cfg",
"=",
"cls_set_config",
"(",
"dicCnfg",
")",
"# *************************************************************************",
"if",
"cfg",
".",
"lgcCrteMdl",
":",
"# *********************************************************************",
"# *** Load spatial condition information",
"arySptExpInf",
"=",
"np",
".",
"load",
"(",
"cfg",
".",
"strSptExpInf",
")",
"# Here we assume scientific convention and orientation of images where",
"# the origin should fall in the lower left corner, the x-axis occupies",
"# the width and the y-axis occupies the height dimension of the screen.",
"# We also assume that the first dimension that the user provides",
"# indexes x and the second indexes the y-axis. Since python is column",
"# major (i.e. first indexes columns, only then rows), we need to rotate",
"# arySptExpInf by 90 degrees rightward. This will insure that with the",
"# 0th axis we index the scientific x-axis and higher values move us to",
"# the right on that x-axis. It will also ensure that the 1st",
"# python axis indexes the scientific y-axis and higher values will",
"# move us up.",
"arySptExpInf",
"=",
"np",
".",
"rot90",
"(",
"arySptExpInf",
",",
"k",
"=",
"3",
")",
"# *********************************************************************",
"# *********************************************************************",
"# *** Load temporal condition information",
"# load temporal information about presented stimuli",
"aryTmpExpInf",
"=",
"np",
".",
"load",
"(",
"cfg",
".",
"strTmpExpInf",
")",
"# add fourth column to make it appropriate for pyprf_feature",
"if",
"aryTmpExpInf",
".",
"shape",
"[",
"-",
"1",
"]",
"==",
"3",
":",
"vecNewCol",
"=",
"np",
".",
"greater",
"(",
"aryTmpExpInf",
"[",
":",
",",
"0",
"]",
",",
"0",
")",
".",
"astype",
"(",
"np",
".",
"float16",
")",
"aryTmpExpInf",
"=",
"np",
".",
"concatenate",
"(",
"(",
"aryTmpExpInf",
",",
"np",
".",
"expand_dims",
"(",
"vecNewCol",
",",
"axis",
"=",
"1",
")",
")",
",",
"axis",
"=",
"1",
")",
"# *********************************************************************",
"# *********************************************************************",
"# If desired by user, also create model parameters for supp surround",
"if",
"varRat",
"is",
"not",
"None",
":",
"aryMdlParamsSur",
"=",
"np",
".",
"copy",
"(",
"aryMdlParams",
")",
"aryMdlParamsSur",
"[",
":",
",",
"2",
"]",
"=",
"aryMdlParamsSur",
"[",
":",
",",
"2",
"]",
"*",
"varRat",
"# *********************************************************************",
"# *********************************************************************",
"# *** Create 2D Gauss model responses to spatial conditions.",
"aryMdlRsp",
"=",
"crt_mdl_rsp",
"(",
"arySptExpInf",
",",
"(",
"int",
"(",
"cfg",
".",
"varVslSpcSzeX",
")",
",",
"int",
"(",
"cfg",
".",
"varVslSpcSzeY",
")",
")",
",",
"aryMdlParams",
",",
"cfg",
".",
"varPar",
",",
"lgcPrint",
"=",
"lgcPrint",
")",
"# If desired by user, also create model responses for supp surround",
"if",
"varRat",
"is",
"not",
"None",
":",
"aryMdlRspSur",
"=",
"crt_mdl_rsp",
"(",
"arySptExpInf",
",",
"(",
"int",
"(",
"cfg",
".",
"varVslSpcSzeX",
")",
",",
"int",
"(",
"cfg",
".",
"varVslSpcSzeY",
")",
")",
",",
"aryMdlParamsSur",
",",
"cfg",
".",
"varPar",
",",
"lgcPrint",
"=",
"lgcPrint",
")",
"del",
"(",
"arySptExpInf",
")",
"# *********************************************************************",
"# *********************************************************************",
"# *** Create prf time course models",
"# Check whether path to npy file with hrf parameters was provided",
"if",
"strPathHrf",
"is",
"not",
"None",
":",
"if",
"lgcPrint",
":",
"print",
"(",
"'---------Load custom hrf parameters'",
")",
"aryCstPrm",
"=",
"np",
".",
"load",
"(",
"strPathHrf",
")",
"dctPrm",
"=",
"{",
"}",
"dctPrm",
"[",
"'peak_delay'",
"]",
"=",
"aryCstPrm",
"[",
"0",
"]",
"dctPrm",
"[",
"'under_delay'",
"]",
"=",
"aryCstPrm",
"[",
"1",
"]",
"dctPrm",
"[",
"'peak_disp'",
"]",
"=",
"aryCstPrm",
"[",
"2",
"]",
"dctPrm",
"[",
"'under_disp'",
"]",
"=",
"aryCstPrm",
"[",
"3",
"]",
"dctPrm",
"[",
"'p_u_ratio'",
"]",
"=",
"aryCstPrm",
"[",
"4",
"]",
"# If not, set dctPrm to None, which will result in default hrf params",
"else",
":",
"if",
"lgcPrint",
":",
"print",
"(",
"'---------Use default hrf parameters'",
")",
"dctPrm",
"=",
"None",
"aryPrfTc",
"=",
"crt_prf_ftr_tc",
"(",
"aryMdlRsp",
",",
"aryTmpExpInf",
",",
"cfg",
".",
"varNumVol",
",",
"cfg",
".",
"varTr",
",",
"cfg",
".",
"varTmpOvsmpl",
",",
"cfg",
".",
"switchHrfSet",
",",
"(",
"int",
"(",
"cfg",
".",
"varVslSpcSzeX",
")",
",",
"int",
"(",
"cfg",
".",
"varVslSpcSzeY",
")",
")",
",",
"cfg",
".",
"varPar",
",",
"dctPrm",
"=",
"dctPrm",
",",
"lgcPrint",
"=",
"lgcPrint",
")",
"# If desired by user, create prf time course models for supp surround",
"if",
"varRat",
"is",
"not",
"None",
":",
"if",
"lgcPrint",
":",
"print",
"(",
"'---------Add suppressive surround'",
")",
"aryPrfTcSur",
"=",
"crt_prf_ftr_tc",
"(",
"aryMdlRspSur",
",",
"aryTmpExpInf",
",",
"cfg",
".",
"varNumVol",
",",
"cfg",
".",
"varTr",
",",
"cfg",
".",
"varTmpOvsmpl",
",",
"cfg",
".",
"switchHrfSet",
",",
"(",
"int",
"(",
"cfg",
".",
"varVslSpcSzeX",
")",
",",
"int",
"(",
"cfg",
".",
"varVslSpcSzeY",
")",
")",
",",
"cfg",
".",
"varPar",
",",
"dctPrm",
"=",
"dctPrm",
",",
"lgcPrint",
"=",
"lgcPrint",
")",
"# Concatenate aryPrfTc and aryPrfTcSur",
"aryPrfTc",
"=",
"np",
".",
"concatenate",
"(",
"(",
"aryPrfTc",
",",
"aryPrfTcSur",
")",
",",
"axis",
"=",
"1",
")",
"# *********************************************************************",
"return",
"aryPrfTc"
]
| 44.686131 | 25.883212 |
def object_properties_count(self, o):
""" returns the number of user browsable properties of an object. """
o_type = type(o)
if isinstance(o, (dict, list, tuple, set)):
return len(o)
elif isinstance(o, (type(None), bool, float,
str, int,
bytes, types.ModuleType,
types.MethodType, types.FunctionType)):
return 0
else:
# Following lines are used to debug variables members browsing
# and counting
# if False and str(o_type) == "<class 'socket._socketobject'>":
# print "@378"
# print dir(o)
# print "hasattr(o, '__dict__')=%s" % hasattr(o,'__dict__')
# count = 0
# if hasattr(o, '__dict__'):
# for m_name, m_value in o.__dict__.iteritems():
# if m_name.startswith('__'):
# print " %s=>False" % (m_name,)
# continue
# if type(m_value) in (types.ModuleType, types.MethodType, types.FunctionType,):
# print " %s=>False" % (m_name,)
# continue
# print " %s=>True" % (m_name,)
# count +=1
# print " %s => %s = %s" % (o, count, dir(o),)
# else:
try:
if hasattr(o, '__dict__'):
count = len([m_name for m_name, m_value in o.__dict__.items()
if not m_name.startswith('__')
and not type(m_value) in (types.ModuleType,
types.MethodType,
types.FunctionType,) ])
else:
count = 0
except:
# Thank you werkzeug __getattr__ overloading!
count = 0
return count | [
"def",
"object_properties_count",
"(",
"self",
",",
"o",
")",
":",
"o_type",
"=",
"type",
"(",
"o",
")",
"if",
"isinstance",
"(",
"o",
",",
"(",
"dict",
",",
"list",
",",
"tuple",
",",
"set",
")",
")",
":",
"return",
"len",
"(",
"o",
")",
"elif",
"isinstance",
"(",
"o",
",",
"(",
"type",
"(",
"None",
")",
",",
"bool",
",",
"float",
",",
"str",
",",
"int",
",",
"bytes",
",",
"types",
".",
"ModuleType",
",",
"types",
".",
"MethodType",
",",
"types",
".",
"FunctionType",
")",
")",
":",
"return",
"0",
"else",
":",
"# Following lines are used to debug variables members browsing",
"# and counting",
"# if False and str(o_type) == \"<class 'socket._socketobject'>\":",
"# print \"@378\"",
"# print dir(o)",
"# print \"hasattr(o, '__dict__')=%s\" % hasattr(o,'__dict__')",
"# count = 0",
"# if hasattr(o, '__dict__'):",
"# for m_name, m_value in o.__dict__.iteritems():",
"# if m_name.startswith('__'):",
"# print \" %s=>False\" % (m_name,)",
"# continue",
"# if type(m_value) in (types.ModuleType, types.MethodType, types.FunctionType,):",
"# print \" %s=>False\" % (m_name,)",
"# continue",
"# print \" %s=>True\" % (m_name,)",
"# count +=1",
"# print \" %s => %s = %s\" % (o, count, dir(o),)",
"# else:",
"try",
":",
"if",
"hasattr",
"(",
"o",
",",
"'__dict__'",
")",
":",
"count",
"=",
"len",
"(",
"[",
"m_name",
"for",
"m_name",
",",
"m_value",
"in",
"o",
".",
"__dict__",
".",
"items",
"(",
")",
"if",
"not",
"m_name",
".",
"startswith",
"(",
"'__'",
")",
"and",
"not",
"type",
"(",
"m_value",
")",
"in",
"(",
"types",
".",
"ModuleType",
",",
"types",
".",
"MethodType",
",",
"types",
".",
"FunctionType",
",",
")",
"]",
")",
"else",
":",
"count",
"=",
"0",
"except",
":",
"# Thank you werkzeug __getattr__ overloading!",
"count",
"=",
"0",
"return",
"count"
]
| 48.162791 | 19.674419 |
def receive(self, msg):
'''
The message received from the queue specify a method of the
class the actor represents. This invokes it. If the
communication is an ASK, sends the result back
to the channel included in the message as an
ASKRESPONSE.
If it is a FUTURE, generates a FUTURERESPONSE
to send the result to the manager.
:param msg: The message is a dictionary using the constants
defined in util.py (:mod:`pyactor.util`).
'''
if msg[TYPE] == TELL and msg[METHOD] == 'stop':
self.running = False
self.future_manager.stop()
else:
result = None
try:
invoke = getattr(self._obj, msg[METHOD])
params = msg[PARAMS]
result = invoke(*params[0], **params[1])
except Exception, e:
if msg[TYPE] == TELL:
print e
return
result = e
self.send_response(result, msg) | [
"def",
"receive",
"(",
"self",
",",
"msg",
")",
":",
"if",
"msg",
"[",
"TYPE",
"]",
"==",
"TELL",
"and",
"msg",
"[",
"METHOD",
"]",
"==",
"'stop'",
":",
"self",
".",
"running",
"=",
"False",
"self",
".",
"future_manager",
".",
"stop",
"(",
")",
"else",
":",
"result",
"=",
"None",
"try",
":",
"invoke",
"=",
"getattr",
"(",
"self",
".",
"_obj",
",",
"msg",
"[",
"METHOD",
"]",
")",
"params",
"=",
"msg",
"[",
"PARAMS",
"]",
"result",
"=",
"invoke",
"(",
"*",
"params",
"[",
"0",
"]",
",",
"*",
"*",
"params",
"[",
"1",
"]",
")",
"except",
"Exception",
",",
"e",
":",
"if",
"msg",
"[",
"TYPE",
"]",
"==",
"TELL",
":",
"print",
"e",
"return",
"result",
"=",
"e",
"self",
".",
"send_response",
"(",
"result",
",",
"msg",
")"
]
| 35.517241 | 16.689655 |
def fraction_correct_values(indices, x_values, y_values, x_cutoff = 1.0, y_cutoff = 1.0, ignore_null_values = False):
'''
An approximation to the metric used in the Kellogg et al. paper: "The fraction correct is defined as the number of mutations categorized correctly divided by the total number of mutations in the benchmark set."
'''
num_points = len(indices)
assert(num_points == len(x_values) == len(y_values))
correct = []
for i in range(num_points):
index = indices[i]
x = x_values[i]
y = y_values[i]
if (x == None or y == None or numpy.isnan(x) or numpy.isnan(y)) and ignore_null_values: # If we are missing values then we either discount the case or consider it as incorrect depending on ignore_null_values
correct.append(numpy.nan)
elif (x >= x_cutoff) and (y >= y_cutoff): # both positive
correct.append(1.0)
elif (x <= -x_cutoff) and (y <= -y_cutoff): # both negative
correct.append(1.0)
elif (-x_cutoff < x < x_cutoff) and (-y_cutoff < y < y_cutoff): # both neutral
correct.append(1.0)
else:
correct.append(0.0)
return correct | [
"def",
"fraction_correct_values",
"(",
"indices",
",",
"x_values",
",",
"y_values",
",",
"x_cutoff",
"=",
"1.0",
",",
"y_cutoff",
"=",
"1.0",
",",
"ignore_null_values",
"=",
"False",
")",
":",
"num_points",
"=",
"len",
"(",
"indices",
")",
"assert",
"(",
"num_points",
"==",
"len",
"(",
"x_values",
")",
"==",
"len",
"(",
"y_values",
")",
")",
"correct",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"num_points",
")",
":",
"index",
"=",
"indices",
"[",
"i",
"]",
"x",
"=",
"x_values",
"[",
"i",
"]",
"y",
"=",
"y_values",
"[",
"i",
"]",
"if",
"(",
"x",
"==",
"None",
"or",
"y",
"==",
"None",
"or",
"numpy",
".",
"isnan",
"(",
"x",
")",
"or",
"numpy",
".",
"isnan",
"(",
"y",
")",
")",
"and",
"ignore_null_values",
":",
"# If we are missing values then we either discount the case or consider it as incorrect depending on ignore_null_values",
"correct",
".",
"append",
"(",
"numpy",
".",
"nan",
")",
"elif",
"(",
"x",
">=",
"x_cutoff",
")",
"and",
"(",
"y",
">=",
"y_cutoff",
")",
":",
"# both positive",
"correct",
".",
"append",
"(",
"1.0",
")",
"elif",
"(",
"x",
"<=",
"-",
"x_cutoff",
")",
"and",
"(",
"y",
"<=",
"-",
"y_cutoff",
")",
":",
"# both negative",
"correct",
".",
"append",
"(",
"1.0",
")",
"elif",
"(",
"-",
"x_cutoff",
"<",
"x",
"<",
"x_cutoff",
")",
"and",
"(",
"-",
"y_cutoff",
"<",
"y",
"<",
"y_cutoff",
")",
":",
"# both neutral",
"correct",
".",
"append",
"(",
"1.0",
")",
"else",
":",
"correct",
".",
"append",
"(",
"0.0",
")",
"return",
"correct"
]
| 53.363636 | 35.727273 |
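
The cutoff logic in `fraction_correct_values` above is easiest to see with a small worked call. This is an illustrative sketch only: the indices, x/y values and cutoffs are made up, and the function is assumed to be importable from its module.

```python
import numpy

# Hypothetical inputs: x could be experimental values, y the predictions.
indices = ['case1', 'case2', 'case3']
x_values = [1.5, 2.0, numpy.nan]
y_values = [1.2, -1.3, 0.4]

correct = fraction_correct_values(indices, x_values, y_values,
                                  x_cutoff=1.0, y_cutoff=1.0,
                                  ignore_null_values=True)
# -> [1.0, 0.0, nan]: both above cutoff, sign disagreement, missing value ignored
```
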
def run_script(self, script_name, keys=None, args=None):
"""
Execute a walrus script with the given arguments.
:param script_name: The base name of the script to execute.
:param list keys: Keys referenced by the script.
:param list args: Arguments passed in to the script.
:returns: Return value of script.
.. note:: Redis scripts require two parameters, ``keys``
and ``args``, which are referenced in lua as ``KEYS``
and ``ARGV``.
"""
return self._scripts[script_name](keys, args) | [
"def",
"run_script",
"(",
"self",
",",
"script_name",
",",
"keys",
"=",
"None",
",",
"args",
"=",
"None",
")",
":",
"return",
"self",
".",
"_scripts",
"[",
"script_name",
"]",
"(",
"keys",
",",
"args",
")"
]
| 40.428571 | 18.142857 |
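
A minimal usage sketch for `run_script`; the script name `'array_pop'`, the key and the argument are placeholders rather than part of the API shown above, and `db` stands in for the object exposing the method.

```python
# Hypothetical call: KEYS[1] in the Lua script sees 'my-array',
# ARGV[1] sees 'left'.
result = db.run_script('array_pop', keys=['my-array'], args=['left'])
```
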
def get_strike(self):
"""
Compute strike of each surface element and return area-weighted average
value (in range ``[0, 360]``) using formula from:
http://en.wikipedia.org/wiki/Mean_of_circular_quantities
Note that the original formula has been adapted to compute a weighted
rather than arithmetic mean.
"""
areas = self._get_areas()
strikes = numpy.array([surf.get_strike() for surf in self.surfaces])
v1 = (numpy.sum(areas * numpy.sin(numpy.radians(strikes))) /
numpy.sum(areas))
v2 = (numpy.sum(areas * numpy.cos(numpy.radians(strikes))) /
numpy.sum(areas))
return numpy.degrees(numpy.arctan2(v1, v2)) % 360 | [
"def",
"get_strike",
"(",
"self",
")",
":",
"areas",
"=",
"self",
".",
"_get_areas",
"(",
")",
"strikes",
"=",
"numpy",
".",
"array",
"(",
"[",
"surf",
".",
"get_strike",
"(",
")",
"for",
"surf",
"in",
"self",
".",
"surfaces",
"]",
")",
"v1",
"=",
"(",
"numpy",
".",
"sum",
"(",
"areas",
"*",
"numpy",
".",
"sin",
"(",
"numpy",
".",
"radians",
"(",
"strikes",
")",
")",
")",
"/",
"numpy",
".",
"sum",
"(",
"areas",
")",
")",
"v2",
"=",
"(",
"numpy",
".",
"sum",
"(",
"areas",
"*",
"numpy",
".",
"cos",
"(",
"numpy",
".",
"radians",
"(",
"strikes",
")",
")",
")",
"/",
"numpy",
".",
"sum",
"(",
"areas",
")",
")",
"return",
"numpy",
".",
"degrees",
"(",
"numpy",
".",
"arctan2",
"(",
"v1",
",",
"v2",
")",
")",
"%",
"360"
]
| 40 | 21.888889 |
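
The area-weighted circular mean used by `get_strike` can be isolated into a few lines; this standalone sketch restates the same formula (weighted mean sine and cosine, then `arctan2`) so the wrap-around behaviour is easy to check.

```python
import numpy

def weighted_circular_mean(angles_deg, weights):
    # Same formula as get_strike above: arctan2 of the weighted mean sine
    # and cosine, mapped back into [0, 360).
    angles = numpy.radians(angles_deg)
    s = numpy.sum(weights * numpy.sin(angles)) / numpy.sum(weights)
    c = numpy.sum(weights * numpy.cos(angles)) / numpy.sum(weights)
    return numpy.degrees(numpy.arctan2(s, c)) % 360

# Strikes straddling north average to ~0/360 degrees (due north), not 180:
weighted_circular_mean(numpy.array([350.0, 10.0]), numpy.array([1.0, 1.0]))
```
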
def _invert(color, **kwargs):
""" Returns the inverse (negative) of a color.
The red, green, and blue values are inverted, while the opacity is left alone.
"""
col = ColorValue(color)
args = [
255.0 - col.value[0],
255.0 - col.value[1],
255.0 - col.value[2],
col.value[3],
]
inverted = ColorValue(args)
return inverted | [
"def",
"_invert",
"(",
"color",
",",
"*",
"*",
"kwargs",
")",
":",
"col",
"=",
"ColorValue",
"(",
"color",
")",
"args",
"=",
"[",
"255.0",
"-",
"col",
".",
"value",
"[",
"0",
"]",
",",
"255.0",
"-",
"col",
".",
"value",
"[",
"1",
"]",
",",
"255.0",
"-",
"col",
".",
"value",
"[",
"2",
"]",
",",
"col",
".",
"value",
"[",
"3",
"]",
",",
"]",
"inverted",
"=",
"ColorValue",
"(",
"args",
")",
"return",
"inverted"
]
| 28.769231 | 16.538462 |
def types(self):
"""A tuple containing the value types for this Slot.
The Python equivalent of the CLIPS deftemplate-slot-types function.
"""
data = clips.data.DataObject(self._env)
lib.EnvDeftemplateSlotTypes(
self._env, self._tpl, self._name, data.byref)
return tuple(data.value) if isinstance(data.value, list) else () | [
"def",
"types",
"(",
"self",
")",
":",
"data",
"=",
"clips",
".",
"data",
".",
"DataObject",
"(",
"self",
".",
"_env",
")",
"lib",
".",
"EnvDeftemplateSlotTypes",
"(",
"self",
".",
"_env",
",",
"self",
".",
"_tpl",
",",
"self",
".",
"_name",
",",
"data",
".",
"byref",
")",
"return",
"tuple",
"(",
"data",
".",
"value",
")",
"if",
"isinstance",
"(",
"data",
".",
"value",
",",
"list",
")",
"else",
"(",
")"
]
| 31.166667 | 23.25 |
def get_preference(self, pref_name):
""" Gets a single named preference
:returns: the value, typed to str/bool/int/float depending on its content.
"""
resp = self.request_single('GetPrefs', {'pref': {'name': pref_name}})
return utils.auto_type(resp['_content']) | [
"def",
"get_preference",
"(",
"self",
",",
"pref_name",
")",
":",
"resp",
"=",
"self",
".",
"request_single",
"(",
"'GetPrefs'",
",",
"{",
"'pref'",
":",
"{",
"'name'",
":",
"pref_name",
"}",
"}",
")",
"return",
"utils",
".",
"auto_type",
"(",
"resp",
"[",
"'_content'",
"]",
")"
]
| 41.857143 | 18.285714 |
def norm(self, x):
"""Calculate the array-weighted norm of an element.
Parameters
----------
x : `ProductSpaceElement`
Element whose norm is calculated.
Returns
-------
norm : float
The norm of the provided element.
"""
if self.exponent == 2.0:
norm_squared = self.inner(x, x).real # TODO: optimize?!
return np.sqrt(norm_squared)
else:
norms = np.fromiter(
(xi.norm() for xi in x), dtype=np.float64, count=len(x))
if self.exponent in (1.0, float('inf')):
norms *= self.array
else:
norms *= self.array ** (1.0 / self.exponent)
return float(np.linalg.norm(norms, ord=self.exponent)) | [
"def",
"norm",
"(",
"self",
",",
"x",
")",
":",
"if",
"self",
".",
"exponent",
"==",
"2.0",
":",
"norm_squared",
"=",
"self",
".",
"inner",
"(",
"x",
",",
"x",
")",
".",
"real",
"# TODO: optimize?!",
"return",
"np",
".",
"sqrt",
"(",
"norm_squared",
")",
"else",
":",
"norms",
"=",
"np",
".",
"fromiter",
"(",
"(",
"xi",
".",
"norm",
"(",
")",
"for",
"xi",
"in",
"x",
")",
",",
"dtype",
"=",
"np",
".",
"float64",
",",
"count",
"=",
"len",
"(",
"x",
")",
")",
"if",
"self",
".",
"exponent",
"in",
"(",
"1.0",
",",
"float",
"(",
"'inf'",
")",
")",
":",
"norms",
"*=",
"self",
".",
"array",
"else",
":",
"norms",
"*=",
"self",
".",
"array",
"**",
"(",
"1.0",
"/",
"self",
".",
"exponent",
")",
"return",
"float",
"(",
"np",
".",
"linalg",
".",
"norm",
"(",
"norms",
",",
"ord",
"=",
"self",
".",
"exponent",
")",
")"
]
| 31.36 | 18.48 |
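
Spelled out, the `exponent != 2` branch of `norm` above evaluates an array-weighted p-norm; multiplying each component norm by `a_i ** (1/p)` before the outer `numpy.linalg.norm` call is just an algebraic rearrangement of the same expression, and for p = 1 and p = infinity the weights enter linearly. This restates the code, nothing more:

```latex
% a_i are the entries of self.array, x_i the components of the element.
\| x \| \;=\; \Bigl( \sum_i a_i \, \| x_i \|^{p} \Bigr)^{1/p}
\quad (1 \le p < \infty),
\qquad
\| x \|_{\infty} \;=\; \max_i \; a_i \, \| x_i \| .
```
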
def rmdir(self, tid):
"""
Directory removal. ``YTActions`` object under `tid` is told to clean all data, and then it is deleted.
Parameters
----------
tid : str
Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator.
"""
pt = self.PathType.get(tid)
if pt is self.PathType.main:
raise FuseOSError(errno.EINVAL)
elif pt is not self.PathType.subdir:
raise FuseOSError(errno.ENOTDIR)
try:
self.searches[tid[0]].clean()
del self.searches[tid[0]]
except KeyError:
raise FuseOSError(errno.ENOENT)
return 0 | [
"def",
"rmdir",
"(",
"self",
",",
"tid",
")",
":",
"pt",
"=",
"self",
".",
"PathType",
".",
"get",
"(",
"tid",
")",
"if",
"pt",
"is",
"self",
".",
"PathType",
".",
"main",
":",
"raise",
"FuseOSError",
"(",
"errno",
".",
"EINVAL",
")",
"elif",
"pt",
"is",
"not",
"self",
".",
"PathType",
".",
"subdir",
":",
"raise",
"FuseOSError",
"(",
"errno",
".",
"ENOTDIR",
")",
"try",
":",
"self",
".",
"searches",
"[",
"tid",
"[",
"0",
"]",
"]",
".",
"clean",
"(",
")",
"del",
"self",
".",
"searches",
"[",
"tid",
"[",
"0",
"]",
"]",
"except",
"KeyError",
":",
"raise",
"FuseOSError",
"(",
"errno",
".",
"ENOENT",
")",
"return",
"0"
]
| 26.576923 | 23.115385 |
def get_metric(self, timestamp):
"""Get a metric including all current time series.
Get a :class:`opencensus.metrics.export.metric.Metric` with one
:class:`opencensus.metrics.export.time_series.TimeSeries` for each
set of label values with a recorded measurement. Each `TimeSeries`
has a single point that represents the last recorded value.
:type timestamp: :class:`datetime.datetime`
:param timestamp: Recording time to report, usually the current time.
:rtype: :class:`opencensus.metrics.export.metric.Metric` or None
:return: A converted metric for all current measurements.
"""
if not self.points:
return None
with self._points_lock:
ts_list = get_timeseries_list(self.points, timestamp)
return metric.Metric(self.descriptor, ts_list) | [
"def",
"get_metric",
"(",
"self",
",",
"timestamp",
")",
":",
"if",
"not",
"self",
".",
"points",
":",
"return",
"None",
"with",
"self",
".",
"_points_lock",
":",
"ts_list",
"=",
"get_timeseries_list",
"(",
"self",
".",
"points",
",",
"timestamp",
")",
"return",
"metric",
".",
"Metric",
"(",
"self",
".",
"descriptor",
",",
"ts_list",
")"
]
| 42.6 | 23.85 |
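
A brief usage sketch for `get_metric`; `gauge` is a hypothetical name for the object whose method is shown above, and the timestamp is simply the current time, as the docstring suggests.

```python
import datetime

# Hypothetical: `gauge` stands in for the derived gauge/measure object above.
snapshot = gauge.get_metric(datetime.datetime.utcnow())
# Returns None when no points have been recorded yet.
```
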
def shorten_name(name, char_limit, side='right'):
"""Shorten `name` if it is longer than `char_limit`.
If `side` == "right" then the right side of the name is shortened;
if "left" then the left side is shortened.
In either case, the suffix of the name is preserved.
"""
# TODO: A more elegant way to do this?
if char_limit is not None and len(name) > char_limit:
info = get_fileinfo(name)
if info.numhdu is not None:
i = name.rindex('[')
s = (name[:i], name[i:])
len_sfx = len(s[1])
len_pfx = char_limit - len_sfx - 4 + 1
if len_pfx > 0:
if side == 'right':
name = '{0}...{1}'.format(s[0][:len_pfx], s[1])
elif side == 'left':
name = '...{0}{1}'.format(s[0][-len_pfx:], s[1])
else:
name = '...{0}'.format(s[1])
else:
len1 = char_limit - 3 + 1
if side == 'right':
name = '{0}...'.format(name[:len1])
elif side == 'left':
name = '...{0}'.format(name[-len1:])
return name | [
"def",
"shorten_name",
"(",
"name",
",",
"char_limit",
",",
"side",
"=",
"'right'",
")",
":",
"# TODO: A more elegant way to do this?",
"if",
"char_limit",
"is",
"not",
"None",
"and",
"len",
"(",
"name",
")",
">",
"char_limit",
":",
"info",
"=",
"get_fileinfo",
"(",
"name",
")",
"if",
"info",
".",
"numhdu",
"is",
"not",
"None",
":",
"i",
"=",
"name",
".",
"rindex",
"(",
"'['",
")",
"s",
"=",
"(",
"name",
"[",
":",
"i",
"]",
",",
"name",
"[",
"i",
":",
"]",
")",
"len_sfx",
"=",
"len",
"(",
"s",
"[",
"1",
"]",
")",
"len_pfx",
"=",
"char_limit",
"-",
"len_sfx",
"-",
"4",
"+",
"1",
"if",
"len_pfx",
">",
"0",
":",
"if",
"side",
"==",
"'right'",
":",
"name",
"=",
"'{0}...{1}'",
".",
"format",
"(",
"s",
"[",
"0",
"]",
"[",
":",
"len_pfx",
"]",
",",
"s",
"[",
"1",
"]",
")",
"elif",
"side",
"==",
"'left'",
":",
"name",
"=",
"'...{0}{1}'",
".",
"format",
"(",
"s",
"[",
"0",
"]",
"[",
"-",
"len_pfx",
":",
"]",
",",
"s",
"[",
"1",
"]",
")",
"else",
":",
"name",
"=",
"'...{0}'",
".",
"format",
"(",
"s",
"[",
"1",
"]",
")",
"else",
":",
"len1",
"=",
"char_limit",
"-",
"3",
"+",
"1",
"if",
"side",
"==",
"'right'",
":",
"name",
"=",
"'{0}...'",
".",
"format",
"(",
"name",
"[",
":",
"len1",
"]",
")",
"elif",
"side",
"==",
"'left'",
":",
"name",
"=",
"'...{0}'",
".",
"format",
"(",
"name",
"[",
"-",
"len1",
":",
"]",
")",
"return",
"name"
]
| 38.793103 | 12.482759 |
def process(self, username, password, remember=True):
"""
Process a login request
@type username: str
@type password: str
@param remember: Save the login session to disk
@type remember: bool
@raise BadLoginException: Login request failed
@return: Session cookies
@rtype: cookielib.LWPCookieJar
"""
self.log.debug('Processing login request')
self.browser.open(self.LOGIN_URL)
self.log.info('Login page loaded: %s', self.browser.title())
self.browser.select_form(nr=0)
# Set the fields
self.log.debug('Username: %s', username)
self.log.debug('Password: %s', (password[0] + '*' * (len(password) - 2) + password[-1]))
self.log.debug('Remember: %s', remember)
self.browser.form[self.USERNAME_FIELD] = username
self.browser.form[self.PASSWORD_FIELD] = password
self.browser.find_control(self.REMEMBER_FIELD).items[0].selected = remember
# Submit the request
self.browser.submit()
self.log.debug('Response code: %s', self.browser.response().code)
self.log.debug('== Cookies ==')
for cookie in self.cookiejar:
self.log.debug(cookie)
self.cookies[cookie.name] = cookie.value
self.log.debug('== End Cookies ==')
# Make sure we successfully logged in
if self.LOGIN_COOKIE not in self.cookies:
raise BadLoginException('No login cookie returned, this probably means an invalid login was provided')
# Should we save our login session?
if remember:
self.log.info('Saving login session to disk')
self.cookiejar.save()
self.log.info('Login request successful')
return self.cookiejar | [
"def",
"process",
"(",
"self",
",",
"username",
",",
"password",
",",
"remember",
"=",
"True",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'Processing login request'",
")",
"self",
".",
"browser",
".",
"open",
"(",
"self",
".",
"LOGIN_URL",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Login page loaded: %s'",
",",
"self",
".",
"browser",
".",
"title",
"(",
")",
")",
"self",
".",
"browser",
".",
"select_form",
"(",
"nr",
"=",
"0",
")",
"# Set the fields",
"self",
".",
"log",
".",
"debug",
"(",
"'Username: %s'",
",",
"username",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'Password: %s'",
",",
"(",
"password",
"[",
"0",
"]",
"+",
"'*'",
"*",
"(",
"len",
"(",
"password",
")",
"-",
"2",
")",
"+",
"password",
"[",
"-",
"1",
"]",
")",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'Remember: %s'",
",",
"remember",
")",
"self",
".",
"browser",
".",
"form",
"[",
"self",
".",
"USERNAME_FIELD",
"]",
"=",
"username",
"self",
".",
"browser",
".",
"form",
"[",
"self",
".",
"PASSWORD_FIELD",
"]",
"=",
"password",
"self",
".",
"browser",
".",
"find_control",
"(",
"self",
".",
"REMEMBER_FIELD",
")",
".",
"items",
"[",
"0",
"]",
".",
"selected",
"=",
"remember",
"# Submit the request",
"self",
".",
"browser",
".",
"submit",
"(",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'Response code: %s'",
",",
"self",
".",
"browser",
".",
"response",
"(",
")",
".",
"code",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'== Cookies =='",
")",
"for",
"cookie",
"in",
"self",
".",
"cookiejar",
":",
"self",
".",
"log",
".",
"debug",
"(",
"cookie",
")",
"self",
".",
"cookies",
"[",
"cookie",
".",
"name",
"]",
"=",
"cookie",
".",
"value",
"self",
".",
"log",
".",
"debug",
"(",
"'== End Cookies =='",
")",
"# Make sure we successfully logged in",
"if",
"self",
".",
"LOGIN_COOKIE",
"not",
"in",
"self",
".",
"cookies",
":",
"raise",
"BadLoginException",
"(",
"'No login cookie returned, this probably means an invalid login was provided'",
")",
"# Should we save our login session?",
"if",
"remember",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Saving login session to disk'",
")",
"self",
".",
"cookiejar",
".",
"save",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Login request successful'",
")",
"return",
"self",
".",
"cookiejar"
]
| 37.765957 | 18.106383 |
def software_breakpoint_set(self, addr, thumb=False, arm=False, flash=False, ram=False):
"""Sets a software breakpoint at the specified address.
If ``thumb`` is ``True``, the breakpoint is set in THUMB-mode, while if
``arm`` is ``True``, the breakpoint is set in ARM-mode, otherwise a
normal breakpoint is set.
If ``flash`` is ``True``, the breakpoint is set in flash, otherwise if
``ram`` is ``True``, the breakpoint is set in RAM. If both are
``True`` or both are ``False``, then the best option is chosen for
setting the breakpoint in software.
Args:
self (JLink): the ``JLink`` instance
addr (int): the address where the breakpoint will be set
thumb (bool): boolean indicating to set the breakpoint in THUMB mode
arm (bool): boolean indicating to set the breakpoint in ARM mode
flash (bool): boolean indicating to set the breakpoint in flash
ram (bool): boolean indicating to set the breakpoint in RAM
Returns:
An integer specifying the breakpoint handle. This handle should be
retained for future breakpoint operations.
Raises:
TypeError: if the given address is not an integer.
JLinkException: if the breakpoint could not be set.
"""
if flash and not ram:
flags = enums.JLinkBreakpoint.SW_FLASH
elif not flash and ram:
flags = enums.JLinkBreakpoint.SW_RAM
else:
flags = enums.JLinkBreakpoint.SW
if thumb:
flags = flags | enums.JLinkBreakpoint.THUMB
elif arm:
flags = flags | enums.JLinkBreakpoint.ARM
handle = self._dll.JLINKARM_SetBPEx(int(addr), flags)
if handle <= 0:
raise errors.JLinkException('Software breakpoint could not be set.')
return handle | [
"def",
"software_breakpoint_set",
"(",
"self",
",",
"addr",
",",
"thumb",
"=",
"False",
",",
"arm",
"=",
"False",
",",
"flash",
"=",
"False",
",",
"ram",
"=",
"False",
")",
":",
"if",
"flash",
"and",
"not",
"ram",
":",
"flags",
"=",
"enums",
".",
"JLinkBreakpoint",
".",
"SW_FLASH",
"elif",
"not",
"flash",
"and",
"ram",
":",
"flags",
"=",
"enums",
".",
"JLinkBreakpoint",
".",
"SW_RAM",
"else",
":",
"flags",
"=",
"enums",
".",
"JLinkBreakpoint",
".",
"SW",
"if",
"thumb",
":",
"flags",
"=",
"flags",
"|",
"enums",
".",
"JLinkBreakpoint",
".",
"THUMB",
"elif",
"arm",
":",
"flags",
"=",
"flags",
"|",
"enums",
".",
"JLinkBreakpoint",
".",
"ARM",
"handle",
"=",
"self",
".",
"_dll",
".",
"JLINKARM_SetBPEx",
"(",
"int",
"(",
"addr",
")",
",",
"flags",
")",
"if",
"handle",
"<=",
"0",
":",
"raise",
"errors",
".",
"JLinkException",
"(",
"'Software breakpoint could not be set.'",
")",
"return",
"handle"
]
| 41.288889 | 25.066667 |
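
A hedged usage sketch for `software_breakpoint_set`; the target address is illustrative and `jlink` is assumed to be an already-connected `JLink` instance.

```python
# Hypothetical: request a THUMB-mode software breakpoint placed in flash.
handle = jlink.software_breakpoint_set(0x08000420, thumb=True, flash=True)
# Keep the returned handle; it identifies the breakpoint in later operations.
```
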
def add_ref(self, reftype, data):
"""Add a reference and returns the identifier."""
ref = (reftype, data)
try:
index = self.refs.index(ref)
except ValueError:
self.refs.append(ref)
index = len(self.refs) - 1
return str(index) | [
"def",
"add_ref",
"(",
"self",
",",
"reftype",
",",
"data",
")",
":",
"ref",
"=",
"(",
"reftype",
",",
"data",
")",
"try",
":",
"index",
"=",
"self",
".",
"refs",
".",
"index",
"(",
"ref",
")",
"except",
"ValueError",
":",
"self",
".",
"refs",
".",
"append",
"(",
"ref",
")",
"index",
"=",
"len",
"(",
"self",
".",
"refs",
")",
"-",
"1",
"return",
"str",
"(",
"index",
")"
]
| 32.555556 | 9.333333 |
def get_bitcoind( new_bitcoind_opts=None, reset=False, new=False ):
"""
Get or instantiate our bitcoind client.
Optionally re-set the bitcoind options.
"""
global bitcoind
if reset:
bitcoind = None
elif not new and bitcoind is not None:
return bitcoind
if new or bitcoind is None:
if new_bitcoind_opts is not None:
set_bitcoin_opts( new_bitcoind_opts )
bitcoin_opts = get_bitcoin_opts()
new_bitcoind = None
try:
try:
new_bitcoind = virtualchain.connect_bitcoind( bitcoin_opts )
except KeyError, ke:
log.exception(ke)
log.error("Invalid configuration: %s" % bitcoin_opts)
return None
if new:
return new_bitcoind
else:
# save for subsequent reuse
bitcoind = new_bitcoind
return bitcoind
except Exception, e:
log.exception( e )
return None | [
"def",
"get_bitcoind",
"(",
"new_bitcoind_opts",
"=",
"None",
",",
"reset",
"=",
"False",
",",
"new",
"=",
"False",
")",
":",
"global",
"bitcoind",
"if",
"reset",
":",
"bitcoind",
"=",
"None",
"elif",
"not",
"new",
"and",
"bitcoind",
"is",
"not",
"None",
":",
"return",
"bitcoind",
"if",
"new",
"or",
"bitcoind",
"is",
"None",
":",
"if",
"new_bitcoind_opts",
"is",
"not",
"None",
":",
"set_bitcoin_opts",
"(",
"new_bitcoind_opts",
")",
"bitcoin_opts",
"=",
"get_bitcoin_opts",
"(",
")",
"new_bitcoind",
"=",
"None",
"try",
":",
"try",
":",
"new_bitcoind",
"=",
"virtualchain",
".",
"connect_bitcoind",
"(",
"bitcoin_opts",
")",
"except",
"KeyError",
",",
"ke",
":",
"log",
".",
"exception",
"(",
"ke",
")",
"log",
".",
"error",
"(",
"\"Invalid configuration: %s\"",
"%",
"bitcoin_opts",
")",
"return",
"None",
"if",
"new",
":",
"return",
"new_bitcoind",
"else",
":",
"# save for subsequent reuse",
"bitcoind",
"=",
"new_bitcoind",
"return",
"bitcoind",
"except",
"Exception",
",",
"e",
":",
"log",
".",
"exception",
"(",
"e",
")",
"return",
"None"
]
| 24.102564 | 19.128205 |
def download(self,
url,
dest_path=None):
"""
:param url:
:type url: str
:param dest_path:
:type dest_path: str
"""
if os.path.exists(dest_path):
os.remove(dest_path)
resp = get(url, stream=True)
size = int(resp.headers.get("content-length"))
label = "Downloading {filename} ({size:.2f}MB)".format(
filename=os.path.basename(dest_path),
size=size / float(self.chunk_size) / self.chunk_size
)
with open_file(dest_path, 'wb') as file:
content_iter = resp.iter_content(chunk_size=self.chunk_size)
with progressbar(content_iter,
length=size / self.chunk_size,
label=label) as bar:
for chunk in bar:
if chunk:
file.write(chunk)
file.flush() | [
"def",
"download",
"(",
"self",
",",
"url",
",",
"dest_path",
"=",
"None",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"dest_path",
")",
":",
"os",
".",
"remove",
"(",
"dest_path",
")",
"resp",
"=",
"get",
"(",
"url",
",",
"stream",
"=",
"True",
")",
"size",
"=",
"int",
"(",
"resp",
".",
"headers",
".",
"get",
"(",
"\"content-length\"",
")",
")",
"label",
"=",
"\"Downloading {filename} ({size:.2f}MB)\"",
".",
"format",
"(",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"dest_path",
")",
",",
"size",
"=",
"size",
"/",
"float",
"(",
"self",
".",
"chunk_size",
")",
"/",
"self",
".",
"chunk_size",
")",
"with",
"open_file",
"(",
"dest_path",
",",
"'wb'",
")",
"as",
"file",
":",
"content_iter",
"=",
"resp",
".",
"iter_content",
"(",
"chunk_size",
"=",
"self",
".",
"chunk_size",
")",
"with",
"progressbar",
"(",
"content_iter",
",",
"length",
"=",
"size",
"/",
"self",
".",
"chunk_size",
",",
"label",
"=",
"label",
")",
"as",
"bar",
":",
"for",
"chunk",
"in",
"bar",
":",
"if",
"chunk",
":",
"file",
".",
"write",
"(",
"chunk",
")",
"file",
".",
"flush",
"(",
")"
]
| 33.607143 | 14.392857 |
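
A hypothetical invocation of `download`; the URL and destination path are placeholders, and `downloader` stands in for an instance of the class defining the method.

```python
# Streams the response in chunk_size pieces, replacing any existing file at
# dest_path, and renders a progress bar sized from the Content-Length header.
downloader.download(
    url='https://example.com/archive.tar.gz',
    dest_path='/tmp/archive.tar.gz',
)
```
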
def unindex_item(self, item):
"""
Un-index an item from our name_to_item dict.
:param item: the item to un-index
:type item: alignak.objects.item.Item
:return: None
"""
name_property = getattr(self.__class__, "name_property", None)
if name_property is None:
return
name = getattr(item, name_property, None)
if name is None:
return
self.name_to_item.pop(name, None) | [
"def",
"unindex_item",
"(",
"self",
",",
"item",
")",
":",
"name_property",
"=",
"getattr",
"(",
"self",
".",
"__class__",
",",
"\"name_property\"",
",",
"None",
")",
"if",
"name_property",
"is",
"None",
":",
"return",
"name",
"=",
"getattr",
"(",
"item",
",",
"name_property",
",",
"None",
")",
"if",
"name",
"is",
"None",
":",
"return",
"self",
".",
"name_to_item",
".",
"pop",
"(",
"name",
",",
"None",
")"
]
| 33.071429 | 11.071429 |
def _reduce_method(cls, func):
"""
Return a wrapped function for injecting numpy methods.
see ops.inject_coarsen_methods
"""
def wrapped_func(self, **kwargs):
from .dataarray import DataArray
reduced = self.obj.variable.coarsen(
self.windows, func, self.boundary, self.side)
coords = {}
for c, v in self.obj.coords.items():
if c == self.obj.name:
coords[c] = reduced
else:
if any(d in self.windows for d in v.dims):
coords[c] = v.variable.coarsen(
self.windows, self.coord_func[c],
self.boundary, self.side)
else:
coords[c] = v
return DataArray(reduced, dims=self.obj.dims, coords=coords)
return wrapped_func | [
"def",
"_reduce_method",
"(",
"cls",
",",
"func",
")",
":",
"def",
"wrapped_func",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
".",
"dataarray",
"import",
"DataArray",
"reduced",
"=",
"self",
".",
"obj",
".",
"variable",
".",
"coarsen",
"(",
"self",
".",
"windows",
",",
"func",
",",
"self",
".",
"boundary",
",",
"self",
".",
"side",
")",
"coords",
"=",
"{",
"}",
"for",
"c",
",",
"v",
"in",
"self",
".",
"obj",
".",
"coords",
".",
"items",
"(",
")",
":",
"if",
"c",
"==",
"self",
".",
"obj",
".",
"name",
":",
"coords",
"[",
"c",
"]",
"=",
"reduced",
"else",
":",
"if",
"any",
"(",
"d",
"in",
"self",
".",
"windows",
"for",
"d",
"in",
"v",
".",
"dims",
")",
":",
"coords",
"[",
"c",
"]",
"=",
"v",
".",
"variable",
".",
"coarsen",
"(",
"self",
".",
"windows",
",",
"self",
".",
"coord_func",
"[",
"c",
"]",
",",
"self",
".",
"boundary",
",",
"self",
".",
"side",
")",
"else",
":",
"coords",
"[",
"c",
"]",
"=",
"v",
"return",
"DataArray",
"(",
"reduced",
",",
"dims",
"=",
"self",
".",
"obj",
".",
"dims",
",",
"coords",
"=",
"coords",
")",
"return",
"wrapped_func"
]
| 37.791667 | 13.708333 |
def get(self, namespace, key):
"""Get a specific configuration item"""
cfg = self.dbconfig.get(key, namespace, as_object=True)
return self.make_response({
'message': None,
'config': cfg
}) | [
"def",
"get",
"(",
"self",
",",
"namespace",
",",
"key",
")",
":",
"cfg",
"=",
"self",
".",
"dbconfig",
".",
"get",
"(",
"key",
",",
"namespace",
",",
"as_object",
"=",
"True",
")",
"return",
"self",
".",
"make_response",
"(",
"{",
"'message'",
":",
"None",
",",
"'config'",
":",
"cfg",
"}",
")"
]
| 34 | 13.571429 |
def get_file_list(opts):
"""
Returns a list containing file paths of requested files to be parsed
using AnchorHub options.
:param opts: Namespace containing AnchorHub options, usually created from
command line arguments
:return: a list of absolute string file paths of files that should be
parsed
"""
if opts.is_dir:
# Input is a directory, get a list of files
return get_files(opts.abs_input, opts.extensions, exclude=[
opts.abs_output], recursive=opts.recursive)
elif os.path.isfile(opts.input):
# Input is a file, should only parse that one file
return [opts.abs_input]
else:
# Input is non-existent
return [] | [
"def",
"get_file_list",
"(",
"opts",
")",
":",
"if",
"opts",
".",
"is_dir",
":",
"# Input is a directory, get a list of files",
"return",
"get_files",
"(",
"opts",
".",
"abs_input",
",",
"opts",
".",
"extensions",
",",
"exclude",
"=",
"[",
"opts",
".",
"abs_output",
"]",
",",
"recursive",
"=",
"opts",
".",
"recursive",
")",
"elif",
"os",
".",
"path",
".",
"isfile",
"(",
"opts",
".",
"input",
")",
":",
"# Input is a file, should only parse that one file",
"return",
"[",
"opts",
".",
"abs_input",
"]",
"else",
":",
"# Input is non-existent",
"return",
"[",
"]"
]
| 35.3 | 18.7 |
def compile(expr, params=None):
"""
Force compilation of expression as though it were an expression depending
on Clickhouse. Note you can also call expr.compile()
Returns
-------
compiled : string
"""
from ibis.clickhouse.compiler import to_sql
return to_sql(expr, dialect.make_context(params=params)) | [
"def",
"compile",
"(",
"expr",
",",
"params",
"=",
"None",
")",
":",
"from",
"ibis",
".",
"clickhouse",
".",
"compiler",
"import",
"to_sql",
"return",
"to_sql",
"(",
"expr",
",",
"dialect",
".",
"make_context",
"(",
"params",
"=",
"params",
")",
")"
]
| 27.333333 | 20.5 |
def send_notification_batch(self, notifications, topic=None, priority=NotificationPriority.Immediate,
expiration=None, collapse_id=None):
"""
Send a notification to a list of tokens in batch. Instead of sending a synchronous request
for each token, send multiple requests concurrently. This is done on the same connection,
using HTTP/2 streams (one request per stream).
APNs allows many streams simultaneously, but the number of streams can vary depending on
server load. This method reads the SETTINGS frame sent by the server to figure out the
maximum number of concurrent streams. Typically, APNs reports a maximum of 500.
The function returns a dictionary mapping each token to its result. The result is "Success"
if the token was sent successfully, or the string returned by APNs in the 'reason' field of
the response, if the token generated an error.
"""
notification_iterator = iter(notifications)
next_notification = next(notification_iterator, None)
# Make sure we're connected to APNs, so that we receive and process the server's SETTINGS
# frame before starting to send notifications.
self.connect()
results = {}
open_streams = collections.deque()
# Loop on the tokens, sending as many requests as possible concurrently to APNs.
# When reaching the maximum concurrent streams limit, wait for a response before sending
# another request.
while len(open_streams) > 0 or next_notification is not None:
# Update the max_concurrent_streams on every iteration since a SETTINGS frame can be
# sent by the server at any time.
self.update_max_concurrent_streams()
if self.should_send_notification(next_notification, open_streams):
logger.info('Sending to token %s', next_notification.token)
stream_id = self.send_notification_async(next_notification.token, next_notification.payload, topic,
priority, expiration, collapse_id)
open_streams.append(RequestStream(stream_id, next_notification.token))
next_notification = next(notification_iterator, None)
if next_notification is None:
# No tokens remaining. Proceed to get results for pending requests.
logger.info('Finished sending all tokens, waiting for pending requests.')
else:
# We have at least one request waiting for response (otherwise we would have either
# sent new requests or exited the while loop.) Wait for the first outstanding stream
# to return a response.
pending_stream = open_streams.popleft()
result = self.get_notification_result(pending_stream.stream_id)
logger.info('Got response for %s: %s', pending_stream.token, result)
results[pending_stream.token] = result
return results | [
"def",
"send_notification_batch",
"(",
"self",
",",
"notifications",
",",
"topic",
"=",
"None",
",",
"priority",
"=",
"NotificationPriority",
".",
"Immediate",
",",
"expiration",
"=",
"None",
",",
"collapse_id",
"=",
"None",
")",
":",
"notification_iterator",
"=",
"iter",
"(",
"notifications",
")",
"next_notification",
"=",
"next",
"(",
"notification_iterator",
",",
"None",
")",
"# Make sure we're connected to APNs, so that we receive and process the server's SETTINGS",
"# frame before starting to send notifications.",
"self",
".",
"connect",
"(",
")",
"results",
"=",
"{",
"}",
"open_streams",
"=",
"collections",
".",
"deque",
"(",
")",
"# Loop on the tokens, sending as many requests as possible concurrently to APNs.",
"# When reaching the maximum concurrent streams limit, wait for a response before sending",
"# another request.",
"while",
"len",
"(",
"open_streams",
")",
">",
"0",
"or",
"next_notification",
"is",
"not",
"None",
":",
"# Update the max_concurrent_streams on every iteration since a SETTINGS frame can be",
"# sent by the server at any time.",
"self",
".",
"update_max_concurrent_streams",
"(",
")",
"if",
"self",
".",
"should_send_notification",
"(",
"next_notification",
",",
"open_streams",
")",
":",
"logger",
".",
"info",
"(",
"'Sending to token %s'",
",",
"next_notification",
".",
"token",
")",
"stream_id",
"=",
"self",
".",
"send_notification_async",
"(",
"next_notification",
".",
"token",
",",
"next_notification",
".",
"payload",
",",
"topic",
",",
"priority",
",",
"expiration",
",",
"collapse_id",
")",
"open_streams",
".",
"append",
"(",
"RequestStream",
"(",
"stream_id",
",",
"next_notification",
".",
"token",
")",
")",
"next_notification",
"=",
"next",
"(",
"notification_iterator",
",",
"None",
")",
"if",
"next_notification",
"is",
"None",
":",
"# No tokens remaining. Proceed to get results for pending requests.",
"logger",
".",
"info",
"(",
"'Finished sending all tokens, waiting for pending requests.'",
")",
"else",
":",
"# We have at least one request waiting for response (otherwise we would have either",
"# sent new requests or exited the while loop.) Wait for the first outstanding stream",
"# to return a response.",
"pending_stream",
"=",
"open_streams",
".",
"popleft",
"(",
")",
"result",
"=",
"self",
".",
"get_notification_result",
"(",
"pending_stream",
".",
"stream_id",
")",
"logger",
".",
"info",
"(",
"'Got response for %s: %s'",
",",
"pending_stream",
".",
"token",
",",
"result",
")",
"results",
"[",
"pending_stream",
".",
"token",
"]",
"=",
"result",
"return",
"results"
]
| 61.42 | 34.34 |
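
A minimal usage sketch of the batch send; the token strings and payloads are placeholders, and the only requirement on each item (as used above) is that it exposes `.token` and `.payload` attributes.

```python
# Hypothetical: `client` is the APNs client defining send_notification_batch,
# and Notification is any simple container with .token and .payload.
results = client.send_notification_batch(
    [Notification(token=token, payload=payload) for token in device_tokens],
    topic='com.example.app',
)
for token, outcome in results.items():
    print(token, outcome)   # 'Success' or the APNs 'reason' string
```
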
def v(*args, **kwargs):
'''
print the name = values of any passed in variables
this prints out the passed in name, the value, and the file:line where the v()
method was called so you can easily find it and remove it later
example --
foo = 1
bar = [1, 2, 3]
out.v(foo, bar)
""" prints out:
foo = 1
bar =
[
0: 1,
1: 2,
2: 3
]
(/file:line)
"""
*args -- list -- the variables you want to see pretty printed for humans
'''
if not args:
raise ValueError("you didn't pass any arguments to print out")
with Reflect.context(args, **kwargs) as r:
instance = V_CLASS(r, stream, **kwargs)
instance() | [
"def",
"v",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"args",
":",
"raise",
"ValueError",
"(",
"\"you didn't pass any arguments to print out\"",
")",
"with",
"Reflect",
".",
"context",
"(",
"args",
",",
"*",
"*",
"kwargs",
")",
"as",
"r",
":",
"instance",
"=",
"V_CLASS",
"(",
"r",
",",
"stream",
",",
"*",
"*",
"kwargs",
")",
"instance",
"(",
")"
]
| 23.125 | 25.5625 |
def get(self, **url_params):
"""
Makes the HTTP GET to the url.
"""
if url_params:
self.http_method_args["params"].update(url_params)
return self.http_method("GET") | [
"def",
"get",
"(",
"self",
",",
"*",
"*",
"url_params",
")",
":",
"if",
"url_params",
":",
"self",
".",
"http_method_args",
"[",
"\"params\"",
"]",
".",
"update",
"(",
"url_params",
")",
"return",
"self",
".",
"http_method",
"(",
"\"GET\"",
")"
]
| 30 | 8 |
def request_time(self, req):
"""Return the current time in ms since the Unix Epoch."""
r = time.time()
self._time_result.set_value(r)
return ("ok", r) | [
"def",
"request_time",
"(",
"self",
",",
"req",
")",
":",
"r",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"_time_result",
".",
"set_value",
"(",
"r",
")",
"return",
"(",
"\"ok\"",
",",
"r",
")"
]
| 35.6 | 9.4 |
def _get_parallel_regions(data):
"""Retrieve regions to run in parallel, putting longest intervals first.
"""
callable_regions = tz.get_in(["config", "algorithm", "callable_regions"], data)
if not callable_regions:
raise ValueError("Did not find any callable regions for sample: %s\n"
"Check 'align/%s/*-callableblocks.bed' and 'regions' to examine callable regions"
% (dd.get_sample_name(data), dd.get_sample_name(data)))
with open(callable_regions) as in_handle:
regions = [(xs[0], int(xs[1]), int(xs[2])) for xs in
(l.rstrip().split("\t") for l in in_handle) if (len(xs) >= 3 and
not xs[0].startswith(("track", "browser",)))]
return regions | [
"def",
"_get_parallel_regions",
"(",
"data",
")",
":",
"callable_regions",
"=",
"tz",
".",
"get_in",
"(",
"[",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"callable_regions\"",
"]",
",",
"data",
")",
"if",
"not",
"callable_regions",
":",
"raise",
"ValueError",
"(",
"\"Did not find any callable regions for sample: %s\\n\"",
"\"Check 'align/%s/*-callableblocks.bed' and 'regions' to examine callable regions\"",
"%",
"(",
"dd",
".",
"get_sample_name",
"(",
"data",
")",
",",
"dd",
".",
"get_sample_name",
"(",
"data",
")",
")",
")",
"with",
"open",
"(",
"callable_regions",
")",
"as",
"in_handle",
":",
"regions",
"=",
"[",
"(",
"xs",
"[",
"0",
"]",
",",
"int",
"(",
"xs",
"[",
"1",
"]",
")",
",",
"int",
"(",
"xs",
"[",
"2",
"]",
")",
")",
"for",
"xs",
"in",
"(",
"l",
".",
"rstrip",
"(",
")",
".",
"split",
"(",
"\"\\t\"",
")",
"for",
"l",
"in",
"in_handle",
")",
"if",
"(",
"len",
"(",
"xs",
")",
">=",
"3",
"and",
"not",
"xs",
"[",
"0",
"]",
".",
"startswith",
"(",
"(",
"\"track\"",
",",
"\"browser\"",
",",
")",
")",
")",
"]",
"return",
"regions"
]
| 62.692308 | 28.923077 |
def p_try_statement_1(self, p):
"""try_statement : TRY block catch"""
p[0] = ast.Try(statements=p[2], catch=p[3]) | [
"def",
"p_try_statement_1",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"Try",
"(",
"statements",
"=",
"p",
"[",
"2",
"]",
",",
"catch",
"=",
"p",
"[",
"3",
"]",
")"
]
| 42.333333 | 6.666667 |
def stage(self, fileobj, creds=None, callback=None):
"""Stages data in a Mapbox-owned S3 bucket
If creds are not provided, temporary credentials will be
generated using the Mapbox API.
Parameters
----------
fileobj: file object or filename
A Python file object opened in binary mode or a filename.
creds: dict
AWS credentials allowing uploads to the destination bucket.
callback: func
A function that takes a number of bytes processed as its
sole argument.
Returns
-------
str
The URL of the staged data
"""
if not hasattr(fileobj, 'read'):
fileobj = open(fileobj, 'rb')
if not creds:
res = self._get_credentials()
creds = res.json()
session = boto3_session(
aws_access_key_id=creds['accessKeyId'],
aws_secret_access_key=creds['secretAccessKey'],
aws_session_token=creds['sessionToken'],
region_name='us-east-1')
s3 = session.resource('s3')
bucket = s3.Bucket(creds['bucket'])
key = creds['key']
bucket.upload_fileobj(fileobj, key, Callback=callback)
return creds['url'] | [
"def",
"stage",
"(",
"self",
",",
"fileobj",
",",
"creds",
"=",
"None",
",",
"callback",
"=",
"None",
")",
":",
"if",
"not",
"hasattr",
"(",
"fileobj",
",",
"'read'",
")",
":",
"fileobj",
"=",
"open",
"(",
"fileobj",
",",
"'rb'",
")",
"if",
"not",
"creds",
":",
"res",
"=",
"self",
".",
"_get_credentials",
"(",
")",
"creds",
"=",
"res",
".",
"json",
"(",
")",
"session",
"=",
"boto3_session",
"(",
"aws_access_key_id",
"=",
"creds",
"[",
"'accessKeyId'",
"]",
",",
"aws_secret_access_key",
"=",
"creds",
"[",
"'secretAccessKey'",
"]",
",",
"aws_session_token",
"=",
"creds",
"[",
"'sessionToken'",
"]",
",",
"region_name",
"=",
"'us-east-1'",
")",
"s3",
"=",
"session",
".",
"resource",
"(",
"'s3'",
")",
"bucket",
"=",
"s3",
".",
"Bucket",
"(",
"creds",
"[",
"'bucket'",
"]",
")",
"key",
"=",
"creds",
"[",
"'key'",
"]",
"bucket",
".",
"upload_fileobj",
"(",
"fileobj",
",",
"key",
",",
"Callback",
"=",
"callback",
")",
"return",
"creds",
"[",
"'url'",
"]"
]
| 30.292683 | 18.658537 |
def highs(self, *args):
"""
Generator yielding only the high tides.
Arguments:
see Tide.extrema()
"""
for t in ifilter(lambda e: e[2] == 'H', self.extrema(*args)):
yield t | [
"def",
"highs",
"(",
"self",
",",
"*",
"args",
")",
":",
"for",
"t",
"in",
"ifilter",
"(",
"lambda",
"e",
":",
"e",
"[",
"2",
"]",
"==",
"'H'",
",",
"self",
".",
"extrema",
"(",
"*",
"args",
")",
")",
":",
"yield",
"t"
]
| 22.375 | 14.875 |
def _set_get_arp(self, v, load=False):
"""
Setter method for get_arp, mapped from YANG variable /brocade_arp_rpc/get_arp (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_arp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_arp() directly.
YANG Description: This RPC returns ARP entries of the managed entity.
Depending on the input argument, the ARP entries are
displayed. When there is no input argument entered, all
the ARP entries of the managed entity are returned.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=get_arp.get_arp, is_leaf=True, yang_name="get-arp", rest_name="get-arp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'ArpShowAction'}}, namespace='urn:brocade.com:mgmt:brocade-arp', defining_module='brocade-arp', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """get_arp must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=get_arp.get_arp, is_leaf=True, yang_name="get-arp", rest_name="get-arp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'ArpShowAction'}}, namespace='urn:brocade.com:mgmt:brocade-arp', defining_module='brocade-arp', yang_type='rpc', is_config=True)""",
})
self.__get_arp = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_get_arp",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"get_arp",
".",
"get_arp",
",",
"is_leaf",
"=",
"True",
",",
"yang_name",
"=",
"\"get-arp\"",
",",
"rest_name",
"=",
"\"get-arp\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"False",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'hidden'",
":",
"u'rpccmd'",
",",
"u'actionpoint'",
":",
"u'ArpShowAction'",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-arp'",
",",
"defining_module",
"=",
"'brocade-arp'",
",",
"yang_type",
"=",
"'rpc'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"get_arp must be of a type compatible with rpc\"\"\"",
",",
"'defined-type'",
":",
"\"rpc\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=get_arp.get_arp, is_leaf=True, yang_name=\"get-arp\", rest_name=\"get-arp\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'ArpShowAction'}}, namespace='urn:brocade.com:mgmt:brocade-arp', defining_module='brocade-arp', yang_type='rpc', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__get_arp",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
]
| 63.851852 | 29.851852 |
def parse(self, buf: memoryview, params: Params) \
-> Tuple[Command, memoryview]:
"""Parse the given bytes into a command. The basic syntax is a tag
string, a command name, possibly some arguments, and then an endline.
If the command has a complete structure but cannot be parsed, an
:class:`InvalidCommand` is returned.
Args:
buf: The bytes to parse.
params: The parsing parameters.
"""
try:
tag, buf = Tag.parse(buf, params)
except NotParseable as exc:
return InvalidCommand(params, exc), buf[0:0]
else:
params = params.copy(tag=tag.value)
cmd_parts: List[bytes] = []
while True:
try:
_, buf = Space.parse(buf, params)
atom, buf = Atom.parse(buf, params)
cmd_parts.append(atom.value.upper())
except NotParseable as exc:
return InvalidCommand(params, exc), buf[0:0]
command = b' '.join(cmd_parts)
cmd_type = self.commands.get(command)
if not cmd_type:
return InvalidCommand(params, None, command), buf[0:0]
elif not cmd_type.compound:
break
params = params.copy(command_name=command)
try:
return cmd_type.parse(buf, params)
except NotParseable as exc:
return InvalidCommand(params, exc, command, cmd_type), buf[0:0] | [
"def",
"parse",
"(",
"self",
",",
"buf",
":",
"memoryview",
",",
"params",
":",
"Params",
")",
"->",
"Tuple",
"[",
"Command",
",",
"memoryview",
"]",
":",
"try",
":",
"tag",
",",
"buf",
"=",
"Tag",
".",
"parse",
"(",
"buf",
",",
"params",
")",
"except",
"NotParseable",
"as",
"exc",
":",
"return",
"InvalidCommand",
"(",
"params",
",",
"exc",
")",
",",
"buf",
"[",
"0",
":",
"0",
"]",
"else",
":",
"params",
"=",
"params",
".",
"copy",
"(",
"tag",
"=",
"tag",
".",
"value",
")",
"cmd_parts",
":",
"List",
"[",
"bytes",
"]",
"=",
"[",
"]",
"while",
"True",
":",
"try",
":",
"_",
",",
"buf",
"=",
"Space",
".",
"parse",
"(",
"buf",
",",
"params",
")",
"atom",
",",
"buf",
"=",
"Atom",
".",
"parse",
"(",
"buf",
",",
"params",
")",
"cmd_parts",
".",
"append",
"(",
"atom",
".",
"value",
".",
"upper",
"(",
")",
")",
"except",
"NotParseable",
"as",
"exc",
":",
"return",
"InvalidCommand",
"(",
"params",
",",
"exc",
")",
",",
"buf",
"[",
"0",
":",
"0",
"]",
"command",
"=",
"b' '",
".",
"join",
"(",
"cmd_parts",
")",
"cmd_type",
"=",
"self",
".",
"commands",
".",
"get",
"(",
"command",
")",
"if",
"not",
"cmd_type",
":",
"return",
"InvalidCommand",
"(",
"params",
",",
"None",
",",
"command",
")",
",",
"buf",
"[",
"0",
":",
"0",
"]",
"elif",
"not",
"cmd_type",
".",
"compound",
":",
"break",
"params",
"=",
"params",
".",
"copy",
"(",
"command_name",
"=",
"command",
")",
"try",
":",
"return",
"cmd_type",
".",
"parse",
"(",
"buf",
",",
"params",
")",
"except",
"NotParseable",
"as",
"exc",
":",
"return",
"InvalidCommand",
"(",
"params",
",",
"exc",
",",
"command",
",",
"cmd_type",
")",
",",
"buf",
"[",
"0",
":",
"0",
"]"
]
| 39.405405 | 14.783784 |
def _sentinel_parse_scene_id(sceneid):
"""Parse Sentinel-2 scene id.
Attributes
----------
sceneid : str
Sentinel-2 sceneid.
Returns
-------
out : dict
dictionary with metadata constructed from the sceneid.
e.g:
_sentinel_parse_scene_id('S2A_tile_20170323_07SNC_0')
{
"acquisitionDay": "23",
"acquisitionMonth": "03",
"acquisitionYear": "2017",
"key": "tiles/7/S/NC/2017/3/23/0",
"lat": "S",
"num": "0",
"satellite": "A",
"scene": "S2A_tile_20170323_07SNC_0",
"sensor": "2",
"sq": "NC",
"utm": "07",
}
"""
if not re.match("^S2[AB]_tile_[0-9]{8}_[0-9]{2}[A-Z]{3}_[0-9]$", sceneid):
raise InvalidSentinelSceneId("Could not match {}".format(sceneid))
sentinel_pattern = (
r"^S"
r"(?P<sensor>\w{1})"
r"(?P<satellite>[AB]{1})"
r"_tile_"
r"(?P<acquisitionYear>[0-9]{4})"
r"(?P<acquisitionMonth>[0-9]{2})"
r"(?P<acquisitionDay>[0-9]{2})"
r"_"
r"(?P<utm>[0-9]{2})"
r"(?P<lat>\w{1})"
r"(?P<sq>\w{2})"
r"_"
r"(?P<num>[0-9]{1})$"
)
meta = None
match = re.match(sentinel_pattern, sceneid, re.IGNORECASE)
if match:
meta = match.groupdict()
utm_zone = meta["utm"].lstrip("0")
grid_square = meta["sq"]
latitude_band = meta["lat"]
year = meta["acquisitionYear"]
month = meta["acquisitionMonth"].lstrip("0")
day = meta["acquisitionDay"].lstrip("0")
img_num = meta["num"]
meta["key"] = "tiles/{}/{}/{}/{}/{}/{}/{}".format(
utm_zone, latitude_band, grid_square, year, month, day, img_num
)
meta["scene"] = sceneid
return meta | [
"def",
"_sentinel_parse_scene_id",
"(",
"sceneid",
")",
":",
"if",
"not",
"re",
".",
"match",
"(",
"\"^S2[AB]_tile_[0-9]{8}_[0-9]{2}[A-Z]{3}_[0-9]$\"",
",",
"sceneid",
")",
":",
"raise",
"InvalidSentinelSceneId",
"(",
"\"Could not match {}\"",
".",
"format",
"(",
"sceneid",
")",
")",
"sentinel_pattern",
"=",
"(",
"r\"^S\"",
"r\"(?P<sensor>\\w{1})\"",
"r\"(?P<satellite>[AB]{1})\"",
"r\"_tile_\"",
"r\"(?P<acquisitionYear>[0-9]{4})\"",
"r\"(?P<acquisitionMonth>[0-9]{2})\"",
"r\"(?P<acquisitionDay>[0-9]{2})\"",
"r\"_\"",
"r\"(?P<utm>[0-9]{2})\"",
"r\"(?P<lat>\\w{1})\"",
"r\"(?P<sq>\\w{2})\"",
"r\"_\"",
"r\"(?P<num>[0-9]{1})$\"",
")",
"meta",
"=",
"None",
"match",
"=",
"re",
".",
"match",
"(",
"sentinel_pattern",
",",
"sceneid",
",",
"re",
".",
"IGNORECASE",
")",
"if",
"match",
":",
"meta",
"=",
"match",
".",
"groupdict",
"(",
")",
"utm_zone",
"=",
"meta",
"[",
"\"utm\"",
"]",
".",
"lstrip",
"(",
"\"0\"",
")",
"grid_square",
"=",
"meta",
"[",
"\"sq\"",
"]",
"latitude_band",
"=",
"meta",
"[",
"\"lat\"",
"]",
"year",
"=",
"meta",
"[",
"\"acquisitionYear\"",
"]",
"month",
"=",
"meta",
"[",
"\"acquisitionMonth\"",
"]",
".",
"lstrip",
"(",
"\"0\"",
")",
"day",
"=",
"meta",
"[",
"\"acquisitionDay\"",
"]",
".",
"lstrip",
"(",
"\"0\"",
")",
"img_num",
"=",
"meta",
"[",
"\"num\"",
"]",
"meta",
"[",
"\"key\"",
"]",
"=",
"\"tiles/{}/{}/{}/{}/{}/{}/{}\"",
".",
"format",
"(",
"utm_zone",
",",
"latitude_band",
",",
"grid_square",
",",
"year",
",",
"month",
",",
"day",
",",
"img_num",
")",
"meta",
"[",
"\"scene\"",
"]",
"=",
"sceneid",
"return",
"meta"
]
| 25.085714 | 20.342857 |
def upload_local_file(self, local_file_path, file_obj=None):
"""Create a Stored File and upload it's data. This is a one part do it all type method. Here is what
it does:
1. "Discover" information about the file (mime-type, size)
2. Create the stored file object in slick
3. Upload (chunked) all the data in the local file
4. re-fetch the stored file object from slick, and return it
"""
if file_obj is None and not os.path.exists(local_file_path):
return
storedfile = StoredFile()
storedfile.mimetype = mimetypes.guess_type(local_file_path)[0]
storedfile.filename = os.path.basename(local_file_path)
if file_obj is None:
storedfile.length = os.stat(local_file_path).st_size
else:
file_obj.seek(0,os.SEEK_END)
storedfile.length = file_obj.tell()
file_obj.seek(0)
storedfile = self(storedfile).create()
md5 = hashlib.md5()
url = self(storedfile).getUrl() + "/addchunk"
if file_obj is None:
with open(local_file_path, 'rb') as filecontents:
upload_chunks(url, storedfile, filecontents)
else:
upload_chunks(url, storedfile, file_obj)
return self(storedfile).update() | [
"def",
"upload_local_file",
"(",
"self",
",",
"local_file_path",
",",
"file_obj",
"=",
"None",
")",
":",
"if",
"file_obj",
"is",
"None",
"and",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"local_file_path",
")",
":",
"return",
"storedfile",
"=",
"StoredFile",
"(",
")",
"storedfile",
".",
"mimetype",
"=",
"mimetypes",
".",
"guess_type",
"(",
"local_file_path",
")",
"[",
"0",
"]",
"storedfile",
".",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"local_file_path",
")",
"if",
"file_obj",
"is",
"None",
":",
"storedfile",
".",
"length",
"=",
"os",
".",
"stat",
"(",
"local_file_path",
")",
".",
"st_size",
"else",
":",
"file_obj",
".",
"seek",
"(",
"0",
",",
"os",
".",
"SEEK_END",
")",
"storedfile",
".",
"length",
"=",
"file_obj",
".",
"tell",
"(",
")",
"file_obj",
".",
"seek",
"(",
"0",
")",
"storedfile",
"=",
"self",
"(",
"storedfile",
")",
".",
"create",
"(",
")",
"md5",
"=",
"hashlib",
".",
"md5",
"(",
")",
"url",
"=",
"self",
"(",
"storedfile",
")",
".",
"getUrl",
"(",
")",
"+",
"\"/addchunk\"",
"if",
"file_obj",
"is",
"None",
":",
"with",
"open",
"(",
"local_file_path",
",",
"'rb'",
")",
"as",
"filecontents",
":",
"upload_chunks",
"(",
"url",
",",
"storedfile",
",",
"filecontents",
")",
"else",
":",
"upload_chunks",
"(",
"url",
",",
"storedfile",
",",
"file_obj",
")",
"return",
"self",
"(",
"storedfile",
")",
".",
"update",
"(",
")"
]
| 46.642857 | 16.321429 |
def base_argparser(argv=()):
"""Initial parser that can set values for the rest of the parsing process.
"""
global verbose
verbose = _not_verbose
_p = argparse.ArgumentParser(add_help=False)
_p.add_argument('--debug', action='store_true', help="turn on all the show and verbose options (mainly for debugging pydeps itself)")
_p.add_argument('--config', help="specify config file", metavar="FILE")
_p.add_argument('--no-config', help="disable processing of config files", action='store_true')
_p.add_argument('--version', action='store_true', help='print pydeps version')
_p.add_argument('-L', '--log', help=textwrap.dedent('''
set log-level to one of CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET.
'''))
_args, argv = _p.parse_known_args(argv)
if _args.log:
loglevels = "CRITICAL DEBUG ERROR FATAL INFO WARN"
if _args.log not in loglevels: # pragma: nocover
error('legal values for the -L parameter are:', loglevels)
loglevel = getattr(logging, _args.log)
else:
loglevel = None
logging.basicConfig(
level=loglevel,
format='%(filename)s:%(lineno)d: %(levelname)s: %(message)s'
)
if _args.version: # pragma: nocover
print("pydeps v" + __version__)
sys.exit(0)
return _p, _args, argv | [
"def",
"base_argparser",
"(",
"argv",
"=",
"(",
")",
")",
":",
"global",
"verbose",
"verbose",
"=",
"_not_verbose",
"_p",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"add_help",
"=",
"False",
")",
"_p",
".",
"add_argument",
"(",
"'--debug'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"turn on all the show and verbose options (mainly for debugging pydeps itself)\"",
")",
"_p",
".",
"add_argument",
"(",
"'--config'",
",",
"help",
"=",
"\"specify config file\"",
",",
"metavar",
"=",
"\"FILE\"",
")",
"_p",
".",
"add_argument",
"(",
"'--no-config'",
",",
"help",
"=",
"\"disable processing of config files\"",
",",
"action",
"=",
"'store_true'",
")",
"_p",
".",
"add_argument",
"(",
"'--version'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'print pydeps version'",
")",
"_p",
".",
"add_argument",
"(",
"'-L'",
",",
"'--log'",
",",
"help",
"=",
"textwrap",
".",
"dedent",
"(",
"'''\n set log-level to one of CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET.\n '''",
")",
")",
"_args",
",",
"argv",
"=",
"_p",
".",
"parse_known_args",
"(",
"argv",
")",
"if",
"_args",
".",
"log",
":",
"loglevels",
"=",
"\"CRITICAL DEBUG ERROR FATAL INFO WARN\"",
"if",
"_args",
".",
"log",
"not",
"in",
"loglevels",
":",
"# pragma: nocover",
"error",
"(",
"'legal values for the -L parameter are:'",
",",
"loglevels",
")",
"loglevel",
"=",
"getattr",
"(",
"logging",
",",
"_args",
".",
"log",
")",
"else",
":",
"loglevel",
"=",
"None",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"loglevel",
",",
"format",
"=",
"'%(filename)s:%(lineno)d: %(levelname)s: %(message)s'",
")",
"if",
"_args",
".",
"version",
":",
"# pragma: nocover",
"print",
"(",
"\"pydeps v\"",
"+",
"__version__",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"return",
"_p",
",",
"_args",
",",
"argv"
]
| 38.529412 | 25.088235 |
def use_mock(self, mock, *args, **kwarg):
"""
Context manager or decorator in order to use a coroutine as mock of service
endpoint in a test.
:param mock: Coroutine to use as mock. It should behave like :meth:`~ClientSession.request`.
:type mock: coroutine
:param service_name: Name of service where you want to use mock. If None it will be used
as soon as possible.
:type service_name: str
:param endpoint: Endpoint where you want to use mock. If None it will be used
as soon as possible.
:type endpoint: str
:param offset: Times it must be ignored before use. Default 0. Only positive integers.
:type offset: int
:param limit: Times it could be used. Default 1. 0 means no limit. Only positive integers.
:type limit: int
:return: UseMockDefinition
"""
return UseMockDefinition(mock, self, *args, **kwarg) | [
"def",
"use_mock",
"(",
"self",
",",
"mock",
",",
"*",
"args",
",",
"*",
"*",
"kwarg",
")",
":",
"return",
"UseMockDefinition",
"(",
"mock",
",",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwarg",
")"
]
| 47 | 23.8 |
def force_new_dynamic_value(self_, name): # pylint: disable-msg=E0213
"""
Force a new value to be generated for the dynamic attribute
name, and return it.
If name is not dynamic, its current value is returned
(i.e. equivalent to getattr(name).
"""
cls_or_slf = self_.self_or_cls
param_obj = cls_or_slf.param.objects('existing').get(name)
if not param_obj:
return getattr(cls_or_slf, name)
cls, slf = None, None
if isinstance(cls_or_slf,type):
cls = cls_or_slf
else:
slf = cls_or_slf
if not hasattr(param_obj,'_force'):
return param_obj.__get__(slf, cls)
else:
return param_obj._force(slf, cls) | [
"def",
"force_new_dynamic_value",
"(",
"self_",
",",
"name",
")",
":",
"# pylint: disable-msg=E0213",
"cls_or_slf",
"=",
"self_",
".",
"self_or_cls",
"param_obj",
"=",
"cls_or_slf",
".",
"param",
".",
"objects",
"(",
"'existing'",
")",
".",
"get",
"(",
"name",
")",
"if",
"not",
"param_obj",
":",
"return",
"getattr",
"(",
"cls_or_slf",
",",
"name",
")",
"cls",
",",
"slf",
"=",
"None",
",",
"None",
"if",
"isinstance",
"(",
"cls_or_slf",
",",
"type",
")",
":",
"cls",
"=",
"cls_or_slf",
"else",
":",
"slf",
"=",
"cls_or_slf",
"if",
"not",
"hasattr",
"(",
"param_obj",
",",
"'_force'",
")",
":",
"return",
"param_obj",
".",
"__get__",
"(",
"slf",
",",
"cls",
")",
"else",
":",
"return",
"param_obj",
".",
"_force",
"(",
"slf",
",",
"cls",
")"
]
| 31.083333 | 16.75 |
def transcribe_file(self, file_path, clip_length=10, compress=True):
'''
a method to transcribe the text from an audio file
EXAMPLE: https://github.com/dannguyen/watson-word-watcher
:param file_path: string with path to audio file on localhost
:param clip_length: [optional] integer with seconds to divide clips into
:param compress: [optional] boolean to convert file to audio/ogg
:return: dictionary with transcribed text segments in 'segments' key
'''
title = '%s.transcribe_file' % self.__class__.__name__
# validate inputs
input_fields = {
'file_path': file_path,
'clip_length': clip_length
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# run conversion
import os
if compress:
file_name, file_extension = os.path.splitext(file_path)
if file_extension != '.ogg':
file_path = self.convert_audio(file_path, 'audio/ogg', True)
# construct empty file details
file_details = {
'name': '',
'mimetype': '',
'extension': ''
}
# retrieve file name
file_arg = '%s(file_path=%s)' % (title, str(file_path))
split_file = os.path.split(file_path)
file_details['name'] = split_file[0]
if len(split_file) > 1:
file_details['name'] = split_file[1]
if not file_details['name']:
raise ValueError('%s must have a file name.' % file_arg)
# validate file extension
ext_kwargs = {
'file_name': file_details['name'],
'extension_map': self.fields.schema['audio_extensions'],
'method_title': title,
'argument_title': file_path
}
regex_details = self._validate_extension(**ext_kwargs)
file_details.update(**regex_details)
# retrieve byte data
if not os.path.exists(file_path):
raise ValueError('%s is not a valid file path.' % file_arg)
# validate file mimetype
if self.magic:
magic_details = self.magic.analyze(file_path)
mimetype_text = file_details['mimetype'][6:]
if mimetype_text not in magic_details['mimetype']:
raise ValueError('%s byte data mimetype %s does not match %s file extension.' % (file_arg, magic_details['mimetype'], file_details['extension']))
# import dependencies
from math import ceil
from moviepy.editor import AudioFileClip
# open audio file
audio = AudioFileClip(file_path)
audio_duration = audio.duration
# construct list of files to transcribe
file_list = []
if audio_duration < clip_length:
file_list.append(file_path)
else:
# create temporary audio files
clip_folder = self._create_folder()
count = 0
t_start = 0
while t_start < audio_duration:
t_end = t_start + clip_length
if t_end > audio_duration:
t_end = ceil(audio_duration)
segment = audio.subclip(t_start)
else:
segment = audio.subclip(t_start, t_end)
clip_name = 'audio%s.%s' % (count, file_details['extension'])
clip_path = os.path.join(clip_folder, clip_name)
segment.write_audiofile(clip_path, verbose=False)
file_list.append(clip_path)
count += 1
t_start = t_end
# run file transcription method
transcription_result = self._transcribe_files(file_list, file_details['mimetype'])
# remove temp files
if len(file_list) > 1:
from labpack.records.settings import remove_settings
for file in file_list:
remove_settings(file, remove_dir=True)
return transcription_result | [
"def",
"transcribe_file",
"(",
"self",
",",
"file_path",
",",
"clip_length",
"=",
"10",
",",
"compress",
"=",
"True",
")",
":",
"title",
"=",
"'%s.transcribe_file'",
"%",
"self",
".",
"__class__",
".",
"__name__",
"# validate inputs\r",
"input_fields",
"=",
"{",
"'file_path'",
":",
"file_path",
",",
"'clip_length'",
":",
"clip_length",
"}",
"for",
"key",
",",
"value",
"in",
"input_fields",
".",
"items",
"(",
")",
":",
"object_title",
"=",
"'%s(%s=%s)'",
"%",
"(",
"title",
",",
"key",
",",
"str",
"(",
"value",
")",
")",
"self",
".",
"fields",
".",
"validate",
"(",
"value",
",",
"'.%s'",
"%",
"key",
",",
"object_title",
")",
"# run conversion\r",
"import",
"os",
"if",
"compress",
":",
"file_name",
",",
"file_extension",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"file_path",
")",
"if",
"file_extension",
"!=",
"'.ogg'",
":",
"file_path",
"=",
"self",
".",
"convert_audio",
"(",
"file_path",
",",
"'audio/ogg'",
",",
"True",
")",
"# construct empty file details\r",
"file_details",
"=",
"{",
"'name'",
":",
"''",
",",
"'mimetype'",
":",
"''",
",",
"'extension'",
":",
"''",
"}",
"# retrieve file name\r",
"file_arg",
"=",
"'%s(file_path=%s)'",
"%",
"(",
"title",
",",
"str",
"(",
"file_path",
")",
")",
"split_file",
"=",
"os",
".",
"path",
".",
"split",
"(",
"file_path",
")",
"file_details",
"[",
"'name'",
"]",
"=",
"split_file",
"[",
"0",
"]",
"if",
"len",
"(",
"split_file",
")",
">",
"1",
":",
"file_details",
"[",
"'name'",
"]",
"=",
"split_file",
"[",
"1",
"]",
"if",
"not",
"file_details",
"[",
"'name'",
"]",
":",
"raise",
"ValueError",
"(",
"'%s must have a file name.'",
"%",
"file_arg",
")",
"# validate file extension\r",
"ext_kwargs",
"=",
"{",
"'file_name'",
":",
"file_details",
"[",
"'name'",
"]",
",",
"'extension_map'",
":",
"self",
".",
"fields",
".",
"schema",
"[",
"'audio_extensions'",
"]",
",",
"'method_title'",
":",
"title",
",",
"'argument_title'",
":",
"file_path",
"}",
"regex_details",
"=",
"self",
".",
"_validate_extension",
"(",
"*",
"*",
"ext_kwargs",
")",
"file_details",
".",
"update",
"(",
"*",
"*",
"regex_details",
")",
"# retrieve byte data\r",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"file_path",
")",
":",
"raise",
"ValueError",
"(",
"'%s is not a valid file path.'",
"%",
"file_arg",
")",
"# validate file mimetype\r",
"if",
"self",
".",
"magic",
":",
"magic_details",
"=",
"self",
".",
"magic",
".",
"analyze",
"(",
"file_path",
")",
"mimetype_text",
"=",
"file_details",
"[",
"'mimetype'",
"]",
"[",
"6",
":",
"]",
"if",
"mimetype_text",
"not",
"in",
"magic_details",
"[",
"'mimetype'",
"]",
":",
"raise",
"ValueError",
"(",
"'%s byte data mimetype %s does not match %s file extension.'",
"%",
"(",
"file_arg",
",",
"magic_details",
"[",
"'mimetype'",
"]",
",",
"file_details",
"[",
"'extension'",
"]",
")",
")",
"# import dependencies\r",
"from",
"math",
"import",
"ceil",
"from",
"moviepy",
".",
"editor",
"import",
"AudioFileClip",
"# open audio file\r",
"audio",
"=",
"AudioFileClip",
"(",
"file_path",
")",
"audio_duration",
"=",
"audio",
".",
"duration",
"# construct list of files to transcribe\r",
"file_list",
"=",
"[",
"]",
"if",
"audio_duration",
"<",
"clip_length",
":",
"file_list",
".",
"append",
"(",
"file_path",
")",
"else",
":",
"# create temporary audio files\r",
"clip_folder",
"=",
"self",
".",
"_create_folder",
"(",
")",
"count",
"=",
"0",
"t_start",
"=",
"0",
"while",
"t_start",
"<",
"audio_duration",
":",
"t_end",
"=",
"t_start",
"+",
"clip_length",
"if",
"t_end",
">",
"audio_duration",
":",
"t_end",
"=",
"ceil",
"(",
"audio_duration",
")",
"segment",
"=",
"audio",
".",
"subclip",
"(",
"t_start",
")",
"else",
":",
"segment",
"=",
"audio",
".",
"subclip",
"(",
"t_start",
",",
"t_end",
")",
"clip_name",
"=",
"'audio%s.%s'",
"%",
"(",
"count",
",",
"file_details",
"[",
"'extension'",
"]",
")",
"clip_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"clip_folder",
",",
"clip_name",
")",
"segment",
".",
"write_audiofile",
"(",
"clip_path",
",",
"verbose",
"=",
"False",
")",
"file_list",
".",
"append",
"(",
"clip_path",
")",
"count",
"+=",
"1",
"t_start",
"=",
"t_end",
"# run file transcription method\r",
"transcription_result",
"=",
"self",
".",
"_transcribe_files",
"(",
"file_list",
",",
"file_details",
"[",
"'mimetype'",
"]",
")",
"# remove temp files\r",
"if",
"len",
"(",
"file_list",
")",
">",
"1",
":",
"from",
"labpack",
".",
"records",
".",
"settings",
"import",
"remove_settings",
"for",
"file",
"in",
"file_list",
":",
"remove_settings",
"(",
"file",
",",
"remove_dir",
"=",
"True",
")",
"return",
"transcription_result"
]
| 37.504587 | 20.513761 |
def get_vulnerability_functions_04(fname):
"""
Parse the vulnerability model in NRML 0.4 format.
:param fname:
path of the vulnerability file
:returns:
a dictionary imt, taxonomy -> vulnerability function + vset
"""
categories = dict(assetCategory=set(), lossCategory=set(),
vulnerabilitySetID=set())
imts = set()
taxonomies = set()
vf_dict = {} # imt, taxonomy -> vulnerability function
for vset in nrml.read(fname).vulnerabilityModel:
categories['assetCategory'].add(vset['assetCategory'])
categories['lossCategory'].add(vset['lossCategory'])
categories['vulnerabilitySetID'].add(vset['vulnerabilitySetID'])
IML = vset.IML
imt_str = IML['IMT']
imls = ~IML
imts.add(imt_str)
for vfun in vset.getnodes('discreteVulnerability'):
taxonomy = vfun['vulnerabilityFunctionID']
if taxonomy in taxonomies:
raise InvalidFile(
'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
(taxonomy, fname, vfun.lineno))
taxonomies.add(taxonomy)
with context(fname, vfun):
loss_ratios = ~vfun.lossRatio
coefficients = ~vfun.coefficientsVariation
if len(loss_ratios) != len(imls):
raise InvalidFile(
'There are %d loss ratios, but %d imls: %s, line %d' %
(len(loss_ratios), len(imls), fname,
vfun.lossRatio.lineno))
if len(coefficients) != len(imls):
raise InvalidFile(
'There are %d coefficients, but %d imls: %s, line %d' %
(len(coefficients), len(imls), fname,
vfun.coefficientsVariation.lineno))
with context(fname, vfun):
vf_dict[imt_str, taxonomy] = scientific.VulnerabilityFunction(
taxonomy, imt_str, imls, loss_ratios, coefficients,
vfun['probabilisticDistribution'])
categories['id'] = '_'.join(sorted(categories['vulnerabilitySetID']))
del categories['vulnerabilitySetID']
return vf_dict, categories | [
"def",
"get_vulnerability_functions_04",
"(",
"fname",
")",
":",
"categories",
"=",
"dict",
"(",
"assetCategory",
"=",
"set",
"(",
")",
",",
"lossCategory",
"=",
"set",
"(",
")",
",",
"vulnerabilitySetID",
"=",
"set",
"(",
")",
")",
"imts",
"=",
"set",
"(",
")",
"taxonomies",
"=",
"set",
"(",
")",
"vf_dict",
"=",
"{",
"}",
"# imt, taxonomy -> vulnerability function",
"for",
"vset",
"in",
"nrml",
".",
"read",
"(",
"fname",
")",
".",
"vulnerabilityModel",
":",
"categories",
"[",
"'assetCategory'",
"]",
".",
"add",
"(",
"vset",
"[",
"'assetCategory'",
"]",
")",
"categories",
"[",
"'lossCategory'",
"]",
".",
"add",
"(",
"vset",
"[",
"'lossCategory'",
"]",
")",
"categories",
"[",
"'vulnerabilitySetID'",
"]",
".",
"add",
"(",
"vset",
"[",
"'vulnerabilitySetID'",
"]",
")",
"IML",
"=",
"vset",
".",
"IML",
"imt_str",
"=",
"IML",
"[",
"'IMT'",
"]",
"imls",
"=",
"~",
"IML",
"imts",
".",
"add",
"(",
"imt_str",
")",
"for",
"vfun",
"in",
"vset",
".",
"getnodes",
"(",
"'discreteVulnerability'",
")",
":",
"taxonomy",
"=",
"vfun",
"[",
"'vulnerabilityFunctionID'",
"]",
"if",
"taxonomy",
"in",
"taxonomies",
":",
"raise",
"InvalidFile",
"(",
"'Duplicated vulnerabilityFunctionID: %s: %s, line %d'",
"%",
"(",
"taxonomy",
",",
"fname",
",",
"vfun",
".",
"lineno",
")",
")",
"taxonomies",
".",
"add",
"(",
"taxonomy",
")",
"with",
"context",
"(",
"fname",
",",
"vfun",
")",
":",
"loss_ratios",
"=",
"~",
"vfun",
".",
"lossRatio",
"coefficients",
"=",
"~",
"vfun",
".",
"coefficientsVariation",
"if",
"len",
"(",
"loss_ratios",
")",
"!=",
"len",
"(",
"imls",
")",
":",
"raise",
"InvalidFile",
"(",
"'There are %d loss ratios, but %d imls: %s, line %d'",
"%",
"(",
"len",
"(",
"loss_ratios",
")",
",",
"len",
"(",
"imls",
")",
",",
"fname",
",",
"vfun",
".",
"lossRatio",
".",
"lineno",
")",
")",
"if",
"len",
"(",
"coefficients",
")",
"!=",
"len",
"(",
"imls",
")",
":",
"raise",
"InvalidFile",
"(",
"'There are %d coefficients, but %d imls: %s, line %d'",
"%",
"(",
"len",
"(",
"coefficients",
")",
",",
"len",
"(",
"imls",
")",
",",
"fname",
",",
"vfun",
".",
"coefficientsVariation",
".",
"lineno",
")",
")",
"with",
"context",
"(",
"fname",
",",
"vfun",
")",
":",
"vf_dict",
"[",
"imt_str",
",",
"taxonomy",
"]",
"=",
"scientific",
".",
"VulnerabilityFunction",
"(",
"taxonomy",
",",
"imt_str",
",",
"imls",
",",
"loss_ratios",
",",
"coefficients",
",",
"vfun",
"[",
"'probabilisticDistribution'",
"]",
")",
"categories",
"[",
"'id'",
"]",
"=",
"'_'",
".",
"join",
"(",
"sorted",
"(",
"categories",
"[",
"'vulnerabilitySetID'",
"]",
")",
")",
"del",
"categories",
"[",
"'vulnerabilitySetID'",
"]",
"return",
"vf_dict",
",",
"categories"
]
| 44.55102 | 15.612245 |
def _create(cls, configuration=None, remoteckan=None, **kwargs):
# type: (Optional['Configuration'], Optional[ckanapi.RemoteCKAN], Any) -> str
"""
Create HDX configuration
Args:
configuration (Optional[Configuration]): Configuration instance. Defaults to setting one up from passed arguments.
remoteckan (Optional[ckanapi.RemoteCKAN]): CKAN instance. Defaults to setting one up from configuration.
**kwargs: See below
user_agent (str): User agent string. HDXPythonLibrary/X.X.X- is prefixed. Must be supplied if remoteckan is not.
user_agent_config_yaml (str): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml.
user_agent_lookup (str): Lookup key for YAML. Ignored if user_agent supplied.
hdx_url (str): HDX url to use. Overrides hdx_site.
hdx_site (str): HDX site to use eg. prod, test.
hdx_read_only (bool): Whether to access HDX in read only mode. Defaults to False.
hdx_key (str): Your HDX key. Ignored if hdx_read_only = True.
hdx_config_dict (dict): HDX configuration dictionary to use instead of above 3 parameters OR
hdx_config_json (str): Path to JSON HDX configuration OR
hdx_config_yaml (str): Path to YAML HDX configuration
project_config_dict (dict): Project configuration dictionary OR
project_config_json (str): Path to JSON Project configuration OR
project_config_yaml (str): Path to YAML Project configuration
hdx_base_config_dict (dict): HDX base configuration dictionary OR
hdx_base_config_json (str): Path to JSON HDX base configuration OR
hdx_base_config_yaml (str): Path to YAML HDX base configuration. Defaults to library's internal hdx_base_configuration.yml.
Returns:
str: HDX site url
"""
kwargs = cls._environment_variables(**kwargs)
cls.setup(configuration, **kwargs)
cls._configuration.setup_remoteckan(remoteckan, **kwargs)
return cls._configuration.get_hdx_site_url() | [
"def",
"_create",
"(",
"cls",
",",
"configuration",
"=",
"None",
",",
"remoteckan",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# type: (Optional['Configuration'], Optional[ckanapi.RemoteCKAN], Any) -> str",
"kwargs",
"=",
"cls",
".",
"_environment_variables",
"(",
"*",
"*",
"kwargs",
")",
"cls",
".",
"setup",
"(",
"configuration",
",",
"*",
"*",
"kwargs",
")",
"cls",
".",
"_configuration",
".",
"setup_remoteckan",
"(",
"remoteckan",
",",
"*",
"*",
"kwargs",
")",
"return",
"cls",
".",
"_configuration",
".",
"get_hdx_site_url",
"(",
")"
]
| 62.911765 | 36.323529 |
def list(self, pattern='*'):
"""Returns a list of groups that match the filters.
Args:
pattern: An optional pattern to filter the groups based on their display
name. This can include Unix shell-style wildcards. E.g.
``"Production*"``.
Returns:
A list of Group objects that match the filters.
"""
if self._group_dict is None:
self._group_dict = collections.OrderedDict(
(group.id, group) for group in self._client.list_groups())
return [group for group in self._group_dict.values()
if fnmatch.fnmatch(group.display_name, pattern)] | [
"def",
"list",
"(",
"self",
",",
"pattern",
"=",
"'*'",
")",
":",
"if",
"self",
".",
"_group_dict",
"is",
"None",
":",
"self",
".",
"_group_dict",
"=",
"collections",
".",
"OrderedDict",
"(",
"(",
"group",
".",
"id",
",",
"group",
")",
"for",
"group",
"in",
"self",
".",
"_client",
".",
"list_groups",
"(",
")",
")",
"return",
"[",
"group",
"for",
"group",
"in",
"self",
".",
"_group_dict",
".",
"values",
"(",
")",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"group",
".",
"display_name",
",",
"pattern",
")",
"]"
]
| 35.294118 | 21.176471 |
def saveAsTable(self, name, format=None, mode=None, partitionBy=None, **options):
"""Saves the content of the :class:`DataFrame` as the specified table.
In the case the table already exists, behavior of this function depends on the
save mode, specified by the `mode` function (default to throwing an exception).
When `mode` is `Overwrite`, the schema of the :class:`DataFrame` does not need to be
the same as that of the existing table.
* `append`: Append contents of this :class:`DataFrame` to existing data.
* `overwrite`: Overwrite existing data.
* `error` or `errorifexists`: Throw an exception if data already exists.
* `ignore`: Silently ignore this operation if data already exists.
:param name: the table name
:param format: the format used to save
:param mode: one of `append`, `overwrite`, `error`, `errorifexists`, `ignore` \
(default: error)
:param partitionBy: names of partitioning columns
:param options: all other string options
"""
self.mode(mode).options(**options)
if partitionBy is not None:
self.partitionBy(partitionBy)
if format is not None:
self.format(format)
self._jwrite.saveAsTable(name) | [
"def",
"saveAsTable",
"(",
"self",
",",
"name",
",",
"format",
"=",
"None",
",",
"mode",
"=",
"None",
",",
"partitionBy",
"=",
"None",
",",
"*",
"*",
"options",
")",
":",
"self",
".",
"mode",
"(",
"mode",
")",
".",
"options",
"(",
"*",
"*",
"options",
")",
"if",
"partitionBy",
"is",
"not",
"None",
":",
"self",
".",
"partitionBy",
"(",
"partitionBy",
")",
"if",
"format",
"is",
"not",
"None",
":",
"self",
".",
"format",
"(",
"format",
")",
"self",
".",
"_jwrite",
".",
"saveAsTable",
"(",
"name",
")"
]
| 49.615385 | 21.115385 |
def get_pool(cls) -> Pool:
"""
Yields:
existing db connection pool
"""
if len(cls._connection_params) < 5:
raise ConnectionError('Please call SQLStore.connect before calling this method')
if not cls._pool:
cls._pool = yield from create_pool(**cls._connection_params)
return cls._pool | [
"def",
"get_pool",
"(",
"cls",
")",
"->",
"Pool",
":",
"if",
"len",
"(",
"cls",
".",
"_connection_params",
")",
"<",
"5",
":",
"raise",
"ConnectionError",
"(",
"'Please call SQLStore.connect before calling this method'",
")",
"if",
"not",
"cls",
".",
"_pool",
":",
"cls",
".",
"_pool",
"=",
"yield",
"from",
"create_pool",
"(",
"*",
"*",
"cls",
".",
"_connection_params",
")",
"return",
"cls",
".",
"_pool"
]
| 35.8 | 15.8 |
def waitForNextState( self ):
        '''After each command has been sent we wait for the observation to change as expected and for a frame.'''
# wait for the observation position to have changed
print('Waiting for observation...', end=' ')
while True:
world_state = self.agent_host.peekWorldState()
if not world_state.is_mission_running:
print('mission ended.')
break
if not all(e.text=='{}' for e in world_state.observations):
obs = json.loads( world_state.observations[-1].text )
self.curr_x = obs[u'XPos']
self.curr_y = obs[u'YPos']
self.curr_z = obs[u'ZPos']
self.curr_yaw = obs[u'Yaw']
if self.require_move:
if math.fabs( self.curr_x - self.prev_x ) > self.tolerance or\
math.fabs( self.curr_y - self.prev_y ) > self.tolerance or\
math.fabs( self.curr_z - self.prev_z ) > self.tolerance:
print('received a move.')
break
elif self.require_yaw_change:
if math.fabs( self.curr_yaw - self.prev_yaw ) > self.tolerance:
print('received a turn.')
break
else:
print('received.')
break
# wait for the render position to have changed
print('Waiting for render...', end=' ')
while True:
world_state = self.agent_host.peekWorldState()
if not world_state.is_mission_running:
print('mission ended.')
break
if len(world_state.video_frames) > 0:
frame = world_state.video_frames[-1]
curr_x_from_render = frame.xPos
curr_y_from_render = frame.yPos
curr_z_from_render = frame.zPos
curr_yaw_from_render = frame.yaw
if self.require_move:
if math.fabs( curr_x_from_render - self.prev_x ) > self.tolerance or\
math.fabs( curr_y_from_render - self.prev_y ) > self.tolerance or\
math.fabs( curr_z_from_render - self.prev_z ) > self.tolerance:
print('received a move.')
break
elif self.require_yaw_change:
if math.fabs( curr_yaw_from_render - self.prev_yaw ) > self.tolerance:
print('received a turn.')
break
else:
print('received.')
break
num_frames_before_get = len(world_state.video_frames)
world_state = self.agent_host.getWorldState()
if save_images:
# save the frame, for debugging
if world_state.is_mission_running:
assert len(world_state.video_frames) > 0, 'No video frames!?'
frame = world_state.video_frames[-1]
image = Image.frombytes('RGB', (frame.width, frame.height), bytes(frame.pixels) )
self.iFrame = self.iFrame + 1
image.save( 'rep_' + str(self.rep).zfill(3) + '_saved_frame_' + str(self.iFrame).zfill(4) + '.png' )
if world_state.is_mission_running:
assert len(world_state.video_frames) > 0, 'No video frames!?'
num_frames_after_get = len(world_state.video_frames)
assert num_frames_after_get >= num_frames_before_get, 'Fewer frames after getWorldState!?'
frame = world_state.video_frames[-1]
obs = json.loads( world_state.observations[-1].text )
self.curr_x = obs[u'XPos']
self.curr_y = obs[u'YPos']
self.curr_z = obs[u'ZPos']
self.curr_yaw = obs[u'Yaw']
print('New position from observation:',self.curr_x,',',self.curr_y,',',self.curr_z,'yaw',self.curr_yaw, end=' ')
if math.fabs( self.curr_x - self.expected_x ) > self.tolerance or\
math.fabs( self.curr_y - self.expected_y ) > self.tolerance or\
math.fabs( self.curr_z - self.expected_z ) > self.tolerance or\
math.fabs( self.curr_yaw - self.expected_yaw ) > self.tolerance:
print(' - ERROR DETECTED! Expected:',self.expected_x,',',self.expected_y,',',self.expected_z,'yaw',self.expected_yaw)
exit(1)
else:
print('as expected.')
curr_x_from_render = frame.xPos
curr_y_from_render = frame.yPos
curr_z_from_render = frame.zPos
curr_yaw_from_render = frame.yaw
print('New position from render:',curr_x_from_render,',',curr_y_from_render,',',curr_z_from_render,'yaw',curr_yaw_from_render, end=' ')
if math.fabs( curr_x_from_render - self.expected_x ) > self.tolerance or\
math.fabs( curr_y_from_render - self.expected_y ) > self.tolerance or \
math.fabs( curr_z_from_render - self.expected_z ) > self.tolerance or \
math.fabs( curr_yaw_from_render - self.expected_yaw ) > self.tolerance:
print(' - ERROR DETECTED! Expected:',self.expected_x,',',self.expected_y,',',self.expected_z,'yaw',self.expected_yaw)
exit(1)
else:
print('as expected.')
self.prev_x = self.curr_x
self.prev_y = self.curr_y
self.prev_z = self.curr_z
self.prev_yaw = self.curr_yaw
return world_state | [
"def",
"waitForNextState",
"(",
"self",
")",
":",
"# wait for the observation position to have changed",
"print",
"(",
"'Waiting for observation...'",
",",
"end",
"=",
"' '",
")",
"while",
"True",
":",
"world_state",
"=",
"self",
".",
"agent_host",
".",
"peekWorldState",
"(",
")",
"if",
"not",
"world_state",
".",
"is_mission_running",
":",
"print",
"(",
"'mission ended.'",
")",
"break",
"if",
"not",
"all",
"(",
"e",
".",
"text",
"==",
"'{}'",
"for",
"e",
"in",
"world_state",
".",
"observations",
")",
":",
"obs",
"=",
"json",
".",
"loads",
"(",
"world_state",
".",
"observations",
"[",
"-",
"1",
"]",
".",
"text",
")",
"self",
".",
"curr_x",
"=",
"obs",
"[",
"u'XPos'",
"]",
"self",
".",
"curr_y",
"=",
"obs",
"[",
"u'YPos'",
"]",
"self",
".",
"curr_z",
"=",
"obs",
"[",
"u'ZPos'",
"]",
"self",
".",
"curr_yaw",
"=",
"obs",
"[",
"u'Yaw'",
"]",
"if",
"self",
".",
"require_move",
":",
"if",
"math",
".",
"fabs",
"(",
"self",
".",
"curr_x",
"-",
"self",
".",
"prev_x",
")",
">",
"self",
".",
"tolerance",
"or",
"math",
".",
"fabs",
"(",
"self",
".",
"curr_y",
"-",
"self",
".",
"prev_y",
")",
">",
"self",
".",
"tolerance",
"or",
"math",
".",
"fabs",
"(",
"self",
".",
"curr_z",
"-",
"self",
".",
"prev_z",
")",
">",
"self",
".",
"tolerance",
":",
"print",
"(",
"'received a move.'",
")",
"break",
"elif",
"self",
".",
"require_yaw_change",
":",
"if",
"math",
".",
"fabs",
"(",
"self",
".",
"curr_yaw",
"-",
"self",
".",
"prev_yaw",
")",
">",
"self",
".",
"tolerance",
":",
"print",
"(",
"'received a turn.'",
")",
"break",
"else",
":",
"print",
"(",
"'received.'",
")",
"break",
"# wait for the render position to have changed",
"print",
"(",
"'Waiting for render...'",
",",
"end",
"=",
"' '",
")",
"while",
"True",
":",
"world_state",
"=",
"self",
".",
"agent_host",
".",
"peekWorldState",
"(",
")",
"if",
"not",
"world_state",
".",
"is_mission_running",
":",
"print",
"(",
"'mission ended.'",
")",
"break",
"if",
"len",
"(",
"world_state",
".",
"video_frames",
")",
">",
"0",
":",
"frame",
"=",
"world_state",
".",
"video_frames",
"[",
"-",
"1",
"]",
"curr_x_from_render",
"=",
"frame",
".",
"xPos",
"curr_y_from_render",
"=",
"frame",
".",
"yPos",
"curr_z_from_render",
"=",
"frame",
".",
"zPos",
"curr_yaw_from_render",
"=",
"frame",
".",
"yaw",
"if",
"self",
".",
"require_move",
":",
"if",
"math",
".",
"fabs",
"(",
"curr_x_from_render",
"-",
"self",
".",
"prev_x",
")",
">",
"self",
".",
"tolerance",
"or",
"math",
".",
"fabs",
"(",
"curr_y_from_render",
"-",
"self",
".",
"prev_y",
")",
">",
"self",
".",
"tolerance",
"or",
"math",
".",
"fabs",
"(",
"curr_z_from_render",
"-",
"self",
".",
"prev_z",
")",
">",
"self",
".",
"tolerance",
":",
"print",
"(",
"'received a move.'",
")",
"break",
"elif",
"self",
".",
"require_yaw_change",
":",
"if",
"math",
".",
"fabs",
"(",
"curr_yaw_from_render",
"-",
"self",
".",
"prev_yaw",
")",
">",
"self",
".",
"tolerance",
":",
"print",
"(",
"'received a turn.'",
")",
"break",
"else",
":",
"print",
"(",
"'received.'",
")",
"break",
"num_frames_before_get",
"=",
"len",
"(",
"world_state",
".",
"video_frames",
")",
"world_state",
"=",
"self",
".",
"agent_host",
".",
"getWorldState",
"(",
")",
"if",
"save_images",
":",
"# save the frame, for debugging",
"if",
"world_state",
".",
"is_mission_running",
":",
"assert",
"len",
"(",
"world_state",
".",
"video_frames",
")",
">",
"0",
",",
"'No video frames!?'",
"frame",
"=",
"world_state",
".",
"video_frames",
"[",
"-",
"1",
"]",
"image",
"=",
"Image",
".",
"frombytes",
"(",
"'RGB'",
",",
"(",
"frame",
".",
"width",
",",
"frame",
".",
"height",
")",
",",
"bytes",
"(",
"frame",
".",
"pixels",
")",
")",
"self",
".",
"iFrame",
"=",
"self",
".",
"iFrame",
"+",
"1",
"image",
".",
"save",
"(",
"'rep_'",
"+",
"str",
"(",
"self",
".",
"rep",
")",
".",
"zfill",
"(",
"3",
")",
"+",
"'_saved_frame_'",
"+",
"str",
"(",
"self",
".",
"iFrame",
")",
".",
"zfill",
"(",
"4",
")",
"+",
"'.png'",
")",
"if",
"world_state",
".",
"is_mission_running",
":",
"assert",
"len",
"(",
"world_state",
".",
"video_frames",
")",
">",
"0",
",",
"'No video frames!?'",
"num_frames_after_get",
"=",
"len",
"(",
"world_state",
".",
"video_frames",
")",
"assert",
"num_frames_after_get",
">=",
"num_frames_before_get",
",",
"'Fewer frames after getWorldState!?'",
"frame",
"=",
"world_state",
".",
"video_frames",
"[",
"-",
"1",
"]",
"obs",
"=",
"json",
".",
"loads",
"(",
"world_state",
".",
"observations",
"[",
"-",
"1",
"]",
".",
"text",
")",
"self",
".",
"curr_x",
"=",
"obs",
"[",
"u'XPos'",
"]",
"self",
".",
"curr_y",
"=",
"obs",
"[",
"u'YPos'",
"]",
"self",
".",
"curr_z",
"=",
"obs",
"[",
"u'ZPos'",
"]",
"self",
".",
"curr_yaw",
"=",
"obs",
"[",
"u'Yaw'",
"]",
"print",
"(",
"'New position from observation:'",
",",
"self",
".",
"curr_x",
",",
"','",
",",
"self",
".",
"curr_y",
",",
"','",
",",
"self",
".",
"curr_z",
",",
"'yaw'",
",",
"self",
".",
"curr_yaw",
",",
"end",
"=",
"' '",
")",
"if",
"math",
".",
"fabs",
"(",
"self",
".",
"curr_x",
"-",
"self",
".",
"expected_x",
")",
">",
"self",
".",
"tolerance",
"or",
"math",
".",
"fabs",
"(",
"self",
".",
"curr_y",
"-",
"self",
".",
"expected_y",
")",
">",
"self",
".",
"tolerance",
"or",
"math",
".",
"fabs",
"(",
"self",
".",
"curr_z",
"-",
"self",
".",
"expected_z",
")",
">",
"self",
".",
"tolerance",
"or",
"math",
".",
"fabs",
"(",
"self",
".",
"curr_yaw",
"-",
"self",
".",
"expected_yaw",
")",
">",
"self",
".",
"tolerance",
":",
"print",
"(",
"' - ERROR DETECTED! Expected:'",
",",
"self",
".",
"expected_x",
",",
"','",
",",
"self",
".",
"expected_y",
",",
"','",
",",
"self",
".",
"expected_z",
",",
"'yaw'",
",",
"self",
".",
"expected_yaw",
")",
"exit",
"(",
"1",
")",
"else",
":",
"print",
"(",
"'as expected.'",
")",
"curr_x_from_render",
"=",
"frame",
".",
"xPos",
"curr_y_from_render",
"=",
"frame",
".",
"yPos",
"curr_z_from_render",
"=",
"frame",
".",
"zPos",
"curr_yaw_from_render",
"=",
"frame",
".",
"yaw",
"print",
"(",
"'New position from render:'",
",",
"curr_x_from_render",
",",
"','",
",",
"curr_y_from_render",
",",
"','",
",",
"curr_z_from_render",
",",
"'yaw'",
",",
"curr_yaw_from_render",
",",
"end",
"=",
"' '",
")",
"if",
"math",
".",
"fabs",
"(",
"curr_x_from_render",
"-",
"self",
".",
"expected_x",
")",
">",
"self",
".",
"tolerance",
"or",
"math",
".",
"fabs",
"(",
"curr_y_from_render",
"-",
"self",
".",
"expected_y",
")",
">",
"self",
".",
"tolerance",
"or",
"math",
".",
"fabs",
"(",
"curr_z_from_render",
"-",
"self",
".",
"expected_z",
")",
">",
"self",
".",
"tolerance",
"or",
"math",
".",
"fabs",
"(",
"curr_yaw_from_render",
"-",
"self",
".",
"expected_yaw",
")",
">",
"self",
".",
"tolerance",
":",
"print",
"(",
"' - ERROR DETECTED! Expected:'",
",",
"self",
".",
"expected_x",
",",
"','",
",",
"self",
".",
"expected_y",
",",
"','",
",",
"self",
".",
"expected_z",
",",
"'yaw'",
",",
"self",
".",
"expected_yaw",
")",
"exit",
"(",
"1",
")",
"else",
":",
"print",
"(",
"'as expected.'",
")",
"self",
".",
"prev_x",
"=",
"self",
".",
"curr_x",
"self",
".",
"prev_y",
"=",
"self",
".",
"curr_y",
"self",
".",
"prev_z",
"=",
"self",
".",
"curr_z",
"self",
".",
"prev_yaw",
"=",
"self",
".",
"curr_yaw",
"return",
"world_state"
]
| 53.285714 | 22.047619 |