text (string, lengths 89-104k) | code_tokens (list) | avg_line_len (float64, 7.91-980) | score (float64, 0-630)
---|---|---|---|
def three_d_effect(img, **kwargs):
    """Create 3D effect using convolution"""
    w = kwargs.get('weight', 1)
    LOG.debug("Applying 3D effect with weight %.2f", w)
    kernel = np.array([[-w, 0, w],
                       [-w, 1, w],
                       [-w, 0, w]])
    mode = kwargs.get('convolve_mode', 'same')

    def func(band_data, kernel=kernel, mode=mode, index=None):
        del index
        delay = dask.delayed(_three_d_effect_delayed)(band_data, kernel, mode)
        new_data = da.from_delayed(delay, shape=band_data.shape, dtype=band_data.dtype)
        return new_data

    return apply_enhancement(img.data, func, separate=True, pass_dask=True)
|
[
"def",
"three_d_effect",
"(",
"img",
",",
"*",
"*",
"kwargs",
")",
":",
"w",
"=",
"kwargs",
".",
"get",
"(",
"'weight'",
",",
"1",
")",
"LOG",
".",
"debug",
"(",
"\"Applying 3D effect with weight %.2f\"",
",",
"w",
")",
"kernel",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"-",
"w",
",",
"0",
",",
"w",
"]",
",",
"[",
"-",
"w",
",",
"1",
",",
"w",
"]",
",",
"[",
"-",
"w",
",",
"0",
",",
"w",
"]",
"]",
")",
"mode",
"=",
"kwargs",
".",
"get",
"(",
"'convolve_mode'",
",",
"'same'",
")",
"def",
"func",
"(",
"band_data",
",",
"kernel",
"=",
"kernel",
",",
"mode",
"=",
"mode",
",",
"index",
"=",
"None",
")",
":",
"del",
"index",
"delay",
"=",
"dask",
".",
"delayed",
"(",
"_three_d_effect_delayed",
")",
"(",
"band_data",
",",
"kernel",
",",
"mode",
")",
"new_data",
"=",
"da",
".",
"from_delayed",
"(",
"delay",
",",
"shape",
"=",
"band_data",
".",
"shape",
",",
"dtype",
"=",
"band_data",
".",
"dtype",
")",
"return",
"new_data",
"return",
"apply_enhancement",
"(",
"img",
".",
"data",
",",
"func",
",",
"separate",
"=",
"True",
",",
"pass_dask",
"=",
"True",
")"
] | 38.529412 | 20.882353 |
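The `three_d_effect` row above wraps an emboss-style convolution kernel in dask delayed calls. A minimal standalone sketch of the same kernel applied with SciPy (assuming `scipy` is available; this is not satpy's actual `_three_d_effect_delayed` helper, just an illustration of the kernel's effect):

```python
import numpy as np
from scipy.signal import convolve2d

def emboss(band, weight=1.0, mode='same'):
    # Same kernel shape as three_d_effect above: negative weights on the
    # left column, positive weights on the right, centre pixel kept at 1.
    kernel = np.array([[-weight, 0, weight],
                       [-weight, 1, weight],
                       [-weight, 0, weight]])
    return convolve2d(band, kernel, mode=mode)

band = np.random.rand(8, 8)
print(emboss(band, weight=1.0).shape)  # (8, 8), since mode='same' keeps shape
```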
def config_attributes(self):
    """
    Helper method used by TorConfig when generating a torrc file.
    """
    rtn = [('HiddenServiceDir', str(self.dir))]
    if self.conf._supports['HiddenServiceDirGroupReadable'] \
            and self.group_readable:
        rtn.append(('HiddenServiceDirGroupReadable', str(1)))
    for port in self.ports:
        rtn.append(('HiddenServicePort', str(port)))
    if self.version:
        rtn.append(('HiddenServiceVersion', str(self.version)))
    for authline in self.authorize_client:
        rtn.append(('HiddenServiceAuthorizeClient', str(authline)))
    return rtn
|
[
"def",
"config_attributes",
"(",
"self",
")",
":",
"rtn",
"=",
"[",
"(",
"'HiddenServiceDir'",
",",
"str",
"(",
"self",
".",
"dir",
")",
")",
"]",
"if",
"self",
".",
"conf",
".",
"_supports",
"[",
"'HiddenServiceDirGroupReadable'",
"]",
"and",
"self",
".",
"group_readable",
":",
"rtn",
".",
"append",
"(",
"(",
"'HiddenServiceDirGroupReadable'",
",",
"str",
"(",
"1",
")",
")",
")",
"for",
"port",
"in",
"self",
".",
"ports",
":",
"rtn",
".",
"append",
"(",
"(",
"'HiddenServicePort'",
",",
"str",
"(",
"port",
")",
")",
")",
"if",
"self",
".",
"version",
":",
"rtn",
".",
"append",
"(",
"(",
"'HiddenServiceVersion'",
",",
"str",
"(",
"self",
".",
"version",
")",
")",
")",
"for",
"authline",
"in",
"self",
".",
"authorize_client",
":",
"rtn",
".",
"append",
"(",
"(",
"'HiddenServiceAuthorizeClient'",
",",
"str",
"(",
"authline",
")",
")",
")",
"return",
"rtn"
] | 40.5 | 17.125 |
def get_logical_plan(cluster, environ, topology, role=None):
    '''
    Get the logical plan state of a topology in a cluster
    :param cluster:
    :param environ:
    :param topology:
    :param role:
    :return:
    '''
    params = dict(cluster=cluster, environ=environ, topology=topology)
    if role is not None:
        params['role'] = role
    request_url = tornado.httputil.url_concat(
        create_url(LOGICALPLAN_URL_FMT), params)
    raise tornado.gen.Return((yield fetch_url_as_json(request_url)))
|
[
"def",
"get_logical_plan",
"(",
"cluster",
",",
"environ",
",",
"topology",
",",
"role",
"=",
"None",
")",
":",
"params",
"=",
"dict",
"(",
"cluster",
"=",
"cluster",
",",
"environ",
"=",
"environ",
",",
"topology",
"=",
"topology",
")",
"if",
"role",
"is",
"not",
"None",
":",
"params",
"[",
"'role'",
"]",
"=",
"role",
"request_url",
"=",
"tornado",
".",
"httputil",
".",
"url_concat",
"(",
"create_url",
"(",
"LOGICALPLAN_URL_FMT",
")",
",",
"params",
")",
"raise",
"tornado",
".",
"gen",
".",
"Return",
"(",
"(",
"yield",
"fetch_url_as_json",
"(",
"request_url",
")",
")",
")"
] | 31.466667 | 21.733333 |
def get_default_jvm_opts(tmp_dir=None, parallel_gc=False):
    """Retrieve default JVM tuning options.

    Avoids issues with multiple spun up Java processes running into out of memory errors.
    Parallel GC can use a lot of cores on big machines and primarily helps reduce task latency
    and responsiveness which are not needed for batch jobs.
    https://github.com/bcbio/bcbio-nextgen/issues/532#issuecomment-50989027
    https://wiki.csiro.au/pages/viewpage.action?pageId=545034311
    http://stackoverflow.com/questions/9738911/javas-serial-garbage-collector-performing-far-better-than-other-garbage-collect
    However, serial GC causes issues with Spark local runs so we use parallel for those cases:
    https://github.com/broadinstitute/gatk/issues/3605#issuecomment-332370070
    """
    opts = ["-XX:+UseSerialGC"] if not parallel_gc else []
    if tmp_dir:
        opts.append("-Djava.io.tmpdir=%s" % tmp_dir)
    return opts
|
[
"def",
"get_default_jvm_opts",
"(",
"tmp_dir",
"=",
"None",
",",
"parallel_gc",
"=",
"False",
")",
":",
"opts",
"=",
"[",
"\"-XX:+UseSerialGC\"",
"]",
"if",
"not",
"parallel_gc",
"else",
"[",
"]",
"if",
"tmp_dir",
":",
"opts",
".",
"append",
"(",
"\"-Djava.io.tmpdir=%s\"",
"%",
"tmp_dir",
")",
"return",
"opts"
] | 57.8125 | 31 |
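Used on its own, the helper above just assembles a list of flags to splice into a `java` command line. A quick usage sketch (the four-line body is repeated from the row above so the snippet runs standalone; expected output shown in comments):

```python
def get_default_jvm_opts(tmp_dir=None, parallel_gc=False):
    opts = ["-XX:+UseSerialGC"] if not parallel_gc else []
    if tmp_dir:
        opts.append("-Djava.io.tmpdir=%s" % tmp_dir)
    return opts

print(get_default_jvm_opts("/tmp/work"))
# ['-XX:+UseSerialGC', '-Djava.io.tmpdir=/tmp/work']
print(get_default_jvm_opts("/tmp/work", parallel_gc=True))
# ['-Djava.io.tmpdir=/tmp/work']
```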
def validate_id(tx_body):
    """Validate the transaction ID of a transaction

    Args:
        tx_body (dict): The Transaction to be transformed.
    """
    # NOTE: Remove reference to avoid side effects
    # tx_body = deepcopy(tx_body)
    tx_body = rapidjson.loads(rapidjson.dumps(tx_body))
    try:
        proposed_tx_id = tx_body['id']
    except KeyError:
        raise InvalidHash('No transaction id found!')
    tx_body['id'] = None
    tx_body_serialized = Transaction._to_str(tx_body)
    valid_tx_id = Transaction._to_hash(tx_body_serialized)
    if proposed_tx_id != valid_tx_id:
        err_msg = ("The transaction's id '{}' isn't equal to "
                   "the hash of its body, i.e. it's not valid.")
        raise InvalidHash(err_msg.format(proposed_tx_id))
|
[
"def",
"validate_id",
"(",
"tx_body",
")",
":",
"# NOTE: Remove reference to avoid side effects",
"# tx_body = deepcopy(tx_body)",
"tx_body",
"=",
"rapidjson",
".",
"loads",
"(",
"rapidjson",
".",
"dumps",
"(",
"tx_body",
")",
")",
"try",
":",
"proposed_tx_id",
"=",
"tx_body",
"[",
"'id'",
"]",
"except",
"KeyError",
":",
"raise",
"InvalidHash",
"(",
"'No transaction id found!'",
")",
"tx_body",
"[",
"'id'",
"]",
"=",
"None",
"tx_body_serialized",
"=",
"Transaction",
".",
"_to_str",
"(",
"tx_body",
")",
"valid_tx_id",
"=",
"Transaction",
".",
"_to_hash",
"(",
"tx_body_serialized",
")",
"if",
"proposed_tx_id",
"!=",
"valid_tx_id",
":",
"err_msg",
"=",
"(",
"\"The transaction's id '{}' isn't equal to \"",
"\"the hash of its body, i.e. it's not valid.\"",
")",
"raise",
"InvalidHash",
"(",
"err_msg",
".",
"format",
"(",
"proposed_tx_id",
")",
")"
] | 35.083333 | 20.416667 |
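The validation pattern above (null out the `id`, re-serialize, re-hash, compare) can be sketched with `json` and `hashlib` standing in for BigchainDB's `Transaction._to_str`/`Transaction._to_hash`. The real serialization and hash differ; the names here are illustrative only:

```python
import hashlib
import json

def body_hash(tx_body):
    # Stand-in for Transaction._to_str + Transaction._to_hash:
    # canonical JSON, then a SHA3-256 hex digest.
    serialized = json.dumps(tx_body, sort_keys=True, separators=(',', ':'))
    return hashlib.sha3_256(serialized.encode()).hexdigest()

tx = {'inputs': [], 'outputs': [], 'id': None}
tx['id'] = body_hash(tx)             # sealing step: hash of the id-less body
proposed, tx['id'] = tx['id'], None  # validation step: null the id again
assert proposed == body_hash(tx)     # the id round-trips
```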
def bresenham_line(self, x0, y0, x1, y1, color=None, colorFunc=None):
    """
    Draw line from point x0, y0 to x1, y1 using Bresenham's algorithm.
    Will draw beyond matrix bounds.
    """
    md.bresenham_line(self.set, x0, y0, x1, y1, color, colorFunc)
|
[
"def",
"bresenham_line",
"(",
"self",
",",
"x0",
",",
"y0",
",",
"x1",
",",
"y1",
",",
"color",
"=",
"None",
",",
"colorFunc",
"=",
"None",
")",
":",
"md",
".",
"bresenham_line",
"(",
"self",
".",
"set",
",",
"x0",
",",
"y0",
",",
"x1",
",",
"y1",
",",
"color",
",",
"colorFunc",
")"
] | 39 | 19 |
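The row above delegates to an `md.bresenham_line` helper that is not shown. A self-contained, integer-only version of Bresenham's algorithm (the standard formulation, geometry only, not necessarily the exact variant `md` implements and without the color arguments) is:

```python
def bresenham(x0, y0, x1, y1):
    """Yield the integer points on the line from (x0, y0) to (x1, y1)."""
    dx, dy = abs(x1 - x0), -abs(y1 - y0)
    sx = 1 if x0 < x1 else -1
    sy = 1 if y0 < y1 else -1
    err = dx + dy  # accumulated error term
    while True:
        yield x0, y0
        if x0 == x1 and y0 == y1:
            return
        e2 = 2 * err
        if e2 >= dy:   # step in x
            err += dy
            x0 += sx
        if e2 <= dx:   # step in y
            err += dx
            y0 += sy

print(list(bresenham(0, 0, 4, 2)))
# [(0, 0), (1, 1), (2, 1), (3, 2), (4, 2)]
```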
def evaluate(self, values):
    ''' Evaluates the expression to an integer

    values is a dictionary that associates n-bit variables to integer
    values. Every symbolic variable used in the expression must be
    represented.

    For instance, let x and y be 4-bit variables, and e = x+y:

    >>> mba = MBA(4)
    >>> x = mba.var('x')
    >>> y = mba.var('y')
    >>> e = x+y

    To evaluate e with x=4 and y=5, we can do:

    >>> e.eval({x: 4, y: 5})
    9

    If a variable is missing from values, an exception will occur. (x
    or y in the example above)
    '''
    ret = self.mba.evaluate(self.vec, values)
    if isinstance(ret, six.integer_types):
        return ret
    return self.from_vec(self.mba, ret)
|
[
"def",
"evaluate",
"(",
"self",
",",
"values",
")",
":",
"ret",
"=",
"self",
".",
"mba",
".",
"evaluate",
"(",
"self",
".",
"vec",
",",
"values",
")",
"if",
"isinstance",
"(",
"ret",
",",
"six",
".",
"integer_types",
")",
":",
"return",
"ret",
"return",
"self",
".",
"from_vec",
"(",
"self",
".",
"mba",
",",
"ret",
")"
] | 29.769231 | 22.461538 |
def clear_mappers():
    """
    Clears all mappers set up by SA and also clears all custom "id" and
    "slug" attributes inserted by the :func:`mapper` function in this module.

    This should only ever be needed in a testing context.
    """
    # Remove our hybrid property constructs.
    for mpr, is_primary in _mapper_registry.items():
        if is_primary:
            for attr_name in ('id', 'slug'):
                try:
                    attr = object.__getattribute__(mpr.class_, attr_name)
                    if isinstance(attr, hybrid_property):
                        if attr_name == 'id':
                            delattr(mpr.class_, attr_name)
                        else:
                            setattr(mpr.class_, attr_name, attr.descriptor)
                except AttributeError:
                    pass
    sa_clear_mappers()
|
[
"def",
"clear_mappers",
"(",
")",
":",
"# Remove our hybrid property constructs.",
"for",
"mpr",
",",
"is_primary",
"in",
"_mapper_registry",
".",
"items",
"(",
")",
":",
"if",
"is_primary",
":",
"for",
"attr_name",
"in",
"(",
"'id'",
",",
"'slug'",
")",
":",
"try",
":",
"attr",
"=",
"object",
".",
"__getattribute__",
"(",
"mpr",
".",
"class_",
",",
"attr_name",
")",
"if",
"isinstance",
"(",
"attr",
",",
"hybrid_property",
")",
":",
"if",
"attr_name",
"==",
"'id'",
":",
"delattr",
"(",
"mpr",
".",
"class_",
",",
"attr_name",
")",
"else",
":",
"setattr",
"(",
"mpr",
".",
"class_",
",",
"attr_name",
",",
"attr",
".",
"descriptor",
")",
"except",
"AttributeError",
":",
"pass",
"sa_clear_mappers",
"(",
")"
] | 40.095238 | 17.047619 |
def matching(self, packages):
    """Message for matching packages
    """
    print("\nNot found package with the name [ {0}{1}{2} ]. "
          "Matching packages:\nNOTE: Not dependenc"
          "ies are resolved\n".format(self.meta.color["CYAN"],
                                      "".join(packages),
                                      self.meta.color["ENDC"]))
|
[
"def",
"matching",
"(",
"self",
",",
"packages",
")",
":",
"print",
"(",
"\"\\nNot found package with the name [ {0}{1}{2} ]. \"",
"\"Matching packages:\\nNOTE: Not dependenc\"",
"\"ies are resolved\\n\"",
".",
"format",
"(",
"self",
".",
"meta",
".",
"color",
"[",
"\"CYAN\"",
"]",
",",
"\"\"",
".",
"join",
"(",
"packages",
")",
",",
"self",
".",
"meta",
".",
"color",
"[",
"\"ENDC\"",
"]",
")",
")"
] | 49.125 | 15.5 |
def getobjpath(obj, path):
    """Returns an item or attribute of the object recursively.
    Item names are specified between brackets, eg: [item].
    Attribute names are prefixed with a dot (the first one is optional), eg: .attr
    Example: getobjpath(obj, "attr1.attr2[item].attr3")
    """
    if not path:
        return obj
    if path.startswith("["):
        item = path[1:path.index("]")]
        return getobjpath(obj[item], path[len(item) + 2:])
    if path.startswith("."):
        path = path[1:]
    if "." in path or "[" in path:
        dot_idx = path.find(".")
        bracket_idx = path.find("[")
        # A missing "[" (find() == -1) must not count as "bracket before dot",
        # otherwise purely dotted paths like "a.b" would slice incorrectly.
        if dot_idx == -1 or (bracket_idx != -1 and bracket_idx < dot_idx):
            idx = bracket_idx
            next_idx = idx
        else:
            idx = dot_idx
            next_idx = idx + 1
        attr = path[:idx]
        return getobjpath(getattr(obj, attr), path[next_idx:])
    return getattr(obj, path)
|
[
"def",
"getobjpath",
"(",
"obj",
",",
"path",
")",
":",
"if",
"not",
"path",
":",
"return",
"obj",
"if",
"path",
".",
"startswith",
"(",
"\"[\"",
")",
":",
"item",
"=",
"path",
"[",
"1",
":",
"path",
".",
"index",
"(",
"\"]\"",
")",
"]",
"return",
"getobjpath",
"(",
"obj",
"[",
"item",
"]",
",",
"path",
"[",
"len",
"(",
"item",
")",
"+",
"2",
":",
"]",
")",
"if",
"path",
".",
"startswith",
"(",
"\".\"",
")",
":",
"path",
"=",
"path",
"[",
"1",
":",
"]",
"if",
"\".\"",
"in",
"path",
"or",
"\"[\"",
"in",
"path",
":",
"dot_idx",
"=",
"path",
".",
"find",
"(",
"\".\"",
")",
"bracket_idx",
"=",
"path",
".",
"find",
"(",
"\"[\"",
")",
"if",
"dot_idx",
"==",
"-",
"1",
"or",
"bracket_idx",
"<",
"dot_idx",
":",
"idx",
"=",
"bracket_idx",
"next_idx",
"=",
"idx",
"else",
":",
"idx",
"=",
"dot_idx",
"next_idx",
"=",
"idx",
"+",
"1",
"attr",
"=",
"path",
"[",
":",
"idx",
"]",
"return",
"getobjpath",
"(",
"getattr",
"(",
"obj",
",",
"attr",
")",
",",
"path",
"[",
"next_idx",
":",
"]",
")",
"return",
"getattr",
"(",
"obj",
",",
"path",
")"
] | 35.6 | 13.96 |
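A quick usage sketch of the path syntax described in the docstring, assuming the `getobjpath` function from the row above is in scope (the throwaway `SimpleNamespace` objects are not part of the original module):

```python
from types import SimpleNamespace

obj = SimpleNamespace(
    attr1=SimpleNamespace(attr2={'item': SimpleNamespace(attr3=42)}))

print(getobjpath(obj, "attr1.attr2[item].attr3"))  # 42
print(getobjpath(obj, ".attr1"))                   # the attr1 namespace
print(getobjpath(obj, ""))                         # obj itself
```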
def _get_traceback_no_io():
    """
    Return a version of L{traceback} that doesn't do I/O.
    """
    try:
        module = load_module(str("_traceback_no_io"), traceback)
    except NotImplementedError:
        # Can't fix the I/O problem, oh well:
        return traceback

    class FakeLineCache(object):
        def checkcache(self, *args, **kwargs):
            None

        def getline(self, *args, **kwargs):
            return ""

        def lazycache(self, *args, **kwargs):
            return None

    module.linecache = FakeLineCache()
    return module
|
[
"def",
"_get_traceback_no_io",
"(",
")",
":",
"try",
":",
"module",
"=",
"load_module",
"(",
"str",
"(",
"\"_traceback_no_io\"",
")",
",",
"traceback",
")",
"except",
"NotImplementedError",
":",
"# Can't fix the I/O problem, oh well:",
"return",
"traceback",
"class",
"FakeLineCache",
"(",
"object",
")",
":",
"def",
"checkcache",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"None",
"def",
"getline",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"\"\"",
"def",
"lazycache",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"None",
"module",
".",
"linecache",
"=",
"FakeLineCache",
"(",
")",
"return",
"module"
] | 25.045455 | 17.409091 |
def configure_user(self, user, attributes, attribute_mapping):
    """Configures a user after creation and returns the updated user.

    By default, returns the user with his attributes updated.
    """
    user.set_unusable_password()
    return self.update_user(user, attributes, attribute_mapping,
                            force_save=True)
|
[
"def",
"configure_user",
"(",
"self",
",",
"user",
",",
"attributes",
",",
"attribute_mapping",
")",
":",
"user",
".",
"set_unusable_password",
"(",
")",
"return",
"self",
".",
"update_user",
"(",
"user",
",",
"attributes",
",",
"attribute_mapping",
",",
"force_save",
"=",
"True",
")"
] | 45.375 | 15.875 |
def _load_string_from_native_memory(self, addr_):
    """
    Load zero terminated UTF-8 string from native memory.

    :param addr_: Native load address.
    :return: Loaded string.
    """
    # check if addr is symbolic
    if self.state.solver.symbolic(addr_):
        l.error("Loading strings from symbolic addresses is not implemented. "
                "Continue execution with an empty string.")
        return ""
    addr = self.state.solver.eval(addr_)

    # load chars one by one
    chars = []
    for i in itertools.count():
        str_byte = self.state.memory.load(addr+i, size=1)
        if self.state.solver.symbolic(str_byte):
            l.error("Loading of strings with symbolic chars is not supported. "
                    "Character %d is concretized.", i)
        str_byte = self.state.solver.eval(str_byte)
        if str_byte == 0:
            break
        chars.append(chr(str_byte))
    return "".join(chars)
|
[
"def",
"_load_string_from_native_memory",
"(",
"self",
",",
"addr_",
")",
":",
"# check if addr is symbolic",
"if",
"self",
".",
"state",
".",
"solver",
".",
"symbolic",
"(",
"addr_",
")",
":",
"l",
".",
"error",
"(",
"\"Loading strings from symbolic addresses is not implemented. \"",
"\"Continue execution with an empty string.\"",
")",
"return",
"\"\"",
"addr",
"=",
"self",
".",
"state",
".",
"solver",
".",
"eval",
"(",
"addr_",
")",
"# load chars one by one",
"chars",
"=",
"[",
"]",
"for",
"i",
"in",
"itertools",
".",
"count",
"(",
")",
":",
"str_byte",
"=",
"self",
".",
"state",
".",
"memory",
".",
"load",
"(",
"addr",
"+",
"i",
",",
"size",
"=",
"1",
")",
"if",
"self",
".",
"state",
".",
"solver",
".",
"symbolic",
"(",
"str_byte",
")",
":",
"l",
".",
"error",
"(",
"\"Loading of strings with symbolic chars is not supported. \"",
"\"Character %d is concretized.\"",
",",
"i",
")",
"str_byte",
"=",
"self",
".",
"state",
".",
"solver",
".",
"eval",
"(",
"str_byte",
")",
"if",
"str_byte",
"==",
"0",
":",
"break",
"chars",
".",
"append",
"(",
"chr",
"(",
"str_byte",
")",
")",
"return",
"\"\"",
".",
"join",
"(",
"chars",
")"
] | 37.444444 | 16.333333 |
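Outside of angr's symbolic memory model, the same zero-terminated scan reduces to a few lines over a plain byte buffer. A concrete sketch of the loop's logic (the buffer and helper name are illustrative, not angr APIs):

```python
import itertools

def load_c_string(memory: bytes, addr: int) -> str:
    """Read bytes from addr until the first NUL, mirroring the loop above."""
    chars = []
    for i in itertools.count():
        b = memory[addr + i]  # concrete load instead of state.memory.load
        if b == 0:
            break
        chars.append(chr(b))
    return "".join(chars)

buf = b"junk\x00hello\x00more"
print(load_c_string(buf, 5))  # 'hello'
```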
def user_id(self):
    """Who created the event (:class:`~hangups.user.UserID`)."""
    return user.UserID(chat_id=self._event.sender_id.chat_id,
                       gaia_id=self._event.sender_id.gaia_id)
|
[
"def",
"user_id",
"(",
"self",
")",
":",
"return",
"user",
".",
"UserID",
"(",
"chat_id",
"=",
"self",
".",
"_event",
".",
"sender_id",
".",
"chat_id",
",",
"gaia_id",
"=",
"self",
".",
"_event",
".",
"sender_id",
".",
"gaia_id",
")"
] | 54 | 18 |
def open(self):
    """Retrieve this file's attributes from the server.

    Returns a Future.

    .. versionchanged:: 2.0
       No longer accepts a callback argument.

    .. versionchanged:: 0.2
       :class:`~motor.MotorGridOut` now opens itself on demand, calling
       ``open`` explicitly is rarely needed.
    """
    return self._framework.chain_return_value(self._ensure_file(),
                                              self.get_io_loop(),
                                              self)
|
[
"def",
"open",
"(",
"self",
")",
":",
"return",
"self",
".",
"_framework",
".",
"chain_return_value",
"(",
"self",
".",
"_ensure_file",
"(",
")",
",",
"self",
".",
"get_io_loop",
"(",
")",
",",
"self",
")"
] | 35.866667 | 20.266667 |
def generate_VJ_junction_transfer_matrices(self):
    """Compute the transfer matrices for the VJ junction.

    Sets the attributes Tvj, Svj, Dvj, lTvj, and lDvj.
    """
    nt2num = {'A': 0, 'C': 1, 'G': 2, 'T': 3}

    #Compute Tvj
    Tvj = {}
    for aa in self.codons_dict.keys():
        current_Tvj = np.zeros((4, 4))
        for init_nt in 'ACGT':
            for codon in self.codons_dict[aa]:
                current_Tvj[nt2num[codon[2]], nt2num[init_nt]] += self.Rvj[nt2num[codon[2]], nt2num[codon[1]]]*self.Rvj[nt2num[codon[1]], nt2num[codon[0]]] * self.Rvj[nt2num[codon[0]], nt2num[init_nt]]
        Tvj[aa] = current_Tvj

    #Compute Svj
    Svj = {}
    for aa in self.codons_dict.keys():
        current_Svj = np.zeros((4, 4))
        for ins_nt in 'ACGT':
            if any([codon.startswith(ins_nt) for codon in self.codons_dict[aa]]):
                current_Svj[nt2num[ins_nt], :] = self.Rvj[nt2num[ins_nt], :]
        Svj[aa] = current_Svj

    #Compute Dvj
    Dvj = {}
    for aa in self.codons_dict.keys():
        current_Dvj = np.zeros((4, 4))
        for init_nt in 'ACGT':
            for codon in self.codons_dict[aa]:
                current_Dvj[nt2num[codon[2]], nt2num[init_nt]] += self.Rvj[nt2num[codon[1]], nt2num[codon[0]]] * self.Rvj[nt2num[codon[0]], nt2num[init_nt]]
        Dvj[aa] = current_Dvj

    #Compute lTvj
    lTvj = {}
    for aa in self.codons_dict.keys():
        current_lTvj = np.zeros((4, 4))
        for codon in self.codons_dict[aa]:
            current_lTvj[nt2num[codon[2]], nt2num[codon[0]]] += self.Rvj[nt2num[codon[2]], nt2num[codon[1]]]*self.first_nt_bias_insVJ[nt2num[codon[1]]]
        lTvj[aa] = current_lTvj

    #Compute lDvj
    lDvj = {}
    for aa in self.codons_dict.keys():
        current_lDvj = np.zeros((4, 4))
        for codon in self.codons_dict[aa]:
            current_lDvj[nt2num[codon[2]], nt2num[codon[0]]] += self.first_nt_bias_insVJ[nt2num[codon[1]]]
        lDvj[aa] = current_lDvj

    #Set the attributes
    self.Tvj = Tvj
    self.Svj = Svj
    self.Dvj = Dvj
    self.lTvj = lTvj
    self.lDvj = lDvj
|
[
"def",
"generate_VJ_junction_transfer_matrices",
"(",
"self",
")",
":",
"nt2num",
"=",
"{",
"'A'",
":",
"0",
",",
"'C'",
":",
"1",
",",
"'G'",
":",
"2",
",",
"'T'",
":",
"3",
"}",
"#Compute Tvj",
"Tvj",
"=",
"{",
"}",
"for",
"aa",
"in",
"self",
".",
"codons_dict",
".",
"keys",
"(",
")",
":",
"current_Tvj",
"=",
"np",
".",
"zeros",
"(",
"(",
"4",
",",
"4",
")",
")",
"for",
"init_nt",
"in",
"'ACGT'",
":",
"for",
"codon",
"in",
"self",
".",
"codons_dict",
"[",
"aa",
"]",
":",
"current_Tvj",
"[",
"nt2num",
"[",
"codon",
"[",
"2",
"]",
"]",
",",
"nt2num",
"[",
"init_nt",
"]",
"]",
"+=",
"self",
".",
"Rvj",
"[",
"nt2num",
"[",
"codon",
"[",
"2",
"]",
"]",
",",
"nt2num",
"[",
"codon",
"[",
"1",
"]",
"]",
"]",
"*",
"self",
".",
"Rvj",
"[",
"nt2num",
"[",
"codon",
"[",
"1",
"]",
"]",
",",
"nt2num",
"[",
"codon",
"[",
"0",
"]",
"]",
"]",
"*",
"self",
".",
"Rvj",
"[",
"nt2num",
"[",
"codon",
"[",
"0",
"]",
"]",
",",
"nt2num",
"[",
"init_nt",
"]",
"]",
"Tvj",
"[",
"aa",
"]",
"=",
"current_Tvj",
"#Compute Svj",
"Svj",
"=",
"{",
"}",
"for",
"aa",
"in",
"self",
".",
"codons_dict",
".",
"keys",
"(",
")",
":",
"current_Svj",
"=",
"np",
".",
"zeros",
"(",
"(",
"4",
",",
"4",
")",
")",
"for",
"ins_nt",
"in",
"'ACGT'",
":",
"if",
"any",
"(",
"[",
"codon",
".",
"startswith",
"(",
"ins_nt",
")",
"for",
"codon",
"in",
"self",
".",
"codons_dict",
"[",
"aa",
"]",
"]",
")",
":",
"current_Svj",
"[",
"nt2num",
"[",
"ins_nt",
"]",
",",
":",
"]",
"=",
"self",
".",
"Rvj",
"[",
"nt2num",
"[",
"ins_nt",
"]",
",",
":",
"]",
"Svj",
"[",
"aa",
"]",
"=",
"current_Svj",
"#Compute Dvj ",
"Dvj",
"=",
"{",
"}",
"for",
"aa",
"in",
"self",
".",
"codons_dict",
".",
"keys",
"(",
")",
":",
"current_Dvj",
"=",
"np",
".",
"zeros",
"(",
"(",
"4",
",",
"4",
")",
")",
"for",
"init_nt",
"in",
"'ACGT'",
":",
"for",
"codon",
"in",
"self",
".",
"codons_dict",
"[",
"aa",
"]",
":",
"current_Dvj",
"[",
"nt2num",
"[",
"codon",
"[",
"2",
"]",
"]",
",",
"nt2num",
"[",
"init_nt",
"]",
"]",
"+=",
"self",
".",
"Rvj",
"[",
"nt2num",
"[",
"codon",
"[",
"1",
"]",
"]",
",",
"nt2num",
"[",
"codon",
"[",
"0",
"]",
"]",
"]",
"*",
"self",
".",
"Rvj",
"[",
"nt2num",
"[",
"codon",
"[",
"0",
"]",
"]",
",",
"nt2num",
"[",
"init_nt",
"]",
"]",
"Dvj",
"[",
"aa",
"]",
"=",
"current_Dvj",
"#Compute lTvj",
"lTvj",
"=",
"{",
"}",
"for",
"aa",
"in",
"self",
".",
"codons_dict",
".",
"keys",
"(",
")",
":",
"current_lTvj",
"=",
"np",
".",
"zeros",
"(",
"(",
"4",
",",
"4",
")",
")",
"for",
"codon",
"in",
"self",
".",
"codons_dict",
"[",
"aa",
"]",
":",
"current_lTvj",
"[",
"nt2num",
"[",
"codon",
"[",
"2",
"]",
"]",
",",
"nt2num",
"[",
"codon",
"[",
"0",
"]",
"]",
"]",
"+=",
"self",
".",
"Rvj",
"[",
"nt2num",
"[",
"codon",
"[",
"2",
"]",
"]",
",",
"nt2num",
"[",
"codon",
"[",
"1",
"]",
"]",
"]",
"*",
"self",
".",
"first_nt_bias_insVJ",
"[",
"nt2num",
"[",
"codon",
"[",
"1",
"]",
"]",
"]",
"lTvj",
"[",
"aa",
"]",
"=",
"current_lTvj",
"#Compute lDvj ",
"lDvj",
"=",
"{",
"}",
"for",
"aa",
"in",
"self",
".",
"codons_dict",
".",
"keys",
"(",
")",
":",
"current_lDvj",
"=",
"np",
".",
"zeros",
"(",
"(",
"4",
",",
"4",
")",
")",
"for",
"codon",
"in",
"self",
".",
"codons_dict",
"[",
"aa",
"]",
":",
"current_lDvj",
"[",
"nt2num",
"[",
"codon",
"[",
"2",
"]",
"]",
",",
"nt2num",
"[",
"codon",
"[",
"0",
"]",
"]",
"]",
"+=",
"self",
".",
"first_nt_bias_insVJ",
"[",
"nt2num",
"[",
"codon",
"[",
"1",
"]",
"]",
"]",
"lDvj",
"[",
"aa",
"]",
"=",
"current_lDvj",
"#Set the attributes",
"self",
".",
"Tvj",
"=",
"Tvj",
"self",
".",
"Svj",
"=",
"Svj",
"self",
".",
"Dvj",
"=",
"Dvj",
"self",
".",
"lTvj",
"=",
"lTvj",
"self",
".",
"lDvj",
"=",
"lDvj"
] | 39.627119 | 22.949153 |
def hazard_class_style(layer, classification, display_null=False):
    """Set colors to the layer according to the hazard.

    :param layer: The layer to style.
    :type layer: QgsVectorLayer

    :param display_null: If we should display the null hazard zone. Default to
        False.
    :type display_null: bool

    :param classification: The hazard classification to use.
    :type classification: dict safe.definitions.hazard_classifications
    """
    categories = []
    # Conditional styling
    attribute_table_styles = []
    for hazard_class, (color, label) in list(classification.items()):
        if hazard_class == not_exposed_class['key'] and not display_null:
            # We don't want to display the null value (not exposed).
            # We skip it.
            continue
        symbol = QgsSymbol.defaultSymbol(layer.geometryType())
        symbol.setColor(color)
        if is_line_layer(layer):
            symbol.setWidth(line_width_exposure)
        category = QgsRendererCategory(hazard_class, symbol, label)
        categories.append(category)

        style = QgsConditionalStyle()
        style.setName(hazard_class)
        style.setRule("hazard_class='%s'" % hazard_class)
        style.setBackgroundColor(transparent)
        symbol = QgsSymbol.defaultSymbol(QgsWkbTypes.PointGeometry)
        symbol.setColor(color)
        symbol.setSize(3)
        style.setSymbol(symbol)
        attribute_table_styles.append(style)

    layer.conditionalStyles().setFieldStyles(
        'hazard_class', attribute_table_styles)
    renderer = QgsCategorizedSymbolRenderer(
        hazard_class_field['field_name'], categories)
    layer.setRenderer(renderer)
|
[
"def",
"hazard_class_style",
"(",
"layer",
",",
"classification",
",",
"display_null",
"=",
"False",
")",
":",
"categories",
"=",
"[",
"]",
"# Conditional styling",
"attribute_table_styles",
"=",
"[",
"]",
"for",
"hazard_class",
",",
"(",
"color",
",",
"label",
")",
"in",
"list",
"(",
"classification",
".",
"items",
"(",
")",
")",
":",
"if",
"hazard_class",
"==",
"not_exposed_class",
"[",
"'key'",
"]",
"and",
"not",
"display_null",
":",
"# We don't want to display the null value (not exposed).",
"# We skip it.",
"continue",
"symbol",
"=",
"QgsSymbol",
".",
"defaultSymbol",
"(",
"layer",
".",
"geometryType",
"(",
")",
")",
"symbol",
".",
"setColor",
"(",
"color",
")",
"if",
"is_line_layer",
"(",
"layer",
")",
":",
"symbol",
".",
"setWidth",
"(",
"line_width_exposure",
")",
"category",
"=",
"QgsRendererCategory",
"(",
"hazard_class",
",",
"symbol",
",",
"label",
")",
"categories",
".",
"append",
"(",
"category",
")",
"style",
"=",
"QgsConditionalStyle",
"(",
")",
"style",
".",
"setName",
"(",
"hazard_class",
")",
"style",
".",
"setRule",
"(",
"\"hazard_class='%s'\"",
"%",
"hazard_class",
")",
"style",
".",
"setBackgroundColor",
"(",
"transparent",
")",
"symbol",
"=",
"QgsSymbol",
".",
"defaultSymbol",
"(",
"QgsWkbTypes",
".",
"PointGeometry",
")",
"symbol",
".",
"setColor",
"(",
"color",
")",
"symbol",
".",
"setSize",
"(",
"3",
")",
"style",
".",
"setSymbol",
"(",
"symbol",
")",
"attribute_table_styles",
".",
"append",
"(",
"style",
")",
"layer",
".",
"conditionalStyles",
"(",
")",
".",
"setFieldStyles",
"(",
"'hazard_class'",
",",
"attribute_table_styles",
")",
"renderer",
"=",
"QgsCategorizedSymbolRenderer",
"(",
"hazard_class_field",
"[",
"'field_name'",
"]",
",",
"categories",
")",
"layer",
".",
"setRenderer",
"(",
"renderer",
")"
] | 35.673913 | 18.847826 |
def generate_folds(node_label_matrix, labelled_node_indices, number_of_categories, percentage, number_of_folds=10):
    """
    Form the seed nodes for training and testing.

    Inputs: - node_label_matrix: The node-label ground truth in a SciPy sparse matrix format.
            - labelled_node_indices: A NumPy array containing the labelled node indices.
            - number_of_categories: The number of categories/classes in the learning.
            - percentage: The percentage of labelled samples that will be used for training.

    Output: - folds: A generator containing train/test set folds.
    """
    number_of_labeled_nodes = labelled_node_indices.size
    training_set_size = int(np.ceil(percentage*number_of_labeled_nodes/100))

    ####################################################################################################################
    # Generate folds
    ####################################################################################################################
    train_list = list()
    test_list = list()
    for trial in np.arange(number_of_folds):
        train, test = valid_train_test(node_label_matrix[labelled_node_indices, :],
                                       training_set_size,
                                       number_of_categories,
                                       trial)
        train = labelled_node_indices[train]
        test = labelled_node_indices[test]
        train_list.append(train)
        test_list.append(test)
    folds = ((train, test) for train, test in zip(train_list, test_list))
    return folds
|
[
"def",
"generate_folds",
"(",
"node_label_matrix",
",",
"labelled_node_indices",
",",
"number_of_categories",
",",
"percentage",
",",
"number_of_folds",
"=",
"10",
")",
":",
"number_of_labeled_nodes",
"=",
"labelled_node_indices",
".",
"size",
"training_set_size",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"percentage",
"*",
"number_of_labeled_nodes",
"/",
"100",
")",
")",
"####################################################################################################################",
"# Generate folds",
"####################################################################################################################",
"train_list",
"=",
"list",
"(",
")",
"test_list",
"=",
"list",
"(",
")",
"for",
"trial",
"in",
"np",
".",
"arange",
"(",
"number_of_folds",
")",
":",
"train",
",",
"test",
"=",
"valid_train_test",
"(",
"node_label_matrix",
"[",
"labelled_node_indices",
",",
":",
"]",
",",
"training_set_size",
",",
"number_of_categories",
",",
"trial",
")",
"train",
"=",
"labelled_node_indices",
"[",
"train",
"]",
"test",
"=",
"labelled_node_indices",
"[",
"test",
"]",
"train_list",
".",
"append",
"(",
"train",
")",
"test_list",
".",
"append",
"(",
"test",
")",
"folds",
"=",
"(",
"(",
"train",
",",
"test",
")",
"for",
"train",
",",
"test",
"in",
"zip",
"(",
"train_list",
",",
"test_list",
")",
")",
"return",
"folds"
] | 49.03125 | 29.65625 |
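The `valid_train_test` helper used above is not shown in this row; it rejects splits that miss a category. A simplified fold generator that keeps the same shapes and percentage math but uses plain random splits, with no category-validity check, might look like:

```python
import numpy as np

def simple_folds(labelled_node_indices, percentage, number_of_folds=10, seed=0):
    # Simplified stand-in: random permutation split, no per-category checks.
    rng = np.random.RandomState(seed)
    n = labelled_node_indices.size
    train_size = int(np.ceil(percentage * n / 100))
    for _ in range(number_of_folds):
        perm = rng.permutation(n)
        yield (labelled_node_indices[perm[:train_size]],
               labelled_node_indices[perm[train_size:]])

nodes = np.arange(100, 120)
for train, test in simple_folds(nodes, percentage=30, number_of_folds=2):
    print(train.size, test.size)  # 6 14, printed twice
```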
def compare_orderings(info_frags_records, linkage_orderings):
    """Given linkage groups and info_frags records, link pseudo-chromosomes to
    scaffolds based on the initial contig composition of each group. Because
    info_frags records are usually richer and may contain contigs not found
    in linkage groups, those extra sequences are discarded.

    Example with two linkage groups and two chromosomes:

    >>> linkage_orderings = {
    ...     'linkage_group_1': [
    ...         ['sctg_516', -3, 36614, 50582, 1],
    ...         ['sctg_486', -3, 41893, 41893, 1],
    ...         ['sctg_486', -3, 50054, 62841, 1],
    ...         ['sctg_207', -3, 31842, 94039, 1],
    ...         ['sctg_558', -3, 51212, 54212, 1],
    ...     ],
    ...     'linkage_group_2': [
    ...         ['sctg_308', -3, 15892, 25865, 1],
    ...         ['sctg_842', -3, 0, 8974, 1],
    ...         ['sctg_994', -3, 0, 81213, 1],
    ...     ],
    ... }
    >>> info_frags = {
    ...     'scaffold_A': [
    ...         ['sctg_308', 996, 15892, 25865, 1],
    ...         ['sctg_778', 1210, 45040, 78112, -1],
    ...         ['sctg_842', 124, 0, 8974, 1],
    ...     ],
    ...     'scaffold_B': [
    ...         ['sctg_516', 5, 0, 38000, 1],
    ...         ['sctg_486', 47, 42050, 49000, 1],
    ...         ['sctg_1755', 878, 95001, 10844, -1],
    ...         ['sctg_842', 126, 19000, 26084, 1],
    ...         ['sctg_207', 705, 45500, 87056, 1],
    ...     ],
    ...     'scaffold_C': [
    ...         ['sctg_558', 745, 50045, 67851, 1],
    ...         ['sctg_994', 12, 74201, 86010, -1],
    ...     ],
    ... }
    >>> matching_pairs = compare_orderings(info_frags, linkage_orderings)
    >>> matching_pairs['scaffold_B']
    (3, 'linkage_group_1', {'sctg_558': 'sctg_207'})
    >>> matching_pairs['scaffold_A']
    (2, 'linkage_group_2', {'sctg_994': 'sctg_842'})
    """
    scaffolds = info_frags_records.keys()
    linkage_groups = linkage_orderings.keys()
    best_matching_table = dict()
    for scaffold, linkage_group in itertools.product(
        scaffolds, linkage_groups
    ):
        lg_ordering = [
            init_contig
            for init_contig, _ in itertools.groupby(
                linkage_orderings[linkage_group], operator.itemgetter(0)
            )
        ]
        scaffold_ordering = [
            init_contig
            for init_contig, bin_group in itertools.groupby(
                info_frags_records[scaffold], operator.itemgetter(0)
            )
            if init_contig in lg_ordering
        ]
        overlap = set(lg_ordering).intersection(set(scaffold_ordering))
        missing_locations = dict()
        for missing_block in sorted(set(lg_ordering) - set(overlap)):
            for i, init_contig in enumerate(lg_ordering):
                if init_contig == missing_block:
                    try:
                        block_before = lg_ordering[i - 1]
                    except IndexError:
                        block_before = "beginning"
                    missing_locations[missing_block] = block_before
        try:
            if len(overlap) > best_matching_table[scaffold][0]:
                best_matching_table[scaffold] = (
                    len(overlap),
                    linkage_group,
                    missing_locations,
                )
        except KeyError:
            best_matching_table[scaffold] = (
                len(overlap),
                linkage_group,
                missing_locations,
            )
    return best_matching_table
|
[
"def",
"compare_orderings",
"(",
"info_frags_records",
",",
"linkage_orderings",
")",
":",
"scaffolds",
"=",
"info_frags_records",
".",
"keys",
"(",
")",
"linkage_groups",
"=",
"linkage_orderings",
".",
"keys",
"(",
")",
"best_matching_table",
"=",
"dict",
"(",
")",
"for",
"scaffold",
",",
"linkage_group",
"in",
"itertools",
".",
"product",
"(",
"scaffolds",
",",
"linkage_groups",
")",
":",
"lg_ordering",
"=",
"[",
"init_contig",
"for",
"init_contig",
",",
"_",
"in",
"itertools",
".",
"groupby",
"(",
"linkage_orderings",
"[",
"linkage_group",
"]",
",",
"operator",
".",
"itemgetter",
"(",
"0",
")",
")",
"]",
"scaffold_ordering",
"=",
"[",
"init_contig",
"for",
"init_contig",
",",
"bin_group",
"in",
"itertools",
".",
"groupby",
"(",
"info_frags_records",
"[",
"scaffold",
"]",
",",
"operator",
".",
"itemgetter",
"(",
"0",
")",
")",
"if",
"init_contig",
"in",
"lg_ordering",
"]",
"overlap",
"=",
"set",
"(",
"lg_ordering",
")",
".",
"intersection",
"(",
"set",
"(",
"scaffold_ordering",
")",
")",
"missing_locations",
"=",
"dict",
"(",
")",
"for",
"missing_block",
"in",
"sorted",
"(",
"set",
"(",
"lg_ordering",
")",
"-",
"set",
"(",
"overlap",
")",
")",
":",
"for",
"i",
",",
"init_contig",
"in",
"enumerate",
"(",
"lg_ordering",
")",
":",
"if",
"init_contig",
"==",
"missing_block",
":",
"try",
":",
"block_before",
"=",
"lg_ordering",
"[",
"i",
"-",
"1",
"]",
"except",
"IndexError",
":",
"block_before",
"=",
"\"beginning\"",
"missing_locations",
"[",
"missing_block",
"]",
"=",
"block_before",
"try",
":",
"if",
"len",
"(",
"overlap",
")",
">",
"best_matching_table",
"[",
"scaffold",
"]",
"[",
"0",
"]",
":",
"best_matching_table",
"[",
"scaffold",
"]",
"=",
"(",
"len",
"(",
"overlap",
")",
",",
"linkage_group",
",",
"missing_locations",
",",
")",
"except",
"KeyError",
":",
"best_matching_table",
"[",
"scaffold",
"]",
"=",
"(",
"len",
"(",
"overlap",
")",
",",
"linkage_group",
",",
"missing_locations",
",",
")",
"return",
"best_matching_table"
] | 36.683673 | 18.204082 |
def configure_crud(graph, ns, mappings):
    """
    Register CRUD endpoints for a resource object.

    :param mappings: a dictionary from operations to tuple, where each tuple contains
                     the target function and zero or more marshmallow schemas according
                     to the signature of the "register_<foo>_endpoint" functions

    Example mapping:

        {
            Operation.Create: (create_foo, NewFooSchema(), FooSchema()),
            Operation.Delete: (delete_foo,),
            Operation.Retrieve: (retrieve_foo, FooSchema()),
            Operation.Search: (search_foo, SearchFooSchema(), FooSchema(), [ResponseFormats.CSV]),
        }

    """
    convention = CRUDConvention(graph)
    convention.configure(ns, mappings)
|
[
"def",
"configure_crud",
"(",
"graph",
",",
"ns",
",",
"mappings",
")",
":",
"convention",
"=",
"CRUDConvention",
"(",
"graph",
")",
"convention",
".",
"configure",
"(",
"ns",
",",
"mappings",
")"
] | 37.2 | 25.1 |
def _delete(
        self, sock_info, criteria, multi,
        write_concern=None, op_id=None, ordered=True,
        collation=None, session=None, retryable_write=False):
    """Internal delete helper."""
    common.validate_is_mapping("filter", criteria)
    write_concern = write_concern or self.write_concern
    acknowledged = write_concern.acknowledged
    delete_doc = SON([('q', criteria),
                      ('limit', int(not multi))])
    collation = validate_collation_or_none(collation)
    if collation is not None:
        if sock_info.max_wire_version < 5:
            raise ConfigurationError(
                'Must be connected to MongoDB 3.4+ to use collations.')
        elif not acknowledged:
            raise ConfigurationError(
                'Collation is unsupported for unacknowledged writes.')
        else:
            delete_doc['collation'] = collation
    command = SON([('delete', self.name),
                   ('ordered', ordered),
                   ('deletes', [delete_doc])])
    if not write_concern.is_server_default:
        command['writeConcern'] = write_concern.document

    if not sock_info.op_msg_enabled and not acknowledged:
        # Legacy OP_DELETE.
        return self._legacy_write(
            sock_info, 'delete', command, op_id,
            False, message.delete, self.__full_name, criteria,
            False, write_concern.document,
            self.__write_response_codec_options,
            int(not multi))
    # Delete command.
    result = sock_info.command(
        self.__database.name,
        command,
        write_concern=write_concern,
        codec_options=self.__write_response_codec_options,
        session=session,
        client=self.__database.client,
        retryable_write=retryable_write)
    _check_write_command_response(result)
    return result
|
[
"def",
"_delete",
"(",
"self",
",",
"sock_info",
",",
"criteria",
",",
"multi",
",",
"write_concern",
"=",
"None",
",",
"op_id",
"=",
"None",
",",
"ordered",
"=",
"True",
",",
"collation",
"=",
"None",
",",
"session",
"=",
"None",
",",
"retryable_write",
"=",
"False",
")",
":",
"common",
".",
"validate_is_mapping",
"(",
"\"filter\"",
",",
"criteria",
")",
"write_concern",
"=",
"write_concern",
"or",
"self",
".",
"write_concern",
"acknowledged",
"=",
"write_concern",
".",
"acknowledged",
"delete_doc",
"=",
"SON",
"(",
"[",
"(",
"'q'",
",",
"criteria",
")",
",",
"(",
"'limit'",
",",
"int",
"(",
"not",
"multi",
")",
")",
"]",
")",
"collation",
"=",
"validate_collation_or_none",
"(",
"collation",
")",
"if",
"collation",
"is",
"not",
"None",
":",
"if",
"sock_info",
".",
"max_wire_version",
"<",
"5",
":",
"raise",
"ConfigurationError",
"(",
"'Must be connected to MongoDB 3.4+ to use collations.'",
")",
"elif",
"not",
"acknowledged",
":",
"raise",
"ConfigurationError",
"(",
"'Collation is unsupported for unacknowledged writes.'",
")",
"else",
":",
"delete_doc",
"[",
"'collation'",
"]",
"=",
"collation",
"command",
"=",
"SON",
"(",
"[",
"(",
"'delete'",
",",
"self",
".",
"name",
")",
",",
"(",
"'ordered'",
",",
"ordered",
")",
",",
"(",
"'deletes'",
",",
"[",
"delete_doc",
"]",
")",
"]",
")",
"if",
"not",
"write_concern",
".",
"is_server_default",
":",
"command",
"[",
"'writeConcern'",
"]",
"=",
"write_concern",
".",
"document",
"if",
"not",
"sock_info",
".",
"op_msg_enabled",
"and",
"not",
"acknowledged",
":",
"# Legacy OP_DELETE.",
"return",
"self",
".",
"_legacy_write",
"(",
"sock_info",
",",
"'delete'",
",",
"command",
",",
"op_id",
",",
"False",
",",
"message",
".",
"delete",
",",
"self",
".",
"__full_name",
",",
"criteria",
",",
"False",
",",
"write_concern",
".",
"document",
",",
"self",
".",
"__write_response_codec_options",
",",
"int",
"(",
"not",
"multi",
")",
")",
"# Delete command.",
"result",
"=",
"sock_info",
".",
"command",
"(",
"self",
".",
"__database",
".",
"name",
",",
"command",
",",
"write_concern",
"=",
"write_concern",
",",
"codec_options",
"=",
"self",
".",
"__write_response_codec_options",
",",
"session",
"=",
"session",
",",
"client",
"=",
"self",
".",
"__database",
".",
"client",
",",
"retryable_write",
"=",
"retryable_write",
")",
"_check_write_command_response",
"(",
"result",
")",
"return",
"result"
] | 43.555556 | 12.6 |
def pop(self):
    """Pop the next future from the queue;
    in progress futures have priority over those that have not yet started;
    higher level futures have priority over lower level ones; """
    self.updateQueue()
    # If our buffer is underflowing, request more Futures
    if self.timelen(self) < self.lowwatermark:
        self.requestFuture()
    # If an unmovable Future is ready to be executed, return it
    if len(self.ready) != 0:
        return self.ready.popleft()
    # Then, use Futures in the movable queue
    elif len(self.movable) != 0:
        return self.movable.popleft()
    else:
        # Otherwise, block until a new task arrives
        self.lastStatus = time.time()
        while len(self) == 0:
            # Block until message arrives
            self.askForPreviousFutures()
            self.socket._poll(POLLING_TIME)
            self.updateQueue()
        if len(self.ready) != 0:
            return self.ready.popleft()
        elif len(self.movable) != 0:
            return self.movable.popleft()
|
[
"def",
"pop",
"(",
"self",
")",
":",
"self",
".",
"updateQueue",
"(",
")",
"# If our buffer is underflowing, request more Futures",
"if",
"self",
".",
"timelen",
"(",
"self",
")",
"<",
"self",
".",
"lowwatermark",
":",
"self",
".",
"requestFuture",
"(",
")",
"# If an unmovable Future is ready to be executed, return it",
"if",
"len",
"(",
"self",
".",
"ready",
")",
"!=",
"0",
":",
"return",
"self",
".",
"ready",
".",
"popleft",
"(",
")",
"# Then, use Futures in the movable queue",
"elif",
"len",
"(",
"self",
".",
"movable",
")",
"!=",
"0",
":",
"return",
"self",
".",
"movable",
".",
"popleft",
"(",
")",
"else",
":",
"# Otherwise, block until a new task arrives",
"self",
".",
"lastStatus",
"=",
"time",
".",
"time",
"(",
")",
"while",
"len",
"(",
"self",
")",
"==",
"0",
":",
"# Block until message arrives",
"self",
".",
"askForPreviousFutures",
"(",
")",
"self",
".",
"socket",
".",
"_poll",
"(",
"POLLING_TIME",
")",
"self",
".",
"updateQueue",
"(",
")",
"if",
"len",
"(",
"self",
".",
"ready",
")",
"!=",
"0",
":",
"return",
"self",
".",
"ready",
".",
"popleft",
"(",
")",
"elif",
"len",
"(",
"self",
".",
"movable",
")",
"!=",
"0",
":",
"return",
"self",
".",
"movable",
".",
"popleft",
"(",
")"
] | 38.482759 | 12.793103 |
def execute_and_recommend(self, drop_doses=False):
    """
    Execute and recommend a best-fitting model. If drop_doses and no model
    is recommended, drop the highest dose-group and repeat until either:

    1. a model is recommended, or
    2. the dataset is exhausted (i.e., only 3 dose-groups remain).

    The session instance is equal to the final run which was executed; if doses were dropped
    all previous sessions are saved in self.doses_dropped_sessions.
    """
    self.execute()
    self.recommend()
    if not drop_doses:
        return
    while self.recommended_model is None and self.dataset.num_dose_groups > 3:
        self.doses_dropped_sessions[self.doses_dropped] = self.clone()
        self.dataset.drop_dose()
        self.execute()
        self.recommend()
        self.doses_dropped += 1
|
[
"def",
"execute_and_recommend",
"(",
"self",
",",
"drop_doses",
"=",
"False",
")",
":",
"self",
".",
"execute",
"(",
")",
"self",
".",
"recommend",
"(",
")",
"if",
"not",
"drop_doses",
":",
"return",
"while",
"self",
".",
"recommended_model",
"is",
"None",
"and",
"self",
".",
"dataset",
".",
"num_dose_groups",
">",
"3",
":",
"self",
".",
"doses_dropped_sessions",
"[",
"self",
".",
"doses_dropped",
"]",
"=",
"self",
".",
"clone",
"(",
")",
"self",
".",
"dataset",
".",
"drop_dose",
"(",
")",
"self",
".",
"execute",
"(",
")",
"self",
".",
"recommend",
"(",
")",
"self",
".",
"doses_dropped",
"+=",
"1"
] | 37.869565 | 23.695652 |
def resize(mountpoint, size):
    '''
    Resize filesystem.

    General options:

    * **mountpoint**: Specify the BTRFS mountpoint to resize.
    * **size**: ([+/-]<newsize>[kKmMgGtTpPeE]|max) Specify the new size of the target.

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.resize /mountpoint size=+1g
        salt '*' btrfs.resize /dev/sda1 size=max
    '''
    if size == 'max':
        if not salt.utils.fsutils._is_device(mountpoint):
            raise CommandExecutionError("Mountpoint \"{0}\" should be a valid device".format(mountpoint))
        if not salt.utils.fsutils._get_mounts("btrfs").get(mountpoint):
            raise CommandExecutionError("Device \"{0}\" should be mounted".format(mountpoint))
    elif len(size) < 3 or size[0] not in '-+' \
            or size[-1] not in 'kKmMgGtTpPeE' or re.sub(r"\d", "", size[1:][:-1]):
        raise CommandExecutionError("Unknown size: \"{0}\". Expected: [+/-]<newsize>[kKmMgGtTpPeE]|max".format(size))
    out = __salt__['cmd.run_all']('btrfs filesystem resize {0} {1}'.format(size, mountpoint))
    salt.utils.fsutils._verify_run(out)
    ret = {'log': out['stdout']}
    ret.update(__salt__['btrfs.info'](mountpoint))
    return ret
|
[
"def",
"resize",
"(",
"mountpoint",
",",
"size",
")",
":",
"if",
"size",
"==",
"'max'",
":",
"if",
"not",
"salt",
".",
"utils",
".",
"fsutils",
".",
"_is_device",
"(",
"mountpoint",
")",
":",
"raise",
"CommandExecutionError",
"(",
"\"Mountpoint \\\"{0}\\\" should be a valid device\"",
".",
"format",
"(",
"mountpoint",
")",
")",
"if",
"not",
"salt",
".",
"utils",
".",
"fsutils",
".",
"_get_mounts",
"(",
"\"btrfs\"",
")",
".",
"get",
"(",
"mountpoint",
")",
":",
"raise",
"CommandExecutionError",
"(",
"\"Device \\\"{0}\\\" should be mounted\"",
".",
"format",
"(",
"mountpoint",
")",
")",
"elif",
"len",
"(",
"size",
")",
"<",
"3",
"or",
"size",
"[",
"0",
"]",
"not",
"in",
"'-+'",
"or",
"size",
"[",
"-",
"1",
"]",
"not",
"in",
"'kKmMgGtTpPeE'",
"or",
"re",
".",
"sub",
"(",
"r\"\\d\"",
",",
"\"\"",
",",
"size",
"[",
"1",
":",
"]",
"[",
":",
"-",
"1",
"]",
")",
":",
"raise",
"CommandExecutionError",
"(",
"\"Unknown size: \\\"{0}\\\". Expected: [+/-]<newsize>[kKmMgGtTpPeE]|max\"",
".",
"format",
"(",
"size",
")",
")",
"out",
"=",
"__salt__",
"[",
"'cmd.run_all'",
"]",
"(",
"'btrfs filesystem resize {0} {1}'",
".",
"format",
"(",
"size",
",",
"mountpoint",
")",
")",
"salt",
".",
"utils",
".",
"fsutils",
".",
"_verify_run",
"(",
"out",
")",
"ret",
"=",
"{",
"'log'",
":",
"out",
"[",
"'stdout'",
"]",
"}",
"ret",
".",
"update",
"(",
"__salt__",
"[",
"'btrfs.info'",
"]",
"(",
"mountpoint",
")",
")",
"return",
"ret"
] | 36.030303 | 30.515152 |
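The size-string check above packs three conditions into one `elif`. Unrolled into a standalone validator (same logic, pure Python, runnable without salt; the function name is illustrative):

```python
import re

def valid_resize_arg(size):
    """Accept 'max' or [+/-]<digits><kKmMgGtTpPeE>, as in resize() above."""
    if size == 'max':
        return True
    return (len(size) >= 3
            and size[0] in '-+'
            and size[-1] in 'kKmMgGtTpPeE'
            # stripping digits from the middle must leave nothing behind
            and not re.sub(r"\d", "", size[1:][:-1]))

for arg in ('+1g', '-500m', 'max', '1g', '+g', '+1x'):
    print(arg, valid_resize_arg(arg))
# +1g True, -500m True, max True, 1g False, +g False, +1x False
```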
def cancel_scheduled_hangup(self, call_params):
    """REST Cancel a Scheduled Hangup Helper
    """
    path = '/' + self.api_version + '/CancelScheduledHangup/'
    method = 'POST'
    return self.request(path, method, call_params)
|
[
"def",
"cancel_scheduled_hangup",
"(",
"self",
",",
"call_params",
")",
":",
"path",
"=",
"'/'",
"+",
"self",
".",
"api_version",
"+",
"'/CancelScheduledHangup/'",
"method",
"=",
"'POST'",
"return",
"self",
".",
"request",
"(",
"path",
",",
"method",
",",
"call_params",
")"
] | 41.333333 | 10.5 |
def put(self, request, bot_id, id, format=None):
    """
    Update existing Messenger chat state
    ---
    serializer: MessengerChatStateSerializer
    responseMessages:
        - code: 401
          message: Not authenticated
        - code: 400
          message: Not valid request
    """
    return super(MessengerChatStateDetail, self).put(request, bot_id, id, format)
|
[
"def",
"put",
"(",
"self",
",",
"request",
",",
"bot_id",
",",
"id",
",",
"format",
"=",
"None",
")",
":",
"return",
"super",
"(",
"MessengerChatStateDetail",
",",
"self",
")",
".",
"put",
"(",
"request",
",",
"bot_id",
",",
"id",
",",
"format",
")"
] | 34.083333 | 11.916667 |
def _setup(app, *, schema, title=None, app_key=APP_KEY, db=None):
    """Initialize the admin-on-rest admin"""
    admin = web.Application(loop=app.loop)
    app[app_key] = admin

    loader = jinja2.FileSystemLoader([TEMPLATES_ROOT, ])
    aiohttp_jinja2.setup(admin, loader=loader, app_key=TEMPLATE_APP_KEY)

    if title:
        schema.title = title

    resources = [
        init(db, info['table'], url=info['url'])
        for init, info in schema.resources
    ]
    admin_handler = AdminOnRestHandler(
        admin,
        resources=resources,
        loop=app.loop,
        schema=schema,
    )
    admin['admin_handler'] = admin_handler

    setup_admin_on_rest_handlers(admin, admin_handler)
    return admin
|
[
"def",
"_setup",
"(",
"app",
",",
"*",
",",
"schema",
",",
"title",
"=",
"None",
",",
"app_key",
"=",
"APP_KEY",
",",
"db",
"=",
"None",
")",
":",
"admin",
"=",
"web",
".",
"Application",
"(",
"loop",
"=",
"app",
".",
"loop",
")",
"app",
"[",
"app_key",
"]",
"=",
"admin",
"loader",
"=",
"jinja2",
".",
"FileSystemLoader",
"(",
"[",
"TEMPLATES_ROOT",
",",
"]",
")",
"aiohttp_jinja2",
".",
"setup",
"(",
"admin",
",",
"loader",
"=",
"loader",
",",
"app_key",
"=",
"TEMPLATE_APP_KEY",
")",
"if",
"title",
":",
"schema",
".",
"title",
"=",
"title",
"resources",
"=",
"[",
"init",
"(",
"db",
",",
"info",
"[",
"'table'",
"]",
",",
"url",
"=",
"info",
"[",
"'url'",
"]",
")",
"for",
"init",
",",
"info",
"in",
"schema",
".",
"resources",
"]",
"admin_handler",
"=",
"AdminOnRestHandler",
"(",
"admin",
",",
"resources",
"=",
"resources",
",",
"loop",
"=",
"app",
".",
"loop",
",",
"schema",
"=",
"schema",
",",
")",
"admin",
"[",
"'admin_handler'",
"]",
"=",
"admin_handler",
"setup_admin_on_rest_handlers",
"(",
"admin",
",",
"admin_handler",
")",
"return",
"admin"
] | 25.888889 | 21.814815 |
def formatTimeFromNow(secs=None):
    """ Properly format a time that is `x` seconds in the future

    :param int secs: Seconds to go in the future (`x>0`) or the
                     past (`x<0`)
    :return: Properly formatted time for Graphene (`%Y-%m-%dT%H:%M:%S`)
    :rtype: str
    """
    return datetime.utcfromtimestamp(time.time() + int(secs or 0)).strftime(timeFormat)
|
[
"def",
"formatTimeFromNow",
"(",
"secs",
"=",
"None",
")",
":",
"return",
"datetime",
".",
"utcfromtimestamp",
"(",
"time",
".",
"time",
"(",
")",
"+",
"int",
"(",
"secs",
"or",
"0",
")",
")",
".",
"strftime",
"(",
"timeFormat",
")"
] | 38.6 | 21.9 |
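A quick check of the helper's behaviour; the body is repeated from the row above so the snippet runs standalone, and `timeFormat` is assumed to be the `'%Y-%m-%dT%H:%M:%S'` string mentioned in the docstring (exact output depends on the clock):

```python
import time
from datetime import datetime

timeFormat = '%Y-%m-%dT%H:%M:%S'  # assumed module-level constant

def formatTimeFromNow(secs=None):
    return datetime.utcfromtimestamp(time.time() + int(secs or 0)).strftime(timeFormat)

print(formatTimeFromNow())      # now, e.g. '2019-05-04T12:00:00'
print(formatTimeFromNow(3600))  # one hour in the future
print(formatTimeFromNow(-60))   # one minute in the past
```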
def dataset_exists(self, dataset):
    """Returns whether the given dataset exists.
    If regional location is specified for the dataset, that is also checked
    to be compatible with the remote dataset, otherwise an exception is thrown.

    :param dataset:
    :type dataset: BQDataset
    """
    try:
        response = self.client.datasets().get(projectId=dataset.project_id,
                                              datasetId=dataset.dataset_id).execute()
        if dataset.location is not None:
            fetched_location = response.get('location')
            if dataset.location != fetched_location:
                raise Exception('''Dataset already exists with regional location {}. Can't use {}.'''.format(
                    fetched_location if fetched_location is not None else 'unspecified',
                    dataset.location))
    except http.HttpError as ex:
        if ex.resp.status == 404:
            return False
        raise
    return True
|
[
"def",
"dataset_exists",
"(",
"self",
",",
"dataset",
")",
":",
"try",
":",
"response",
"=",
"self",
".",
"client",
".",
"datasets",
"(",
")",
".",
"get",
"(",
"projectId",
"=",
"dataset",
".",
"project_id",
",",
"datasetId",
"=",
"dataset",
".",
"dataset_id",
")",
".",
"execute",
"(",
")",
"if",
"dataset",
".",
"location",
"is",
"not",
"None",
":",
"fetched_location",
"=",
"response",
".",
"get",
"(",
"'location'",
")",
"if",
"dataset",
".",
"location",
"!=",
"fetched_location",
":",
"raise",
"Exception",
"(",
"'''Dataset already exists with regional location {}. Can't use {}.'''",
".",
"format",
"(",
"fetched_location",
"if",
"fetched_location",
"is",
"not",
"None",
"else",
"'unspecified'",
",",
"dataset",
".",
"location",
")",
")",
"except",
"http",
".",
"HttpError",
"as",
"ex",
":",
"if",
"ex",
".",
"resp",
".",
"status",
"==",
"404",
":",
"return",
"False",
"raise",
"return",
"True"
] | 41.72 | 24.48 |
def flatten_list(list_):
    """
    Banana banana
    """
    res = []
    for elem in list_:
        if isinstance(elem, list):
            res.extend(flatten_list(elem))
        else:
            res.append(elem)
    return res
|
[
"def",
"flatten_list",
"(",
"list_",
")",
":",
"res",
"=",
"[",
"]",
"for",
"elem",
"in",
"list_",
":",
"if",
"isinstance",
"(",
"elem",
",",
"list",
")",
":",
"res",
".",
"extend",
"(",
"flatten_list",
"(",
"elem",
")",
")",
"else",
":",
"res",
".",
"append",
"(",
"elem",
")",
"return",
"res"
] | 16.923077 | 18.307692 |
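The recursion above flattens arbitrarily nested lists while leaving non-list elements (tuples, strings, numbers) intact. Assuming the `flatten_list` function from the row above is in scope:

```python
print(flatten_list([1, [2, [3, 4]], 5]))    # [1, 2, 3, 4, 5]
print(flatten_list([[("a", "b")], ["c"]]))  # [('a', 'b'), 'c']
print(flatten_list([]))                     # []
```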
def _put_model(D, name, dat, m):
    """
    Place the model data given, into the location (m) given.

    :param dict D: Metadata (dataset)
    :param str name: Model name (ex: chron0model0)
    :param dict dat: Model data
    :param regex m: Model name regex groups
    :return dict D: Metadata (dataset)
    """
    try:
        # print("Placing model: {}".format(name))
        _pc = m.group(1) + "Data"
        _section = m.group(1) + m.group(2)
        if _pc not in D:
            # Section missing entirely? Can't continue
            print("{} not found in the provided dataset. Please try again".format(_pc))
            return
        else:
            if _section not in D[_pc]:
                # Creates section: Example: D[chronData][chron0]
                D[_pc][_section] = OrderedDict()
            if "model" not in D[_pc][_section]:
                # Creates model top level: Example: D[chronData][chron0]["model"]
                D[_pc][_section]["model"] = OrderedDict()
            if name not in D[_pc][_section]["model"]:
                dat = _update_table_names(name, dat)
                D[_pc][_section]["model"][name] = dat
            else:
                # Model already exists, should we overwrite it?
                _prompt_overwrite = input(
                    "This model already exists in the dataset. Do you want to overwrite it? (y/n)")
                # Yes, overwrite with the model data provided
                if _prompt_overwrite == "y":
                    dat = _update_table_names(name, dat)
                    D[_pc][_section]["model"][name] = dat
                # No, do not overwrite.
                elif _prompt_overwrite == "n":
                    _name2 = _prompt_placement(D, "model")
                    _m = re.match(re_model_name, _name2)
                    if _m:
                        D = _put_model(D, _name2, dat, _m)
                else:
                    print("Invalid choice")
    except Exception as e:
        print("addModel: Unable to put the model data into the dataset, {}".format(e))
    return D
|
[
"def",
"_put_model",
"(",
"D",
",",
"name",
",",
"dat",
",",
"m",
")",
":",
"try",
":",
"# print(\"Placing model: {}\".format(name))",
"_pc",
"=",
"m",
".",
"group",
"(",
"1",
")",
"+",
"\"Data\"",
"_section",
"=",
"m",
".",
"group",
"(",
"1",
")",
"+",
"m",
".",
"group",
"(",
"2",
")",
"if",
"_pc",
"not",
"in",
"D",
":",
"# Section missing entirely? Can't continue",
"print",
"(",
"\"{} not found in the provided dataset. Please try again\"",
".",
"format",
"(",
"_pc",
")",
")",
"return",
"else",
":",
"if",
"_section",
"not",
"in",
"D",
"[",
"_pc",
"]",
":",
"# Creates section: Example: D[chronData][chron0]",
"D",
"[",
"_pc",
"]",
"[",
"_section",
"]",
"=",
"OrderedDict",
"(",
")",
"if",
"\"model\"",
"not",
"in",
"D",
"[",
"_pc",
"]",
"[",
"_section",
"]",
":",
"# Creates model top level: Example: D[chronData][chron0][\"model\"]",
"D",
"[",
"_pc",
"]",
"[",
"_section",
"]",
"[",
"\"model\"",
"]",
"=",
"OrderedDict",
"(",
")",
"if",
"name",
"not",
"in",
"D",
"[",
"_pc",
"]",
"[",
"_section",
"]",
"[",
"\"model\"",
"]",
":",
"dat",
"=",
"_update_table_names",
"(",
"name",
",",
"dat",
")",
"D",
"[",
"_pc",
"]",
"[",
"_section",
"]",
"[",
"\"model\"",
"]",
"[",
"name",
"]",
"=",
"dat",
"else",
":",
"# Model already exists, should we overwrite it?",
"_prompt_overwrite",
"=",
"input",
"(",
"\"This model already exists in the dataset. Do you want to overwrite it? (y/n)\"",
")",
"# Yes, overwrite with the model data provided",
"if",
"_prompt_overwrite",
"==",
"\"y\"",
":",
"dat",
"=",
"_update_table_names",
"(",
"name",
",",
"dat",
")",
"D",
"[",
"_pc",
"]",
"[",
"_section",
"]",
"[",
"\"model\"",
"]",
"[",
"name",
"]",
"=",
"dat",
"# No, do not overwrite.",
"elif",
"_prompt_overwrite",
"==",
"\"n\"",
":",
"_name2",
"=",
"_prompt_placement",
"(",
"D",
",",
"\"model\"",
")",
"_m",
"=",
"re",
".",
"match",
"(",
"re_model_name",
",",
"_name2",
")",
"if",
"_m",
":",
"D",
"=",
"_put_model",
"(",
"D",
",",
"_name2",
",",
"dat",
",",
"_m",
")",
"else",
":",
"print",
"(",
"\"Invalid choice\"",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"\"addModel: Unable to put the model data into the dataset, {}\"",
".",
"format",
"(",
"e",
")",
")",
"return",
"D"
] | 42.416667 | 16.583333 |
def err_write(self, msg, **kwargs):
    r"""Print `msg` as an error message.

    The message is buffered (won't display) until linefeed ("\n").
    """
    if self._thread_invalid():
        # special case: if a non-main thread writes to stderr
        # i.e. due to an uncaught exception, pass it through
        # without raising an additional exception.
        self.async_call(self.err_write, msg, **kwargs)
        return
    return self.request('nvim_err_write', msg, **kwargs)
|
[
"def",
"err_write",
"(",
"self",
",",
"msg",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_thread_invalid",
"(",
")",
":",
"# special case: if a non-main thread writes to stderr",
"# i.e. due to an uncaught exception, pass it through",
"# without raising an additional exception.",
"self",
".",
"async_call",
"(",
"self",
".",
"err_write",
",",
"msg",
",",
"*",
"*",
"kwargs",
")",
"return",
"return",
"self",
".",
"request",
"(",
"'nvim_err_write'",
",",
"msg",
",",
"*",
"*",
"kwargs",
")"
] | 42.75 | 17 |
def ReadClientFullInfo(self, client_id):
    """Reads full client information for a single client.

    Args:
        client_id: A GRR client id string, e.g. "C.ea3b2b71840d6fa7".

    Returns:
        A `ClientFullInfo` instance for given client.

    Raises:
        UnknownClientError: if no client with such id was found.
    """
    result = self.MultiReadClientFullInfo([client_id])
    try:
        return result[client_id]
    except KeyError:
        raise UnknownClientError(client_id)
|
[
"def",
"ReadClientFullInfo",
"(",
"self",
",",
"client_id",
")",
":",
"result",
"=",
"self",
".",
"MultiReadClientFullInfo",
"(",
"[",
"client_id",
"]",
")",
"try",
":",
"return",
"result",
"[",
"client_id",
"]",
"except",
"KeyError",
":",
"raise",
"UnknownClientError",
"(",
"client_id",
")"
] | 27.588235 | 20.294118 |
def _item_to_database(self, iterator, database_pb):
    """Convert a database protobuf to the native object.

    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that is currently in use.

    :type database_pb: :class:`~google.spanner.admin.database.v1.Database`
    :param database_pb: A database returned from the API.

    :rtype: :class:`~google.cloud.spanner_v1.database.Database`
    :returns: The next database in the page.
    """
    return Database.from_pb(database_pb, self, pool=BurstyPool())
|
[
"def",
"_item_to_database",
"(",
"self",
",",
"iterator",
",",
"database_pb",
")",
":",
"return",
"Database",
".",
"from_pb",
"(",
"database_pb",
",",
"self",
",",
"pool",
"=",
"BurstyPool",
"(",
")",
")"
] | 44.615385 | 23.769231 |
def connect(host='localhost', port=21050, database=None, timeout=None,
            use_ssl=False, ca_cert=None, auth_mechanism='NOSASL', user=None,
            password=None, kerberos_service_name='impala', use_ldap=None,
            ldap_user=None, ldap_password=None, use_kerberos=None,
            protocol=None, krb_host=None):
    """Get a connection to HiveServer2 (HS2).

    These options are largely compatible with the impala-shell command line
    arguments. See those docs for more information.

    Parameters
    ----------
    host : str
        The hostname for HS2. For Impala, this can be any of the `impalad`s.
    port : int, optional
        The port number for HS2. The Impala default is 21050. The Hive port is
        likely different.
    database : str, optional
        The default database. If `None`, the result is
        implementation-dependent.
    timeout : int, optional
        Connection timeout in seconds. Default is no timeout.
    use_ssl : bool, optional
        Enable SSL.
    ca_cert : str, optional
        Local path to the third-party CA certificate. If SSL is enabled but
        the certificate is not specified, the server certificate will not be
        validated.
    auth_mechanism : {'NOSASL', 'PLAIN', 'GSSAPI', 'LDAP'}
        Specify the authentication mechanism. `'NOSASL'` for unsecured Impala.
        `'PLAIN'` for unsecured Hive (because Hive requires the SASL
        transport). `'GSSAPI'` for Kerberos and `'LDAP'` for Kerberos with
        LDAP.
    user : str, optional
        LDAP user, if applicable.
    password : str, optional
        LDAP password, if applicable.
    kerberos_service_name : str, optional
        Authenticate to a particular `impalad` service principal. Uses
        `'impala'` by default.
    use_ldap : bool, optional
        Specify `auth_mechanism='LDAP'` instead.

        .. deprecated:: 0.11.0
    ldap_user : str, optional
        Use `user` parameter instead.

        .. deprecated:: 0.11.0
    ldap_password : str, optional
        Use `password` parameter instead.

        .. deprecated:: 0.11.0
    use_kerberos : bool, optional
        Specify `auth_mechanism='GSSAPI'` instead.

        .. deprecated:: 0.11.0
    protocol : str, optional
        Do not use. HiveServer2 is the only protocol currently supported.

        .. deprecated:: 0.11.0

    Returns
    -------
    HiveServer2Connection
        A `Connection` object (DB API 2.0-compliant).
    """
    # pylint: disable=too-many-locals
    if use_kerberos is not None:
        warn_deprecate('use_kerberos', 'auth_mechanism="GSSAPI"')
        if use_kerberos:
            auth_mechanism = 'GSSAPI'

    if use_ldap is not None:
        warn_deprecate('use_ldap', 'auth_mechanism="LDAP"')
        if use_ldap:
            auth_mechanism = 'LDAP'

    if auth_mechanism:
        auth_mechanism = auth_mechanism.upper()
    else:
        auth_mechanism = 'NOSASL'

    if auth_mechanism not in AUTH_MECHANISMS:
        raise NotSupportedError(
            'Unsupported authentication mechanism: {0}'.format(auth_mechanism))

    if ldap_user is not None:
        warn_deprecate('ldap_user', 'user')
        user = ldap_user

    if ldap_password is not None:
        warn_deprecate('ldap_password', 'password')
        password = ldap_password

    if protocol is not None:
        if protocol.lower() == 'hiveserver2':
            warn_protocol_param()
        else:
            raise NotSupportedError(
                "'{0}' is not a supported protocol; only HiveServer2 is "
                "supported".format(protocol))

    service = hs2.connect(host=host, port=port,
                          timeout=timeout, use_ssl=use_ssl,
                          ca_cert=ca_cert, user=user, password=password,
                          kerberos_service_name=kerberos_service_name,
                          auth_mechanism=auth_mechanism, krb_host=krb_host)
    return hs2.HiveServer2Connection(service, default_db=database)
|
[
"def",
"connect",
"(",
"host",
"=",
"'localhost'",
",",
"port",
"=",
"21050",
",",
"database",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"use_ssl",
"=",
"False",
",",
"ca_cert",
"=",
"None",
",",
"auth_mechanism",
"=",
"'NOSASL'",
",",
"user",
"=",
"None",
",",
"password",
"=",
"None",
",",
"kerberos_service_name",
"=",
"'impala'",
",",
"use_ldap",
"=",
"None",
",",
"ldap_user",
"=",
"None",
",",
"ldap_password",
"=",
"None",
",",
"use_kerberos",
"=",
"None",
",",
"protocol",
"=",
"None",
",",
"krb_host",
"=",
"None",
")",
":",
"# pylint: disable=too-many-locals",
"if",
"use_kerberos",
"is",
"not",
"None",
":",
"warn_deprecate",
"(",
"'use_kerberos'",
",",
"'auth_mechanism=\"GSSAPI\"'",
")",
"if",
"use_kerberos",
":",
"auth_mechanism",
"=",
"'GSSAPI'",
"if",
"use_ldap",
"is",
"not",
"None",
":",
"warn_deprecate",
"(",
"'use_ldap'",
",",
"'auth_mechanism=\"LDAP\"'",
")",
"if",
"use_ldap",
":",
"auth_mechanism",
"=",
"'LDAP'",
"if",
"auth_mechanism",
":",
"auth_mechanism",
"=",
"auth_mechanism",
".",
"upper",
"(",
")",
"else",
":",
"auth_mechanism",
"=",
"'NOSASL'",
"if",
"auth_mechanism",
"not",
"in",
"AUTH_MECHANISMS",
":",
"raise",
"NotSupportedError",
"(",
"'Unsupported authentication mechanism: {0}'",
".",
"format",
"(",
"auth_mechanism",
")",
")",
"if",
"ldap_user",
"is",
"not",
"None",
":",
"warn_deprecate",
"(",
"'ldap_user'",
",",
"'user'",
")",
"user",
"=",
"ldap_user",
"if",
"ldap_password",
"is",
"not",
"None",
":",
"warn_deprecate",
"(",
"'ldap_password'",
",",
"'password'",
")",
"password",
"=",
"ldap_password",
"if",
"protocol",
"is",
"not",
"None",
":",
"if",
"protocol",
".",
"lower",
"(",
")",
"==",
"'hiveserver2'",
":",
"warn_protocol_param",
"(",
")",
"else",
":",
"raise",
"NotSupportedError",
"(",
"\"'{0}' is not a supported protocol; only HiveServer2 is \"",
"\"supported\"",
".",
"format",
"(",
"protocol",
")",
")",
"service",
"=",
"hs2",
".",
"connect",
"(",
"host",
"=",
"host",
",",
"port",
"=",
"port",
",",
"timeout",
"=",
"timeout",
",",
"use_ssl",
"=",
"use_ssl",
",",
"ca_cert",
"=",
"ca_cert",
",",
"user",
"=",
"user",
",",
"password",
"=",
"password",
",",
"kerberos_service_name",
"=",
"kerberos_service_name",
",",
"auth_mechanism",
"=",
"auth_mechanism",
",",
"krb_host",
"=",
"krb_host",
")",
"return",
"hs2",
".",
"HiveServer2Connection",
"(",
"service",
",",
"default_db",
"=",
"database",
")"
] | 35.633028 | 20.12844 |
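For reference, a short usage sketch of this `connect` (impyla's DB API entry point); the host name is illustrative:

```python
from impala.dbapi import connect

# NOSASL matches an unsecured Impala daemon; use PLAIN for unsecured Hive.
conn = connect(host='impala-host.example.com', port=21050,
               auth_mechanism='NOSASL', timeout=30)
cur = conn.cursor()
cur.execute('SELECT 1')
print(cur.fetchall())
cur.close()
conn.close()
```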
def create_cache_cluster(name, wait=600, security_groups=None,
region=None, key=None, keyid=None, profile=None, **args):
'''
Create a cache cluster.
Example:
.. code-block:: bash
salt myminion boto3_elasticache.create_cache_cluster name=myCacheCluster \
Engine=redis \
CacheNodeType=cache.t2.micro \
NumCacheNodes=1 \
SecurityGroupIds='[sg-11223344]' \
CacheSubnetGroupName=myCacheSubnetGroup
'''
if security_groups:
if not isinstance(security_groups, list):
security_groups = [security_groups]
sgs = __salt__['boto_secgroup.convert_to_group_ids'](groups=security_groups, region=region,
key=key, keyid=keyid, profile=profile)
if 'SecurityGroupIds' not in args:
args['SecurityGroupIds'] = []
args['SecurityGroupIds'] += sgs
args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
return _create_resource(name, name_param='CacheClusterId', desc='cache cluster',
res_type='cache_cluster', wait=wait, status_param='CacheClusterStatus',
region=region, key=key, keyid=keyid, profile=profile, **args)
|
[
"def",
"create_cache_cluster",
"(",
"name",
",",
"wait",
"=",
"600",
",",
"security_groups",
"=",
"None",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
",",
"*",
"*",
"args",
")",
":",
"if",
"security_groups",
":",
"if",
"not",
"isinstance",
"(",
"security_groups",
",",
"list",
")",
":",
"security_groups",
"=",
"[",
"security_groups",
"]",
"sgs",
"=",
"__salt__",
"[",
"'boto_secgroup.convert_to_group_ids'",
"]",
"(",
"groups",
"=",
"security_groups",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"'SecurityGroupIds'",
"not",
"in",
"args",
":",
"args",
"[",
"'SecurityGroupIds'",
"]",
"=",
"[",
"]",
"args",
"[",
"'SecurityGroupIds'",
"]",
"+=",
"sgs",
"args",
"=",
"dict",
"(",
"[",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"args",
".",
"items",
"(",
")",
"if",
"not",
"k",
".",
"startswith",
"(",
"'_'",
")",
"]",
")",
"return",
"_create_resource",
"(",
"name",
",",
"name_param",
"=",
"'CacheClusterId'",
",",
"desc",
"=",
"'cache cluster'",
",",
"res_type",
"=",
"'cache_cluster'",
",",
"wait",
"=",
"wait",
",",
"status_param",
"=",
"'CacheClusterStatus'",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
",",
"*",
"*",
"args",
")"
] | 54.571429 | 33.214286 |
def disconnect(self, message=""):
"""Hang up the connection.
Arguments:
message -- Quit message.
"""
try:
del self.connected
except AttributeError:
return
self.quit(message)
self.transport.close()
self._handle_event(Event("disconnect", self.server, "", [message]))
|
[
"def",
"disconnect",
"(",
"self",
",",
"message",
"=",
"\"\"",
")",
":",
"try",
":",
"del",
"self",
".",
"connected",
"except",
"AttributeError",
":",
"return",
"self",
".",
"quit",
"(",
"message",
")",
"self",
".",
"transport",
".",
"close",
"(",
")",
"self",
".",
"_handle_event",
"(",
"Event",
"(",
"\"disconnect\"",
",",
"self",
".",
"server",
",",
"\"\"",
",",
"[",
"message",
"]",
")",
")"
] | 22.0625 | 20.125 |
def hill_climbing(data, graph, **kwargs):
"""Hill Climbing optimization: a greedy exploration algorithm."""
nodelist = list(data.columns)
data = scale(data.values).astype('float32')
tested_candidates = [nx.adj_matrix(graph, nodelist=nodelist, weight=None)]
    best_score = parallel_graph_evaluation(data, tested_candidates[0].todense(), **kwargs)
best_candidate = graph
can_improve = True
while can_improve:
can_improve = False
for (i, j) in best_candidate.edges():
test_graph = deepcopy(best_candidate)
test_graph.add_edge(j, i, weight=test_graph[i][j]['weight'])
test_graph.remove_edge(i, j)
tadjmat = nx.adj_matrix(test_graph, nodelist=nodelist, weight=None)
if (nx.is_directed_acyclic_graph(test_graph) and not any([(tadjmat != cand).nnz ==
0 for cand in tested_candidates])):
tested_candidates.append(tadjmat)
score = parallel_graph_evaluation(data, tadjmat.todense(), **kwargs)
if score < best_score:
can_improve = True
best_candidate = test_graph
best_score = score
break
return best_candidate
|
[
"def",
"hill_climbing",
"(",
"data",
",",
"graph",
",",
"*",
"*",
"kwargs",
")",
":",
"nodelist",
"=",
"list",
"(",
"data",
".",
"columns",
")",
"data",
"=",
"scale",
"(",
"data",
".",
"values",
")",
".",
"astype",
"(",
"'float32'",
")",
"tested_candidates",
"=",
"[",
"nx",
".",
"adj_matrix",
"(",
"graph",
",",
"nodelist",
"=",
"nodelist",
",",
"weight",
"=",
"None",
")",
"]",
"best_score",
"=",
"parallel_graph_evaluation",
"(",
"data",
",",
"tested_candidates",
"[",
"0",
"]",
".",
"todense",
"(",
")",
",",
"*",
"*",
"kwargs",
")",
"best_candidate",
"=",
"graph",
"can_improve",
"=",
"True",
"while",
"can_improve",
":",
"can_improve",
"=",
"False",
"for",
"(",
"i",
",",
"j",
")",
"in",
"best_candidate",
".",
"edges",
"(",
")",
":",
"test_graph",
"=",
"deepcopy",
"(",
"best_candidate",
")",
"test_graph",
".",
"add_edge",
"(",
"j",
",",
"i",
",",
"weight",
"=",
"test_graph",
"[",
"i",
"]",
"[",
"j",
"]",
"[",
"'weight'",
"]",
")",
"test_graph",
".",
"remove_edge",
"(",
"i",
",",
"j",
")",
"tadjmat",
"=",
"nx",
".",
"adj_matrix",
"(",
"test_graph",
",",
"nodelist",
"=",
"nodelist",
",",
"weight",
"=",
"None",
")",
"if",
"(",
"nx",
".",
"is_directed_acyclic_graph",
"(",
"test_graph",
")",
"and",
"not",
"any",
"(",
"[",
"(",
"tadjmat",
"!=",
"cand",
")",
".",
"nnz",
"==",
"0",
"for",
"cand",
"in",
"tested_candidates",
"]",
")",
")",
":",
"tested_candidates",
".",
"append",
"(",
"tadjmat",
")",
"score",
"=",
"parallel_graph_evaluation",
"(",
"data",
",",
"tadjmat",
".",
"todense",
"(",
")",
",",
"*",
"*",
"kwargs",
")",
"if",
"score",
"<",
"best_score",
":",
"can_improve",
"=",
"True",
"best_candidate",
"=",
"test_graph",
"best_score",
"=",
"score",
"break",
"return",
"best_candidate"
] | 51.36 | 18.68 |
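A hedged driver sketch for `hill_climbing`; the data, the initial DAG, and the availability of `parallel_graph_evaluation` in scope are all assumptions:

```python
import networkx as nx
import pandas as pd

# Toy two-variable dataset and an initial weighted candidate structure.
df = pd.DataFrame({'a': [0.0, 1.0, 2.0, 3.0], 'b': [1.0, 2.0, 3.0, 4.0]})
graph = nx.DiGraph()
graph.add_edge('a', 'b', weight=1)

# Greedily reverses one edge at a time, keeping any change that lowers the score.
best = hill_climbing(df, graph)
```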
def _setLearningMode(self):
"""
Sets the learning mode.
"""
for region in self.L4Regions:
region.setParameter("learn", True)
for region in self.L2Regions:
region.setParameter("learningMode", True)
|
[
"def",
"_setLearningMode",
"(",
"self",
")",
":",
"for",
"region",
"in",
"self",
".",
"L4Regions",
":",
"region",
".",
"setParameter",
"(",
"\"learn\"",
",",
"True",
")",
"for",
"region",
"in",
"self",
".",
"L2Regions",
":",
"region",
".",
"setParameter",
"(",
"\"learningMode\"",
",",
"True",
")"
] | 27.625 | 5.875 |
def load_all(stream, Loader=None):
"""
Parse all YAML documents in a stream
and produce corresponding Python objects.
"""
if Loader is None:
load_warning('load_all')
Loader = FullLoader
loader = Loader(stream)
try:
while loader.check_data():
yield loader.get_data()
finally:
loader.dispose()
|
[
"def",
"load_all",
"(",
"stream",
",",
"Loader",
"=",
"None",
")",
":",
"if",
"Loader",
"is",
"None",
":",
"load_warning",
"(",
"'load_all'",
")",
"Loader",
"=",
"FullLoader",
"loader",
"=",
"Loader",
"(",
"stream",
")",
"try",
":",
"while",
"loader",
".",
"check_data",
"(",
")",
":",
"yield",
"loader",
".",
"get_data",
"(",
")",
"finally",
":",
"loader",
".",
"dispose",
"(",
")"
] | 23.6 | 12.666667 |
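Since `load_all` is a generator, a typical multi-document PyYAML call looks like this (a minimal sketch; passing an explicit `Loader` avoids the deprecation path above):

```python
import yaml

stream = """\
---
name: first
---
name: second
"""

for doc in yaml.load_all(stream, Loader=yaml.SafeLoader):
    print(doc['name'])  # -> first, second
```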
def treat_values(simulated_array, observed_array, replace_nan=None, replace_inf=None,
remove_neg=False, remove_zero=False):
"""Removes the nan, negative, and inf values in two numpy arrays"""
sim_copy = np.copy(simulated_array)
obs_copy = np.copy(observed_array)
# Checking to see if the vectors are the same length
assert sim_copy.ndim == 1, "The simulated array is not one dimensional."
assert obs_copy.ndim == 1, "The observed array is not one dimensional."
if sim_copy.size != obs_copy.size:
raise RuntimeError("The two ndarrays are not the same size.")
# Treat missing data in observed_array and simulated_array, rows in simulated_array or
# observed_array that contain nan values
all_treatment_array = np.ones(obs_copy.size, dtype=bool)
if np.any(np.isnan(obs_copy)) or np.any(np.isnan(sim_copy)):
if replace_nan is not None:
# Finding the NaNs
sim_nan = np.isnan(sim_copy)
obs_nan = np.isnan(obs_copy)
# Replacing the NaNs with the input
sim_copy[sim_nan] = replace_nan
obs_copy[obs_nan] = replace_nan
warnings.warn("Elements(s) {} contained NaN values in the simulated array and "
"elements(s) {} contained NaN values in the observed array and have been "
"replaced (Elements are zero indexed).".format(np.where(sim_nan)[0],
np.where(obs_nan)[0]),
UserWarning)
else:
# Getting the indices of the nan values, combining them, and informing user.
nan_indices_fcst = ~np.isnan(sim_copy)
nan_indices_obs = ~np.isnan(obs_copy)
all_nan_indices = np.logical_and(nan_indices_fcst, nan_indices_obs)
all_treatment_array = np.logical_and(all_treatment_array, all_nan_indices)
warnings.warn("Row(s) {} contained NaN values and the row(s) have been "
"removed (Rows are zero indexed).".format(np.where(~all_nan_indices)[0]),
UserWarning)
if np.any(np.isinf(obs_copy)) or np.any(np.isinf(sim_copy)):
        if replace_inf is not None:
            # Finding the Infs
            sim_inf = np.isinf(sim_copy)
            obs_inf = np.isinf(obs_copy)
            # Replacing the Infs with the input
            sim_copy[sim_inf] = replace_inf
            obs_copy[obs_inf] = replace_inf
            warnings.warn("Element(s) {} contained Inf values in the simulated array and "
                          "element(s) {} contained Inf values in the observed array and have been "
"replaced (Elements are zero indexed).".format(np.where(sim_inf)[0],
np.where(obs_inf)[0]),
UserWarning)
else:
inf_indices_fcst = ~(np.isinf(sim_copy))
inf_indices_obs = ~np.isinf(obs_copy)
all_inf_indices = np.logical_and(inf_indices_fcst, inf_indices_obs)
all_treatment_array = np.logical_and(all_treatment_array, all_inf_indices)
warnings.warn(
"Row(s) {} contained Inf or -Inf values and the row(s) have been removed (Rows "
"are zero indexed).".format(np.where(~all_inf_indices)[0]),
UserWarning
)
# Treat zero data in observed_array and simulated_array, rows in simulated_array or
# observed_array that contain zero values
if remove_zero:
if (obs_copy == 0).any() or (sim_copy == 0).any():
zero_indices_fcst = ~(sim_copy == 0)
zero_indices_obs = ~(obs_copy == 0)
all_zero_indices = np.logical_and(zero_indices_fcst, zero_indices_obs)
all_treatment_array = np.logical_and(all_treatment_array, all_zero_indices)
warnings.warn(
"Row(s) {} contained zero values and the row(s) have been removed (Rows are "
"zero indexed).".format(np.where(~all_zero_indices)[0]),
UserWarning
)
# Treat negative data in observed_array and simulated_array, rows in simulated_array or
# observed_array that contain negative values
# Ignore runtime warnings from comparing
if remove_neg:
with np.errstate(invalid='ignore'):
obs_copy_bool = obs_copy < 0
sim_copy_bool = sim_copy < 0
if obs_copy_bool.any() or sim_copy_bool.any():
neg_indices_fcst = ~sim_copy_bool
neg_indices_obs = ~obs_copy_bool
all_neg_indices = np.logical_and(neg_indices_fcst, neg_indices_obs)
all_treatment_array = np.logical_and(all_treatment_array, all_neg_indices)
warnings.warn("Row(s) {} contained negative values and the row(s) have been "
"removed (Rows are zero indexed).".format(np.where(~all_neg_indices)[0]),
UserWarning)
obs_copy = obs_copy[all_treatment_array]
sim_copy = sim_copy[all_treatment_array]
return sim_copy, obs_copy
|
[
"def",
"treat_values",
"(",
"simulated_array",
",",
"observed_array",
",",
"replace_nan",
"=",
"None",
",",
"replace_inf",
"=",
"None",
",",
"remove_neg",
"=",
"False",
",",
"remove_zero",
"=",
"False",
")",
":",
"sim_copy",
"=",
"np",
".",
"copy",
"(",
"simulated_array",
")",
"obs_copy",
"=",
"np",
".",
"copy",
"(",
"observed_array",
")",
"# Checking to see if the vectors are the same length",
"assert",
"sim_copy",
".",
"ndim",
"==",
"1",
",",
"\"The simulated array is not one dimensional.\"",
"assert",
"obs_copy",
".",
"ndim",
"==",
"1",
",",
"\"The observed array is not one dimensional.\"",
"if",
"sim_copy",
".",
"size",
"!=",
"obs_copy",
".",
"size",
":",
"raise",
"RuntimeError",
"(",
"\"The two ndarrays are not the same size.\"",
")",
"# Treat missing data in observed_array and simulated_array, rows in simulated_array or",
"# observed_array that contain nan values",
"all_treatment_array",
"=",
"np",
".",
"ones",
"(",
"obs_copy",
".",
"size",
",",
"dtype",
"=",
"bool",
")",
"if",
"np",
".",
"any",
"(",
"np",
".",
"isnan",
"(",
"obs_copy",
")",
")",
"or",
"np",
".",
"any",
"(",
"np",
".",
"isnan",
"(",
"sim_copy",
")",
")",
":",
"if",
"replace_nan",
"is",
"not",
"None",
":",
"# Finding the NaNs",
"sim_nan",
"=",
"np",
".",
"isnan",
"(",
"sim_copy",
")",
"obs_nan",
"=",
"np",
".",
"isnan",
"(",
"obs_copy",
")",
"# Replacing the NaNs with the input",
"sim_copy",
"[",
"sim_nan",
"]",
"=",
"replace_nan",
"obs_copy",
"[",
"obs_nan",
"]",
"=",
"replace_nan",
"warnings",
".",
"warn",
"(",
"\"Elements(s) {} contained NaN values in the simulated array and \"",
"\"elements(s) {} contained NaN values in the observed array and have been \"",
"\"replaced (Elements are zero indexed).\"",
".",
"format",
"(",
"np",
".",
"where",
"(",
"sim_nan",
")",
"[",
"0",
"]",
",",
"np",
".",
"where",
"(",
"obs_nan",
")",
"[",
"0",
"]",
")",
",",
"UserWarning",
")",
"else",
":",
"# Getting the indices of the nan values, combining them, and informing user.",
"nan_indices_fcst",
"=",
"~",
"np",
".",
"isnan",
"(",
"sim_copy",
")",
"nan_indices_obs",
"=",
"~",
"np",
".",
"isnan",
"(",
"obs_copy",
")",
"all_nan_indices",
"=",
"np",
".",
"logical_and",
"(",
"nan_indices_fcst",
",",
"nan_indices_obs",
")",
"all_treatment_array",
"=",
"np",
".",
"logical_and",
"(",
"all_treatment_array",
",",
"all_nan_indices",
")",
"warnings",
".",
"warn",
"(",
"\"Row(s) {} contained NaN values and the row(s) have been \"",
"\"removed (Rows are zero indexed).\"",
".",
"format",
"(",
"np",
".",
"where",
"(",
"~",
"all_nan_indices",
")",
"[",
"0",
"]",
")",
",",
"UserWarning",
")",
"if",
"np",
".",
"any",
"(",
"np",
".",
"isinf",
"(",
"obs_copy",
")",
")",
"or",
"np",
".",
"any",
"(",
"np",
".",
"isinf",
"(",
"sim_copy",
")",
")",
":",
"if",
"replace_nan",
"is",
"not",
"None",
":",
"# Finding the NaNs",
"sim_inf",
"=",
"np",
".",
"isinf",
"(",
"sim_copy",
")",
"obs_inf",
"=",
"np",
".",
"isinf",
"(",
"obs_copy",
")",
"# Replacing the NaNs with the input",
"sim_copy",
"[",
"sim_inf",
"]",
"=",
"replace_inf",
"obs_copy",
"[",
"obs_inf",
"]",
"=",
"replace_inf",
"warnings",
".",
"warn",
"(",
"\"Elements(s) {} contained Inf values in the simulated array and \"",
"\"elements(s) {} contained Inf values in the observed array and have been \"",
"\"replaced (Elements are zero indexed).\"",
".",
"format",
"(",
"np",
".",
"where",
"(",
"sim_inf",
")",
"[",
"0",
"]",
",",
"np",
".",
"where",
"(",
"obs_inf",
")",
"[",
"0",
"]",
")",
",",
"UserWarning",
")",
"else",
":",
"inf_indices_fcst",
"=",
"~",
"(",
"np",
".",
"isinf",
"(",
"sim_copy",
")",
")",
"inf_indices_obs",
"=",
"~",
"np",
".",
"isinf",
"(",
"obs_copy",
")",
"all_inf_indices",
"=",
"np",
".",
"logical_and",
"(",
"inf_indices_fcst",
",",
"inf_indices_obs",
")",
"all_treatment_array",
"=",
"np",
".",
"logical_and",
"(",
"all_treatment_array",
",",
"all_inf_indices",
")",
"warnings",
".",
"warn",
"(",
"\"Row(s) {} contained Inf or -Inf values and the row(s) have been removed (Rows \"",
"\"are zero indexed).\"",
".",
"format",
"(",
"np",
".",
"where",
"(",
"~",
"all_inf_indices",
")",
"[",
"0",
"]",
")",
",",
"UserWarning",
")",
"# Treat zero data in observed_array and simulated_array, rows in simulated_array or",
"# observed_array that contain zero values",
"if",
"remove_zero",
":",
"if",
"(",
"obs_copy",
"==",
"0",
")",
".",
"any",
"(",
")",
"or",
"(",
"sim_copy",
"==",
"0",
")",
".",
"any",
"(",
")",
":",
"zero_indices_fcst",
"=",
"~",
"(",
"sim_copy",
"==",
"0",
")",
"zero_indices_obs",
"=",
"~",
"(",
"obs_copy",
"==",
"0",
")",
"all_zero_indices",
"=",
"np",
".",
"logical_and",
"(",
"zero_indices_fcst",
",",
"zero_indices_obs",
")",
"all_treatment_array",
"=",
"np",
".",
"logical_and",
"(",
"all_treatment_array",
",",
"all_zero_indices",
")",
"warnings",
".",
"warn",
"(",
"\"Row(s) {} contained zero values and the row(s) have been removed (Rows are \"",
"\"zero indexed).\"",
".",
"format",
"(",
"np",
".",
"where",
"(",
"~",
"all_zero_indices",
")",
"[",
"0",
"]",
")",
",",
"UserWarning",
")",
"# Treat negative data in observed_array and simulated_array, rows in simulated_array or",
"# observed_array that contain negative values",
"# Ignore runtime warnings from comparing",
"if",
"remove_neg",
":",
"with",
"np",
".",
"errstate",
"(",
"invalid",
"=",
"'ignore'",
")",
":",
"obs_copy_bool",
"=",
"obs_copy",
"<",
"0",
"sim_copy_bool",
"=",
"sim_copy",
"<",
"0",
"if",
"obs_copy_bool",
".",
"any",
"(",
")",
"or",
"sim_copy_bool",
".",
"any",
"(",
")",
":",
"neg_indices_fcst",
"=",
"~",
"sim_copy_bool",
"neg_indices_obs",
"=",
"~",
"obs_copy_bool",
"all_neg_indices",
"=",
"np",
".",
"logical_and",
"(",
"neg_indices_fcst",
",",
"neg_indices_obs",
")",
"all_treatment_array",
"=",
"np",
".",
"logical_and",
"(",
"all_treatment_array",
",",
"all_neg_indices",
")",
"warnings",
".",
"warn",
"(",
"\"Row(s) {} contained negative values and the row(s) have been \"",
"\"removed (Rows are zero indexed).\"",
".",
"format",
"(",
"np",
".",
"where",
"(",
"~",
"all_neg_indices",
")",
"[",
"0",
"]",
")",
",",
"UserWarning",
")",
"obs_copy",
"=",
"obs_copy",
"[",
"all_treatment_array",
"]",
"sim_copy",
"=",
"sim_copy",
"[",
"all_treatment_array",
"]",
"return",
"sim_copy",
",",
"obs_copy"
] | 48.084906 | 25.509434 |
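A small worked example of `treat_values` behavior (a sketch; the emitted warnings are elided):

```python
import numpy as np

sim = np.array([1.0, np.nan, 3.0, -2.0])
obs = np.array([1.5, 2.0, np.inf, 4.0])

# Without replacement values, the NaN row and the Inf row are dropped
# (each drop raises a UserWarning); remove_neg also drops the negative row.
sim_clean, obs_clean = treat_values(sim, obs, remove_neg=True)
print(sim_clean, obs_clean)  # -> [1.] [1.5]
```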
def get_counter(self, redis_conn=None, host='localhost', port=6379,
key='counter', cycle_time=5, start_time=None,
window=SECONDS_1_HOUR, roll=True, keep_max=12, start_at=0):
'''
Generate a new Counter
Useful for generic distributed counters
@param redis_conn: A premade redis connection (overrides host and port)
@param host: the redis host
@param port: the redis port
@param key: the key for your stats collection
@param cycle_time: how often to check for expiring counts
@param start_time: the time to start valid collection
@param window: how long to collect data for in seconds (if rolling)
@param roll: Roll the window after it expires, to continue collecting
on a new date based key.
    @param keep_max: If rolling the static window, the max number of prior
windows to keep
@param start_at: The integer to start counting at
'''
counter = Counter(key=key, cycle_time=cycle_time,
start_time=start_time, window=window, roll=roll,
keep_max=keep_max)
counter.setup(redis_conn=redis_conn, host=host, port=port)
return counter
|
[
"def",
"get_counter",
"(",
"self",
",",
"redis_conn",
"=",
"None",
",",
"host",
"=",
"'localhost'",
",",
"port",
"=",
"6379",
",",
"key",
"=",
"'counter'",
",",
"cycle_time",
"=",
"5",
",",
"start_time",
"=",
"None",
",",
"window",
"=",
"SECONDS_1_HOUR",
",",
"roll",
"=",
"True",
",",
"keep_max",
"=",
"12",
",",
"start_at",
"=",
"0",
")",
":",
"counter",
"=",
"Counter",
"(",
"key",
"=",
"key",
",",
"cycle_time",
"=",
"cycle_time",
",",
"start_time",
"=",
"start_time",
",",
"window",
"=",
"window",
",",
"roll",
"=",
"roll",
",",
"keep_max",
"=",
"keep_max",
")",
"counter",
".",
"setup",
"(",
"redis_conn",
"=",
"redis_conn",
",",
"host",
"=",
"host",
",",
"port",
"=",
"port",
")",
"return",
"counter"
] | 49.8 | 22.04 |
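A hedged usage sketch; `stats` stands in for an instance of the collector class defining `get_counter`, and the `Counter` method shown is an assumption since its API is not part of this row:

```python
# Key and window values are illustrative.
counter = stats.get_counter(key='page_hits', window=3600,
                            roll=True, keep_max=24)
counter.increment()  # assumed Counter method name
```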
def dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=(-1,), normalize=False, verbosity=1):
"""Compose a pybrain.dataset from a pandas DataFrame
Arguments:
delays (list of int): sample delays to use for the input tapped delay line
Positive and negative values are treated the same as sample counts into the past.
default: [1, 2, 3], in z-transform notation: z^-1 + z^-2 + z^-3
inputs (list of int or list of str): column indices or labels for the inputs
outputs (list of int or list of str): column indices or labels for the outputs
      normalize (bool): whether to standardize each input to be approximately normally distributed about 0 with std 1
Returns:
3-tuple: tuple(dataset, list of means, list of stds)
means and stds allow normalization of new inputs and denormalization of the outputs
TODO:
Detect categorical variables with low dimensionality and split into separate bits
          Vowpal Wabbit hashes strings into an int?
Detect ordinal variables and convert to continuous int sequence
SEE: http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
"""
if isinstance(delays, int):
if delays:
delays = range(1, delays + 1)
else:
delays = [0]
delays = np.abs(np.array([int(i) for i in delays]))
inputs = [df.columns[int(inp)] if isinstance(inp, (float, int)) else str(inp) for inp in inputs]
outputs = [df.columns[int(out)] if isinstance(out, (float, int)) else str(out) for out in (outputs or [])]
inputs = [fuzzy_get(df.columns, i) for i in inputs]
outputs = [fuzzy_get(df.columns, o) for o in outputs]
N_inp = len(inputs)
N_out = len(outputs)
inp_outs = inputs + outputs
if verbosity > 0:
print("inputs: {}\noutputs: {}\ndelays: {}\n".format(inputs, outputs, delays))
means, stds = np.zeros(len(inp_outs)), np.ones(len(inp_outs))
if normalize:
means, stds = df[inp_outs].mean(), df[inp_outs].std()
if normalize and verbosity > 0:
print("Input mean values (used to normalize input biases): {}".format(means[:N_inp]))
print("Output mean values (used to normalize output biases): {}".format(means[N_inp:]))
ds = pb.datasets.SupervisedDataSet(N_inp * len(delays), N_out)
if verbosity > 0:
print("Dataset dimensions are {}x{}x{} (records x indim x outdim) for {} delays, {} inputs, {} outputs".format(
len(df), ds.indim, ds.outdim, len(delays), len(inputs), len(outputs)))
# FIXME: normalize the whole matrix at once and add it quickly rather than one sample at a time
    if np.array_equal(delays, [0]) and not normalize:
if verbosity > 0:
print("No tapped delay lines (delays) were requested, so using undelayed features for the dataset.")
assert(df[inputs].values.shape[0] == df[outputs].values.shape[0])
ds.setField('input', df[inputs].values)
ds.setField('target', df[outputs].values)
ds.linkFields(['input', 'target'])
# for inp, outp in zip(df[inputs].values, df[outputs].values):
# ds.appendLinked(inp, outp)
assert(len(ds['input']) == len(ds['target']))
else:
for i, out_vec in enumerate(df[outputs].values):
if verbosity > 0 and i % 100 == 0:
print("{}%".format(i / .01 / len(df)))
elif verbosity > 1:
print('sample[{i}].target={out_vec}'.format(i=i, out_vec=out_vec))
if i < max(delays):
continue
inp_vec = []
for delay in delays:
inp_vec += list((df[inputs].values[i - delay] - means[:N_inp]) / stds[:N_inp])
ds.addSample(inp_vec, (out_vec - means[N_inp:]) / stds[N_inp:])
if verbosity > 0:
print("Dataset now has {} samples".format(len(ds)))
if normalize:
return ds, means, stds
else:
return ds
|
[
"def",
"dataset_from_dataframe",
"(",
"df",
",",
"delays",
"=",
"(",
"1",
",",
"2",
",",
"3",
")",
",",
"inputs",
"=",
"(",
"1",
",",
"2",
",",
"-",
"1",
")",
",",
"outputs",
"=",
"(",
"-",
"1",
",",
")",
",",
"normalize",
"=",
"False",
",",
"verbosity",
"=",
"1",
")",
":",
"if",
"isinstance",
"(",
"delays",
",",
"int",
")",
":",
"if",
"delays",
":",
"delays",
"=",
"range",
"(",
"1",
",",
"delays",
"+",
"1",
")",
"else",
":",
"delays",
"=",
"[",
"0",
"]",
"delays",
"=",
"np",
".",
"abs",
"(",
"np",
".",
"array",
"(",
"[",
"int",
"(",
"i",
")",
"for",
"i",
"in",
"delays",
"]",
")",
")",
"inputs",
"=",
"[",
"df",
".",
"columns",
"[",
"int",
"(",
"inp",
")",
"]",
"if",
"isinstance",
"(",
"inp",
",",
"(",
"float",
",",
"int",
")",
")",
"else",
"str",
"(",
"inp",
")",
"for",
"inp",
"in",
"inputs",
"]",
"outputs",
"=",
"[",
"df",
".",
"columns",
"[",
"int",
"(",
"out",
")",
"]",
"if",
"isinstance",
"(",
"out",
",",
"(",
"float",
",",
"int",
")",
")",
"else",
"str",
"(",
"out",
")",
"for",
"out",
"in",
"(",
"outputs",
"or",
"[",
"]",
")",
"]",
"inputs",
"=",
"[",
"fuzzy_get",
"(",
"df",
".",
"columns",
",",
"i",
")",
"for",
"i",
"in",
"inputs",
"]",
"outputs",
"=",
"[",
"fuzzy_get",
"(",
"df",
".",
"columns",
",",
"o",
")",
"for",
"o",
"in",
"outputs",
"]",
"N_inp",
"=",
"len",
"(",
"inputs",
")",
"N_out",
"=",
"len",
"(",
"outputs",
")",
"inp_outs",
"=",
"inputs",
"+",
"outputs",
"if",
"verbosity",
">",
"0",
":",
"print",
"(",
"\"inputs: {}\\noutputs: {}\\ndelays: {}\\n\"",
".",
"format",
"(",
"inputs",
",",
"outputs",
",",
"delays",
")",
")",
"means",
",",
"stds",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"inp_outs",
")",
")",
",",
"np",
".",
"ones",
"(",
"len",
"(",
"inp_outs",
")",
")",
"if",
"normalize",
":",
"means",
",",
"stds",
"=",
"df",
"[",
"inp_outs",
"]",
".",
"mean",
"(",
")",
",",
"df",
"[",
"inp_outs",
"]",
".",
"std",
"(",
")",
"if",
"normalize",
"and",
"verbosity",
">",
"0",
":",
"print",
"(",
"\"Input mean values (used to normalize input biases): {}\"",
".",
"format",
"(",
"means",
"[",
":",
"N_inp",
"]",
")",
")",
"print",
"(",
"\"Output mean values (used to normalize output biases): {}\"",
".",
"format",
"(",
"means",
"[",
"N_inp",
":",
"]",
")",
")",
"ds",
"=",
"pb",
".",
"datasets",
".",
"SupervisedDataSet",
"(",
"N_inp",
"*",
"len",
"(",
"delays",
")",
",",
"N_out",
")",
"if",
"verbosity",
">",
"0",
":",
"print",
"(",
"\"Dataset dimensions are {}x{}x{} (records x indim x outdim) for {} delays, {} inputs, {} outputs\"",
".",
"format",
"(",
"len",
"(",
"df",
")",
",",
"ds",
".",
"indim",
",",
"ds",
".",
"outdim",
",",
"len",
"(",
"delays",
")",
",",
"len",
"(",
"inputs",
")",
",",
"len",
"(",
"outputs",
")",
")",
")",
"# FIXME: normalize the whole matrix at once and add it quickly rather than one sample at a time",
"if",
"delays",
"==",
"np",
".",
"array",
"(",
"[",
"0",
"]",
")",
"and",
"not",
"normalize",
":",
"if",
"verbosity",
">",
"0",
":",
"print",
"(",
"\"No tapped delay lines (delays) were requested, so using undelayed features for the dataset.\"",
")",
"assert",
"(",
"df",
"[",
"inputs",
"]",
".",
"values",
".",
"shape",
"[",
"0",
"]",
"==",
"df",
"[",
"outputs",
"]",
".",
"values",
".",
"shape",
"[",
"0",
"]",
")",
"ds",
".",
"setField",
"(",
"'input'",
",",
"df",
"[",
"inputs",
"]",
".",
"values",
")",
"ds",
".",
"setField",
"(",
"'target'",
",",
"df",
"[",
"outputs",
"]",
".",
"values",
")",
"ds",
".",
"linkFields",
"(",
"[",
"'input'",
",",
"'target'",
"]",
")",
"# for inp, outp in zip(df[inputs].values, df[outputs].values):",
"# ds.appendLinked(inp, outp)",
"assert",
"(",
"len",
"(",
"ds",
"[",
"'input'",
"]",
")",
"==",
"len",
"(",
"ds",
"[",
"'target'",
"]",
")",
")",
"else",
":",
"for",
"i",
",",
"out_vec",
"in",
"enumerate",
"(",
"df",
"[",
"outputs",
"]",
".",
"values",
")",
":",
"if",
"verbosity",
">",
"0",
"and",
"i",
"%",
"100",
"==",
"0",
":",
"print",
"(",
"\"{}%\"",
".",
"format",
"(",
"i",
"/",
".01",
"/",
"len",
"(",
"df",
")",
")",
")",
"elif",
"verbosity",
">",
"1",
":",
"print",
"(",
"'sample[{i}].target={out_vec}'",
".",
"format",
"(",
"i",
"=",
"i",
",",
"out_vec",
"=",
"out_vec",
")",
")",
"if",
"i",
"<",
"max",
"(",
"delays",
")",
":",
"continue",
"inp_vec",
"=",
"[",
"]",
"for",
"delay",
"in",
"delays",
":",
"inp_vec",
"+=",
"list",
"(",
"(",
"df",
"[",
"inputs",
"]",
".",
"values",
"[",
"i",
"-",
"delay",
"]",
"-",
"means",
"[",
":",
"N_inp",
"]",
")",
"/",
"stds",
"[",
":",
"N_inp",
"]",
")",
"ds",
".",
"addSample",
"(",
"inp_vec",
",",
"(",
"out_vec",
"-",
"means",
"[",
"N_inp",
":",
"]",
")",
"/",
"stds",
"[",
"N_inp",
":",
"]",
")",
"if",
"verbosity",
">",
"0",
":",
"print",
"(",
"\"Dataset now has {} samples\"",
".",
"format",
"(",
"len",
"(",
"ds",
")",
")",
")",
"if",
"normalize",
":",
"return",
"ds",
",",
"means",
",",
"stds",
"else",
":",
"return",
"ds"
] | 48.35 | 28.4875 |
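A hedged calling sketch for `dataset_from_dataframe` with a toy frame (assumes pybrain and the module's helpers are importable):

```python
import pandas as pd

df = pd.DataFrame({'x1': range(10), 'x2': range(10, 20), 'y': range(20, 30)})

# Tapped delays of 1 and 2 samples on x1/x2 predicting y; with normalize=True
# the returned means/stds let you denormalize network outputs later.
ds, means, stds = dataset_from_dataframe(df, delays=[1, 2],
                                         inputs=['x1', 'x2'],
                                         outputs=['y'], normalize=True)
```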
def get_expiration_date(self):
"""
Get the expiration date from the database.
:return: The expiration date from the database.
:rtype: str|None
"""
if self._authorization() and self.is_in_database() and not self.is_time_older():
# * We are authorized to work.
# and
# * The element we are testing is in the database.
# and
# * The expiration date is in the future.
# We get the expiration date from the database.
result = PyFunceble.INTERN["whois_db"][PyFunceble.INTERN["file_to_test"]][
PyFunceble.INTERN["to_test"]
]["expiration_date"]
if result:
# The expiration date from the database is not empty nor
# equal to None.
# We return it.
return result
# We return None, there is no data to work with.
return None
|
[
"def",
"get_expiration_date",
"(",
"self",
")",
":",
"if",
"self",
".",
"_authorization",
"(",
")",
"and",
"self",
".",
"is_in_database",
"(",
")",
"and",
"not",
"self",
".",
"is_time_older",
"(",
")",
":",
"# * We are authorized to work.",
"# and",
"# * The element we are testing is in the database.",
"# and",
"# * The expiration date is in the future.",
"# We get the expiration date from the database.",
"result",
"=",
"PyFunceble",
".",
"INTERN",
"[",
"\"whois_db\"",
"]",
"[",
"PyFunceble",
".",
"INTERN",
"[",
"\"file_to_test\"",
"]",
"]",
"[",
"PyFunceble",
".",
"INTERN",
"[",
"\"to_test\"",
"]",
"]",
"[",
"\"expiration_date\"",
"]",
"if",
"result",
":",
"# The expiration date from the database is not empty nor",
"# equal to None.",
"# We return it.",
"return",
"result",
"# We return None, there is no data to work with.",
"return",
"None"
] | 32.482759 | 21.172414 |
def _shouldConnect(self, node):
"""
Check whether this node should initiate a connection to another node
:param node: the other node
:type node: Node
"""
return isinstance(node, TCPNode) and node not in self._preventConnectNodes and (self._selfIsReadonlyNode or self._selfNode.address > node.address)
|
[
"def",
"_shouldConnect",
"(",
"self",
",",
"node",
")",
":",
"return",
"isinstance",
"(",
"node",
",",
"TCPNode",
")",
"and",
"node",
"not",
"in",
"self",
".",
"_preventConnectNodes",
"and",
"(",
"self",
".",
"_selfIsReadonlyNode",
"or",
"self",
".",
"_selfNode",
".",
"address",
">",
"node",
".",
"address",
")"
] | 38 | 28.888889 |
def transform(self, data):
"""
Transforms the data.
"""
if not self._get("fitted"):
raise RuntimeError("`transform` called before `fit` or `fit_transform`.")
data = data.copy()
output_column_prefix = self._get("output_column_prefix")
if output_column_prefix is None:
prefix = ""
else:
prefix = output_column_prefix + '.'
transform_function = self._get("transform_function")
feature_columns = self._get("features")
feature_columns = _internal_utils.select_feature_subset(data, feature_columns)
for f in feature_columns:
data[prefix + f] = transform_function(data[f])
return data
|
[
"def",
"transform",
"(",
"self",
",",
"data",
")",
":",
"if",
"not",
"self",
".",
"_get",
"(",
"\"fitted\"",
")",
":",
"raise",
"RuntimeError",
"(",
"\"`transform` called before `fit` or `fit_transform`.\"",
")",
"data",
"=",
"data",
".",
"copy",
"(",
")",
"output_column_prefix",
"=",
"self",
".",
"_get",
"(",
"\"output_column_prefix\"",
")",
"if",
"output_column_prefix",
"is",
"None",
":",
"prefix",
"=",
"\"\"",
"else",
":",
"prefix",
"=",
"output_column_prefix",
"+",
"'.'",
"transform_function",
"=",
"self",
".",
"_get",
"(",
"\"transform_function\"",
")",
"feature_columns",
"=",
"self",
".",
"_get",
"(",
"\"features\"",
")",
"feature_columns",
"=",
"_internal_utils",
".",
"select_feature_subset",
"(",
"data",
",",
"feature_columns",
")",
"for",
"f",
"in",
"feature_columns",
":",
"data",
"[",
"prefix",
"+",
"f",
"]",
"=",
"transform_function",
"(",
"data",
"[",
"f",
"]",
")",
"return",
"data"
] | 28.48 | 22.56 |
def connect(cls, database: str, user: str, password: str, host: str, port: int, *, use_pool: bool=True,
enable_ssl: bool=False, minsize=1, maxsize=50, keepalives_idle=5, keepalives_interval=4, echo=False,
**kwargs):
"""
Sets connection parameters
    For more information on the parameters that it accepts,
see : http://www.postgresql.org/docs/9.2/static/libpq-connect.html
"""
cls._connection_params['database'] = database
cls._connection_params['user'] = user
cls._connection_params['password'] = password
cls._connection_params['host'] = host
cls._connection_params['port'] = port
cls._connection_params['sslmode'] = 'prefer' if enable_ssl else 'disable'
cls._connection_params['minsize'] = minsize
cls._connection_params['maxsize'] = maxsize
cls._connection_params['keepalives_idle'] = keepalives_idle
cls._connection_params['keepalives_interval'] = keepalives_interval
cls._connection_params['echo'] = echo
cls._connection_params.update(kwargs)
cls._use_pool = use_pool
|
[
"def",
"connect",
"(",
"cls",
",",
"database",
":",
"str",
",",
"user",
":",
"str",
",",
"password",
":",
"str",
",",
"host",
":",
"str",
",",
"port",
":",
"int",
",",
"*",
",",
"use_pool",
":",
"bool",
"=",
"True",
",",
"enable_ssl",
":",
"bool",
"=",
"False",
",",
"minsize",
"=",
"1",
",",
"maxsize",
"=",
"50",
",",
"keepalives_idle",
"=",
"5",
",",
"keepalives_interval",
"=",
"4",
",",
"echo",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"cls",
".",
"_connection_params",
"[",
"'database'",
"]",
"=",
"database",
"cls",
".",
"_connection_params",
"[",
"'user'",
"]",
"=",
"user",
"cls",
".",
"_connection_params",
"[",
"'password'",
"]",
"=",
"password",
"cls",
".",
"_connection_params",
"[",
"'host'",
"]",
"=",
"host",
"cls",
".",
"_connection_params",
"[",
"'port'",
"]",
"=",
"port",
"cls",
".",
"_connection_params",
"[",
"'sslmode'",
"]",
"=",
"'prefer'",
"if",
"enable_ssl",
"else",
"'disable'",
"cls",
".",
"_connection_params",
"[",
"'minsize'",
"]",
"=",
"minsize",
"cls",
".",
"_connection_params",
"[",
"'maxsize'",
"]",
"=",
"maxsize",
"cls",
".",
"_connection_params",
"[",
"'keepalives_idle'",
"]",
"=",
"keepalives_idle",
"cls",
".",
"_connection_params",
"[",
"'keepalives_interval'",
"]",
"=",
"keepalives_interval",
"cls",
".",
"_connection_params",
"[",
"'echo'",
"]",
"=",
"echo",
"cls",
".",
"_connection_params",
".",
"update",
"(",
"kwargs",
")",
"cls",
".",
"_use_pool",
"=",
"use_pool"
] | 53.619048 | 19.047619 |
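Since this classmethod only records parameters, a hedged setup sketch looks like the following; `PGStore` is a hypothetical subclass exposing the method:

```python
# No connection is opened here; the pool is built lazily from these values.
PGStore.connect(database='app', user='app_user', password='secret',
                host='127.0.0.1', port=5432,
                use_pool=True, minsize=2, maxsize=20)
```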
def create_for_collection_items(item_type, hint):
"""
Helper method for collection items
:param item_type:
:return:
"""
# this leads to infinite loops
# try:
# prt_type = get_pretty_type_str(item_type)
# except:
# prt_type = str(item_type)
return TypeInformationRequiredError("Cannot parse object of type {t} as a collection: this type has no valid "
"PEP484 type hint about its contents: found {h}. Please use a standard "
"PEP484 declaration such as Dict[str, Foo] or List[Foo]"
"".format(t=str(item_type), h=hint))
|
[
"def",
"create_for_collection_items",
"(",
"item_type",
",",
"hint",
")",
":",
"# this leads to infinite loops",
"# try:",
"# prt_type = get_pretty_type_str(item_type)",
"# except:",
"# prt_type = str(item_type)",
"return",
"TypeInformationRequiredError",
"(",
"\"Cannot parse object of type {t} as a collection: this type has no valid \"",
"\"PEP484 type hint about its contents: found {h}. Please use a standard \"",
"\"PEP484 declaration such as Dict[str, Foo] or List[Foo]\"",
"\"\"",
".",
"format",
"(",
"t",
"=",
"str",
"(",
"item_type",
")",
",",
"h",
"=",
"hint",
")",
")"
] | 45.6875 | 25.6875 |
def all(user, groupby='week', summary='default', network=False,
split_week=False, split_day=False, filter_empty=True, attributes=True,
flatten=False):
"""
Returns a dictionary containing all bandicoot indicators for the user,
as well as reporting variables.
Relevant indicators are defined in the 'individual', and 'spatial' modules.
=================================== =======================================================================
Reporting variables Description
=================================== =======================================================================
antennas_path path of the CSV file containing antennas locations
attributes_path directory where attributes were loaded
version bandicoot version
groupby grouping method ('week' or None)
split_week whether or not indicators are also computed for weekday and weekend
split_day whether or not indicators are also computed for day and night
start_time time of the first record
end_time time of the last record
night_start, night_end start and end time to define nights
weekend days used to define the weekend (``[6, 7]`` by default, where 1 is Monday)
bins number of weeks if the record are grouped
has_call whether or not records include calls
has_text whether or not records include texts
has_home whether or not a :meth:`home location <bandicoot.core.User.recompute_home>` has been found
has_network whether or not correspondents where loaded
percent_records_missing_location percentage of records without location
antennas_missing_locations number of antennas missing a location
    percent_outofnetwork_calls          percentage of calls, received or emitted, made with a correspondent not loaded in the network
percent_outofnetwork_texts percentage of texts with contacts not loaded in the network
percent_outofnetwork_contacts percentage of contacts not loaded in the network
percent_outofnetwork_call_durations percentage of minutes of calls where the contact was not loaded in the network
number_of_records total number of records
number_of_weeks number of weeks with records
=================================== =======================================================================
We also include a last set of reporting variables, for the records ignored
at load-time. Values can be ignored due to missing or inconsistent fields
(e.g., not including a valid 'datetime' value).
.. code-block:: python
{
'all': 0,
'interaction': 0,
'direction': 0,
'correspondent_id': 0,
'datetime': 0,
'call_duration': 0
}
with the total number of records ignored (key ``'all'``), as well as the
number of records with faulty values for each columns.
"""
scalar_type = 'distribution_scalar' if groupby is not None else 'scalar'
summary_type = 'distribution_summarystats' if groupby is not None else 'summarystats'
number_of_interactions_in = partial(bc.individual.number_of_interactions, direction='in')
number_of_interactions_in.__name__ = 'number_of_interaction_in'
number_of_interactions_out = partial(bc.individual.number_of_interactions, direction='out')
number_of_interactions_out.__name__ = 'number_of_interaction_out'
functions = [
(bc.individual.active_days, scalar_type),
(bc.individual.number_of_contacts, scalar_type),
(bc.individual.call_duration, summary_type),
(bc.individual.percent_nocturnal, scalar_type),
(bc.individual.percent_initiated_conversations, scalar_type),
(bc.individual.percent_initiated_interactions, scalar_type),
(bc.individual.response_delay_text, summary_type),
(bc.individual.response_rate_text, scalar_type),
(bc.individual.entropy_of_contacts, scalar_type),
(bc.individual.balance_of_contacts, summary_type),
(bc.individual.interactions_per_contact, summary_type),
(bc.individual.interevent_time, summary_type),
(bc.individual.percent_pareto_interactions, scalar_type),
(bc.individual.percent_pareto_durations, scalar_type),
(bc.individual.number_of_interactions, scalar_type),
(number_of_interactions_in, scalar_type),
(number_of_interactions_out, scalar_type),
(bc.spatial.number_of_antennas, scalar_type),
(bc.spatial.entropy_of_antennas, scalar_type),
(bc.spatial.percent_at_home, scalar_type),
(bc.spatial.radius_of_gyration, scalar_type),
(bc.spatial.frequent_antennas, scalar_type),
(bc.spatial.churn_rate, scalar_type)
]
if user.has_recharges:
functions += [
(bc.recharge.amount_recharges, summary_type),
(bc.recharge.interevent_time_recharges, summary_type),
(bc.recharge.percent_pareto_recharges, scalar_type),
(bc.recharge.number_of_recharges, scalar_type),
(bc.recharge.average_balance_recharges, scalar_type)
]
network_functions = [
bc.network.clustering_coefficient_unweighted,
bc.network.clustering_coefficient_weighted,
bc.network.assortativity_attributes,
bc.network.assortativity_indicators
]
groups = list(group_records(user.records, groupby=groupby))
bins_with_data = len(groups)
groups = list(group_records_with_padding(user.records, groupby=groupby))
bins = len(groups)
bins_without_data = bins - bins_with_data
reporting = OrderedDict([
('antennas_path', user.antennas_path),
('attributes_path', user.attributes_path),
        ('recharges_path', user.recharges_path),
('version', bc.__version__),
('code_signature', bc.helper.tools.bandicoot_code_signature()),
('groupby', groupby),
('split_week', split_week),
('split_day', split_day),
('start_time', user.start_time and str(user.start_time)),
('end_time', user.end_time and str(user.end_time)),
('night_start', str(user.night_start)),
('night_end', str(user.night_end)),
('weekend', user.weekend),
('number_of_records', len(user.records)),
('number_of_antennas', len(user.antennas)),
('number_of_recharges', len(user.recharges)),
('bins', bins),
('bins_with_data', bins_with_data),
('bins_without_data', bins_without_data),
('has_call', user.has_call),
('has_text', user.has_text),
('has_home', user.has_home),
('has_recharges', user.has_recharges),
('has_attributes', user.has_attributes),
('has_network', user.has_network),
('percent_records_missing_location', bc.helper.tools.percent_records_missing_location(user)),
('antennas_missing_locations', bc.helper.tools.antennas_missing_locations(user)),
('percent_outofnetwork_calls', user.percent_outofnetwork_calls),
('percent_outofnetwork_texts', user.percent_outofnetwork_texts),
('percent_outofnetwork_contacts', user.percent_outofnetwork_contacts),
('percent_outofnetwork_call_durations', user.percent_outofnetwork_call_durations),
])
if user.ignored_records is not None:
reporting['ignored_records'] = OrderedDict(user.ignored_records)
returned = OrderedDict([
('name', user.name),
('reporting', reporting)
])
for fun, datatype in functions:
try:
metric = fun(user, groupby=groupby, summary=summary,
datatype=datatype, filter_empty=filter_empty,
split_week=split_week, split_day=split_day)
except ValueError:
metric = fun(user, groupby=groupby, datatype=datatype,
split_week=split_week, filter_empty=filter_empty,
split_day=split_day)
returned[fun.__name__] = metric
if network and user.has_network:
for fun in network_functions:
returned[fun.__name__] = fun(user)
if attributes and user.attributes != {}:
returned['attributes'] = OrderedDict(user.attributes)
if flatten is True:
return globals()['flatten'](returned)
return returned
|
[
"def",
"all",
"(",
"user",
",",
"groupby",
"=",
"'week'",
",",
"summary",
"=",
"'default'",
",",
"network",
"=",
"False",
",",
"split_week",
"=",
"False",
",",
"split_day",
"=",
"False",
",",
"filter_empty",
"=",
"True",
",",
"attributes",
"=",
"True",
",",
"flatten",
"=",
"False",
")",
":",
"scalar_type",
"=",
"'distribution_scalar'",
"if",
"groupby",
"is",
"not",
"None",
"else",
"'scalar'",
"summary_type",
"=",
"'distribution_summarystats'",
"if",
"groupby",
"is",
"not",
"None",
"else",
"'summarystats'",
"number_of_interactions_in",
"=",
"partial",
"(",
"bc",
".",
"individual",
".",
"number_of_interactions",
",",
"direction",
"=",
"'in'",
")",
"number_of_interactions_in",
".",
"__name__",
"=",
"'number_of_interaction_in'",
"number_of_interactions_out",
"=",
"partial",
"(",
"bc",
".",
"individual",
".",
"number_of_interactions",
",",
"direction",
"=",
"'out'",
")",
"number_of_interactions_out",
".",
"__name__",
"=",
"'number_of_interaction_out'",
"functions",
"=",
"[",
"(",
"bc",
".",
"individual",
".",
"active_days",
",",
"scalar_type",
")",
",",
"(",
"bc",
".",
"individual",
".",
"number_of_contacts",
",",
"scalar_type",
")",
",",
"(",
"bc",
".",
"individual",
".",
"call_duration",
",",
"summary_type",
")",
",",
"(",
"bc",
".",
"individual",
".",
"percent_nocturnal",
",",
"scalar_type",
")",
",",
"(",
"bc",
".",
"individual",
".",
"percent_initiated_conversations",
",",
"scalar_type",
")",
",",
"(",
"bc",
".",
"individual",
".",
"percent_initiated_interactions",
",",
"scalar_type",
")",
",",
"(",
"bc",
".",
"individual",
".",
"response_delay_text",
",",
"summary_type",
")",
",",
"(",
"bc",
".",
"individual",
".",
"response_rate_text",
",",
"scalar_type",
")",
",",
"(",
"bc",
".",
"individual",
".",
"entropy_of_contacts",
",",
"scalar_type",
")",
",",
"(",
"bc",
".",
"individual",
".",
"balance_of_contacts",
",",
"summary_type",
")",
",",
"(",
"bc",
".",
"individual",
".",
"interactions_per_contact",
",",
"summary_type",
")",
",",
"(",
"bc",
".",
"individual",
".",
"interevent_time",
",",
"summary_type",
")",
",",
"(",
"bc",
".",
"individual",
".",
"percent_pareto_interactions",
",",
"scalar_type",
")",
",",
"(",
"bc",
".",
"individual",
".",
"percent_pareto_durations",
",",
"scalar_type",
")",
",",
"(",
"bc",
".",
"individual",
".",
"number_of_interactions",
",",
"scalar_type",
")",
",",
"(",
"number_of_interactions_in",
",",
"scalar_type",
")",
",",
"(",
"number_of_interactions_out",
",",
"scalar_type",
")",
",",
"(",
"bc",
".",
"spatial",
".",
"number_of_antennas",
",",
"scalar_type",
")",
",",
"(",
"bc",
".",
"spatial",
".",
"entropy_of_antennas",
",",
"scalar_type",
")",
",",
"(",
"bc",
".",
"spatial",
".",
"percent_at_home",
",",
"scalar_type",
")",
",",
"(",
"bc",
".",
"spatial",
".",
"radius_of_gyration",
",",
"scalar_type",
")",
",",
"(",
"bc",
".",
"spatial",
".",
"frequent_antennas",
",",
"scalar_type",
")",
",",
"(",
"bc",
".",
"spatial",
".",
"churn_rate",
",",
"scalar_type",
")",
"]",
"if",
"user",
".",
"has_recharges",
":",
"functions",
"+=",
"[",
"(",
"bc",
".",
"recharge",
".",
"amount_recharges",
",",
"summary_type",
")",
",",
"(",
"bc",
".",
"recharge",
".",
"interevent_time_recharges",
",",
"summary_type",
")",
",",
"(",
"bc",
".",
"recharge",
".",
"percent_pareto_recharges",
",",
"scalar_type",
")",
",",
"(",
"bc",
".",
"recharge",
".",
"number_of_recharges",
",",
"scalar_type",
")",
",",
"(",
"bc",
".",
"recharge",
".",
"average_balance_recharges",
",",
"scalar_type",
")",
"]",
"network_functions",
"=",
"[",
"bc",
".",
"network",
".",
"clustering_coefficient_unweighted",
",",
"bc",
".",
"network",
".",
"clustering_coefficient_weighted",
",",
"bc",
".",
"network",
".",
"assortativity_attributes",
",",
"bc",
".",
"network",
".",
"assortativity_indicators",
"]",
"groups",
"=",
"list",
"(",
"group_records",
"(",
"user",
".",
"records",
",",
"groupby",
"=",
"groupby",
")",
")",
"bins_with_data",
"=",
"len",
"(",
"groups",
")",
"groups",
"=",
"list",
"(",
"group_records_with_padding",
"(",
"user",
".",
"records",
",",
"groupby",
"=",
"groupby",
")",
")",
"bins",
"=",
"len",
"(",
"groups",
")",
"bins_without_data",
"=",
"bins",
"-",
"bins_with_data",
"reporting",
"=",
"OrderedDict",
"(",
"[",
"(",
"'antennas_path'",
",",
"user",
".",
"antennas_path",
")",
",",
"(",
"'attributes_path'",
",",
"user",
".",
"attributes_path",
")",
",",
"(",
"'recharges_path'",
",",
"user",
".",
"attributes_path",
")",
",",
"(",
"'version'",
",",
"bc",
".",
"__version__",
")",
",",
"(",
"'code_signature'",
",",
"bc",
".",
"helper",
".",
"tools",
".",
"bandicoot_code_signature",
"(",
")",
")",
",",
"(",
"'groupby'",
",",
"groupby",
")",
",",
"(",
"'split_week'",
",",
"split_week",
")",
",",
"(",
"'split_day'",
",",
"split_day",
")",
",",
"(",
"'start_time'",
",",
"user",
".",
"start_time",
"and",
"str",
"(",
"user",
".",
"start_time",
")",
")",
",",
"(",
"'end_time'",
",",
"user",
".",
"end_time",
"and",
"str",
"(",
"user",
".",
"end_time",
")",
")",
",",
"(",
"'night_start'",
",",
"str",
"(",
"user",
".",
"night_start",
")",
")",
",",
"(",
"'night_end'",
",",
"str",
"(",
"user",
".",
"night_end",
")",
")",
",",
"(",
"'weekend'",
",",
"user",
".",
"weekend",
")",
",",
"(",
"'number_of_records'",
",",
"len",
"(",
"user",
".",
"records",
")",
")",
",",
"(",
"'number_of_antennas'",
",",
"len",
"(",
"user",
".",
"antennas",
")",
")",
",",
"(",
"'number_of_recharges'",
",",
"len",
"(",
"user",
".",
"recharges",
")",
")",
",",
"(",
"'bins'",
",",
"bins",
")",
",",
"(",
"'bins_with_data'",
",",
"bins_with_data",
")",
",",
"(",
"'bins_without_data'",
",",
"bins_without_data",
")",
",",
"(",
"'has_call'",
",",
"user",
".",
"has_call",
")",
",",
"(",
"'has_text'",
",",
"user",
".",
"has_text",
")",
",",
"(",
"'has_home'",
",",
"user",
".",
"has_home",
")",
",",
"(",
"'has_recharges'",
",",
"user",
".",
"has_recharges",
")",
",",
"(",
"'has_attributes'",
",",
"user",
".",
"has_attributes",
")",
",",
"(",
"'has_network'",
",",
"user",
".",
"has_network",
")",
",",
"(",
"'percent_records_missing_location'",
",",
"bc",
".",
"helper",
".",
"tools",
".",
"percent_records_missing_location",
"(",
"user",
")",
")",
",",
"(",
"'antennas_missing_locations'",
",",
"bc",
".",
"helper",
".",
"tools",
".",
"antennas_missing_locations",
"(",
"user",
")",
")",
",",
"(",
"'percent_outofnetwork_calls'",
",",
"user",
".",
"percent_outofnetwork_calls",
")",
",",
"(",
"'percent_outofnetwork_texts'",
",",
"user",
".",
"percent_outofnetwork_texts",
")",
",",
"(",
"'percent_outofnetwork_contacts'",
",",
"user",
".",
"percent_outofnetwork_contacts",
")",
",",
"(",
"'percent_outofnetwork_call_durations'",
",",
"user",
".",
"percent_outofnetwork_call_durations",
")",
",",
"]",
")",
"if",
"user",
".",
"ignored_records",
"is",
"not",
"None",
":",
"reporting",
"[",
"'ignored_records'",
"]",
"=",
"OrderedDict",
"(",
"user",
".",
"ignored_records",
")",
"returned",
"=",
"OrderedDict",
"(",
"[",
"(",
"'name'",
",",
"user",
".",
"name",
")",
",",
"(",
"'reporting'",
",",
"reporting",
")",
"]",
")",
"for",
"fun",
",",
"datatype",
"in",
"functions",
":",
"try",
":",
"metric",
"=",
"fun",
"(",
"user",
",",
"groupby",
"=",
"groupby",
",",
"summary",
"=",
"summary",
",",
"datatype",
"=",
"datatype",
",",
"filter_empty",
"=",
"filter_empty",
",",
"split_week",
"=",
"split_week",
",",
"split_day",
"=",
"split_day",
")",
"except",
"ValueError",
":",
"metric",
"=",
"fun",
"(",
"user",
",",
"groupby",
"=",
"groupby",
",",
"datatype",
"=",
"datatype",
",",
"split_week",
"=",
"split_week",
",",
"filter_empty",
"=",
"filter_empty",
",",
"split_day",
"=",
"split_day",
")",
"returned",
"[",
"fun",
".",
"__name__",
"]",
"=",
"metric",
"if",
"network",
"and",
"user",
".",
"has_network",
":",
"for",
"fun",
"in",
"network_functions",
":",
"returned",
"[",
"fun",
".",
"__name__",
"]",
"=",
"fun",
"(",
"user",
")",
"if",
"attributes",
"and",
"user",
".",
"attributes",
"!=",
"{",
"}",
":",
"returned",
"[",
"'attributes'",
"]",
"=",
"OrderedDict",
"(",
"user",
".",
"attributes",
")",
"if",
"flatten",
"is",
"True",
":",
"return",
"globals",
"(",
")",
"[",
"'flatten'",
"]",
"(",
"returned",
")",
"return",
"returned"
] | 48.457627 | 25.485876 |
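A minimal end-to-end sketch for the bandicoot indicators above (paths are illustrative):

```python
import bandicoot as bc

# read_csv builds a User with records, antennas and optional attributes.
user = bc.read_csv('ego', 'records/', 'antennas.csv')
indicators = bc.utils.all(user, groupby='week', split_week=True)
```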
def to_string(self, buf=None, format_abs_ref_as='string',
upper_triangle=True, header=True, index=True, **kwargs):
"""Render a DataFrame to a console-friendly tabular output.
Wrapper around the :meth:`pandas.DataFrame.to_string` method.
"""
out = self._sympy_formatter()
out = out._abs_ref_formatter(format_as=format_abs_ref_as)
if not upper_triangle:
out = out._remove_upper_triangle()
content = out._frame.to_string(buf=buf, header=header, index=index,
**kwargs)
if not index and not header:
# NOTE(the following might be removed in the future
# introduced because of formatting bug in pandas
# See https://github.com/pandas-dev/pandas/issues/13032)
space = ' ' * (out.loc[:, 'atom'].str.len().max()
- len(out.iloc[0, 0]))
content = space + content
return content
|
[
"def",
"to_string",
"(",
"self",
",",
"buf",
"=",
"None",
",",
"format_abs_ref_as",
"=",
"'string'",
",",
"upper_triangle",
"=",
"True",
",",
"header",
"=",
"True",
",",
"index",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"out",
"=",
"self",
".",
"_sympy_formatter",
"(",
")",
"out",
"=",
"out",
".",
"_abs_ref_formatter",
"(",
"format_as",
"=",
"format_abs_ref_as",
")",
"if",
"not",
"upper_triangle",
":",
"out",
"=",
"out",
".",
"_remove_upper_triangle",
"(",
")",
"content",
"=",
"out",
".",
"_frame",
".",
"to_string",
"(",
"buf",
"=",
"buf",
",",
"header",
"=",
"header",
",",
"index",
"=",
"index",
",",
"*",
"*",
"kwargs",
")",
"if",
"not",
"index",
"and",
"not",
"header",
":",
"# NOTE(the following might be removed in the future",
"# introduced because of formatting bug in pandas",
"# See https://github.com/pandas-dev/pandas/issues/13032)",
"space",
"=",
"' '",
"*",
"(",
"out",
".",
"loc",
"[",
":",
",",
"'atom'",
"]",
".",
"str",
".",
"len",
"(",
")",
".",
"max",
"(",
")",
"-",
"len",
"(",
"out",
".",
"iloc",
"[",
"0",
",",
"0",
"]",
")",
")",
"content",
"=",
"space",
"+",
"content",
"return",
"content"
] | 46.428571 | 17.761905 |
def find(self, instance_id):
""" find an instance
        Create a new instance and populate it with stored data if it exists.
Args:
instance_id (str): UUID of the instance
Returns:
AtlasServiceInstance.Instance: An instance
"""
instance = AtlasServiceInstance.Instance(instance_id, self.backend)
self.backend.storage.populate(instance)
return instance
|
[
"def",
"find",
"(",
"self",
",",
"instance_id",
")",
":",
"instance",
"=",
"AtlasServiceInstance",
".",
"Instance",
"(",
"instance_id",
",",
"self",
".",
"backend",
")",
"self",
".",
"backend",
".",
"storage",
".",
"populate",
"(",
"instance",
")",
"return",
"instance"
] | 31.857143 | 19.928571 |
def _finalize(self, chain=-1):
"""Finalize the chain for all tallyable objects."""
chain = range(self.chains)[chain]
for name in self.trace_names[chain]:
self._traces[name]._finalize(chain)
self.commit()
|
[
"def",
"_finalize",
"(",
"self",
",",
"chain",
"=",
"-",
"1",
")",
":",
"chain",
"=",
"range",
"(",
"self",
".",
"chains",
")",
"[",
"chain",
"]",
"for",
"name",
"in",
"self",
".",
"trace_names",
"[",
"chain",
"]",
":",
"self",
".",
"_traces",
"[",
"name",
"]",
".",
"_finalize",
"(",
"chain",
")",
"self",
".",
"commit",
"(",
")"
] | 40.333333 | 6.833333 |
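A short sketch of a call site — `db` is assumed to be a PyMC 2-style Database subclass instance with at least one sampled chain.

db._finalize(chain=-1)  # flush every tallyable trace of the newest chain, then commit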
def tournament(self, individuals, tourn_size, num_selections=None):
        """Conducts tournament selection of size tourn_size."""
winners = []
locs = []
if num_selections is None:
num_selections = len(individuals)
        for _ in np.arange(num_selections):
            # sample pool with replacement
            pool_i = self.random_state.choice(len(individuals), size=tourn_size)
            pool = []
            for j in pool_i:
                pool.append(np.mean(individuals[j].fitness))
# winner
locs.append(pool_i[np.argmin(pool)])
winners.append(copy.deepcopy(individuals[locs[-1]]))
return winners,locs
|
[
"def",
"tournament",
"(",
"self",
",",
"individuals",
",",
"tourn_size",
",",
"num_selections",
"=",
"None",
")",
":",
"winners",
"=",
"[",
"]",
"locs",
"=",
"[",
"]",
"if",
"num_selections",
"is",
"None",
":",
"num_selections",
"=",
"len",
"(",
"individuals",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"num_selections",
")",
":",
"# sample pool with replacement",
"pool_i",
"=",
"self",
".",
"random_state",
".",
"choice",
"(",
"len",
"(",
"individuals",
")",
",",
"size",
"=",
"tourn_size",
")",
"pool",
"=",
"[",
"]",
"for",
"i",
"in",
"pool_i",
":",
"pool",
".",
"append",
"(",
"np",
".",
"mean",
"(",
"individuals",
"[",
"i",
"]",
".",
"fitness",
")",
")",
"# winner",
"locs",
".",
"append",
"(",
"pool_i",
"[",
"np",
".",
"argmin",
"(",
"pool",
")",
"]",
")",
"winners",
".",
"append",
"(",
"copy",
".",
"deepcopy",
"(",
"individuals",
"[",
"locs",
"[",
"-",
"1",
"]",
"]",
")",
")",
"return",
"winners",
",",
"locs"
] | 37.5 | 17.722222 |
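A self-contained demo of the selection logic, assuming the `tournament` function above is defined at module level with `numpy as np` and `copy` imported; the stand-in classes below are hypothetical.

import copy
import numpy as np

class Ind:                       # stand-in: real individuals carry more state
    def __init__(self, fitness):
        self.fitness = fitness   # list of fitness values, lower is better

class Selector:
    random_state = np.random.RandomState(0)
    tournament = tournament      # reuse the function above as a method

pop = [Ind([3.0]), Ind([1.0]), Ind([2.5]), Ind([0.5])]
winners, locs = Selector().tournament(pop, tourn_size=2, num_selections=2)
print([w.fitness for w in winners], locs)  # lowest mean fitness in each pool wins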
def roll_dice(roll, *, functions=True, floats=True):
"""
Rolls dice in dice notation with advanced syntax used according to tinyurl.com/pydice
:param roll: Roll in dice notation
:return: Result of roll, and an explanation string
"""
roll = ''.join(roll.split())
roll = regex.sub(r'(?<=d)%', '100', roll, regex.IGNORECASE)
roll = roll.replace('^', '**')
roll = zero_width_split(r'((?<=[\(\),%^\/+*-])(?=.))|((?<=.)(?=[\(\),%^\/+*-]))', roll) # Split the string on the boundary between operators and other chars
string = []
results = []
for group in roll:
if group in '()/=<>,%^+*-' or group in DEFAULT_FUNCTIONS: #Append operators without modification
results.append(group)
string.append(group)
continue
try:
explode = regex.match(r'^((\d*)d(\d+))!$', group, regex.IGNORECASE) # Regex for exploding dice, ie. 2d10!, 4d100!, d12!, etc.
specific_explode = regex.match(r'^((\d*)d(\d+))!(\d+)$', group) # Regex for exploding dice on specific number, ie. d20!10 or d12!4
comparison_explode = regex.match(r'^((\d*)d(\d+))!([<>])(\d+)$', group, regex.IGNORECASE) # Regex for exploding dice with a comparison, ie. d20!>10, d6!<2
penetrate = regex.match(r'^((\d*)d(\d+))!p$', group, regex.IGNORECASE) # Penetrating dice are the same as exploding except any dice after the initial number are added with a -1 penalty
specific_penetrate = regex.match(r'^((\d*)d(\d+))!p(\d+)$', group, regex.IGNORECASE) # See above
comparison_penetrate = regex.match(r'^((\d*)d(\d+))!p([<>])(\d+)$', group, regex.IGNORECASE) # See above
reroll = regex.match(r'^((\d*)d(\d+))([Rr])$', group, regex.IGNORECASE) # Reroll on a one, matches 1d6R, 4d12r, etc.
specific_reroll = regex.match(r'^((\d*)d(\d+))([Rr])(\d+)$', group, regex.IGNORECASE) # Reroll on a specific number
comparison_reroll = regex.match(r'^((\d*)d(\d+))([Rr])([<>])(\d+)$', group, regex.IGNORECASE) # Reroll on a comparison
success_comparison = regex.match(r'^((?:\d*)d(\d+))([<>])(\d+)$', group, regex.IGNORECASE) # Regex for dice with comparison, ie. 2d10>4, 5d3<2, etc.
success_fail_comparison = regex.match(r'^((?:\d*)d(\d+))(?|((<)(\d+)f(>)(\d+))|((>)(\d+)f(<)(\d+)))$', group, regex.IGNORECASE) # Regex for dice with success comparison and failure comparison.
keep = regex.match(r'^((?:\d*)d\d+)([Kk])(\d*)$', group, regex.IGNORECASE) # Regex for keeping a number of dice, ie. 2d10K, 2d10k3, etc.
drop = regex.match(r'^((?:\d*)d\d+)([Xx])(\d*)$', group, regex.IGNORECASE) # As above but with dropping dice and X
individual = regex.match(r'^((\d*)d(\d+))([asm])(\d+)$', group, regex.IGNORECASE) #Regex for rolling dice with a modifier attached to each roll
normal = regex.match(r'^((\d*)d(\d+))$', group, regex.IGNORECASE) # Regex for normal dice rolls
literal = regex.match(r'^(\d+)(?!\.)$', group, regex.IGNORECASE) # Regex for number literals.
float_literal = regex.match(r'^(\.\d+)|(\d+.\d+)$', group, regex.IGNORECASE) # Regex for floats
if explode is not None: # Handle exploding dice without a comparison modifier.
type_of_dice = int(explode[3])
result = []
last_result = roll_group(explode[1])
result.extend(last_result)
number_to_roll = num_equal(last_result, '=', type_of_dice)
while number_to_roll != 0:
last_result = roll_group(str(number_to_roll) + 'd' + str(type_of_dice)) # Reroll dice
result.extend(last_result)
number_to_roll = num_equal(last_result, '=', type_of_dice) # Check how many dice we have to reroll again
results.append(sum(result))
roll = ','.join([('!' + str(i) if i == type_of_dice else str(i)) for i in result]) # Build a string of the dice rolls, adding an exclamation mark before every roll that resulted in an explosion.
string.append('[%s]' % roll)
elif specific_explode is not None: # Handle exploding dice without a comparison modifier.
type_of_dice = int(specific_explode[3])
comparator = int(specific_explode[4])
assert 0 < comparator <= type_of_dice
result = []
last_result = roll_group(specific_explode[1])
result.extend(last_result)
number_to_roll = num_equal(last_result, '=', comparator)
while number_to_roll != 0:
last_result = roll_group(str(number_to_roll) + 'd' + str(type_of_dice))
result.extend(last_result)
number_to_roll = num_equal(last_result, '=', comparator)
results.append(sum(result))
roll = ','.join([('!' + str(i) if i == comparator else str(i)) for i in result]) # Build a string of the dice rolls, adding an exclamation mark before every roll that resulted in an explosion.
string.append('[%s]' % roll)
elif comparison_explode is not None: # Handle exploding dice with a comparison modifier
type_of_dice = int(comparison_explode[3])
comparator = int(comparison_explode[5])
if comparison_explode[4] == '>': # Ensure comparison is within bounds
assert 0 < comparator < type_of_dice
else:
assert 1 < comparator <= type_of_dice
result = []
last_result = roll_group(comparison_explode[1])
result.extend(last_result)
if comparison_explode[4] == '>':
number_to_roll = num_equal(last_result, '>', comparator)
while number_to_roll != 0:
last_result = roll_group(str(number_to_roll) + 'd' + str(type_of_dice))
result.extend(last_result)
number_to_roll = num_equal(last_result, '>', comparator)
roll = ','.join([('!' + str(i) if i > comparator else str(i)) for i in
result]) # Same as on other explodes except with a > or < comparison
else:
number_to_roll = num_equal(last_result, '<', comparator)
while number_to_roll != 0:
last_result = roll_group(str(number_to_roll) + 'd' + str(type_of_dice))
result.extend(last_result)
number_to_roll = num_equal(last_result, '<', comparator)
roll = ','.join([('!' + str(i) if i < comparator else str(i)) for i in
result]) # Same as on other explodes except with a > or < comparison
results.append(sum(result))
string.append('[%s]' % roll)
elif penetrate is not None: # Handle penetrating dice without a comparison modifier.
type_of_dice = int(penetrate[3])
first_num = int(penetrate[2])
result = []
last_result = roll_group(penetrate[1])
result.extend(last_result)
number_to_roll = num_equal(last_result, '=', type_of_dice)
while number_to_roll != 0:
last_result = roll_group(str(number_to_roll) + 'd' + str(type_of_dice))
result.extend(last_result)
number_to_roll = num_equal(last_result, '=', type_of_dice)
pre_result = result[:first_num] # Add the first rolls with no modifier
pre_result.extend([x - 1 for x in result[first_num:]]) # Add the second rolls with a -1 modifier
results.append(sum(pre_result))
roll = ','.join(['!' + str(i) if i == type_of_dice else str(i) for i in result[:first_num]]) # Add the first numbers, without the -1 but with a ! when roll is penetration
roll += (',' if len(pre_result) > first_num else '') # Only add the comma in between if there's at least one penetration
roll += ','.join([('!' + str(i) + '-1' if i == type_of_dice else str(i) + '-1') for i in result[first_num:]]) # Add the penetration dice with the '-1' tacked on the end
string.append('[%s]' % roll)
elif specific_penetrate is not None: # Handle penetrating dice without a comparison modifier.
type_of_dice = int(specific_penetrate[3])
first_num = int(specific_penetrate[2])
comparator = int(specific_penetrate[4])
assert 0 < comparator <= type_of_dice
result = []
last_result = roll_group(specific_penetrate[1])
result.extend(last_result)
number_to_roll = num_equal(last_result, '=', comparator)
while number_to_roll != 0:
last_result = roll_group(str(number_to_roll) + 'd' + str(type_of_dice))
result.extend(last_result)
number_to_roll = num_equal(last_result, '=', comparator)
pre_result = result[:first_num] # Same as normal penetration
pre_result.extend([x - 1 for x in result[first_num:]])
results.append(sum(pre_result))
roll = ','.join(['!' + str(i) if i == comparator else str(i) for i in result[:first_num]]) # Same as above
roll += (',' if len(pre_result) > first_num else '')
roll += ','.join([('!' + str(i) + '-1' if i == comparator else str(i) + '-1') for i in result[first_num:]])
string.append('[%s]' % roll)
elif comparison_penetrate is not None: # Handle penetrating dice without a comparison modifier.
type_of_dice = int(comparison_penetrate[3])
comparator = int(comparison_penetrate[5])
first_num = int(comparison_penetrate[2])
if comparison_penetrate[4] == '>': # Ensure comparison is within bounds
assert 0 < comparator < type_of_dice
else:
assert 1 < comparator <= type_of_dice
result = []
last_result = roll_group(comparison_penetrate[1])
result.extend(last_result)
# Do penetration based on more than or less than sign.
if comparison_penetrate[4] == '>':
number_to_roll = num_equal(last_result, '>', comparator)
while number_to_roll != 0:
last_result = roll_group(str(number_to_roll) + 'd' + str(type_of_dice))
result.extend(last_result)
number_to_roll = num_equal(last_result, '>', comparator)
else:
number_to_roll = num_equal(last_result, '<', comparator)
while number_to_roll != 0:
last_result = roll_group(str(number_to_roll) + 'd' + str(type_of_dice))
result.extend(last_result)
number_to_roll = num_equal(last_result, '<', comparator)
pre_result = result[:first_num]
pre_result.extend([x - 1 for x in result[first_num:]])
results.append(sum(pre_result))
if comparison_penetrate[4] == '>':
roll = ','.join(
['!' + str(i) if i > comparator else str(i) for i in result[:first_num]]) # Same as above
roll += (',' if len(pre_result) > first_num else '')
roll += ','.join(
[('!' + str(i) + '-1' if i > comparator else str(i) + '-1') for i in result[first_num:]])
else:
roll = ','.join(
['!' + str(i) if i < comparator else str(i) for i in result[:first_num]]) # Same as above
roll += (',' if len(pre_result) > first_num else '')
roll += ','.join(
[('!' + str(i) + '-1' if i < comparator else str(i) + '-1') for i in result[first_num:]])
string.append('[%s]' % roll)
elif reroll is not None: # Handle rerolling dice without a comparison modifier (ie. on 1)
type_of_dice = int(reroll[3])
result_strings = []
roll_strings = []
result = roll_group(reroll[1])
repeat = True if reroll[4] == 'R' else False # Reroll just once or infinite number of times
if repeat: #Handle rerolling the dice and building a string of all the rerolled ones
for i in range(len(result)):
prev = [result[i]]
while result[i] == 1:
result[i] = random.randint(1, type_of_dice)
prev.append(result[i])
roll_strings.append([str(x) for x in prev])
else:
for i in range(len(result)):
prev = [result[i]]
if result[i] == 1:
result[i] = random.randint(1, type_of_dice)
prev.append(result[i])
roll_strings.append([str(x) for x in prev])
results.append(sum(result))
for roll_string in roll_strings:
roll_string.reverse()
result_strings.append('%s' % roll_string[0] + ('~' if len(roll_string) > 1 else '') + '~'.join(roll_string[1:])) #Build the string
roll = ','.join(result_strings)
string.append('[%s]' % roll)
elif specific_reroll is not None: # Handle rerolling dice on a specific number, see reroll
type_of_dice = int(specific_reroll[3])
comparator = int(specific_reroll[5])
assert 0 < comparator <= type_of_dice # Ensure comparison is within bounds
result_strings = []
roll_strings = []
result = roll_group(specific_reroll[1])
repeat = True if specific_reroll[4] == 'R' else False
if repeat:
for i in range(len(result)):
prev = [result[i]]
while result[i] == comparator:
result[i] = random.randint(1, type_of_dice)
prev.append(result[i])
roll_strings.append([str(x) for x in prev])
else:
for i in range(len(result)):
prev = [result[i]]
if result[i] == comparator:
result[i] = random.randint(1, type_of_dice)
prev.append(result[i])
roll_strings.append([str(x) for x in prev])
results.append(sum(result))
for roll_string in roll_strings:
roll_string.reverse()
result_strings.append('%s' % roll_string[0] + ('~' if len(roll_string) > 1 else '') + '~'.join(roll_string[1:]))
roll = ','.join(result_strings)
string.append('[%s]' % roll)
elif comparison_reroll is not None: # Handle rerolling dice with a comparison modifier.
type_of_dice = int(comparison_reroll[3])
comparator = int(comparison_reroll[6])
if comparison_reroll[5] == '>': # Ensure comparison is within bounds
assert 0 < comparator < type_of_dice
else:
assert 1 < comparator <= type_of_dice
result_strings = []
roll_strings = []
result = roll_group(comparison_reroll[1])
repeat = True if comparison_reroll[4] == 'R' else False
if comparison_reroll[5] == '>':
if repeat:
for i in range(len(result)):
prev = [result[i]]
while result[i] > comparator:
result[i] = random.randint(1, type_of_dice)
prev.append(result[i])
roll_strings.append([str(x) for x in prev])
else:
for i in range(len(result)):
prev = [result[i]]
if result[i] > comparator:
result[i] = random.randint(1, type_of_dice)
prev.append(result[i])
roll_strings.append([str(x) for x in prev])
else:
if repeat:
for i in range(len(result)):
prev = [result[i]]
while result[i] < comparator:
result[i] = random.randint(1, type_of_dice)
prev.append(result[i])
roll_strings.append([str(x) for x in prev])
else:
for i in range(len(result)):
prev = [result[i]]
if result[i] < comparator:
result[i] = random.randint(1, type_of_dice)
prev.append(result[i])
roll_strings.append([str(x) for x in prev])
results.append(sum(result))
for roll_string in roll_strings:
roll_string.reverse()
result_strings.append('%s' % roll_string[0] + ('~' if len(roll_string) > 1 else '') + '~'.join(roll_string[1:]))
roll = ','.join(result_strings)
string.append('[%s]' % roll)
elif success_comparison is not None:
group_result = roll_group(success_comparison[1])
result = []
result_string = []
type_of_dice = int(success_comparison[2])
comparator = int(success_comparison[4])
if success_comparison[3] == '>': # Ensure comparison is within bounds
assert 0 < comparator < type_of_dice
else:
assert 1 < comparator <= type_of_dice
for die in group_result:
if success_comparison[3] == '>':
result.append(1 if die > comparator else 0)
result_string.append('!' + str(die) if die > comparator else str(die))
else:
result.append(1 if die < comparator else 0)
result_string.append('!' + str(die) if die < comparator else str(die))
results.append(sum(result))
roll = ','.join(result_string) # Craft the string, adding an exclamation mark before every string that passed the comparison.
string.append('[%s]' % roll)
elif success_fail_comparison is not None:
group_result = roll_group(success_fail_comparison[1])
result = []
result_string = []
type_of_dice = int(success_fail_comparison[2])
success_comp = int(success_fail_comparison[5])
fail_comp = int(success_fail_comparison[7])
# Ensure both comparisons are within bounds
if success_fail_comparison[4] == '>':
assert 0 < success_comp < type_of_dice
assert 1 < fail_comp <= type_of_dice
else:
assert 1 < success_comp <= type_of_dice
assert 0 < fail_comp < type_of_dice
for die in group_result:
if success_fail_comparison[4] == '>': # Get the actual list of successes and fails with both comparisons
if die > success_comp:
result.append(1)
result_string.append('!' + str(die))
elif die < fail_comp:
result.append(-1)
result_string.append('*' + str(die))
else:
result.append(0)
result_string.append(str(die))
else:
if die < success_comp:
result.append(1)
result_string.append('!' + str(die))
elif die > fail_comp:
result.append(-1)
result_string.append('*' + str(die))
else:
result.append(0)
result_string.append(str(die))
results.append(sum(result)) #
roll = ','.join(result_string)
string.append('[%s]' % roll)
elif keep is not None: # Handle rolling dice and keeping the x highest or lowest values
group_result = roll_group(keep[1])
            group_result.sort(reverse=keep[2] == 'K')  # Uppercase is keep highest and lowercase is keep lowest.
num_to_keep = int(keep[3] if keep[3] != '' else 1)
assert 1 <= num_to_keep < len(group_result)
results.append(sum(group_result[:num_to_keep]))
roll = ','.join([str(i) for i in group_result[
:num_to_keep]]) + ' ~~ ' # This time format the string with all kept rolls on the left and dropped rolls on the right
roll += ','.join([str(i) for i in group_result[num_to_keep:]])
string.append('[%s]' % roll)
elif drop is not None:
group_result = roll_group(drop[1])
group_result.sort(reverse=True if drop[2] == 'X' else False) # Same thing as keep dice
num_to_drop = int(drop[3] if drop[3] != '' else 1)
assert 1 <= num_to_drop < len(group_result)
results.append(sum(group_result[:num_to_drop]))
roll = ','.join([str(i) for i in group_result[num_to_drop:]]) + ' ~~ ' # Same as above.
roll += ','.join([str(i) for i in group_result[:num_to_drop]])
string.append('[%s]' % roll)
elif individual is not None:
group_result = roll_group(individual[1])
result = []
for i, j in enumerate(group_result): #add to each roll
if individual[4] == 'a':
result.append(j + int(individual[5]))
elif individual[4] == 's':
result.append(j - int(individual[5]))
elif individual[4] == 'm':
result.append(j * int(individual[5]))
else:
raise ValueError
results.append(sum(result))
roll = ','.join([str(x) + individual[4] + individual[5] for x in group_result]) #Create string with the modifier on each roll
string.append('[%s]' % roll)
elif normal is not None:
group_result = roll_group(group)
results.append(sum(group_result))
roll = ','.join([str(i) for i in group_result])
string.append('[%s]' % roll)
elif literal is not None:
results.append(int(literal[1])) # Just append the integer value
string.append(literal[1])
elif float_literal is not None:
if floats:
results.append(float(group))
string.append(group)
else:
raise TypeError
else:
raise Exception
except Exception:
raise DiceGroupException('"%s" is not a valid dicegroup.' % group)
parser = SimpleEval(floats=floats, functions=functions) #The parser object parses the dice rolls and functions
try:
final_result = parser.eval(''.join([str(x) for x in results])) #Call the parser to parse into one value
if not floats:
final_result = int(final_result)
except Exception:
raise DiceOperatorException('Error parsing operators and or functions')
#Create explanation string and remove extraneous spaces
explanation = ''.join(string)
explanation = zero_width_split(r"""((?<=[\/%^+])(?![\/,]))| # Split between /, %, ^, and +
((?<![\/,])(?=[\/%^+]))| # Same as above
((?<=[^(])(?=-))(?!-[^[]*])| # Split in front of - that are not in a roll
(?<=-)(?=[^\d()a-z])| # Same for splitting after - and before non-literals
(?<=[\d)\]]-)(?=.)(?![^[]*])| # Split after a - that is not in a roll
(?<=,)(?![^[]*])| # Split after a comma that is not in a roll
(?<=([^,]\*))(?!\*)| # Split after a * that is not in a roll
(?<![,\*])(?=\*) # Split before a * that is not in a roll""", explanation) #Split on ops to properly format the explanation
explanation = ' '.join(explanation)
explanation = explanation.strip()
explanation = regex.sub(r'[ \t]{2,}', ' ', explanation)
return final_result, explanation
|
[
"def",
"roll_dice",
"(",
"roll",
",",
"*",
",",
"functions",
"=",
"True",
",",
"floats",
"=",
"True",
")",
":",
"roll",
"=",
"''",
".",
"join",
"(",
"roll",
".",
"split",
"(",
")",
")",
"roll",
"=",
"regex",
".",
"sub",
"(",
"r'(?<=d)%'",
",",
"'100'",
",",
"roll",
",",
"regex",
".",
"IGNORECASE",
")",
"roll",
"=",
"roll",
".",
"replace",
"(",
"'^'",
",",
"'**'",
")",
"roll",
"=",
"zero_width_split",
"(",
"r'((?<=[\\(\\),%^\\/+*-])(?=.))|((?<=.)(?=[\\(\\),%^\\/+*-]))'",
",",
"roll",
")",
"# Split the string on the boundary between operators and other chars",
"string",
"=",
"[",
"]",
"results",
"=",
"[",
"]",
"for",
"group",
"in",
"roll",
":",
"if",
"group",
"in",
"'()/=<>,%^+*-'",
"or",
"group",
"in",
"DEFAULT_FUNCTIONS",
":",
"#Append operators without modification",
"results",
".",
"append",
"(",
"group",
")",
"string",
".",
"append",
"(",
"group",
")",
"continue",
"try",
":",
"explode",
"=",
"regex",
".",
"match",
"(",
"r'^((\\d*)d(\\d+))!$'",
",",
"group",
",",
"regex",
".",
"IGNORECASE",
")",
"# Regex for exploding dice, ie. 2d10!, 4d100!, d12!, etc.",
"specific_explode",
"=",
"regex",
".",
"match",
"(",
"r'^((\\d*)d(\\d+))!(\\d+)$'",
",",
"group",
")",
"# Regex for exploding dice on specific number, ie. d20!10 or d12!4",
"comparison_explode",
"=",
"regex",
".",
"match",
"(",
"r'^((\\d*)d(\\d+))!([<>])(\\d+)$'",
",",
"group",
",",
"regex",
".",
"IGNORECASE",
")",
"# Regex for exploding dice with a comparison, ie. d20!>10, d6!<2",
"penetrate",
"=",
"regex",
".",
"match",
"(",
"r'^((\\d*)d(\\d+))!p$'",
",",
"group",
",",
"regex",
".",
"IGNORECASE",
")",
"# Penetrating dice are the same as exploding except any dice after the initial number are added with a -1 penalty",
"specific_penetrate",
"=",
"regex",
".",
"match",
"(",
"r'^((\\d*)d(\\d+))!p(\\d+)$'",
",",
"group",
",",
"regex",
".",
"IGNORECASE",
")",
"# See above",
"comparison_penetrate",
"=",
"regex",
".",
"match",
"(",
"r'^((\\d*)d(\\d+))!p([<>])(\\d+)$'",
",",
"group",
",",
"regex",
".",
"IGNORECASE",
")",
"# See above",
"reroll",
"=",
"regex",
".",
"match",
"(",
"r'^((\\d*)d(\\d+))([Rr])$'",
",",
"group",
",",
"regex",
".",
"IGNORECASE",
")",
"# Reroll on a one, matches 1d6R, 4d12r, etc.",
"specific_reroll",
"=",
"regex",
".",
"match",
"(",
"r'^((\\d*)d(\\d+))([Rr])(\\d+)$'",
",",
"group",
",",
"regex",
".",
"IGNORECASE",
")",
"# Reroll on a specific number",
"comparison_reroll",
"=",
"regex",
".",
"match",
"(",
"r'^((\\d*)d(\\d+))([Rr])([<>])(\\d+)$'",
",",
"group",
",",
"regex",
".",
"IGNORECASE",
")",
"# Reroll on a comparison",
"success_comparison",
"=",
"regex",
".",
"match",
"(",
"r'^((?:\\d*)d(\\d+))([<>])(\\d+)$'",
",",
"group",
",",
"regex",
".",
"IGNORECASE",
")",
"# Regex for dice with comparison, ie. 2d10>4, 5d3<2, etc.",
"success_fail_comparison",
"=",
"regex",
".",
"match",
"(",
"r'^((?:\\d*)d(\\d+))(?|((<)(\\d+)f(>)(\\d+))|((>)(\\d+)f(<)(\\d+)))$'",
",",
"group",
",",
"regex",
".",
"IGNORECASE",
")",
"# Regex for dice with success comparison and failure comparison.",
"keep",
"=",
"regex",
".",
"match",
"(",
"r'^((?:\\d*)d\\d+)([Kk])(\\d*)$'",
",",
"group",
",",
"regex",
".",
"IGNORECASE",
")",
"# Regex for keeping a number of dice, ie. 2d10K, 2d10k3, etc.",
"drop",
"=",
"regex",
".",
"match",
"(",
"r'^((?:\\d*)d\\d+)([Xx])(\\d*)$'",
",",
"group",
",",
"regex",
".",
"IGNORECASE",
")",
"# As above but with dropping dice and X",
"individual",
"=",
"regex",
".",
"match",
"(",
"r'^((\\d*)d(\\d+))([asm])(\\d+)$'",
",",
"group",
",",
"regex",
".",
"IGNORECASE",
")",
"#Regex for rolling dice with a modifier attached to each roll",
"normal",
"=",
"regex",
".",
"match",
"(",
"r'^((\\d*)d(\\d+))$'",
",",
"group",
",",
"regex",
".",
"IGNORECASE",
")",
"# Regex for normal dice rolls",
"literal",
"=",
"regex",
".",
"match",
"(",
"r'^(\\d+)(?!\\.)$'",
",",
"group",
",",
"regex",
".",
"IGNORECASE",
")",
"# Regex for number literals.",
"float_literal",
"=",
"regex",
".",
"match",
"(",
"r'^(\\.\\d+)|(\\d+.\\d+)$'",
",",
"group",
",",
"regex",
".",
"IGNORECASE",
")",
"# Regex for floats",
"if",
"explode",
"is",
"not",
"None",
":",
"# Handle exploding dice without a comparison modifier.",
"type_of_dice",
"=",
"int",
"(",
"explode",
"[",
"3",
"]",
")",
"result",
"=",
"[",
"]",
"last_result",
"=",
"roll_group",
"(",
"explode",
"[",
"1",
"]",
")",
"result",
".",
"extend",
"(",
"last_result",
")",
"number_to_roll",
"=",
"num_equal",
"(",
"last_result",
",",
"'='",
",",
"type_of_dice",
")",
"while",
"number_to_roll",
"!=",
"0",
":",
"last_result",
"=",
"roll_group",
"(",
"str",
"(",
"number_to_roll",
")",
"+",
"'d'",
"+",
"str",
"(",
"type_of_dice",
")",
")",
"# Reroll dice",
"result",
".",
"extend",
"(",
"last_result",
")",
"number_to_roll",
"=",
"num_equal",
"(",
"last_result",
",",
"'='",
",",
"type_of_dice",
")",
"# Check how many dice we have to reroll again",
"results",
".",
"append",
"(",
"sum",
"(",
"result",
")",
")",
"roll",
"=",
"','",
".",
"join",
"(",
"[",
"(",
"'!'",
"+",
"str",
"(",
"i",
")",
"if",
"i",
"==",
"type_of_dice",
"else",
"str",
"(",
"i",
")",
")",
"for",
"i",
"in",
"result",
"]",
")",
"# Build a string of the dice rolls, adding an exclamation mark before every roll that resulted in an explosion.",
"string",
".",
"append",
"(",
"'[%s]'",
"%",
"roll",
")",
"elif",
"specific_explode",
"is",
"not",
"None",
":",
"# Handle exploding dice without a comparison modifier.",
"type_of_dice",
"=",
"int",
"(",
"specific_explode",
"[",
"3",
"]",
")",
"comparator",
"=",
"int",
"(",
"specific_explode",
"[",
"4",
"]",
")",
"assert",
"0",
"<",
"comparator",
"<=",
"type_of_dice",
"result",
"=",
"[",
"]",
"last_result",
"=",
"roll_group",
"(",
"specific_explode",
"[",
"1",
"]",
")",
"result",
".",
"extend",
"(",
"last_result",
")",
"number_to_roll",
"=",
"num_equal",
"(",
"last_result",
",",
"'='",
",",
"comparator",
")",
"while",
"number_to_roll",
"!=",
"0",
":",
"last_result",
"=",
"roll_group",
"(",
"str",
"(",
"number_to_roll",
")",
"+",
"'d'",
"+",
"str",
"(",
"type_of_dice",
")",
")",
"result",
".",
"extend",
"(",
"last_result",
")",
"number_to_roll",
"=",
"num_equal",
"(",
"last_result",
",",
"'='",
",",
"comparator",
")",
"results",
".",
"append",
"(",
"sum",
"(",
"result",
")",
")",
"roll",
"=",
"','",
".",
"join",
"(",
"[",
"(",
"'!'",
"+",
"str",
"(",
"i",
")",
"if",
"i",
"==",
"comparator",
"else",
"str",
"(",
"i",
")",
")",
"for",
"i",
"in",
"result",
"]",
")",
"# Build a string of the dice rolls, adding an exclamation mark before every roll that resulted in an explosion.",
"string",
".",
"append",
"(",
"'[%s]'",
"%",
"roll",
")",
"elif",
"comparison_explode",
"is",
"not",
"None",
":",
"# Handle exploding dice with a comparison modifier",
"type_of_dice",
"=",
"int",
"(",
"comparison_explode",
"[",
"3",
"]",
")",
"comparator",
"=",
"int",
"(",
"comparison_explode",
"[",
"5",
"]",
")",
"if",
"comparison_explode",
"[",
"4",
"]",
"==",
"'>'",
":",
"# Ensure comparison is within bounds",
"assert",
"0",
"<",
"comparator",
"<",
"type_of_dice",
"else",
":",
"assert",
"1",
"<",
"comparator",
"<=",
"type_of_dice",
"result",
"=",
"[",
"]",
"last_result",
"=",
"roll_group",
"(",
"comparison_explode",
"[",
"1",
"]",
")",
"result",
".",
"extend",
"(",
"last_result",
")",
"if",
"comparison_explode",
"[",
"4",
"]",
"==",
"'>'",
":",
"number_to_roll",
"=",
"num_equal",
"(",
"last_result",
",",
"'>'",
",",
"comparator",
")",
"while",
"number_to_roll",
"!=",
"0",
":",
"last_result",
"=",
"roll_group",
"(",
"str",
"(",
"number_to_roll",
")",
"+",
"'d'",
"+",
"str",
"(",
"type_of_dice",
")",
")",
"result",
".",
"extend",
"(",
"last_result",
")",
"number_to_roll",
"=",
"num_equal",
"(",
"last_result",
",",
"'>'",
",",
"comparator",
")",
"roll",
"=",
"','",
".",
"join",
"(",
"[",
"(",
"'!'",
"+",
"str",
"(",
"i",
")",
"if",
"i",
">",
"comparator",
"else",
"str",
"(",
"i",
")",
")",
"for",
"i",
"in",
"result",
"]",
")",
"# Same as on other explodes except with a > or < comparison",
"else",
":",
"number_to_roll",
"=",
"num_equal",
"(",
"last_result",
",",
"'<'",
",",
"comparator",
")",
"while",
"number_to_roll",
"!=",
"0",
":",
"last_result",
"=",
"roll_group",
"(",
"str",
"(",
"number_to_roll",
")",
"+",
"'d'",
"+",
"str",
"(",
"type_of_dice",
")",
")",
"result",
".",
"extend",
"(",
"last_result",
")",
"number_to_roll",
"=",
"num_equal",
"(",
"last_result",
",",
"'<'",
",",
"comparator",
")",
"roll",
"=",
"','",
".",
"join",
"(",
"[",
"(",
"'!'",
"+",
"str",
"(",
"i",
")",
"if",
"i",
"<",
"comparator",
"else",
"str",
"(",
"i",
")",
")",
"for",
"i",
"in",
"result",
"]",
")",
"# Same as on other explodes except with a > or < comparison",
"results",
".",
"append",
"(",
"sum",
"(",
"result",
")",
")",
"string",
".",
"append",
"(",
"'[%s]'",
"%",
"roll",
")",
"elif",
"penetrate",
"is",
"not",
"None",
":",
"# Handle penetrating dice without a comparison modifier.",
"type_of_dice",
"=",
"int",
"(",
"penetrate",
"[",
"3",
"]",
")",
"first_num",
"=",
"int",
"(",
"penetrate",
"[",
"2",
"]",
")",
"result",
"=",
"[",
"]",
"last_result",
"=",
"roll_group",
"(",
"penetrate",
"[",
"1",
"]",
")",
"result",
".",
"extend",
"(",
"last_result",
")",
"number_to_roll",
"=",
"num_equal",
"(",
"last_result",
",",
"'='",
",",
"type_of_dice",
")",
"while",
"number_to_roll",
"!=",
"0",
":",
"last_result",
"=",
"roll_group",
"(",
"str",
"(",
"number_to_roll",
")",
"+",
"'d'",
"+",
"str",
"(",
"type_of_dice",
")",
")",
"result",
".",
"extend",
"(",
"last_result",
")",
"number_to_roll",
"=",
"num_equal",
"(",
"last_result",
",",
"'='",
",",
"type_of_dice",
")",
"pre_result",
"=",
"result",
"[",
":",
"first_num",
"]",
"# Add the first rolls with no modifier",
"pre_result",
".",
"extend",
"(",
"[",
"x",
"-",
"1",
"for",
"x",
"in",
"result",
"[",
"first_num",
":",
"]",
"]",
")",
"# Add the second rolls with a -1 modifier",
"results",
".",
"append",
"(",
"sum",
"(",
"pre_result",
")",
")",
"roll",
"=",
"','",
".",
"join",
"(",
"[",
"'!'",
"+",
"str",
"(",
"i",
")",
"if",
"i",
"==",
"type_of_dice",
"else",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"result",
"[",
":",
"first_num",
"]",
"]",
")",
"# Add the first numbers, without the -1 but with a ! when roll is penetration",
"roll",
"+=",
"(",
"','",
"if",
"len",
"(",
"pre_result",
")",
">",
"first_num",
"else",
"''",
")",
"# Only add the comma in between if there's at least one penetration",
"roll",
"+=",
"','",
".",
"join",
"(",
"[",
"(",
"'!'",
"+",
"str",
"(",
"i",
")",
"+",
"'-1'",
"if",
"i",
"==",
"type_of_dice",
"else",
"str",
"(",
"i",
")",
"+",
"'-1'",
")",
"for",
"i",
"in",
"result",
"[",
"first_num",
":",
"]",
"]",
")",
"# Add the penetration dice with the '-1' tacked on the end",
"string",
".",
"append",
"(",
"'[%s]'",
"%",
"roll",
")",
"elif",
"specific_penetrate",
"is",
"not",
"None",
":",
"# Handle penetrating dice without a comparison modifier.",
"type_of_dice",
"=",
"int",
"(",
"specific_penetrate",
"[",
"3",
"]",
")",
"first_num",
"=",
"int",
"(",
"specific_penetrate",
"[",
"2",
"]",
")",
"comparator",
"=",
"int",
"(",
"specific_penetrate",
"[",
"4",
"]",
")",
"assert",
"0",
"<",
"comparator",
"<=",
"type_of_dice",
"result",
"=",
"[",
"]",
"last_result",
"=",
"roll_group",
"(",
"specific_penetrate",
"[",
"1",
"]",
")",
"result",
".",
"extend",
"(",
"last_result",
")",
"number_to_roll",
"=",
"num_equal",
"(",
"last_result",
",",
"'='",
",",
"comparator",
")",
"while",
"number_to_roll",
"!=",
"0",
":",
"last_result",
"=",
"roll_group",
"(",
"str",
"(",
"number_to_roll",
")",
"+",
"'d'",
"+",
"str",
"(",
"type_of_dice",
")",
")",
"result",
".",
"extend",
"(",
"last_result",
")",
"number_to_roll",
"=",
"num_equal",
"(",
"last_result",
",",
"'='",
",",
"comparator",
")",
"pre_result",
"=",
"result",
"[",
":",
"first_num",
"]",
"# Same as normal penetration",
"pre_result",
".",
"extend",
"(",
"[",
"x",
"-",
"1",
"for",
"x",
"in",
"result",
"[",
"first_num",
":",
"]",
"]",
")",
"results",
".",
"append",
"(",
"sum",
"(",
"pre_result",
")",
")",
"roll",
"=",
"','",
".",
"join",
"(",
"[",
"'!'",
"+",
"str",
"(",
"i",
")",
"if",
"i",
"==",
"comparator",
"else",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"result",
"[",
":",
"first_num",
"]",
"]",
")",
"# Same as above",
"roll",
"+=",
"(",
"','",
"if",
"len",
"(",
"pre_result",
")",
">",
"first_num",
"else",
"''",
")",
"roll",
"+=",
"','",
".",
"join",
"(",
"[",
"(",
"'!'",
"+",
"str",
"(",
"i",
")",
"+",
"'-1'",
"if",
"i",
"==",
"comparator",
"else",
"str",
"(",
"i",
")",
"+",
"'-1'",
")",
"for",
"i",
"in",
"result",
"[",
"first_num",
":",
"]",
"]",
")",
"string",
".",
"append",
"(",
"'[%s]'",
"%",
"roll",
")",
"elif",
"comparison_penetrate",
"is",
"not",
"None",
":",
"# Handle penetrating dice without a comparison modifier.",
"type_of_dice",
"=",
"int",
"(",
"comparison_penetrate",
"[",
"3",
"]",
")",
"comparator",
"=",
"int",
"(",
"comparison_penetrate",
"[",
"5",
"]",
")",
"first_num",
"=",
"int",
"(",
"comparison_penetrate",
"[",
"2",
"]",
")",
"if",
"comparison_penetrate",
"[",
"4",
"]",
"==",
"'>'",
":",
"# Ensure comparison is within bounds",
"assert",
"0",
"<",
"comparator",
"<",
"type_of_dice",
"else",
":",
"assert",
"1",
"<",
"comparator",
"<=",
"type_of_dice",
"result",
"=",
"[",
"]",
"last_result",
"=",
"roll_group",
"(",
"comparison_penetrate",
"[",
"1",
"]",
")",
"result",
".",
"extend",
"(",
"last_result",
")",
"# Do penetration based on more than or less than sign.",
"if",
"comparison_penetrate",
"[",
"4",
"]",
"==",
"'>'",
":",
"number_to_roll",
"=",
"num_equal",
"(",
"last_result",
",",
"'>'",
",",
"comparator",
")",
"while",
"number_to_roll",
"!=",
"0",
":",
"last_result",
"=",
"roll_group",
"(",
"str",
"(",
"number_to_roll",
")",
"+",
"'d'",
"+",
"str",
"(",
"type_of_dice",
")",
")",
"result",
".",
"extend",
"(",
"last_result",
")",
"number_to_roll",
"=",
"num_equal",
"(",
"last_result",
",",
"'>'",
",",
"comparator",
")",
"else",
":",
"number_to_roll",
"=",
"num_equal",
"(",
"last_result",
",",
"'<'",
",",
"comparator",
")",
"while",
"number_to_roll",
"!=",
"0",
":",
"last_result",
"=",
"roll_group",
"(",
"str",
"(",
"number_to_roll",
")",
"+",
"'d'",
"+",
"str",
"(",
"type_of_dice",
")",
")",
"result",
".",
"extend",
"(",
"last_result",
")",
"number_to_roll",
"=",
"num_equal",
"(",
"last_result",
",",
"'<'",
",",
"comparator",
")",
"pre_result",
"=",
"result",
"[",
":",
"first_num",
"]",
"pre_result",
".",
"extend",
"(",
"[",
"x",
"-",
"1",
"for",
"x",
"in",
"result",
"[",
"first_num",
":",
"]",
"]",
")",
"results",
".",
"append",
"(",
"sum",
"(",
"pre_result",
")",
")",
"if",
"comparison_penetrate",
"[",
"4",
"]",
"==",
"'>'",
":",
"roll",
"=",
"','",
".",
"join",
"(",
"[",
"'!'",
"+",
"str",
"(",
"i",
")",
"if",
"i",
">",
"comparator",
"else",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"result",
"[",
":",
"first_num",
"]",
"]",
")",
"# Same as above",
"roll",
"+=",
"(",
"','",
"if",
"len",
"(",
"pre_result",
")",
">",
"first_num",
"else",
"''",
")",
"roll",
"+=",
"','",
".",
"join",
"(",
"[",
"(",
"'!'",
"+",
"str",
"(",
"i",
")",
"+",
"'-1'",
"if",
"i",
">",
"comparator",
"else",
"str",
"(",
"i",
")",
"+",
"'-1'",
")",
"for",
"i",
"in",
"result",
"[",
"first_num",
":",
"]",
"]",
")",
"else",
":",
"roll",
"=",
"','",
".",
"join",
"(",
"[",
"'!'",
"+",
"str",
"(",
"i",
")",
"if",
"i",
"<",
"comparator",
"else",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"result",
"[",
":",
"first_num",
"]",
"]",
")",
"# Same as above",
"roll",
"+=",
"(",
"','",
"if",
"len",
"(",
"pre_result",
")",
">",
"first_num",
"else",
"''",
")",
"roll",
"+=",
"','",
".",
"join",
"(",
"[",
"(",
"'!'",
"+",
"str",
"(",
"i",
")",
"+",
"'-1'",
"if",
"i",
"<",
"comparator",
"else",
"str",
"(",
"i",
")",
"+",
"'-1'",
")",
"for",
"i",
"in",
"result",
"[",
"first_num",
":",
"]",
"]",
")",
"string",
".",
"append",
"(",
"'[%s]'",
"%",
"roll",
")",
"elif",
"reroll",
"is",
"not",
"None",
":",
"# Handle rerolling dice without a comparison modifier (ie. on 1)",
"type_of_dice",
"=",
"int",
"(",
"reroll",
"[",
"3",
"]",
")",
"result_strings",
"=",
"[",
"]",
"roll_strings",
"=",
"[",
"]",
"result",
"=",
"roll_group",
"(",
"reroll",
"[",
"1",
"]",
")",
"repeat",
"=",
"True",
"if",
"reroll",
"[",
"4",
"]",
"==",
"'R'",
"else",
"False",
"# Reroll just once or infinite number of times",
"if",
"repeat",
":",
"#Handle rerolling the dice and building a string of all the rerolled ones",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"result",
")",
")",
":",
"prev",
"=",
"[",
"result",
"[",
"i",
"]",
"]",
"while",
"result",
"[",
"i",
"]",
"==",
"1",
":",
"result",
"[",
"i",
"]",
"=",
"random",
".",
"randint",
"(",
"1",
",",
"type_of_dice",
")",
"prev",
".",
"append",
"(",
"result",
"[",
"i",
"]",
")",
"roll_strings",
".",
"append",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"prev",
"]",
")",
"else",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"result",
")",
")",
":",
"prev",
"=",
"[",
"result",
"[",
"i",
"]",
"]",
"if",
"result",
"[",
"i",
"]",
"==",
"1",
":",
"result",
"[",
"i",
"]",
"=",
"random",
".",
"randint",
"(",
"1",
",",
"type_of_dice",
")",
"prev",
".",
"append",
"(",
"result",
"[",
"i",
"]",
")",
"roll_strings",
".",
"append",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"prev",
"]",
")",
"results",
".",
"append",
"(",
"sum",
"(",
"result",
")",
")",
"for",
"roll_string",
"in",
"roll_strings",
":",
"roll_string",
".",
"reverse",
"(",
")",
"result_strings",
".",
"append",
"(",
"'%s'",
"%",
"roll_string",
"[",
"0",
"]",
"+",
"(",
"'~'",
"if",
"len",
"(",
"roll_string",
")",
">",
"1",
"else",
"''",
")",
"+",
"'~'",
".",
"join",
"(",
"roll_string",
"[",
"1",
":",
"]",
")",
")",
"#Build the string",
"roll",
"=",
"','",
".",
"join",
"(",
"result_strings",
")",
"string",
".",
"append",
"(",
"'[%s]'",
"%",
"roll",
")",
"elif",
"specific_reroll",
"is",
"not",
"None",
":",
"# Handle rerolling dice on a specific number, see reroll",
"type_of_dice",
"=",
"int",
"(",
"specific_reroll",
"[",
"3",
"]",
")",
"comparator",
"=",
"int",
"(",
"specific_reroll",
"[",
"5",
"]",
")",
"assert",
"0",
"<",
"comparator",
"<=",
"type_of_dice",
"# Ensure comparison is within bounds",
"result_strings",
"=",
"[",
"]",
"roll_strings",
"=",
"[",
"]",
"result",
"=",
"roll_group",
"(",
"specific_reroll",
"[",
"1",
"]",
")",
"repeat",
"=",
"True",
"if",
"specific_reroll",
"[",
"4",
"]",
"==",
"'R'",
"else",
"False",
"if",
"repeat",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"result",
")",
")",
":",
"prev",
"=",
"[",
"result",
"[",
"i",
"]",
"]",
"while",
"result",
"[",
"i",
"]",
"==",
"comparator",
":",
"result",
"[",
"i",
"]",
"=",
"random",
".",
"randint",
"(",
"1",
",",
"type_of_dice",
")",
"prev",
".",
"append",
"(",
"result",
"[",
"i",
"]",
")",
"roll_strings",
".",
"append",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"prev",
"]",
")",
"else",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"result",
")",
")",
":",
"prev",
"=",
"[",
"result",
"[",
"i",
"]",
"]",
"if",
"result",
"[",
"i",
"]",
"==",
"comparator",
":",
"result",
"[",
"i",
"]",
"=",
"random",
".",
"randint",
"(",
"1",
",",
"type_of_dice",
")",
"prev",
".",
"append",
"(",
"result",
"[",
"i",
"]",
")",
"roll_strings",
".",
"append",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"prev",
"]",
")",
"results",
".",
"append",
"(",
"sum",
"(",
"result",
")",
")",
"for",
"roll_string",
"in",
"roll_strings",
":",
"roll_string",
".",
"reverse",
"(",
")",
"result_strings",
".",
"append",
"(",
"'%s'",
"%",
"roll_string",
"[",
"0",
"]",
"+",
"(",
"'~'",
"if",
"len",
"(",
"roll_string",
")",
">",
"1",
"else",
"''",
")",
"+",
"'~'",
".",
"join",
"(",
"roll_string",
"[",
"1",
":",
"]",
")",
")",
"roll",
"=",
"','",
".",
"join",
"(",
"result_strings",
")",
"string",
".",
"append",
"(",
"'[%s]'",
"%",
"roll",
")",
"elif",
"comparison_reroll",
"is",
"not",
"None",
":",
"# Handle rerolling dice with a comparison modifier.",
"type_of_dice",
"=",
"int",
"(",
"comparison_reroll",
"[",
"3",
"]",
")",
"comparator",
"=",
"int",
"(",
"comparison_reroll",
"[",
"6",
"]",
")",
"if",
"comparison_reroll",
"[",
"5",
"]",
"==",
"'>'",
":",
"# Ensure comparison is within bounds",
"assert",
"0",
"<",
"comparator",
"<",
"type_of_dice",
"else",
":",
"assert",
"1",
"<",
"comparator",
"<=",
"type_of_dice",
"result_strings",
"=",
"[",
"]",
"roll_strings",
"=",
"[",
"]",
"result",
"=",
"roll_group",
"(",
"comparison_reroll",
"[",
"1",
"]",
")",
"repeat",
"=",
"True",
"if",
"comparison_reroll",
"[",
"4",
"]",
"==",
"'R'",
"else",
"False",
"if",
"comparison_reroll",
"[",
"5",
"]",
"==",
"'>'",
":",
"if",
"repeat",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"result",
")",
")",
":",
"prev",
"=",
"[",
"result",
"[",
"i",
"]",
"]",
"while",
"result",
"[",
"i",
"]",
">",
"comparator",
":",
"result",
"[",
"i",
"]",
"=",
"random",
".",
"randint",
"(",
"1",
",",
"type_of_dice",
")",
"prev",
".",
"append",
"(",
"result",
"[",
"i",
"]",
")",
"roll_strings",
".",
"append",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"prev",
"]",
")",
"else",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"result",
")",
")",
":",
"prev",
"=",
"[",
"result",
"[",
"i",
"]",
"]",
"if",
"result",
"[",
"i",
"]",
">",
"comparator",
":",
"result",
"[",
"i",
"]",
"=",
"random",
".",
"randint",
"(",
"1",
",",
"type_of_dice",
")",
"prev",
".",
"append",
"(",
"result",
"[",
"i",
"]",
")",
"roll_strings",
".",
"append",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"prev",
"]",
")",
"else",
":",
"if",
"repeat",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"result",
")",
")",
":",
"prev",
"=",
"[",
"result",
"[",
"i",
"]",
"]",
"while",
"result",
"[",
"i",
"]",
"<",
"comparator",
":",
"result",
"[",
"i",
"]",
"=",
"random",
".",
"randint",
"(",
"1",
",",
"type_of_dice",
")",
"prev",
".",
"append",
"(",
"result",
"[",
"i",
"]",
")",
"roll_strings",
".",
"append",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"prev",
"]",
")",
"else",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"result",
")",
")",
":",
"prev",
"=",
"[",
"result",
"[",
"i",
"]",
"]",
"if",
"result",
"[",
"i",
"]",
"<",
"comparator",
":",
"result",
"[",
"i",
"]",
"=",
"random",
".",
"randint",
"(",
"1",
",",
"type_of_dice",
")",
"prev",
".",
"append",
"(",
"result",
"[",
"i",
"]",
")",
"roll_strings",
".",
"append",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"prev",
"]",
")",
"results",
".",
"append",
"(",
"sum",
"(",
"result",
")",
")",
"for",
"roll_string",
"in",
"roll_strings",
":",
"roll_string",
".",
"reverse",
"(",
")",
"result_strings",
".",
"append",
"(",
"'%s'",
"%",
"roll_string",
"[",
"0",
"]",
"+",
"(",
"'~'",
"if",
"len",
"(",
"roll_string",
")",
">",
"1",
"else",
"''",
")",
"+",
"'~'",
".",
"join",
"(",
"roll_string",
"[",
"1",
":",
"]",
")",
")",
"roll",
"=",
"','",
".",
"join",
"(",
"result_strings",
")",
"string",
".",
"append",
"(",
"'[%s]'",
"%",
"roll",
")",
"elif",
"success_comparison",
"is",
"not",
"None",
":",
"group_result",
"=",
"roll_group",
"(",
"success_comparison",
"[",
"1",
"]",
")",
"result",
"=",
"[",
"]",
"result_string",
"=",
"[",
"]",
"type_of_dice",
"=",
"int",
"(",
"success_comparison",
"[",
"2",
"]",
")",
"comparator",
"=",
"int",
"(",
"success_comparison",
"[",
"4",
"]",
")",
"if",
"success_comparison",
"[",
"3",
"]",
"==",
"'>'",
":",
"# Ensure comparison is within bounds",
"assert",
"0",
"<",
"comparator",
"<",
"type_of_dice",
"else",
":",
"assert",
"1",
"<",
"comparator",
"<=",
"type_of_dice",
"for",
"die",
"in",
"group_result",
":",
"if",
"success_comparison",
"[",
"3",
"]",
"==",
"'>'",
":",
"result",
".",
"append",
"(",
"1",
"if",
"die",
">",
"comparator",
"else",
"0",
")",
"result_string",
".",
"append",
"(",
"'!'",
"+",
"str",
"(",
"die",
")",
"if",
"die",
">",
"comparator",
"else",
"str",
"(",
"die",
")",
")",
"else",
":",
"result",
".",
"append",
"(",
"1",
"if",
"die",
"<",
"comparator",
"else",
"0",
")",
"result_string",
".",
"append",
"(",
"'!'",
"+",
"str",
"(",
"die",
")",
"if",
"die",
"<",
"comparator",
"else",
"str",
"(",
"die",
")",
")",
"results",
".",
"append",
"(",
"sum",
"(",
"result",
")",
")",
"roll",
"=",
"','",
".",
"join",
"(",
"result_string",
")",
"# Craft the string, adding an exclamation mark before every string that passed the comparison.",
"string",
".",
"append",
"(",
"'[%s]'",
"%",
"roll",
")",
"elif",
"success_fail_comparison",
"is",
"not",
"None",
":",
"group_result",
"=",
"roll_group",
"(",
"success_fail_comparison",
"[",
"1",
"]",
")",
"result",
"=",
"[",
"]",
"result_string",
"=",
"[",
"]",
"type_of_dice",
"=",
"int",
"(",
"success_fail_comparison",
"[",
"2",
"]",
")",
"success_comp",
"=",
"int",
"(",
"success_fail_comparison",
"[",
"5",
"]",
")",
"fail_comp",
"=",
"int",
"(",
"success_fail_comparison",
"[",
"7",
"]",
")",
"# Ensure both comparisons are within bounds",
"if",
"success_fail_comparison",
"[",
"4",
"]",
"==",
"'>'",
":",
"assert",
"0",
"<",
"success_comp",
"<",
"type_of_dice",
"assert",
"1",
"<",
"fail_comp",
"<=",
"type_of_dice",
"else",
":",
"assert",
"1",
"<",
"success_comp",
"<=",
"type_of_dice",
"assert",
"0",
"<",
"fail_comp",
"<",
"type_of_dice",
"for",
"die",
"in",
"group_result",
":",
"if",
"success_fail_comparison",
"[",
"4",
"]",
"==",
"'>'",
":",
"# Get the actual list of successes and fails with both comparisons",
"if",
"die",
">",
"success_comp",
":",
"result",
".",
"append",
"(",
"1",
")",
"result_string",
".",
"append",
"(",
"'!'",
"+",
"str",
"(",
"die",
")",
")",
"elif",
"die",
"<",
"fail_comp",
":",
"result",
".",
"append",
"(",
"-",
"1",
")",
"result_string",
".",
"append",
"(",
"'*'",
"+",
"str",
"(",
"die",
")",
")",
"else",
":",
"result",
".",
"append",
"(",
"0",
")",
"result_string",
".",
"append",
"(",
"str",
"(",
"die",
")",
")",
"else",
":",
"if",
"die",
"<",
"success_comp",
":",
"result",
".",
"append",
"(",
"1",
")",
"result_string",
".",
"append",
"(",
"'!'",
"+",
"str",
"(",
"die",
")",
")",
"elif",
"die",
">",
"fail_comp",
":",
"result",
".",
"append",
"(",
"-",
"1",
")",
"result_string",
".",
"append",
"(",
"'*'",
"+",
"str",
"(",
"die",
")",
")",
"else",
":",
"result",
".",
"append",
"(",
"0",
")",
"result_string",
".",
"append",
"(",
"str",
"(",
"die",
")",
")",
"results",
".",
"append",
"(",
"sum",
"(",
"result",
")",
")",
"#",
"roll",
"=",
"','",
".",
"join",
"(",
"result_string",
")",
"string",
".",
"append",
"(",
"'[%s]'",
"%",
"roll",
")",
"elif",
"keep",
"is",
"not",
"None",
":",
"# Handle rolling dice and keeping the x highest or lowest values",
"group_result",
"=",
"roll_group",
"(",
"keep",
"[",
"1",
"]",
")",
"group_result",
".",
"sort",
"(",
"reverse",
"=",
"True",
"if",
"keep",
"[",
"2",
"]",
"==",
"'K'",
"else",
"False",
")",
"# Uppercase is keep highest and lowercase is keep lowest.",
"num_to_keep",
"=",
"int",
"(",
"keep",
"[",
"3",
"]",
"if",
"keep",
"[",
"3",
"]",
"!=",
"''",
"else",
"1",
")",
"assert",
"1",
"<=",
"num_to_keep",
"<",
"len",
"(",
"group_result",
")",
"results",
".",
"append",
"(",
"sum",
"(",
"group_result",
"[",
":",
"num_to_keep",
"]",
")",
")",
"roll",
"=",
"','",
".",
"join",
"(",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"group_result",
"[",
":",
"num_to_keep",
"]",
"]",
")",
"+",
"' ~~ '",
"# This time format the string with all kept rolls on the left and dropped rolls on the right",
"roll",
"+=",
"','",
".",
"join",
"(",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"group_result",
"[",
"num_to_keep",
":",
"]",
"]",
")",
"string",
".",
"append",
"(",
"'[%s]'",
"%",
"roll",
")",
"elif",
"drop",
"is",
"not",
"None",
":",
"group_result",
"=",
"roll_group",
"(",
"drop",
"[",
"1",
"]",
")",
"group_result",
".",
"sort",
"(",
"reverse",
"=",
"True",
"if",
"drop",
"[",
"2",
"]",
"==",
"'X'",
"else",
"False",
")",
"# Same thing as keep dice",
"num_to_drop",
"=",
"int",
"(",
"drop",
"[",
"3",
"]",
"if",
"drop",
"[",
"3",
"]",
"!=",
"''",
"else",
"1",
")",
"assert",
"1",
"<=",
"num_to_drop",
"<",
"len",
"(",
"group_result",
")",
"results",
".",
"append",
"(",
"sum",
"(",
"group_result",
"[",
":",
"num_to_drop",
"]",
")",
")",
"roll",
"=",
"','",
".",
"join",
"(",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"group_result",
"[",
"num_to_drop",
":",
"]",
"]",
")",
"+",
"' ~~ '",
"# Same as above.",
"roll",
"+=",
"','",
".",
"join",
"(",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"group_result",
"[",
":",
"num_to_drop",
"]",
"]",
")",
"string",
".",
"append",
"(",
"'[%s]'",
"%",
"roll",
")",
"elif",
"individual",
"is",
"not",
"None",
":",
"group_result",
"=",
"roll_group",
"(",
"individual",
"[",
"1",
"]",
")",
"result",
"=",
"[",
"]",
"for",
"i",
",",
"j",
"in",
"enumerate",
"(",
"group_result",
")",
":",
"#add to each roll",
"if",
"individual",
"[",
"4",
"]",
"==",
"'a'",
":",
"result",
".",
"append",
"(",
"j",
"+",
"int",
"(",
"individual",
"[",
"5",
"]",
")",
")",
"elif",
"individual",
"[",
"4",
"]",
"==",
"'s'",
":",
"result",
".",
"append",
"(",
"j",
"-",
"int",
"(",
"individual",
"[",
"5",
"]",
")",
")",
"elif",
"individual",
"[",
"4",
"]",
"==",
"'m'",
":",
"result",
".",
"append",
"(",
"j",
"*",
"int",
"(",
"individual",
"[",
"5",
"]",
")",
")",
"else",
":",
"raise",
"ValueError",
"results",
".",
"append",
"(",
"sum",
"(",
"result",
")",
")",
"roll",
"=",
"','",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"+",
"individual",
"[",
"4",
"]",
"+",
"individual",
"[",
"5",
"]",
"for",
"x",
"in",
"group_result",
"]",
")",
"#Create string with the modifier on each roll",
"string",
".",
"append",
"(",
"'[%s]'",
"%",
"roll",
")",
"elif",
"normal",
"is",
"not",
"None",
":",
"group_result",
"=",
"roll_group",
"(",
"group",
")",
"results",
".",
"append",
"(",
"sum",
"(",
"group_result",
")",
")",
"roll",
"=",
"','",
".",
"join",
"(",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"group_result",
"]",
")",
"string",
".",
"append",
"(",
"'[%s]'",
"%",
"roll",
")",
"elif",
"literal",
"is",
"not",
"None",
":",
"results",
".",
"append",
"(",
"int",
"(",
"literal",
"[",
"1",
"]",
")",
")",
"# Just append the integer value",
"string",
".",
"append",
"(",
"literal",
"[",
"1",
"]",
")",
"elif",
"float_literal",
"is",
"not",
"None",
":",
"if",
"floats",
":",
"results",
".",
"append",
"(",
"float",
"(",
"group",
")",
")",
"string",
".",
"append",
"(",
"group",
")",
"else",
":",
"raise",
"TypeError",
"else",
":",
"raise",
"Exception",
"except",
"Exception",
":",
"raise",
"DiceGroupException",
"(",
"'\"%s\" is not a valid dicegroup.'",
"%",
"group",
")",
"parser",
"=",
"SimpleEval",
"(",
"floats",
"=",
"floats",
",",
"functions",
"=",
"functions",
")",
"#The parser object parses the dice rolls and functions",
"try",
":",
"final_result",
"=",
"parser",
".",
"eval",
"(",
"''",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"results",
"]",
")",
")",
"#Call the parser to parse into one value",
"if",
"not",
"floats",
":",
"final_result",
"=",
"int",
"(",
"final_result",
")",
"except",
"Exception",
":",
"raise",
"DiceOperatorException",
"(",
"'Error parsing operators and or functions'",
")",
"#Create explanation string and remove extraneous spaces",
"explanation",
"=",
"''",
".",
"join",
"(",
"string",
")",
"explanation",
"=",
"zero_width_split",
"(",
"r\"\"\"((?<=[\\/%^+])(?![\\/,]))| # Split between /, %, ^, and +\n ((?<![\\/,])(?=[\\/%^+]))| # Same as above\n ((?<=[^(])(?=-))(?!-[^[]*])| # Split in front of - that are not in a roll\n (?<=-)(?=[^\\d()a-z])| # Same for splitting after - and before non-literals\n (?<=[\\d)\\]]-)(?=.)(?![^[]*])| # Split after a - that is not in a roll\n (?<=,)(?![^[]*])| # Split after a comma that is not in a roll\n (?<=([^,]\\*))(?!\\*)| # Split after a * that is not in a roll\n (?<![,\\*])(?=\\*) # Split before a * that is not in a roll\"\"\"",
",",
"explanation",
")",
"#Split on ops to properly format the explanation",
"explanation",
"=",
"' '",
".",
"join",
"(",
"explanation",
")",
"explanation",
"=",
"explanation",
".",
"strip",
"(",
")",
"explanation",
"=",
"regex",
".",
"sub",
"(",
"r'[ \\t]{2,}'",
",",
"' '",
",",
"explanation",
")",
"return",
"final_result",
",",
"explanation"
] | 49.420543 | 30.170543 |
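A usage sketch, assuming the module above is importable together with its helpers (`regex`, `SimpleEval`, `roll_group`, `zero_width_split`, the exception classes); outputs vary per run.

total, explanation = roll_dice('4d6K3 + 2')   # keep the 3 highest of 4d6, add 2
print(total, '<-', explanation)
total, _ = roll_dice('2d10!', floats=False)   # exploding d10s, integer result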
def open(self):
"""Create a connection to the AWS API server. This can be reused for
sending multiple emails.
"""
if self.connection:
return False
try:
self.connection = SESConnection(
aws_access_key_id=self._access_key_id,
aws_secret_access_key=self._access_key,
region=self._region,
proxy=self._proxy,
proxy_port=self._proxy_port,
proxy_user=self._proxy_user,
proxy_pass=self._proxy_pass,
)
except Exception:
if not self.fail_silently:
raise
|
[
"def",
"open",
"(",
"self",
")",
":",
"if",
"self",
".",
"connection",
":",
"return",
"False",
"try",
":",
"self",
".",
"connection",
"=",
"SESConnection",
"(",
"aws_access_key_id",
"=",
"self",
".",
"_access_key_id",
",",
"aws_secret_access_key",
"=",
"self",
".",
"_access_key",
",",
"region",
"=",
"self",
".",
"_region",
",",
"proxy",
"=",
"self",
".",
"_proxy",
",",
"proxy_port",
"=",
"self",
".",
"_proxy_port",
",",
"proxy_user",
"=",
"self",
".",
"_proxy_user",
",",
"proxy_pass",
"=",
"self",
".",
"_proxy_pass",
",",
")",
"except",
"Exception",
":",
"if",
"not",
"self",
".",
"fail_silently",
":",
"raise"
] | 32.45 | 12.4 |
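A sketch of reusing one connection across sends — `SESBackend` stands for the backend class above and `messages` for a list of Django EmailMessage objects; both names are illustrative.

backend = SESBackend(fail_silently=True)
backend.open()                   # creates the boto SESConnection once
backend.send_messages(messages)  # assumed standard backend API; reuses the connection
backend.close()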
def set_actuator_control_target_send(self, time_usec, group_mlx, target_system, target_component, controls, force_mavlink1=False):
'''
Set the vehicle attitude and body angular rates.
time_usec : Timestamp (micros since boot or Unix epoch) (uint64_t)
                group_mlx                 : Actuator group. The "_mlx" indicates this is a multi-instance message and a MAVLink parser should use this field to differentiate between instances. (uint8_t)
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
controls : Actuator controls. Normed to -1..+1 where 0 is neutral position. Throttle for single rotation direction motors is 0..1, negative range for reverse direction. Standard mapping for attitude controls (group 0): (index 0-7): roll, pitch, yaw, throttle, flaps, spoilers, airbrakes, landing gear. Load a pass-through mixer to repurpose them as generic outputs. (float)
'''
return self.send(self.set_actuator_control_target_encode(time_usec, group_mlx, target_system, target_component, controls), force_mavlink1=force_mavlink1)
|
[
"def",
"set_actuator_control_target_send",
"(",
"self",
",",
"time_usec",
",",
"group_mlx",
",",
"target_system",
",",
"target_component",
",",
"controls",
",",
"force_mavlink1",
"=",
"False",
")",
":",
"return",
"self",
".",
"send",
"(",
"self",
".",
"set_actuator_control_target_encode",
"(",
"time_usec",
",",
"group_mlx",
",",
"target_system",
",",
"target_component",
",",
"controls",
")",
",",
"force_mavlink1",
"=",
"force_mavlink1",
")"
] | 102.75 | 83.083333 |
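A sketch with pymavlink, which generates this sender on the `mav` attribute of a connection; the connection string is a placeholder.

import time
from pymavlink import mavutil

master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')
master.wait_heartbeat()
controls = [0.0, 0.0, 0.0, 0.6, 0.0, 0.0, 0.0, 0.0]  # group 0: 60% throttle
master.mav.set_actuator_control_target_send(
    int(time.time() * 1e6), 0,
    master.target_system, master.target_component, controls)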
def replicate_global_dbs(cloud_url=None, local_url=None):
"""
Set up replication of the global databases from the cloud server to the
local server.
:param str cloud_url: Used to override the cloud url from the global
configuration in case the calling function is in the process of
initializing the cloud server
:param str local_url: Used to override the local url from the global
configuration in case the calling function is in the process of
initializing the local server
"""
local_url = local_url or config["local_server"]["url"]
cloud_url = cloud_url or config["cloud_server"]["url"]
server = Server(local_url)
for db_name in global_dbs:
server.replicate(
db_name, urljoin(cloud_url, db_name), db_name, continuous=True,
)
|
[
"def",
"replicate_global_dbs",
"(",
"cloud_url",
"=",
"None",
",",
"local_url",
"=",
"None",
")",
":",
"local_url",
"=",
"local_url",
"or",
"config",
"[",
"\"local_server\"",
"]",
"[",
"\"url\"",
"]",
"cloud_url",
"=",
"cloud_url",
"or",
"config",
"[",
"\"cloud_server\"",
"]",
"[",
"\"url\"",
"]",
"server",
"=",
"Server",
"(",
"local_url",
")",
"for",
"db_name",
"in",
"global_dbs",
":",
"server",
".",
"replicate",
"(",
"db_name",
",",
"urljoin",
"(",
"cloud_url",
",",
"db_name",
")",
",",
"db_name",
",",
"continuous",
"=",
"True",
",",
")"
] | 41.684211 | 20.210526 |
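A sketch using the documented override parameters; both URLs are placeholders.

replicate_global_dbs(cloud_url="https://cloud.example.com:5984/",
                     local_url="http://localhost:5984/")
# every db named in `global_dbs` now has a continuous pull replication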
def update(self, reference, field_updates, option=None):
"""Add a "change" to update a document.
See
:meth:`~.firestore_v1beta1.document.DocumentReference.update` for
more information on ``field_updates`` and ``option``.
Args:
reference (~.firestore_v1beta1.document.DocumentReference): A
                document reference that will be updated in this batch.
field_updates (dict): Field names or paths to update and values
to update with.
option (Optional[~.firestore_v1beta1.client.WriteOption]): A
write option to make assertions / preconditions on the server
state of the document before applying changes.
"""
if option.__class__.__name__ == "ExistsOption":
raise ValueError("you must not pass an explicit write option to " "update.")
write_pbs = _helpers.pbs_for_update(
reference._document_path, field_updates, option
)
self._add_write_pbs(write_pbs)
|
[
"def",
"update",
"(",
"self",
",",
"reference",
",",
"field_updates",
",",
"option",
"=",
"None",
")",
":",
"if",
"option",
".",
"__class__",
".",
"__name__",
"==",
"\"ExistsOption\"",
":",
"raise",
"ValueError",
"(",
"\"you must not pass an explicit write option to \"",
"\"update.\"",
")",
"write_pbs",
"=",
"_helpers",
".",
"pbs_for_update",
"(",
"reference",
".",
"_document_path",
",",
"field_updates",
",",
"option",
")",
"self",
".",
"_add_write_pbs",
"(",
"write_pbs",
")"
] | 46.454545 | 23.636364 |
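A hedged usage sketch of the batch update above with the standard google-cloud-firestore client; the collection path and field values are illustrative:

from google.cloud import firestore

client = firestore.Client()
batch = client.batch()
ref = client.collection('cities').document('LA')   # illustrative path
batch.update(ref, {'population': 3900000})
batch.commit()   # queued writes are only sent on commit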
def decimal_to_digits(decimal, min_digits=None):
"""
Return the number of digits to the first nonzero decimal.
Parameters
-----------
decimal: float
min_digits: int, minimum number of digits to return
Returns
-----------
digits: int, number of digits to the first nonzero decimal
"""
digits = abs(int(np.log10(decimal)))
if min_digits is not None:
digits = np.clip(digits, min_digits, 20)
return digits
|
[
"def",
"decimal_to_digits",
"(",
"decimal",
",",
"min_digits",
"=",
"None",
")",
":",
"digits",
"=",
"abs",
"(",
"int",
"(",
"np",
".",
"log10",
"(",
"decimal",
")",
")",
")",
"if",
"min_digits",
"is",
"not",
"None",
":",
"digits",
"=",
"np",
".",
"clip",
"(",
"digits",
",",
"min_digits",
",",
"20",
")",
"return",
"digits"
] | 25.055556 | 19.5 |
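A worked example of the helper above; each result follows directly from abs(int(np.log10(x))) and the optional np.clip:

decimal_to_digits(0.001)              # abs(int(log10(0.001))) = abs(-3) -> 3
decimal_to_digits(0.5)                # int(-0.301...) truncates to 0   -> 0
decimal_to_digits(0.5, min_digits=2)  # np.clip(0, 2, 20)               -> 2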
def as_dict(self):
"""
Returns a copy of the configuration dictionary. Changes in this should not reflect on the original
object.
:return: Configuration dictionary.
:rtype: dict
"""
self.clean()
d = OrderedDict()
all_props = self.__class__.CONFIG_PROPERTIES
for attr_name, attr_config in six.iteritems(all_props):
value = self._config[attr_name]
attr_type = attr_config.attr_type
if attr_type:
if value:
if issubclass(attr_type, list):
if issubclass(attr_type, NamedTupleList):
d[attr_name] = [i._asdict() for i in value]
else:
d[attr_name] = value[:]
elif attr_type is dict:
d[attr_name] = dict(value)
elif value is not NotSet:
d[attr_name] = value
return d
|
[
"def",
"as_dict",
"(",
"self",
")",
":",
"self",
".",
"clean",
"(",
")",
"d",
"=",
"OrderedDict",
"(",
")",
"all_props",
"=",
"self",
".",
"__class__",
".",
"CONFIG_PROPERTIES",
"for",
"attr_name",
",",
"attr_config",
"in",
"six",
".",
"iteritems",
"(",
"all_props",
")",
":",
"value",
"=",
"self",
".",
"_config",
"[",
"attr_name",
"]",
"attr_type",
"=",
"attr_config",
".",
"attr_type",
"if",
"attr_type",
":",
"if",
"value",
":",
"if",
"issubclass",
"(",
"attr_type",
",",
"list",
")",
":",
"if",
"issubclass",
"(",
"attr_type",
",",
"NamedTupleList",
")",
":",
"d",
"[",
"attr_name",
"]",
"=",
"[",
"i",
".",
"_asdict",
"(",
")",
"for",
"i",
"in",
"value",
"]",
"else",
":",
"d",
"[",
"attr_name",
"]",
"=",
"value",
"[",
":",
"]",
"elif",
"attr_type",
"is",
"dict",
":",
"d",
"[",
"attr_name",
"]",
"=",
"dict",
"(",
"value",
")",
"elif",
"value",
"is",
"not",
"NotSet",
":",
"d",
"[",
"attr_name",
"]",
"=",
"value",
"return",
"d"
] | 37.307692 | 16 |
def _update_ctx(self, attrs):
"""
Update the state of the Styler.
Collects a mapping of {index_label: ['<property>: <value>']}.
attrs : Series or DataFrame
should contain strings of '<property>: <value>;<prop2>: <val2>'
Whitespace shouldn't matter and the final trailing ';' shouldn't
matter.
"""
for row_label, v in attrs.iterrows():
for col_label, col in v.iteritems():
i = self.index.get_indexer([row_label])[0]
j = self.columns.get_indexer([col_label])[0]
for pair in col.rstrip(";").split(";"):
self.ctx[(i, j)].append(pair)
|
[
"def",
"_update_ctx",
"(",
"self",
",",
"attrs",
")",
":",
"for",
"row_label",
",",
"v",
"in",
"attrs",
".",
"iterrows",
"(",
")",
":",
"for",
"col_label",
",",
"col",
"in",
"v",
".",
"iteritems",
"(",
")",
":",
"i",
"=",
"self",
".",
"index",
".",
"get_indexer",
"(",
"[",
"row_label",
"]",
")",
"[",
"0",
"]",
"j",
"=",
"self",
".",
"columns",
".",
"get_indexer",
"(",
"[",
"col_label",
"]",
")",
"[",
"0",
"]",
"for",
"pair",
"in",
"col",
".",
"rstrip",
"(",
"\";\"",
")",
".",
"split",
"(",
"\";\"",
")",
":",
"self",
".",
"ctx",
"[",
"(",
"i",
",",
"j",
")",
"]",
".",
"append",
"(",
"pair",
")"
] | 39.235294 | 17 |
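The inner loop parses each cell value as a CSS-like declaration list; a standalone illustration of that split (not part of the source):

s = "color: red; background-color: yellow;"
s.rstrip(";").split(";")
# -> ['color: red', ' background-color: yellow']
# ctx[(i, j)] accumulates one such pair per declaration for cell (i, j)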
def _private_notes(self, key, value):
"""Populate the ``_private_notes`` key.
Also populates the ``_export_to`` key through side effects.
"""
def _is_for_cds(value):
normalized_c_values = [el.upper() for el in force_list(value.get('c'))]
return 'CDS' in normalized_c_values
def _is_for_hal(value):
normalized_c_values = [el.upper() for el in force_list(value.get('c'))]
return 'HAL' in normalized_c_values
def _is_not_for_hal(value):
normalized_c_values = [el.upper() for el in force_list(value.get('c'))]
return 'NOT HAL' in normalized_c_values
_private_notes = self.get('_private_notes', [])
_export_to = self.get('_export_to', {})
for value in force_list(value):
if _is_for_cds(value):
_export_to['CDS'] = True
if _is_for_hal(value):
_export_to['HAL'] = True
elif _is_not_for_hal(value):
_export_to['HAL'] = False
source = force_single_element(value.get('9'))
for _private_note in force_list(value.get('a')):
_private_notes.append({
'source': source,
'value': _private_note,
})
self['_export_to'] = _export_to
return _private_notes
|
[
"def",
"_private_notes",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"def",
"_is_for_cds",
"(",
"value",
")",
":",
"normalized_c_values",
"=",
"[",
"el",
".",
"upper",
"(",
")",
"for",
"el",
"in",
"force_list",
"(",
"value",
".",
"get",
"(",
"'c'",
")",
")",
"]",
"return",
"'CDS'",
"in",
"normalized_c_values",
"def",
"_is_for_hal",
"(",
"value",
")",
":",
"normalized_c_values",
"=",
"[",
"el",
".",
"upper",
"(",
")",
"for",
"el",
"in",
"force_list",
"(",
"value",
".",
"get",
"(",
"'c'",
")",
")",
"]",
"return",
"'HAL'",
"in",
"normalized_c_values",
"def",
"_is_not_for_hal",
"(",
"value",
")",
":",
"normalized_c_values",
"=",
"[",
"el",
".",
"upper",
"(",
")",
"for",
"el",
"in",
"force_list",
"(",
"value",
".",
"get",
"(",
"'c'",
")",
")",
"]",
"return",
"'NOT HAL'",
"in",
"normalized_c_values",
"_private_notes",
"=",
"self",
".",
"get",
"(",
"'_private_notes'",
",",
"[",
"]",
")",
"_export_to",
"=",
"self",
".",
"get",
"(",
"'_export_to'",
",",
"{",
"}",
")",
"for",
"value",
"in",
"force_list",
"(",
"value",
")",
":",
"if",
"_is_for_cds",
"(",
"value",
")",
":",
"_export_to",
"[",
"'CDS'",
"]",
"=",
"True",
"if",
"_is_for_hal",
"(",
"value",
")",
":",
"_export_to",
"[",
"'HAL'",
"]",
"=",
"True",
"elif",
"_is_not_for_hal",
"(",
"value",
")",
":",
"_export_to",
"[",
"'HAL'",
"]",
"=",
"False",
"source",
"=",
"force_single_element",
"(",
"value",
".",
"get",
"(",
"'9'",
")",
")",
"for",
"_private_note",
"in",
"force_list",
"(",
"value",
".",
"get",
"(",
"'a'",
")",
")",
":",
"_private_notes",
".",
"append",
"(",
"{",
"'source'",
":",
"source",
",",
"'value'",
":",
"_private_note",
",",
"}",
")",
"self",
"[",
"'_export_to'",
"]",
"=",
"_export_to",
"return",
"_private_notes"
] | 32.342105 | 17.184211 |
def select_hits(hits_array, condition=None):
'''Selects the hits with condition.
    E.g.: condition = '(rel_BCID == 7) & (event_number < 1000)'
Parameters
----------
hits_array : numpy.array
condition : string
        A condition that is applied to the hits in numexpr. A hit is kept only if the expression evaluates to True.
Returns
-------
numpy.array
        hit array with the selected hits
'''
if condition is None:
return hits_array
for variable in set(re.findall(r'[a-zA-Z_]+', condition)):
exec(variable + ' = hits_array[\'' + variable + '\']')
return hits_array[ne.evaluate(condition)]
|
[
"def",
"select_hits",
"(",
"hits_array",
",",
"condition",
"=",
"None",
")",
":",
"if",
"condition",
"is",
"None",
":",
"return",
"hits_array",
"for",
"variable",
"in",
"set",
"(",
"re",
".",
"findall",
"(",
"r'[a-zA-Z_]+'",
",",
"condition",
")",
")",
":",
"exec",
"(",
"variable",
"+",
"' = hits_array[\\''",
"+",
"variable",
"+",
"'\\']'",
")",
"return",
"hits_array",
"[",
"ne",
".",
"evaluate",
"(",
"condition",
")",
"]"
] | 29.136364 | 24.5 |
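A hedged sketch of calling the selector on a small structured array; the condition is the parenthesised form of the docstring's example (numexpr's & binds tighter than the comparisons), and the original is Python 2 code, where the exec-based name binding is visible to numexpr:

import numpy as np

hits = np.zeros(4, dtype=[('event_number', '<i8'), ('rel_BCID', '<u1')])
hits['event_number'] = [1, 2, 500, 2000]
hits['rel_BCID'] = [7, 3, 7, 7]

selected = select_hits(hits, condition='(rel_BCID == 7) & (event_number < 1000)')
# selected['event_number'] -> array([1, 500])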
def institutes(self, institute_ids=None):
"""Fetch all institutes.
Args:
institute_ids(list(str))
Returns:
res(pymongo.Cursor)
"""
query = {}
if institute_ids:
query['_id'] = {'$in': institute_ids}
LOG.debug("Fetching all institutes")
return self.institute_collection.find(query)
|
[
"def",
"institutes",
"(",
"self",
",",
"institute_ids",
"=",
"None",
")",
":",
"query",
"=",
"{",
"}",
"if",
"institute_ids",
":",
"query",
"[",
"'_id'",
"]",
"=",
"{",
"'$in'",
":",
"institute_ids",
"}",
"LOG",
".",
"debug",
"(",
"\"Fetching all institutes\"",
")",
"return",
"self",
".",
"institute_collection",
".",
"find",
"(",
"query",
")"
] | 27.428571 | 13.642857 |
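A hedged usage sketch, assuming `adapter` is an instance of the adapter class this method belongs to, backed by a live pymongo collection; the institute ids are illustrative:

cursor = adapter.institutes(institute_ids=['cust000', 'cust001'])
for institute in cursor:   # pymongo cursors are evaluated lazily
    print(institute['_id'])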
def _add_implied_commands(self):
"""
Add the commands that are implied by the blueprint.
"""
if len(self.get_added_columns()) and not self._creating():
self._commands.insert(0, self._create_command("add"))
if len(self.get_changed_columns()) and not self._creating():
self._commands.insert(0, self._create_command("change"))
return self._add_fluent_indexes()
|
[
"def",
"_add_implied_commands",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"get_added_columns",
"(",
")",
")",
"and",
"not",
"self",
".",
"_creating",
"(",
")",
":",
"self",
".",
"_commands",
".",
"insert",
"(",
"0",
",",
"self",
".",
"_create_command",
"(",
"\"add\"",
")",
")",
"if",
"len",
"(",
"self",
".",
"get_changed_columns",
"(",
")",
")",
"and",
"not",
"self",
".",
"_creating",
"(",
")",
":",
"self",
".",
"_commands",
".",
"insert",
"(",
"0",
",",
"self",
".",
"_create_command",
"(",
"\"change\"",
")",
")",
"return",
"self",
".",
"_add_fluent_indexes",
"(",
")"
] | 38.272727 | 19.545455 |
def list_event_types(self, filter=market_filter(), locale=None, session=None, lightweight=None):
"""
Returns a list of Event Types (i.e. Sports) associated with the markets
selected by the MarketFilter.
:param dict filter: The filter to select desired markets
:param str locale: The language used for the response
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.EventTypeResult]
"""
params = clean_locals(locals())
method = '%s%s' % (self.URI, 'listEventTypes')
(response, elapsed_time) = self.request(method, params, session)
return self.process_response(response, resources.EventTypeResult, elapsed_time, lightweight)
|
[
"def",
"list_event_types",
"(",
"self",
",",
"filter",
"=",
"market_filter",
"(",
")",
",",
"locale",
"=",
"None",
",",
"session",
"=",
"None",
",",
"lightweight",
"=",
"None",
")",
":",
"params",
"=",
"clean_locals",
"(",
"locals",
"(",
")",
")",
"method",
"=",
"'%s%s'",
"%",
"(",
"self",
".",
"URI",
",",
"'listEventTypes'",
")",
"(",
"response",
",",
"elapsed_time",
")",
"=",
"self",
".",
"request",
"(",
"method",
",",
"params",
",",
"session",
")",
"return",
"self",
".",
"process_response",
"(",
"response",
",",
"resources",
".",
"EventTypeResult",
",",
"elapsed_time",
",",
"lightweight",
")"
] | 50.4375 | 24.5625 |
def verify(self, string_version=None):
"""
Check that the version information is consistent with the VCS
before doing a release. If supplied with a string version,
this is also checked against the current version. Should be
called from setup.py with the declared package version before
releasing to PyPI.
"""
if string_version and string_version != str(self):
raise Exception("Supplied string version does not match current version.")
if self.dirty:
raise Exception("Current working directory is dirty.")
if self.release != self.expected_release:
raise Exception("Declared release does not match current release tag.")
        if self.commit_count != 0:
raise Exception("Please update the VCS version tag before release.")
if self._expected_commit not in [None, "$Format:%h$"]:
raise Exception("Declared release does not match the VCS version tag")
|
[
"def",
"verify",
"(",
"self",
",",
"string_version",
"=",
"None",
")",
":",
"if",
"string_version",
"and",
"string_version",
"!=",
"str",
"(",
"self",
")",
":",
"raise",
"Exception",
"(",
"\"Supplied string version does not match current version.\"",
")",
"if",
"self",
".",
"dirty",
":",
"raise",
"Exception",
"(",
"\"Current working directory is dirty.\"",
")",
"if",
"self",
".",
"release",
"!=",
"self",
".",
"expected_release",
":",
"raise",
"Exception",
"(",
"\"Declared release does not match current release tag.\"",
")",
"if",
"self",
".",
"commit_count",
"!=",
"0",
":",
"raise",
"Exception",
"(",
"\"Please update the VCS version tag before release.\"",
")",
"if",
"self",
".",
"_expected_commit",
"not",
"in",
"[",
"None",
",",
"\"$Format:%h$\"",
"]",
":",
"raise",
"Exception",
"(",
"\"Declared release does not match the VCS version tag\"",
")"
] | 44.454545 | 25.363636 |
def workbench_scenarios(cls):
"""
Gather scenarios to be displayed in the workbench
"""
module = cls.__module__
module = module.split('.')[0]
directory = pkg_resources.resource_filename(module, 'scenarios')
files = _find_files(directory)
scenarios = _read_files(files)
return scenarios
|
[
"def",
"workbench_scenarios",
"(",
"cls",
")",
":",
"module",
"=",
"cls",
".",
"__module__",
"module",
"=",
"module",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"directory",
"=",
"pkg_resources",
".",
"resource_filename",
"(",
"module",
",",
"'scenarios'",
")",
"files",
"=",
"_find_files",
"(",
"directory",
")",
"scenarios",
"=",
"_read_files",
"(",
"files",
")",
"return",
"scenarios"
] | 34.8 | 9.2 |
def cluster_nodes(self, state=None, healthy=None):
"""
With the Nodes API, you can obtain a collection of resources, each of
which represents a node.
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
        :raises yarn_api_client.errors.IllegalArgumentError: if `healthy` is
            invalid
"""
path = '/ws/v1/cluster/nodes'
# TODO: validate state argument
legal_healthy = ['true', 'false']
if healthy is not None and healthy not in legal_healthy:
msg = 'Valid Healthy arguments are true, false'
raise IllegalArgumentError(msg)
loc_args = (
('state', state),
('healthy', healthy),
)
params = self.construct_parameters(loc_args)
return self.request(path, **params)
|
[
"def",
"cluster_nodes",
"(",
"self",
",",
"state",
"=",
"None",
",",
"healthy",
"=",
"None",
")",
":",
"path",
"=",
"'/ws/v1/cluster/nodes'",
"# TODO: validate state argument",
"legal_healthy",
"=",
"[",
"'true'",
",",
"'false'",
"]",
"if",
"healthy",
"is",
"not",
"None",
"and",
"healthy",
"not",
"in",
"legal_healthy",
":",
"msg",
"=",
"'Valid Healthy arguments are true, false'",
"raise",
"IllegalArgumentError",
"(",
"msg",
")",
"loc_args",
"=",
"(",
"(",
"'state'",
",",
"state",
")",
",",
"(",
"'healthy'",
",",
"healthy",
")",
",",
")",
"params",
"=",
"self",
".",
"construct_parameters",
"(",
"loc_args",
")",
"return",
"self",
".",
"request",
"(",
"path",
",",
"*",
"*",
"params",
")"
] | 34.12 | 17.24 |
def addPathway(
self, pathway_id, pathway_label, pathway_type=None,
pathway_description=None):
"""
Adds a pathway as a class. If no specific type is specified, it will
default to a subclass of "GO:cellular_process" and "PW:pathway".
:param pathway_id:
:param pathway_label:
:param pathway_type:
:param pathway_description:
:return:
"""
if pathway_type is None:
pathway_type = self.globaltt['cellular_process']
self.model.addClassToGraph(
pathway_id, pathway_label, pathway_type, pathway_description)
self.model.addSubClass(pathway_id, self.globaltt['pathway'])
return
|
[
"def",
"addPathway",
"(",
"self",
",",
"pathway_id",
",",
"pathway_label",
",",
"pathway_type",
"=",
"None",
",",
"pathway_description",
"=",
"None",
")",
":",
"if",
"pathway_type",
"is",
"None",
":",
"pathway_type",
"=",
"self",
".",
"globaltt",
"[",
"'cellular_process'",
"]",
"self",
".",
"model",
".",
"addClassToGraph",
"(",
"pathway_id",
",",
"pathway_label",
",",
"pathway_type",
",",
"pathway_description",
")",
"self",
".",
"model",
".",
"addSubClass",
"(",
"pathway_id",
",",
"self",
".",
"globaltt",
"[",
"'pathway'",
"]",
")",
"return"
] | 35.15 | 19.25 |
def validate(self):
"""
Perform some basic checks to help ensure that the specification is valid.
Throws an exception if an invalid value is found.
Returns true if all checks were passed.
:return: boolean
"""
# Check all values for None
for attr in self.__dict__:
if self.__dict__[attr] is None:
raise ValueError(attr + " is not set")
# Validate name
invalid_chars = GPTaskSpec.invalid_chars()
if any(char in invalid_chars for char in self.name):
raise ValueError("module name includes invalid characters: " + self.name)
# Validate LSID
self._valid_lsid()
# Validate categories
if not self.all_strings(self.categories):
raise TypeError("categories contains non-string value: " + str(self.categories))
# Validate file formats
if not self.all_strings(self.file_format):
raise TypeError("file_format contains non-string value: " + str(self.file_format))
# Validate support files
if not self.all_strings(self.support_files):
raise TypeError("support_files contains non-string value: " + str(self.support_files))
# Validate parameter list
if not self._all_params(self.parameters):
raise TypeError("parameters contains non-GPParamSpec value: " + str(self.parameters))
# Validate individual parameters
for param in self.parameters:
param.validate()
# Return that everything validates
return True
|
[
"def",
"validate",
"(",
"self",
")",
":",
"# Check all values for None",
"for",
"attr",
"in",
"self",
".",
"__dict__",
":",
"if",
"self",
".",
"__dict__",
"[",
"attr",
"]",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"attr",
"+",
"\" is not set\"",
")",
"# Validate name",
"invalid_chars",
"=",
"GPTaskSpec",
".",
"invalid_chars",
"(",
")",
"if",
"any",
"(",
"char",
"in",
"invalid_chars",
"for",
"char",
"in",
"self",
".",
"name",
")",
":",
"raise",
"ValueError",
"(",
"\"module name includes invalid characters: \"",
"+",
"self",
".",
"name",
")",
"# Validate LSID",
"self",
".",
"_valid_lsid",
"(",
")",
"# Validate categories",
"if",
"not",
"self",
".",
"all_strings",
"(",
"self",
".",
"categories",
")",
":",
"raise",
"TypeError",
"(",
"\"categories contains non-string value: \"",
"+",
"str",
"(",
"self",
".",
"categories",
")",
")",
"# Validate file formats",
"if",
"not",
"self",
".",
"all_strings",
"(",
"self",
".",
"file_format",
")",
":",
"raise",
"TypeError",
"(",
"\"file_format contains non-string value: \"",
"+",
"str",
"(",
"self",
".",
"file_format",
")",
")",
"# Validate support files",
"if",
"not",
"self",
".",
"all_strings",
"(",
"self",
".",
"support_files",
")",
":",
"raise",
"TypeError",
"(",
"\"support_files contains non-string value: \"",
"+",
"str",
"(",
"self",
".",
"support_files",
")",
")",
"# Validate parameter list",
"if",
"not",
"self",
".",
"_all_params",
"(",
"self",
".",
"parameters",
")",
":",
"raise",
"TypeError",
"(",
"\"parameters contains non-GPParamSpec value: \"",
"+",
"str",
"(",
"self",
".",
"parameters",
")",
")",
"# Validate individual parameters",
"for",
"param",
"in",
"self",
".",
"parameters",
":",
"param",
".",
"validate",
"(",
")",
"# Return that everything validates",
"return",
"True"
] | 37.02381 | 21.595238 |
def create(cls, name, abr_type='cisco', auto_cost_bandwidth=100,
deprecated_algorithm=False, initial_delay=200,
initial_hold_time=1000, max_hold_time=10000,
shutdown_max_metric_lsa=0, startup_max_metric_lsa=0):
"""
Create custom Domain Settings
Domain settings are referenced by an OSPFProfile
:param str name: name of custom domain settings
:param str abr_type: cisco|shortcut|standard
:param int auto_cost_bandwidth: Mbits/s
:param bool deprecated_algorithm: RFC 1518 compatibility
:param int initial_delay: in milliseconds
        :param int initial_hold_time: in milliseconds
:param int max_hold_time: in milliseconds
:param int shutdown_max_metric_lsa: in seconds
:param int startup_max_metric_lsa: in seconds
:raises CreateElementFailed: create failed with reason
:return: instance with meta
:rtype: OSPFDomainSetting
"""
json = {'name': name,
'abr_type': abr_type,
'auto_cost_bandwidth': auto_cost_bandwidth,
'deprecated_algorithm': deprecated_algorithm,
'initial_delay': initial_delay,
'initial_hold_time': initial_hold_time,
'max_hold_time': max_hold_time,
'shutdown_max_metric_lsa': shutdown_max_metric_lsa,
'startup_max_metric_lsa': startup_max_metric_lsa}
return ElementCreator(cls, json)
|
[
"def",
"create",
"(",
"cls",
",",
"name",
",",
"abr_type",
"=",
"'cisco'",
",",
"auto_cost_bandwidth",
"=",
"100",
",",
"deprecated_algorithm",
"=",
"False",
",",
"initial_delay",
"=",
"200",
",",
"initial_hold_time",
"=",
"1000",
",",
"max_hold_time",
"=",
"10000",
",",
"shutdown_max_metric_lsa",
"=",
"0",
",",
"startup_max_metric_lsa",
"=",
"0",
")",
":",
"json",
"=",
"{",
"'name'",
":",
"name",
",",
"'abr_type'",
":",
"abr_type",
",",
"'auto_cost_bandwidth'",
":",
"auto_cost_bandwidth",
",",
"'deprecated_algorithm'",
":",
"deprecated_algorithm",
",",
"'initial_delay'",
":",
"initial_delay",
",",
"'initial_hold_time'",
":",
"initial_hold_time",
",",
"'max_hold_time'",
":",
"max_hold_time",
",",
"'shutdown_max_metric_lsa'",
":",
"shutdown_max_metric_lsa",
",",
"'startup_max_metric_lsa'",
":",
"startup_max_metric_lsa",
"}",
"return",
"ElementCreator",
"(",
"cls",
",",
"json",
")"
] | 44.848485 | 15.636364 |
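A hedged usage sketch of the factory above; only the name is required and the element name is illustrative:

domain = OSPFDomainSetting.create(name='my-ospf-domain', abr_type='standard')
# remaining keywords fall back to the defaults listed in the signature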
def seek(self, offset, whence):
"""Changes the current file position of this file.
The file current position always applies to the :py:func:`IFile.read`
method. Same for the :py:func:`IFile.write` method it except when
the :py:func:`IFile.access_mode` is :py:attr:`FileAccessMode.append_only`
or :py:attr:`FileAccessMode.append_read` .
in offset of type int
Offset to seek relative to the position specified by @a whence.
in whence of type :class:`FileSeekOrigin`
One of the :py:class:`FileSeekOrigin` seek starting points.
return new_offset of type int
The new file offset after the seek operation.
"""
if not isinstance(offset, baseinteger):
raise TypeError("offset can only be an instance of type baseinteger")
if not isinstance(whence, FileSeekOrigin):
raise TypeError("whence can only be an instance of type FileSeekOrigin")
new_offset = self._call("seek",
in_p=[offset, whence])
return new_offset
|
[
"def",
"seek",
"(",
"self",
",",
"offset",
",",
"whence",
")",
":",
"if",
"not",
"isinstance",
"(",
"offset",
",",
"baseinteger",
")",
":",
"raise",
"TypeError",
"(",
"\"offset can only be an instance of type baseinteger\"",
")",
"if",
"not",
"isinstance",
"(",
"whence",
",",
"FileSeekOrigin",
")",
":",
"raise",
"TypeError",
"(",
"\"whence can only be an instance of type FileSeekOrigin\"",
")",
"new_offset",
"=",
"self",
".",
"_call",
"(",
"\"seek\"",
",",
"in_p",
"=",
"[",
"offset",
",",
"whence",
"]",
")",
"return",
"new_offset"
] | 43.28 | 22.2 |
def _get_sv_callers(items):
"""
return a sorted list of all of the structural variant callers run
"""
callers = []
for data in items:
for sv in data.get("sv", []):
callers.append(sv["variantcaller"])
    return sorted(set(x for x in callers if x != "sv-ensemble"))
|
[
"def",
"_get_sv_callers",
"(",
"items",
")",
":",
"callers",
"=",
"[",
"]",
"for",
"data",
"in",
"items",
":",
"for",
"sv",
"in",
"data",
".",
"get",
"(",
"\"sv\"",
",",
"[",
"]",
")",
":",
"callers",
".",
"append",
"(",
"sv",
"[",
"\"variantcaller\"",
"]",
")",
"return",
"list",
"(",
"set",
"(",
"[",
"x",
"for",
"x",
"in",
"callers",
"if",
"x",
"!=",
"\"sv-ensemble\"",
"]",
")",
")",
".",
"sort",
"(",
")"
] | 33.666667 | 13.888889 |
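A small illustrative check of the deduplicate-and-sort behaviour; the input dicts are fabricated for the example:

items = [{'sv': [{'variantcaller': 'manta'}, {'variantcaller': 'sv-ensemble'}]},
         {'sv': [{'variantcaller': 'lumpy'}]}]
_get_sv_callers(items)   # -> ['lumpy', 'manta']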
def connect(self, port=None, baud_rate=115200):
'''
Parameters
----------
port : str or list-like, optional
Port (or list of ports) to try to connect to as a DMF Control
Board.
baud_rate : int, optional
Returns
-------
str
Port DMF control board was connected on.
Raises
------
RuntimeError
If connection could not be established.
IOError
If no ports were specified and Arduino Mega2560 not found on any
port.
'''
if isinstance(port, types.StringTypes):
ports = [port]
else:
ports = port
if not ports:
# No port was specified.
#
# Try ports matching Mega2560 USB vendor/product ID.
ports = serial_ports().index.tolist()
if not ports:
raise IOError("Arduino Mega2560 not found on any port.")
for comport_i in ports:
if self.connected():
self.disconnect()
self.port = None
self._i2c_devices = {}
# Try to connect to control board on available ports.
try:
logger.debug('Try to connect to: %s', comport_i)
# Explicitly cast `comport_i` to string since `Base.connect`
# Boost Python binding does not support unicode strings.
#
# Fixes [issue 8][issue-8].
#
# [issue-8]: https://github.com/wheeler-microfluidics/dmf-control-board-firmware/issues/8
Base.connect(self, str(comport_i), baud_rate)
self.port = comport_i
break
except BadVGND, exception:
logger.warning(exception)
break
except RuntimeError, exception:
continue
else:
raise RuntimeError('Could not connect to control board on any of '
'the following ports: %s' % ports)
name = self.name()
version = self.hardware_version()
firmware = self.software_version()
serial_number_string = ""
try:
serial_number_string = ", S/N %03d" % self.serial_number
except:
# Firmware does not support `serial_number` attribute.
pass
logger.info("Connected to %s v%s (Firmware: %s%s)" %
(name, version, firmware, serial_number_string))
logger.info("Poll control board for series resistors and "
"capacitance values.")
self._read_calibration_data()
try:
self.__aref__ = self._aref()
logger.info("Analog reference = %.2f V" % self.__aref__)
except:
# Firmware does not support `__aref__` attribute.
pass
# Check VGND for both analog channels
        expected = 2 ** 10 / 2
v = {}
channels = [0, 1]
damaged = []
for channel in channels:
try:
v[channel] = np.mean(self.analog_reads(channel, 10))
logger.info("A%d VGND = %.2f V (%.2f%% of Aref)", channel,
self.__aref__ * v[channel] / (2 ** 10), 100.0 *
v[channel] / (2 ** 10))
# Make sure that the VGND is close to the expected value;
# otherwise, the op-amp may be damaged (expected error
# is <= 10%).
if np.abs(v[channel] - expected) / expected > .1:
damaged.append(channel)
except:
# Firmware does not support `__aref__` attribute.
break
# Scan I2C bus to generate list of connected devices.
self._i2c_scan()
if damaged:
# At least one of the analog input channels appears to be damaged.
if len(damaged) == 1:
msg = "Analog channel %d appears" % damaged[0]
else:
msg = "Analog channels %s appear" % damaged
raise BadVGND(msg + " to be damaged. You may need to replace the "
"op-amp on the control board.")
return self.RETURN_OK
|
[
"def",
"connect",
"(",
"self",
",",
"port",
"=",
"None",
",",
"baud_rate",
"=",
"115200",
")",
":",
"if",
"isinstance",
"(",
"port",
",",
"types",
".",
"StringTypes",
")",
":",
"ports",
"=",
"[",
"port",
"]",
"else",
":",
"ports",
"=",
"port",
"if",
"not",
"ports",
":",
"# No port was specified.",
"#",
"# Try ports matching Mega2560 USB vendor/product ID.",
"ports",
"=",
"serial_ports",
"(",
")",
".",
"index",
".",
"tolist",
"(",
")",
"if",
"not",
"ports",
":",
"raise",
"IOError",
"(",
"\"Arduino Mega2560 not found on any port.\"",
")",
"for",
"comport_i",
"in",
"ports",
":",
"if",
"self",
".",
"connected",
"(",
")",
":",
"self",
".",
"disconnect",
"(",
")",
"self",
".",
"port",
"=",
"None",
"self",
".",
"_i2c_devices",
"=",
"{",
"}",
"# Try to connect to control board on available ports.",
"try",
":",
"logger",
".",
"debug",
"(",
"'Try to connect to: %s'",
",",
"comport_i",
")",
"# Explicitly cast `comport_i` to string since `Base.connect`",
"# Boost Python binding does not support unicode strings.",
"#",
"# Fixes [issue 8][issue-8].",
"#",
"# [issue-8]: https://github.com/wheeler-microfluidics/dmf-control-board-firmware/issues/8",
"Base",
".",
"connect",
"(",
"self",
",",
"str",
"(",
"comport_i",
")",
",",
"baud_rate",
")",
"self",
".",
"port",
"=",
"comport_i",
"break",
"except",
"BadVGND",
",",
"exception",
":",
"logger",
".",
"warning",
"(",
"exception",
")",
"break",
"except",
"RuntimeError",
",",
"exception",
":",
"continue",
"else",
":",
"raise",
"RuntimeError",
"(",
"'Could not connect to control board on any of '",
"'the following ports: %s'",
"%",
"ports",
")",
"name",
"=",
"self",
".",
"name",
"(",
")",
"version",
"=",
"self",
".",
"hardware_version",
"(",
")",
"firmware",
"=",
"self",
".",
"software_version",
"(",
")",
"serial_number_string",
"=",
"\"\"",
"try",
":",
"serial_number_string",
"=",
"\", S/N %03d\"",
"%",
"self",
".",
"serial_number",
"except",
":",
"# Firmware does not support `serial_number` attribute.",
"pass",
"logger",
".",
"info",
"(",
"\"Connected to %s v%s (Firmware: %s%s)\"",
"%",
"(",
"name",
",",
"version",
",",
"firmware",
",",
"serial_number_string",
")",
")",
"logger",
".",
"info",
"(",
"\"Poll control board for series resistors and \"",
"\"capacitance values.\"",
")",
"self",
".",
"_read_calibration_data",
"(",
")",
"try",
":",
"self",
".",
"__aref__",
"=",
"self",
".",
"_aref",
"(",
")",
"logger",
".",
"info",
"(",
"\"Analog reference = %.2f V\"",
"%",
"self",
".",
"__aref__",
")",
"except",
":",
"# Firmware does not support `__aref__` attribute.",
"pass",
"# Check VGND for both analog channels",
"expected",
"=",
"2",
"**",
"10",
"/",
"2",
"v",
"=",
"{",
"}",
"channels",
"=",
"[",
"0",
",",
"1",
"]",
"damaged",
"=",
"[",
"]",
"for",
"channel",
"in",
"channels",
":",
"try",
":",
"v",
"[",
"channel",
"]",
"=",
"np",
".",
"mean",
"(",
"self",
".",
"analog_reads",
"(",
"channel",
",",
"10",
")",
")",
"logger",
".",
"info",
"(",
"\"A%d VGND = %.2f V (%.2f%% of Aref)\"",
",",
"channel",
",",
"self",
".",
"__aref__",
"*",
"v",
"[",
"channel",
"]",
"/",
"(",
"2",
"**",
"10",
")",
",",
"100.0",
"*",
"v",
"[",
"channel",
"]",
"/",
"(",
"2",
"**",
"10",
")",
")",
"# Make sure that the VGND is close to the expected value;",
"# otherwise, the op-amp may be damaged (expected error",
"# is <= 10%).",
"if",
"np",
".",
"abs",
"(",
"v",
"[",
"channel",
"]",
"-",
"expected",
")",
"/",
"expected",
">",
".1",
":",
"damaged",
".",
"append",
"(",
"channel",
")",
"except",
":",
"# Firmware does not support `__aref__` attribute.",
"break",
"# Scan I2C bus to generate list of connected devices.",
"self",
".",
"_i2c_scan",
"(",
")",
"if",
"damaged",
":",
"# At least one of the analog input channels appears to be damaged.",
"if",
"len",
"(",
"damaged",
")",
"==",
"1",
":",
"msg",
"=",
"\"Analog channel %d appears\"",
"%",
"damaged",
"[",
"0",
"]",
"else",
":",
"msg",
"=",
"\"Analog channels %s appear\"",
"%",
"damaged",
"raise",
"BadVGND",
"(",
"msg",
"+",
"\" to be damaged. You may need to replace the \"",
"\"op-amp on the control board.\"",
")",
"return",
"self",
".",
"RETURN_OK"
] | 35.268908 | 21.722689 |
def plot_pot(self, colorbar=True, cb_orientation='vertical',
cb_label='Potential, m$^2$ s$^{-2}$', ax=None, show=True,
fname=None, **kwargs):
"""
Plot the gravitational potential.
Usage
-----
x.plot_pot([tick_interval, xlabel, ylabel, ax, colorbar,
cb_orientation, cb_label, show, fname, **kwargs])
Parameters
----------
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
colorbar : bool, optional, default = True
If True, plot a colorbar.
cb_orientation : str, optional, default = 'vertical'
Orientation of the colorbar: either 'vertical' or 'horizontal'.
        cb_label : str, optional, default = 'Potential, m$^2$ s$^{-2}$'
Text label for the colorbar.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
kwargs : optional
            Keyword arguments that will be sent to the SHGrid.plot()
and plt.imshow() methods.
"""
if ax is None:
fig, axes = self.pot.plot(colorbar=colorbar,
cb_orientation=cb_orientation,
cb_label=cb_label, show=False, **kwargs)
if show:
fig.show()
if fname is not None:
fig.savefig(fname)
return fig, axes
else:
self.pot.plot(colorbar=colorbar, cb_orientation=cb_orientation,
cb_label=cb_label, ax=ax, **kwargs)
|
[
"def",
"plot_pot",
"(",
"self",
",",
"colorbar",
"=",
"True",
",",
"cb_orientation",
"=",
"'vertical'",
",",
"cb_label",
"=",
"'Potential, m$^2$ s$^{-2}$'",
",",
"ax",
"=",
"None",
",",
"show",
"=",
"True",
",",
"fname",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"ax",
"is",
"None",
":",
"fig",
",",
"axes",
"=",
"self",
".",
"pot",
".",
"plot",
"(",
"colorbar",
"=",
"colorbar",
",",
"cb_orientation",
"=",
"cb_orientation",
",",
"cb_label",
"=",
"cb_label",
",",
"show",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
"if",
"show",
":",
"fig",
".",
"show",
"(",
")",
"if",
"fname",
"is",
"not",
"None",
":",
"fig",
".",
"savefig",
"(",
"fname",
")",
"return",
"fig",
",",
"axes",
"else",
":",
"self",
".",
"pot",
".",
"plot",
"(",
"colorbar",
"=",
"colorbar",
",",
"cb_orientation",
"=",
"cb_orientation",
",",
"cb_label",
"=",
"cb_label",
",",
"ax",
"=",
"ax",
",",
"*",
"*",
"kwargs",
")"
] | 42.078431 | 18.862745 |
def list_tables(self, limit=None, start_table=None):
"""
Return a list of table names associated with the current account
and endpoint.
:type limit: int
:param limit: The maximum number of tables to return.
:type start_table: str
        :param start_table: The name of the table that starts the
list. If you ran a previous list_tables and not
all results were returned, the response dict would
include a LastEvaluatedTableName attribute. Use
that value here to continue the listing.
"""
data = {}
if limit:
data['Limit'] = limit
if start_table:
data['ExclusiveStartTableName'] = start_table
json_input = json.dumps(data)
return self.make_request('ListTables', json_input)
|
[
"def",
"list_tables",
"(",
"self",
",",
"limit",
"=",
"None",
",",
"start_table",
"=",
"None",
")",
":",
"data",
"=",
"{",
"}",
"if",
"limit",
":",
"data",
"[",
"'Limit'",
"]",
"=",
"limit",
"if",
"start_table",
":",
"data",
"[",
"'ExclusiveStartTableName'",
"]",
"=",
"start_table",
"json_input",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
"return",
"self",
".",
"make_request",
"(",
"'ListTables'",
",",
"json_input",
")"
] | 37.136364 | 17.772727 |
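A hedged pagination sketch against the method above, assuming `conn` is the Layer1-style client it belongs to; DynamoDB reports LastEvaluatedTableName when more results remain:

tables, start = [], None
while True:
    result = conn.list_tables(limit=10, start_table=start)
    tables.extend(result['TableNames'])
    start = result.get('LastEvaluatedTableName')
    if start is None:
        break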
def setup_signing(self, secret_key, sign_outgoing=True, allow_unsigned_callback=None, initial_timestamp=None, link_id=None):
'''setup for MAVLink2 signing'''
self.mav.signing.secret_key = secret_key
self.mav.signing.sign_outgoing = sign_outgoing
self.mav.signing.allow_unsigned_callback = allow_unsigned_callback
if link_id is None:
# auto-increment the link_id for each link
global global_link_id
link_id = global_link_id
global_link_id = min(global_link_id + 1, 255)
self.mav.signing.link_id = link_id
if initial_timestamp is None:
# timestamp is time since 1/1/2015
epoch_offset = 1420070400
now = max(time.time(), epoch_offset)
initial_timestamp = now - epoch_offset
initial_timestamp = int(initial_timestamp * 100 * 1000)
# initial_timestamp is in 10usec units
self.mav.signing.timestamp = initial_timestamp
|
[
"def",
"setup_signing",
"(",
"self",
",",
"secret_key",
",",
"sign_outgoing",
"=",
"True",
",",
"allow_unsigned_callback",
"=",
"None",
",",
"initial_timestamp",
"=",
"None",
",",
"link_id",
"=",
"None",
")",
":",
"self",
".",
"mav",
".",
"signing",
".",
"secret_key",
"=",
"secret_key",
"self",
".",
"mav",
".",
"signing",
".",
"sign_outgoing",
"=",
"sign_outgoing",
"self",
".",
"mav",
".",
"signing",
".",
"allow_unsigned_callback",
"=",
"allow_unsigned_callback",
"if",
"link_id",
"is",
"None",
":",
"# auto-increment the link_id for each link",
"global",
"global_link_id",
"link_id",
"=",
"global_link_id",
"global_link_id",
"=",
"min",
"(",
"global_link_id",
"+",
"1",
",",
"255",
")",
"self",
".",
"mav",
".",
"signing",
".",
"link_id",
"=",
"link_id",
"if",
"initial_timestamp",
"is",
"None",
":",
"# timestamp is time since 1/1/2015",
"epoch_offset",
"=",
"1420070400",
"now",
"=",
"max",
"(",
"time",
".",
"time",
"(",
")",
",",
"epoch_offset",
")",
"initial_timestamp",
"=",
"now",
"-",
"epoch_offset",
"initial_timestamp",
"=",
"int",
"(",
"initial_timestamp",
"*",
"100",
"*",
"1000",
")",
"# initial_timestamp is in 10usec units",
"self",
".",
"mav",
".",
"signing",
".",
"timestamp",
"=",
"initial_timestamp"
] | 51.263158 | 14.421053 |
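A hedged setup sketch; deriving the 32-byte key from a passphrase with SHA-256 is a common pattern with this API, not something the method mandates, and `master` is an assumed pymavlink connection:

import hashlib

secret_key = hashlib.sha256(b'my signing passphrase').digest()   # 32 bytes

def allow_unsigned(mav, msg_id):
    # accept unsigned RADIO_STATUS (id 109) only, reject everything else
    return msg_id == 109

master.setup_signing(secret_key, sign_outgoing=True,
                     allow_unsigned_callback=allow_unsigned)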
def vel_term_floc(ConcAl, ConcClay, coag, material, DIM_FRACTAL,
DiamTarget, Temp):
"""Calculate floc terminal velocity."""
WaterDensity = pc.density_water(Temp).magnitude
return (((pc.gravity.magnitude * material.Diameter**2)
/ (18 * PHI_FLOC * pc.viscosity_kinematic(Temp).magnitude)
)
* ((dens_floc_init(ConcAl, ConcClay, coag, material).magnitude
- WaterDensity
)
/ WaterDensity
)
* (DiamTarget / material.Diameter) ** (DIM_FRACTAL - 1)
)
|
[
"def",
"vel_term_floc",
"(",
"ConcAl",
",",
"ConcClay",
",",
"coag",
",",
"material",
",",
"DIM_FRACTAL",
",",
"DiamTarget",
",",
"Temp",
")",
":",
"WaterDensity",
"=",
"pc",
".",
"density_water",
"(",
"Temp",
")",
".",
"magnitude",
"return",
"(",
"(",
"(",
"pc",
".",
"gravity",
".",
"magnitude",
"*",
"material",
".",
"Diameter",
"**",
"2",
")",
"/",
"(",
"18",
"*",
"PHI_FLOC",
"*",
"pc",
".",
"viscosity_kinematic",
"(",
"Temp",
")",
".",
"magnitude",
")",
")",
"*",
"(",
"(",
"dens_floc_init",
"(",
"ConcAl",
",",
"ConcClay",
",",
"coag",
",",
"material",
")",
".",
"magnitude",
"-",
"WaterDensity",
")",
"/",
"WaterDensity",
")",
"*",
"(",
"DiamTarget",
"/",
"material",
".",
"Diameter",
")",
"**",
"(",
"DIM_FRACTAL",
"-",
"1",
")",
")"
] | 41.642857 | 19.285714 |
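The nested return expression implements the relation below (a reconstruction for readability; d_0 is material.Diameter, d is DiamTarget, \Gamma is DIM_FRACTAL, \phi is PHI_FLOC, \nu(T) the kinematic viscosity and \rho_{floc,0} the initial floc density):

v_t = \frac{g \, d_0^2}{18 \, \phi \, \nu(T)} \cdot \frac{\rho_{\mathrm{floc},0} - \rho_w}{\rho_w} \cdot \left(\frac{d}{d_0}\right)^{\Gamma - 1}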
def _get_dynamic_field_for(cls, field_name):
"""
Return the dynamic field within this class that match the given name.
    Keep an internal cache to speed up future calls with the same field name.
(The cache store the field for each individual class and subclasses, to
keep the link between a field and its direct model)
"""
from .fields import DynamicFieldMixin # here to avoid circular import
if cls not in ModelWithDynamicFieldMixin._dynamic_fields_cache:
ModelWithDynamicFieldMixin._dynamic_fields_cache[cls] = {}
if field_name not in ModelWithDynamicFieldMixin._dynamic_fields_cache[cls]:
ModelWithDynamicFieldMixin._dynamic_fields_cache[cls][field_name] = None
for a_field_name in cls._fields:
field = cls.get_field(a_field_name)
if isinstance(field, DynamicFieldMixin) and field._accept_name(field_name):
ModelWithDynamicFieldMixin._dynamic_fields_cache[cls][field_name] = field
break
field = ModelWithDynamicFieldMixin._dynamic_fields_cache[cls][field_name]
if field is None:
raise ValueError('No DynamicField matching "%s"' % field_name)
return field
|
[
"def",
"_get_dynamic_field_for",
"(",
"cls",
",",
"field_name",
")",
":",
"from",
".",
"fields",
"import",
"DynamicFieldMixin",
"# here to avoid circular import",
"if",
"cls",
"not",
"in",
"ModelWithDynamicFieldMixin",
".",
"_dynamic_fields_cache",
":",
"ModelWithDynamicFieldMixin",
".",
"_dynamic_fields_cache",
"[",
"cls",
"]",
"=",
"{",
"}",
"if",
"field_name",
"not",
"in",
"ModelWithDynamicFieldMixin",
".",
"_dynamic_fields_cache",
"[",
"cls",
"]",
":",
"ModelWithDynamicFieldMixin",
".",
"_dynamic_fields_cache",
"[",
"cls",
"]",
"[",
"field_name",
"]",
"=",
"None",
"for",
"a_field_name",
"in",
"cls",
".",
"_fields",
":",
"field",
"=",
"cls",
".",
"get_field",
"(",
"a_field_name",
")",
"if",
"isinstance",
"(",
"field",
",",
"DynamicFieldMixin",
")",
"and",
"field",
".",
"_accept_name",
"(",
"field_name",
")",
":",
"ModelWithDynamicFieldMixin",
".",
"_dynamic_fields_cache",
"[",
"cls",
"]",
"[",
"field_name",
"]",
"=",
"field",
"break",
"field",
"=",
"ModelWithDynamicFieldMixin",
".",
"_dynamic_fields_cache",
"[",
"cls",
"]",
"[",
"field_name",
"]",
"if",
"field",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'No DynamicField matching \"%s\"'",
"%",
"field_name",
")",
"return",
"field"
] | 48 | 29.461538 |
def handler(self, reply):
"""Upper level handler of keyboard events."""
data = reply.data
while len(data):
event, data = rq.EventField(None).parse_binary_value(data, self.display.display, None, None)
if self.escape(event): # Quit if this returns True
self.stop()
else:
self._tap(event)
|
[
"def",
"handler",
"(",
"self",
",",
"reply",
")",
":",
"data",
"=",
"reply",
".",
"data",
"while",
"len",
"(",
"data",
")",
":",
"event",
",",
"data",
"=",
"rq",
".",
"EventField",
"(",
"None",
")",
".",
"parse_binary_value",
"(",
"data",
",",
"self",
".",
"display",
".",
"display",
",",
"None",
",",
"None",
")",
"if",
"self",
".",
"escape",
"(",
"event",
")",
":",
"# Quit if this returns True",
"self",
".",
"stop",
"(",
")",
"else",
":",
"self",
".",
"_tap",
"(",
"event",
")"
] | 41.111111 | 19.666667 |
def intersect(self, *queries):
        '''Return a new :class:`Query` obtained from the intersection of this
        :class:`Query` with one or more *queries*. Works the same way as
        the :meth:`union` method.'''
q = self._clone()
q.intersections += queries
return q
|
[
"def",
"intersect",
"(",
"self",
",",
"*",
"queries",
")",
":",
"q",
"=",
"self",
".",
"_clone",
"(",
")",
"q",
".",
"intersections",
"+=",
"queries",
"return",
"q"
] | 40.142857 | 18.142857 |
def get(key, profile=None): # pylint: disable=W0613
'''
Get a value from the dictionary
'''
data = _get_values(profile)
# Decrypt SDB data if specified in the profile
if profile and profile.get('gpg', False):
return salt.utils.data.traverse_dict_and_list(_decrypt(data), key, None)
return salt.utils.data.traverse_dict_and_list(data, key, None)
|
[
"def",
"get",
"(",
"key",
",",
"profile",
"=",
"None",
")",
":",
"# pylint: disable=W0613",
"data",
"=",
"_get_values",
"(",
"profile",
")",
"# Decrypt SDB data if specified in the profile",
"if",
"profile",
"and",
"profile",
".",
"get",
"(",
"'gpg'",
",",
"False",
")",
":",
"return",
"salt",
".",
"utils",
".",
"data",
".",
"traverse_dict_and_list",
"(",
"_decrypt",
"(",
"data",
")",
",",
"key",
",",
"None",
")",
"return",
"salt",
".",
"utils",
".",
"data",
".",
"traverse_dict_and_list",
"(",
"data",
",",
"key",
",",
"None",
")"
] | 33.909091 | 23 |
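A hedged illustration of the nested-key traversal the return statement relies on; Salt's default delimiter for traverse_dict_and_list is assumed to be ':':

import salt.utils.data

data = {'db': {'password': 's3cret'}}
salt.utils.data.traverse_dict_and_list(data, 'db:password', None)   # 's3cret'
salt.utils.data.traverse_dict_and_list(data, 'db:missing', None)    # None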
def update(self, callback=None, errback=None, **kwargs):
"""
Update Network configuration. Pass a list of keywords and their values to update.
For the list of keywords available for zone configuration, see :attr:`ns1.rest.ipam.Networks.INT_FIELDS` and :attr:`ns1.rest.ipam.Networks.PASSTHRU_FIELDS`
"""
if not self.data:
raise NetworkException('Network not loaded')
def success(result, *args):
self.data = result
self.id = result['id']
self.name = result['name']
self.report = self._rest.report(self.id)
if callback:
return callback(self)
else:
return self
return self._rest.update(self.id, callback=success, errback=errback,
**kwargs)
|
[
"def",
"update",
"(",
"self",
",",
"callback",
"=",
"None",
",",
"errback",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"data",
":",
"raise",
"NetworkException",
"(",
"'Network not loaded'",
")",
"def",
"success",
"(",
"result",
",",
"*",
"args",
")",
":",
"self",
".",
"data",
"=",
"result",
"self",
".",
"id",
"=",
"result",
"[",
"'id'",
"]",
"self",
".",
"name",
"=",
"result",
"[",
"'name'",
"]",
"self",
".",
"report",
"=",
"self",
".",
"_rest",
".",
"report",
"(",
"self",
".",
"id",
")",
"if",
"callback",
":",
"return",
"callback",
"(",
"self",
")",
"else",
":",
"return",
"self",
"return",
"self",
".",
"_rest",
".",
"update",
"(",
"self",
".",
"id",
",",
"callback",
"=",
"success",
",",
"errback",
"=",
"errback",
",",
"*",
"*",
"kwargs",
")"
] | 41.15 | 21.35 |
def _evaluate(self,*args,**kwargs):
"""
NAME:
__call__ (_evaluate)
PURPOSE:
evaluate the actions (jr,lz,jz)
INPUT:
Either:
a) R,vR,vT,z,vz[,phi]:
1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument
delta= (object-wide default) can be used to override the object-wide focal length; can also be an array with length N to allow different delta for different phase-space points
u0= (None) if object-wide option useu0 is set, u0 to use (if useu0 and useu0 is None, a good value will be computed)
c= (object-wide default, bool) True/False to override the object-wide setting for whether or not to use the C implementation
order= (object-wide default, int) number of points to use in the Gauss-Legendre numerical integration of the relevant action integrals
When not using C:
fixed_quad= (False) if True, use Gaussian quadrature (scipy.integrate.fixed_quad instead of scipy.integrate.quad)
scipy.integrate.fixed_quad or .quad keywords
OUTPUT:
(jr,lz,jz)
HISTORY:
2012-11-27 - Written - Bovy (IAS)
2017-12-27 - Allowed individual delta for each point - Bovy (UofT)
"""
delta= kwargs.pop('delta',self._delta)
order= kwargs.get('order',self._order)
if ((self._c and not ('c' in kwargs and not kwargs['c']))\
or (ext_loaded and (('c' in kwargs and kwargs['c'])))) \
and _check_c(self._pot):
if len(args) == 5: #R,vR.vT, z, vz
R,vR,vT, z, vz= args
elif len(args) == 6: #R,vR.vT, z, vz, phi
R,vR,vT, z, vz, phi= args
else:
self._parse_eval_args(*args)
R= self._eval_R
vR= self._eval_vR
vT= self._eval_vT
z= self._eval_z
vz= self._eval_vz
if isinstance(R,float):
R= nu.array([R])
vR= nu.array([vR])
vT= nu.array([vT])
z= nu.array([z])
vz= nu.array([vz])
Lz= R*vT
if self._useu0:
#First calculate u0
if 'u0' in kwargs:
u0= nu.asarray(kwargs['u0'])
else:
E= nu.array([_evaluatePotentials(self._pot,R[ii],z[ii])
+vR[ii]**2./2.+vz[ii]**2./2.+vT[ii]**2./2. for ii in range(len(R))])
u0= actionAngleStaeckel_c.actionAngleStaeckel_calcu0(\
E,Lz,self._pot,delta)[0]
kwargs.pop('u0',None)
else:
u0= None
jr, jz, err= actionAngleStaeckel_c.actionAngleStaeckel_c(\
self._pot,delta,R,vR,vT,z,vz,u0=u0,order=order)
if err == 0:
return (jr,Lz,jz)
else: #pragma: no cover
raise RuntimeError("C-code for calculation actions failed; try with c=False")
else:
if 'c' in kwargs and kwargs['c'] and not self._c: #pragma: no cover
warnings.warn("C module not used because potential does not have a C implementation",galpyWarning)
kwargs.pop('c',None)
if (len(args) == 5 or len(args) == 6) \
and isinstance(args[0],nu.ndarray):
ojr= nu.zeros((len(args[0])))
olz= nu.zeros((len(args[0])))
ojz= nu.zeros((len(args[0])))
for ii in range(len(args[0])):
if len(args) == 5:
targs= (args[0][ii],args[1][ii],args[2][ii],
args[3][ii],args[4][ii])
elif len(args) == 6:
targs= (args[0][ii],args[1][ii],args[2][ii],
args[3][ii],args[4][ii],args[5][ii])
tkwargs= copy.copy(kwargs)
try:
tkwargs['delta']= delta[ii]
except TypeError:
tkwargs['delta']= delta
tjr,tlz,tjz= self(*targs,**tkwargs)
ojr[ii]= tjr
ojz[ii]= tjz
olz[ii]= tlz
return (ojr,olz,ojz)
else:
#Set up the actionAngleStaeckelSingle object
aASingle= actionAngleStaeckelSingle(*args,pot=self._pot,
delta=delta)
return (aASingle.JR(**copy.copy(kwargs)),
aASingle._R*aASingle._vT,
aASingle.Jz(**copy.copy(kwargs)))
|
[
"def",
"_evaluate",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"delta",
"=",
"kwargs",
".",
"pop",
"(",
"'delta'",
",",
"self",
".",
"_delta",
")",
"order",
"=",
"kwargs",
".",
"get",
"(",
"'order'",
",",
"self",
".",
"_order",
")",
"if",
"(",
"(",
"self",
".",
"_c",
"and",
"not",
"(",
"'c'",
"in",
"kwargs",
"and",
"not",
"kwargs",
"[",
"'c'",
"]",
")",
")",
"or",
"(",
"ext_loaded",
"and",
"(",
"(",
"'c'",
"in",
"kwargs",
"and",
"kwargs",
"[",
"'c'",
"]",
")",
")",
")",
")",
"and",
"_check_c",
"(",
"self",
".",
"_pot",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"5",
":",
"#R,vR.vT, z, vz",
"R",
",",
"vR",
",",
"vT",
",",
"z",
",",
"vz",
"=",
"args",
"elif",
"len",
"(",
"args",
")",
"==",
"6",
":",
"#R,vR.vT, z, vz, phi",
"R",
",",
"vR",
",",
"vT",
",",
"z",
",",
"vz",
",",
"phi",
"=",
"args",
"else",
":",
"self",
".",
"_parse_eval_args",
"(",
"*",
"args",
")",
"R",
"=",
"self",
".",
"_eval_R",
"vR",
"=",
"self",
".",
"_eval_vR",
"vT",
"=",
"self",
".",
"_eval_vT",
"z",
"=",
"self",
".",
"_eval_z",
"vz",
"=",
"self",
".",
"_eval_vz",
"if",
"isinstance",
"(",
"R",
",",
"float",
")",
":",
"R",
"=",
"nu",
".",
"array",
"(",
"[",
"R",
"]",
")",
"vR",
"=",
"nu",
".",
"array",
"(",
"[",
"vR",
"]",
")",
"vT",
"=",
"nu",
".",
"array",
"(",
"[",
"vT",
"]",
")",
"z",
"=",
"nu",
".",
"array",
"(",
"[",
"z",
"]",
")",
"vz",
"=",
"nu",
".",
"array",
"(",
"[",
"vz",
"]",
")",
"Lz",
"=",
"R",
"*",
"vT",
"if",
"self",
".",
"_useu0",
":",
"#First calculate u0",
"if",
"'u0'",
"in",
"kwargs",
":",
"u0",
"=",
"nu",
".",
"asarray",
"(",
"kwargs",
"[",
"'u0'",
"]",
")",
"else",
":",
"E",
"=",
"nu",
".",
"array",
"(",
"[",
"_evaluatePotentials",
"(",
"self",
".",
"_pot",
",",
"R",
"[",
"ii",
"]",
",",
"z",
"[",
"ii",
"]",
")",
"+",
"vR",
"[",
"ii",
"]",
"**",
"2.",
"/",
"2.",
"+",
"vz",
"[",
"ii",
"]",
"**",
"2.",
"/",
"2.",
"+",
"vT",
"[",
"ii",
"]",
"**",
"2.",
"/",
"2.",
"for",
"ii",
"in",
"range",
"(",
"len",
"(",
"R",
")",
")",
"]",
")",
"u0",
"=",
"actionAngleStaeckel_c",
".",
"actionAngleStaeckel_calcu0",
"(",
"E",
",",
"Lz",
",",
"self",
".",
"_pot",
",",
"delta",
")",
"[",
"0",
"]",
"kwargs",
".",
"pop",
"(",
"'u0'",
",",
"None",
")",
"else",
":",
"u0",
"=",
"None",
"jr",
",",
"jz",
",",
"err",
"=",
"actionAngleStaeckel_c",
".",
"actionAngleStaeckel_c",
"(",
"self",
".",
"_pot",
",",
"delta",
",",
"R",
",",
"vR",
",",
"vT",
",",
"z",
",",
"vz",
",",
"u0",
"=",
"u0",
",",
"order",
"=",
"order",
")",
"if",
"err",
"==",
"0",
":",
"return",
"(",
"jr",
",",
"Lz",
",",
"jz",
")",
"else",
":",
"#pragma: no cover",
"raise",
"RuntimeError",
"(",
"\"C-code for calculation actions failed; try with c=False\"",
")",
"else",
":",
"if",
"'c'",
"in",
"kwargs",
"and",
"kwargs",
"[",
"'c'",
"]",
"and",
"not",
"self",
".",
"_c",
":",
"#pragma: no cover",
"warnings",
".",
"warn",
"(",
"\"C module not used because potential does not have a C implementation\"",
",",
"galpyWarning",
")",
"kwargs",
".",
"pop",
"(",
"'c'",
",",
"None",
")",
"if",
"(",
"len",
"(",
"args",
")",
"==",
"5",
"or",
"len",
"(",
"args",
")",
"==",
"6",
")",
"and",
"isinstance",
"(",
"args",
"[",
"0",
"]",
",",
"nu",
".",
"ndarray",
")",
":",
"ojr",
"=",
"nu",
".",
"zeros",
"(",
"(",
"len",
"(",
"args",
"[",
"0",
"]",
")",
")",
")",
"olz",
"=",
"nu",
".",
"zeros",
"(",
"(",
"len",
"(",
"args",
"[",
"0",
"]",
")",
")",
")",
"ojz",
"=",
"nu",
".",
"zeros",
"(",
"(",
"len",
"(",
"args",
"[",
"0",
"]",
")",
")",
")",
"for",
"ii",
"in",
"range",
"(",
"len",
"(",
"args",
"[",
"0",
"]",
")",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"5",
":",
"targs",
"=",
"(",
"args",
"[",
"0",
"]",
"[",
"ii",
"]",
",",
"args",
"[",
"1",
"]",
"[",
"ii",
"]",
",",
"args",
"[",
"2",
"]",
"[",
"ii",
"]",
",",
"args",
"[",
"3",
"]",
"[",
"ii",
"]",
",",
"args",
"[",
"4",
"]",
"[",
"ii",
"]",
")",
"elif",
"len",
"(",
"args",
")",
"==",
"6",
":",
"targs",
"=",
"(",
"args",
"[",
"0",
"]",
"[",
"ii",
"]",
",",
"args",
"[",
"1",
"]",
"[",
"ii",
"]",
",",
"args",
"[",
"2",
"]",
"[",
"ii",
"]",
",",
"args",
"[",
"3",
"]",
"[",
"ii",
"]",
",",
"args",
"[",
"4",
"]",
"[",
"ii",
"]",
",",
"args",
"[",
"5",
"]",
"[",
"ii",
"]",
")",
"tkwargs",
"=",
"copy",
".",
"copy",
"(",
"kwargs",
")",
"try",
":",
"tkwargs",
"[",
"'delta'",
"]",
"=",
"delta",
"[",
"ii",
"]",
"except",
"TypeError",
":",
"tkwargs",
"[",
"'delta'",
"]",
"=",
"delta",
"tjr",
",",
"tlz",
",",
"tjz",
"=",
"self",
"(",
"*",
"targs",
",",
"*",
"*",
"tkwargs",
")",
"ojr",
"[",
"ii",
"]",
"=",
"tjr",
"ojz",
"[",
"ii",
"]",
"=",
"tjz",
"olz",
"[",
"ii",
"]",
"=",
"tlz",
"return",
"(",
"ojr",
",",
"olz",
",",
"ojz",
")",
"else",
":",
"#Set up the actionAngleStaeckelSingle object",
"aASingle",
"=",
"actionAngleStaeckelSingle",
"(",
"*",
"args",
",",
"pot",
"=",
"self",
".",
"_pot",
",",
"delta",
"=",
"delta",
")",
"return",
"(",
"aASingle",
".",
"JR",
"(",
"*",
"*",
"copy",
".",
"copy",
"(",
"kwargs",
")",
")",
",",
"aASingle",
".",
"_R",
"*",
"aASingle",
".",
"_vT",
",",
"aASingle",
".",
"Jz",
"(",
"*",
"*",
"copy",
".",
"copy",
"(",
"kwargs",
")",
")",
")"
] | 49.747475 | 21.282828 |
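A hedged end-user sketch of the public call that reaches _evaluate; the potential and phase-space values are illustrative:

from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleStaeckel

aAS = actionAngleStaeckel(pot=MWPotential2014, delta=0.45, c=True)
jr, lz, jz = aAS(1.0, 0.1, 1.1, 0.05, 0.02)   # R, vR, vT, z, vz (natural units)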
def convert_dict_to_option_dict(input_dict):
"""Convert a simple key-value dictionary to a dictionary of options tuples"""
ret_dict = {}
for key, value in input_dict.items():
ret_dict[key] = convert_value_to_option_tuple(value)
return ret_dict
|
[
"def",
"convert_dict_to_option_dict",
"(",
"input_dict",
")",
":",
"ret_dict",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"input_dict",
".",
"items",
"(",
")",
":",
"ret_dict",
"[",
"key",
"]",
"=",
"convert_value_to_option_tuple",
"(",
"value",
")",
"return",
"ret_dict"
] | 43.666667 | 11.5 |
def get_serializable_data_for_fields(model):
"""
Return a serialised version of the model's fields which exist as local database
columns (i.e. excluding m2m and incoming foreign key relations)
"""
pk_field = model._meta.pk
# If model is a child via multitable inheritance, use parent's pk
while pk_field.remote_field and pk_field.remote_field.parent_link:
pk_field = pk_field.remote_field.model._meta.pk
obj = {'pk': get_field_value(pk_field, model)}
for field in model._meta.fields:
if field.serialize:
obj[field.name] = get_field_value(field, model)
return obj
|
[
"def",
"get_serializable_data_for_fields",
"(",
"model",
")",
":",
"pk_field",
"=",
"model",
".",
"_meta",
".",
"pk",
"# If model is a child via multitable inheritance, use parent's pk",
"while",
"pk_field",
".",
"remote_field",
"and",
"pk_field",
".",
"remote_field",
".",
"parent_link",
":",
"pk_field",
"=",
"pk_field",
".",
"remote_field",
".",
"model",
".",
"_meta",
".",
"pk",
"obj",
"=",
"{",
"'pk'",
":",
"get_field_value",
"(",
"pk_field",
",",
"model",
")",
"}",
"for",
"field",
"in",
"model",
".",
"_meta",
".",
"fields",
":",
"if",
"field",
".",
"serialize",
":",
"obj",
"[",
"field",
".",
"name",
"]",
"=",
"get_field_value",
"(",
"field",
",",
"model",
")",
"return",
"obj"
] | 36.294118 | 20.647059 |
def get_rating_metadata(self):
"""Gets the metadata for a rating.
return: (osid.Metadata) - metadata for the rating
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._mdata['rating'])
metadata.update({'existing_id_values': self._my_map['ratingId']})
return Metadata(**metadata)
|
[
"def",
"get_rating_metadata",
"(",
"self",
")",
":",
"# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template",
"metadata",
"=",
"dict",
"(",
"self",
".",
"_mdata",
"[",
"'rating'",
"]",
")",
"metadata",
".",
"update",
"(",
"{",
"'existing_id_values'",
":",
"self",
".",
"_my_map",
"[",
"'ratingId'",
"]",
"}",
")",
"return",
"Metadata",
"(",
"*",
"*",
"metadata",
")"
] | 41.363636 | 21.090909 |
def info(self, message, *args, **kwargs):
"""More important level : default for print and save
"""
self._log(logging.INFO, message, *args, **kwargs)
|
[
"def",
"info",
"(",
"self",
",",
"message",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_log",
"(",
"logging",
".",
"INFO",
",",
"message",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 42.25 | 4.5 |
def read(self, symbol, as_of=None, date_range=None, from_version=None, allow_secondary=None, **kwargs):
"""
Read data for the named symbol. Returns a VersionedItem object with
        a data and metadata element (as passed into write).
Parameters
----------
symbol : `str`
symbol name for the item
as_of : `str` or `int` or `datetime.datetime`
Return the data as it was as_of the point in time.
`int` : specific version number
`str` : snapshot name which contains the version
`datetime.datetime` : the version of the data that existed as_of the requested point in time
date_range: `arctic.date.DateRange`
DateRange to read data for. Applies to Pandas data, with a DateTime index
returns only the part of the data that falls in the DateRange.
allow_secondary : `bool` or `None`
Override the default behavior for allowing reads from secondary members of a cluster:
`None` : use the settings from the top-level `Arctic` object used to query this version store.
`True` : allow reads from secondary members
`False` : only allow reads from primary members
Returns
-------
VersionedItem namedtuple which contains a .data and .metadata element
"""
try:
read_preference = self._read_preference(allow_secondary)
_version = self._read_metadata(symbol, as_of=as_of, read_preference=read_preference)
return self._do_read(symbol, _version, from_version,
date_range=date_range, read_preference=read_preference, **kwargs)
except (OperationFailure, AutoReconnect) as e:
# Log the exception so we know how often this is happening
log_exception('read', e, 1)
# If we've failed to read from the secondary, then it's possible the
# secondary has lagged. In this case direct the query to the primary.
_version = mongo_retry(self._read_metadata)(symbol, as_of=as_of,
read_preference=ReadPreference.PRIMARY)
return self._do_read_retry(symbol, _version, from_version,
date_range=date_range,
read_preference=ReadPreference.PRIMARY,
**kwargs)
except Exception as e:
log_exception('read', e, 1)
raise
|
[
"def",
"read",
"(",
"self",
",",
"symbol",
",",
"as_of",
"=",
"None",
",",
"date_range",
"=",
"None",
",",
"from_version",
"=",
"None",
",",
"allow_secondary",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"read_preference",
"=",
"self",
".",
"_read_preference",
"(",
"allow_secondary",
")",
"_version",
"=",
"self",
".",
"_read_metadata",
"(",
"symbol",
",",
"as_of",
"=",
"as_of",
",",
"read_preference",
"=",
"read_preference",
")",
"return",
"self",
".",
"_do_read",
"(",
"symbol",
",",
"_version",
",",
"from_version",
",",
"date_range",
"=",
"date_range",
",",
"read_preference",
"=",
"read_preference",
",",
"*",
"*",
"kwargs",
")",
"except",
"(",
"OperationFailure",
",",
"AutoReconnect",
")",
"as",
"e",
":",
"# Log the exception so we know how often this is happening",
"log_exception",
"(",
"'read'",
",",
"e",
",",
"1",
")",
"# If we've failed to read from the secondary, then it's possible the",
"# secondary has lagged. In this case direct the query to the primary.",
"_version",
"=",
"mongo_retry",
"(",
"self",
".",
"_read_metadata",
")",
"(",
"symbol",
",",
"as_of",
"=",
"as_of",
",",
"read_preference",
"=",
"ReadPreference",
".",
"PRIMARY",
")",
"return",
"self",
".",
"_do_read_retry",
"(",
"symbol",
",",
"_version",
",",
"from_version",
",",
"date_range",
"=",
"date_range",
",",
"read_preference",
"=",
"ReadPreference",
".",
"PRIMARY",
",",
"*",
"*",
"kwargs",
")",
"except",
"Exception",
"as",
"e",
":",
"log_exception",
"(",
"'read'",
",",
"e",
",",
"1",
")",
"raise"
] | 54.804348 | 27.326087 |
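A hedged usage sketch of read(): it assumes a local MongoDB, an initialized arctic library named 'NASDAQ', and a previously written 'AAPL' symbol; none of these are given by the row itself.

from datetime import datetime as dt
from arctic import Arctic
from arctic.date import DateRange

library = Arctic('localhost')['NASDAQ']
item = library.read('AAPL',
                    date_range=DateRange(dt(2018, 1, 1), dt(2018, 6, 30)))
print(item.data.head(), item.metadata)   # VersionedItem fields from the docstring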
def getObjectsAspecting(self, point, aspList):
""" Returns a list of objects aspecting a point
considering a list of possible aspects.
"""
res = []
for obj in self:
if obj.isPlanet() and aspects.isAspecting(obj, point, aspList):
res.append(obj)
return ObjectList(res)
|
[
"def",
"getObjectsAspecting",
"(",
"self",
",",
"point",
",",
"aspList",
")",
":",
"res",
"=",
"[",
"]",
"for",
"obj",
"in",
"self",
":",
"if",
"obj",
".",
"isPlanet",
"(",
")",
"and",
"aspects",
".",
"isAspecting",
"(",
"obj",
",",
"point",
",",
"aspList",
")",
":",
"res",
".",
"append",
"(",
"obj",
")",
"return",
"ObjectList",
"(",
"res",
")"
] | 34.4 | 13.9 |
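A hedged usage sketch with flatlib (the library this method appears to come from): find the planets aspecting the Sun using the major aspects. The date, position, and constant names are assumptions, not taken from the row.

from flatlib import const
from flatlib.chart import Chart
from flatlib.datetime import Datetime
from flatlib.geopos import GeoPos

chart = Chart(Datetime('2015/03/13', '17:00', '+00:00'),
              GeoPos('38n32', '8w54'))
sun = chart.get(const.SUN)
aspecting = chart.objects.getObjectsAspecting(sun, const.MAJOR_ASPECTS)
print([obj.id for obj in aspecting])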
def _fit_island(self, island_data):
"""
Take an Island, do all the parameter estimation and fitting.
Parameters
----------
island_data : :class:`AegeanTools.models.IslandFittingData`
The island to be fit.
Returns
-------
sources : list
The sources that were fit.
"""
global_data = self.global_data
# global data
dcurve = global_data.dcurve
rmsimg = global_data.rmsimg
# island data
isle_num = island_data.isle_num
idata = island_data.i
innerclip, outerclip, max_summits = island_data.scalars
xmin, xmax, ymin, ymax = island_data.offsets
# get the beam parameters at the center of this island
midra, middec = global_data.wcshelper.pix2sky([0.5 * (xmax + xmin), 0.5 * (ymax + ymin)])
beam = global_data.psfhelper.get_psf_pix(midra, middec)
del middec, midra
icurve = dcurve[xmin:xmax, ymin:ymax]
rms = rmsimg[xmin:xmax, ymin:ymax]
is_flag = 0
pixbeam = global_data.psfhelper.get_pixbeam_pixel((xmin + xmax) / 2., (ymin + ymax) / 2.)
if pixbeam is None:
# This island is not 'on' the sky, ignore it
return []
self.log.debug("=====")
self.log.debug("Island ({0})".format(isle_num))
params = self.estimate_lmfit_parinfo(idata, rms, icurve, beam, innerclip, outerclip, offsets=[xmin, ymin],
max_summits=max_summits)
# islands at the edge of a region of nans
# result in no components
if params is None or params['components'].value < 1:
return []
self.log.debug("Rms is {0}".format(np.shape(rms)))
self.log.debug("Isle is {0}".format(np.shape(idata)))
self.log.debug(" of which {0} are masked".format(sum(np.isnan(idata).ravel() * 1)))
# Check that there is enough data to do the fit
mx, my = np.where(np.isfinite(idata))
non_blank_pix = len(mx)
free_vars = len([1 for a in params.keys() if params[a].vary])
if non_blank_pix < free_vars or free_vars == 0:
self.log.debug("Island {0} doesn't have enough pixels to fit the given model".format(isle_num))
self.log.debug("non_blank_pix {0}, free_vars {1}".format(non_blank_pix, free_vars))
result = DummyLM()
model = params
is_flag |= flags.NOTFIT
else:
# Model is the fitted parameters
fac = 1 / np.sqrt(2)
if self.global_data.docov:
C = Cmatrix(mx, my, pixbeam.a * FWHM2CC * fac, pixbeam.b * FWHM2CC * fac, pixbeam.pa)
B = Bmatrix(C)
else:
C = B = None
self.log.debug(
"C({0},{1},{2},{3},{4})".format(len(mx), len(my), pixbeam.a * FWHM2CC, pixbeam.b * FWHM2CC, pixbeam.pa))
errs = np.nanmax(rms)
self.log.debug("Initial params")
self.log.debug(params)
result, _ = do_lmfit(idata, params, B=B)
if not result.errorbars:
is_flag |= flags.FITERR
# get the real (sky) parameter errors
model = covar_errors(result.params, idata, errs=errs, B=B, C=C)
if self.global_data.dobias and self.global_data.docov:
x, y = np.indices(idata.shape)
acf = elliptical_gaussian(x, y, 1, 0, 0, pixbeam.a * FWHM2CC * fac, pixbeam.b * FWHM2CC * fac,
pixbeam.pa)
bias_correct(model, idata, acf=acf * errs ** 2)
if not result.success:
is_flag |= flags.FITERR
self.log.debug("Final params")
self.log.debug(model)
# convert the fitting results to a list of sources [and islands]
sources = self.result_to_components(result, model, island_data, is_flag)
return sources
|
[
"def",
"_fit_island",
"(",
"self",
",",
"island_data",
")",
":",
"global_data",
"=",
"self",
".",
"global_data",
"# global data",
"dcurve",
"=",
"global_data",
".",
"dcurve",
"rmsimg",
"=",
"global_data",
".",
"rmsimg",
"# island data",
"isle_num",
"=",
"island_data",
".",
"isle_num",
"idata",
"=",
"island_data",
".",
"i",
"innerclip",
",",
"outerclip",
",",
"max_summits",
"=",
"island_data",
".",
"scalars",
"xmin",
",",
"xmax",
",",
"ymin",
",",
"ymax",
"=",
"island_data",
".",
"offsets",
"# get the beam parameters at the center of this island",
"midra",
",",
"middec",
"=",
"global_data",
".",
"wcshelper",
".",
"pix2sky",
"(",
"[",
"0.5",
"*",
"(",
"xmax",
"+",
"xmin",
")",
",",
"0.5",
"*",
"(",
"ymax",
"+",
"ymin",
")",
"]",
")",
"beam",
"=",
"global_data",
".",
"psfhelper",
".",
"get_psf_pix",
"(",
"midra",
",",
"middec",
")",
"del",
"middec",
",",
"midra",
"icurve",
"=",
"dcurve",
"[",
"xmin",
":",
"xmax",
",",
"ymin",
":",
"ymax",
"]",
"rms",
"=",
"rmsimg",
"[",
"xmin",
":",
"xmax",
",",
"ymin",
":",
"ymax",
"]",
"is_flag",
"=",
"0",
"pixbeam",
"=",
"global_data",
".",
"psfhelper",
".",
"get_pixbeam_pixel",
"(",
"(",
"xmin",
"+",
"xmax",
")",
"/",
"2.",
",",
"(",
"ymin",
"+",
"ymax",
")",
"/",
"2.",
")",
"if",
"pixbeam",
"is",
"None",
":",
"# This island is not 'on' the sky, ignore it",
"return",
"[",
"]",
"self",
".",
"log",
".",
"debug",
"(",
"\"=====\"",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Island ({0})\"",
".",
"format",
"(",
"isle_num",
")",
")",
"params",
"=",
"self",
".",
"estimate_lmfit_parinfo",
"(",
"idata",
",",
"rms",
",",
"icurve",
",",
"beam",
",",
"innerclip",
",",
"outerclip",
",",
"offsets",
"=",
"[",
"xmin",
",",
"ymin",
"]",
",",
"max_summits",
"=",
"max_summits",
")",
"# islands at the edge of a region of nans",
"# result in no components",
"if",
"params",
"is",
"None",
"or",
"params",
"[",
"'components'",
"]",
".",
"value",
"<",
"1",
":",
"return",
"[",
"]",
"self",
".",
"log",
".",
"debug",
"(",
"\"Rms is {0}\"",
".",
"format",
"(",
"np",
".",
"shape",
"(",
"rms",
")",
")",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Isle is {0}\"",
".",
"format",
"(",
"np",
".",
"shape",
"(",
"idata",
")",
")",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\" of which {0} are masked\"",
".",
"format",
"(",
"sum",
"(",
"np",
".",
"isnan",
"(",
"idata",
")",
".",
"ravel",
"(",
")",
"*",
"1",
")",
")",
")",
"# Check that there is enough data to do the fit",
"mx",
",",
"my",
"=",
"np",
".",
"where",
"(",
"np",
".",
"isfinite",
"(",
"idata",
")",
")",
"non_blank_pix",
"=",
"len",
"(",
"mx",
")",
"free_vars",
"=",
"len",
"(",
"[",
"1",
"for",
"a",
"in",
"params",
".",
"keys",
"(",
")",
"if",
"params",
"[",
"a",
"]",
".",
"vary",
"]",
")",
"if",
"non_blank_pix",
"<",
"free_vars",
"or",
"free_vars",
"==",
"0",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"Island {0} doesn't have enough pixels to fit the given model\"",
".",
"format",
"(",
"isle_num",
")",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"non_blank_pix {0}, free_vars {1}\"",
".",
"format",
"(",
"non_blank_pix",
",",
"free_vars",
")",
")",
"result",
"=",
"DummyLM",
"(",
")",
"model",
"=",
"params",
"is_flag",
"|=",
"flags",
".",
"NOTFIT",
"else",
":",
"# Model is the fitted parameters",
"fac",
"=",
"1",
"/",
"np",
".",
"sqrt",
"(",
"2",
")",
"if",
"self",
".",
"global_data",
".",
"docov",
":",
"C",
"=",
"Cmatrix",
"(",
"mx",
",",
"my",
",",
"pixbeam",
".",
"a",
"*",
"FWHM2CC",
"*",
"fac",
",",
"pixbeam",
".",
"b",
"*",
"FWHM2CC",
"*",
"fac",
",",
"pixbeam",
".",
"pa",
")",
"B",
"=",
"Bmatrix",
"(",
"C",
")",
"else",
":",
"C",
"=",
"B",
"=",
"None",
"self",
".",
"log",
".",
"debug",
"(",
"\"C({0},{1},{2},{3},{4})\"",
".",
"format",
"(",
"len",
"(",
"mx",
")",
",",
"len",
"(",
"my",
")",
",",
"pixbeam",
".",
"a",
"*",
"FWHM2CC",
",",
"pixbeam",
".",
"b",
"*",
"FWHM2CC",
",",
"pixbeam",
".",
"pa",
")",
")",
"errs",
"=",
"np",
".",
"nanmax",
"(",
"rms",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Initial params\"",
")",
"self",
".",
"log",
".",
"debug",
"(",
"params",
")",
"result",
",",
"_",
"=",
"do_lmfit",
"(",
"idata",
",",
"params",
",",
"B",
"=",
"B",
")",
"if",
"not",
"result",
".",
"errorbars",
":",
"is_flag",
"|=",
"flags",
".",
"FITERR",
"# get the real (sky) parameter errors",
"model",
"=",
"covar_errors",
"(",
"result",
".",
"params",
",",
"idata",
",",
"errs",
"=",
"errs",
",",
"B",
"=",
"B",
",",
"C",
"=",
"C",
")",
"if",
"self",
".",
"global_data",
".",
"dobias",
"and",
"self",
".",
"global_data",
".",
"docov",
":",
"x",
",",
"y",
"=",
"np",
".",
"indices",
"(",
"idata",
".",
"shape",
")",
"acf",
"=",
"elliptical_gaussian",
"(",
"x",
",",
"y",
",",
"1",
",",
"0",
",",
"0",
",",
"pixbeam",
".",
"a",
"*",
"FWHM2CC",
"*",
"fac",
",",
"pixbeam",
".",
"b",
"*",
"FWHM2CC",
"*",
"fac",
",",
"pixbeam",
".",
"pa",
")",
"bias_correct",
"(",
"model",
",",
"idata",
",",
"acf",
"=",
"acf",
"*",
"errs",
"**",
"2",
")",
"if",
"not",
"result",
".",
"success",
":",
"is_flag",
"|=",
"flags",
".",
"FITERR",
"self",
".",
"log",
".",
"debug",
"(",
"\"Final params\"",
")",
"self",
".",
"log",
".",
"debug",
"(",
"model",
")",
"# convert the fitting results to a list of sources [and islands]",
"sources",
"=",
"self",
".",
"result_to_components",
"(",
"result",
",",
"model",
",",
"island_data",
",",
"is_flag",
")",
"return",
"sources"
] | 38.534653 | 22.871287 |
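The core of the fit above is the do_lmfit call over the island pixels. A stripped-down, self-contained sketch of the same idea with lmfit follows: a single circular Gaussian fit to a synthetic island, with no covariance weighting (the B/C matrices) and no flux-bias correction.

import numpy as np
from lmfit import Parameters, minimize

def residual(params, x, y, data):
    v = params.valuesdict()
    model = v['amp'] * np.exp(-((x - v['x0'])**2 + (y - v['y0'])**2)
                              / (2.0 * v['sigma']**2))
    return (data - model).ravel()

params = Parameters()
params.add('amp', value=0.5, min=0)
params.add('x0', value=4.0)
params.add('y0', value=6.0)
params.add('sigma', value=3.0, min=0.1)

y, x = np.indices((11, 11))
data = np.exp(-((x - 5.0)**2 + (y - 5.0)**2) / 8.0)   # synthetic island
result = minimize(residual, params, args=(x, y, data))
print(result.params['x0'].value, result.params['y0'].value)  # ~5.0, ~5.0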
def get_preorder_burn_info( outputs ):
"""
Given the set of outputs, find the fee sent
to our burn address. This is always the third output.
Return the fee and burn address on success as {'op_fee': ..., 'burn_address': ...}
Return None if not found
"""
if len(outputs) != 3:
# not a well-formed preorder
return None
op_fee = outputs[2]['value']
burn_address = None
try:
burn_address = virtualchain.script_hex_to_address(outputs[2]['script'])
assert burn_address
except:
log.error("Not a well-formed preorder burn: {}".format(outputs[2]['script']))
return None
return {'op_fee': op_fee, 'burn_address': burn_address}
|
[
"def",
"get_preorder_burn_info",
"(",
"outputs",
")",
":",
"if",
"len",
"(",
"outputs",
")",
"!=",
"3",
":",
"# not a well-formed preorder ",
"return",
"None",
"op_fee",
"=",
"outputs",
"[",
"2",
"]",
"[",
"'value'",
"]",
"burn_address",
"=",
"None",
"try",
":",
"burn_address",
"=",
"virtualchain",
".",
"script_hex_to_address",
"(",
"outputs",
"[",
"2",
"]",
"[",
"'script'",
"]",
")",
"assert",
"burn_address",
"except",
":",
"log",
".",
"error",
"(",
"\"Not a well-formed preorder burn: {}\"",
".",
"format",
"(",
"outputs",
"[",
"2",
"]",
"[",
"'script'",
"]",
")",
")",
"return",
"None",
"return",
"{",
"'op_fee'",
":",
"op_fee",
",",
"'burn_address'",
":",
"burn_address",
"}"
] | 29.5 | 22.333333 |
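For reference, a hypothetical shape of the outputs argument: three script/value dicts as virtualchain parses them out of a transaction. The hex scripts and amounts are placeholders only.

outputs = [
    {'script': '6a28...', 'value': 0},             # OP_RETURN payload (placeholder hex)
    {'script': '76a914...88ac', 'value': 100000},  # change output
    {'script': '76a914...88ac', 'value': 640000},  # burn output: always the third
]
# get_preorder_burn_info(outputs) would return
# {'op_fee': 640000, 'burn_address': <address decoded from outputs[2]['script']>}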
def do_unthrottle(self, *args):
"""
Remove the throughput limits for DQL that were set with 'throttle'
# Remove all limits
> unthrottle
# Remove the limit on total allowed throughput
> unthrottle total
# Remove the default limit
> unthrottle default
# Remove the limit on a table
> unthrottle mytable
# Remove the limit on a global index
> unthrottle mytable myindex
"""
if not args:
if promptyn("Are you sure you want to clear all throttles?"):
self.throttle.load({})
elif len(args) == 1:
tablename = args[0]
if tablename == "total":
self.throttle.set_total_limit()
elif tablename == "default":
self.throttle.set_default_limit()
else:
self.throttle.set_table_limit(tablename)
elif len(args) == 2:
tablename, indexname = args
self.throttle.set_index_limit(tablename, indexname)
else:
self.onecmd("help unthrottle")
self.conf["_throttle"] = self.throttle.save()
self.save_config()
|
[
"def",
"do_unthrottle",
"(",
"self",
",",
"*",
"args",
")",
":",
"if",
"not",
"args",
":",
"if",
"promptyn",
"(",
"\"Are you sure you want to clear all throttles?\"",
")",
":",
"self",
".",
"throttle",
".",
"load",
"(",
"{",
"}",
")",
"elif",
"len",
"(",
"args",
")",
"==",
"1",
":",
"tablename",
"=",
"args",
"[",
"0",
"]",
"if",
"tablename",
"==",
"\"total\"",
":",
"self",
".",
"throttle",
".",
"set_total_limit",
"(",
")",
"elif",
"tablename",
"==",
"\"default\"",
":",
"self",
".",
"throttle",
".",
"set_default_limit",
"(",
")",
"else",
":",
"self",
".",
"throttle",
".",
"set_table_limit",
"(",
"tablename",
")",
"elif",
"len",
"(",
"args",
")",
"==",
"2",
":",
"tablename",
",",
"indexname",
"=",
"args",
"self",
".",
"throttle",
".",
"set_index_limit",
"(",
"tablename",
",",
"indexname",
")",
"else",
":",
"self",
".",
"onecmd",
"(",
"\"help unthrottle\"",
")",
"self",
".",
"conf",
"[",
"\"_throttle\"",
"]",
"=",
"self",
".",
"throttle",
".",
"save",
"(",
")",
"self",
".",
"save_config",
"(",
")"
] | 34.117647 | 13.294118 |
def to_json(self):
"""
Serialize object to json dict
:return: dict
"""
data = dict()
data['InterlocutorId'] = self.id_
data['Text'] = self.text
data['Username'] = self.username
data['FirstName'] = self.first_name
data['LastName'] = self.last_name
return data
|
[
"def",
"to_json",
"(",
"self",
")",
":",
"data",
"=",
"dict",
"(",
")",
"data",
"[",
"'InterlocutorId'",
"]",
"=",
"self",
".",
"id_",
"data",
"[",
"'Text'",
"]",
"=",
"self",
".",
"text",
"data",
"[",
"'Username'",
"]",
"=",
"self",
".",
"username",
"data",
"[",
"'FirstName'",
"]",
"=",
"self",
".",
"first_name",
"data",
"[",
"'LastName'",
"]",
"=",
"self",
".",
"last_name",
"return",
"data"
] | 25.769231 | 10.538462 |
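A hedged round-trip sketch: assuming `interlocutor` is an instance of the class above, the returned dict is plain data and serializes directly.

import json
payload = interlocutor.to_json()   # hypothetical instance constructed elsewhere
print(json.dumps(payload))         # {"InterlocutorId": ..., "Text": ..., ...}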
def set_setting(key, value, qsettings=None):
"""Set value to QSettings based on key in InaSAFE scope.
:param key: Unique key for setting.
:type key: basestring
:param value: Value to be saved.
:type value: QVariant
:param qsettings: A custom QSettings to use. If it's not defined, it will
use the default one.
:type qsettings: qgis.PyQt.QtCore.QSettings
"""
full_key = '%s/%s' % (APPLICATION_NAME, key)
set_general_setting(full_key, value, qsettings)
|
[
"def",
"set_setting",
"(",
"key",
",",
"value",
",",
"qsettings",
"=",
"None",
")",
":",
"full_key",
"=",
"'%s/%s'",
"%",
"(",
"APPLICATION_NAME",
",",
"key",
")",
"set_general_setting",
"(",
"full_key",
",",
"value",
",",
"qsettings",
")"
] | 32.466667 | 15.6 |
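A hedged usage sketch; this only runs inside a QGIS/PyQt environment, and assumes set_setting is importable from its module.

from qgis.PyQt.QtCore import QSettings

qs = QSettings()                                    # default settings store
set_setting('defaultUserDirectory', '/tmp/inasafe', qsettings=qs)
# stored under '<APPLICATION_NAME>/defaultUserDirectory'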
def keep_types_s(s, types):
"""
Keep the given types from a string
Same as :meth:`keep_types` but does not use the :attr:`params`
dictionary
Parameters
----------
s: str
The string of the returns like section
types: list of str
The type identifiers to keep
Returns
-------
str
The modified string `s` with only the descriptions of `types`
"""
patt = '|'.join('(?<=\n)' + s + '\n(?s).+?\n(?=\S+|$)' for s in types)
return ''.join(re.findall(patt, '\n' + s.strip() + '\n')).rstrip()
|
[
"def",
"keep_types_s",
"(",
"s",
",",
"types",
")",
":",
"patt",
"=",
"'|'",
".",
"join",
"(",
"'(?<=\\n)'",
"+",
"s",
"+",
"'\\n(?s).+?\\n(?=\\S+|$)'",
"for",
"s",
"in",
"types",
")",
"return",
"''",
".",
"join",
"(",
"re",
".",
"findall",
"(",
"patt",
",",
"'\\n'",
"+",
"s",
".",
"strip",
"(",
")",
"+",
"'\\n'",
")",
")",
".",
"rstrip",
"(",
")"
] | 29.095238 | 22.142857 |
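A worked example, assuming keep_types_s is importable: the pattern keeps each block that starts with a type identifier on its own line, up to the next non-indented line.

s = ("int\n"
     "    The number of matches\n"
     "str\n"
     "    The joined string\n")
print(keep_types_s(s, ['int']))
# prints:
# int
#     The number of matches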
def measure(function, xs, ys, popt, weights):
"""
measure the quality of a fit
"""
m = 0
n = 0
for x in xs:
try:
if len(popt) == 2:
m += (ys[n] - function(x, popt[0], popt[1]))**2 * weights[n]
elif len(popt) == 3:
m += (ys[n] - function(x, popt[0], popt[1], popt[2]))**2 * weights[n]
else:
raise NotImplementedError
n += 1
except IndexError:
raise RuntimeError('y does not exist for x = ', x, ' this should not happen')
return m
|
[
"def",
"measure",
"(",
"function",
",",
"xs",
",",
"ys",
",",
"popt",
",",
"weights",
")",
":",
"m",
"=",
"0",
"n",
"=",
"0",
"for",
"x",
"in",
"xs",
":",
"try",
":",
"if",
"len",
"(",
"popt",
")",
"==",
"2",
":",
"m",
"+=",
"(",
"ys",
"[",
"n",
"]",
"-",
"function",
"(",
"x",
",",
"popt",
"[",
"0",
"]",
",",
"popt",
"[",
"1",
"]",
")",
")",
"**",
"2",
"*",
"weights",
"[",
"n",
"]",
"elif",
"len",
"(",
"popt",
")",
"==",
"3",
":",
"m",
"+=",
"(",
"ys",
"[",
"n",
"]",
"-",
"function",
"(",
"x",
",",
"popt",
"[",
"0",
"]",
",",
"popt",
"[",
"1",
"]",
",",
"popt",
"[",
"2",
"]",
")",
")",
"**",
"2",
"*",
"weights",
"[",
"n",
"]",
"else",
":",
"raise",
"NotImplementedError",
"n",
"+=",
"1",
"except",
"IndexError",
":",
"raise",
"RuntimeError",
"(",
"'y does not exist for x = '",
",",
"x",
",",
"' this should not happen'",
")",
"return",
"m"
] | 29.631579 | 21.210526 |
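A worked example, assuming measure is importable: the weighted sum of squared residuals for a two-parameter line fit.

def line(x, a, b):
    return a * x + b

xs, ys = [0, 1, 2], [1.0, 3.1, 4.9]
print(measure(line, xs, ys, popt=[2.0, 1.0], weights=[1.0, 1.0, 1.0]))
# -> ~0.02, i.e. 0.0**2 + 0.1**2 + (-0.1)**2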
def get_metadata(url, validate_cert=True):
"""
Gets the metadata XML from the provided URL
:param url: Url where the XML of the Identity Provider Metadata is published.
:type url: string
    :param validate_cert: If the url uses the https scheme, this flag enables or disables verification of the associated certificate.
:type validate_cert: bool
:returns: metadata XML
:rtype: string
"""
valid = False
if validate_cert:
response = urllib2.urlopen(url)
else:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
response = urllib2.urlopen(url, context=ctx)
xml = response.read()
if xml:
try:
dom = fromstring(xml, forbid_dtd=True)
idp_descriptor_nodes = OneLogin_Saml2_Utils.query(dom, '//md:IDPSSODescriptor')
if idp_descriptor_nodes:
valid = True
except Exception:
pass
if not valid:
raise Exception('Not valid IdP XML found from URL: %s' % (url))
return xml
|
[
"def",
"get_metadata",
"(",
"url",
",",
"validate_cert",
"=",
"True",
")",
":",
"valid",
"=",
"False",
"if",
"validate_cert",
":",
"response",
"=",
"urllib2",
".",
"urlopen",
"(",
"url",
")",
"else",
":",
"ctx",
"=",
"ssl",
".",
"create_default_context",
"(",
")",
"ctx",
".",
"check_hostname",
"=",
"False",
"ctx",
".",
"verify_mode",
"=",
"ssl",
".",
"CERT_NONE",
"response",
"=",
"urllib2",
".",
"urlopen",
"(",
"url",
",",
"context",
"=",
"ctx",
")",
"xml",
"=",
"response",
".",
"read",
"(",
")",
"if",
"xml",
":",
"try",
":",
"dom",
"=",
"fromstring",
"(",
"xml",
",",
"forbid_dtd",
"=",
"True",
")",
"idp_descriptor_nodes",
"=",
"OneLogin_Saml2_Utils",
".",
"query",
"(",
"dom",
",",
"'//md:IDPSSODescriptor'",
")",
"if",
"idp_descriptor_nodes",
":",
"valid",
"=",
"True",
"except",
"Exception",
":",
"pass",
"if",
"not",
"valid",
":",
"raise",
"Exception",
"(",
"'Not valid IdP XML found from URL: %s'",
"%",
"(",
"url",
")",
")",
"return",
"xml"
] | 32.527778 | 21.527778 |
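A hedged usage sketch; the URL is illustrative and validate_cert=False is only appropriate against a self-signed test IdP.

idp_xml = get_metadata('https://idp.example.com/saml/metadata',
                       validate_cert=False)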
def get_points(recording):
"""
Get one point for each stroke in a recording. The point represents the
    stroke's spatial position (e.g. the center of the bounding box).
Parameters
----------
recording : list of strokes
Returns
-------
list :
points
"""
points = []
for stroke in recording:
point = geometry.get_bounding_box(stroke).get_center()
points.append(point)
return points
|
[
"def",
"get_points",
"(",
"recording",
")",
":",
"points",
"=",
"[",
"]",
"for",
"stroke",
"in",
"recording",
":",
"point",
"=",
"geometry",
".",
"get_bounding_box",
"(",
"stroke",
")",
".",
"get_center",
"(",
")",
"points",
".",
"append",
"(",
"point",
")",
"return",
"points"
] | 22.947368 | 22.315789 |
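A hypothetical input shape for the function above: a recording is a list of strokes, each a list of {'x', 'y', 'time'} point dicts, which is the form this handwriting toolkit uses elsewhere.

recording = [
    [{'x': 0, 'y': 0, 'time': 0}, {'x': 10, 'y': 10, 'time': 1}],
    [{'x': 20, 'y': 0, 'time': 2}, {'x': 30, 'y': 5, 'time': 3}],
]
# get_points(recording) -> one bounding-box center per stroke,
# here roughly (5, 5) and (25, 2.5).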