def text2sentences(text, labels):
    '''
    Splits given text at predicted positions from `labels`
    '''
    sentence = ''
    for i, label in enumerate(labels):
        if label == '1':
            if sentence:
                yield sentence
            sentence = ''
        else:
            sentence += text[i]
    if sentence:
        yield sentence

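# A quick usage sketch (hypothetical input): a '1' in `labels` marks a split
# position, and the character at that position is dropped from the output.
text = "Hello World"
labels = "00000100000"
print(list(text2sentences(text, labels)))  # -> ['Hello', 'World']
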
"def",
"text2sentences",
"(",
"text",
",",
"labels",
")",
":",
"sentence",
"=",
"''",
"for",
"i",
",",
"label",
"in",
"enumerate",
"(",
"labels",
")",
":",
"if",
"label",
"==",
"'1'",
":",
"if",
"sentence",
":",
"yield",
"sentence",
"sentence",
"=",
"''",
"else",
":",
"sentence",
"+=",
"text",
"[",
"i",
"]",
"if",
"sentence",
":",
"yield",
"sentence"
]
| 24.642857 | 17.928571 |
def contraction_conical_Crane(Di1, Di2, l=None, angle=None):
    r'''Returns loss coefficient for a conical pipe contraction
    as shown in Crane TP 410M [1]_ between 0 and 180 degrees.

    If :math:`\theta < 45^{\circ}`:

    .. math::
        K_2 = {0.8 \sin \frac{\theta}{2}(1 - \beta^2)}

    otherwise:

    .. math::
        K_2 = {0.5\sqrt{\sin \frac{\theta}{2}} (1 - \beta^2)}

    .. math::
        \beta = d_2/d_1

    Parameters
    ----------
    Di1 : float
        Inside pipe diameter of the larger, upstream, pipe, [m]
    Di2 : float
        Inside pipe diameter of the smaller, downstream, pipe, [m]
    l : float, optional
        Length of the contraction, [m]
    angle : float, optional
        Angle of contraction, [degrees]

    Returns
    -------
    K : float
        Loss coefficient in terms of the following (smaller) pipe [-]

    Notes
    -----
    Cheap and has substantial impact on pressure drop. Note that the
    nomenclature in [1]_ is somewhat different - the smaller pipe is called 1,
    and the larger pipe is called 2; and so the beta ratio is reversed, and the
    fourth power of beta used in their equation is not necessary.

    Examples
    --------
    >>> contraction_conical_Crane(Di1=0.0779, Di2=0.0525, l=0)
    0.2729017979998056

    References
    ----------
    .. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane,
       2009.
    '''
    if l is None and angle is None:
        raise Exception('One of `l` or `angle` must be specified')
    beta = Di2/Di1
    beta2 = beta*beta
    if angle is not None:
        angle = radians(angle)
        # l = (Di1 - Di2)/(2.0*tan(0.5*angle))  # l is not needed in this calculation
    elif l is not None:
        try:
            angle = 2.0*atan((Di1-Di2)/(2.0*l))
        except ZeroDivisionError:
            angle = pi
    if angle < 0.25*pi:
        # Formula 1
        K2 = 0.8*sin(0.5*angle)*(1.0 - beta2)
    else:
        # Formula 2
        K2 = 0.5*(sin(0.5*angle)**0.5*(1.0 - beta2))
    return K2

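# Illustrative call with an explicit angle (value derived from Formula 1
# above, not a published doctest): beta = 0.0525/0.0779 and 30 deg < 45 deg.
from math import radians, sin

beta2 = (0.0525 / 0.0779) ** 2
K2 = 0.8 * sin(0.5 * radians(30.0)) * (1.0 - beta2)
print(round(K2, 3))  # -> 0.113, same as contraction_conical_Crane(0.0779, 0.0525, angle=30.0)
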
"def",
"contraction_conical_Crane",
"(",
"Di1",
",",
"Di2",
",",
"l",
"=",
"None",
",",
"angle",
"=",
"None",
")",
":",
"if",
"l",
"is",
"None",
"and",
"angle",
"is",
"None",
":",
"raise",
"Exception",
"(",
"'One of `l` or `angle` must be specified'",
")",
"beta",
"=",
"Di2",
"/",
"Di1",
"beta2",
"=",
"beta",
"*",
"beta",
"if",
"angle",
"is",
"not",
"None",
":",
"angle",
"=",
"radians",
"(",
"angle",
")",
"#l = (Di1 - Di2)/(2.0*tan(0.5*angle)) # L is not needed in this calculation",
"elif",
"l",
"is",
"not",
"None",
":",
"try",
":",
"angle",
"=",
"2.0",
"*",
"atan",
"(",
"(",
"Di1",
"-",
"Di2",
")",
"/",
"(",
"2.0",
"*",
"l",
")",
")",
"except",
"ZeroDivisionError",
":",
"angle",
"=",
"pi",
"if",
"angle",
"<",
"0.25",
"*",
"pi",
":",
"# Formula 1",
"K2",
"=",
"0.8",
"*",
"sin",
"(",
"0.5",
"*",
"angle",
")",
"*",
"(",
"1.0",
"-",
"beta2",
")",
"else",
":",
"# Formula 2",
"K2",
"=",
"0.5",
"*",
"(",
"sin",
"(",
"0.5",
"*",
"angle",
")",
"**",
"0.5",
"*",
"(",
"1.0",
"-",
"beta2",
")",
")",
"return",
"K2"
]
| 28.913043 | 25.115942 |
def run(self):
    """Continuously scan for BLE advertisements."""
    self.socket = self.bluez.hci_open_dev(self.bt_device_id)
    filtr = self.bluez.hci_filter_new()
    self.bluez.hci_filter_all_events(filtr)
    self.bluez.hci_filter_set_ptype(filtr, self.bluez.HCI_EVENT_PKT)
    self.socket.setsockopt(self.bluez.SOL_HCI, self.bluez.HCI_FILTER, filtr)
    self.set_scan_parameters()
    self.toggle_scan(True)
    while self.keep_going:
        pkt = self.socket.recv(255)
        event = to_int(pkt[1])
        subevent = to_int(pkt[3])
        if event == LE_META_EVENT and subevent == EVT_LE_ADVERTISING_REPORT:
            # we have a BLE advertisement
            self.process_packet(pkt)
    self.socket.close()

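# The loop above assumes a `to_int` helper that is not shown in this snippet;
# a minimal sketch that handles both Python 2 (str bytes) and Python 3 (int
# bytes) could be:
def to_int(byte):
    """Coerce one byte of an HCI packet to an int."""
    return byte if isinstance(byte, int) else ord(byte)
# The offsets follow the HCI event packet layout: pkt[1] is the event code
# and, for LE Meta events, pkt[3] is the sub-event code.
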
"def",
"run",
"(",
"self",
")",
":",
"self",
".",
"socket",
"=",
"self",
".",
"bluez",
".",
"hci_open_dev",
"(",
"self",
".",
"bt_device_id",
")",
"filtr",
"=",
"self",
".",
"bluez",
".",
"hci_filter_new",
"(",
")",
"self",
".",
"bluez",
".",
"hci_filter_all_events",
"(",
"filtr",
")",
"self",
".",
"bluez",
".",
"hci_filter_set_ptype",
"(",
"filtr",
",",
"self",
".",
"bluez",
".",
"HCI_EVENT_PKT",
")",
"self",
".",
"socket",
".",
"setsockopt",
"(",
"self",
".",
"bluez",
".",
"SOL_HCI",
",",
"self",
".",
"bluez",
".",
"HCI_FILTER",
",",
"filtr",
")",
"self",
".",
"set_scan_parameters",
"(",
")",
"self",
".",
"toggle_scan",
"(",
"True",
")",
"while",
"self",
".",
"keep_going",
":",
"pkt",
"=",
"self",
".",
"socket",
".",
"recv",
"(",
"255",
")",
"event",
"=",
"to_int",
"(",
"pkt",
"[",
"1",
"]",
")",
"subevent",
"=",
"to_int",
"(",
"pkt",
"[",
"3",
"]",
")",
"if",
"event",
"==",
"LE_META_EVENT",
"and",
"subevent",
"==",
"EVT_LE_ADVERTISING_REPORT",
":",
"# we have an BLE advertisement",
"self",
".",
"process_packet",
"(",
"pkt",
")",
"self",
".",
"socket",
".",
"close",
"(",
")"
]
| 38.55 | 17.35 |
def _update_rr_ce_entry(self, rec):
    # type: (dr.DirectoryRecord) -> int
    '''
    An internal method to update the Rock Ridge CE entry for the given
    record.

    Parameters:
     rec - The record to update the Rock Ridge CE entry for (if it exists).
    Returns:
     The number of additional bytes needed for this Rock Ridge CE entry.
    '''
    if rec.rock_ridge is not None and rec.rock_ridge.dr_entries.ce_record is not None:
        celen = rec.rock_ridge.dr_entries.ce_record.len_cont_area
        added_block, block, offset = self.pvd.add_rr_ce_entry(celen)
        rec.rock_ridge.update_ce_block(block)
        rec.rock_ridge.dr_entries.ce_record.update_offset(offset)
        if added_block:
            return self.pvd.logical_block_size()
    return 0

"def",
"_update_rr_ce_entry",
"(",
"self",
",",
"rec",
")",
":",
"# type: (dr.DirectoryRecord) -> int",
"if",
"rec",
".",
"rock_ridge",
"is",
"not",
"None",
"and",
"rec",
".",
"rock_ridge",
".",
"dr_entries",
".",
"ce_record",
"is",
"not",
"None",
":",
"celen",
"=",
"rec",
".",
"rock_ridge",
".",
"dr_entries",
".",
"ce_record",
".",
"len_cont_area",
"added_block",
",",
"block",
",",
"offset",
"=",
"self",
".",
"pvd",
".",
"add_rr_ce_entry",
"(",
"celen",
")",
"rec",
".",
"rock_ridge",
".",
"update_ce_block",
"(",
"block",
")",
"rec",
".",
"rock_ridge",
".",
"dr_entries",
".",
"ce_record",
".",
"update_offset",
"(",
"offset",
")",
"if",
"added_block",
":",
"return",
"self",
".",
"pvd",
".",
"logical_block_size",
"(",
")",
"return",
"0"
]
| 41.15 | 26.15 |
def image_id_from_registry(image_name):
    """Get the docker id from a public or private registry"""
    registry, repository, tag = parse(image_name)
    try:
        token = auth_token(registry, repository).get("token")
        # dockerhub is crazy
        if registry == "index.docker.io":
            registry = "registry-1.docker.io"
        res = requests.head("https://{}/v2/{}/manifests/{}".format(registry, repository, tag), headers={
            "Authorization": "Bearer {}".format(token),
            "Accept": "application/vnd.docker.distribution.manifest.v2+json"
        }, timeout=5)
        res.raise_for_status()
    except requests.RequestException as err:
        # `res` may not exist yet if the token request failed, so log the
        # exception itself rather than the (possibly unbound) response.
        log.error("Received {} when attempting to get digest for {}".format(
            err, image_name))
        return None
    return "@".join([registry + "/" + repository, res.headers["Docker-Content-Digest"]])

"def",
"image_id_from_registry",
"(",
"image_name",
")",
":",
"registry",
",",
"repository",
",",
"tag",
"=",
"parse",
"(",
"image_name",
")",
"try",
":",
"token",
"=",
"auth_token",
"(",
"registry",
",",
"repository",
")",
".",
"get",
"(",
"\"token\"",
")",
"# dockerhub is crazy",
"if",
"registry",
"==",
"\"index.docker.io\"",
":",
"registry",
"=",
"\"registry-1.docker.io\"",
"res",
"=",
"requests",
".",
"head",
"(",
"\"https://{}/v2/{}/manifests/{}\"",
".",
"format",
"(",
"registry",
",",
"repository",
",",
"tag",
")",
",",
"headers",
"=",
"{",
"\"Authorization\"",
":",
"\"Bearer {}\"",
".",
"format",
"(",
"token",
")",
",",
"\"Accept\"",
":",
"\"application/vnd.docker.distribution.manifest.v2+json\"",
"}",
",",
"timeout",
"=",
"5",
")",
"res",
".",
"raise_for_status",
"(",
")",
"except",
"requests",
".",
"RequestException",
":",
"log",
".",
"error",
"(",
"\"Received {} when attempting to get digest for {}\"",
".",
"format",
"(",
"res",
",",
"image_name",
")",
")",
"return",
"None",
"return",
"\"@\"",
".",
"join",
"(",
"[",
"registry",
"+",
"\"/\"",
"+",
"repository",
",",
"res",
".",
"headers",
"[",
"\"Docker-Content-Digest\"",
"]",
"]",
")"
]
| 47.944444 | 18.888889 |
def _set_conf(self):
    """
    Set configuration parameters from the Conf object into the detector
    object.

    Time values are converted to samples, and amplitude values are in mV.
    """
    self.rr_init = 60 * self.fs / self.conf.hr_init
    self.rr_max = 60 * self.fs / self.conf.hr_min
    self.rr_min = 60 * self.fs / self.conf.hr_max
    # Note: if qrs_width is odd, qrs_width == qrs_radius*2 + 1
    self.qrs_width = int(self.conf.qrs_width * self.fs)
    self.qrs_radius = int(self.conf.qrs_radius * self.fs)
    self.qrs_thr_init = self.conf.qrs_thr_init
    self.qrs_thr_min = self.conf.qrs_thr_min
    self.ref_period = int(self.conf.ref_period * self.fs)
    self.t_inspect_period = int(self.conf.t_inspect_period * self.fs)

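# Worked example of the time-to-samples conversion (illustrative numbers,
# not defaults from the source):
fs = 250        # sampling frequency in Hz
hr_init = 75    # initial heart rate in beats per minute
rr_init = 60 * fs / hr_init
print(rr_init)  # -> 200.0 samples between beats
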
"def",
"_set_conf",
"(",
"self",
")",
":",
"self",
".",
"rr_init",
"=",
"60",
"*",
"self",
".",
"fs",
"/",
"self",
".",
"conf",
".",
"hr_init",
"self",
".",
"rr_max",
"=",
"60",
"*",
"self",
".",
"fs",
"/",
"self",
".",
"conf",
".",
"hr_min",
"self",
".",
"rr_min",
"=",
"60",
"*",
"self",
".",
"fs",
"/",
"self",
".",
"conf",
".",
"hr_max",
"# Note: if qrs_width is odd, qrs_width == qrs_radius*2 + 1",
"self",
".",
"qrs_width",
"=",
"int",
"(",
"self",
".",
"conf",
".",
"qrs_width",
"*",
"self",
".",
"fs",
")",
"self",
".",
"qrs_radius",
"=",
"int",
"(",
"self",
".",
"conf",
".",
"qrs_radius",
"*",
"self",
".",
"fs",
")",
"self",
".",
"qrs_thr_init",
"=",
"self",
".",
"conf",
".",
"qrs_thr_init",
"self",
".",
"qrs_thr_min",
"=",
"self",
".",
"conf",
".",
"qrs_thr_min",
"self",
".",
"ref_period",
"=",
"int",
"(",
"self",
".",
"conf",
".",
"ref_period",
"*",
"self",
".",
"fs",
")",
"self",
".",
"t_inspect_period",
"=",
"int",
"(",
"self",
".",
"conf",
".",
"t_inspect_period",
"*",
"self",
".",
"fs",
")"
]
| 39.4 | 22.8 |
def _notify_add_at(self, index, length=1):
    """Notify about an AddChange at a certain index and length."""
    slice_ = self._slice_at(index, length)
    self._notify_add(slice_)

"def",
"_notify_add_at",
"(",
"self",
",",
"index",
",",
"length",
"=",
"1",
")",
":",
"slice_",
"=",
"self",
".",
"_slice_at",
"(",
"index",
",",
"length",
")",
"self",
".",
"_notify_add",
"(",
"slice_",
")"
]
| 47.75 | 4 |
def _run_configure(subcmd, args):
    """Runs the configuration step for the specified sub-command.
    """
    maps = {
        "packages": _conf_packages
    }
    if subcmd in maps:
        maps[subcmd](args)
    else:
        msg.warn("'configure' sub-command {} is not supported.".format(subcmd))

"def",
"_run_configure",
"(",
"subcmd",
",",
"args",
")",
":",
"maps",
"=",
"{",
"\"packages\"",
":",
"_conf_packages",
"}",
"if",
"subcmd",
"in",
"maps",
":",
"maps",
"[",
"subcmd",
"]",
"(",
"args",
")",
"else",
":",
"msg",
".",
"warn",
"(",
"\"'configure' sub-command {} is not supported.\"",
".",
"format",
"(",
"subcmd",
")",
")"
]
| 29.6 | 17.4 |
def _move(self, speed=0, steering=0, seconds=None):
    """Move robot."""
    self.drive_queue.put((speed, steering))
    if seconds is not None:
        time.sleep(seconds)
        self.drive_queue.put((0, 0))
        self.drive_queue.join()

"def",
"_move",
"(",
"self",
",",
"speed",
"=",
"0",
",",
"steering",
"=",
"0",
",",
"seconds",
"=",
"None",
")",
":",
"self",
".",
"drive_queue",
".",
"put",
"(",
"(",
"speed",
",",
"steering",
")",
")",
"if",
"seconds",
"is",
"not",
"None",
":",
"time",
".",
"sleep",
"(",
"seconds",
")",
"self",
".",
"drive_queue",
".",
"put",
"(",
"(",
"0",
",",
"0",
")",
")",
"self",
".",
"drive_queue",
".",
"join",
"(",
")"
]
| 36.571429 | 6.428571 |
def from_api_repr(cls, resource):
    """Factory: construct a model reference given its API representation

    Args:
        resource (Dict[str, object]):
            Model reference representation returned from the API

    Returns:
        google.cloud.bigquery.model.ModelReference:
            Model reference parsed from ``resource``.
    """
    ref = cls()
    ref._proto = json_format.ParseDict(resource, types.ModelReference())
    return ref

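# Hedged usage sketch; the resource keys below follow the BigQuery REST
# resource shape (verify against the API reference):
resource = {"projectId": "my-project", "datasetId": "my_dataset", "modelId": "my_model"}
ref = ModelReference.from_api_repr(resource)
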
"def",
"from_api_repr",
"(",
"cls",
",",
"resource",
")",
":",
"ref",
"=",
"cls",
"(",
")",
"ref",
".",
"_proto",
"=",
"json_format",
".",
"ParseDict",
"(",
"resource",
",",
"types",
".",
"ModelReference",
"(",
")",
")",
"return",
"ref"
]
| 34.571429 | 19.857143 |
def update_pass(user_id, newpass):
    '''
    Update the password of a user.
    '''
    out_dic = {'success': False, 'code': '00'}
    entry = TabMember.update(user_pass=tools.md5(newpass)).where(TabMember.uid == user_id)
    entry.execute()
    out_dic['success'] = True
    return out_dic

"def",
"update_pass",
"(",
"user_id",
",",
"newpass",
")",
":",
"out_dic",
"=",
"{",
"'success'",
":",
"False",
",",
"'code'",
":",
"'00'",
"}",
"entry",
"=",
"TabMember",
".",
"update",
"(",
"user_pass",
"=",
"tools",
".",
"md5",
"(",
"newpass",
")",
")",
".",
"where",
"(",
"TabMember",
".",
"uid",
"==",
"user_id",
")",
"entry",
".",
"execute",
"(",
")",
"out_dic",
"[",
"'success'",
"]",
"=",
"True",
"return",
"out_dic"
]
| 24.307692 | 25.538462 |
def read(self):
    """
    Read the target value.
    Use the $project aggregate operator in order to support nested objects.
    """
    result = self.get_collection().aggregate([
        {'$match': {'_id': self._document_id}},
        {'$project': {'_value': '$' + self._path, '_id': False}}
    ])
    for doc in result:
        if '_value' not in doc:
            break
        return doc['_value']

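# For a nested path such as 'a.b', the $project stage built above becomes
# {'_value': '$a.b', '_id': False}; a hand-written equivalent for a
# hypothetical document {'_id': 1, 'a': {'b': 42}} would be:
pipeline = [
    {'$match': {'_id': 1}},
    {'$project': {'_value': '$a.b', '_id': False}},
]
# collection.aggregate(pipeline) yields {'_value': 42}
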
"def",
"read",
"(",
"self",
")",
":",
"result",
"=",
"self",
".",
"get_collection",
"(",
")",
".",
"aggregate",
"(",
"[",
"{",
"'$match'",
":",
"{",
"'_id'",
":",
"self",
".",
"_document_id",
"}",
"}",
",",
"{",
"'$project'",
":",
"{",
"'_value'",
":",
"'$'",
"+",
"self",
".",
"_path",
",",
"'_id'",
":",
"False",
"}",
"}",
"]",
")",
"for",
"doc",
"in",
"result",
":",
"if",
"'_value'",
"not",
"in",
"doc",
":",
"break",
"return",
"doc",
"[",
"'_value'",
"]"
]
| 28.866667 | 18.333333 |
def _find_flats_edges(self, data, mag, direction):
    """
    Extend flats 1 square downstream.
    Flats on the downstream side of the flat might find a valid angle,
    but that doesn't mean that it's a correct angle. We have to find
    these and then set them equal to a flat.
    """
    i12 = np.arange(data.size).reshape(data.shape)
    flat = mag == FLAT_ID_INT
    flats, n = spndi.label(flat, structure=FLATS_KERNEL3)
    objs = spndi.find_objects(flats)
    f = flat.ravel()
    d = data.ravel()
    for i, _obj in enumerate(objs):
        region = flats[_obj] == i + 1
        I = i12[_obj][region]
        J = get_adjacent_index(I, data.shape, data.size)
        f[J] = d[J] == d[I[0]]
    flat = f.reshape(data.shape)
    return flat

"def",
"_find_flats_edges",
"(",
"self",
",",
"data",
",",
"mag",
",",
"direction",
")",
":",
"i12",
"=",
"np",
".",
"arange",
"(",
"data",
".",
"size",
")",
".",
"reshape",
"(",
"data",
".",
"shape",
")",
"flat",
"=",
"mag",
"==",
"FLAT_ID_INT",
"flats",
",",
"n",
"=",
"spndi",
".",
"label",
"(",
"flat",
",",
"structure",
"=",
"FLATS_KERNEL3",
")",
"objs",
"=",
"spndi",
".",
"find_objects",
"(",
"flats",
")",
"f",
"=",
"flat",
".",
"ravel",
"(",
")",
"d",
"=",
"data",
".",
"ravel",
"(",
")",
"for",
"i",
",",
"_obj",
"in",
"enumerate",
"(",
"objs",
")",
":",
"region",
"=",
"flats",
"[",
"_obj",
"]",
"==",
"i",
"+",
"1",
"I",
"=",
"i12",
"[",
"_obj",
"]",
"[",
"region",
"]",
"J",
"=",
"get_adjacent_index",
"(",
"I",
",",
"data",
".",
"shape",
",",
"data",
".",
"size",
")",
"f",
"[",
"J",
"]",
"=",
"d",
"[",
"J",
"]",
"==",
"d",
"[",
"I",
"[",
"0",
"]",
"]",
"flat",
"=",
"f",
".",
"reshape",
"(",
"data",
".",
"shape",
")",
"return",
"flat"
]
| 33.375 | 15.708333 |
def move_odict_item(odict, key, newpos):
    """
    References:
        http://stackoverflow.com/questions/22663966/changing-order-of-ordered-dictionary-in-python

    CommandLine:
        python -m utool.util_dict --exec-move_odict_item

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_dict import *  # NOQA
        >>> import utool as ut
        >>> odict = OrderedDict()
        >>> odict['a'] = 1
        >>> odict['b'] = 2
        >>> odict['c'] = 3
        >>> odict['e'] = 5
        >>> print(ut.repr4(odict, nl=False))
        >>> move_odict_item(odict, 'c', 1)
        >>> print(ut.repr4(odict, nl=False))
        >>> move_odict_item(odict, 'a', 3)
        >>> print(ut.repr4(odict, nl=False))
        >>> move_odict_item(odict, 'a', 0)
        >>> print(ut.repr4(odict, nl=False))
        >>> move_odict_item(odict, 'b', 2)
        >>> result = ut.repr4(odict, nl=False)
        >>> print(result)
        {'a': 1, 'c': 3, 'b': 2, 'e': 5}
    """
    odict[key] = odict.pop(key)
    for i, otherkey in enumerate(list(odict.keys())):
        if otherkey != key and i >= newpos:
            odict[otherkey] = odict.pop(otherkey)
    return odict

"def",
"move_odict_item",
"(",
"odict",
",",
"key",
",",
"newpos",
")",
":",
"odict",
"[",
"key",
"]",
"=",
"odict",
".",
"pop",
"(",
"key",
")",
"for",
"i",
",",
"otherkey",
"in",
"enumerate",
"(",
"list",
"(",
"odict",
".",
"keys",
"(",
")",
")",
")",
":",
"if",
"otherkey",
"!=",
"key",
"and",
"i",
">=",
"newpos",
":",
"odict",
"[",
"otherkey",
"]",
"=",
"odict",
".",
"pop",
"(",
"otherkey",
")",
"return",
"odict"
]
| 33.588235 | 12.588235 |
def list_themes_user():
    """List user theme files."""
    themes = [*os.scandir(os.path.join(CONF_DIR, "colorschemes/dark/")),
              *os.scandir(os.path.join(CONF_DIR, "colorschemes/light/"))]
    return [t for t in themes if os.path.isfile(t.path)]

"def",
"list_themes_user",
"(",
")",
":",
"themes",
"=",
"[",
"*",
"os",
".",
"scandir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"CONF_DIR",
",",
"\"colorschemes/dark/\"",
")",
")",
",",
"*",
"os",
".",
"scandir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"CONF_DIR",
",",
"\"colorschemes/light/\"",
")",
")",
"]",
"return",
"[",
"t",
"for",
"t",
"in",
"themes",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"t",
".",
"path",
")",
"]"
]
| 51.2 | 19.6 |
def sum(x, weights=None):
    '''
    sum(x) yields either a potential-sum object if x is a potential function or the sum of x if x
    is not. If x is not a potential-field then it must be a vector.

    sum(x, weights=w) uses the given weights to produce a weighted sum.
    '''
    x = to_potential(x)
    if is_const_potential(x): return PotentialConstant(np.sum(x.c))
    else:                     return PotentialSum(x, weights=weights)

"def",
"sum",
"(",
"x",
",",
"weights",
"=",
"None",
")",
":",
"x",
"=",
"to_potential",
"(",
"x",
")",
"if",
"is_const_potential",
"(",
"x",
")",
":",
"return",
"PotentialConstant",
"(",
"np",
".",
"sum",
"(",
"x",
".",
"c",
")",
")",
"else",
":",
"return",
"PotentialSum",
"(",
"x",
",",
"weights",
"=",
"weights",
")"
]
| 48.333333 | 30.111111 |
def save_output(results, output_directory="output"):
    """
    Save report data in the given directory

    Args:
        results (OrderedDict): Parsing results
        output_directory: The path to the directory to save in
    """
    aggregate_reports = results["aggregate_reports"]
    forensic_reports = results["forensic_reports"]
    if os.path.exists(output_directory):
        if not os.path.isdir(output_directory):
            raise ValueError("{0} is not a directory".format(output_directory))
    else:
        os.makedirs(output_directory)
    with open(os.path.join(output_directory, "aggregate.json"),
              "w", newline="\n", encoding="utf-8") as agg_json:
        agg_json.write(json.dumps(aggregate_reports, ensure_ascii=False,
                                  indent=2))
    with open(os.path.join(output_directory, "aggregate.csv"),
              "w", newline="\n", encoding="utf-8") as agg_csv:
        csv = parsed_aggregate_reports_to_csv(aggregate_reports)
        agg_csv.write(csv)
    with open(os.path.join(output_directory, "forensic.json"),
              "w", newline="\n", encoding="utf-8") as for_json:
        for_json.write(json.dumps(forensic_reports, ensure_ascii=False,
                                  indent=2))
    with open(os.path.join(output_directory, "forensic.csv"),
              "w", newline="\n", encoding="utf-8") as for_csv:
        csv = parsed_forensic_reports_to_csv(forensic_reports)
        for_csv.write(csv)
    samples_directory = os.path.join(output_directory, "samples")
    if not os.path.exists(samples_directory):
        os.makedirs(samples_directory)
    sample_filenames = []
    for forensic_report in forensic_reports:
        sample = forensic_report["sample"]
        message_count = 0
        parsed_sample = forensic_report["parsed_sample"]
        subject = parsed_sample["filename_safe_subject"]
        filename = subject
        while filename in sample_filenames:
            message_count += 1
            filename = "{0} ({1})".format(subject, message_count)
        sample_filenames.append(filename)
        filename = "{0}.eml".format(filename)
        path = os.path.join(samples_directory, filename)
        with open(path, "w", newline="\n", encoding="utf-8") as sample_file:
            sample_file.write(sample)

"def",
"save_output",
"(",
"results",
",",
"output_directory",
"=",
"\"output\"",
")",
":",
"aggregate_reports",
"=",
"results",
"[",
"\"aggregate_reports\"",
"]",
"forensic_reports",
"=",
"results",
"[",
"\"forensic_reports\"",
"]",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"output_directory",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"output_directory",
")",
":",
"raise",
"ValueError",
"(",
"\"{0} is not a directory\"",
".",
"format",
"(",
"output_directory",
")",
")",
"else",
":",
"os",
".",
"makedirs",
"(",
"output_directory",
")",
"with",
"open",
"(",
"\"{0}\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"\"aggregate.json\"",
")",
")",
",",
"\"w\"",
",",
"newline",
"=",
"\"\\n\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"agg_json",
":",
"agg_json",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"aggregate_reports",
",",
"ensure_ascii",
"=",
"False",
",",
"indent",
"=",
"2",
")",
")",
"with",
"open",
"(",
"\"{0}\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"\"aggregate.csv\"",
")",
")",
",",
"\"w\"",
",",
"newline",
"=",
"\"\\n\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"agg_csv",
":",
"csv",
"=",
"parsed_aggregate_reports_to_csv",
"(",
"aggregate_reports",
")",
"agg_csv",
".",
"write",
"(",
"csv",
")",
"with",
"open",
"(",
"\"{0}\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"\"forensic.json\"",
")",
")",
",",
"\"w\"",
",",
"newline",
"=",
"\"\\n\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"for_json",
":",
"for_json",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"forensic_reports",
",",
"ensure_ascii",
"=",
"False",
",",
"indent",
"=",
"2",
")",
")",
"with",
"open",
"(",
"\"{0}\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"\"forensic.csv\"",
")",
")",
",",
"\"w\"",
",",
"newline",
"=",
"\"\\n\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"for_csv",
":",
"csv",
"=",
"parsed_forensic_reports_to_csv",
"(",
"forensic_reports",
")",
"for_csv",
".",
"write",
"(",
"csv",
")",
"samples_directory",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"\"samples\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"samples_directory",
")",
":",
"os",
".",
"makedirs",
"(",
"samples_directory",
")",
"sample_filenames",
"=",
"[",
"]",
"for",
"forensic_report",
"in",
"forensic_reports",
":",
"sample",
"=",
"forensic_report",
"[",
"\"sample\"",
"]",
"message_count",
"=",
"0",
"parsed_sample",
"=",
"forensic_report",
"[",
"\"parsed_sample\"",
"]",
"subject",
"=",
"parsed_sample",
"[",
"\"filename_safe_subject\"",
"]",
"filename",
"=",
"subject",
"while",
"filename",
"in",
"sample_filenames",
":",
"message_count",
"+=",
"1",
"filename",
"=",
"\"{0} ({1})\"",
".",
"format",
"(",
"subject",
",",
"message_count",
")",
"sample_filenames",
".",
"append",
"(",
"filename",
")",
"filename",
"=",
"\"{0}.eml\"",
".",
"format",
"(",
"filename",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"samples_directory",
",",
"filename",
")",
"with",
"open",
"(",
"path",
",",
"\"w\"",
",",
"newline",
"=",
"\"\\n\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"sample_file",
":",
"sample_file",
".",
"write",
"(",
"sample",
")"
]
| 38.65 | 20.816667 |
def find_and_remove(self, f: Callable):
    """
    Removes any and all fields for which `f(field)` returns `True`.
    """
    self._table = [fld for fld in self._table if not f(fld)]

"def",
"find_and_remove",
"(",
"self",
",",
"f",
":",
"Callable",
")",
":",
"self",
".",
"_table",
"=",
"[",
"fld",
"for",
"fld",
"in",
"self",
".",
"_table",
"if",
"not",
"f",
"(",
"fld",
")",
"]"
]
| 39.2 | 11.2 |
def vrange(start, stop, step=1, dtype='f8'):
    """Creates a virtual column which is the equivalent of numpy.arange, but uses 0 memory"""
    from .column import ColumnVirtualRange
    return ColumnVirtualRange(start, stop, step, dtype)

"def",
"vrange",
"(",
"start",
",",
"stop",
",",
"step",
"=",
"1",
",",
"dtype",
"=",
"'f8'",
")",
":",
"from",
".",
"column",
"import",
"ColumnVirtualRange",
"return",
"ColumnVirtualRange",
"(",
"start",
",",
"stop",
",",
"step",
",",
"dtype",
")"
]
| 58.5 | 5.25 |
def connectedVertices(self, index, returnIds=False):
    """Find all vertices connected to an input vertex specified by its index.

    :param bool returnIds: return vertex IDs instead of vertex coordinates.

    .. hint:: |connVtx| |connVtx.py|_
    """
    mesh = self.polydata()
    cellIdList = vtk.vtkIdList()
    mesh.GetPointCells(index, cellIdList)
    idxs = []
    for i in range(cellIdList.GetNumberOfIds()):
        pointIdList = vtk.vtkIdList()
        mesh.GetCellPoints(cellIdList.GetId(i), pointIdList)
        for j in range(pointIdList.GetNumberOfIds()):
            idj = pointIdList.GetId(j)
            if idj == index:
                continue
            if idj in idxs:
                continue
            idxs.append(idj)
    if returnIds:
        return idxs
    else:
        trgp = []
        for i in idxs:
            p = [0, 0, 0]
            mesh.GetPoints().GetPoint(i, p)
            trgp.append(p)
        return np.array(trgp)

"def",
"connectedVertices",
"(",
"self",
",",
"index",
",",
"returnIds",
"=",
"False",
")",
":",
"mesh",
"=",
"self",
".",
"polydata",
"(",
")",
"cellIdList",
"=",
"vtk",
".",
"vtkIdList",
"(",
")",
"mesh",
".",
"GetPointCells",
"(",
"index",
",",
"cellIdList",
")",
"idxs",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"cellIdList",
".",
"GetNumberOfIds",
"(",
")",
")",
":",
"pointIdList",
"=",
"vtk",
".",
"vtkIdList",
"(",
")",
"mesh",
".",
"GetCellPoints",
"(",
"cellIdList",
".",
"GetId",
"(",
"i",
")",
",",
"pointIdList",
")",
"for",
"j",
"in",
"range",
"(",
"pointIdList",
".",
"GetNumberOfIds",
"(",
")",
")",
":",
"idj",
"=",
"pointIdList",
".",
"GetId",
"(",
"j",
")",
"if",
"idj",
"==",
"index",
":",
"continue",
"if",
"idj",
"in",
"idxs",
":",
"continue",
"idxs",
".",
"append",
"(",
"idj",
")",
"if",
"returnIds",
":",
"return",
"idxs",
"else",
":",
"trgp",
"=",
"[",
"]",
"for",
"i",
"in",
"idxs",
":",
"p",
"=",
"[",
"0",
",",
"0",
",",
"0",
"]",
"mesh",
".",
"GetPoints",
"(",
")",
".",
"GetPoint",
"(",
"i",
",",
"p",
")",
"trgp",
".",
"append",
"(",
"p",
")",
"return",
"np",
".",
"array",
"(",
"trgp",
")"
]
| 31.575758 | 16.060606 |
def ind_zero_freq(self):
    """
    Index of the first point at which the frequency is greater than or
    equal to zero.
    """
    ind = np.searchsorted(self.frequencies, 0)
    if ind >= len(self.frequencies):
        raise ValueError("No positive frequencies found")
    return ind

"def",
"ind_zero_freq",
"(",
"self",
")",
":",
"ind",
"=",
"np",
".",
"searchsorted",
"(",
"self",
".",
"frequencies",
",",
"0",
")",
"if",
"ind",
">=",
"len",
"(",
"self",
".",
"frequencies",
")",
":",
"raise",
"ValueError",
"(",
"\"No positive frequencies found\"",
")",
"return",
"ind"
]
| 38 | 14.75 |
def ConvCnstrMODMaskOptions(opt=None, method='fista'):
    """A wrapper function that dynamically defines a class derived from
    the Options class associated with one of the implementations of
    the Convolutional Constrained MOD problem, and returns an object
    instantiated with the provided parameters. The wrapper is designed
    to allow the appropriate object to be created by calling this
    function using the same syntax as would be used if it were a
    class. The specific implementation is selected by use of an
    additional keyword argument 'method'. Valid values are as
    specified in the documentation for :func:`ConvCnstrMODMask`.
    """
    # Assign base class depending on method selection argument
    base = ccmodmsk_class_label_lookup(method).Options

    # Nested class with dynamically determined inheritance
    class ConvCnstrMODMaskOptions(base):
        def __init__(self, opt):
            super(ConvCnstrMODMaskOptions, self).__init__(opt)

    # Allow pickling of objects of type ConvCnstrMODMaskOptions
    _fix_dynamic_class_lookup(ConvCnstrMODMaskOptions, method)

    # Return object of the nested class type
    return ConvCnstrMODMaskOptions(opt)

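# The factory pattern above (pick a base class at call time, derive a class
# dynamically, return an instance) reduced to a self-contained sketch; all
# names below are hypothetical:
class FistaOptions(object):
    pass

class AdmmOptions(object):
    pass

_BACKENDS = {'fista': FistaOptions, 'admm': AdmmOptions}

def make_options(opt=None, method='fista'):
    base = _BACKENDS[method]       # select the base class at call time
    class Options(base):           # dynamically determined inheritance
        def __init__(self, opt):
            super(Options, self).__init__()
            self.opt = opt
    return Options(opt)
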
"def",
"ConvCnstrMODMaskOptions",
"(",
"opt",
"=",
"None",
",",
"method",
"=",
"'fista'",
")",
":",
"# Assign base class depending on method selection argument",
"base",
"=",
"ccmodmsk_class_label_lookup",
"(",
"method",
")",
".",
"Options",
"# Nested class with dynamically determined inheritance",
"class",
"ConvCnstrMODMaskOptions",
"(",
"base",
")",
":",
"def",
"__init__",
"(",
"self",
",",
"opt",
")",
":",
"super",
"(",
"ConvCnstrMODMaskOptions",
",",
"self",
")",
".",
"__init__",
"(",
"opt",
")",
"# Allow pickling of objects of type ConvCnstrMODMaskOptions",
"_fix_dynamic_class_lookup",
"(",
"ConvCnstrMODMaskOptions",
",",
"method",
")",
"# Return object of the nested class type",
"return",
"ConvCnstrMODMaskOptions",
"(",
"opt",
")"
]
| 46.8 | 20.4 |
def json(self):
    """Custom JSON encoder"""
    attributes = {
        'type': self.type,
        'filename': self.filename,
        'line_number': self.lineno,
        'hashed_secret': self.secret_hash,
    }
    if self.is_secret is not None:
        attributes['is_secret'] = self.is_secret
    return attributes

"def",
"json",
"(",
"self",
")",
":",
"attributes",
"=",
"{",
"'type'",
":",
"self",
".",
"type",
",",
"'filename'",
":",
"self",
".",
"filename",
",",
"'line_number'",
":",
"self",
".",
"lineno",
",",
"'hashed_secret'",
":",
"self",
".",
"secret_hash",
",",
"}",
"if",
"self",
".",
"is_secret",
"is",
"not",
"None",
":",
"attributes",
"[",
"'is_secret'",
"]",
"=",
"self",
".",
"is_secret",
"return",
"attributes"
]
| 26.692308 | 15.538462 |
def get(self, sid):
    """
    Constructs an InviteContext

    :param sid: The unique string that identifies the resource

    :returns: twilio.rest.chat.v2.service.channel.invite.InviteContext
    :rtype: twilio.rest.chat.v2.service.channel.invite.InviteContext
    """
    return InviteContext(
        self._version,
        service_sid=self._solution['service_sid'],
        channel_sid=self._solution['channel_sid'],
        sid=sid,
    )

"def",
"get",
"(",
"self",
",",
"sid",
")",
":",
"return",
"InviteContext",
"(",
"self",
".",
"_version",
",",
"service_sid",
"=",
"self",
".",
"_solution",
"[",
"'service_sid'",
"]",
",",
"channel_sid",
"=",
"self",
".",
"_solution",
"[",
"'channel_sid'",
"]",
",",
"sid",
"=",
"sid",
",",
")"
]
| 31.933333 | 20.2 |
def update_membership_memberships(self, group_id, membership_id, moderator=None, workflow_state=None):
    """
    Update a membership.

    Accept a membership request, or add/remove moderator rights.
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - group_id
    """ID"""
    path["group_id"] = group_id

    # REQUIRED - PATH - membership_id
    """ID"""
    path["membership_id"] = membership_id

    # OPTIONAL - workflow_state
    """Currently, the only allowed value is "accepted"."""
    if workflow_state is not None:
        self._validate_enum(workflow_state, ["accepted"])
        data["workflow_state"] = workflow_state

    # OPTIONAL - moderator
    """no description"""
    if moderator is not None:
        data["moderator"] = moderator

    self.logger.debug("PUT /api/v1/groups/{group_id}/memberships/{membership_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/groups/{group_id}/memberships/{membership_id}".format(**path), data=data, params=params, single_item=True)

"def",
"update_membership_memberships",
"(",
"self",
",",
"group_id",
",",
"membership_id",
",",
"moderator",
"=",
"None",
",",
"workflow_state",
"=",
"None",
")",
":",
"path",
"=",
"{",
"}",
"data",
"=",
"{",
"}",
"params",
"=",
"{",
"}",
"# REQUIRED - PATH - group_id\r",
"\"\"\"ID\"\"\"",
"path",
"[",
"\"group_id\"",
"]",
"=",
"group_id",
"# REQUIRED - PATH - membership_id\r",
"\"\"\"ID\"\"\"",
"path",
"[",
"\"membership_id\"",
"]",
"=",
"membership_id",
"# OPTIONAL - workflow_state\r",
"\"\"\"Currently, the only allowed value is \"accepted\"\"\"",
"\"",
"if",
"workflow_state",
"is",
"not",
"None",
":",
"self",
".",
"_validate_enum",
"(",
"workflow_state",
",",
"[",
"\"accepted\"",
"]",
")",
"data",
"[",
"\"workflow_state\"",
"]",
"=",
"workflow_state",
"# OPTIONAL - moderator\r",
"\"\"\"no description\"\"\"",
"if",
"moderator",
"is",
"not",
"None",
":",
"data",
"[",
"\"moderator\"",
"]",
"=",
"moderator",
"self",
".",
"logger",
".",
"debug",
"(",
"\"PUT /api/v1/groups/{group_id}/memberships/{membership_id} with query params: {params} and form data: {data}\"",
".",
"format",
"(",
"params",
"=",
"params",
",",
"data",
"=",
"data",
",",
"*",
"*",
"path",
")",
")",
"return",
"self",
".",
"generic_request",
"(",
"\"PUT\"",
",",
"\"/api/v1/groups/{group_id}/memberships/{membership_id}\"",
".",
"format",
"(",
"*",
"*",
"path",
")",
",",
"data",
"=",
"data",
",",
"params",
"=",
"params",
",",
"single_item",
"=",
"True",
")"
]
| 38.870968 | 23.483871 |
def _resolve_reference_sample(self, reference_samples=None,
                              service_uids=None):
    """Returns the reference sample from reference_samples passed in that best
    fits the service uid requirements. That is, the reference sample that
    covers most (or all) of the service uids passed in and has the fewest
    remaining service_uids.

    If no reference_samples are set, returns None.
    If no service_uids are set, returns the first reference sample.

    :param reference_samples: list of reference samples
    :param service_uids: list of service uids
    :return: a tuple of (best-fitting reference sample, supported uids)
    """
    if not reference_samples:
        return None, list()

    if not service_uids:
        # Since no service filtering has been defined, there is no need to
        # look for the best choice. Return the first one
        sample = reference_samples[0]
        spec_uids = sample.getSupportedServices(only_uids=True)
        return sample, spec_uids

    best_score = [0, 0]
    best_sample = None
    best_supported = None
    for sample in reference_samples:
        specs_uids = sample.getSupportedServices(only_uids=True)
        supported = [uid for uid in specs_uids if uid in service_uids]
        matches = len(supported)
        overlays = len(service_uids) - matches
        overlays = 0 if overlays < 0 else overlays
        if overlays == 0 and matches == len(service_uids):
            # Perfect match.. no need to go further
            return sample, supported

        if not best_sample \
                or matches > best_score[0] \
                or (matches == best_score[0] and overlays < best_score[1]):
            best_sample = sample
            best_score = [matches, overlays]
            best_supported = supported

    return best_sample, best_supported

"def",
"_resolve_reference_sample",
"(",
"self",
",",
"reference_samples",
"=",
"None",
",",
"service_uids",
"=",
"None",
")",
":",
"if",
"not",
"reference_samples",
":",
"return",
"None",
",",
"list",
"(",
")",
"if",
"not",
"service_uids",
":",
"# Since no service filtering has been defined, there is no need to",
"# look for the best choice. Return the first one",
"sample",
"=",
"reference_samples",
"[",
"0",
"]",
"spec_uids",
"=",
"sample",
".",
"getSupportedServices",
"(",
"only_uids",
"=",
"True",
")",
"return",
"sample",
",",
"spec_uids",
"best_score",
"=",
"[",
"0",
",",
"0",
"]",
"best_sample",
"=",
"None",
"best_supported",
"=",
"None",
"for",
"sample",
"in",
"reference_samples",
":",
"specs_uids",
"=",
"sample",
".",
"getSupportedServices",
"(",
"only_uids",
"=",
"True",
")",
"supported",
"=",
"[",
"uid",
"for",
"uid",
"in",
"specs_uids",
"if",
"uid",
"in",
"service_uids",
"]",
"matches",
"=",
"len",
"(",
"supported",
")",
"overlays",
"=",
"len",
"(",
"service_uids",
")",
"-",
"matches",
"overlays",
"=",
"0",
"if",
"overlays",
"<",
"0",
"else",
"overlays",
"if",
"overlays",
"==",
"0",
"and",
"matches",
"==",
"len",
"(",
"service_uids",
")",
":",
"# Perfect match.. no need to go further",
"return",
"sample",
",",
"supported",
"if",
"not",
"best_sample",
"or",
"matches",
">",
"best_score",
"[",
"0",
"]",
"or",
"(",
"matches",
"==",
"best_score",
"[",
"0",
"]",
"and",
"overlays",
"<",
"best_score",
"[",
"1",
"]",
")",
":",
"best_sample",
"=",
"sample",
"best_score",
"=",
"[",
"matches",
",",
"overlays",
"]",
"best_supported",
"=",
"supported",
"return",
"best_sample",
",",
"best_supported"
]
| 41.914894 | 18.914894 |
def _make_signature(self):
    """
    Create a signature for `execute` based on the minimizers this
    `ChainedMinimizer` was initiated with. For the format, see the docstring
    of :meth:`ChainedMinimizer.execute`.

    :return: :class:`inspect.Signature` instance.
    """
    # Create KEYWORD_ONLY arguments with the names of the minimizers.
    name = lambda x: x.__class__.__name__
    count = Counter(
        [name(minimizer) for minimizer in self.minimizers]
    )  # Count the number of each minimizer, they don't have to be unique

    # Note that these are inspect_sig.Parameter's, not symfit parameters!
    parameters = []
    for minimizer in reversed(self.minimizers):
        if count[name(minimizer)] == 1:
            # No ambiguity, so use the name directly.
            param_name = name(minimizer)
        else:
            # Ambiguity, so append the number of remaining minimizers
            param_name = '{}_{}'.format(name(minimizer), count[name(minimizer)])
        count[name(minimizer)] -= 1
        parameters.append(
            inspect_sig.Parameter(
                param_name,
                kind=inspect_sig.Parameter.KEYWORD_ONLY,
                default={}
            )
        )
    return inspect_sig.Signature(parameters=reversed(parameters))

"def",
"_make_signature",
"(",
"self",
")",
":",
"# Create KEYWORD_ONLY arguments with the names of the minimizers.",
"name",
"=",
"lambda",
"x",
":",
"x",
".",
"__class__",
".",
"__name__",
"count",
"=",
"Counter",
"(",
"[",
"name",
"(",
"minimizer",
")",
"for",
"minimizer",
"in",
"self",
".",
"minimizers",
"]",
")",
"# Count the number of each minimizer, they don't have to be unique",
"# Note that these are inspect_sig.Parameter's, not symfit parameters!",
"parameters",
"=",
"[",
"]",
"for",
"minimizer",
"in",
"reversed",
"(",
"self",
".",
"minimizers",
")",
":",
"if",
"count",
"[",
"name",
"(",
"minimizer",
")",
"]",
"==",
"1",
":",
"# No ambiguity, so use the name directly.",
"param_name",
"=",
"name",
"(",
"minimizer",
")",
"else",
":",
"# Ambiguity, so append the number of remaining minimizers",
"param_name",
"=",
"'{}_{}'",
".",
"format",
"(",
"name",
"(",
"minimizer",
")",
",",
"count",
"[",
"name",
"(",
"minimizer",
")",
"]",
")",
"count",
"[",
"name",
"(",
"minimizer",
")",
"]",
"-=",
"1",
"parameters",
".",
"append",
"(",
"inspect_sig",
".",
"Parameter",
"(",
"param_name",
",",
"kind",
"=",
"inspect_sig",
".",
"Parameter",
".",
"KEYWORD_ONLY",
",",
"default",
"=",
"{",
"}",
")",
")",
"return",
"inspect_sig",
".",
"Signature",
"(",
"parameters",
"=",
"reversed",
"(",
"parameters",
")",
")"
]
| 41.515152 | 19.757576 |
def ms_contrast_restore(self, viewer, event, data_x, data_y, msg=True):
    """An interactive way to restore the colormap contrast settings after
    a warp operation.
    """
    if self.cancmap and (event.state == 'down'):
        self.restore_contrast(viewer, msg=msg)
    return True

"def",
"ms_contrast_restore",
"(",
"self",
",",
"viewer",
",",
"event",
",",
"data_x",
",",
"data_y",
",",
"msg",
"=",
"True",
")",
":",
"if",
"self",
".",
"cancmap",
"and",
"(",
"event",
".",
"state",
"==",
"'down'",
")",
":",
"self",
".",
"restore_contrast",
"(",
"viewer",
",",
"msg",
"=",
"msg",
")",
"return",
"True"
]
| 43.571429 | 12.714286 |
def path_exists(path):
    """
    Check if file exists either remote or local.

    Parameters:
    -----------
    path : path to file

    Returns:
    --------
    exists : bool
    """
    if path.startswith(("http://", "https://")):
        try:
            urlopen(path).info()
            return True
        except HTTPError as e:
            if e.code == 404:
                return False
            else:
                raise
    elif path.startswith("s3://"):
        bucket = get_boto3_bucket(path.split("/")[2])
        key = "/".join(path.split("/")[3:])
        for obj in bucket.objects.filter(Prefix=key):
            if obj.key == key:
                return True
        else:
            # for-else: no object with this exact key was found
            return False
    else:
        logger.debug("%s exists: %s", path, os.path.exists(path))
        return os.path.exists(path)

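# Hypothetical calls covering all three branches:
path_exists("https://example.com/data.tif")    # remote HTTP(S) check
path_exists("s3://my-bucket/tiles/1/0/0.tif")  # exact-key lookup under prefix
path_exists("/tmp/local.tif")                  # plain os.path.exists
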
"def",
"path_exists",
"(",
"path",
")",
":",
"if",
"path",
".",
"startswith",
"(",
"(",
"\"http://\"",
",",
"\"https://\"",
")",
")",
":",
"try",
":",
"urlopen",
"(",
"path",
")",
".",
"info",
"(",
")",
"return",
"True",
"except",
"HTTPError",
"as",
"e",
":",
"if",
"e",
".",
"code",
"==",
"404",
":",
"return",
"False",
"else",
":",
"raise",
"elif",
"path",
".",
"startswith",
"(",
"\"s3://\"",
")",
":",
"bucket",
"=",
"get_boto3_bucket",
"(",
"path",
".",
"split",
"(",
"\"/\"",
")",
"[",
"2",
"]",
")",
"key",
"=",
"\"/\"",
".",
"join",
"(",
"path",
".",
"split",
"(",
"\"/\"",
")",
"[",
"3",
":",
"]",
")",
"for",
"obj",
"in",
"bucket",
".",
"objects",
".",
"filter",
"(",
"Prefix",
"=",
"key",
")",
":",
"if",
"obj",
".",
"key",
"==",
"key",
":",
"return",
"True",
"else",
":",
"return",
"False",
"else",
":",
"logger",
".",
"debug",
"(",
"\"%s exists: %s\"",
",",
"path",
",",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
")",
"return",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")"
]
| 25.125 | 17.1875 |
def find_lemma(self, verb):
    """ Returns the base form of the given inflected verb, using a rule-based approach.
    """
    v = verb.lower()
    # Common prefixes: be-finden and emp-finden probably inflect like finden.
    if not (v.startswith("ge") and v.endswith("t")):  # Probably gerund.
        for prefix in prefixes:
            if v.startswith(prefix) and v[len(prefix):] in self.inflections:
                return prefix + self.inflections[v[len(prefix):]]
    # Common suffixes: setze nieder => niedersetzen.
    b, suffix = " " in v and v.split()[:2] or (v, "")
    # Infinitive -ln: trommeln.
    if b.endswith(("ln", "rn")):
        return b
    # Lemmatize regular inflections.
    for x in ("test", "est", "end", "ten", "tet", "en", "et", "te", "st", "e", "t"):
        if b.endswith(x): b = b[:-len(x)]; break
    # Subjunctive: hielte => halten, schnitte => schneiden.
    for x, y in (
      ("ieb", "eib"), ( "ied", "eid"), ( "ief", "auf" ), ( "ieg", "eig" ), ("iel", "alt"),
      ("ien", "ein"), ("iess", "ass"), (u"ieß", u"aß" ), ( "iff", "eif" ), ("iss", "eiss"),
      (u"iß", u"eiß"), ( "it", "eid"), ( "oss", "iess"), (u"öss", "iess")):
        if b.endswith(x): b = b[:-len(x)] + y; break
    b = b.replace("eeiss", "eiss")
    b = b.replace("eeid", "eit")
    # Subjunctive: wechselte => wechseln
    if not b.endswith(("e", "l")) and not (b.endswith("er") and len(b) >= 3 and not b[-3] in VOWELS):
        b = b + "e"
    # abknallst != abknalln => abknallen
    if b.endswith(("hl", "ll", "ul", "eil")):
        b = b + "e"
    # Strip ge- from (likely) gerund:
    if b.startswith("ge") and v.endswith("t"):
        b = b[2:]
    # Corrections (these add about 1.5% accuracy):
    if b.endswith(("lnde", "rnde")):
        b = b[:-3]
    if b.endswith(("ae", "al", u"öe", u"üe")):
        b = b.rstrip("e") + "te"
    if b.endswith(u"äl"):
        b = b + "e"
    return suffix + b + "n"

"def",
"find_lemma",
"(",
"self",
",",
"verb",
")",
":",
"v",
"=",
"verb",
".",
"lower",
"(",
")",
"# Common prefixes: be-finden and emp-finden probably inflect like finden.",
"if",
"not",
"(",
"v",
".",
"startswith",
"(",
"\"ge\"",
")",
"and",
"v",
".",
"endswith",
"(",
"\"t\"",
")",
")",
":",
"# Probably gerund.",
"for",
"prefix",
"in",
"prefixes",
":",
"if",
"v",
".",
"startswith",
"(",
"prefix",
")",
"and",
"v",
"[",
"len",
"(",
"prefix",
")",
":",
"]",
"in",
"self",
".",
"inflections",
":",
"return",
"prefix",
"+",
"self",
".",
"inflections",
"[",
"v",
"[",
"len",
"(",
"prefix",
")",
":",
"]",
"]",
"# Common sufixes: setze nieder => niedersetzen.",
"b",
",",
"suffix",
"=",
"\" \"",
"in",
"v",
"and",
"v",
".",
"split",
"(",
")",
"[",
":",
"2",
"]",
"or",
"(",
"v",
",",
"\"\"",
")",
"# Infinitive -ln: trommeln.",
"if",
"b",
".",
"endswith",
"(",
"(",
"\"ln\"",
",",
"\"rn\"",
")",
")",
":",
"return",
"b",
"# Lemmatize regular inflections.",
"for",
"x",
"in",
"(",
"\"test\"",
",",
"\"est\"",
",",
"\"end\"",
",",
"\"ten\"",
",",
"\"tet\"",
",",
"\"en\"",
",",
"\"et\"",
",",
"\"te\"",
",",
"\"st\"",
",",
"\"e\"",
",",
"\"t\"",
")",
":",
"if",
"b",
".",
"endswith",
"(",
"x",
")",
":",
"b",
"=",
"b",
"[",
":",
"-",
"len",
"(",
"x",
")",
"]",
"break",
"# Subjunctive: hielte => halten, schnitte => schneiden.",
"for",
"x",
",",
"y",
"in",
"(",
"(",
"\"ieb\"",
",",
"\"eib\"",
")",
",",
"(",
"\"ied\"",
",",
"\"eid\"",
")",
",",
"(",
"\"ief\"",
",",
"\"auf\"",
")",
",",
"(",
"\"ieg\"",
",",
"\"eig\"",
")",
",",
"(",
"\"iel\"",
",",
"\"alt\"",
")",
",",
"(",
"\"ien\"",
",",
"\"ein\"",
")",
",",
"(",
"\"iess\"",
",",
"\"ass\"",
")",
",",
"(",
"u\"ieß\",",
" ",
"\"aß\" ",
" ",
"(",
"\"",
"ff\", ",
"\"",
"if\" )",
" ",
"(",
"i",
"ss\", ",
"\"",
"iss\"),",
" ",
"",
"(",
"u\"iß\",",
" ",
"\"eiß\"),",
" ",
"(",
" ",
"t\", ",
"\"",
"id\"),",
" ",
"(",
"\"",
"ss\", ",
" ",
"ess\"),",
" ",
"(",
"\"",
"öss\", \"",
"i",
"ss\")):",
"",
"",
"",
"if",
"b",
".",
"endswith",
"(",
"x",
")",
":",
"b",
"=",
"b",
"[",
":",
"-",
"len",
"(",
"x",
")",
"]",
"+",
"y",
"break",
"b",
"=",
"b",
".",
"replace",
"(",
"\"eeiss\"",
",",
"\"eiss\"",
")",
"b",
"=",
"b",
".",
"replace",
"(",
"\"eeid\"",
",",
"\"eit\"",
")",
"# Subjunctive: wechselte => wechseln",
"if",
"not",
"b",
".",
"endswith",
"(",
"(",
"\"e\"",
",",
"\"l\"",
")",
")",
"and",
"not",
"(",
"b",
".",
"endswith",
"(",
"\"er\"",
")",
"and",
"len",
"(",
"b",
")",
">=",
"3",
"and",
"not",
"b",
"[",
"-",
"3",
"]",
"in",
"VOWELS",
")",
":",
"b",
"=",
"b",
"+",
"\"e\"",
"# abknallst != abknalln => abknallen",
"if",
"b",
".",
"endswith",
"(",
"(",
"\"hl\"",
",",
"\"ll\"",
",",
"\"ul\"",
",",
"\"eil\"",
")",
")",
":",
"b",
"=",
"b",
"+",
"\"e\"",
"# Strip ge- from (likely) gerund:",
"if",
"b",
".",
"startswith",
"(",
"\"ge\"",
")",
"and",
"v",
".",
"endswith",
"(",
"\"t\"",
")",
":",
"b",
"=",
"b",
"[",
"2",
":",
"]",
"# Corrections (these add about 1.5% accuracy):",
"if",
"b",
".",
"endswith",
"(",
"(",
"\"lnde\"",
",",
"\"rnde\"",
")",
")",
":",
"b",
"=",
"b",
"[",
":",
"-",
"3",
"]",
"if",
"b",
".",
"endswith",
"(",
"(",
"\"ae\"",
",",
"\"al\"",
",",
"u\"öe\",",
" ",
"\"üe\"))",
":",
"",
"",
"b",
"=",
"b",
".",
"rstrip",
"(",
"\"e\"",
")",
"+",
"\"te\"",
"if",
"b",
".",
"endswith",
"(",
"u\"äl\")",
":",
"",
"b",
"=",
"b",
"+",
"\"e\"",
"return",
"suffix",
"+",
"b",
"+",
"\"n\""
]
| 48.857143 | 17.857143 |
def dependency_list(self, tkn: str) -> List[str]:
    """Return a list of all of the grammarelts that depend on tkn
    :param tkn:
    :return:
    """
    if tkn not in self.dependency_map:
        self.dependency_map[tkn] = [tkn]  # Force a circular reference
        self.dependency_map[tkn] = self.reference(tkn).dependency_list()
    return self.dependency_map[tkn]

"def",
"dependency_list",
"(",
"self",
",",
"tkn",
":",
"str",
")",
"->",
"List",
"[",
"str",
"]",
":",
"if",
"tkn",
"not",
"in",
"self",
".",
"dependency_map",
":",
"self",
".",
"dependency_map",
"[",
"tkn",
"]",
"=",
"[",
"tkn",
"]",
"# Force a circular reference",
"self",
".",
"dependency_map",
"[",
"tkn",
"]",
"=",
"self",
".",
"reference",
"(",
"tkn",
")",
".",
"dependency_list",
"(",
")",
"return",
"self",
".",
"dependency_map",
"[",
"tkn",
"]"
]
| 39.9 | 17.2 |
def get_third_party(self, third_party):
    """Return the account for the given third-party. Raises AssertionError if
    the third party doesn't belong to this bookset."""
    actual_account = third_party.get_account()
    assert actual_account.get_bookset() == self
    return ThirdPartySubAccount(actual_account, third_party=third_party)

"def",
"get_third_party",
"(",
"self",
",",
"third_party",
")",
":",
"actual_account",
"=",
"third_party",
".",
"get_account",
"(",
")",
"assert",
"actual_account",
".",
"get_bookset",
"(",
")",
"==",
"self",
"return",
"ThirdPartySubAccount",
"(",
"actual_account",
",",
"third_party",
"=",
"third_party",
")"
]
| 69 | 11.6 |
def NewFromJSON(data):
    """
    Create a new Shake instance from a JSON dict.

    Args:
        data (dict): JSON dictionary representing a Shake.

    Returns:
        A Shake instance.
    """
    s = Shake(
        id=data.get('id', None),
        name=data.get('name', None),
        url=data.get('url', None),
        thumbnail_url=data.get('thumbnail_url', None),
        description=data.get('description', None),
        type=data.get('type', None),
        created_at=data.get('created_at', None),
        updated_at=data.get('updated_at', None)
    )
    if data.get('owner', None):
        s.owner = User.NewFromJSON(data.get('owner', None))
    return s

"def",
"NewFromJSON",
"(",
"data",
")",
":",
"s",
"=",
"Shake",
"(",
"id",
"=",
"data",
".",
"get",
"(",
"'id'",
",",
"None",
")",
",",
"name",
"=",
"data",
".",
"get",
"(",
"'name'",
",",
"None",
")",
",",
"url",
"=",
"data",
".",
"get",
"(",
"'url'",
",",
"None",
")",
",",
"thumbnail_url",
"=",
"data",
".",
"get",
"(",
"'thumbnail_url'",
",",
"None",
")",
",",
"description",
"=",
"data",
".",
"get",
"(",
"'description'",
",",
"None",
")",
",",
"type",
"=",
"data",
".",
"get",
"(",
"'type'",
",",
"None",
")",
",",
"created_at",
"=",
"data",
".",
"get",
"(",
"'created_at'",
",",
"None",
")",
",",
"updated_at",
"=",
"data",
".",
"get",
"(",
"'updated_at'",
",",
"None",
")",
")",
"if",
"data",
".",
"get",
"(",
"'owner'",
",",
"None",
")",
":",
"s",
".",
"owner",
"=",
"User",
".",
"NewFromJSON",
"(",
"data",
".",
"get",
"(",
"'owner'",
",",
"None",
")",
")",
"return",
"s"
]
| 31.608696 | 15.695652 |
def authenticate(session, username, password):
    """
    Authenticate a PasswordUser with the specified
    username/password.

    :param session: An active SQLAlchemy session
    :param username: The username
    :param password: The password

    :raise AuthenticationError: if an error occurred
    :return: a PasswordUser
    """
    if not username or not password:
        raise AuthenticationError()

    user = session.query(PasswordUser).filter(
        PasswordUser.username == username).first()

    if not user:
        raise AuthenticationError()

    if not user.authenticate(password):
        raise AuthenticationError()

    log.info("User %s successfully authenticated", username)
    return user

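# Hypothetical call site, assuming an active SQLAlchemy session:
try:
    user = authenticate(session, "alice", "s3cret")
except AuthenticationError:
    pass  # reject the login attempt
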
"def",
"authenticate",
"(",
"session",
",",
"username",
",",
"password",
")",
":",
"if",
"not",
"username",
"or",
"not",
"password",
":",
"raise",
"AuthenticationError",
"(",
")",
"user",
"=",
"session",
".",
"query",
"(",
"PasswordUser",
")",
".",
"filter",
"(",
"PasswordUser",
".",
"username",
"==",
"username",
")",
".",
"first",
"(",
")",
"if",
"not",
"user",
":",
"raise",
"AuthenticationError",
"(",
")",
"if",
"not",
"user",
".",
"authenticate",
"(",
"password",
")",
":",
"raise",
"AuthenticationError",
"(",
")",
"log",
".",
"info",
"(",
"\"User %s successfully authenticated\"",
",",
"username",
")",
"return",
"user"
]
| 26.615385 | 16.384615 |
def delete_entity_alias(self, alias_id, mount_point=DEFAULT_MOUNT_POINT):
    """Delete an entity alias.

    Supported methods:
        DELETE: /{mount_point}/entity-alias/id/{alias_id}. Produces: 204 (empty body)

    :param alias_id: Identifier of the entity alias.
    :type alias_id: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    api_path = '/v1/{mount_point}/entity-alias/id/{id}'.format(
        mount_point=mount_point,
        id=alias_id,
    )
    return self._adapter.delete(
        url=api_path,
    )

"def",
"delete_entity_alias",
"(",
"self",
",",
"alias_id",
",",
"mount_point",
"=",
"DEFAULT_MOUNT_POINT",
")",
":",
"api_path",
"=",
"'/v1/{mount_point}/entity-alias/id/{id}'",
".",
"format",
"(",
"mount_point",
"=",
"mount_point",
",",
"id",
"=",
"alias_id",
",",
")",
"return",
"self",
".",
"_adapter",
".",
"delete",
"(",
"url",
"=",
"api_path",
",",
")"
]
| 35.8 | 18.1 |
def get(self, identity, params=None, headers=None):
    """Get a single creditor bank account.

    Retrieves the details of an existing creditor bank account.

    Args:
        identity (string): Unique identifier, beginning with "BA".
        params (dict, optional): Query string parameters.

    Returns:
        A CreditorBankAccount instance.
    """
    path = self._sub_url_params('/creditor_bank_accounts/:identity', {
        'identity': identity,
    })

    response = self._perform_request('GET', path, params, headers,
                                     retry_failures=True)
    return self._resource_for(response)

"def",
"get",
"(",
"self",
",",
"identity",
",",
"params",
"=",
"None",
",",
"headers",
"=",
"None",
")",
":",
"path",
"=",
"self",
".",
"_sub_url_params",
"(",
"'/creditor_bank_accounts/:identity'",
",",
"{",
"'identity'",
":",
"identity",
",",
"}",
")",
"response",
"=",
"self",
".",
"_perform_request",
"(",
"'GET'",
",",
"path",
",",
"params",
",",
"headers",
",",
"retry_failures",
"=",
"True",
")",
"return",
"self",
".",
"_resource_for",
"(",
"response",
")"
]
| 33.666667 | 24.095238 |
def L_diffuser_inner(sed_inputs=sed_dict):
    """Return the inner length of each diffuser in the sedimentation tank.

    Parameters
    ----------
    sed_inputs : dict
        A dictionary of all of the constant inputs needed for sedimentation tank
        calculations can be found in sed.yaml

    Returns
    -------
    float
        Inner length of each diffuser in the sedimentation tank

    Examples
    --------
    >>> from aide_design.play import*
    >>>
    """
    return (L_diffuser_outer(sed_inputs['tank']['W']) -
            (2 * (sed_inputs['manifold']['diffuser']['thickness_wall']).to(u.m)).magnitude)

"def",
"L_diffuser_inner",
"(",
"sed_inputs",
"=",
"sed_dict",
")",
":",
"return",
"L_diffuser_outer",
"(",
"sed_inputs",
"[",
"'tank'",
"]",
"[",
"'W'",
"]",
")",
"-",
"(",
"2",
"*",
"(",
"sed_inputs",
"[",
"'manifold'",
"]",
"[",
"'diffuser'",
"]",
"[",
"'thickness_wall'",
"]",
")",
".",
"to",
"(",
"u",
".",
"m",
")",
")",
".",
"magnitude",
")"
]
| 33.555556 | 21.5 |
def runs_done():
    """Marks a release candidate as having all runs reported."""
    build = g.build

    release_name, release_number = _get_release_params()
    release = (
        models.Release.query
        .filter_by(build_id=build.id, name=release_name, number=release_number)
        .with_lockmode('update')
        .first())
    utils.jsonify_assert(release, 'Release does not exist')

    release.status = models.Release.PROCESSING
    db.session.add(release)
    _check_release_done_processing(release)
    db.session.commit()

    signals.release_updated_via_api.send(app, build=build, release=release)

    logging.info('Runs done for release: build_id=%r, release_name=%r, '
                 'release_number=%d', build.id, release.name, release.number)

    results_url = url_for(
        'view_release',
        id=build.id,
        name=release.name,
        number=release.number,
        _external=True)

    return flask.jsonify(
        success=True,
        results_url=results_url)

"def",
"runs_done",
"(",
")",
":",
"build",
"=",
"g",
".",
"build",
"release_name",
",",
"release_number",
"=",
"_get_release_params",
"(",
")",
"release",
"=",
"(",
"models",
".",
"Release",
".",
"query",
".",
"filter_by",
"(",
"build_id",
"=",
"build",
".",
"id",
",",
"name",
"=",
"release_name",
",",
"number",
"=",
"release_number",
")",
".",
"with_lockmode",
"(",
"'update'",
")",
".",
"first",
"(",
")",
")",
"utils",
".",
"jsonify_assert",
"(",
"release",
",",
"'Release does not exist'",
")",
"release",
".",
"status",
"=",
"models",
".",
"Release",
".",
"PROCESSING",
"db",
".",
"session",
".",
"add",
"(",
"release",
")",
"_check_release_done_processing",
"(",
"release",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"signals",
".",
"release_updated_via_api",
".",
"send",
"(",
"app",
",",
"build",
"=",
"build",
",",
"release",
"=",
"release",
")",
"logging",
".",
"info",
"(",
"'Runs done for release: build_id=%r, release_name=%r, '",
"'release_number=%d'",
",",
"build",
".",
"id",
",",
"release",
".",
"name",
",",
"release",
".",
"number",
")",
"results_url",
"=",
"url_for",
"(",
"'view_release'",
",",
"id",
"=",
"build",
".",
"id",
",",
"name",
"=",
"release",
".",
"name",
",",
"number",
"=",
"release",
".",
"number",
",",
"_external",
"=",
"True",
")",
"return",
"flask",
".",
"jsonify",
"(",
"success",
"=",
"True",
",",
"results_url",
"=",
"results_url",
")"
]
| 30.4375 | 22 |
def send_message(self, message):
    """
    Prepare a message to send on the UDP socket, scheduling retransmissions
    for confirmable requests where needed.

    :param message: the message to send
    """
    if isinstance(message, Request):
        request = self._requestLayer.send_request(message)
        request = self._observeLayer.send_request(request)
        request = self._blockLayer.send_request(request)
        transaction = self._messageLayer.send_request(request)
        self.send_datagram(transaction.request)
        if transaction.request.type == defines.Types["CON"]:
            self._start_retransmission(transaction, transaction.request)
    elif isinstance(message, Message):
        message = self._observeLayer.send_empty(message)
        message = self._messageLayer.send_empty(None, None, message)
        self.send_datagram(message)

"def",
"send_message",
"(",
"self",
",",
"message",
")",
":",
"if",
"isinstance",
"(",
"message",
",",
"Request",
")",
":",
"request",
"=",
"self",
".",
"_requestLayer",
".",
"send_request",
"(",
"message",
")",
"request",
"=",
"self",
".",
"_observeLayer",
".",
"send_request",
"(",
"request",
")",
"request",
"=",
"self",
".",
"_blockLayer",
".",
"send_request",
"(",
"request",
")",
"transaction",
"=",
"self",
".",
"_messageLayer",
".",
"send_request",
"(",
"request",
")",
"self",
".",
"send_datagram",
"(",
"transaction",
".",
"request",
")",
"if",
"transaction",
".",
"request",
".",
"type",
"==",
"defines",
".",
"Types",
"[",
"\"CON\"",
"]",
":",
"self",
".",
"_start_retransmission",
"(",
"transaction",
",",
"transaction",
".",
"request",
")",
"elif",
"isinstance",
"(",
"message",
",",
"Message",
")",
":",
"message",
"=",
"self",
".",
"_observeLayer",
".",
"send_empty",
"(",
"message",
")",
"message",
"=",
"self",
".",
"_messageLayer",
".",
"send_empty",
"(",
"None",
",",
"None",
",",
"message",
")",
"self",
".",
"send_datagram",
"(",
"message",
")"
]
| 48.611111 | 17.277778 |
def _pad(self, text):
"""Pad the text."""
top_bottom = ("\n" * self._padding) + " "
right_left = " " * self._padding * self.PAD_WIDTH
return top_bottom + right_left + text + right_left + top_bottom | [
"def",
"_pad",
"(",
"self",
",",
"text",
")",
":",
"top_bottom",
"=",
"(",
"\"\\n\"",
"*",
"self",
".",
"_padding",
")",
"+",
"\" \"",
"right_left",
"=",
"\" \"",
"*",
"self",
".",
"_padding",
"*",
"self",
".",
"PAD_WIDTH",
"return",
"top_bottom",
"+",
"right_left",
"+",
"text",
"+",
"right_left",
"+",
"top_bottom"
]
| 45 | 15.2 |
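A quick sketch of what `_pad` produces; `_padding` and `PAD_WIDTH` are instance attributes not shown in the record, so example values are assumed here.

```python
# Standalone rendition of _pad with assumed attribute values.
padding, PAD_WIDTH = 1, 2
top_bottom = ("\n" * padding) + " "
right_left = " " * padding * PAD_WIDTH
print(repr(top_bottom + right_left + "hi" + right_left + top_bottom))
# '\n   hi  \n '
```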
def maybe_call_fn_and_grads(fn,
fn_arg_list,
result=None,
grads=None,
check_non_none_grads=True,
name=None):
"""Calls `fn` and computes the gradient of the result wrt `args_list`."""
with tf.compat.v1.name_scope(name, 'maybe_call_fn_and_grads',
[fn_arg_list, result, grads]):
fn_arg_list = (list(fn_arg_list) if is_list_like(fn_arg_list)
else [fn_arg_list])
result, grads = _value_and_gradients(fn, fn_arg_list, result, grads)
if not all(r.dtype.is_floating
for r in (result if is_list_like(result) else [result])): # pylint: disable=superfluous-parens
raise TypeError('Function result must be a `Tensor` with `float` '
'`dtype`.')
if len(fn_arg_list) != len(grads):
raise ValueError('Function args must be in one-to-one correspondence '
'with grads.')
if check_non_none_grads and any(g is None for g in grads):
raise ValueError('Encountered `None` gradient.\n'
' fn_arg_list: {}\n'
' grads: {}'.format(fn_arg_list, grads))
return result, grads | [
"def",
"maybe_call_fn_and_grads",
"(",
"fn",
",",
"fn_arg_list",
",",
"result",
"=",
"None",
",",
"grads",
"=",
"None",
",",
"check_non_none_grads",
"=",
"True",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'maybe_call_fn_and_grads'",
",",
"[",
"fn_arg_list",
",",
"result",
",",
"grads",
"]",
")",
":",
"fn_arg_list",
"=",
"(",
"list",
"(",
"fn_arg_list",
")",
"if",
"is_list_like",
"(",
"fn_arg_list",
")",
"else",
"[",
"fn_arg_list",
"]",
")",
"result",
",",
"grads",
"=",
"_value_and_gradients",
"(",
"fn",
",",
"fn_arg_list",
",",
"result",
",",
"grads",
")",
"if",
"not",
"all",
"(",
"r",
".",
"dtype",
".",
"is_floating",
"for",
"r",
"in",
"(",
"result",
"if",
"is_list_like",
"(",
"result",
")",
"else",
"[",
"result",
"]",
")",
")",
":",
"# pylint: disable=superfluous-parens",
"raise",
"TypeError",
"(",
"'Function result must be a `Tensor` with `float` '",
"'`dtype`.'",
")",
"if",
"len",
"(",
"fn_arg_list",
")",
"!=",
"len",
"(",
"grads",
")",
":",
"raise",
"ValueError",
"(",
"'Function args must be in one-to-one correspondence '",
"'with grads.'",
")",
"if",
"check_non_none_grads",
"and",
"any",
"(",
"g",
"is",
"None",
"for",
"g",
"in",
"grads",
")",
":",
"raise",
"ValueError",
"(",
"'Encountered `None` gradient.\\n'",
"' fn_arg_list: {}\\n'",
"' grads: {}'",
".",
"format",
"(",
"fn_arg_list",
",",
"grads",
")",
")",
"return",
"result",
",",
"grads"
]
| 52.75 | 15.208333 |
def _not(cls, operation):
"""not operation"""
def _wrap(*args, **kwargs):
return not operation(*args, **kwargs)
return _wrap | [
"def",
"_not",
"(",
"cls",
",",
"operation",
")",
":",
"def",
"_wrap",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"not",
"operation",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"_wrap"
]
| 22.285714 | 18.428571 |
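A brief usage sketch for the `_not` combinator above; the surrounding class and predicate are illustrative assumptions, not part of the original source.

```python
# Illustrative only: the class and its predicate are hypothetical.
class Predicates:
    @staticmethod
    def is_even(n):
        return n % 2 == 0

    @classmethod
    def _not(cls, operation):
        def _wrap(*args, **kwargs):
            return not operation(*args, **kwargs)
        return _wrap

is_odd = Predicates._not(Predicates.is_even)
print(is_odd(3))  # True
print(is_odd(4))  # False
```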
def plot_eq(fignum, DIblock, s):
"""
plots directions on eqarea projection
Parameters
__________
fignum : matplotlib figure number
DIblock : nested list of dec/inc pairs
s : specimen name
"""
# make the stereonet
plt.figure(num=fignum)
if len(DIblock) < 1:
return
# plt.clf()
if not isServer:
plt.figtext(.02, .01, version_num)
plot_net(fignum)
#
# put on the directions
#
plot_di(fignum, DIblock) # plot directions
plt.axis("equal")
plt.text(-1.1, 1.15, s)
plt.draw() | [
"def",
"plot_eq",
"(",
"fignum",
",",
"DIblock",
",",
"s",
")",
":",
"# make the stereonet",
"plt",
".",
"figure",
"(",
"num",
"=",
"fignum",
")",
"if",
"len",
"(",
"DIblock",
")",
"<",
"1",
":",
"return",
"# plt.clf()",
"if",
"not",
"isServer",
":",
"plt",
".",
"figtext",
"(",
".02",
",",
".01",
",",
"version_num",
")",
"plot_net",
"(",
"fignum",
")",
"#",
"# put on the directions",
"#",
"plot_di",
"(",
"fignum",
",",
"DIblock",
")",
"# plot directions",
"plt",
".",
"axis",
"(",
"\"equal\"",
")",
"plt",
".",
"text",
"(",
"-",
"1.1",
",",
"1.15",
",",
"s",
")",
"plt",
".",
"draw",
"(",
")"
]
| 22.166667 | 16.083333 |
def get_chunks(sequence, chunk_size):
"""Split sequence into chunks.
:param list sequence:
:param int chunk_size:
"""
return [
sequence[idx:idx + chunk_size]
for idx in range(0, len(sequence), chunk_size)
] | [
"def",
"get_chunks",
"(",
"sequence",
",",
"chunk_size",
")",
":",
"return",
"[",
"sequence",
"[",
"idx",
":",
"idx",
"+",
"chunk_size",
"]",
"for",
"idx",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"sequence",
")",
",",
"chunk_size",
")",
"]"
]
| 23.8 | 15.1 |
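A usage sketch for `get_chunks`, with the function copied so the snippet runs standalone:

```python
def get_chunks(sequence, chunk_size):
    return [sequence[idx:idx + chunk_size]
            for idx in range(0, len(sequence), chunk_size)]

print(get_chunks([1, 2, 3, 4, 5], 2))  # [[1, 2], [3, 4], [5]]
```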
def _sample(self, position, trajectory_length, stepsize, lsteps=None):
"""
Runs a single sampling iteration to return a sample
"""
# Resampling momentum
momentum = np.reshape(np.random.normal(0, 1, len(position)), position.shape)
# position_m here will be the previous sampled value of position
position_bar, momentum_bar = position.copy(), momentum
# Number of steps L to simulate dynamics
if lsteps is None:
lsteps = int(max(1, round(trajectory_length / stepsize, 0)))
grad_bar, _ = self.grad_log_pdf(position_bar, self.model).get_gradient_log_pdf()
for _ in range(lsteps):
position_bar, momentum_bar, grad_bar =\
self.simulate_dynamics(self.model, position_bar, momentum_bar,
stepsize, self.grad_log_pdf, grad_bar).get_proposed_values()
acceptance_prob = self._acceptance_prob(position, position_bar, momentum, momentum_bar)
# Metropolis acceptance probability
alpha = min(1, acceptance_prob)
# Accept or reject the new proposed value of position, i.e position_bar
if np.random.rand() < alpha:
position = position_bar.copy()
self.accepted_proposals += 1.0
return position, alpha | [
"def",
"_sample",
"(",
"self",
",",
"position",
",",
"trajectory_length",
",",
"stepsize",
",",
"lsteps",
"=",
"None",
")",
":",
"# Resampling momentum",
"momentum",
"=",
"np",
".",
"reshape",
"(",
"np",
".",
"random",
".",
"normal",
"(",
"0",
",",
"1",
",",
"len",
"(",
"position",
")",
")",
",",
"position",
".",
"shape",
")",
"# position_m here will be the previous sampled value of position",
"position_bar",
",",
"momentum_bar",
"=",
"position",
".",
"copy",
"(",
")",
",",
"momentum",
"# Number of steps L to simulate dynamics",
"if",
"lsteps",
"is",
"None",
":",
"lsteps",
"=",
"int",
"(",
"max",
"(",
"1",
",",
"round",
"(",
"trajectory_length",
"/",
"stepsize",
",",
"0",
")",
")",
")",
"grad_bar",
",",
"_",
"=",
"self",
".",
"grad_log_pdf",
"(",
"position_bar",
",",
"self",
".",
"model",
")",
".",
"get_gradient_log_pdf",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"lsteps",
")",
":",
"position_bar",
",",
"momentum_bar",
",",
"grad_bar",
"=",
"self",
".",
"simulate_dynamics",
"(",
"self",
".",
"model",
",",
"position_bar",
",",
"momentum_bar",
",",
"stepsize",
",",
"self",
".",
"grad_log_pdf",
",",
"grad_bar",
")",
".",
"get_proposed_values",
"(",
")",
"acceptance_prob",
"=",
"self",
".",
"_acceptance_prob",
"(",
"position",
",",
"position_bar",
",",
"momentum",
",",
"momentum_bar",
")",
"# Metropolis acceptance probability",
"alpha",
"=",
"min",
"(",
"1",
",",
"acceptance_prob",
")",
"# Accept or reject the new proposed value of position, i.e position_bar",
"if",
"np",
".",
"random",
".",
"rand",
"(",
")",
"<",
"alpha",
":",
"position",
"=",
"position_bar",
".",
"copy",
"(",
")",
"self",
".",
"accepted_proposals",
"+=",
"1.0",
"return",
"position",
",",
"alpha"
]
| 40.53125 | 25.40625 |
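The accept/reject step in `_sample` is the standard Metropolis rule. A minimal self-contained sketch on a 1-D standard normal, using random-walk proposals instead of the leapfrog dynamics, so only the acceptance logic carries over:

```python
import numpy as np

def metropolis_step(x, log_pdf, step=0.5, rng=None):
    rng = rng or np.random.default_rng()
    proposal = x + rng.normal(0.0, step)
    # Metropolis acceptance probability: alpha = min(1, p(x') / p(x))
    alpha = min(1.0, np.exp(log_pdf(proposal) - log_pdf(x)))
    return (proposal, True) if rng.random() < alpha else (x, False)

log_pdf = lambda x: -0.5 * x * x  # unnormalized standard normal
rng = np.random.default_rng(0)
x, accepted = 0.0, 0
for _ in range(1000):
    x, ok = metropolis_step(x, log_pdf, rng=rng)
    accepted += ok
print('acceptance rate:', accepted / 1000)
```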
def _hugoniot_p(self, v):
"""
calculate static pressure at 300 K.
:param v: unit-cell volume in A^3
:return: static pressure at t_ref (=300 K) in GPa
"""
rho = self._get_rho(v)
params = self._set_params(self.params_hugoniot)
if self.nonlinear:
return hugoniot_p_nlin(rho, *params)
else:
return hugoniot_p(rho, *params) | [
"def",
"_hugoniot_p",
"(",
"self",
",",
"v",
")",
":",
"rho",
"=",
"self",
".",
"_get_rho",
"(",
"v",
")",
"params",
"=",
"self",
".",
"_set_params",
"(",
"self",
".",
"params_hugoniot",
")",
"if",
"self",
".",
"nonlinear",
":",
"return",
"hugoniot_p_nlin",
"(",
"rho",
",",
"*",
"params",
")",
"else",
":",
"return",
"hugoniot_p",
"(",
"rho",
",",
"*",
"params",
")"
]
| 31 | 11.769231 |
def load_and_preprocess_imdb_data(n_gram=None):
"""Load IMDb data and augment with hashed n-gram features."""
X_train, y_train, X_test, y_test = tl.files.load_imdb_dataset(nb_words=VOCAB_SIZE)
if n_gram is not None:
X_train = np.array([augment_with_ngrams(x, VOCAB_SIZE, N_BUCKETS, n=n_gram) for x in X_train])
X_test = np.array([augment_with_ngrams(x, VOCAB_SIZE, N_BUCKETS, n=n_gram) for x in X_test])
return X_train, y_train, X_test, y_test | [
"def",
"load_and_preprocess_imdb_data",
"(",
"n_gram",
"=",
"None",
")",
":",
"X_train",
",",
"y_train",
",",
"X_test",
",",
"y_test",
"=",
"tl",
".",
"files",
".",
"load_imdb_dataset",
"(",
"nb_words",
"=",
"VOCAB_SIZE",
")",
"if",
"n_gram",
"is",
"not",
"None",
":",
"X_train",
"=",
"np",
".",
"array",
"(",
"[",
"augment_with_ngrams",
"(",
"x",
",",
"VOCAB_SIZE",
",",
"N_BUCKETS",
",",
"n",
"=",
"n_gram",
")",
"for",
"x",
"in",
"X_train",
"]",
")",
"X_test",
"=",
"np",
".",
"array",
"(",
"[",
"augment_with_ngrams",
"(",
"x",
",",
"VOCAB_SIZE",
",",
"N_BUCKETS",
",",
"n",
"=",
"n_gram",
")",
"for",
"x",
"in",
"X_test",
"]",
")",
"return",
"X_train",
",",
"y_train",
",",
"X_test",
",",
"y_test"
]
| 52.111111 | 30.222222 |
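`augment_with_ngrams` is referenced above but not shown; the sketch below is only a guess at the usual hashing-trick augmentation such a helper performs, with hashed n-gram ids appended after the unigram vocabulary:

```python
# Hypothetical reimplementation, not the original helper: hashed n-gram
# ids land in the bucket range [vocab_size, vocab_size + n_buckets).
def augment_with_ngrams(sequence, vocab_size, n_buckets, n=2):
    ngrams = zip(*[sequence[i:] for i in range(n)])
    hashed = [vocab_size + hash(g) % n_buckets for g in ngrams]
    return list(sequence) + hashed

print(augment_with_ngrams([1, 2, 3], vocab_size=10, n_buckets=100, n=2))
```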
def get_by_uri(self, uri):
'''Get a concept or collection by its uri.
Returns a single concept or collection if one exists with this uri.
Returns False otherwise.
:param string uri: The uri to find a concept or collection for.
:raises ValueError: The uri is invalid.
:rtype: :class:`skosprovider.skos.Concept` or
:class:`skosprovider.skos.Collection`
'''
if not is_uri(uri):
raise ValueError('%s is not a valid URI.' % uri)
# Check if there's a provider that's more likely to have the URI
csuris = [csuri for csuri in self.concept_scheme_uri_map.keys() if uri.startswith(csuri)]
for csuri in csuris:
c = self.get_provider(csuri).get_by_uri(uri)
if c:
return c
# Check all providers
for p in self.providers.values():
c = p.get_by_uri(uri)
if c:
return c
return False | [
"def",
"get_by_uri",
"(",
"self",
",",
"uri",
")",
":",
"if",
"not",
"is_uri",
"(",
"uri",
")",
":",
"raise",
"ValueError",
"(",
"'%s is not a valid URI.'",
"%",
"uri",
")",
"# Check if there's a provider that's more likely to have the URI",
"csuris",
"=",
"[",
"csuri",
"for",
"csuri",
"in",
"self",
".",
"concept_scheme_uri_map",
".",
"keys",
"(",
")",
"if",
"uri",
".",
"startswith",
"(",
"csuri",
")",
"]",
"for",
"csuri",
"in",
"csuris",
":",
"c",
"=",
"self",
".",
"get_provider",
"(",
"csuri",
")",
".",
"get_by_uri",
"(",
"uri",
")",
"if",
"c",
":",
"return",
"c",
"# Check all providers",
"for",
"p",
"in",
"self",
".",
"providers",
".",
"values",
"(",
")",
":",
"c",
"=",
"p",
".",
"get_by_uri",
"(",
"uri",
")",
"if",
"c",
":",
"return",
"c",
"return",
"False"
]
| 38.36 | 20.12 |
def get_links_of_type(self, s_type=''):
"""Return the `s_type` satellite list (eg. schedulers)
If s_type is None, returns a dictionary of all satellites, else returns the dictionary
of the s_type satellites
The returned dict is indexed with the satellites uuid.
:param s_type: satellite type
:type s_type: str
:return: dictionary of satellites
:rtype: dict
"""
satellites = {
'arbiter': getattr(self, 'arbiters', []),
'scheduler': getattr(self, 'schedulers', []),
'broker': getattr(self, 'brokers', []),
'poller': getattr(self, 'pollers', []),
'reactionner': getattr(self, 'reactionners', []),
'receiver': getattr(self, 'receivers', [])
}
if not s_type:
result = {}
for sat_type in satellites:
# if sat_type == self.type:
# continue
for sat_uuid in satellites[sat_type]:
result[sat_uuid] = satellites[sat_type][sat_uuid]
return result
if s_type in satellites:
return satellites[s_type]
return None | [
"def",
"get_links_of_type",
"(",
"self",
",",
"s_type",
"=",
"''",
")",
":",
"satellites",
"=",
"{",
"'arbiter'",
":",
"getattr",
"(",
"self",
",",
"'arbiters'",
",",
"[",
"]",
")",
",",
"'scheduler'",
":",
"getattr",
"(",
"self",
",",
"'schedulers'",
",",
"[",
"]",
")",
",",
"'broker'",
":",
"getattr",
"(",
"self",
",",
"'brokers'",
",",
"[",
"]",
")",
",",
"'poller'",
":",
"getattr",
"(",
"self",
",",
"'pollers'",
",",
"[",
"]",
")",
",",
"'reactionner'",
":",
"getattr",
"(",
"self",
",",
"'reactionners'",
",",
"[",
"]",
")",
",",
"'receiver'",
":",
"getattr",
"(",
"self",
",",
"'receivers'",
",",
"[",
"]",
")",
"}",
"if",
"not",
"s_type",
":",
"result",
"=",
"{",
"}",
"for",
"sat_type",
"in",
"satellites",
":",
"# if sat_type == self.type:",
"# continue",
"for",
"sat_uuid",
"in",
"satellites",
"[",
"sat_type",
"]",
":",
"result",
"[",
"sat_uuid",
"]",
"=",
"satellites",
"[",
"sat_type",
"]",
"[",
"sat_uuid",
"]",
"return",
"result",
"if",
"s_type",
"in",
"satellites",
":",
"return",
"satellites",
"[",
"s_type",
"]",
"return",
"None"
]
| 35.545455 | 16.909091 |
def resolve(self, current_file, rel_path):
"""Search the filesystem."""
p = path.join(path.dirname(current_file), rel_path)
if p not in self.file_dict:
raise RuntimeError('No such fake file: %r' % p)
return p, p | [
"def",
"resolve",
"(",
"self",
",",
"current_file",
",",
"rel_path",
")",
":",
"p",
"=",
"path",
".",
"join",
"(",
"path",
".",
"dirname",
"(",
"current_file",
")",
",",
"rel_path",
")",
"if",
"p",
"not",
"in",
"self",
".",
"file_dict",
":",
"raise",
"RuntimeError",
"(",
"'No such fake file: %r'",
"%",
"p",
")",
"return",
"p",
",",
"p"
]
| 38 | 10.666667 |
def simPrepare(unit: Unit, modelCls: Optional[SimModel]=None,
targetPlatform=DummyPlatform(),
dumpModelIn: str=None, onAfterToRtl=None):
"""
Create simulation model and connect it with interfaces of original unit
and decorate it with agents
    :param unit: interface-level unit which you want to prepare for simulation
:param modelCls: class of rtl simulation model to run simulation on,
if is None rtl sim model will be generated from unit
    :param targetPlatform: target platform for this synthesis
:param dumpModelIn: folder to where put sim model files
(if is None sim model will be constructed only in memory)
:param onAfterToRtl: callback fn(unit, modelCls) which will be called
        after the unit has been synthesised to rtl
:return: tuple (fully loaded unit with connected sim model,
connected simulation model,
simulation processes of agents
)
"""
if modelCls is None:
modelCls = toSimModel(
unit, targetPlatform=targetPlatform, dumpModelIn=dumpModelIn)
else:
# to instantiate hierarchy of unit
toSimModel(unit)
if onAfterToRtl:
onAfterToRtl(unit, modelCls)
reconnectUnitSignalsToModel(unit, modelCls)
model = modelCls()
procs = autoAddAgents(unit)
return unit, model, procs | [
"def",
"simPrepare",
"(",
"unit",
":",
"Unit",
",",
"modelCls",
":",
"Optional",
"[",
"SimModel",
"]",
"=",
"None",
",",
"targetPlatform",
"=",
"DummyPlatform",
"(",
")",
",",
"dumpModelIn",
":",
"str",
"=",
"None",
",",
"onAfterToRtl",
"=",
"None",
")",
":",
"if",
"modelCls",
"is",
"None",
":",
"modelCls",
"=",
"toSimModel",
"(",
"unit",
",",
"targetPlatform",
"=",
"targetPlatform",
",",
"dumpModelIn",
"=",
"dumpModelIn",
")",
"else",
":",
"# to instantiate hierarchy of unit",
"toSimModel",
"(",
"unit",
")",
"if",
"onAfterToRtl",
":",
"onAfterToRtl",
"(",
"unit",
",",
"modelCls",
")",
"reconnectUnitSignalsToModel",
"(",
"unit",
",",
"modelCls",
")",
"model",
"=",
"modelCls",
"(",
")",
"procs",
"=",
"autoAddAgents",
"(",
"unit",
")",
"return",
"unit",
",",
"model",
",",
"procs"
]
| 37.828571 | 19.257143 |
def collect_variables(self, selections) -> None:
"""Apply method |ChangeItem.collect_variables| of the base class
|ChangeItem| and also apply method |ExchangeItem.insert_variables|
of class |ExchangeItem| to collect the relevant base variables
handled by the devices of the given |Selections| object.
>>> from hydpy.core.examples import prepare_full_example_2
>>> hp, pub, TestIO = prepare_full_example_2()
>>> from hydpy import AddItem
>>> item = AddItem(
... 'alpha', 'hland_v1', 'control.sfcf', 'control.rfcf', 0)
>>> item.collect_variables(pub.selections)
>>> land_dill = hp.elements.land_dill
>>> control = land_dill.model.parameters.control
>>> item.device2target[land_dill] is control.sfcf
True
>>> item.device2base[land_dill] is control.rfcf
True
>>> for device in sorted(item.device2base, key=lambda x: x.name):
... print(device)
land_dill
land_lahn_1
land_lahn_2
land_lahn_3
"""
super().collect_variables(selections)
self.insert_variables(self.device2base, self.basespecs, selections) | [
"def",
"collect_variables",
"(",
"self",
",",
"selections",
")",
"->",
"None",
":",
"super",
"(",
")",
".",
"collect_variables",
"(",
"selections",
")",
"self",
".",
"insert_variables",
"(",
"self",
".",
"device2base",
",",
"self",
".",
"basespecs",
",",
"selections",
")"
]
| 43.592593 | 18.962963 |
def get_info(df, group, info=['mean', 'std']):
"""
Aggregate mean and std with the given group.
"""
agg = df.groupby(group).agg(info)
agg.columns = agg.columns.droplevel(0)
return agg | [
"def",
"get_info",
"(",
"df",
",",
"group",
",",
"info",
"=",
"[",
"'mean'",
",",
"'std'",
"]",
")",
":",
"agg",
"=",
"df",
".",
"groupby",
"(",
"group",
")",
".",
"agg",
"(",
"info",
")",
"agg",
".",
"columns",
"=",
"agg",
".",
"columns",
".",
"droplevel",
"(",
"0",
")",
"return",
"agg"
]
| 28.714286 | 6.428571 |
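A usage sketch for `get_info` on a toy DataFrame, assuming pandas as in the record:

```python
import pandas as pd

df = pd.DataFrame({'group': ['a', 'a', 'b'], 'value': [1.0, 3.0, 2.0]})
agg = df.groupby('group').agg(['mean', 'std'])
agg.columns = agg.columns.droplevel(0)  # same column flattening as get_info
print(agg)
#        mean       std
# group
# a       2.0  1.414214
# b       2.0       NaN
```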
def _default_handlers(self):
""" Generate the handlers for this site """
static_path = os.path.abspath(os.path.join(os.path.dirname(__file__),"static"))
urls = [
(r"/static/(.*)", cyclone.web.StaticFileHandler, {"path": static_path}),
]
for p in self.pages:
handler = p.handler
handler.site = self
handler.page = p
urls.append((p.link.url,handler))
return urls | [
"def",
"_default_handlers",
"(",
"self",
")",
":",
"static_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"\"static\"",
")",
")",
"urls",
"=",
"[",
"(",
"r\"/static/(.*)\"",
",",
"cyclone",
".",
"web",
".",
"StaticFileHandler",
",",
"{",
"\"path\"",
":",
"static_path",
"}",
")",
",",
"]",
"for",
"p",
"in",
"self",
".",
"pages",
":",
"handler",
"=",
"p",
".",
"handler",
"handler",
".",
"site",
"=",
"self",
"handler",
".",
"page",
"=",
"p",
"urls",
".",
"append",
"(",
"(",
"p",
".",
"link",
".",
"url",
",",
"handler",
")",
")",
"return",
"urls"
]
| 38.083333 | 18.833333 |
def pubsub_pub(self, topic, payload, **kwargs):
"""Publish a message to a given pubsub topic
Publishing will publish the given payload (string) to
everyone currently subscribed to the given topic.
All data (including the id of the publisher) is automatically
base64 encoded when published.
.. code-block:: python
# publishes the message 'message' to the topic 'hello'
>>> c.pubsub_pub('hello', 'message')
[]
Parameters
----------
topic : str
Topic to publish to
payload : Data to be published to the given topic
Returns
-------
list : empty list
"""
args = (topic, payload)
return self._client.request('/pubsub/pub', args,
decoder='json', **kwargs) | [
"def",
"pubsub_pub",
"(",
"self",
",",
"topic",
",",
"payload",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"(",
"topic",
",",
"payload",
")",
"return",
"self",
".",
"_client",
".",
"request",
"(",
"'/pubsub/pub'",
",",
"args",
",",
"decoder",
"=",
"'json'",
",",
"*",
"*",
"kwargs",
")"
]
| 30.107143 | 20.857143 |
def authenticate(api_key, api_url, **kwargs):
"""Returns a muddle instance, with API key and url set for requests."""
muddle = Muddle(**kwargs)
# Login.
muddle.authenticate(api_key, api_url)
return muddle | [
"def",
"authenticate",
"(",
"api_key",
",",
"api_url",
",",
"*",
"*",
"kwargs",
")",
":",
"muddle",
"=",
"Muddle",
"(",
"*",
"*",
"kwargs",
")",
"# Login.",
"muddle",
".",
"authenticate",
"(",
"api_key",
",",
"api_url",
")",
"return",
"muddle"
]
| 27.375 | 18.5 |
def default_job_name(self):
"""Slurm job name if not already specified
in the `sbatch` section"""
name = ''
if not self.root.existing_campaign:
campaign_file = osp.basename(self.root.campaign_file)
campaign = osp.splitext(campaign_file)[0]
name += campaign + '/'
name += self.tag
return name | [
"def",
"default_job_name",
"(",
"self",
")",
":",
"name",
"=",
"''",
"if",
"not",
"self",
".",
"root",
".",
"existing_campaign",
":",
"campaign_file",
"=",
"osp",
".",
"basename",
"(",
"self",
".",
"root",
".",
"campaign_file",
")",
"campaign",
"=",
"osp",
".",
"splitext",
"(",
"campaign_file",
")",
"[",
"0",
"]",
"name",
"+=",
"campaign",
"+",
"'/'",
"name",
"+=",
"self",
".",
"tag",
"return",
"name"
]
| 36.6 | 12 |
def S(Document, *fields):
"""Generate a MongoDB sort order list using the Django ORM style."""
result = []
for field in fields:
if isinstance(field, tuple): # Unpack existing tuple.
field, direction = field
result.append((field, direction))
continue
direction = ASCENDING
if not field.startswith('__'):
field = field.replace('__', '.')
if field[0] == '-':
direction = DESCENDING
if field[0] in ('+', '-'):
field = field[1:]
_field = traverse(Document, field, default=None)
result.append(((~_field) if _field else field, direction))
return result | [
"def",
"S",
"(",
"Document",
",",
"*",
"fields",
")",
":",
"result",
"=",
"[",
"]",
"for",
"field",
"in",
"fields",
":",
"if",
"isinstance",
"(",
"field",
",",
"tuple",
")",
":",
"# Unpack existing tuple.",
"field",
",",
"direction",
"=",
"field",
"result",
".",
"append",
"(",
"(",
"field",
",",
"direction",
")",
")",
"continue",
"direction",
"=",
"ASCENDING",
"if",
"not",
"field",
".",
"startswith",
"(",
"'__'",
")",
":",
"field",
"=",
"field",
".",
"replace",
"(",
"'__'",
",",
"'.'",
")",
"if",
"field",
"[",
"0",
"]",
"==",
"'-'",
":",
"direction",
"=",
"DESCENDING",
"if",
"field",
"[",
"0",
"]",
"in",
"(",
"'+'",
",",
"'-'",
")",
":",
"field",
"=",
"field",
"[",
"1",
":",
"]",
"_field",
"=",
"traverse",
"(",
"Document",
",",
"field",
",",
"default",
"=",
"None",
")",
"result",
".",
"append",
"(",
"(",
"(",
"~",
"_field",
")",
"if",
"_field",
"else",
"field",
",",
"direction",
")",
")",
"return",
"result"
]
| 21.481481 | 23 |
def to_task(self):
"""Return a task object representing this async job."""
from google.appengine.api.taskqueue import Task
from google.appengine.api.taskqueue import TaskRetryOptions
self._increment_recursion_level()
self.check_recursion_depth()
url = "%s/%s" % (ASYNC_ENDPOINT, self.function_path)
kwargs = {
'url': url,
'headers': self.get_headers().copy(),
'payload': json.dumps(self.to_dict())
}
kwargs.update(copy.deepcopy(self.get_task_args()))
# Set task_retry_limit
retry_options = copy.deepcopy(DEFAULT_RETRY_OPTIONS)
retry_options.update(kwargs.pop('retry_options', {}))
kwargs['retry_options'] = TaskRetryOptions(**retry_options)
return Task(**kwargs) | [
"def",
"to_task",
"(",
"self",
")",
":",
"from",
"google",
".",
"appengine",
".",
"api",
".",
"taskqueue",
"import",
"Task",
"from",
"google",
".",
"appengine",
".",
"api",
".",
"taskqueue",
"import",
"TaskRetryOptions",
"self",
".",
"_increment_recursion_level",
"(",
")",
"self",
".",
"check_recursion_depth",
"(",
")",
"url",
"=",
"\"%s/%s\"",
"%",
"(",
"ASYNC_ENDPOINT",
",",
"self",
".",
"function_path",
")",
"kwargs",
"=",
"{",
"'url'",
":",
"url",
",",
"'headers'",
":",
"self",
".",
"get_headers",
"(",
")",
".",
"copy",
"(",
")",
",",
"'payload'",
":",
"json",
".",
"dumps",
"(",
"self",
".",
"to_dict",
"(",
")",
")",
"}",
"kwargs",
".",
"update",
"(",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"get_task_args",
"(",
")",
")",
")",
"# Set task_retry_limit",
"retry_options",
"=",
"copy",
".",
"deepcopy",
"(",
"DEFAULT_RETRY_OPTIONS",
")",
"retry_options",
".",
"update",
"(",
"kwargs",
".",
"pop",
"(",
"'retry_options'",
",",
"{",
"}",
")",
")",
"kwargs",
"[",
"'retry_options'",
"]",
"=",
"TaskRetryOptions",
"(",
"*",
"*",
"retry_options",
")",
"return",
"Task",
"(",
"*",
"*",
"kwargs",
")"
]
| 34.478261 | 21.043478 |
def get_cdk_stacks(module_path, env_vars, context_opts):
"""Return list of CDK stacks."""
LOGGER.debug('Listing stacks in the CDK app prior to '
'diff')
return subprocess.check_output(
generate_node_command(
command='cdk',
command_opts=['list'] + context_opts,
path=module_path),
env=env_vars
).strip().split('\n') | [
"def",
"get_cdk_stacks",
"(",
"module_path",
",",
"env_vars",
",",
"context_opts",
")",
":",
"LOGGER",
".",
"debug",
"(",
"'Listing stacks in the CDK app prior to '",
"'diff'",
")",
"return",
"subprocess",
".",
"check_output",
"(",
"generate_node_command",
"(",
"command",
"=",
"'cdk'",
",",
"command_opts",
"=",
"[",
"'list'",
"]",
"+",
"context_opts",
",",
"path",
"=",
"module_path",
")",
",",
"env",
"=",
"env_vars",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\n'",
")"
]
| 35.363636 | 12.090909 |
def get_estimates(estimators_tuple, positions, num_workers=1):
"""
    Estimates densities and probabilities for paragraph positions.
Parameters
----------
estimators_tuple : (float, KernelDensity, KernelDensity)
An estimate of the prior probability P(relevant), an estimator of the prior density
p(position), and an estimator of the conditional density p(position | relevant).
positions : iterable of float
        Paragraph positions for which densities and probabilities will be estimated.
num_workers : int, optional
The number of processes that will compute the estimates.
Returns
-------
five-tuple of (sequence of float)
Estimates of P(relevant), p(position), p(position | relevant), P(position, relevant), and
P(relevant | position) in the form of histograms.
"""
estimators = dict()
estimators["P(relevant)"], estimators["p(position)"], \
estimators["p(position|relevant)"] = estimators_tuple
log_estimates = dict()
log_estimates["P(relevant)"] = log(estimators["P(relevant)"])
X = [(position,) for position in positions]
with Pool(num_workers) as pool:
first_job = pool.map_async(estimators["p(position)"].score_samples, tqdm(
array_split(X, num_workers), desc="p(position)"))
second_job = pool.map_async(estimators["p(position|relevant)"].score_samples, tqdm(
array_split(X, num_workers), desc="p(position | relevant)"))
log_estimates["p(position)"] = concatenate(first_job.get())
log_estimates["p(position|relevant)"] = concatenate(second_job.get())
log_estimates["P(position,relevant)"] = \
log_estimates["p(position|relevant)"] + log_estimates["P(relevant)"]
log_estimates["P(relevant|position)"] = \
log_estimates["P(position,relevant)"] - log_estimates["p(position)"]
return (
[estimators["P(relevant)"]] * len(X), exp(log_estimates["p(position)"]),
exp(log_estimates["p(position|relevant)"]), exp(log_estimates["P(position,relevant)"]),
exp(log_estimates["P(relevant|position)"])) | [
"def",
"get_estimates",
"(",
"estimators_tuple",
",",
"positions",
",",
"num_workers",
"=",
"1",
")",
":",
"estimators",
"=",
"dict",
"(",
")",
"estimators",
"[",
"\"P(relevant)\"",
"]",
",",
"estimators",
"[",
"\"p(position)\"",
"]",
",",
"estimators",
"[",
"\"p(position|relevant)\"",
"]",
"=",
"estimators_tuple",
"log_estimates",
"=",
"dict",
"(",
")",
"log_estimates",
"[",
"\"P(relevant)\"",
"]",
"=",
"log",
"(",
"estimators",
"[",
"\"P(relevant)\"",
"]",
")",
"X",
"=",
"[",
"(",
"position",
",",
")",
"for",
"position",
"in",
"positions",
"]",
"with",
"Pool",
"(",
"num_workers",
")",
"as",
"pool",
":",
"first_job",
"=",
"pool",
".",
"map_async",
"(",
"estimators",
"[",
"\"p(position)\"",
"]",
".",
"score_samples",
",",
"tqdm",
"(",
"array_split",
"(",
"X",
",",
"num_workers",
")",
",",
"desc",
"=",
"\"p(position)\"",
")",
")",
"second_job",
"=",
"pool",
".",
"map_async",
"(",
"estimators",
"[",
"\"p(position|relevant)\"",
"]",
".",
"score_samples",
",",
"tqdm",
"(",
"array_split",
"(",
"X",
",",
"num_workers",
")",
",",
"desc",
"=",
"\"p(position | relevant)\"",
")",
")",
"log_estimates",
"[",
"\"p(position)\"",
"]",
"=",
"concatenate",
"(",
"first_job",
".",
"get",
"(",
")",
")",
"log_estimates",
"[",
"\"p(position|relevant)\"",
"]",
"=",
"concatenate",
"(",
"second_job",
".",
"get",
"(",
")",
")",
"log_estimates",
"[",
"\"P(position,relevant)\"",
"]",
"=",
"log_estimates",
"[",
"\"p(position|relevant)\"",
"]",
"+",
"log_estimates",
"[",
"\"P(relevant)\"",
"]",
"log_estimates",
"[",
"\"P(relevant|position)\"",
"]",
"=",
"log_estimates",
"[",
"\"P(position,relevant)\"",
"]",
"-",
"log_estimates",
"[",
"\"p(position)\"",
"]",
"return",
"(",
"[",
"estimators",
"[",
"\"P(relevant)\"",
"]",
"]",
"*",
"len",
"(",
"X",
")",
",",
"exp",
"(",
"log_estimates",
"[",
"\"p(position)\"",
"]",
")",
",",
"exp",
"(",
"log_estimates",
"[",
"\"p(position|relevant)\"",
"]",
")",
",",
"exp",
"(",
"log_estimates",
"[",
"\"P(position,relevant)\"",
"]",
")",
",",
"exp",
"(",
"log_estimates",
"[",
"\"P(relevant|position)\"",
"]",
")",
")"
]
| 49.547619 | 26.02381 |
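A minimal self-contained sketch of the same Bayes-rule estimate, P(relevant | position) = p(position | relevant) * P(relevant) / p(position), on toy data with scikit-learn's `KernelDensity`; the bandwidths and the notion of "relevant" here are illustrative assumptions:

```python
import numpy as np
from sklearn.neighbors import KernelDensity

rng = np.random.default_rng(0)
positions = rng.random((1000, 1))            # sample of all positions
relevant = positions[positions[:, 0] < 0.3]  # pretend early positions are relevant
p_relevant = len(relevant) / len(positions)  # prior P(relevant)

kde_all = KernelDensity(bandwidth=0.05).fit(positions)
kde_rel = KernelDensity(bandwidth=0.05).fit(relevant)

grid = np.linspace(0.05, 0.95, 5).reshape(-1, 1)
log_posterior = (np.log(p_relevant)
                 + kde_rel.score_samples(grid)   # log p(position | relevant)
                 - kde_all.score_samples(grid))  # log p(position)
print(np.exp(log_posterior))                     # P(relevant | position) on the grid
```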
def bruteforce(users, domain, password, host):
"""
Performs a bruteforce for the given users, password, domain on the given host.
"""
cs = CredentialSearch(use_pipe=False)
print_notification("Connecting to {}".format(host))
s = Server(host)
c = Connection(s)
for user in users:
if c.rebind(user="{}\\{}".format(domain, user.username), password=password, authentication=NTLM):
print_success('Success for: {}:{}'.format(user.username, password))
credential = cs.find_object(
user.username, password, domain=domain, host_ip=host)
if not credential:
credential = Credential(username=user.username, secret=password,
domain=domain, host_ip=host, type="plaintext", port=389)
credential.add_tag(tag)
credential.save()
            # Add a tag to the user object, so we don't have to bruteforce it again.
user.add_tag(tag)
user.save()
else:
print_error("Fail for: {}:{}".format(user.username, password)) | [
"def",
"bruteforce",
"(",
"users",
",",
"domain",
",",
"password",
",",
"host",
")",
":",
"cs",
"=",
"CredentialSearch",
"(",
"use_pipe",
"=",
"False",
")",
"print_notification",
"(",
"\"Connecting to {}\"",
".",
"format",
"(",
"host",
")",
")",
"s",
"=",
"Server",
"(",
"host",
")",
"c",
"=",
"Connection",
"(",
"s",
")",
"for",
"user",
"in",
"users",
":",
"if",
"c",
".",
"rebind",
"(",
"user",
"=",
"\"{}\\\\{}\"",
".",
"format",
"(",
"domain",
",",
"user",
".",
"username",
")",
",",
"password",
"=",
"password",
",",
"authentication",
"=",
"NTLM",
")",
":",
"print_success",
"(",
"'Success for: {}:{}'",
".",
"format",
"(",
"user",
".",
"username",
",",
"password",
")",
")",
"credential",
"=",
"cs",
".",
"find_object",
"(",
"user",
".",
"username",
",",
"password",
",",
"domain",
"=",
"domain",
",",
"host_ip",
"=",
"host",
")",
"if",
"not",
"credential",
":",
"credential",
"=",
"Credential",
"(",
"username",
"=",
"user",
".",
"username",
",",
"secret",
"=",
"password",
",",
"domain",
"=",
"domain",
",",
"host_ip",
"=",
"host",
",",
"type",
"=",
"\"plaintext\"",
",",
"port",
"=",
"389",
")",
"credential",
".",
"add_tag",
"(",
"tag",
")",
"credential",
".",
"save",
"(",
")",
"# Add a tag to the user object, so we dont have to bruteforce it again.",
"user",
".",
"add_tag",
"(",
"tag",
")",
"user",
".",
"save",
"(",
")",
"else",
":",
"print_error",
"(",
"\"Fail for: {}:{}\"",
".",
"format",
"(",
"user",
".",
"username",
",",
"password",
")",
")"
]
| 40.37037 | 24.888889 |
def geo_distance_range(cls, field, center, from_distance, to_distance, distance_type=None):
'''
http://www.elasticsearch.org/guide/reference/query-dsl/geo-distance-range-filter.html
Filters documents that exists within a range from a specific point
'''
instance = cls(geo_distance_range={'from': from_distance, 'to': to_distance, field: center})
if distance_type is not None:
instance['geo_distance_range']['distance_type'] = distance_type
return instance | [
"def",
"geo_distance_range",
"(",
"cls",
",",
"field",
",",
"center",
",",
"from_distance",
",",
"to_distance",
",",
"distance_type",
"=",
"None",
")",
":",
"instance",
"=",
"cls",
"(",
"geo_distance_range",
"=",
"{",
"'from'",
":",
"from_distance",
",",
"'to'",
":",
"to_distance",
",",
"field",
":",
"center",
"}",
")",
"if",
"distance_type",
"is",
"not",
"None",
":",
"instance",
"[",
"'geo_distance_range'",
"]",
"[",
"'distance_type'",
"]",
"=",
"distance_type",
"return",
"instance"
]
| 46.818182 | 35.545455 |
def set_ram(self, ram):
"""
Set the RAM amount for the GNS3 VM.
:param ram: amount of memory
"""
yield from self._execute("modifyvm", [self._vmname, "--memory", str(ram)], timeout=3)
log.info("GNS3 VM RAM amount set to {}".format(ram)) | [
"def",
"set_ram",
"(",
"self",
",",
"ram",
")",
":",
"yield",
"from",
"self",
".",
"_execute",
"(",
"\"modifyvm\"",
",",
"[",
"self",
".",
"_vmname",
",",
"\"--memory\"",
",",
"str",
"(",
"ram",
")",
"]",
",",
"timeout",
"=",
"3",
")",
"log",
".",
"info",
"(",
"\"GNS3 VM RAM amount set to {}\"",
".",
"format",
"(",
"ram",
")",
")"
]
| 30.777778 | 19.666667 |
def enqueue(self, message, *, delay=None):
"""Enqueue a message.
Parameters:
message(Message): The message to enqueue.
delay(int): The minimum amount of time, in milliseconds, to
delay the message by. Must be less than 7 days.
Raises:
ValueError: If ``delay`` is longer than 7 days.
"""
queue_name = message.queue_name
# Each enqueued message must have a unique id in Redis so
# using the Message's id isn't safe because messages may be
# retried.
message = message.copy(options={
"redis_message_id": str(uuid4()),
})
if delay is not None:
queue_name = dq_name(queue_name)
message_eta = current_millis() + delay
message = message.copy(
queue_name=queue_name,
options={
"eta": message_eta,
},
)
self.logger.debug("Enqueueing message %r on queue %r.", message.message_id, queue_name)
self.emit_before("enqueue", message, delay)
self.do_enqueue(queue_name, message.options["redis_message_id"], message.encode())
self.emit_after("enqueue", message, delay)
return message | [
"def",
"enqueue",
"(",
"self",
",",
"message",
",",
"*",
",",
"delay",
"=",
"None",
")",
":",
"queue_name",
"=",
"message",
".",
"queue_name",
"# Each enqueued message must have a unique id in Redis so",
"# using the Message's id isn't safe because messages may be",
"# retried.",
"message",
"=",
"message",
".",
"copy",
"(",
"options",
"=",
"{",
"\"redis_message_id\"",
":",
"str",
"(",
"uuid4",
"(",
")",
")",
",",
"}",
")",
"if",
"delay",
"is",
"not",
"None",
":",
"queue_name",
"=",
"dq_name",
"(",
"queue_name",
")",
"message_eta",
"=",
"current_millis",
"(",
")",
"+",
"delay",
"message",
"=",
"message",
".",
"copy",
"(",
"queue_name",
"=",
"queue_name",
",",
"options",
"=",
"{",
"\"eta\"",
":",
"message_eta",
",",
"}",
",",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Enqueueing message %r on queue %r.\"",
",",
"message",
".",
"message_id",
",",
"queue_name",
")",
"self",
".",
"emit_before",
"(",
"\"enqueue\"",
",",
"message",
",",
"delay",
")",
"self",
".",
"do_enqueue",
"(",
"queue_name",
",",
"message",
".",
"options",
"[",
"\"redis_message_id\"",
"]",
",",
"message",
".",
"encode",
"(",
")",
")",
"self",
".",
"emit_after",
"(",
"\"enqueue\"",
",",
"message",
",",
"delay",
")",
"return",
"message"
]
| 35.314286 | 19.314286 |
async def verify_proof(self, proof_req: dict, proof: dict) -> str:
"""
Verify proof as Verifier. Raise AbsentRevReg if a proof cites a revocation registry
that does not exist on the distributed ledger.
:param proof_req: proof request as Verifier creates, as per proof_req_json above
:param proof: proof as HolderProver creates
:return: json encoded True if proof is valid; False if not
"""
LOGGER.debug('Verifier.verify_proof >>> proof_req: %s, proof: %s', proof_req, proof)
if not Verifier.check_encoding(proof_req, proof):
LOGGER.info(
'Proof encoding does not cross-reference with proof request %s: failing verification',
proof_req.get('nonce', '(missing nonce)'))
LOGGER.debug('Verifier.verify_proof <<< "False"')
return json.dumps(False)
async def _set_schema(s_id: str) -> None:
nonlocal s_id2schema
if not ok_schema_id(s_id):
LOGGER.debug('Verifier.verify_proof <!< Bad schema id %s', s_id)
raise BadIdentifier('Bad schema id {}'.format(s_id))
if s_id not in s_id2schema:
schema = json.loads(await self.get_schema(s_id)) # add to cache en passant
if not schema:
LOGGER.debug(
'Verifier.verify_proof <!< absent schema %s, proof req may be for another ledger',
s_id)
raise AbsentSchema('Absent schema {}, proof req may be for another ledger'.format(s_id))
s_id2schema[s_id] = schema
async def _set_cred_def(cd_id: str) -> None:
nonlocal cd_id2cred_def
if not ok_cred_def_id(cd_id):
LOGGER.debug('Verifier.verify_proof <!< Bad cred def id %s', cd_id)
raise BadIdentifier('Bad cred def id {}'.format(cd_id))
if cd_id not in cd_id2cred_def:
cd_id2cred_def[cd_id] = json.loads(await self.get_cred_def(cd_id)) # add to cache en passant
async def _set_rev_reg_def(rr_id: str) -> bool:
"""
Return true to continue to timestamp setting, false to short-circuit
"""
nonlocal rr_id2rr_def
if not rr_id:
return False
if not ok_rev_reg_id(rr_id):
LOGGER.debug('Verifier.verify_proof <!< Bad rev reg id %s', rr_id)
raise BadIdentifier('Bad rev reg id {}'.format(rr_id))
if rr_id not in rr_id2rr_def:
rr_id2rr_def[rr_id] = json.loads(await self.get_rev_reg_def(rr_id))
return True
async def _set_timestamp(rr_id: str, timestamp: int) -> None:
nonlocal rr_id2rr
with REVO_CACHE.lock:
revo_cache_entry = REVO_CACHE.get(rr_id, None)
(rr_json, _) = await revo_cache_entry.get_state_json(self._build_rr_state_json, timestamp, timestamp)
if rr_id not in rr_id2rr:
rr_id2rr[rr_id] = {}
rr_id2rr[rr_id][timestamp] = json.loads(rr_json)
s_id2schema = {}
cd_id2cred_def = {}
rr_id2rr_def = {}
rr_id2rr = {}
proof_ids = proof['identifiers']
for proof_id in proof_ids:
await _set_schema(proof_id['schema_id'])
await _set_cred_def(proof_id['cred_def_id'])
rr_id = proof_id['rev_reg_id']
if await _set_rev_reg_def(rr_id):
await _set_timestamp(rr_id, proof_id['timestamp'])
rv = json.dumps(await anoncreds.verifier_verify_proof(
json.dumps(proof_req),
json.dumps(proof),
json.dumps(s_id2schema),
json.dumps(cd_id2cred_def),
json.dumps(rr_id2rr_def),
json.dumps(rr_id2rr)))
LOGGER.debug('Verifier.verify_proof <<< %s', rv)
return rv | [
"async",
"def",
"verify_proof",
"(",
"self",
",",
"proof_req",
":",
"dict",
",",
"proof",
":",
"dict",
")",
"->",
"str",
":",
"LOGGER",
".",
"debug",
"(",
"'Verifier.verify_proof >>> proof_req: %s, proof: %s'",
",",
"proof_req",
",",
"proof",
")",
"if",
"not",
"Verifier",
".",
"check_encoding",
"(",
"proof_req",
",",
"proof",
")",
":",
"LOGGER",
".",
"info",
"(",
"'Proof encoding does not cross-reference with proof request %s: failing verification'",
",",
"proof_req",
".",
"get",
"(",
"'nonce'",
",",
"'(missing nonce)'",
")",
")",
"LOGGER",
".",
"debug",
"(",
"'Verifier.verify_proof <<< \"False\"'",
")",
"return",
"json",
".",
"dumps",
"(",
"False",
")",
"async",
"def",
"_set_schema",
"(",
"s_id",
":",
"str",
")",
"->",
"None",
":",
"nonlocal",
"s_id2schema",
"if",
"not",
"ok_schema_id",
"(",
"s_id",
")",
":",
"LOGGER",
".",
"debug",
"(",
"'Verifier.verify_proof <!< Bad schema id %s'",
",",
"s_id",
")",
"raise",
"BadIdentifier",
"(",
"'Bad schema id {}'",
".",
"format",
"(",
"s_id",
")",
")",
"if",
"s_id",
"not",
"in",
"s_id2schema",
":",
"schema",
"=",
"json",
".",
"loads",
"(",
"await",
"self",
".",
"get_schema",
"(",
"s_id",
")",
")",
"# add to cache en passant",
"if",
"not",
"schema",
":",
"LOGGER",
".",
"debug",
"(",
"'Verifier.verify_proof <!< absent schema %s, proof req may be for another ledger'",
",",
"s_id",
")",
"raise",
"AbsentSchema",
"(",
"'Absent schema {}, proof req may be for another ledger'",
".",
"format",
"(",
"s_id",
")",
")",
"s_id2schema",
"[",
"s_id",
"]",
"=",
"schema",
"async",
"def",
"_set_cred_def",
"(",
"cd_id",
":",
"str",
")",
"->",
"None",
":",
"nonlocal",
"cd_id2cred_def",
"if",
"not",
"ok_cred_def_id",
"(",
"cd_id",
")",
":",
"LOGGER",
".",
"debug",
"(",
"'Verifier.verify_proof <!< Bad cred def id %s'",
",",
"cd_id",
")",
"raise",
"BadIdentifier",
"(",
"'Bad cred def id {}'",
".",
"format",
"(",
"cd_id",
")",
")",
"if",
"cd_id",
"not",
"in",
"cd_id2cred_def",
":",
"cd_id2cred_def",
"[",
"cd_id",
"]",
"=",
"json",
".",
"loads",
"(",
"await",
"self",
".",
"get_cred_def",
"(",
"cd_id",
")",
")",
"# add to cache en passant",
"async",
"def",
"_set_rev_reg_def",
"(",
"rr_id",
":",
"str",
")",
"->",
"bool",
":",
"\"\"\"\n Return true to continue to timestamp setting, false to short-circuit\n \"\"\"",
"nonlocal",
"rr_id2rr_def",
"if",
"not",
"rr_id",
":",
"return",
"False",
"if",
"not",
"ok_rev_reg_id",
"(",
"rr_id",
")",
":",
"LOGGER",
".",
"debug",
"(",
"'Verifier.verify_proof <!< Bad rev reg id %s'",
",",
"rr_id",
")",
"raise",
"BadIdentifier",
"(",
"'Bad rev reg id {}'",
".",
"format",
"(",
"rr_id",
")",
")",
"if",
"rr_id",
"not",
"in",
"rr_id2rr_def",
":",
"rr_id2rr_def",
"[",
"rr_id",
"]",
"=",
"json",
".",
"loads",
"(",
"await",
"self",
".",
"get_rev_reg_def",
"(",
"rr_id",
")",
")",
"return",
"True",
"async",
"def",
"_set_timestamp",
"(",
"rr_id",
":",
"str",
",",
"timestamp",
":",
"int",
")",
"->",
"None",
":",
"nonlocal",
"rr_id2rr",
"with",
"REVO_CACHE",
".",
"lock",
":",
"revo_cache_entry",
"=",
"REVO_CACHE",
".",
"get",
"(",
"rr_id",
",",
"None",
")",
"(",
"rr_json",
",",
"_",
")",
"=",
"await",
"revo_cache_entry",
".",
"get_state_json",
"(",
"self",
".",
"_build_rr_state_json",
",",
"timestamp",
",",
"timestamp",
")",
"if",
"rr_id",
"not",
"in",
"rr_id2rr",
":",
"rr_id2rr",
"[",
"rr_id",
"]",
"=",
"{",
"}",
"rr_id2rr",
"[",
"rr_id",
"]",
"[",
"timestamp",
"]",
"=",
"json",
".",
"loads",
"(",
"rr_json",
")",
"s_id2schema",
"=",
"{",
"}",
"cd_id2cred_def",
"=",
"{",
"}",
"rr_id2rr_def",
"=",
"{",
"}",
"rr_id2rr",
"=",
"{",
"}",
"proof_ids",
"=",
"proof",
"[",
"'identifiers'",
"]",
"for",
"proof_id",
"in",
"proof_ids",
":",
"await",
"_set_schema",
"(",
"proof_id",
"[",
"'schema_id'",
"]",
")",
"await",
"_set_cred_def",
"(",
"proof_id",
"[",
"'cred_def_id'",
"]",
")",
"rr_id",
"=",
"proof_id",
"[",
"'rev_reg_id'",
"]",
"if",
"await",
"_set_rev_reg_def",
"(",
"rr_id",
")",
":",
"await",
"_set_timestamp",
"(",
"rr_id",
",",
"proof_id",
"[",
"'timestamp'",
"]",
")",
"rv",
"=",
"json",
".",
"dumps",
"(",
"await",
"anoncreds",
".",
"verifier_verify_proof",
"(",
"json",
".",
"dumps",
"(",
"proof_req",
")",
",",
"json",
".",
"dumps",
"(",
"proof",
")",
",",
"json",
".",
"dumps",
"(",
"s_id2schema",
")",
",",
"json",
".",
"dumps",
"(",
"cd_id2cred_def",
")",
",",
"json",
".",
"dumps",
"(",
"rr_id2rr_def",
")",
",",
"json",
".",
"dumps",
"(",
"rr_id2rr",
")",
")",
")",
"LOGGER",
".",
"debug",
"(",
"'Verifier.verify_proof <<< %s'",
",",
"rv",
")",
"return",
"rv"
]
| 44.471264 | 21.689655 |
def which(program):
"""
returns the path to an executable or None if it can't be found
"""
def is_exe(_fpath):
return os.path.isfile(_fpath) and os.access(_fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None | [
"def",
"which",
"(",
"program",
")",
":",
"def",
"is_exe",
"(",
"_fpath",
")",
":",
"return",
"os",
".",
"path",
".",
"isfile",
"(",
"_fpath",
")",
"and",
"os",
".",
"access",
"(",
"_fpath",
",",
"os",
".",
"X_OK",
")",
"fpath",
",",
"fname",
"=",
"os",
".",
"path",
".",
"split",
"(",
"program",
")",
"if",
"fpath",
":",
"if",
"is_exe",
"(",
"program",
")",
":",
"return",
"program",
"else",
":",
"for",
"path",
"in",
"os",
".",
"environ",
"[",
"\"PATH\"",
"]",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
":",
"exe_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"program",
")",
"if",
"is_exe",
"(",
"exe_file",
")",
":",
"return",
"exe_file",
"return",
"None"
]
| 28.882353 | 16.882353 |
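The standard library offers the same behaviour via `shutil.which`; a quick comparison, assuming the hand-rolled `which` above is in scope together with its `import os`:

```python
import shutil

print(shutil.which('ls'))  # e.g. '/bin/ls', or None when not on PATH
print(which('ls'))         # the record's version: same idea, hand-rolled
```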
def Add(self, request, callback=None):
"""Add a new request.
Args:
request: A http_wrapper.Request to add to the batch.
callback: A callback to be called for this response, of the
form callback(response, exception). The first parameter is the
deserialized response object. The second is an
apiclient.errors.HttpError exception object if an HTTP error
occurred while processing the request, or None if no errors
occurred.
Returns:
None
"""
handler = RequestResponseAndHandler(request, None, callback)
self.__request_response_handlers[self._NewId()] = handler | [
"def",
"Add",
"(",
"self",
",",
"request",
",",
"callback",
"=",
"None",
")",
":",
"handler",
"=",
"RequestResponseAndHandler",
"(",
"request",
",",
"None",
",",
"callback",
")",
"self",
".",
"__request_response_handlers",
"[",
"self",
".",
"_NewId",
"(",
")",
"]",
"=",
"handler"
]
| 40.647059 | 23.705882 |
def put(self, key, value):
"""
>>> c = MemSizeLRUCache(maxmem=24*4)
>>> c.put(1, 1)
>>> c.mem() # 24-bytes per integer
24
>>> c.put(2, 2)
>>> c.put(3, 3)
>>> c.put(4, 4)
>>> c.get(1)
1
>>> c.mem()
96
>>> c.size()
4
>>> c.put(5, 5)
>>> c.size()
4
>>> c.get(2)
Traceback (most recent call last):
...
KeyError: 2
"""
mem = sys.getsizeof(value)
if self._mem + mem > self._maxmem:
self.delete(self.last())
LRUCache.put(self, key, (value, mem))
self._mem += mem | [
"def",
"put",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"mem",
"=",
"sys",
".",
"getsizeof",
"(",
"value",
")",
"if",
"self",
".",
"_mem",
"+",
"mem",
">",
"self",
".",
"_maxmem",
":",
"self",
".",
"delete",
"(",
"self",
".",
"last",
"(",
")",
")",
"LRUCache",
".",
"put",
"(",
"self",
",",
"key",
",",
"(",
"value",
",",
"mem",
")",
")",
"self",
".",
"_mem",
"+=",
"mem"
]
| 23.285714 | 15.714286 |
def sci(x, precision=2, base=10, exp_only=False):
"""
Convert the given floating point number into scientific notation, using
unicode superscript characters to nicely render the exponent.
"""
# Handle the corner cases of zero and non-finite numbers, which can't be
# represented using scientific notation.
if x == 0 or not math.isfinite(x):
return '{1:.{0}f}'.format(precision, x)
exponent = math.floor(math.log(abs(x), base))
mantissa = x / base**exponent
# Handle the corner case where the mantissa would round up to 10 (at the
# given level of precision) by incrementing the exponent.
if abs(mantissa) + base**-precision >= base:
exponent += 1
mantissa /= base
superscripts = str.maketrans('-0123456789', '⁻⁰¹²³⁴⁵⁶⁷⁸⁹')
superscript_exponent = str(exponent).translate(superscripts)
if exp_only:
return '{0}{1}'.format(base, superscript_exponent)
else:
return '{1:.{0}f}×{2}{3}'.format(precision, mantissa, base, superscript_exponent) | [
"def",
"sci",
"(",
"x",
",",
"precision",
"=",
"2",
",",
"base",
"=",
"10",
",",
"exp_only",
"=",
"False",
")",
":",
"# Handle the corner cases of zero and non-finite numbers, which can't be ",
"# represented using scientific notation.",
"if",
"x",
"==",
"0",
"or",
"not",
"math",
".",
"isfinite",
"(",
"x",
")",
":",
"return",
"'{1:.{0}f}'",
".",
"format",
"(",
"precision",
",",
"x",
")",
"exponent",
"=",
"math",
".",
"floor",
"(",
"math",
".",
"log",
"(",
"abs",
"(",
"x",
")",
",",
"base",
")",
")",
"mantissa",
"=",
"x",
"/",
"base",
"**",
"exponent",
"# Handle the corner case where the mantissa would round up to 10 (at the ",
"# given level of precision) by incrementing the exponent.",
"if",
"abs",
"(",
"mantissa",
")",
"+",
"base",
"**",
"-",
"precision",
">=",
"base",
":",
"exponent",
"+=",
"1",
"mantissa",
"/=",
"base",
"superscripts",
"=",
"str",
".",
"maketrans",
"(",
"'-0123456789'",
",",
"'⁻⁰¹²³⁴⁵⁶⁷⁸⁹')",
"",
"superscript_exponent",
"=",
"str",
"(",
"exponent",
")",
".",
"translate",
"(",
"superscripts",
")",
"if",
"exp_only",
":",
"return",
"'{0}{1}'",
".",
"format",
"(",
"base",
",",
"superscript_exponent",
")",
"else",
":",
"return",
"'{1:.{0}f}×{2}{3}'.",
"f",
"ormat(",
"p",
"recision,",
" ",
"antissa,",
" ",
"ase,",
" ",
"uperscript_exponent)",
""
]
| 37.814815 | 22.407407 |
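A few hand-checked calls to `sci` above, assuming the function and its `import math` are in scope:

```python
print(sci(0.000123))                     # '1.23×10⁻⁴'
print(sci(99.95, precision=1))           # mantissa rounds up, exponent bumps: '1.0×10²'
print(sci(1024, base=2, exp_only=True))  # '2¹⁰'
```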
def _generate_base_namespace_module(self, api, namespace):
"""Creates a module for the namespace. All data types and routes are
represented as Python classes."""
self.cur_namespace = namespace
generate_module_header(self)
if namespace.doc is not None:
self.emit('"""')
self.emit_raw(namespace.doc)
self.emit('"""')
self.emit()
self.emit_raw(validators_import)
# Generate import statements for all referenced namespaces.
self._generate_imports_for_referenced_namespaces(namespace)
for annotation_type in namespace.annotation_types:
self._generate_annotation_type_class(namespace, annotation_type)
for data_type in namespace.linearize_data_types():
if isinstance(data_type, Struct):
self._generate_struct_class(namespace, data_type)
elif isinstance(data_type, Union):
self._generate_union_class(namespace, data_type)
else:
raise TypeError('Cannot handle type %r' % type(data_type))
for alias in namespace.linearize_aliases():
self._generate_alias_definition(namespace, alias)
# Generate the struct->subtype tag mapping at the end so that
# references to later-defined subtypes don't cause errors.
for data_type in namespace.linearize_data_types():
if is_struct_type(data_type):
self._generate_struct_class_reflection_attributes(
namespace, data_type)
if data_type.has_enumerated_subtypes():
self._generate_enumerated_subtypes_tag_mapping(
namespace, data_type)
elif is_union_type(data_type):
self._generate_union_class_reflection_attributes(
namespace, data_type)
self._generate_union_class_symbol_creators(data_type)
self._generate_routes(api.route_schema, namespace) | [
"def",
"_generate_base_namespace_module",
"(",
"self",
",",
"api",
",",
"namespace",
")",
":",
"self",
".",
"cur_namespace",
"=",
"namespace",
"generate_module_header",
"(",
"self",
")",
"if",
"namespace",
".",
"doc",
"is",
"not",
"None",
":",
"self",
".",
"emit",
"(",
"'\"\"\"'",
")",
"self",
".",
"emit_raw",
"(",
"namespace",
".",
"doc",
")",
"self",
".",
"emit",
"(",
"'\"\"\"'",
")",
"self",
".",
"emit",
"(",
")",
"self",
".",
"emit_raw",
"(",
"validators_import",
")",
"# Generate import statements for all referenced namespaces.",
"self",
".",
"_generate_imports_for_referenced_namespaces",
"(",
"namespace",
")",
"for",
"annotation_type",
"in",
"namespace",
".",
"annotation_types",
":",
"self",
".",
"_generate_annotation_type_class",
"(",
"namespace",
",",
"annotation_type",
")",
"for",
"data_type",
"in",
"namespace",
".",
"linearize_data_types",
"(",
")",
":",
"if",
"isinstance",
"(",
"data_type",
",",
"Struct",
")",
":",
"self",
".",
"_generate_struct_class",
"(",
"namespace",
",",
"data_type",
")",
"elif",
"isinstance",
"(",
"data_type",
",",
"Union",
")",
":",
"self",
".",
"_generate_union_class",
"(",
"namespace",
",",
"data_type",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'Cannot handle type %r'",
"%",
"type",
"(",
"data_type",
")",
")",
"for",
"alias",
"in",
"namespace",
".",
"linearize_aliases",
"(",
")",
":",
"self",
".",
"_generate_alias_definition",
"(",
"namespace",
",",
"alias",
")",
"# Generate the struct->subtype tag mapping at the end so that",
"# references to later-defined subtypes don't cause errors.",
"for",
"data_type",
"in",
"namespace",
".",
"linearize_data_types",
"(",
")",
":",
"if",
"is_struct_type",
"(",
"data_type",
")",
":",
"self",
".",
"_generate_struct_class_reflection_attributes",
"(",
"namespace",
",",
"data_type",
")",
"if",
"data_type",
".",
"has_enumerated_subtypes",
"(",
")",
":",
"self",
".",
"_generate_enumerated_subtypes_tag_mapping",
"(",
"namespace",
",",
"data_type",
")",
"elif",
"is_union_type",
"(",
"data_type",
")",
":",
"self",
".",
"_generate_union_class_reflection_attributes",
"(",
"namespace",
",",
"data_type",
")",
"self",
".",
"_generate_union_class_symbol_creators",
"(",
"data_type",
")",
"self",
".",
"_generate_routes",
"(",
"api",
".",
"route_schema",
",",
"namespace",
")"
]
| 42.06383 | 19.191489 |
def non_tag_chars_from_raw(html):
    '''generator that yields clean visible text as it transitions through
states in the raw `html`
'''
n = 0
while n < len(html):
# find start of tag
angle = html.find('<', n)
if angle == -1:
yield html[n:]
n = len(html)
break
yield html[n:angle]
n = angle
# find the end of the tag string
space = html.find(' ', n, n + longest_extended_tag + 2)
angle = html.find('>', n, n + longest_extended_tag + 2)
nl = html.find('\n', n, n + longest_extended_tag + 2)
tab = html.find('\t', n, n + longest_extended_tag + 2)
ends = filter(lambda end: end > -1, [tab, nl, space, angle])
if ends:
tag = html[n + 1 : min(ends)]
if tag == '!--':
# whiteout comment except newlines
end = html.find('-->', n)
while n < end:
nl = html.find('\n', n, end)
if nl != -1:
yield ' ' * (nl - n) + '\n'
n = nl + 1
else:
yield ' ' * (end - n + 3)
break
n = end + 3
continue
is_extended = tag.lower() in extended_tags
else:
is_extended = False
# find end of tag even if on a lower line
while n < len(html):
squote = html.find("'", n)
dquote = html.find('"', n)
nl = html.find('\n', n)
angle = html.find('>', n)
if angle == -1:
# hits end of doc before end of tag
yield ' ' * (len(html) - n)
n = len(html)
break
elif -1 < squote < angle or -1 < dquote < angle:
if squote != -1 and dquote != -1:
if squote < dquote:
open_quote = squote
quote = "'"
else:
open_quote = dquote
quote = '"'
elif dquote != -1:
open_quote = dquote
quote = '"'
else:
open_quote = squote
quote = "'"
close_quote = html.find(quote, open_quote + 1)
while n < close_quote:
nl = html.find('\n', n, close_quote)
if nl == -1: break
yield ' ' * (nl - n) + '\n'
n = nl + 1
yield ' ' * (close_quote + 1 - n)
n = close_quote + 1
continue
elif nl == -1 or angle < nl:
# found close before either newline or end of doc
yield ' ' * (angle + 1 - n)
n = angle + 1
if is_extended and html[angle - 1] != '/':
# find matching closing tag. JavaScript can
# include HTML *strings* within it, and in
# principle, that HTML could contain a closing
# script tag in it; ignoring for now.
while n < len(html):
nl = html.find('\n', n)
close = html.find('</', n)
close2 = html.find('</', close + 2)
angle = html.find('>', close + 2)
if nl != -1 and nl < close:
yield ' ' * (nl - n) + '\n'
n = nl + 1
elif close == -1 or angle == -1:
# end of doc before matching close tag
yield ' ' * (len(html) - n)
n = len(html)
break
elif close2 != -1 and close2 < angle:
# broken tag inside current tag
yield ' ' * (close + 2 - n)
n = close + 2
elif html[close + 2:angle].lower() == tag.lower():
yield ' ' * (angle + 1 - n)
n = angle + 1
break
else:
yield ' ' * (angle + 1 - n)
n = angle + 1
# do not break
# finished with tag
break
else:
# found a newline within the current tag
yield ' ' * (nl - n) + '\n'
n = nl + 1 | [
"def",
"non_tag_chars_from_raw",
"(",
"html",
")",
":",
"n",
"=",
"0",
"while",
"n",
"<",
"len",
"(",
"html",
")",
":",
"# find start of tag",
"angle",
"=",
"html",
".",
"find",
"(",
"'<'",
",",
"n",
")",
"if",
"angle",
"==",
"-",
"1",
":",
"yield",
"html",
"[",
"n",
":",
"]",
"n",
"=",
"len",
"(",
"html",
")",
"break",
"yield",
"html",
"[",
"n",
":",
"angle",
"]",
"n",
"=",
"angle",
"# find the end of the tag string",
"space",
"=",
"html",
".",
"find",
"(",
"' '",
",",
"n",
",",
"n",
"+",
"longest_extended_tag",
"+",
"2",
")",
"angle",
"=",
"html",
".",
"find",
"(",
"'>'",
",",
"n",
",",
"n",
"+",
"longest_extended_tag",
"+",
"2",
")",
"nl",
"=",
"html",
".",
"find",
"(",
"'\\n'",
",",
"n",
",",
"n",
"+",
"longest_extended_tag",
"+",
"2",
")",
"tab",
"=",
"html",
".",
"find",
"(",
"'\\t'",
",",
"n",
",",
"n",
"+",
"longest_extended_tag",
"+",
"2",
")",
"ends",
"=",
"filter",
"(",
"lambda",
"end",
":",
"end",
">",
"-",
"1",
",",
"[",
"tab",
",",
"nl",
",",
"space",
",",
"angle",
"]",
")",
"if",
"ends",
":",
"tag",
"=",
"html",
"[",
"n",
"+",
"1",
":",
"min",
"(",
"ends",
")",
"]",
"if",
"tag",
"==",
"'!--'",
":",
"# whiteout comment except newlines",
"end",
"=",
"html",
".",
"find",
"(",
"'-->'",
",",
"n",
")",
"while",
"n",
"<",
"end",
":",
"nl",
"=",
"html",
".",
"find",
"(",
"'\\n'",
",",
"n",
",",
"end",
")",
"if",
"nl",
"!=",
"-",
"1",
":",
"yield",
"' '",
"*",
"(",
"nl",
"-",
"n",
")",
"+",
"'\\n'",
"n",
"=",
"nl",
"+",
"1",
"else",
":",
"yield",
"' '",
"*",
"(",
"end",
"-",
"n",
"+",
"3",
")",
"break",
"n",
"=",
"end",
"+",
"3",
"continue",
"is_extended",
"=",
"tag",
".",
"lower",
"(",
")",
"in",
"extended_tags",
"else",
":",
"is_extended",
"=",
"False",
"# find end of tag even if on a lower line",
"while",
"n",
"<",
"len",
"(",
"html",
")",
":",
"squote",
"=",
"html",
".",
"find",
"(",
"\"'\"",
",",
"n",
")",
"dquote",
"=",
"html",
".",
"find",
"(",
"'\"'",
",",
"n",
")",
"nl",
"=",
"html",
".",
"find",
"(",
"'\\n'",
",",
"n",
")",
"angle",
"=",
"html",
".",
"find",
"(",
"'>'",
",",
"n",
")",
"if",
"angle",
"==",
"-",
"1",
":",
"# hits end of doc before end of tag",
"yield",
"' '",
"*",
"(",
"len",
"(",
"html",
")",
"-",
"n",
")",
"n",
"=",
"len",
"(",
"html",
")",
"break",
"elif",
"-",
"1",
"<",
"squote",
"<",
"angle",
"or",
"-",
"1",
"<",
"dquote",
"<",
"angle",
":",
"if",
"squote",
"!=",
"-",
"1",
"and",
"dquote",
"!=",
"-",
"1",
":",
"if",
"squote",
"<",
"dquote",
":",
"open_quote",
"=",
"squote",
"quote",
"=",
"\"'\"",
"else",
":",
"open_quote",
"=",
"dquote",
"quote",
"=",
"'\"'",
"elif",
"dquote",
"!=",
"-",
"1",
":",
"open_quote",
"=",
"dquote",
"quote",
"=",
"'\"'",
"else",
":",
"open_quote",
"=",
"squote",
"quote",
"=",
"\"'\"",
"close_quote",
"=",
"html",
".",
"find",
"(",
"quote",
",",
"open_quote",
"+",
"1",
")",
"while",
"n",
"<",
"close_quote",
":",
"nl",
"=",
"html",
".",
"find",
"(",
"'\\n'",
",",
"n",
",",
"close_quote",
")",
"if",
"nl",
"==",
"-",
"1",
":",
"break",
"yield",
"' '",
"*",
"(",
"nl",
"-",
"n",
")",
"+",
"'\\n'",
"n",
"=",
"nl",
"+",
"1",
"yield",
"' '",
"*",
"(",
"close_quote",
"+",
"1",
"-",
"n",
")",
"n",
"=",
"close_quote",
"+",
"1",
"continue",
"elif",
"nl",
"==",
"-",
"1",
"or",
"angle",
"<",
"nl",
":",
"# found close before either newline or end of doc",
"yield",
"' '",
"*",
"(",
"angle",
"+",
"1",
"-",
"n",
")",
"n",
"=",
"angle",
"+",
"1",
"if",
"is_extended",
"and",
"html",
"[",
"angle",
"-",
"1",
"]",
"!=",
"'/'",
":",
"# find matching closing tag. JavaScript can",
"# include HTML *strings* within it, and in",
"# principle, that HTML could contain a closing",
"# script tag in it; ignoring for now.",
"while",
"n",
"<",
"len",
"(",
"html",
")",
":",
"nl",
"=",
"html",
".",
"find",
"(",
"'\\n'",
",",
"n",
")",
"close",
"=",
"html",
".",
"find",
"(",
"'</'",
",",
"n",
")",
"close2",
"=",
"html",
".",
"find",
"(",
"'</'",
",",
"close",
"+",
"2",
")",
"angle",
"=",
"html",
".",
"find",
"(",
"'>'",
",",
"close",
"+",
"2",
")",
"if",
"nl",
"!=",
"-",
"1",
"and",
"nl",
"<",
"close",
":",
"yield",
"' '",
"*",
"(",
"nl",
"-",
"n",
")",
"+",
"'\\n'",
"n",
"=",
"nl",
"+",
"1",
"elif",
"close",
"==",
"-",
"1",
"or",
"angle",
"==",
"-",
"1",
":",
"# end of doc before matching close tag",
"yield",
"' '",
"*",
"(",
"len",
"(",
"html",
")",
"-",
"n",
")",
"n",
"=",
"len",
"(",
"html",
")",
"break",
"elif",
"close2",
"!=",
"-",
"1",
"and",
"close2",
"<",
"angle",
":",
"# broken tag inside current tag",
"yield",
"' '",
"*",
"(",
"close",
"+",
"2",
"-",
"n",
")",
"n",
"=",
"close",
"+",
"2",
"elif",
"html",
"[",
"close",
"+",
"2",
":",
"angle",
"]",
".",
"lower",
"(",
")",
"==",
"tag",
".",
"lower",
"(",
")",
":",
"yield",
"' '",
"*",
"(",
"angle",
"+",
"1",
"-",
"n",
")",
"n",
"=",
"angle",
"+",
"1",
"break",
"else",
":",
"yield",
"' '",
"*",
"(",
"angle",
"+",
"1",
"-",
"n",
")",
"n",
"=",
"angle",
"+",
"1",
"# do not break",
"# finished with tag",
"break",
"else",
":",
"# found a newline within the current tag",
"yield",
"' '",
"*",
"(",
"nl",
"-",
"n",
")",
"+",
"'\\n'",
"n",
"=",
"nl",
"+",
"1"
]
| 39.362069 | 13.258621 |
def hooi(X, rank, **kwargs):
"""
Compute Tucker decomposition of a tensor using Higher-Order Orthogonal
Iterations.
Parameters
----------
X : tensor_mixin
The tensor to be decomposed
rank : array_like
The rank of the decomposition for each mode of the tensor.
The length of ``rank`` must match the number of modes of ``X``.
init : {'random', 'nvecs'}, optional
The initialization method to use.
- random : Factor matrices are initialized randomly.
- nvecs : Factor matrices are initialized via HOSVD.
default : 'nvecs'
Examples
--------
Create dense tensor
>>> T = np.zeros((3, 4, 2))
>>> T[:, :, 0] = [[ 1, 4, 7, 10], [ 2, 5, 8, 11], [3, 6, 9, 12]]
>>> T[:, :, 1] = [[13, 16, 19, 22], [14, 17, 20, 23], [15, 18, 21, 24]]
>>> T = dtensor(T)
Compute Tucker decomposition of ``T`` with n-rank [2, 3, 1] via higher-order
orthogonal iterations
>>> Y = hooi(T, [2, 3, 1], init='nvecs')
Shape of the core tensor matches n-rank of the decomposition.
>>> Y['core'].shape
(2, 3, 1)
>>> Y['U'][1].shape
(3, 2)
References
----------
.. [1] L. De Lathauwer, B. De Moor, J. Vandewalle: On the best rank-1 and
rank-(R_1, R_2, \ldots, R_N) approximation of higher order tensors;
IEEE Trans. Signal Process. 49 (2001), pp. 2262-2271
"""
# init options
ainit = kwargs.pop('init', __DEF_INIT)
maxIter = kwargs.pop('maxIter', __DEF_MAXITER)
conv = kwargs.pop('conv', __DEF_CONV)
dtype = kwargs.pop('dtype', X.dtype)
if not len(kwargs) == 0:
raise ValueError('Unknown keywords (%s)' % (kwargs.keys()))
ndims = X.ndim
if is_number(rank):
rank = rank * ones(ndims)
normX = norm(X)
U = __init(ainit, X, ndims, rank, dtype)
fit = 0
exectimes = []
for itr in range(maxIter):
tic = time.clock()
fitold = fit
for n in range(ndims):
Utilde = ttm(X, U, n, transp=True, without=True)
U[n] = nvecs(Utilde, n, rank[n])
# compute core tensor to get fit
core = ttm(Utilde, U, n, transp=True)
# since factors are orthonormal, compute fit on core tensor
normresidual = sqrt(normX ** 2 - norm(core) ** 2)
# fraction explained by model
fit = 1 - (normresidual / normX)
fitchange = abs(fitold - fit)
exectimes.append(time.clock() - tic)
_log.debug(
'[%3d] fit: %.5f | delta: %7.1e | secs: %.5f'
% (itr, fit, fitchange, exectimes[-1])
)
if itr > 1 and fitchange < conv:
break
return core, U | [
"def",
"hooi",
"(",
"X",
",",
"rank",
",",
"*",
"*",
"kwargs",
")",
":",
"# init options",
"ainit",
"=",
"kwargs",
".",
"pop",
"(",
"'init'",
",",
"__DEF_INIT",
")",
"maxIter",
"=",
"kwargs",
".",
"pop",
"(",
"'maxIter'",
",",
"__DEF_MAXITER",
")",
"conv",
"=",
"kwargs",
".",
"pop",
"(",
"'conv'",
",",
"__DEF_CONV",
")",
"dtype",
"=",
"kwargs",
".",
"pop",
"(",
"'dtype'",
",",
"X",
".",
"dtype",
")",
"if",
"not",
"len",
"(",
"kwargs",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"'Unknown keywords (%s)'",
"%",
"(",
"kwargs",
".",
"keys",
"(",
")",
")",
")",
"ndims",
"=",
"X",
".",
"ndim",
"if",
"is_number",
"(",
"rank",
")",
":",
"rank",
"=",
"rank",
"*",
"ones",
"(",
"ndims",
")",
"normX",
"=",
"norm",
"(",
"X",
")",
"U",
"=",
"__init",
"(",
"ainit",
",",
"X",
",",
"ndims",
",",
"rank",
",",
"dtype",
")",
"fit",
"=",
"0",
"exectimes",
"=",
"[",
"]",
"for",
"itr",
"in",
"range",
"(",
"maxIter",
")",
":",
"tic",
"=",
"time",
".",
"clock",
"(",
")",
"fitold",
"=",
"fit",
"for",
"n",
"in",
"range",
"(",
"ndims",
")",
":",
"Utilde",
"=",
"ttm",
"(",
"X",
",",
"U",
",",
"n",
",",
"transp",
"=",
"True",
",",
"without",
"=",
"True",
")",
"U",
"[",
"n",
"]",
"=",
"nvecs",
"(",
"Utilde",
",",
"n",
",",
"rank",
"[",
"n",
"]",
")",
"# compute core tensor to get fit",
"core",
"=",
"ttm",
"(",
"Utilde",
",",
"U",
",",
"n",
",",
"transp",
"=",
"True",
")",
"# since factors are orthonormal, compute fit on core tensor",
"normresidual",
"=",
"sqrt",
"(",
"normX",
"**",
"2",
"-",
"norm",
"(",
"core",
")",
"**",
"2",
")",
"# fraction explained by model",
"fit",
"=",
"1",
"-",
"(",
"normresidual",
"/",
"normX",
")",
"fitchange",
"=",
"abs",
"(",
"fitold",
"-",
"fit",
")",
"exectimes",
".",
"append",
"(",
"time",
".",
"clock",
"(",
")",
"-",
"tic",
")",
"_log",
".",
"debug",
"(",
"'[%3d] fit: %.5f | delta: %7.1e | secs: %.5f'",
"%",
"(",
"itr",
",",
"fit",
",",
"fitchange",
",",
"exectimes",
"[",
"-",
"1",
"]",
")",
")",
"if",
"itr",
">",
"1",
"and",
"fitchange",
"<",
"conv",
":",
"break",
"return",
"core",
",",
"U"
]
| 29.795455 | 21.340909 |
def save(self, doc):
"""Save a doc to cache
"""
self.log.debug('save()')
self.docs.append(doc)
self.commit() | [
"def",
"save",
"(",
"self",
",",
"doc",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'save()'",
")",
"self",
".",
"docs",
".",
"append",
"(",
"doc",
")",
"self",
".",
"commit",
"(",
")"
]
| 23.833333 | 9.666667 |
def _compute_nfps_uniform(cum_counts, sizes):
"""Computes the matrix of expected false positives for all possible
sub-intervals of the complete domain of set sizes, assuming uniform
distribution of set_sizes within each sub-interval.
Args:
cum_counts: the complete cummulative distribution of set sizes.
sizes: the complete domain of set sizes.
Return (np.array): the 2-D array of expected number of false positives
for every pair of [l, u] interval, where l is axis-0 and u is
axis-1.
"""
nfps = np.zeros((len(sizes), len(sizes)))
# All u and l are inclusive bounds for intervals.
# Compute p = 1, the NFPs
for l in range(len(sizes)):
for u in range(l, len(sizes)):
nfps[l, u] = _compute_nfp_uniform(l, u, cum_counts, sizes)
return nfps | [
"def",
"_compute_nfps_uniform",
"(",
"cum_counts",
",",
"sizes",
")",
":",
"nfps",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"sizes",
")",
",",
"len",
"(",
"sizes",
")",
")",
")",
"# All u an l are inclusive bounds for intervals.",
"# Compute p = 1, the NFPs",
"for",
"l",
"in",
"range",
"(",
"len",
"(",
"sizes",
")",
")",
":",
"for",
"u",
"in",
"range",
"(",
"l",
",",
"len",
"(",
"sizes",
")",
")",
":",
"nfps",
"[",
"l",
",",
"u",
"]",
"=",
"_compute_nfp_uniform",
"(",
"l",
",",
"u",
",",
"cum_counts",
",",
"sizes",
")",
"return",
"nfps"
]
| 40.8 | 19.2 |
def set_value(self, name, value):
"""Set value for a variable"""
value = to_text_string(value)
code = u"get_ipython().kernel.set_value('%s', %s, %s)" % (name, value,
PY2)
if self._reading:
self.kernel_client.input(u'!' + code)
else:
self.silent_execute(code) | [
"def",
"set_value",
"(",
"self",
",",
"name",
",",
"value",
")",
":",
"value",
"=",
"to_text_string",
"(",
"value",
")",
"code",
"=",
"u\"get_ipython().kernel.set_value('%s', %s, %s)\"",
"%",
"(",
"name",
",",
"value",
",",
"PY2",
")",
"if",
"self",
".",
"_reading",
":",
"self",
".",
"kernel_client",
".",
"input",
"(",
"u'!'",
"+",
"code",
")",
"else",
":",
"self",
".",
"silent_execute",
"(",
"code",
")"
]
| 38 | 17.2 |
def saveSnapshot(self):
'''
Saves the current snapshot to the specified file.
Current snapshot is the image being displayed on the main window.
'''
filename = self.snapshotDir + os.sep + '${serialno}-${focusedwindowname}-${timestamp}' + '.' + self.snapshotFormat.lower()
# We have the snapshot already taken, no need to retake
d = FileDialog(self, self.device.substituteDeviceTemplate(filename))
saveAsFilename = d.askSaveAsFilename()
if saveAsFilename:
_format = os.path.splitext(saveAsFilename)[1][1:].upper()
self.printOperation(None, Operation.SNAPSHOT, filename, _format, self.deviceArt, self.dropShadow,
self.screenGlare)
# FIXME: we should add deviceArt, dropShadow and screenGlare to the saved image
# self.unscaledScreenshot.save(saveAsFilename, _format, self.deviceArt, self.dropShadow, self.screenGlare)
self.unscaledScreenshot.save(saveAsFilename, _format) | [
"def",
"saveSnapshot",
"(",
"self",
")",
":",
"filename",
"=",
"self",
".",
"snapshotDir",
"+",
"os",
".",
"sep",
"+",
"'${serialno}-${focusedwindowname}-${timestamp}'",
"+",
"'.'",
"+",
"self",
".",
"snapshotFormat",
".",
"lower",
"(",
")",
"# We have the snapshot already taken, no need to retake",
"d",
"=",
"FileDialog",
"(",
"self",
",",
"self",
".",
"device",
".",
"substituteDeviceTemplate",
"(",
"filename",
")",
")",
"saveAsFilename",
"=",
"d",
".",
"askSaveAsFilename",
"(",
")",
"if",
"saveAsFilename",
":",
"_format",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"saveAsFilename",
")",
"[",
"1",
"]",
"[",
"1",
":",
"]",
".",
"upper",
"(",
")",
"self",
".",
"printOperation",
"(",
"None",
",",
"Operation",
".",
"SNAPSHOT",
",",
"filename",
",",
"_format",
",",
"self",
".",
"deviceArt",
",",
"self",
".",
"dropShadow",
",",
"self",
".",
"screenGlare",
")",
"# FIXME: we should add deviceArt, dropShadow and screenGlare to the saved image",
"# self.unscaledScreenshot.save(saveAsFilename, _format, self.deviceArt, self.dropShadow, self.screenGlare)",
"self",
".",
"unscaledScreenshot",
".",
"save",
"(",
"saveAsFilename",
",",
"_format",
")"
]
| 59.882353 | 35.058824 |
def zoom_blur(x, severity=1):
"""Zoom blurring to images.
Applying zoom blurring to images by zooming the central part of the images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixels in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied zoom blur.
"""
c = [
np.arange(1, 1.11, 0.01),
np.arange(1, 1.16, 0.01),
np.arange(1, 1.21, 0.02),
np.arange(1, 1.26, 0.02),
np.arange(1, 1.31, 0.03)
][severity - 1]
x = (np.array(x) / 255.).astype(np.float32)
out = np.zeros_like(x)
for zoom_factor in c:
out += clipped_zoom(x, zoom_factor)
x = (x + out) / (len(c) + 1)
x_clip = np.clip(x, 0, 1) * 255
return around_and_astype(x_clip) | [
"def",
"zoom_blur",
"(",
"x",
",",
"severity",
"=",
"1",
")",
":",
"c",
"=",
"[",
"np",
".",
"arange",
"(",
"1",
",",
"1.11",
",",
"0.01",
")",
",",
"np",
".",
"arange",
"(",
"1",
",",
"1.16",
",",
"0.01",
")",
",",
"np",
".",
"arange",
"(",
"1",
",",
"1.21",
",",
"0.02",
")",
",",
"np",
".",
"arange",
"(",
"1",
",",
"1.26",
",",
"0.02",
")",
",",
"np",
".",
"arange",
"(",
"1",
",",
"1.31",
",",
"0.03",
")",
"]",
"[",
"severity",
"-",
"1",
"]",
"x",
"=",
"(",
"np",
".",
"array",
"(",
"x",
")",
"/",
"255.",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"out",
"=",
"np",
".",
"zeros_like",
"(",
"x",
")",
"for",
"zoom_factor",
"in",
"c",
":",
"out",
"+=",
"clipped_zoom",
"(",
"x",
",",
"zoom_factor",
")",
"x",
"=",
"(",
"x",
"+",
"out",
")",
"/",
"(",
"len",
"(",
"c",
")",
"+",
"1",
")",
"x_clip",
"=",
"np",
".",
"clip",
"(",
"x",
",",
"0",
",",
"1",
")",
"*",
"255",
"return",
"around_and_astype",
"(",
"x_clip",
")"
]
| 29.153846 | 18.076923 |
def list_deployments(jboss_config):
'''
List all deployments on the jboss instance
jboss_config
Configuration dictionary with properties specified above.
CLI Example:
.. code-block:: bash
salt '*' jboss7.list_deployments '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}'
'''
log.debug("======================== MODULE FUNCTION: jboss7.list_deployments")
command_result = __salt__['jboss7_cli.run_command'](jboss_config, 'deploy')
deployments = []
if command_result['stdout']:
deployments = re.split('\\s*', command_result['stdout'])
log.debug('deployments=%s', deployments)
return deployments | [
"def",
"list_deployments",
"(",
"jboss_config",
")",
":",
"log",
".",
"debug",
"(",
"\"======================== MODULE FUNCTION: jboss7.list_deployments\"",
")",
"command_result",
"=",
"__salt__",
"[",
"'jboss7_cli.run_command'",
"]",
"(",
"jboss_config",
",",
"'deploy'",
")",
"deployments",
"=",
"[",
"]",
"if",
"command_result",
"[",
"'stdout'",
"]",
":",
"deployments",
"=",
"re",
".",
"split",
"(",
"'\\\\s*'",
",",
"command_result",
"[",
"'stdout'",
"]",
")",
"log",
".",
"debug",
"(",
"'deployments=%s'",
",",
"deployments",
")",
"return",
"deployments"
]
| 36.47619 | 32.285714 |
def _getitem_normalized(self, index):
"""Builds the more compact fmtstrs by using fromstr( of the control sequences)"""
index = normalize_slice(len(self), index)
counter = 0
output = ''
for fs in self.chunks:
if index.start < counter + len(fs) and index.stop > counter:
s_part = fs.s[max(0, index.start - counter):index.stop - counter]
piece = Chunk(s_part, fs.atts).color_str
output += piece
counter += len(fs)
if index.stop < counter:
break
return fmtstr(output) | [
"def",
"_getitem_normalized",
"(",
"self",
",",
"index",
")",
":",
"index",
"=",
"normalize_slice",
"(",
"len",
"(",
"self",
")",
",",
"index",
")",
"counter",
"=",
"0",
"output",
"=",
"''",
"for",
"fs",
"in",
"self",
".",
"chunks",
":",
"if",
"index",
".",
"start",
"<",
"counter",
"+",
"len",
"(",
"fs",
")",
"and",
"index",
".",
"stop",
">",
"counter",
":",
"s_part",
"=",
"fs",
".",
"s",
"[",
"max",
"(",
"0",
",",
"index",
".",
"start",
"-",
"counter",
")",
":",
"index",
".",
"stop",
"-",
"counter",
"]",
"piece",
"=",
"Chunk",
"(",
"s_part",
",",
"fs",
".",
"atts",
")",
".",
"color_str",
"output",
"+=",
"piece",
"counter",
"+=",
"len",
"(",
"fs",
")",
"if",
"index",
".",
"stop",
"<",
"counter",
":",
"break",
"return",
"fmtstr",
"(",
"output",
")"
]
| 42.785714 | 14.714286 |
def append(self, record):
"""
Adds the passed +record+ to satisfy the query. Only intended to be
used in conjunction with associations (i.e. do not use if self.record
is None).
Intended use case (DO THIS):
post.comments.append(comment)
NOT THIS:
Query(Post).where(content="foo").append(post)
"""
if self.record:
self._validate_record(record)
if self.join_args:
# As always, the related record is created when the primary
# record is saved
build_args = dict(self.where_query)
# The +final_join+ is what connects the record chain to the
# passed +record+
final_join = self.join_args[-2]
# don't need to worry about one-to-many through because
# there is not enough information to find or create the
# joining record
# i.e. in the Forum -> Thread -> Post example
# forum.posts.append(post) doesn't make sense since there
# is no information about what thread it will be attached to
# Thus, this only makes sense on many-to-many. BUT we still
# have to consider the case where there is a one-many-many
# To make that work, we need to treat this like when doing
# building
joining_relation = getattr(self.record, final_join['table'])
# Uses the lookup info in the join to figure out what ids to
# set, and where to get the id value from
joining_args = {final_join['on'][0]:
getattr(record, final_join['on'][1])}
build_args.update(joining_args)
joining_record = joining_relation.build(**build_args)
self.record._related_records.append(joining_record)
else:
# Add our id to their foreign key so that the relationship is
# created
setattr(record,
foreign_key(record, self.record),
self.record.id)
# Add to the list of related records so that it is saved when
# we are
self.record._related_records.append(record) | [
"def",
"append",
"(",
"self",
",",
"record",
")",
":",
"if",
"self",
".",
"record",
":",
"self",
".",
"_validate_record",
"(",
"record",
")",
"if",
"self",
".",
"join_args",
":",
"# As always, the related record is created when the primary",
"# record is saved",
"build_args",
"=",
"dict",
"(",
"self",
".",
"where_query",
")",
"# The +final_join+ is what connects the record chain to the",
"# passed +record+",
"final_join",
"=",
"self",
".",
"join_args",
"[",
"-",
"2",
"]",
"# don't need to worry about one-to-many through because",
"# there is not enough information to find or create the",
"# joining record",
"# i.e. in the Forum -> Thread -> Post example",
"# forum.posts.append(post) doesn't make sense since there",
"# is no information about what thread it will be attached to",
"# Thus, this only makes sense on many-to-many. BUT we still",
"# have to consider the case where there is a one-many-many",
"# To make that work, we need to treat this like when doing",
"# building",
"joining_relation",
"=",
"getattr",
"(",
"self",
".",
"record",
",",
"final_join",
"[",
"'table'",
"]",
")",
"# Uses the lookup info in the join to figure out what ids to",
"# set, and where to get the id value from",
"joining_args",
"=",
"{",
"final_join",
"[",
"'on'",
"]",
"[",
"0",
"]",
":",
"getattr",
"(",
"record",
",",
"final_join",
"[",
"'on'",
"]",
"[",
"1",
"]",
")",
"}",
"build_args",
".",
"update",
"(",
"joining_args",
")",
"joining_record",
"=",
"joining_relation",
".",
"build",
"(",
"*",
"*",
"build_args",
")",
"self",
".",
"record",
".",
"_related_records",
".",
"append",
"(",
"joining_record",
")",
"else",
":",
"# Add our id to their foreign key so that the relationship is",
"# created",
"setattr",
"(",
"record",
",",
"foreign_key",
"(",
"record",
",",
"self",
".",
"record",
")",
",",
"self",
".",
"record",
".",
"id",
")",
"# Add to the list of related records so that it is saved when",
"# we are",
"self",
".",
"record",
".",
"_related_records",
".",
"append",
"(",
"record",
")"
]
| 46.36 | 21.72 |
def stop(self, precision=0):
""" Stops the timer, adds it as an interval to :prop:intervals
@precision: #int number of decimal places to round to
-> #str formatted interval time
"""
self._stop = time.perf_counter()
return self.add_interval(precision) | [
"def",
"stop",
"(",
"self",
",",
"precision",
"=",
"0",
")",
":",
"self",
".",
"_stop",
"=",
"time",
".",
"perf_counter",
"(",
")",
"return",
"self",
".",
"add_interval",
"(",
"precision",
")"
]
| 37.5 | 10.375 |
def getskyimg(self,chip):
"""
Notes
=====
Return an array representing the sky image for the detector. The value
of the sky is what would actually be subtracted from the exposure by
the skysub step.
:units: electrons
"""
sci_chip = self._image[self.scienceExt,chip]
return np.ones(sci_chip.image_shape,dtype=sci_chip.image_dtype)*sci_chip.subtractedSky | [
"def",
"getskyimg",
"(",
"self",
",",
"chip",
")",
":",
"sci_chip",
"=",
"self",
".",
"_image",
"[",
"self",
".",
"scienceExt",
",",
"chip",
"]",
"return",
"np",
".",
"ones",
"(",
"sci_chip",
".",
"image_shape",
",",
"dtype",
"=",
"sci_chip",
".",
"image_dtype",
")",
"*",
"sci_chip",
".",
"subtractedSky"
]
| 32.538462 | 24.692308 |
def sorted_errors(self):
"""
Generator for (rank, name, MSE, MAE) sorted by increasing MAE
"""
for i, (name, mae) in enumerate(
sorted(self.mae_dict.items(), key=lambda x: x[1])):
yield(i + 1, name, self.mse_dict[name], self.mae_dict[name],) | [
"def",
"sorted_errors",
"(",
"self",
")",
":",
"for",
"i",
",",
"(",
"name",
",",
"mae",
")",
"in",
"enumerate",
"(",
"sorted",
"(",
"self",
".",
"mae_dict",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
")",
")",
":",
"yield",
"(",
"i",
"+",
"1",
",",
"name",
",",
"self",
".",
"mse_dict",
"[",
"name",
"]",
",",
"self",
".",
"mae_dict",
"[",
"name",
"]",
",",
")"
]
| 42.142857 | 15 |
def image_to_pdf_or_hocr(image,
lang=None,
config='',
nice=0,
extension='pdf'):
'''
Returns the result of a Tesseract OCR run on the provided image to pdf/hocr
'''
if extension not in {'pdf', 'hocr'}:
raise ValueError('Unsupported extension: {}'.format(extension))
args = [image, extension, lang, config, nice, True]
return run_and_get_output(*args) | [
"def",
"image_to_pdf_or_hocr",
"(",
"image",
",",
"lang",
"=",
"None",
",",
"config",
"=",
"''",
",",
"nice",
"=",
"0",
",",
"extension",
"=",
"'pdf'",
")",
":",
"if",
"extension",
"not",
"in",
"{",
"'pdf'",
",",
"'hocr'",
"}",
":",
"raise",
"ValueError",
"(",
"'Unsupported extension: {}'",
".",
"format",
"(",
"extension",
")",
")",
"args",
"=",
"[",
"image",
",",
"extension",
",",
"lang",
",",
"config",
",",
"nice",
",",
"True",
"]",
"return",
"run_and_get_output",
"(",
"*",
"args",
")"
]
| 32.142857 | 20 |
def execute_command(self):
"""
The run command implements the web content extractor corresponding to the given \
configuration file.
The execute_command() validates the input project name and opens the JSON \
configuration file. The run() method handles the execution of the extractor run.
The extractor implementation follows these primary steps :
1. Selects the appropriate :ref:`selector class <implementation-selectors>` through \
a dynamic dispatch, with the selector_type argument from the CLI input.
#. Iterate through the data section in level-0 of the configuration file. \
On each data item, call the extract_content() method from the selector class to \
extract the content according to the specified extractor rule.
#. If there are multiple levels of the extractor, i.e, if there is a 'next' \
attribute in the configuration file, call the traverse_next() \
:ref:`utility function <implementation-utils>` and parse through successive levels \
of the configuration file.
#. According to the --output_type argument, the result data is saved in a JSON \
document or a CSV document.
"""
try:
self.args['--verbosity'] = int(self.args['--verbosity'])
if self.args['--verbosity'] not in [0, 1, 2]:
raise ValueError
if self.args['--verbosity'] > 0:
print(Back.GREEN + Fore.BLACK + "Scrapple Run")
print(Back.RESET + Fore.RESET)
import json
with open(self.args['<projectname>'] + '.json', 'r') as f:
self.config = json.load(f)
validate_config(self.config)
self.run()
except ValueError:
print(Back.WHITE + Fore.RED + "Use 0, 1 or 2 for verbosity." \
+ Back.RESET + Fore.RESET, sep="")
except IOError:
print(Back.WHITE + Fore.RED + self.args['<projectname>'], ".json does not ", \
"exist. Use ``scrapple genconfig``." + Back.RESET + Fore.RESET, sep="")
except InvalidConfigException as e:
print(Back.WHITE + Fore.RED + e + Back.RESET + Fore.RESET, sep="") | [
"def",
"execute_command",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"args",
"[",
"'--verbosity'",
"]",
"=",
"int",
"(",
"self",
".",
"args",
"[",
"'--verbosity'",
"]",
")",
"if",
"self",
".",
"args",
"[",
"'--verbosity'",
"]",
"not",
"in",
"[",
"0",
",",
"1",
",",
"2",
"]",
":",
"raise",
"ValueError",
"if",
"self",
".",
"args",
"[",
"'--verbosity'",
"]",
">",
"0",
":",
"print",
"(",
"Back",
".",
"GREEN",
"+",
"Fore",
".",
"BLACK",
"+",
"\"Scrapple Run\"",
")",
"print",
"(",
"Back",
".",
"RESET",
"+",
"Fore",
".",
"RESET",
")",
"import",
"json",
"with",
"open",
"(",
"self",
".",
"args",
"[",
"'<projectname>'",
"]",
"+",
"'.json'",
",",
"'r'",
")",
"as",
"f",
":",
"self",
".",
"config",
"=",
"json",
".",
"load",
"(",
"f",
")",
"validate_config",
"(",
"self",
".",
"config",
")",
"self",
".",
"run",
"(",
")",
"except",
"ValueError",
":",
"print",
"(",
"Back",
".",
"WHITE",
"+",
"Fore",
".",
"RED",
"+",
"\"Use 0, 1 or 2 for verbosity.\"",
"+",
"Back",
".",
"RESET",
"+",
"Fore",
".",
"RESET",
",",
"sep",
"=",
"\"\"",
")",
"except",
"IOError",
":",
"print",
"(",
"Back",
".",
"WHITE",
"+",
"Fore",
".",
"RED",
"+",
"self",
".",
"args",
"[",
"'<projectname>'",
"]",
",",
"\".json does not \"",
",",
"\"exist. Use ``scrapple genconfig``.\"",
"+",
"Back",
".",
"RESET",
"+",
"Fore",
".",
"RESET",
",",
"sep",
"=",
"\"\"",
")",
"except",
"InvalidConfigException",
"as",
"e",
":",
"print",
"(",
"Back",
".",
"WHITE",
"+",
"Fore",
".",
"RED",
"+",
"e",
"+",
"Back",
".",
"RESET",
"+",
"Fore",
".",
"RESET",
",",
"sep",
"=",
"\"\"",
")"
]
| 48.173913 | 27.608696 |
def disconnect(self, frame):
"""
Handles the DISCONNECT command: Unbinds the connection.
Clients are supposed to send this command, but in practice it should not be
relied upon.
"""
self.engine.log.debug("Disconnect")
self.engine.unbind() | [
"def",
"disconnect",
"(",
"self",
",",
"frame",
")",
":",
"self",
".",
"engine",
".",
"log",
".",
"debug",
"(",
"\"Disconnect\"",
")",
"self",
".",
"engine",
".",
"unbind",
"(",
")"
]
| 31.888889 | 17 |
def fasta_from_biom(table, fasta_file_name):
'''Save sequences from a biom table to a fasta file
Parameters
----------
table : biom.Table
The biom table containing the sequences
fasta_file_name : str
Name of the fasta output file
'''
logger = logging.getLogger(__name__)
logger.debug('saving biom table sequences to fasta file %s' % fasta_file_name)
with open(fasta_file_name, 'w') as f:
for cseq in table.ids(axis='observation'):
f.write('>%s\n%s\n' % (cseq, cseq))
logger.info('saved biom table sequences to fasta file %s' % fasta_file_name) | [
"def",
"fasta_from_biom",
"(",
"table",
",",
"fasta_file_name",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"debug",
"(",
"'saving biom table sequences to fasta file %s'",
"%",
"fasta_file_name",
")",
"with",
"open",
"(",
"fasta_file_name",
",",
"'w'",
")",
"as",
"f",
":",
"for",
"cseq",
"in",
"table",
".",
"ids",
"(",
"axis",
"=",
"'observation'",
")",
":",
"f",
".",
"write",
"(",
"'>%s\\n%s\\n'",
"%",
"(",
"cseq",
",",
"cseq",
")",
")",
"logger",
".",
"info",
"(",
"'saved biom table sequences to fasta file %s'",
"%",
"fasta_file_name",
")"
]
| 35.588235 | 19.235294 |
def init_widget(self):
""" Initialize the underlying widget.
"""
super(AndroidProgressBar, self).init_widget()
d = self.declaration
self.set_indeterminate(self.indeterminate)
if not self.indeterminate:
if d.max:
self.set_max(d.max)
if d.min:
self.set_min(d.min)
self.set_progress(d.progress)
if d.secondary_progress:
self.set_secondary_progress(d.secondary_progress) | [
"def",
"init_widget",
"(",
"self",
")",
":",
"super",
"(",
"AndroidProgressBar",
",",
"self",
")",
".",
"init_widget",
"(",
")",
"d",
"=",
"self",
".",
"declaration",
"self",
".",
"set_indeterminate",
"(",
"self",
".",
"indeterminate",
")",
"if",
"not",
"self",
".",
"indeterminate",
":",
"if",
"d",
".",
"max",
":",
"self",
".",
"set_max",
"(",
"d",
".",
"max",
")",
"if",
"d",
".",
"min",
":",
"self",
".",
"set_min",
"(",
"d",
".",
"min",
")",
"self",
".",
"set_progress",
"(",
"d",
".",
"progress",
")",
"if",
"d",
".",
"secondary_progress",
":",
"self",
".",
"set_secondary_progress",
"(",
"d",
".",
"secondary_progress",
")"
]
| 27.611111 | 16.5 |
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the Python SDK's behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters | [
"def",
"_remove_nulls",
"(",
"managed_clusters",
")",
":",
"attrs",
"=",
"[",
"'tags'",
"]",
"ap_attrs",
"=",
"[",
"'os_disk_size_gb'",
",",
"'vnet_subnet_id'",
"]",
"sp_attrs",
"=",
"[",
"'secret'",
"]",
"for",
"managed_cluster",
"in",
"managed_clusters",
":",
"for",
"attr",
"in",
"attrs",
":",
"if",
"getattr",
"(",
"managed_cluster",
",",
"attr",
",",
"None",
")",
"is",
"None",
":",
"delattr",
"(",
"managed_cluster",
",",
"attr",
")",
"for",
"ap_profile",
"in",
"managed_cluster",
".",
"agent_pool_profiles",
":",
"for",
"attr",
"in",
"ap_attrs",
":",
"if",
"getattr",
"(",
"ap_profile",
",",
"attr",
",",
"None",
")",
"is",
"None",
":",
"delattr",
"(",
"ap_profile",
",",
"attr",
")",
"for",
"attr",
"in",
"sp_attrs",
":",
"if",
"getattr",
"(",
"managed_cluster",
".",
"service_principal_profile",
",",
"attr",
",",
"None",
")",
"is",
"None",
":",
"delattr",
"(",
"managed_cluster",
".",
"service_principal_profile",
",",
"attr",
")",
"return",
"managed_clusters"
]
| 45.130435 | 18.782609 |
def get_exception():
"""Return full formatted traceback as a string."""
trace = ""
exception = ""
exc_list = traceback.format_exception_only(
sys.exc_info()[0], sys.exc_info()[1]
)
for entry in exc_list:
exception += entry
tb_list = traceback.format_tb(sys.exc_info()[2])
for entry in tb_list:
trace += entry
return "%s\n%s" % (exception, trace) | [
"def",
"get_exception",
"(",
")",
":",
"trace",
"=",
"\"\"",
"exception",
"=",
"\"\"",
"exc_list",
"=",
"traceback",
".",
"format_exception_only",
"(",
"sys",
".",
"exc_info",
"(",
")",
"[",
"0",
"]",
",",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
")",
"for",
"entry",
"in",
"exc_list",
":",
"exception",
"+=",
"entry",
"tb_list",
"=",
"traceback",
".",
"format_tb",
"(",
"sys",
".",
"exc_info",
"(",
")",
"[",
"2",
"]",
")",
"for",
"entry",
"in",
"tb_list",
":",
"trace",
"+=",
"entry",
"return",
"\"%s\\n%s\"",
"%",
"(",
"exception",
",",
"trace",
")"
]
| 30.230769 | 14.384615 |
def get(self, key, mem_map=True):
"""
Read and return the data stored for the given key.
Args:
key (str): The key to read the data from.
mem_map (bool): If ``True`` returns the data as
memory-mapped array, otherwise a copy is returned.
Note:
The container has to be opened in advance.
Returns:
numpy.ndarray: The stored data.
"""
self.raise_error_if_not_open()
if key in self._file:
data = self._file[key]
if not mem_map:
data = data[()]
return data
else:
return None | [
"def",
"get",
"(",
"self",
",",
"key",
",",
"mem_map",
"=",
"True",
")",
":",
"self",
".",
"raise_error_if_not_open",
"(",
")",
"if",
"key",
"in",
"self",
".",
"_file",
":",
"data",
"=",
"self",
".",
"_file",
"[",
"key",
"]",
"if",
"not",
"mem_map",
":",
"data",
"=",
"data",
"[",
"(",
")",
"]",
"return",
"data",
"else",
":",
"return",
"None"
]
| 25.384615 | 20.461538 |
def merge(a, b):
"""Merge two ranges with step == 1.
Parameters
----------
a : range
The first range.
b : range
The second range.
"""
_check_steps(a, b)
return range(min(a.start, b.start), max(a.stop, b.stop)) | [
"def",
"merge",
"(",
"a",
",",
"b",
")",
":",
"_check_steps",
"(",
"a",
",",
"b",
")",
"return",
"range",
"(",
"min",
"(",
"a",
".",
"start",
",",
"b",
".",
"start",
")",
",",
"max",
"(",
"a",
".",
"stop",
",",
"b",
".",
"stop",
")",
")"
]
| 20.583333 | 19.916667 |
def addSubsequenceOfFeature(self, parentid):
"""
This will add reciprocal triples like:
feature <is subsequence of> parent
parent has_subsequence feature
:param graph:
:param parentid:
:return:
"""
self.graph.addTriple(self.fid, self.globaltt['is subsequence of'], parentid)
# this should be expected to be done in reasoning not ETL
self.graph.addTriple(parentid, self.globaltt['has subsequence'], self.fid)
return | [
"def",
"addSubsequenceOfFeature",
"(",
"self",
",",
"parentid",
")",
":",
"self",
".",
"graph",
".",
"addTriple",
"(",
"self",
".",
"fid",
",",
"self",
".",
"globaltt",
"[",
"'is subsequence of'",
"]",
",",
"parentid",
")",
"# this should be expected to be done in reasoning not ETL",
"self",
".",
"graph",
".",
"addTriple",
"(",
"parentid",
",",
"self",
".",
"globaltt",
"[",
"'has subsequence'",
"]",
",",
"self",
".",
"fid",
")",
"return"
]
| 31.125 | 20.625 |
def subtype_ids(elements, subtype):
"""
returns the ids of all elements of a list that have a certain type,
e.g. show all the nodes that are ``TokenNode``\s.
"""
return [i for (i, element) in enumerate(elements)
if isinstance(element, subtype)] | [
"def",
"subtype_ids",
"(",
"elements",
",",
"subtype",
")",
":",
"return",
"[",
"i",
"for",
"(",
"i",
",",
"element",
")",
"in",
"enumerate",
"(",
"elements",
")",
"if",
"isinstance",
"(",
"element",
",",
"subtype",
")",
"]"
]
| 38.571429 | 9.428571 |
def rgb_percent_to_name(rgb_percent_triplet, spec=u'css3'):
"""
Convert a 3-tuple of percentages, suitable for use in an ``rgb()``
color triplet, to its corresponding normalized color name, if any
such name exists.
The optional keyword argument ``spec`` determines which
specification's list of color names will be used; valid values are
``html4``, ``css2``, ``css21`` and ``css3``, and the default is
``css3``.
If there is no matching name, ``ValueError`` is raised.
"""
return rgb_to_name(
rgb_percent_to_rgb(
normalize_percent_triplet(
rgb_percent_triplet
)
),
spec=spec
) | [
"def",
"rgb_percent_to_name",
"(",
"rgb_percent_triplet",
",",
"spec",
"=",
"u'css3'",
")",
":",
"return",
"rgb_to_name",
"(",
"rgb_percent_to_rgb",
"(",
"normalize_percent_triplet",
"(",
"rgb_percent_triplet",
")",
")",
",",
"spec",
"=",
"spec",
")"
]
| 30.409091 | 22.318182 |
def _transfer_data(self, remote_path, data):
"""
Used by the base _execute_module(), and in <2.4 also by the template
action module, and probably others.
"""
if isinstance(data, dict):
data = jsonify(data)
if not isinstance(data, bytes):
data = to_bytes(data, errors='surrogate_or_strict')
LOG.debug('_transfer_data(%r, %s ..%d bytes)',
remote_path, type(data), len(data))
self._connection.put_data(remote_path, data)
return remote_path | [
"def",
"_transfer_data",
"(",
"self",
",",
"remote_path",
",",
"data",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"data",
"=",
"jsonify",
"(",
"data",
")",
"if",
"not",
"isinstance",
"(",
"data",
",",
"bytes",
")",
":",
"data",
"=",
"to_bytes",
"(",
"data",
",",
"errors",
"=",
"'surrogate_or_strict'",
")",
"LOG",
".",
"debug",
"(",
"'_transfer_data(%r, %s ..%d bytes)'",
",",
"remote_path",
",",
"type",
"(",
"data",
")",
",",
"len",
"(",
"data",
")",
")",
"self",
".",
"_connection",
".",
"put_data",
"(",
"remote_path",
",",
"data",
")",
"return",
"remote_path"
]
| 38.428571 | 12.428571 |
def update_event(self, id, **kwargs): # noqa: E501
"""Update a specific event # noqa: E501
The following fields are readonly and will be ignored when passed in the request: <code>id</code>, <code>isEphemeral</code>, <code>isUserEvent</code>, <code>runningState</code>, <code>canDelete</code>, <code>canClose</code>, <code>creatorType</code>, <code>createdAt</code>, <code>updatedAt</code>, <code>createdEpochMillis</code>, <code>updatedEpochMillis</code>, <code>updaterId</code>, <code>creatorId</code>, and <code>summarizedEvents</code> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_event(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param Event body: Example Body: <pre>{ \"name\": \"Event API Example\", \"annotations\": { \"severity\": \"info\", \"type\": \"event type\", \"details\": \"description\" }, \"tags\" : [ \"eventTag1\" ], \"startTime\": 1490000000000, \"endTime\": 1490000000001 }</pre>
:return: ResponseContainerEvent
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_event_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.update_event_with_http_info(id, **kwargs) # noqa: E501
return data | [
"def",
"update_event",
"(",
"self",
",",
"id",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"update_event_with_http_info",
"(",
"id",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"update_event_with_http_info",
"(",
"id",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
]
| 72.272727 | 47.318182 |
def search_list(kb, value=None, match_type=None,
page=None, per_page=None, unique=False):
"""Search "mappings to" for knowledge."""
# init
page = page or 1
per_page = per_page or 10
if kb.kbtype == models.KnwKB.KNWKB_TYPES['written_as']:
# get the base query
query = api.query_kb_mappings(
kbid=kb.id,
value=value or '',
match_type=match_type or 's'
).with_entities(models.KnwKBRVAL.m_value)
# if you want a 'unique' list
if unique:
query = query.distinct()
# run query and paginate
return [item.m_value for item in
pagination.RestfulSQLAlchemyPagination(
query, page=page or 1,
per_page=per_page or 10
).items]
elif kb.kbtype == models.KnwKB.KNWKB_TYPES['dynamic']:
items = api.get_kbd_values(kb.name, value)
return pagination.RestfulPagination(
page=page, per_page=per_page,
total_count=len(items)
).slice(items)
return [] | [
"def",
"search_list",
"(",
"kb",
",",
"value",
"=",
"None",
",",
"match_type",
"=",
"None",
",",
"page",
"=",
"None",
",",
"per_page",
"=",
"None",
",",
"unique",
"=",
"False",
")",
":",
"# init",
"page",
"=",
"page",
"or",
"1",
"per_page",
"=",
"per_page",
"or",
"10",
"if",
"kb",
".",
"kbtype",
"==",
"models",
".",
"KnwKB",
".",
"KNWKB_TYPES",
"[",
"'written_as'",
"]",
":",
"# get the base query",
"query",
"=",
"api",
".",
"query_kb_mappings",
"(",
"kbid",
"=",
"kb",
".",
"id",
",",
"value",
"=",
"value",
"or",
"''",
",",
"match_type",
"=",
"match_type",
"or",
"'s'",
")",
".",
"with_entities",
"(",
"models",
".",
"KnwKBRVAL",
".",
"m_value",
")",
"# if you want a 'unique' list",
"if",
"unique",
":",
"query",
"=",
"query",
".",
"distinct",
"(",
")",
"# run query and paginate",
"return",
"[",
"item",
".",
"m_value",
"for",
"item",
"in",
"pagination",
".",
"RestfulSQLAlchemyPagination",
"(",
"query",
",",
"page",
"=",
"page",
"or",
"1",
",",
"per_page",
"=",
"per_page",
"or",
"10",
")",
".",
"items",
"]",
"elif",
"kb",
".",
"kbtype",
"==",
"models",
".",
"KnwKB",
".",
"KNWKB_TYPES",
"[",
"'dynamic'",
"]",
":",
"items",
"=",
"api",
".",
"get_kbd_values",
"(",
"kb",
".",
"name",
",",
"value",
")",
"return",
"pagination",
".",
"RestfulPagination",
"(",
"page",
"=",
"page",
",",
"per_page",
"=",
"per_page",
",",
"total_count",
"=",
"len",
"(",
"items",
")",
")",
".",
"slice",
"(",
"items",
")",
"return",
"[",
"]"
]
| 39.2 | 11.5 |
def process_msg(self, msg):
"""Process messages from the event stream."""
jmsg = json.loads(msg)
msgtype = jmsg['MessageType']
msgdata = jmsg['Data']
_LOGGER.debug('New websocket message received of type: %s', msgtype)
if msgtype == 'Sessions':
self._sessions = msgdata
# Check for new devices and update as needed.
self.update_device_list(self._sessions)
"""
May process other message types in the future.
Other known types are:
- PlaybackStarted
- PlaybackStopped
- SessionEnded
""" | [
"def",
"process_msg",
"(",
"self",
",",
"msg",
")",
":",
"jmsg",
"=",
"json",
".",
"loads",
"(",
"msg",
")",
"msgtype",
"=",
"jmsg",
"[",
"'MessageType'",
"]",
"msgdata",
"=",
"jmsg",
"[",
"'Data'",
"]",
"_LOGGER",
".",
"debug",
"(",
"'New websocket message recieved of type: %s'",
",",
"msgtype",
")",
"if",
"msgtype",
"==",
"'Sessions'",
":",
"self",
".",
"_sessions",
"=",
"msgdata",
"# Check for new devices and update as needed.",
"self",
".",
"update_device_list",
"(",
"self",
".",
"_sessions",
")",
"\"\"\"\n May process other message types in the future.\n Other known types are:\n - PlaybackStarted\n - PlaybackStopped\n - SessionEnded\n \"\"\""
]
| 33.777778 | 12.388889 |