content | sha1 | id
---|---|---
stringlengths 35–762k | stringlengths 40–40 | int64 0–3.66M
def svd(A):
"""
Singular Value Decomposition
Parameters
----------
A: af.Array
A 2 dimensional arrayfire array.
Returns
-------
(U,S,Vt): tuple of af.Arrays
- U - A unitary matrix
           - S - An array containing the elements of the diagonal matrix
- Vt - A unitary matrix
Note
----
- The original matrix `A` is preserved and additional storage space is required for decomposition.
- If the original matrix `A` need not be preserved, use `svd_inplace` instead.
- The original matrix `A` can be reconstructed using the outputs in the following manner.
>>> Smat = af.diag(S, 0, False)
>>> A_recon = af.matmul(af.matmul(U, Smat), Vt)
"""
U = Array()
S = Array()
Vt = Array()
safe_call(backend.get().af_svd(c_pointer(U.arr), c_pointer(S.arr), c_pointer(Vt.arr), A.arr))
return U, S, Vt | 7b7d48dc1782d1e02eca01b657895372170caf6c | 3,649,740 |
def parse_content_type_header(value):
""" maintype "/" subtype *( ";" parameter )
    The maintype and subtype are tokens.  Theoretically they could
be checked against the official IANA list + x-token, but we
don't do that.
"""
ctype = ContentType()
recover = False
if not value:
ctype.defects.append(errors.HeaderMissingRequiredValue(
"Missing content type specification"))
return ctype
try:
token, value = get_token(value)
except errors.HeaderParseError:
ctype.defects.append(errors.InvalidHeaderDefect(
"Expected content maintype but found {!r}".format(value)))
_find_mime_parameters(ctype, value)
return ctype
ctype.append(token)
# XXX: If we really want to follow the formal grammar we should make
    # maintype and subtype specialized TokenLists here. Probably not worth it.
if not value or value[0] != '/':
ctype.defects.append(errors.InvalidHeaderDefect(
"Invalid content type"))
if value:
_find_mime_parameters(ctype, value)
return ctype
ctype.maintype = token.value.strip().lower()
ctype.append(ValueTerminal('/', 'content-type-separator'))
value = value[1:]
try:
token, value = get_token(value)
except errors.HeaderParseError:
ctype.defects.append(errors.InvalidHeaderDefect(
"Expected content subtype but found {!r}".format(value)))
_find_mime_parameters(ctype, value)
return ctype
ctype.append(token)
ctype.subtype = token.value.strip().lower()
if not value:
return ctype
if value[0] != ';':
ctype.defects.append(errors.InvalidHeaderDefect(
"Only parameters are valid after content type, but "
"found {!r}".format(value)))
# The RFC requires that a syntactically invalid content-type be treated
# as text/plain. Perhaps we should postel this, but we should probably
# only do that if we were checking the subtype value against IANA.
del ctype.maintype, ctype.subtype
_find_mime_parameters(ctype, value)
return ctype
ctype.append(ValueTerminal(';', 'parameter-separator'))
ctype.append(parse_mime_parameters(value[1:]))
return ctype | 24722c1dd5784896fd6aa8b39cd29eb76fec155a | 3,649,741 |
import numpy
def csr_matrix_multiply(S, x): # noqa
"""Multiplies a :class:`scipy.sparse.csr_matrix` S by an object-array vector x.
"""
h, w = S.shape
result = numpy.empty_like(x)
for i in range(h):
result[i] = sum(S.data[idx]*x[S.indices[idx]] # noqa pylint:disable=unsupported-assignment-operation
for idx in range(S.indptr[i], S.indptr[i+1]))
return result | 77e1630cbdd59f53b1b2885b731e73a14fb18b35 | 3,649,742 |
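A minimal usage sketch for csr_matrix_multiply above, assuming scipy is installed; the result is checked against the dense matrix-vector product:
import numpy
import scipy.sparse as sp

S = sp.csr_matrix(numpy.array([[1.0, 0.0], [2.0, 3.0]]))
x = numpy.empty(2, dtype=object)  # object-dtype vector, as the docstring describes
x[0], x[1] = 10.0, 20.0
print(csr_matrix_multiply(S, x))      # [10.0 80.0]
print(S @ numpy.array([10.0, 20.0]))  # array([10., 80.])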
def calculate_sem_IoU(pred_np, seg_np, num_classes):
"""Calculate the Intersection Over Union of the predicted classes and the ground truth
Args:
pred_np (array_like): List of predicted class labels
seg_np (array_like): List of ground truth labels
num_classes (int): Number of classes in the dataset
"""
I_all = np.zeros(num_classes)
U_all = np.zeros(num_classes)
for sem_idx in range(len(seg_np)):
for sem in range(num_classes):
I = np.sum(np.logical_and(pred_np[sem_idx] == sem, seg_np[sem_idx] == sem))
U = np.sum(np.logical_or(pred_np[sem_idx] == sem, seg_np[sem_idx] == sem))
I_all[sem] += I
U_all[sem] += U
return I_all / U_all | e8e360cb8aad0f2226aa54c88c01485840017f2d | 3,649,743 |
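A small worked example for calculate_sem_IoU above, assuming numpy is imported as np; one sample of four points and two classes gives per-class IoUs of 0.5 and 2/3:
pred = [np.array([0, 0, 1, 1])]
seg = [np.array([0, 1, 1, 1])]
print(calculate_sem_IoU(pred, seg, num_classes=2))  # [0.5        0.66666667]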
def _invert(M, eps):
"""
Invert matrices, with special fast handling of the 1x1 and 2x2 cases.
    Will generate errors if the matrices are singular: the user must handle this
    through their own regularization scheme.
Parameters
----------
M: np.ndarray [shape=(..., nb_channels, nb_channels)]
matrices to invert: must be square along the last two dimensions
eps: [scalar]
regularization parameter to use _only in the case of matrices
bigger than 2x2
Returns
-------
invM: np.ndarray, [shape=M.shape]
inverses of M
"""
nb_channels = M.shape[-1]
if nb_channels == 1:
# scalar case
invM = 1.0/(M+eps)
elif nb_channels == 2:
# two channels case: analytical expression
det = (
M[..., 0, 0]*M[..., 1, 1] -
M[..., 0, 1]*M[..., 1, 0])
invDet = 1.0/(det)
invM = np.empty_like(M)
invM[..., 0, 0] = invDet*M[..., 1, 1]
invM[..., 1, 0] = -invDet*M[..., 1, 0]
invM[..., 0, 1] = -invDet*M[..., 0, 1]
invM[..., 1, 1] = invDet*M[..., 0, 0]
else:
# general case : no use of analytical expression (slow!)
invM = np.linalg.pinv(M, eps)
return invM | 119c16ad816dd37b7e5eb23c121ef5affc8851f5 | 3,649,744 |
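A quick sanity check of the analytical 2x2 branch of _invert above, assuming numpy is imported as np; it should agree with np.linalg.inv:
M = np.array([[[4.0, 7.0], [2.0, 6.0]]])  # shape (1, 2, 2)
invM = _invert(M, eps=1e-10)              # eps is unused in the 2x2 branch
print(np.allclose(invM[0], np.linalg.inv(M[0])))  # True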
from pathlib import Path
def read_densecsv_to_anndata(ds_file: Path):
"""Reads a dense text file in csv format into the AnnData format."""
return read_densemat_to_anndata(ds_file, sep=",") | cee99509b6744972ad7a9530d66b59c06f7deec5 | 3,649,745 |
def _singleton(name):
"""Returns a singleton object which represents itself as `name` when printed,
but is only comparable (via identity) to itself."""
return type(name, (), {'__repr__': lambda self: name})() | b07003e1716115864bf1914d4b523b36d0f0471f | 3,649,747 |
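A short usage sketch for _singleton above; the sentinel prints as its name and compares equal only to itself (by identity):
MISSING = _singleton('MISSING')
print(repr(MISSING))          # MISSING
print(MISSING == MISSING)     # True  (same object)
print(MISSING == 'MISSING')   # False (different object)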
def get_zone(*, zone_name: str):
""" Get zone with given zone name.
Args:
zone_name: zone name, e.g. "haomingyin.com"
Returns:
json: zone details
"""
params = dict(name=zone_name)
zones = _get("zones", params=params)
if not zones:
raise CloudflareAPIError(f"Unable to fetch zone {zone_name}")
return zones[0] | 5e79ec900af7e5cc4d457d04292e55e2e3abc9ec | 3,649,748 |
def execute(connection, cmdline, **kwargs):
"""generic function to execute command for device
| Parameters:
| connection (Adaptor): connection of device
| cmdline (str): command line
| kwargs (dict): additional keyword arguments for command line execution
| Returns:
| str: output of command line
"""
result = Dgs.execute_cmdline(connection, cmdline, **kwargs)
return result | 7f3424cb8a747fab87a5a67c880ec755d9c9cb96 | 3,649,750 |
def json_to_numpy_mask(shapes, width, height):
"""Converts JSON labels with pixel classifications into NumPy arrays"""
img = Image.new("L", (width, height), 0)
for shape in shapes:
if shape["label"] == "barrel":
barrel_lst = [tuple(i) for i in shape["points"]]
ImageDraw.Draw(img).polygon(barrel_lst, outline=1, fill=1)
if shape["label"] == "line":
line_lst = [tuple(i) for i in shape["points"]]
ImageDraw.Draw(img).polygon(line_lst, outline=2, fill=2)
mask = np.array(img)
return mask | 33757246478d854d15f71a0737174ac6952514ef | 3,649,751 |
import types
import typing
def _format_call(value: ast3.Call, context: types.Context) -> typing.Text:
"""Format a function call like 'print(a*b, foo=x)'"""
try:
return _format_call_horizontal(value, context)
except errors.NotPossible:
return _format_call_vertical(value, context) | 2019f50943bb597948248dfda9ce8620d3286377 | 3,649,752 |
def get_tags(repo_dir):
"""
_get_tags_
returns a list of tags for the given repo, ordered as
newest first
"""
repo = git.Repo(repo_dir)
tags_with_date = {
tag.name: tag.commit.committed_date
for tag in repo.tags
}
return sorted(tags_with_date, key=tags_with_date.get, reverse=True) | aa5462ff0b15501cf486a2bf49837f0dd60ecfaf | 3,649,753 |
def readh5(filename, GroupName=None):
"""
Read the HDF5 file 'filename' into a class. Groups within the hdf5 file are
by default loaded as sub classes, unless they include a _read_as attribute
(see sharpy.postproc.savedata). In this case, group can be loaded as classes,
dictionaries, lists or tuples.
filename: string to file location
    GroupName = string or list of strings. Default is None: if given, allows
    reading only a specific group of the h5 file.
Warning:
Groups that need to be read as lists and tuples are assumed to conform to
the format used in sharpy.postproc.savedata
"""
Hinst = ReadInto()
### read and scan file
hdfile = h5.File(filename, 'r')
NamesList = [] # dataset names
hdfile.visit(NamesList.append)
### Identify higher level groups / attributes
if GroupName is None:
MainLev = []
for name in NamesList:
if '/' not in name: MainLev.append(name)
else:
if type(GroupName) is list:
MainLev = GroupName
else:
MainLev = [GroupName]
### Loop through higher level
for name in MainLev:
# sub-group
if type(hdfile[name]) is h5._hl.group.Group:
Ginst = read_group(hdfile[name])
try:
Ginst.name = name
except:
pass
setattr(Hinst, name, Ginst)
else:
setattr(Hinst, name, hdfile[name][()])
# close and return
hdfile.close()
return Hinst | 9ab33071916a634da6ddc68df56fe29429ef6313 | 3,649,754 |
def calc_E_E_hs_d_t(W_dash_k_d_t, W_dash_s_d_t, W_dash_w_d_t, W_dash_b1_d_t, W_dash_b2_d_t, W_dash_ba1_d_t,
theta_ex_d_Ave_d,
L_dashdash_ba2_d_t):
"""1時間当たりの給湯機の消費電力量 (kWh/h) (1)
Args:
W_dash_k_d_t(ndarray): 1時間当たりの台所水栓における太陽熱補正給湯負荷 (MJ/h)
W_dash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における太陽熱補正給湯負荷 (MJ/h)
W_dash_w_d_t(ndarray): 1時間当たりの洗面水栓における太陽熱補正給湯負荷 (MJ/h)
W_dash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における太陽熱補正給湯負荷 (MJ/h)
W_dash_b2_d_t(ndarray): 1時間当たりの自動湯はり時における太陽熱補正給湯負荷 (MJ/h)
W_dash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における太陽熱補正給湯負荷 (MJ/h)
theta_ex_d_Ave_d(ndarray): 日平均外気温度 (℃)
L_dashdash_ba2_d_t(ndarray): 1時間当たりの浴槽追焚時における太陽熱補正給湯負荷 (MJ/h)
Returns:
ndarray: 1日当たりの給湯機の消費電力量 (kWh/d)
"""
    # Electricity consumed by auxiliary equipment during standby and during tap hot water supply (Eq. 2)
E_E_hs_aux1_d_t = get_E_E_hs_aux1_d_t(W_dash_k_d_t, W_dash_s_d_t, W_dash_w_d_t, W_dash_b1_d_t, W_dash_ba1_d_t,
theta_ex_d_Ave_d)
    # Electricity consumed by auxiliary equipment during bathtub filling (Eq. 3)
E_E_hs_aux2_d_t = get_E_E_hs_aux2_d_t(W_dash_b2_d_t)
    # Electricity consumed by auxiliary equipment while keeping the bath warm (Eq. 4)
E_E_hs_aux3_d_t = calc_E_E_hs_aux3_d_t(L_dashdash_ba2_d_t)
print('E_E_hs_aux1 = {}'.format(np.sum(E_E_hs_aux1_d_t)))
print('E_E_hs_aux2 = {}'.format(np.sum(E_E_hs_aux2_d_t)))
print('E_E_hs_aux3 = {}'.format(np.sum(E_E_hs_aux3_d_t)))
return E_E_hs_aux1_d_t + E_E_hs_aux2_d_t + E_E_hs_aux3_d_t | d76a65d4d30b0c2cf59b837e188473827425d576 | 3,649,755 |
def best_promo(order):
"""
    Select the best available discount
"""
return max(promo(order) for promo in promos) | db4001b4e04a167171da02da92e4234489bf13a5 | 3,649,756 |
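A minimal sketch for best_promo above; it assumes promos is a list of promotion functions defined in the same module, and treats the order as a plain number purely for illustration:
promos = [lambda order: order * 0.05,   # hypothetical 5% promotion
          lambda order: order * 0.10]   # hypothetical 10% promotion
print(best_promo(100))  # 10.0, the largest discount offered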
def random_joint_positions(robot):
"""
Generates random joint positions within joint limits for the given robot.
@type robot: orpy.Robot
@param robot: The OpenRAVE robot
@rtype: np.array
    @return: Random joint positions within the active DOF limits.
"""
# Get the limits of the active DOFs
lower, upper = robot.GetActiveDOFLimits()
positions = lower + np.random.rand(len(lower))*(upper-lower)
return positions | 49fe770a8cc22945e79c892d54754c50f19974e8 | 3,649,757 |
def test_cancel_examples(example):
"""
We can't specify examples in test_fuzz_cancel (because we use data, see
https://hypothesis.readthedocs.io/en/latest/data.html#interactive-draw),
so we have this here for explicit examples.
"""
stream_req, stream_resp, draws = example
def draw(lst):
if draws:
this_draw = draws.pop(0)
for name, evt in lst:
if name == this_draw:
return name, evt
raise AssertionError(
f"{this_draw} not in list: {[name for name, _ in lst]}"
)
else:
return lst[0]
_test_cancel(stream_req, stream_resp, draw) | c3a3a970a77f136c39e86666c0485163d0fbb408 | 3,649,758 |
import pickle
def fetch_pickle(filename):
"""
Fetches any variable saved into a picklefile with the given filename.
Parameters:
filename (str): filename of the pickle file
Returns:
variable (any pickle compatible type): variable that was saved into the picklefile.
"""
with open(filename, 'rb') as picklefile:
variable = pickle.load(picklefile)
return variable | 172c18520619d102b520658949d2464d5ecfb05c | 3,649,759 |
def check_clockwise(poly):
"""Checks if a sequence of (x,y) polygon vertice pairs is ordered clockwise or not.
NOTE: Counter-clockwise (=FALSE) vertice order reserved for inner ring polygons"""
clockwise = False
if (sum(x0*y1 - x1*y0 for ((x0, y0), (x1, y1)) in zip(poly, poly[1:] + [poly[0]]))) < 0:
clockwise = not clockwise
return clockwise | 5e9f8fba6cd11e33dfe60a89e62eeac2ac24c805 | 3,649,760 |
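A quick check of the shoelace-sign test in check_clockwise above, using a unit square traversed in opposite directions:
square = [(0, 0), (0, 1), (1, 1), (1, 0)]   # clockwise in standard x/y axes
print(check_clockwise(square))        # True
print(check_clockwise(square[::-1]))  # False (counter-clockwise, i.e. inner ring order)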
def bookList(request):
"""测试"""
# 查询书籍信息:使用默认的管理器对象 : 在管理器上调用过滤器方法会返回查询集
# book_list = BookInfo.objects.all()
# 查询书籍信息:使用自定义的管理器对象
# book_list = BookInfo.books.all()
# 以下代码演示,自定义管理器的类给模型类新增初始化方法: 类比books.all()
# book1 = BookInfo.books.create_model('zxc')
# book2 = BookInfo.books.create_model('zxj')
# book_list = [book1,book2]
# 以下代码演示,限制查询集:limit 0,2
# book_list = BookInfo.books.all()[:2]
# 以下代码演示基础条件查询 : filter(模型属性__条件运算符=值)
# 1.查询id为1的书籍 : exact 判断相等,可以省虐,直接等号, pk 等价于 主键
# book_list = BookInfo.books.filter(id=1)
# 2.查询书名包含‘湖’的书籍 : contains :包含,类似于 like
# book_list = BookInfo.books.filter(name__contains='湖')
# 3.查询书名以‘部’结尾的书籍:endswith :以什么什么结尾;startswith以什么什么开头
# book_list = BookInfo.books.filter(name__endswith='部')
# 4.查询书名不为空的书籍 : isnull : 判断是否为空,False表示不为空,两个否定表示肯定 "容易懵逼"
# book_list = BookInfo.books.filter(name__isnull=False)
# 5.查询编号为2或4的书籍 in : 表示只能在指定的元素中选择,不表示区间 "容易懵逼"
# book_list = BookInfo.books.filter(id__in=[2,4])
# 6.查询编号大于2的书籍 gt 大于, gte 大于等于, lt 小于, lte 小于等于
# book_list = BookInfo.books.filter(id__gt=2)
# 7.查询id不等于3的书籍:exclude 查询满足条件以外的数据
# book_list = BookInfo.books.exclude(id=3)
# 8.查询1980年发表的书籍
# book_list = BookInfo.books.filter(pub_date__year='1980')
# 9.查询1990年1月1日后发表的书籍
# book_list = BookInfo.books.filter(pub_date__gt='1990-1-1')
# from datetime import date
# book_list = BookInfo.books.filter(pub_date__gt=date(1990,1,1))
# 以下代码,演示F对象和Q对象查询 : F('模型属性') Q(属性名__条件运算符=值) | Q(属性名__条件运算符=值)
# 1.查询阅读量大于评论量的书籍
# book_list = BookInfo.books.filter(readcount__gt=F('commentcount'))
# 2.查询阅读量大于2倍评论量的书籍 : F()支持计算
# book_list = BookInfo.books.filter(readcount__gt=F('commentcount') * 2)
# 1.查询阅读量大于20,或编号小于3的图书
# book_list = BookInfo.books.filter(Q(readcount__gt=20) | Q(id__lt=3))
# 2.查询编号不等于3的书籍 ~Q()
book_list = BookInfo.books.filter(~Q(id=3))
# 以下代码演示聚合过滤器aggregate();该过滤器可以调用出聚合函数的 Avg(), Sum(), max(), min(), count()
# 需求:计算阅读量的总数 aggregate() 返回单个字典对象 {'readcount__sum': 134}
total_count = BookInfo.books.aggregate(Sum('readcount'))
# 以下代码演示基础关联查询
# 1.查询编号为1的图书中所有人物信息 : 一查多 : peopleinfo_set
book1 = BookInfo.books.get(id=1)
people_list1 = book1.peopleinfo_set.all()
# 2.查询编号为1的英雄出自的书籍 : 多查一 : people1.book : 调用关联的外键属性即可
people1 = PeopleInfo.objects.get(id=1)
book2 = people1.book
# 以下代码演示内连接 : filter(关联的模型类小写__属性名__条件运算符=值)
# 1.查询书名为"天龙八部"的所有人物信息 : 一查多 : 内连接需要使用外键作为关联的模型类
people_list2 = PeopleInfo.objects.filter(book__name='天龙八部')
# 2.查询书籍中人物的描述包含"降龙"的书籍 : 多查一
book_list2 = BookInfo.books.filter(peopleinfo__description__contains='降龙')
# 构造上下文
context = {
'book_list':book_list,
'total_count':total_count,
'people_list1':people_list1,
'book2':book2,
'people_list2':people_list2,
'book_list2':book_list2
}
return render(request, 'Book/booklist.html', context) | b9b05f259d5cdb9d0570268c0f08eaafc8ba6cc1 | 3,649,761 |
def format_stats(stats):
"""Format statistics for printing to a table"""
result = ''
for key, value in stats.items():
result += f'{key} - {value}\n'
return result[:-1] | 2d01b6c48b83f8e8810f4609183b39fad871f942 | 3,649,762 |
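A tiny example for format_stats above:
print(format_stats({'wins': 3, 'losses': 1}))
# wins - 3
# losses - 1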
def imcrop(img, bboxes, scale=1.0, pad_fill=None):
"""Crop image patches.
3 steps: scale the bboxes -> clip bboxes -> crop and pad.
Args:
img (ndarray): Image to be cropped.
bboxes (ndarray): Shape (k, 4) or (4, ), location of cropped bboxes.
        scale (float, optional): Scale ratio of bboxes, the default value
            1.0 means no scaling.
pad_fill (Number | list[Number]): Value to be filled for padding.
Default: None, which means no padding.
Returns:
list[ndarray] | ndarray: The cropped image patches.
"""
chn = 1 if img.ndim == 2 else img.shape[2]
if pad_fill is not None:
if isinstance(pad_fill, (int, float)):
pad_fill = [pad_fill for _ in range(chn)]
assert len(pad_fill) == chn
_bboxes = bboxes[None, ...] if bboxes.ndim == 1 else bboxes
scaled_bboxes = bbox_scaling(_bboxes, scale).astype(np.int32)
clipped_bbox = bbox_clip(scaled_bboxes, img.shape)
patches = []
for i in range(clipped_bbox.shape[0]):
x1, y1, x2, y2 = tuple(clipped_bbox[i, :])
if pad_fill is None:
patch = img[y1:y2 + 1, x1:x2 + 1, ...]
else:
_x1, _y1, _x2, _y2 = tuple(scaled_bboxes[i, :])
if chn == 1:
patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1)
else:
patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1, chn)
patch = np.array(
pad_fill, dtype=img.dtype) * np.ones(
patch_shape, dtype=img.dtype)
x_start = 0 if _x1 >= 0 else -_x1
y_start = 0 if _y1 >= 0 else -_y1
w = x2 - x1 + 1
h = y2 - y1 + 1
patch[y_start:y_start + h, x_start:x_start + w,
...] = img[y1:y1 + h, x1:x1 + w, ...]
patches.append(patch)
if bboxes.ndim == 1:
return patches[0]
else:
return patches | 244d6c39410c5d51780a8d3a261810986c17d779 | 3,649,763 |
def timestamp2str(ts):
""" Converts Timestamp object to str containing date and time
"""
date = ts.date().strftime("%Y-%m-%d")
time = ts.time().strftime("%H:%M:%S")
return ' '.join([date, time]) | 0e847a8af0cbbacf18df911e3070ac7c70e504b7 | 3,649,764 |
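A minimal usage sketch for timestamp2str above, assuming pandas is available:
import pandas as pd
print(timestamp2str(pd.Timestamp('2021-07-01 08:30:00')))  # 2021-07-01 08:30:00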
from operator import index
def define_class_functions(processes, stages, progress):
"""
Define and return class of unit tests for stand-alone functions
for the given configuration.
"""
class Test_functions(TestCase):
def test_mapreduce(self):
logger = log() if progress else None
result = mr4mp.mapreduce(
index, merge, range(50),
processes=processes, stages=stages, progress=logger
)
self.assertEqual(result, result_reference)
if progress:
self.assertEqual(
logger.to_list(),
list(range(50)) if stages is not None else []
)
def test_mapconcat(self):
logger = log() if progress else None
result = mr4mp.mapconcat(
add_one, range(0, 100),
processes=processes, stages=stages, progress=logger
)
self.assertEqual(list(result), list(range(1, 101)))
if progress:
self.assertEqual(
logger.to_list(),
list(range(100)) if stages is not None else []
)
return Test_functions | 0dc8df39e49f1e7591be7a7b8e80dc1266714cc4 | 3,649,765 |
def concept(*reference):
"""Reference to a semantic concept.
Parameters
----------
*reference : :obj:`str`
Keys pointing to the ruleset defining this concept in the rules file of
an ontology.
Returns
-------
:obj:`CubeProxy`
A textual reference to the concept that can be solved by the query
processor.
Examples
--------
>>> sq.concept("entity", "water")
{
"type": "concept",
"reference": [
"entity",
"water"
]
}
"""
obj = {"type": "concept", "reference": reference}
return CubeProxy(obj) | c3e01f48ca962c5312a0cf8d6deb66eecc062078 | 3,649,766 |
import torch
def collate_tensors(batch, stack_tensors=torch.stack):
""" Collate a list of type ``k`` (dict, namedtuple, list, etc.) with tensors.
Inspired by:
https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/collate.py#L31
Args:
batch (list of k): List of rows of type ``k``.
stack_tensors (callable): Function to stack tensors into a batch.
Returns:
k: Collated batch of type ``k``.
Example use case:
This is useful with ``torch.utils.data.dataloader.DataLoader`` which requires a collate
function. Typically, when collating sequences you'd set
``collate_fn=partial(collate_tensors, stack_tensors=encoders.text.stack_and_pad_tensors)``.
Example:
>>> import torch
>>> batch = [
... { 'column_a': torch.randn(5), 'column_b': torch.randn(5) },
... { 'column_a': torch.randn(5), 'column_b': torch.randn(5) },
... ]
>>> collated = collate_tensors(batch)
>>> {k: t.size() for (k, t) in collated.items()}
{'column_a': torch.Size([2, 5]), 'column_b': torch.Size([2, 5])}
"""
if all([torch.is_tensor(b) for b in batch]):
return stack_tensors(batch)
if (all([isinstance(b, dict) for b in batch]) and
all([b.keys() == batch[0].keys() for b in batch])):
return {key: collate_tensors([d[key] for d in batch], stack_tensors) for key in batch[0]}
elif all([is_namedtuple(b) for b in batch]): # Handle ``namedtuple``
return batch[0].__class__(**collate_tensors([b._asdict() for b in batch], stack_tensors))
elif all([isinstance(b, list) for b in batch]):
# Handle list of lists such each list has some column to be batched, similar to:
# [['a', 'b'], ['a', 'b']] → [['a', 'a'], ['b', 'b']]
transposed = zip(*batch)
return [collate_tensors(samples, stack_tensors) for samples in transposed]
else:
return batch | cbd1098188e3d47b705e25edeae636624ebbec47 | 3,649,767 |
def build_boundaries_layers(cyt_coord, nuc_coord, rna_coord):
"""
Parameters
----------
cyt_coord : np.ndarray, np.int64
Array of cytoplasm boundaries coordinates with shape (nb_points, 2).
nuc_coord : np.ndarray, np.int64
Array of nucleus boundaries coordinates with shape (nb_points, 2).
rna_coord : np.ndarray, np.int64
Array of mRNAs coordinates with shape (nb_points, 2) or
(nb_points, 3).
Returns
-------
cyt_boundaries : np.ndarray, np.float32
        A 2-d binary tensor with shape (y, x) showing cytoplasm boundaries.
nuc_boundaries : np.ndarray, np.float32
A 2-d binary tensor with shape (y, x) showing nucleus boundaries.
rna_layer : np.ndarray, np.float32
Binary image of mRNAs localizations with shape (y, x).
"""
# check parameters
stack.check_array(cyt_coord,
ndim=2,
dtype=[np.int64])
if nuc_coord is not None:
stack.check_array(nuc_coord,
ndim=2,
dtype=[np.int64])
if rna_coord is not None:
stack.check_array(rna_coord,
ndim=2,
dtype=[np.int64])
# build surface binary matrices from coordinates
cyt_surface, nuc_surface, rna_layer, _ = stack.from_coord_to_surface(
cyt_coord=cyt_coord,
nuc_coord=nuc_coord,
rna_coord=rna_coord)
# from surface binary matrices to boundaries binary matrices
cyt_boundaries = stack.from_surface_to_boundaries(cyt_surface)
nuc_boundaries = stack.from_surface_to_boundaries(nuc_surface)
# cast layer in float32
cyt_boundaries = stack.cast_img_float32(cyt_boundaries)
nuc_boundaries = stack.cast_img_float32(nuc_boundaries)
rna_layer = stack.cast_img_float32(rna_layer)
return cyt_boundaries, nuc_boundaries, rna_layer | a99efab6ccc3044c04df330ca9c3ce0ebbf0c413 | 3,649,768 |
def predicted_actual_chart(actual, predicted, title="Predicted vs Actual Values"):
"""Predicted vs actual values curve."""
source = pd.DataFrame({"x": actual, "y": predicted})
scatter = scatter_chart(source, "Actual", "Residual", title=title)
vmin = source.min().min()
vmax = source.max().max()
_df = pd.DataFrame({"x": [vmin, vmax], "y": [vmin, vmax]})
baseline = alt.Chart(_df).mark_line(strokeDash=[20, 5], color="black").encode(
alt.X("x"),
alt.Y("y"),
)
return scatter + baseline | 91588a9d79bfa8eaea39067042b7e4b3c6784b7e | 3,649,769 |
from operator import mul
from operator import inv
def interpolate(R1,R2,u):
"""Interpolate linearly between the two rotations R1 and R2. """
R = mul(inv(R1),R2)
m = moment(R)
angle = vectorops.norm(m)
if angle==0: return R1
axis = vectorops.div(m,angle)
return mul(R1,rotation(axis,angle*u)) | d4aaa976e52b6f44f44c4f26eccb59f1b85f9f0b | 3,649,772 |
def plot_spikes(
spikes: dict,
ax: plt.Axes = None,
markersize: int = None,
color: tp.Union[str, tp.Any] = "k",
) -> plt.Axes:
"""Plot Spikes returned by NeuroDriver's OutputRecorder"""
if ax is None:
fig = plt.gcf()
ax = fig.add_subplot()
for n, (name, ss) in enumerate(spikes.items()):
if "data" not in ss:
raise EOSPlotterException(f"'data' field missing for node {name}")
if "data" in ss and "time" in ss:
raise EOSPlotterException(
f"Data for node {name} is not compatible with required format, "
"data mush only have 'data' field. Did you mean "
"to call plot_data?"
)
if len(ss["data"]) > 0:
ax.plot(
ss["data"],
np.full(len(ss["data"]), n),
"|",
c=color,
markersize=markersize,
)
return ax | d757c9c342e34e45820ee81f45e0bc59b8cbc277 | 3,649,773 |
def boardToString(board):
"""
return a string representation of the current board.
"""
# global board
# b = board
rg = range(board.size())
s = "┌────┬────┬────┬────┐\n|"+"|\n╞════╪════╪════╪════╡\n|".join(
['|'.join([getCellStr(board, x, y) for x in rg]) for y in rg])
s = "\n" + s + "|\n└────┴────┴────┴────┘"
return s | 2ea53d0ce7448ab0073176195195f1c4fb028a71 | 3,649,774 |
def create_data_ops(batch_size, num_elements_min_max):
"""Returns graphs containg the inputs and targets for classification.
Refer to create_data_dicts_tf and create_linked_list_target for more details.
Args:
batch_size: batch size for the `input_graphs`.
num_elements_min_max: a 2-`tuple` of `int`s which define the [lower, upper)
range of the number of elements per list.
Returns:
inputs_op: a `graphs.GraphsTuple` which contains the input list as a graph.
targets_op: a `graphs.GraphsTuple` which contains the target as a graph.
sort_indices_op: a `graphs.GraphsTuple` which contains the sort indices of
the list elements a graph.
ranks_op: a `graphs.GraphsTuple` which contains the ranks of the list
elements as a graph.
data_dicts_to_graphs_tuple:
Creates a `graphs.GraphsTuple` containing tensors from data dicts.
"""
inputs_op, sort_indices_op, ranks_op = create_graph_dicts_tf(
batch_size, num_elements_min_max)
# show["inputs_graphs"] = inputs_op
# show["sort_indices_graphs"] = sort_indices_op
# show["ranks_graphs"] = ranks_op
inputs_op = utils_tf.data_dicts_to_graphs_tuple(inputs_op)
sort_indices_op = utils_tf.data_dicts_to_graphs_tuple(sort_indices_op)
ranks_op = utils_tf.data_dicts_to_graphs_tuple(ranks_op)
inputs_op = utils_tf.fully_connect_graph_dynamic(inputs_op) # Adds edges to a graph by fully-connecting the nodes.
sort_indices_op = utils_tf.fully_connect_graph_dynamic(sort_indices_op)
ranks_op = utils_tf.fully_connect_graph_dynamic(ranks_op)
targets_op = create_linked_list_target(batch_size, sort_indices_op)
nodes = tf.concat((targets_op.nodes, 1.0 - targets_op.nodes), axis=1)
edges = tf.concat((targets_op.edges, 1.0 - targets_op.edges), axis=1)
targets_op = targets_op._replace(nodes=nodes, edges=edges)
return inputs_op, targets_op, sort_indices_op, ranks_op | fd38b1a7d0d8e9e4633fa6fcefc5b1c1614c97fc | 3,649,775 |
def location_matches(stmt):
"""Return a matches_key which takes geo-location into account."""
if isinstance(stmt, Event):
context_key = get_location(stmt)
matches_key = str((stmt.concept.matches_key(), context_key))
elif isinstance(stmt, Influence):
subj_context_key = get_location(stmt.subj)
obj_context_key = get_location(stmt.obj)
matches_key = str((stmt.matches_key(), subj_context_key,
obj_context_key))
else:
matches_key = stmt.matches_key()
return matches_key | be261d2dcf7be09330542a4cd2c18b3261ef0eca | 3,649,777 |
def parse_files(files, options):
"""Build datastructures from lines"""
lines = []
for line in finput(files, openhook=compr):
if (type(line) is bytes): line = line.decode('utf-8')
lines.append(line.rstrip().split("|"))
db = {}
db['rp'], db['users'], db['msgprof'], db['logins'] = {}, {}, {}, 0
# Audit log format we're trying to parse below:
# datetime|req_bind|req_id|rp|msg_profile|idp|resp_bind|resp_id|user|authn_mech|attribs|name_id|assert_id|ip
for event in lines:
try:
rp, msg_profile, user = list(event[i] for i in [3, 4, 8])
except ValueError:
print(linesep.join([
"ERROR: Unsupported log file format or compressed logs with Python < 2.5",
"See the documentation."]))
term(-1)
if msg_profile.lower().find("sso") > -1:
db['logins'] += 1
# we almost always need to count rps:
if len(rp) > 0:
if rp in db['rp']:
db['rp'][rp] += 1
else:
db['rp'][rp] = 1
# only count users if asked to
if len(user) > 0:
if options.uniqusers or options.xml or options.rrd or options.json:
if user in db['users']:
db['users'][user] += 1
else:
db['users'][user] = 1
# only count message profiles and rps if asked to
if options.msgprofiles:
if msg_profile in db['msgprof']:
if rp in db['msgprof'][msg_profile]:
db['msgprof'][msg_profile][rp] += 1
else:
db['msgprof'][msg_profile][rp] = 1
else:
db['msgprof'][msg_profile] = {}
db['msgprof'][msg_profile][rp] = 1
return db | 926f805d87ead9af1099f39bfb57be0b4b775e0a | 3,649,779 |
def resize_preserving_order(nparray: np.ndarray, length: int) -> np.ndarray:
"""Extends/truncates nparray so that ``len(result) == length``.
The elements of nparray are duplicated to achieve the desired length
(favours earlier elements).
Constructs a zeroes array of length if nparray is empty.
See Also
--------
resize_array : cycles elements instead of favouring earlier ones
make_even : similar earlier-favouring behaviour for balancing 2 iterables
Examples
--------
Normal usage::
resize_preserving_order(np.array([]), 5)
# np.array([0., 0., 0., 0., 0.])
nparray = np.array([[1, 2],
[3, 4]])
resize_preserving_order(nparray, 1)
# np.array([[1, 2]])
resize_preserving_order(nparray, 3)
# np.array([[1, 2],
# [1, 2],
# [3, 4]])
"""
if len(nparray) == 0:
return np.zeros((length, *nparray.shape[1:]))
if len(nparray) == length:
return nparray
indices = np.arange(length) * len(nparray) // length
return nparray[indices] | e074b1135d2192a9b0cf2d9b91f6d99f22408220 | 3,649,780 |
def push(service, key, data):
"""Push
Called to push data to the sync cache
Args:
service (str): The name of the service using the sync
key (mixed): The key to push the data onto
data (mixed): The data to be pushed
Returns:
bool|string
"""
# Make sure the service and key are strings
if not isinstance(service, basestring): service = str(service)
if not isinstance(key, basestring): key = str(key)
# Generate the JSON
sJSON = JSON.encode({
"service": service,
"key": key,
"data": data
})
# Check if anyone is interested in the key
lSessions = _moRedis.smembers("%s%s" % (service, key))
# If there are any sessions
if lSessions:
# For each session found
for sSession in lSessions:
# Add the message to its list
p = _moRedis.pipeline()
p.lpush(sSession, sJSON)
p.expire(sSession, 21600)
p.execute()
# Now publish the message for anyone using websockets
_moRedis.publish("%s%s" % (service, key), sJSON)
# Return OK
return True | 2be85735b1c4965e5a0cdf35b5f62267ce31cc6e | 3,649,781 |
def get_db_filenames(database_name):
""" This is used to populate the dropdown menu, so users can
only access their data if their name is in the user column"""
con = sql.connect(database_name)
c = con.cursor()
names_list = []
for row in c.execute(
"""SELECT Dataset_Name FROM master_table"""):
names_list.append(row[0])
con.close()
exists_list = []
for name in names_list:
if if_file_exists_in_db(database_name, name):
exists_list.append(name)
return exists_list | 7ffdd7cfb24d135ddc20353799dd0c7d21504232 | 3,649,783 |
import string
def Calculate(values, mode=0, bin_function=None):
"""Return a list of (value, count) pairs, summarizing the input values.
Sorted by increasing value, or if mode=1, by decreasing count.
If bin_function is given, map it over values first.
"""
if bin_function:
values = list(map(bin_function, values))
bins = {}
for val in values:
v = "%f-%f" % tuple(val)
bins[v] = bins.get(v, 0) + 1
bb = list(bins.items())
if mode:
bb.sort(lambda x, y: cmp(y[1], x[1]))
else:
bb.sort()
r = []
for v, n in bb:
x, y = list(map(string.atof, string.split(v, "-")))
r.append((x, y, n))
return r | bb3f40eec7733d948e66e00c3bafdd032acb6372 | 3,649,784 |
import time
def getToday(format=3):
"""返回今天的日期字串"""
t = time.time()
date_ary = time.localtime(t)
if format == 1:
x = time.strftime("%Y%m%d", date_ary)
elif format == 2:
x = time.strftime("%H:%M", date_ary)
elif format == 3:
x = time.strftime("%Y/%m/%d", date_ary)
elif format == 4:
x = time.strftime("%Y/%m/%d %H:%M", date_ary)
elif format == 5:
x = time.strftime("%y%m%d", date_ary)
elif format == 6:
x = time.strftime("%Y-%m-%d", date_ary)
elif format == 7:
x = time.strftime("%Y/%m/%d %H:%M:%S", date_ary)
elif format == 8:
x = time.strftime("%Y-%m-%d %H:%M", date_ary)
elif format == 9:
x = time.strftime("%Y-%m-%d %H:%M:%S", date_ary)
elif format == 10:
x = time.strftime("%Y年%m月%d日 %H:%M", date_ary)
else:
x = time.strftime("%Y-%m-%d %H:%M:%S", date_ary)
return x | 900c0a0d42dc2220c5e5030eeebd858e3e6a41bf | 3,649,785 |
def _get_referenced(body, start, end, no_header, clean, as_xml, as_list):
"""Retrieve data from body between some start and end."""
if body is None or start is None or end is None:
return None
content_list = body.get_between(
start, end, as_text=False, no_header=no_header, clean=clean
)
if as_list:
return content_list
referenced = Element.from_tag("office:text")
for chunk in content_list:
referenced.append(chunk)
if as_xml:
return referenced.serialize()
else:
return referenced | 2b3e1ce008461711c37e4af6dda7dc7d2e332d9e | 3,649,786 |
import torch
def info(filepath: str) -> AudioMetaData:
"""Get signal information of an audio file.
Args:
filepath (str): Path to audio file
Returns:
AudioMetaData: meta data of the given audio.
"""
sinfo = torch.ops.torchaudio.sox_io_get_info(filepath)
return AudioMetaData(sinfo.get_sample_rate(), sinfo.get_num_frames(), sinfo.get_num_channels()) | e3ff5929f563977c44f25f8f51f3a7014f43b397 | 3,649,787 |
def _override_regex_to_allow_long_doctest_lines():
"""Allow too-long lines for doctests.
Mostly a copy from `pylint/checkers/format.py`
Parts newly added are marked with comment, "[PYTA added]: ..."
"""
def new_check_lines(self, lines, i):
"""check lines have less than a maximum number of characters
"""
max_chars = self.config.max_line_length
ignore_long_line = self.config.ignore_long_lines
def check_line(line, i, prev_line=None):
if not line.endswith('\n'):
self.add_message('missing-final-newline', line=i)
else:
# exclude \f (formfeed) from the rstrip
stripped_line = line.rstrip('\t\n\r\v ')
if not stripped_line and _EMPTY_LINE in self.config.no_space_check:
# allow empty lines
pass
elif line[len(stripped_line):] not in ('\n', '\r\n'):
self.add_message('trailing-whitespace', line=i)
# Don't count excess whitespace in the line length.
line = stripped_line
mobj = OPTION_RGX.search(line)
if mobj and mobj.group(1).split('=', 1)[0].strip() == 'disable':
line = line.split('#')[0].rstrip()
if len(line) > max_chars and not ignore_long_line.search(line):
self.add_message('line-too-long', line=i, args=(len(line), max_chars))
return i + 1
unsplit_ends = {
'\v', '\x0b', '\f', '\x0c', '\x1c', '\x1d', '\x1e', '\x85', '\u2028', '\u2029'}
unsplit = []
_split_lines = lines.splitlines(True)
# [PYTA added]: enumerate to get line_i index.
for line_i, line in enumerate(_split_lines):
if line[-1] in unsplit_ends:
unsplit.append(line)
continue
if unsplit:
unsplit.append(line)
line = ''.join(unsplit)
unsplit = []
# [PYTA added]: Skip error message for long doctest lines
doctest_tokens = compile(r'^\s*>>>.*?\n$')
if match(doctest_tokens, line):
continue
elif line_i > 0 and match(doctest_tokens, _split_lines[line_i-1]):
continue
i = check_line(line, i)
if unsplit:
check_line(''.join(unsplit), i)
FormatChecker.check_lines = new_check_lines | 9b9d1b5eefaa9e61d1e8915aef988fbc25756d1a | 3,649,788 |
import types
def handle(*, artifacts: oa_types.SimplePropertyArtifacts) -> types.TColumn:
"""
Handle a simple property.
Args:
artifacts: The artifacts of the simple property.
Returns:
The constructed column.
"""
return facades.sqlalchemy.simple.construct(artifacts=artifacts) | 2c9d5cd47b2aecb7603430c8eec7b326ce3c249f | 3,649,789 |
def rollout_representation(representation_model, steps, obs_embed, action, prev_states, done):
"""
Roll out the model with actions and observations from data.
:param steps: number of steps to roll out
:param obs_embed: size(time_steps, batch_size, n_agents, embedding_size)
:param action: size(time_steps, batch_size, n_agents, action_size)
:param prev_states: RSSM state, size(batch_size, n_agents, state_size)
:return: prior, posterior states. size(time_steps, batch_size, n_agents, state_size)
"""
priors = []
posteriors = []
for t in range(steps):
prior_states, posterior_states = representation_model(obs_embed[t], action[t], prev_states)
prev_states = posterior_states.map(lambda x: x * (1.0 - done[t]))
priors.append(prior_states)
posteriors.append(posterior_states)
prior = stack_states(priors, dim=0)
post = stack_states(posteriors, dim=0)
return prior.map(lambda x: x[:-1]), post.map(lambda x: x[:-1]), post.deter[1:] | 2736609ab54d477c3fad2ab7a4e3270772517a08 | 3,649,790 |
def generate_random_ast(schema, rng):
"""End-to-end simulator for AST of Core DSL."""
distributions = [schemum[1] for schemum in schema]
partition_alpha = rng.gamma(1,1)
partition = generate_random_partition(partition_alpha, len(distributions), rng)
row_dividers = [generate_random_row_divider(rng) for _i in partition]
primitives = [
[output, dist, generate_random_hyperparameters(dist, rng)]
for output, dist in enumerate(distributions)
]
return [
[row_divider, [primitives[b] for b in block]]
for row_divider, block in zip(row_dividers, partition)
] | 9547f815ad07af33b182c7edf7ea646ec9fdd49f | 3,649,792 |
def _opcode_to_string(opcode):
"""Return the printable name for a REIL opcode.
Args:
opcode (reil.Opcode): The opcode to provide in printable form.
Returns:
A string representing the opcode.
"""
return _opcode_string_map[opcode] | a1307efe0af8d223360a9ca0f2d9e96913ccb601 | 3,649,793 |
def get_shot(shot):
"""Retrieves shot object from database and returns as dictionary.
Raises exception if shot is not found.
"""
return __get_conn().get_entity(__table_name(),
shot['PartitionKey'], shot['RowKey']) | 0e9ad55427bba2074f7a77d94b61e7bae34bcbda | 3,649,794 |
def report_value_count(data_frame: pd.DataFrame, column: str, digits: int = 2) -> str:
"""
Report the number and percentage of non-empty values in the column.
Parameters
----------
data_frame : pandas.DataFrame
A data frame with one or more columns.
column : str
The name of the column to report on.
digits : int, optional
The number of digits to report in the percentage (default 2).
Returns
-------
str
The number of non-empty cells and a percentage of the total number of rows.
"""
count = data_frame[column].notnull().sum()
# The type of `count` is `numpy.int64` which when divided by zero yields `nan`.
# This is undesired and we rather raise an exception here.
if len(data_frame) == 0:
raise ZeroDivisionError("The data frame is empty!")
return f"{humanize.intcomma(count)} ({count / len(data_frame):.{digits}%})" | d31d9e8bae216f7931f96ec08992d6319d4c3645 | 3,649,795 |
def input_fn(is_training, data_dir, batch_size, num_epochs=1,
num_parallel_calls=1, multi_gpu=False):
"""Input_fn using the tf.data input pipeline for CIFAR-10 dataset.
Args:
is_training: A boolean denoting whether the input is for training.
data_dir: The directory containing the input data.
batch_size: The number of samples per batch.
num_epochs: The number of epochs to repeat the dataset.
num_parallel_calls: The number of records that are processed in parallel.
This can be optimized per data set but for generally homogeneous data
sets, should be approximately the number of available CPU cores.
multi_gpu: Whether this is run multi-GPU. Note that this is only required
currently to handle the batch leftovers, and can be removed
when that is handled directly by Estimator.
Returns:
A dataset that can be used for iteration.
"""
filenames = get_filenames(is_training, data_dir)
dataset = tf.data.FixedLengthRecordDataset(filenames, _RECORD_BYTES)
num_images = is_training and _NUM_IMAGES['train'] or _NUM_IMAGES['validation']
return resnet_run_loop.process_record_dataset(
dataset, is_training, batch_size, _NUM_IMAGES['train'],
parse_record, num_epochs, num_parallel_calls,
examples_per_epoch=num_images, multi_gpu=multi_gpu) | 5d27f5a04b409ad4b04ce9885b592b0454ae0b4b | 3,649,796 |
def getWinners(players, game):
"""
Return a list of winners
:param players:
:param game:
:return:
"""
# get score for each player
for i in range(0, len(game.players)):
game.players[i].credits = scoreFor(i, game)
currentPlayer = whoseTurn(game)
# add 1 to players who had less turns
best = 0
winners = []
for i in range(0, len(game.players)):
if best < game.players[i].credits:
best = game.players[i].credits
winners.append(i)
elif best == game.players[i].credits:
if i <= currentPlayer:
winners.append(i)
else:
if currentPlayer in winners:
winners = []
winners.append(i)
return winners | a872d4f9ed596e31ae9a129c9054f9bb95a6e765 | 3,649,797 |
def read_xsf(filepath):
"""
    :param filepath: filepath of the xsf file
    :return: cell and atoms needed to build the pymatflow.structure.crystal object
"""
a = ase.io.read(filepath, format='xsf')
cell = a.cell.tolist()
atoms = []
for i in range(len(a.arrays['numbers'])):
for item in base.element:
if base.element[item].number == a.arrays['numbers'][i]:
symbol = item
break
atoms.append(base.Atom(
symbol,
a.arrays['positions'][i, 0],
a.arrays['positions'][i, 1],
a.arrays['positions'][i, 2]
))
return cell, atoms | 97152eb3d18752e78689598bb0c8603c13051623 | 3,649,798 |
def elina_abstract0_bound_linexpr(man, a, linexpr):
"""
Returns the ElinaInterval taken by an ElinaLinexpr0 over an ElinaAbstract0.
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
a : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0.
linexpr : ElinaLinexpr0Ptr
Pointer to the ElinaLinexpr0.
Returns
-------
interval : ElinaIntervalPtr
Pointer to the ElinaInterval.
"""
interval = None
try:
elina_abstract0_bound_linexpr_c = elina_auxiliary_api.elina_abstract0_bound_linexpr
elina_abstract0_bound_linexpr_c.restype = ElinaIntervalPtr
elina_abstract0_bound_linexpr_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, ElinaLinexpr0Ptr]
interval = elina_abstract0_bound_linexpr_c(man, a, linexpr)
except:
print('Problem with loading/calling "elina_abstract0_bound_linexpr" from "libelinaux.so"')
print('Make sure you are passing ElinaManagerPtr, ElinaAbstract0Ptr, ElinaLinexpr0Ptr to the function')
return interval | 2764507b79f3326741496a92642be75b5afb8ce4 | 3,649,799 |
import random
import collections
def load_papertext(train_rate=0.8, dev_rate=0.1, test_rate=0.1, max_length=50, download_from_label_studio=True):
"""
    Aspect-based sentiment analysis
    :param kind: whether to load the papertext data or the dem8 data
:return:
:rtype:
"""
export_dir = "/opt/nlp/data/papertext/"
if download_from_label_studio:
json_path = export_data(hostname='http://127.0.0.1:8080/api/', dirpath=export_dir, jsonfile="0707.json")
data = collect_json(dirpath=export_dir)
valid_data = []
for one in data:
for complete in one['completions']:
if complete.get('was_cancelled'):
                # The annotation was cancelled, so skip it
continue
else:
                # Just take the first annotation result; there is only one
if complete['result']:
result_one = complete['result'][0]
label = result_one['value']['choices'][0]
location = one['data']['location']
location = location.replace('行数','lines num').replace('段落宽度','paragraph width').replace('段落高度','paragraph height').replace('页面宽','page width').replace('页面高','page height')
text = one['data']['text']
valid_data.append([text,location,label])
    print(f'Collected {len(valid_data)} valid samples out of {len(data)} in total')
random.seed(30)
random.shuffle(valid_data)
total = len(valid_data)
train_num = int(total * train_rate)
dev_num = int(total * dev_rate)
test_num = int(total * test_rate)
train_data = valid_data[:train_num]
dev_data = valid_data[train_num:train_num+dev_num]
test_data = valid_data[train_num+dev_num:]
    # Convert to the format used for saving
def change_data(kind_data, name):
cnts = collections.Counter()
rows = []
for idx, one_data in enumerate(kind_data):
content, location, label = one_data
# label_id = labels2id[label]
            assert label in ['作者','页眉','页脚','段落','标题','参考','表格','图像','公式','其它'], "label is not one of the expected keywords; otherwise the labels in the paper_task_def.yml config cannot be parsed and errors will occur"
sample = {'uid': idx, 'premise': content, 'hypothesis': location, 'label': label}
cnts[label] +=1
rows.append(sample)
print(f"{name}数据集的各个label的数量是: {cnts}")
return rows
papertext_train_data = change_data(train_data, name='train')
papertext_dev_data = change_data(dev_data, name='dev')
papertext_test_data = change_data(test_data, name='test')
return papertext_train_data, papertext_dev_data, papertext_test_data | b0c4747aaf61dce82612162652218ce001a7f17e | 3,649,800 |
import json
def _load_cmake_spec():
"""Load and return the CMake spec from disk"""
try:
with open(CMAKE_SPEC_FILE()) as fp:
return json.load(fp)
except (OSError, IOError, ValueError):
return None | 32d239ec667aa6f24da6f426d0c2dbf1984f3409 | 3,649,802 |
def read():
"""
Read temperature
:return: temperature
"""
# global ds18b20
location = '/sys/bus/w1/devices/' + ds18b20 + '/w1_slave'
tfile = open(location)
text = tfile.read()
tfile.close()
secondline = text.split("\n")[1]
temperaturedata = secondline.split(" ")[9]
temperature = float(temperaturedata[2:])
temperature = temperature / 1000
return temperature | 7e4c689d5cce6b28c28314eb7e1773e9af1a5061 | 3,649,804 |
import time
def wine(root):
"""Title of Database: Wine recognition data
Updated Sept 21, 1998 by C.Blake : Added attribute information
These data are the results of a chemical analysis of
wines grown in the same region in Italy but derived from three
different cultivars.
The analysis determined the quantities of 13 constituents
found in each of the three types of wines.
Number of Instances
class 1 59
class 2 71
class 3 48
Data storage directory:
root = `/user/.../mydata`
wine data:
`root/wine/wine.txt`
`root/wine/wine.json`
Args:
        root: str, Store the absolute path of the data directory.
            example: if you want the data path to be `/user/.../mydata/wine`,
            root should be `/user/.../mydata`.
Returns:
Store the absolute path of the data directory, is `root/wine`.
"""
start = time.time()
task_path = assert_dirs(root, 'wine')
url_introduce = 'http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.names'
url_txt = 'http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data'
rq.files(url_introduce, gfile.path_join(task_path, 'introduce.txt'), verbose=0)
rq.table(url_txt, gfile.path_join(task_path, 'wine.txt'),
names=['label', 'Alcohol', 'Malic acid', 'Ash', 'Alcalinity of ash', 'Magnesium',
'Total phenols', 'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins',
'Color intensity', 'Hue', 'OD280/OD315 of diluted wines', 'Proline'])
print('wine dataset download completed, run time %d min %.2f sec' %divmod((time.time()-start), 60))
return task_path | f2a9a3b66b276b563dc03919becc326f35d77b3a | 3,649,805 |
def initialize_scenario_data():
"""Will initialize the Scenario Data.
:return an empty ScenarioData named tuple
:rtype ScenarioData
"""
actors = {}
companies = {}
scenario_data = ScenarioData(actors, companies)
return scenario_data | 4bbb26b84abef89fc6636bd382d0308cbc8e7573 | 3,649,806 |
def dynamicMass(bulk_density, lat, lon, height, jd, velocity, decel, gamma=1.0, shape_factor=1.21):
""" Calculate dynamic mass at the given point on meteor's trajectory.
Either a single set of values can be given (i.e. every argument is a float number), or all arguments
must be numpy arrays.
Arguments:
bulk_density: [float] Bulk density of the meteoroid in kg/m^3.
lat: [float] Latitude of the meteor (radians).
lon: [flaot] Longitude of the meteor (radians).
height: [float] Height of the meteor (meters).
jd: [float] Julian date of the meteor.
velocity: [float] Velocity of the meteor (m/s).
decel: [float] Deceleration in m/s^2.
Keyword arguments:
        gamma: [float] Drag coefficient. 1.0 by default.
        shape_factor: [float] Shape factor for the body. 1.21 (sphere) by default. Other values:
- sphere = 1.21
- hemisphere = 1.92
- cube = 1.0
- brick 2:3:5 = 1.55
Return:
dyn_mass: [float] Dynamic mass in kg.
"""
# Calculate the atmosphere density at the given point
atm_dens = getAtmDensity_vect(lat, lon, height, jd)
# Calculate the dynamic mass
dyn_mass = (1.0/(bulk_density**2))*((gamma*(velocity**2)*atm_dens*shape_factor)/decel)**3
return dyn_mass | 48920ecaef4c039672a387f4da45297861b6da56 | 3,649,807 |
def input_fn_tfrecords(files_name_pattern, num_epochs, batch_size, mode):
"""
Input functions which parses TFRecords.
:param files_name_pattern: File name to TFRecords.
:param num_epochs: Number of epochs.
:param batch_size: Batch size.
:param mode: Input function mode.
:return: features and label.
"""
return tf.data.experimental.make_batched_features_dataset(
file_pattern=files_name_pattern,
batch_size=batch_size,
features=get_metadata().schema.as_feature_spec(),
reader=tf.data.TFRecordDataset,
num_epochs=num_epochs,
shuffle=True if mode == tf.estimator.ModeKeys.TRAIN else False,
shuffle_buffer_size=1 + (batch_size * 2),
prefetch_buffer_size=1,
) | bd2b5bf41c2ea9fbb28d7e2cdc5c8f22e8bbac93 | 3,649,808 |
def validate(number):
"""Check if the number provided is a valid RUC number. This checks the
length, formatting, check digit and check sum."""
number = compact(number)
if len(number) != 13:
raise InvalidLength()
if not number.isdigit():
raise InvalidFormat()
if number[:2] < '01' or number[:2] > '24':
raise InvalidComponent() # invalid province code
if number[2] < '6':
# 0..5 = natural RUC: CI plus establishment number
if number[-3:] == '000':
raise InvalidComponent() # establishment number wrong
ci.validate(number[:10])
elif number[2] == '6':
# 6 = public RUC
if number[-4:] == '0000':
raise InvalidComponent() # establishment number wrong
if _checksum(number[:9], (3, 2, 7, 6, 5, 4, 3, 2, 1)) != 0:
raise InvalidChecksum()
elif number[2] == '9':
# 9 = juridical RUC
if number[-3:] == '000':
raise InvalidComponent() # establishment number wrong
if _checksum(number[:10], (4, 3, 2, 7, 6, 5, 4, 3, 2, 1)) != 0:
raise InvalidChecksum()
else:
raise InvalidComponent() # third digit wrong
return number | c09602c8b3301c6f1d4d467a1b7bfd607656c436 | 3,649,809 |
def parse_raw(data: bytes) -> dict:
"""
Parse the contents of an environment retrieved from flash or memory
and provide an equivalent dictionary.
    The provided *data* should begin at the start of the variable definitions.
    It **must not** contain the ``env_t`` metadata, such as the CRC32 word
    and the ``flags`` value (only present when compiled with
    ``CONFIG_SYS_REDUNDAND_ENVIRONMENT``).
A :py:exc:`ValueError` is raised if no environment variables are found.
"""
results = {}
regex = raw_var_regex()
for match in regex.finditer(data):
name = match.group('name').decode('ascii')
value = match.group('value').decode('ascii')
results[name] = value
if not results:
raise ValueError('No environment variables found')
return results | c40c08a099d7468a4ec19da90ce9062d8ddd6ed1 | 3,649,810 |
from typing import List
def _list_registered_paths() -> List[str]:
"""List available paths registered to this service."""
paths = []
for rule in application.url_map.iter_rules():
rule = str(rule)
if rule.startswith("/api/v1"):
paths.append(rule)
return paths | 56f27aa4b33191cbd779e0e173295431670d26ab | 3,649,811 |
def input_fn(request_body, request_content_type):
"""An input_fn that loads a pickled numpy array"""
if request_content_type == "application/python-pickle":
array = np.load(BytesIO(request_body), allow_pickle=True)
return array
else:
raise Exception("Please provide 'application/python-pickle' as a request content type") | 0f6387dffc3ade2097888a92ef1af99f4d367ef8 | 3,649,812 |
def game(x_train, x_test, y_train, y_test, algo='rf', show_train_scores=True):
"""Standard Alogrithms fit and return scores.
* Default Random State is set as 192 when posible.
* Available models - dc, rf, gb, knn, mc_ovo_rf, mc_ova_rf
"""
if algo is 'dc':
        clf = DummyClassifier(strategy='most_frequent', random_state=192)
elif algo is 'rf':
clf = RandomForestClassifier(n_jobs=-1, random_state=192)
elif algo is 'gb':
clf = GradientBoostingClassifier(random_state=192)
elif algo is 'knn':
clf = KNeighborsClassifier()
elif algo is 'mc_ovo_rf':
clf = OneVsOneClassifier(RandomForestClassifier(n_jobs=-1,
random_state=192))
elif algo is 'mc_ova_rf':
clf = OneVsRestClassifier(RandomForestClassifier(n_jobs=-1,
random_state=192))
else:
print('improper model name, please check help')
return 0, 0
clf = clf.fit(x_train, y_train)
    # defaults in case the user does not opt to show training scores
ac_score, f1_score = 0, 0
if show_train_scores:
print('Training Scores')
ac_score, f1_score = check_metric(clf.predict(x_train), y_train)
print('\nTesting Scores')
ac_score1, f1_score1 = check_metric(clf.predict(x_test), y_test)
ret = {'classifier': clf,
           'train_ac_score': ac_score,
           'train_f1_score': f1_score,
           'test_ac_score': ac_score1,
           'test_f1_score': f1_score1,
}
return ret | 9a225f04d5d883bc70c4f4f9036ddfee7b206dbc | 3,649,813 |
def get_convolutional_model(vocab_size: int,
input_length: int,
num_classes: int,
embedding_size: int=300,
model_size: str='small'
) -> Model:
"""Create a character convolutional model
Parameters
----------
vocab_size: the number of characters in the vocabulary
    input_length: the size of the input sequences (must be at least 160)
num_classes: the number of output classes
embedding_size: the vector size of character representations
model_size: 'large' or 'small' feature sizes
Returns
-------
tf.keras.Model: an uncompiled keras model
"""
if model_size.lower() == 'small':
conv_filters = 256
dnn_size = 1024
elif model_size.lower() == 'large':
conv_filters = 1024
dnn_size = 2048
else:
ValueError("model size must be either 'small' or 'large'")
if input_length < 160:
        raise ValueError('The input sequences must be at least 160 characters long')
model = Sequential()
model.add(layers.Embedding(
vocab_size,
embedding_size,
input_length=input_length,
name='character_embedding'
))
model.add(layers.Dropout(0.2, name='input_dropout'))
model.add(layers.Conv1D(
filters=conv_filters,
kernel_size=7,
activation='relu',
name='conv_1'))
model.add(layers.MaxPooling1D(
pool_size=3,
name='pooling_1'))
model.add(layers.Conv1D(
filters=conv_filters,
kernel_size=7,
activation='relu',
name='conv_2'))
model.add(layers.MaxPooling1D(
pool_size=3,
name='pooling_2'))
model.add(layers.Conv1D(
filters=conv_filters,
kernel_size=3,
activation='relu',
name='conv_3'))
model.add(layers.Conv1D(
filters=conv_filters,
kernel_size=3,
activation='relu',
name='conv_4'))
model.add(layers.Conv1D(
filters=conv_filters,
kernel_size=3,
activation='relu',
name='conv_5'))
model.add(layers.Conv1D(
filters=conv_filters,
kernel_size=7,
activation='relu',
name='conv_6'))
model.add(layers.MaxPooling1D(
pool_size=3,
name='pooling_3'))
model.add(layers.Flatten(name='flatten'))
model.add(layers.Dense(dnn_size,
activation='relu',
name='dense_out_1'))
model.add(layers.Dropout(0.5, name='post_dropout_1'))
model.add(layers.Dense(dnn_size,
activation='relu',
name='dense_out_2'))
model.add(layers.Dropout(0.5, name='post_dropout_2'))
model.add(layers.Dense(num_classes,
activation='softmax',
name='output'))
return model | aafd9fe6141a05c433508ff0a9583d9c42a7de5b | 3,649,814 |
def parse_test_config(doc):
""" Get the configuration element. """
test_config = doc.documentElement
if test_config.tagName != 'configuration':
raise RuntimeError('expected configuration tag at root')
return test_config | c61c2f4e43c5501c461bb92b63609162b2918860 | 3,649,815 |
import textwrap
def _get_control_vars(control_vars):
"""
Create the section of control variables
Parameters
----------
control_vars: str
Functions to define control variables.
Returns
-------
text: str
Control variables section and header of model variables section.
"""
text = textwrap.dedent("""
##########################################################################
# CONTROL VARIABLES #
##########################################################################
def _init_outer_references(data):
for key in data:
__data[key] = data[key]
def time():
return __data['time']()
""")
text += control_vars
text += textwrap.dedent("""
##########################################################################
# MODEL VARIABLES #
##########################################################################
""")
return text | 614a6ca5bc8ac7354f63bfceabaff4eb4b93208a | 3,649,816 |
def echo():
"""Echo data"""
return request.get_data() + '\n' | 75aad93e46925ed086be87b18a96d756fa1c6425 | 3,649,817 |
import logging
def _get_signature_def(signature_def_key, export_dir, tags):
"""Construct a `SignatureDef` proto."""
signature_def_key = (
signature_def_key or
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)
metagraph_def = saved_model_cli.get_meta_graph_def(export_dir, tags)
try:
signature_def = signature_def_utils.get_signature_def_by_key(
metagraph_def,
signature_def_key)
except ValueError as e:
try:
formatted_key = _DEFAULT_INPUT_ALTERNATIVE_FORMAT.format(
signature_def_key)
signature_def = signature_def_utils.get_signature_def_by_key(
metagraph_def, formatted_key)
logging.warning('Could not find signature def "%s". '
'Using "%s" instead', signature_def_key, formatted_key)
except ValueError:
raise ValueError(
'Got signature_def_key "{}". Available signatures are {}. '
'Original error:\n{}'.format(
signature_def_key, list(metagraph_def.signature_def), e))
return signature_def | d0bedd323fb68ad41553034a08b64dc73f85faf3 | 3,649,819 |
def illuminance_to_exposure_value(E, S, c=250):
"""
Computes the exposure value :math:`EV` from given scene illuminance
:math:`E` in :math:`Lux`, *ISO* arithmetic speed :math:`S` and
*incident light calibration constant* :math:`c`.
Parameters
----------
E : array_like
Scene illuminance :math:`E` in :math:`Lux`.
S : array_like
*ISO* arithmetic speed :math:`S`.
c : numeric, optional
*Incident light calibration constant* :math:`c`.
With a flat receptor, *ISO 2720:1974* recommends a range for
:math:`c`. of 240 to 400 with illuminance in :math:`Lux`; a value of
250 is commonly used. With a hemispherical receptor, *ISO 2720:1974*
recommends a range for :math:`c` of 320 to 540 with illuminance in
:math:`Lux`; in practice, values typically are between 320 (Minolta)
and 340 (Sekonic).
Returns
-------
ndarray
Exposure value :math:`EV`.
Notes
-----
- The exposure value :math:`EV` indicates a combination of camera
settings rather than the focal plane exposure, i.e. luminous exposure,
photometric exposure, :math:`H`. The focal plane exposure is
time-integrated illuminance.
References
----------
:cite:`Wikipediabj`
Examples
--------
>>> illuminance_to_exposure_value(2.5, 100)
0.0
"""
E = as_float_array(E)
S = as_float_array(S)
c = as_float_array(c)
EV = np.log2(E * S / c)
return EV | 7c03f816e801f04735687a2a2adb6f4969877bb2 | 3,649,820 |
from collections import Counter, namedtuple
# Result type described in the docstring below.
Feedback = namedtuple('Feedback', ['blacks', 'whites'])
def feedback(code, guess):
    """
    Return a namedtuple Feedback(blacks, whites) where
    blacks is the number of pegs from the guess that
    are correct in both color and position and
    whites is the number of pegs of the right color but wrong position.
    """
    blacks = sum(g == c for g, c in zip(guess, code))
    whites = sum((Counter(guess) & Counter(code)).values()) - blacks
    return Feedback(blacks, whites) | bab57da2d7c60869988d6c24b69b8eab1c7da173 | 3,649,821
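# A minimal usage sketch: with code "1234" and guess "1243", the first two
# pegs match exactly (2 blacks) and the trailing "4" and "3" are correct
# colors in the wrong positions (2 whites).
assert feedback("1234", "1243") == Feedback(blacks=2, whites=2)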
from datetime import date
from .models import PlacedDateBet
def find_winning_dates(placed_bets, winning_date):
"""
Finds the placed bets with the dates closest to the winning date
:param placed_bets: iterable of PlacedDateBet
:param winning_date: datetime.date
:return: list of winning PlacedDateBets
"""
assert isinstance(winning_date, date)
dates = []
for placed_bet in placed_bets:
assert isinstance(placed_bet, PlacedDateBet)
dates.append(placed_bet.placed_date)
    timedeltas = []
    # avoid shadowing the imported ``date`` class with the loop variable
    for bet_date in dates:
        timedeltas.append(abs(winning_date - bet_date))
closest = min(timedeltas)
indices = []
for i in range(0, len(timedeltas)):
if timedeltas[i] == closest:
indices.append(i)
winning_bets = []
for index in indices:
winning_bets.append(placed_bets[index])
return winning_bets | 73315f2bebfcc0290f9372af935ded78011c7d4b | 3,649,823 |
import numpy as np
def create_greedy_policy(Q):
"""
Creates a greedy policy based on Q values.
Args:
Q: A dictionary that maps from state -> action values
Returns:
A function that takes an observation as input and returns a vector
of action probabilities.
"""
def policy_fn(observation):
a_probs = np.zeros(len(Q[observation]))
a_probs[np.argmax(Q[observation])] = 1.0
return a_probs
return policy_fn | 01966964034504454e3be9926236706371c626c8 | 3,649,824 |
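# A minimal usage sketch: with two actions per state, the greedy policy puts
# all probability mass on the argmax action of the observed state.
Q_example = {"s0": [0.1, 0.9], "s1": [0.5, 0.2]}
greedy_policy = create_greedy_policy(Q_example)
assert list(greedy_policy("s0")) == [0.0, 1.0]
assert list(greedy_policy("s1")) == [1.0, 0.0]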
def get_tags(rule, method, **options):
"""
gets the valid tags for given rule.
:param pyrin.api.router.handlers.base.RouteBase rule: rule instance to be processed.
:param str method: http method name.
:rtype: list[str]
"""
return get_component(SwaggerPackage.COMPONENT_NAME).get_tags(rule, method, **options) | 4671d1d9c66934d6b22bee74801d07b30635b3b6 | 3,649,825 |
def get_paybc_transaction_request():
"""Return a stub payment transaction request."""
return {
'clientSystemUrl': 'http://localhost:8080/abcd',
'payReturnUrl': 'http://localhost:8081/xyz'
} | b913438562d4f2b0883e340b48843f9954faa8a4 | 3,649,826 |
import numpy as np
def pretreatment(filename):
"""pretreatment"""
poems = []
file = open(filename, "r")
for line in file: #every line is a poem
#print(line)
title, poem = line.strip().split(":") #get title and poem
poem = poem.replace(' ','')
if '_' in poem or '《' in poem or '[' in poem or '(' in poem or '(' in poem:
continue
if len(poem) < 10 or len(poem) > 128: #filter poem
continue
poem = '[' + poem + ']' #add start and end signs
poems.append(poem)
print("唐诗总数: %d"%len(poems))
#counting words
allWords = {}
for poem in poems:
for word in poem:
if word not in allWords:
allWords[word] = 1
else:
allWords[word] += 1
#'''
# erase words which are not common
erase = []
for key in allWords:
if allWords[key] < 2:
erase.append(key)
for key in erase:
del allWords[key]
#'''
wordPairs = sorted(allWords.items(), key = lambda x: -x[1])
words, a= zip(*wordPairs)
#print(words)
words += (" ", )
wordToID = dict(zip(words, range(len(words)))) #word to ID
wordTOIDFun = lambda A: wordToID.get(A, len(words))
poemsVector = [([wordTOIDFun(word) for word in poem]) for poem in poems] # poem to vector
#print(poemsVector)
#padding length to batchMaxLength
batchNum = (len(poemsVector) - 1) // batchSize
X = []
Y = []
#create batch
for i in range(batchNum):
batch = poemsVector[i * batchSize: (i + 1) * batchSize]
maxLength = max([len(vector) for vector in batch])
temp = np.full((batchSize, maxLength), wordTOIDFun(" "), np.int32)
for j in range(batchSize):
temp[j, :len(batch[j])] = batch[j]
X.append(temp)
        temp2 = np.copy(temp)  # copy so that shifting Y below does not modify X
temp2[:, :-1] = temp[:, 1:]
Y.append(temp2)
return X, Y, len(words) + 1, wordToID, words | 5aa85b3bda72d3efb3067ebcc06d7f4038d9990e | 3,649,828 |
def forward_fdm(order, deriv, adapt=1, **kw_args):
"""Construct a forward finite difference method.
Further takes in keyword arguments of the constructor of :class:`.fdm.FDM`.
Args:
order (int): Order of the method.
deriv (int): Order of the derivative to estimate.
adapt (int, optional): Number of recursive calls to higher-order
derivatives to dynamically determine the step size. Defaults to `1`.
Returns:
:class:`.fdm.FDM`: The desired finite difference method.
"""
return FDM(
list(range(order)),
deriv,
bound_estimator=_construct_bound_estimator(
forward_fdm, order, adapt, **kw_args
),
**kw_args
) | 7b5c46fcdfc1a186079b2a4f94a129b8f79dbfde | 3,649,829 |
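# Hedged usage sketch (assumes the returned FDM object is callable as
# fdm(f, x), as in the finite-difference package this helper mirrors):
# d = forward_fdm(5, 1)
# d(np.sin, 1.0)  # approximately np.cos(1.0)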
import requests
def get_list_by_ingredient(ingredient):
""" this should return data for filtered recipes by ingredient """
res = requests.get(f'{API_URL}/{API_KEY}/filter.php', params={"i":ingredient})
return res.json() | 5bb34ffe635499a93decc5d4c080c638ee92c1b5 | 3,649,831 |
def chk_sudo():
"""\
Type: decorator.
    The command can only be executed if the author is an owner or has the required permissions.
"""
async def predicate(ctx):
if is_sudoers(ctx.author):
return True
await ctx.message.add_reaction("🛑")
raise excepts.NotMod()
return commands.check(predicate) | 45ddad31e761c9cf227a19fb78e3b3f52414c966 | 3,649,832 |
def have_same_items(list1, list2):
    """ Check whether list1 and list2 consist of the same number
    of the same elements
    Parameters
    ----------
    list1 : list[int]
        array of unique elements sorted in ascending order
    list2 : list[int]
        array of arbitrary length containing arbitrary numbers
    Returns
    -------
    bool
    """
    # list1 is already sorted and unique, so comparing it against a sorted
    # copy of list2 checks both membership and element count.
    return sorted(list2) == list1 | 2973a1961e25686fcbd2003dd366429cbd4c67eb | 3,649,833
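# Usage sketch: the order of list2 does not matter, but multiplicity does.
assert have_same_items([1, 2, 3], [3, 1, 2])
assert not have_same_items([1, 2, 3], [1, 2, 2])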
def analyze(geometry_filenames, mode='global', training_info=None, stride=None,
box_size=None, configs=None, descriptor=None, model=None,
format_=None, descriptors=None, save_descriptors=False,
save_path_descriptors=None, nb_jobs=-1, **kwargs):
"""
Apply ARISE to given list of geometry files.
This function is key to reproduce the single- and polycrystalline predictions in:
[1] A. Leitherer, A. Ziletti, and L.M. Ghiringhelli, arXiv ??? (2021).
Parameters:
        geometry_filenames: list
list of geometry files to be analyzed.
mode: str (default='global')
If 'global', a global descriptor will be calculated and a global label (plus uncertainty) predicted.
If 'local', the strided pattern matching algorithm introduced in [1] is applied.
stride: float (default=None)
Step size in strided pattern matching algorithm. Only relevant if mode='local'.
If no value is specified, a stride of 4 Angstroem in each direction, for each of the geometry files
is used.
box_size: float (default=None)
Size of the box employed in strided pattern matching algorithm. Only relevant if mode='local'.
If no value is specified, a box size of 16 Angstroem is used, for each of the geometry files.
configs: object (default=None)
configuration object, defining folder structure. For more details, please have a look at the function set_configs from ai4materials.utils.utils_config
descriptor: ai4materials descriptor object (default=None)
If None, the SOAP descriptor as implemented in the quippy package (see ai4materials.descritpors.quippy_soap_descriptor)
with the standard settings employed in [1] will be used.
model: str, (default=None)
If None, the model of [1] will be automatically loaded. Otherwise the path to the model h5 file needs to be specified alongside
information on the training set (in particular, the relation between integer class labels and
class labels).
training_info: path to dict (default=None)
        Information on the relation between int labels and structure labels. If model=None, training information
        of [1] will be loaded regardless of this keyword. If model is not None,
        then specification of training_info is mandatory. The structure of this dictionary
        is defined as dict = {'data': ['nb_classes': 108,
                                       'classes': [text label class 0, text label class 1, ... i.e. ordered class labels]]}
format_: str, optional (default=None)
format of geometry files. If not specified, the input files are assumed to have aims format in case of
global mode, and xyz format in case of local mode.
descriptors: path to desc or numpy array, optional (default=None)
If mode=local, then this must be a path to a desc file containing the descriptors.
If mode=global, then this must be a numpy array containing the descriptors.
save_descriptors: bool, optional (default=False)
Decides whether to save calculated descriptors into specified savepath or not (only for mode=local).
save_path_descriptors: str, optional (default=None)
path into which descriptors are saved (for mode=global)
"""
if not model == None:
if training_info == None:
raise ValueError("No information on the relation between int and str class labels is provided.")
#if not (type(model) == str or type(model)==keras.engine.training.Model):
# raise NotImplementedError("Either specifiy path or model loaded from h5 via keras.models.load_model")
if stride == None:
stride = [[4.0, 4.0, 4.0] for _ in range(len(geometry_filenames))]
if box_size == None:
box_size = [16.0 for _ in range(len(geometry_filenames))]
if format_ == None:
if mode == 'global':
format_ = 'aims'
elif mode == 'local':
format_ = 'xyz'
    if model is not None:
        # The original split('.')-based check rejected even valid paths
        # (split yields 'h5', not '.h5'); checking the suffix directly
        # implements the documented requirement.
        if not str(model).endswith('.h5'):
            raise ValueError("Model must be a path to a h5 file or None. In the latter case, a pretrained model is loaded.")
if mode == 'global':
predictions, uncertainty = global_(geometry_filenames, descriptor=descriptor,
model=model, format_=format_,
descriptors=descriptors, save_descriptors=save_descriptors,
save_path_descriptors=save_path_descriptors, **kwargs)
elif mode == 'local':
predictions, uncertainty = local(geometry_filenames, stride, box_size, configs,
descriptor=descriptor, model=model, format_=format_,
desc_filename=descriptors, nb_jobs=nb_jobs, **kwargs)
else:
raise ValueError("Argument 'mode' must either be 'local' or 'global'.")
return predictions, uncertainty | eeec9ac33a91b41b8a90f825aef0fc7605bdbf58 | 3,649,834 |
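# Hedged usage sketch: global-mode prediction on two geometry files with the
# pretrained model of reference [1] (the file names below are hypothetical).
# predictions, uncertainty = analyze(
#     ['fcc_Cu.in', 'bcc_Fe.in'], mode='global', format_='aims')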
def get_params(name, seed):
"""Some default parameters.
Note that this will initially include training parameters that you won't need for metalearning since we have our own training loop."""
configs = []
overrides = {}
overrides["dataset_reader"] = {"lazy": True}
configs.append(Params(overrides))
configs.append(
Params({"random_seed": seed, "numpy_seed": seed, "pytorch_seed": seed})
)
configs.append(Params.from_file("config/ud/en/udify_bert_finetune_en_ewt.json"))
configs.append(Params.from_file("config/udify_base.json"))
return util.merge_configs(configs) | 02d70be07a2d7afe793e657d6fb38fefe99171ce | 3,649,835 |
def rgb2hex(rgb):
"""Converts an RGB 3-tuple to a hexadeximal color string.
EXAMPLE
-------
>>> rgb2hex((0,0,255))
'#0000FF'
"""
return ('#%02x%02x%02x' % tuple(rgb)).upper() | 4c3323e34fcd2c1b4402ebe5f433c5fd9320cce9 | 3,649,836 |
from typing import Union
import re
from typing import Optional
def path_regex(
path_regex: Union[str, re.Pattern], *, disable_stage_removal: Optional[bool] = False
):
"""Validate the path in the event against the given path pattern.
The following APIErrorResponse subclasses are used:
PathNotFoundError: When the path doesn't match.
Args:
        path_regex: A regular expression to validate against.
disable_stage_removal (bool): Preserve the original path with stage.
"""
return _get_decorator(
validate_path_regex,
path_regex=path_regex,
disable_stage_removal=disable_stage_removal,
update_event=True,
) | 5c54d71a20fa7795b9e6eefa508de5b8516378a6 | 3,649,837 |
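# Hedged usage sketch (handler name and body are hypothetical): the decorator
# rejects events whose path does not match the given pattern.
# @path_regex(r"^/items/[0-9]+$")
# def handler(event, context):
#     return {"statusCode": 200}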
async def root():
"""Health check"""
return {"status": "OK"} | 80c3c7ff9e1abebbb9f38dc11a5ecd5a7fe5414a | 3,649,838 |
from typing import Dict
from typing import List
def build_foreign_keys(
resources: Dict[str, dict],
prune: bool = True,
) -> Dict[str, List[dict]]:
"""Build foreign keys for each resource.
A resource's `foreign_key_rules` (if present) determines which other resources will
be assigned a foreign key (`foreign_keys`) to the reference's primary key:
* `fields` (List[List[str]]): Sets of field names for which to create a foreign key.
These are assumed to match the order of the reference's primary key fields.
* `exclude` (Optional[List[str]]): Names of resources to exclude.
Args:
resources: Resource descriptors by name.
prune: Whether to prune redundant foreign keys.
Returns:
Foreign keys for each resource (if any), by resource name.
* `fields` (List[str]): Field names.
* `reference['resource']` (str): Reference resource name.
* `reference['fields']` (List[str]): Reference resource field names.
Examples:
>>> resources = {
... 'x': {
... 'schema': {
... 'fields': ['z'],
... 'primary_key': ['z'],
... 'foreign_key_rules': {'fields': [['z']]}
... }
... },
... 'y': {
... 'schema': {
... 'fields': ['z', 'yy'],
... 'primary_key': ['z', 'yy'],
... 'foreign_key_rules': {'fields': [['z', 'zz']]}
... }
... },
... 'z': {'schema': {'fields': ['z', 'zz']}}
... }
>>> keys = build_foreign_keys(resources)
>>> keys['z']
[{'fields': ['z', 'zz'], 'reference': {'resource': 'y', 'fields': ['z', 'yy']}}]
>>> keys['y']
[{'fields': ['z'], 'reference': {'resource': 'x', 'fields': ['z']}}]
>>> keys = build_foreign_keys(resources, prune=False)
>>> keys['z'][0]
{'fields': ['z'], 'reference': {'resource': 'x', 'fields': ['z']}}
"""
tree = _build_foreign_key_tree(resources)
keys = {}
for name in tree:
firsts = []
followed = []
for fields in tree[name]:
path = _traverse_foreign_key_tree(tree, name, fields)
firsts.append(path[0])
followed.extend(path[1:])
keys[name] = firsts
if prune:
# Keep key if not on path of other key
keys[name] = [key for key in keys[name] if key not in followed]
return keys | 96cb032a03445400eeee57a23a4024ae06f62573 | 3,649,839 |
import ipaddress
def port_scan(ip):
"""Run a scan to determine what services are responding.
Returns nmap output in JSON format.
"""
# validate input
valid_ip = ipaddress.ip_address(ip)
    # nmap requires a `-6` option if the target is IPv6
v6_flag = '-6 ' if valid_ip.version == 6 else ''
nmap_command = f'sudo nmap {v6_flag}{valid_ip} --stats-every 60 -oX - ' \
'-R -Pn -T4 --host-timeout 120m --max-scan-delay 5ms ' \
'--max-retries 2 --min-parallelism 32 ' \
'--defeat-rst-ratelimit -sV -O -sS -p1-65535'
completed_process = run_it(nmap_command)
xml_string = completed_process.stdout.decode()
data = bf.data(fromstring(xml_string))
return data | c33cd56635338d3476e4ce5348376a1f6b2cfd68 | 3,649,840 |
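# Hedged usage sketch (requires nmap and sudo on the host; the target below is
# a documentation-range address, so no real host is scanned):
# data = port_scan('192.0.2.10')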
def create_table(p, table_name, schema):
"""Create a new Prism table.
Parameters
----------
p : Prism
Instantiated Prism class from prism.Prism()
table_name : str
        The name of the table to create.
schema : list
A list of dictionaries containing the schema
Returns
-------
If the request is successful, a dictionary containing information about
the table is returned.
"""
p.create_bearer_token()
table = p.create_table(table_name, schema=schema)
return table | 43c8c789d4e212d2d98d68f4f22e3f0fb0a97552 | 3,649,841 |
def get_args():
"""
Parses and processes args, returning the modified arguments as a dict.
    This is to maintain backwards compatibility with the old way of parsing
arguments.
"""
parser = make_parser()
args = parser.parse_args()
process_args(args)
return vars(args) | 8a6f31bd0c9547a007bdd7644d148e8ba0e126d1 | 3,649,842 |
from typing import Iterable
def run_asm_pprinter(ir: gtirb.IR, args: Iterable[str] = ()) -> str:
"""
Runs the pretty-printer to generate an assembly output.
:param ir: The IR object to print.
:param args: Any additional arguments for the pretty printer.
:returns: The assembly string.
"""
asm, _ = run_asm_pprinter_with_outputput(ir, args)
return asm | 8d71a4b91f90cb449f65d5c95ec740d78836a071 | 3,649,843 |
import re
def fix_ccdsec(hdu):
""" Fix CCDSEC keywords in image extensions """
section_regexp = re.compile(SECTION_STRING)
# In unbinned space
ccdsec = _get_key_value(hdu, 'CCDSEC')
detsec = _get_key_value(hdu, 'DETSEC')
if None in [ccdsec, detsec]:
raise ValueError("CCDSEC {}; detsec {}".format(ccdsec, detsec))
updated = False
ccd_coords = list(section_regexp.match(ccdsec).groups())
detector_coords = list(section_regexp.match(detsec).groups())
# Y coordinates should match!
if ccd_coords[2:4] != detector_coords[2:4]:
raise ValueError("Y values: {} {}".format(ccdsec, detsec))
# X coordinates maybe wrong
if ccd_coords[0:2] != detector_coords[0:2]:
for i, x in enumerate(detector_coords[0:2]):
offset_x = int(x) - CCDWIDTH
if offset_x <= 0:
if ccd_coords[i] != detector_coords[i]:
# Use DETSEC
ccd_coords[i] = detector_coords[i]
updated = True
else:
# Reset offset to x
offset_x = x
elif offset_x > CCDWIDTH:
updated = True
offset_x -= CCDWIDTH
# update ccd_coords
ccd_coords[i] = offset_x
# Reset CCDSEC
ccdsec = "[{}:{},{}:{}]".format(ccd_coords[0],
ccd_coords[1],
ccd_coords[2],
ccd_coords[3])
hdu.header['CCDSEC'] = ccdsec
return updated | 1ce3e7e519f47f63f8894c3a29e269ca77d7cf5d | 3,649,844 |
def reload(hdf):
"""Reload a hdf file, hdf = reload(hdf)"""
filename = hdf.filename
return load(filename) | 6eb17d171b1181ac4ed974de6c36f83c00e72c57 | 3,649,845 |
def read_image(im_name, n_channel, data_dir='', batch_size=1, rescale=None):
""" function for create a Dataflow for reading images from a folder
This function returns a Dataflow object for images with file
name containing 'im_name' in directory 'data_dir'.
Args:
im_name (str): part of image names (i.e. 'jpg' or 'im_').
n_channel (int): number of channels (3 for color images and 1 for grayscale images)
data_dir (str): directory of images
batch_size (int): number of images read from Dataflow for each batch
rescale (bool): whether rescale image to 224 or not
Returns:
            Image (object): batch images can be accessed by Image.next_batch_dict()['image']
"""
def rescale_im(im, short_side=416):
""" Pre-process for images
images are rescaled so that the shorter side = 224
"""
im = np.array(im)
h, w = im.shape[0], im.shape[1]
if h >= w:
new_w = short_side
im = imagetool.rescale_image(im, (int(h * new_w / w), short_side))
# im = skimage.transform.resize(
# im, (int(h * new_w / w), short_side), preserve_range=True)
else:
new_h = short_side
im = imagetool.rescale_image(im, (short_side, int(w * new_h / h)))
# im = skimage.transform.resize(
# im, (short_side, int(w * new_h / h)), preserve_range=True)
# return im.astype('uint8')
return im
def normalize_im(im, *args):
im = imagetool.rescale_image(im, rescale)
# im = skimage.transform.resize(
# im, rescale, preserve_range=True)
# im = rescale_im(im, short_side=rescale)
im = np.array(im)
if np.amax(im) > 1:
im = im / 255.
return np.clip(im, 0., 1.)
# if rescale:
# pf_fnc = rescale_im
# else:
# pf_fnc = normalize_im
if isinstance(rescale, int):
rescale = [rescale, rescale]
else:
assert len(rescale) == 2
image_data = Image(
im_name=im_name,
data_dir=data_dir,
n_channel=n_channel,
shuffle=False,
batch_dict_name=['image', 'shape'],
pf_list=(normalize_im,()))
image_data.setup(epoch_val=0, batch_size=batch_size)
return image_data | 017878c8afce1be73160b338407a920c4f01a286 | 3,649,846 |
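# Hedged usage sketch (directory and file prefix are hypothetical): stream
# 3-channel images rescaled to 416x416 in batches of 8.
# image_data = read_image('im_', n_channel=3, data_dir='./data/',
#                         batch_size=8, rescale=416)
# batch = image_data.next_batch_dict()['image']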
def build_optimizer(config, model):
"""
Build optimizer, set weight decay of normalization to 0 by default.
"""
skip = {}
skip_keywords = {}
if hasattr(model, 'no_weight_decay'):
skip = model.no_weight_decay()
if hasattr(model, 'no_weight_decay_keywords'):
skip_keywords = model.no_weight_decay_keywords()
parameters = set_weight_decay(model, skip, skip_keywords)
opt_lower = config.TRAIN.OPTIMIZER.NAME.lower()
optimizer = None
if opt_lower == 'sgd':
optimizer = optim.SGD(parameters, momentum=config.TRAIN.OPTIMIZER.MOMENTUM, nesterov=True,
lr=config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY)
elif opt_lower == 'adamw':
optimizer = optim.AdamW(parameters, eps=config.TRAIN.OPTIMIZER.EPS, betas=config.TRAIN.OPTIMIZER.BETAS,
lr=config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY)
return optimizer | 83a09ed34c24caff7367ba1e43e051f362dfa85c | 3,649,847 |
def ising2d_worm(T_range, mcsteps, L):
"""T = temperature [K]; L = Length of grid."""
def new_head_position(worm, lattice):
"""
Extract current worm head position indices,
then randomly set new worm head position index.
lattice.occupied points to either lattice.bonds_x or lattice.bonds_y.
"""
[i, j] = worm.head
bond_type = np.random.randint(1, worm.q)
direction = ["Up", "Down", "Left", "Right"][np.random.randint(0, 4)]
if direction=="Right":
# use current indices to check for bond
bond = [i, j]
site = [0 if i==L-1 else i+1, j]
lattice.bond_idx = 0
elif direction=="Left":
# use new indices to check for bond
site = [L-1 if i==0 else i-1, j]
bond = [site[0], site[1]]
lattice.bond_idx = 0
elif direction=="Up":
# use current indices to check for bond
bond = [i, j]
site = [i, 0 if j==L-1 else j+1]
lattice.bond_idx = 1
elif direction=="Down":
# use new indices to check for bond
site = [i, L-1 if j==0 else j-1]
bond = [site[0], site[1]]
lattice.bond_idx = 1
return bond, bond_type, site, lattice
def accept_movement(current_bond, bond_type, temperature):
"""
        Bond creation/deletion using Boltzmann factor.
Bonds are always deleted since 1/exp(-2/T) > 1 for all T>0.
"""
if current_bond:
if current_bond==bond_type:
# new_bond = 0 will delete the current bond
accept_probability, new_bond = 1, 0
else:
accept_probability, new_bond = 1-np.exp(-2/temperature), 0
else:
accept_probability, new_bond = np.exp(-2/temperature), bond_type
accept_move = True if np.random.rand()<accept_probability else False
"""
if current_bond==bond_type:
accept_probability, new_bond = 1, 0
else:
accept_probability, new_bond = np.exp(-2/temperature), bond_type
accept_move = True if np.random.rand()<accept_probability else False
"""
return accept_move, new_bond
def monte_carlo_step(lattice, worm, temperature):
"""
Since the lattice matrix is indexed as [column, row], we need to input the
i, j indices in reversed order, as lattice.bond.occupied[j, i].
Measured quantities per step:
Nb_step = number of bonds per step.
G_micro = 2pt correlation function per micro_step corresponding to the
partition function of the worm algorithm for the 2D Ising model.
G_step = 2pt correlation function per step corresponding to the partition
function of the metropolis algorithm for the 2D Ising model.
* Note that G_micro(|i-j|) == G_step(|i-j|) when |i-j|=0.
"""
Nb_step = np.zeros((2))
G_micro, G_step = np.zeros((L+1)), np.zeros((L+1))
G_step_bool = np.zeros((L+1), dtype=bool)
for micro_step in range(2*L**2):
# propose head movement; [i, j] = new bond indices.
[i, j], bond_type, new_site, lattice = new_head_position(worm, lattice)
accept_move, bond_type = accept_movement(lattice.bonds[lattice.bond_idx, j, i], bond_type, temperature)
if accept_move:
# move worm head and either change or delete the bond.
lattice.bonds[lattice.bond_idx, j, i] = bond_type
worm.head = new_site
# Update correlation function every microstep.
diameter = worm.Diameter()
G_micro[diameter] += 1
G_step_bool[diameter] = True
if np.all(worm.head==worm.tail):
# measure observables and reset worm when path is closed.
G_step[G_step_bool] += 1
G_step_bool[:] = False
B=(lattice.bonds>0).sum()
Nb_step += B, B**2
worm.ResetPosition()
return lattice, worm, G_micro, G_step, Nb_step
# initialize main structures.
print('Initializing Worm Algorithm.')
observables = Observables(q, L, T_range, mcsteps)
lattice = Lattice(L)
worm = Worm(q, L)
# correlation, correlation2, and bond_number each act as a pointer.
correlation = observables.correlation #relates to G_micro
correlation2 = observables.correlation2 #relates to G_step
bond_number = observables.mean_bonds #relates to Nb_step
print('Starting thermalization cycle ...')
for step in range(int(mcsteps/5)):
lattice, worm, G_micro, G_step, Nb_step = monte_carlo_step(lattice, worm, T_range[0])
print('Starting measurement cycle ...')
for T_idx, T in enumerate(T_range):
print(" ", "Running temperature =", T, "...")
for step in range(mcsteps):
lattice, worm, G_micro, G_step, Nb_step = monte_carlo_step(lattice, worm, T)
# sum observables
correlation[:, T_idx] += G_micro
correlation2[:, T_idx] += G_step
bond_number[:, T_idx] += Nb_step
# average and store observables.
observables.AverageObservables()
print('Simulation Complete!')
return observables, lattice, worm | 6fba36aceb70f19605e20a460db7054b81264224 | 3,649,848 |
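# Hedged usage sketch (q, Observables, Lattice and Worm are provided by the
# surrounding module; the values below are illustrative only):
# T_range = np.linspace(2.0, 2.5, 6)
# observables, lattice, worm = ising2d_worm(T_range, mcsteps=1000, L=16)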
def valid_chapter_name(chapter_name):
"""
    Check whether the chapter name is a valid chapter title
Args:
chapter_name ([type]): [description]
"""
for each in ["目录"]:
if each in chapter_name:
return False
return True | 9ec71837503f969808a6a666a3bf999ee3290f03 | 3,649,849 |
from typing import Iterable
from typing import Tuple
def lex_min(perms: Iterable[Perm]) -> Tuple[Perm, ...]:
"""Find the lexicographical minimum of the sets of all symmetries."""
return min(all_symmetry_sets(perms)) | 4cbb7e78de32c46684c9e621db90708934bb5e33 | 3,649,850 |
def subfield(string, delim, occurrence):
"""
function to extract specified occurence of subfield from string
using specified field delimiter
eg select subfield('abc/123/xyz','/',0) returns 'abc'
eg select subfield('abc/123/xyz','/',1) returns '123'
eg select subfield('abc/123/xyz','/',2) returns 'xyz'
eg select subfield('abc/123/xyz','/',3) returns ''
"""
"""
# this logic matches the functions written for msql and psql,
# because they do not have a string method to do this
ans = ''
found = 0
for ch in string:
if ch == delim:
found += 1
if found == occurrence + 1:
break
elif found == occurrence:
ans += ch
if not found:
ans = '' # else it returns the entire string
return ans
"""
# python does have a suitable string method, so use it
if delim in string:
try:
return string.split(delim)[occurrence]
except IndexError: # equivalent to the last example above
return ''
else:
return '' | ef022d0ca05e969e8ad69e4644cd24d1b7f47cb8 | 3,649,851 |
def in_hull(points, hull):
"""
    Test if the rows of `points` are inside `hull`
    `points` should be an `NxK` array of coordinates of `N` points in `K` dimensions
`hull` is either a scipy.spatial.Delaunay object or the `MxK` array of the
coordinates of `M` points in `K`dimensions for which Delaunay triangulation
will be computed
"""
# if not isinstance(hull,Delaunay):
del points['flight_name']
del points['output']
del points['TEMPS']
del hull['flight_name']
del hull['output']
del hull['TEMPS']
    # DataFrame.as_matrix() was removed from pandas; to_numpy() is the replacement.
    hull = Delaunay(hull.to_numpy())
    return hull.find_simplex(points.to_numpy()) >= 0 | ab116c17b42c26648b02930824dd0ae591b32eef | 3,649,852
def sample_random(X_all, N):
"""Given an array of (x,t) points, sample N points from this."""
set_seed(0) # this can be fixed for all N_f
idx = np.random.choice(X_all.shape[0], N, replace=False)
X_sampled = X_all[idx, :]
return X_sampled | b2297c13cf7cf40dcdf82ea97e2029a96d7554ef | 3,649,853 |
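# Usage sketch (set_seed is assumed to come from the surrounding module):
# X_all = np.random.rand(100, 2)   # hypothetical (x, t) collocation grid
# X_five = sample_random(X_all, 5)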