id | content
---|---
504272
|
from .helpers import ResourceBase, IterableResource
from .repos import Repository
from .compat import update_doc
class Repos(ResourceBase, IterableResource):
def __getitem__(self, item):
"""
Return a :class:`Repository` object for operations on a specific repository
"""
return Repository(item, self.url(item), self._client, self)
update_doc(Repos.all, """Retrieve repositories from Stash""")
|
504300
|
import json
import hashlib
import mimetypes
import os
import pprint
import uuid
from kinto_http import cli_utils
from kinto_http.exceptions import KintoException
DEFAULT_SERVER = "http://localhost:8888/v1"
def sha256(content):
m = hashlib.sha256()
m.update(content)
return m.hexdigest()
def files_to_upload(records, files, force=False):
records_by_id = {r['id']: r for r in records if 'attachment' in r}
existing_files = {r['attachment']['filename']: r for r in records if 'attachment' in r}
existing_original_files = {r['attachment']['original']['filename']: r
for r in records
if 'attachment' in r and 'original' in r['attachment']}
to_upload = []
for filepath in files:
filename = os.path.basename(filepath)
record = None
if filename in existing_files.keys():
record = existing_files[filename]
elif filename in existing_original_files.keys():
record = existing_original_files[filename]
if record:
records_by_id.pop(record['id'], None)
local_hash = sha256(open(filepath, 'rb').read())
# If file was uploaded gzipped, compare with hash of
# uncompressed file.
remote_hash = record.get('original', {}).get('hash')
if not remote_hash:
remote_hash = record['attachment']['hash']
# If hash has changed, upload !
if local_hash != remote_hash or force:
print("File '%s' has changed." % filename)
to_upload.append((filepath, record))
else:
print("File '%s' is up-to-date." % filename)
else:
identifier = hashlib.md5(filename.encode('utf-8')).hexdigest()
record_id = str(uuid.UUID(identifier))
record = {'id': record_id}
to_upload.append((filepath, record))
# XXX: add option to delete records when files are missing locally
for id, record in records_by_id.items():
print("Ignore remote file '%s'." % record['attachment']['filename'])
return to_upload
def upload_files(client, files):
permissions = {} # XXX: Permissions are inherited from collection.
for filepath, record in files:
mimetype, _ = mimetypes.guess_type(filepath)
filename = os.path.basename(filepath)
filecontent = open(filepath, "rb").read()
record_uri = client.get_endpoint('record', id=record['id'])
attachment_uri = '%s/attachment' % record_uri
multipart = [("attachment", (filename, filecontent, mimetype))]
try:
body, _ = client.session.request(method='post',
endpoint=attachment_uri,
permissions=json.dumps(permissions),
files=multipart)
except KintoException as e:
print(filepath, "error during upload.", e)
else:
pprint.pprint({"id": record['id'], "attachment": body})
def main():
parser = cli_utils.add_parser_options(
description='Upload files to Kinto',
default_server=DEFAULT_SERVER)
parser.add_argument('--force', dest='force', action='store_true',
help='Force upload even if the hash matches')
parser.add_argument('files', metavar='FILE', action='store',
nargs='+')
args = parser.parse_args()
client = cli_utils.create_client_from_args(args)
try:
client.create_bucket(if_not_exists=True)
client.create_collection(if_not_exists=True)
except KintoException:
# Fail silently in case of 403
pass
existing = client.get_records()
to_upload = files_to_upload(existing, args.files, force=args.force)
upload_files(client, to_upload)
if __name__ == '__main__':
main()
|
504321
|
import random
from typing import List, NamedTuple
from torch.utils.data import DataLoader
from mcp.data.dataset.dataset import Dataset, FewShotDataset, IndexedDataset
class FewShotDataLoader(NamedTuple):
support: DataLoader
query: DataLoader
class FewShotDataLoaderSplits(NamedTuple):
train: DataLoader
valid: FewShotDataLoader
test: FewShotDataLoader
class DataLoaderFactory(object):
def __init__(self, batch_size: int, shuffle: bool, pin_memory: bool):
self.batch_size = batch_size
self.shuffle = shuffle
self.pin_memory = pin_memory
def create(self, dataset: Dataset) -> DataLoader:
return DataLoader(
dataset,
batch_size=self.batch_size,
shuffle=self.shuffle,
pin_memory=self.pin_memory,
)
class FewShotDataLoaderFactory(object):
def __init__(
self, num_classes: int, n_ways: int, dataloader_factory: DataLoaderFactory,
):
self.num_classes = num_classes
self.n_ways = n_ways
self.dataloader_factory = dataloader_factory
def create(self, dataset: FewShotDataset) -> FewShotDataLoader:
classes = random.sample(list(range(self.num_classes)), self.n_ways)
support = self._filter_classes(dataset.support, classes)
query = self._filter_classes(dataset.query, classes)
return FewShotDataLoader(
support=self.dataloader_factory.create(support),
query=self.dataloader_factory.create(query),
)
def _filter_classes(self, dataset: Dataset, classes: List[int]) -> Dataset:
indexes = []
for index in range(len(dataset)):
_, clazz = dataset[index]
if clazz in classes:
indexes.append(index)
return IndexedDataset(dataset, indexes)
|
504332
|
import pytest
import matplotlib
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import LabelEncoder
import data_describe as dd
matplotlib.use("Agg")
@pytest.mark.base
@pytest.mark.xfail(reason="Not implemented for modin")
def test_importance(compute_backend_df):
importance_vals = dd.importance(compute_backend_df, "d", return_values=True)
assert len(importance_vals) == compute_backend_df.shape[1] - 1 - 1
@pytest.mark.base
@pytest.mark.xfail(reason="Not implemented for modin")
def test_importance_num_only(data, compute_backend_df):
data = compute_backend_df.select_dtypes(["number"])
rfr = RandomForestRegressor(random_state=1)
assert isinstance(
dd.importance(data, "a", estimator=rfr, return_values=True), np.ndarray
), "Importance values not a numpy array"
@pytest.mark.base
def test_importance_cat_only(data, compute_backend_df):
num_columns = compute_backend_df.select_dtypes(["number"]).columns.values
data = data[[c for c in data.columns if c not in num_columns]]
assert (
len(dd.importance(data, "d", return_values=True)) == data.shape[1] - 2
), "Wrong size of importance values" # f is null column
@pytest.mark.base
@pytest.mark.xfail(reason="Not implemented for modin")
def test_importance_preprocess(data, compute_backend_df):
def pre(df, target):
y = df[target]
df = df.drop(target, axis=1)
x_num = df.select_dtypes(["number"])
x_num = x_num.fillna("-1")
x_cat = df[[c for c in df.columns if c not in x_num.columns]].astype(str)
x_cat = x_cat.fillna("")
x_cat_encoded = x_cat.apply(LabelEncoder().fit_transform)
X = pd.concat([x_num, x_cat_encoded], axis=1)
return X, y
fig = dd.importance(compute_backend_df, "d", preprocess_func=pre)
assert isinstance(fig, matplotlib.artist.Artist)
@pytest.mark.base
@pytest.mark.xfail(reason="Not implemented for modin")
def test_top_feature(compute_backend_df):
fig = dd.importance(compute_backend_df, "d", top_features=1)
assert isinstance(fig, matplotlib.artist.Artist)
|
504343
|
import torch
import sys
import os
sys.path.append(os.getcwd())
sys.path.append(os.path.dirname(os.path.dirname(os.getcwd())))
from private_test_scripts.all_in_one import all_in_one_train # noqa
from training_structures.Supervised_Learning import train, test # noqa
from unimodals.common_models import GRUWithLinear, MLP # noqa
from datasets.affect.get_data import get_dataloader # noqa
from fusions.common_fusions import Concat, TensorFusion # noqa
# mosi_data.pkl, mosei_senti_data.pkl
# mosi_raw.pkl, mosei_raw.pkl, sarcasm.pkl, humor.pkl
# raw_path: mosi.hdf5, mosei.hdf5, sarcasm_raw_text.pkl, humor_raw_text.pkl
traindata, validdata, test_robust = get_dataloader(
'/home/paul/MultiBench/mosi_raw.pkl', robust_test=False)
# mosi/mosei
encoders = [GRUWithLinear(35, 64, 4, dropout=True, has_padding=True).cuda(),
GRUWithLinear(74, 128, 19, dropout=True, has_padding=True).cuda(),
GRUWithLinear(300, 512, 79, dropout=True, has_padding=True).cuda()]
head = MLP(8000, 512, 1).cuda()
# humor/sarcasm
# encoders=[GRUWithLinear(371,512,4,dropout=True,has_padding=True).cuda(), \
# GRUWithLinear(81,256,19,dropout=True,has_padding=True).cuda(),\
# GRUWithLinear(300,600,79,dropout=True,has_padding=True).cuda()]
# head=MLP(8000,512,1).cuda()
fusion = TensorFusion().cuda()
train(encoders, fusion, head, traindata, validdata, 100, task="regression", optimtype=torch.optim.AdamW,
early_stop=False, is_packed=True, lr=1e-3, save='mosi_tf_best.pt', weight_decay=0.01, objective=torch.nn.L1Loss())
print("Testing:")
model = torch.load('mosi_tf_best.pt').cuda()
test(model=model, test_dataloaders_all=test_robust, dataset='mosi',
is_packed=True, criterion=torch.nn.L1Loss(), task='posneg-classification', no_robust=True)
|
504383
|
import logging
from io import BytesIO
from pathlib import Path
from typing import BinaryIO, Optional, cast
import requests
from tqdm import tqdm
ROOT_DIR = Path(__file__).parent.parent / 'data'
DOMAIN = "obamawhitehouse.archives.gov"
BASE_URL = f"https://{DOMAIN}/sites/default/files/omb/memoranda/"
# Found at https://obamawhitehouse.archives.gov/omb/memoranda_default.
PDFS = [
"2011/m11-29.pdf",
"2014/m-14-10.pdf",
"2015/m-15-17.pdf",
"2016/m_16_19_1.pdf",
"2017/m-17-02.pdf",
"2017/m-17-11_0.pdf",
"2017/m-17-13.pdf",
"2017/m-17-15.pdf",
]
logger = logging.getLogger(__name__)
def download(relpath, base_url=BASE_URL, domain=DOMAIN):
url = base_url + relpath
path = ROOT_DIR / Path(*relpath.split('/'))
if not path.exists() or path.stat().st_size == 0:
print(f"Downloading {path} from {domain}...")
path.parent.mkdir(parents=True, exist_ok=True)
output_file = cast(BinaryIO, path.open('wb'))
download_with_progress(url, output_file)
return path
def safe_content_length(response: requests.Response) -> int:
"""Account for missing or malformed content-length data."""
length = response.headers.get('content-length', '0')
if not length.isdigit():
length = '0'
return int(length)
def download_with_progress(url, write_to: Optional[BinaryIO]=None) -> BinaryIO:
if write_to is None:
write_to = BytesIO()
logger.info('Retrieving %s', url)
with requests.get(url, stream=True) as response:
length = safe_content_length(response)
with tqdm(total=length) as pbar:
for chunk in response.iter_content(chunk_size=1024):
write_to.write(chunk)
pbar.update(len(chunk))
return write_to
def main():
    for relpath in PDFS:
        download(relpath)
    print(f"Finished downloading PDFs into '{ROOT_DIR}' directory.")
if __name__ == '__main__':
    main()
|
504411
|
import os
import sys
import numpy as np
import json
import random
import trimesh
from sklearn.decomposition import PCA
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, '..', 'code'))
from pyquaternion import Quaternion
def load_obj(fn):
fin = open(fn, 'r')
lines = [line.rstrip() for line in fin]
fin.close()
vertices = []; faces = [];
for line in lines:
if line.startswith('v '):
vertices.append(np.float32(line.split()[1:4]))
elif line.startswith('f '):
faces.append(np.int32([item.split('/')[0] for item in line.split()[1:4]]))
f_arr = np.vstack(faces)
v_arr = np.vstack(vertices)
mesh = dict()
mesh['v'] = v_arr
mesh['f'] = f_arr
return mesh
def export_obj(out, mesh):
v = mesh['v']; f = mesh['f'];
with open(out, 'w') as fout:
for i in range(v.shape[0]):
fout.write('v %f %f %f\n' % (v[i, 0], v[i, 1], v[i, 2]))
for i in range(f.shape[0]):
fout.write('f %d %d %d\n' % (f[i, 0], f[i, 1], f[i, 2]))
def get_quaternion_from_axis_angle(axis, angle):
return Quaternion(axis=axis, angle=angle)
def get_quaternion_from_xy_axes(x, y):
x /= np.linalg.norm(x)
y /= np.linalg.norm(y)
z = np.cross(x, y)
z /= np.linalg.norm(z)
y = np.cross(z, x)
y /= np.linalg.norm(y)
R = np.vstack([x, y, z]).T
return Quaternion(matrix=R)
def get_rot_mat_from_quaternion(q):
return np.array(q.transformation_matrix, dtype=np.float32)
# center: numpy array of length 3
# size: numpy array of length 3
# q: numpy array of length 4 for quaternion
# output: mesh
# v --> vertices
# f --> faces
# setting --> 4 x 4 numpy array containing the world coordinates for the cube center (0, 0, 0, 1)
# and three local axes (1, 0, 0, 1), (0, 1, 0, 1), (0, 0, 1, 1)
def gen_cuboid(center, size, q):
cube_mesh = load_obj('cube.obj')
cube_v = cube_mesh['v']
cube_f = cube_mesh['f']
n_vert = cube_v.shape[0]
    n_face = cube_f.shape[0]
cube_v = np.concatenate([cube_v, np.ones((n_vert, 1))], axis=1)
cube_control_v = np.array([[0, 0, 0, 1], [1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]], dtype=np.float32)
S = np.array([[size[0], 0, 0, 0], [0, size[1], 0, 0], [0, 0, size[2], 0], [0, 0, 0, 1]], dtype=np.float32)
R = q.transformation_matrix
T = np.array([[1, 0, 0, center[0]], [0, 1, 0, center[1]], [0, 0, 1, center[2]], [0, 0, 0, 1]], dtype=np.float32)
rot = T.dot(R).dot(S)
cube_v = rot.dot(cube_v.T).T
cube_control_v = rot.dot(cube_control_v.T).T
mesh = dict()
mesh['v'] = cube_v
mesh['f'] = cube_f
mesh['setting'] = cube_control_v
return mesh
def assemble_meshes(mesh_list):
n_vert = 0
verts = []; faces = [];
for mesh in mesh_list:
verts.append(mesh['v'])
faces.append(mesh['f']+n_vert)
n_vert += mesh['v'].shape[0]
vert_arr = np.vstack(verts)
face_arr = np.vstack(faces)
mesh = dict()
mesh['v'] = vert_arr
mesh['f'] = face_arr
return mesh
def export_settings(out_fn, setting_list):
with open(out_fn, 'w') as fout:
for setting in setting_list:
for i in range(4):
for j in range(4):
fout.write('%f ' % setting[i, j])
fout.write('\n')
def export_csg(out_fn, csg):
with open(out_fn, 'w') as fout:
json.dump(csg, fout)
def export_meshes(out, mesh_list):
with open(out, 'w') as fout:
n_vert = 0
verts = []; faces = [];
for idx, mesh in enumerate(mesh_list):
fout.write('\ng %d\n' % idx)
for i in range(mesh['v'].shape[0]):
fout.write('v %f %f %f\n' % (mesh['v'][i, 0], mesh['v'][i, 1], mesh['v'][i, 2]))
for i in range(mesh['f'].shape[0]):
fout.write('f %d %d %d\n' % (mesh['f'][i, 0]+n_vert, mesh['f'][i, 1]+n_vert, mesh['f'][i, 2]+n_vert))
n_vert += mesh['v'].shape[0]
def gen_cuboid_from_setting(setting):
R = np.array([setting[1] - setting[0],
setting[2] - setting[0],
setting[3] - setting[0],
setting[0]], dtype=np.float32).T
cube_mesh = load_obj('cube.obj')
cube_v = cube_mesh['v']
cube_f = cube_mesh['f']
n_vert = cube_v.shape[0]
cube_v = np.concatenate([cube_v, np.ones((n_vert, 1))], axis=1)
mesh = dict()
mesh['v'] = R.dot(cube_v.T).T
mesh['f'] = cube_f
mesh['setting'] = setting
return mesh
def settings_to_meshes(settings):
meshes = []
for setting in settings:
meshes.append(gen_cuboid_from_setting(setting))
return meshes
def create_axis_aligned_setting(x_min, x_max, y_min, y_max, z_min, z_max):
setting = np.array([[(x_min+x_max)/2, (y_min+y_max)/2, (z_min+z_max)/2, 1],
[x_max, (y_min+y_max)/2, (z_min+z_max)/2, 1],
[(x_min+x_max)/2, y_max, (z_min+z_max)/2, 1],
[(x_min+x_max)/2, (y_min+y_max)/2, z_max, 1]], dtype=np.float32)
return setting
def create_rotate_45_setting(x_min, x_max, y_min, y_max, z_min, z_max):
l1 = (x_max - x_min) / 2 / np.sqrt(2)
l2 = (z_max - z_min) / 2 / np.sqrt(2)
setting = np.array([[(x_min+x_max)/2, (y_min+y_max)/2, (z_min+z_max)/2, 1],
[(x_min+x_max)/2+l1, (y_min+y_max)/2, (z_min+z_max)/2+l1, 1],
[(x_min+x_max)/2, y_max, (z_min+z_max)/2, 1],
[(x_min+x_max)/2-l2, (y_min+y_max)/2, (z_min+z_max)/2+l2, 1]], dtype=np.float32)
return setting
def normalize_shape(settings):
mesh = assemble_meshes(settings_to_meshes(settings))
pts = sample_pc(mesh['v'][:, :3], mesh['f'], n_points=200)
center = np.mean(pts, axis=0)
pts -= center
scale = np.sqrt(np.max(np.sum(pts**2, axis=1)))
T = np.array([[1, 0, 0, -center[0]],
[0, 1, 0, -center[1]],
[0, 0, 1, -center[2]],
[0, 0, 0, 1]], dtype=np.float32)
S = np.array([[1.0/scale, 0, 0, 0],
[0, 1.0/scale, 0, 0],
[0, 0, 1.0/scale, 0],
[0, 0, 0, 1]], dtype=np.float32)
rot_mat = S.dot(T)
new_settings = []
for setting in settings:
new_settings.append(rot_mat.dot(setting.T).T)
return new_settings
def random_rotate(settings):
rotation_angle = random.random() * 2 * np.pi
cosval = np.cos(rotation_angle)
sinval = np.sin(rotation_angle)
rotation_matrix = np.array([[cosval, 0, sinval, 0],
[0, 1, 0, 0],
[-sinval, 0, cosval, 0],
[0, 0, 0, 1]], dtype=np.float32)
new_settings = []
for setting in settings:
new_settings.append(rotation_matrix.dot(setting.T).T)
return new_settings
def gen_obb_mesh(obbs):
# load cube
cube_mesh = load_obj('cube.obj')
cube_v = cube_mesh['v']
cube_f = cube_mesh['f']
all_v = []; all_f = []; vid = 0;
for pid in range(obbs.shape[0]):
p = obbs[pid, :]
center = p[0: 3]
lengths = p[3: 6]
dir_1 = p[6: 9]
dir_2 = p[9: ]
dir_1 = dir_1/np.linalg.norm(dir_1)
dir_2 = dir_2/np.linalg.norm(dir_2)
dir_3 = np.cross(dir_1, dir_2)
dir_3 = dir_3/np.linalg.norm(dir_3)
v = np.array(cube_v, dtype=np.float32)
f = np.array(cube_f, dtype=np.int32)
rot = np.vstack([dir_1, dir_2, dir_3])
v *= lengths
v = np.matmul(v, rot)
v += center
all_v.append(v)
all_f.append(f+vid)
vid += v.shape[0]
all_v = np.vstack(all_v)
all_f = np.vstack(all_f)
return all_v, all_f
def sample_pc(v, f, n_points=10000):
mesh = trimesh.Trimesh(vertices=v, faces=f-1)
points, __ = trimesh.sample.sample_surface(mesh=mesh, count=n_points)
return points
def fit_box(points):
pca = PCA()
pca.fit(points)
pcomps = pca.components_
points_local = np.matmul(pcomps, points.transpose()).transpose()
all_max = points_local.max(axis=0)
all_min = points_local.min(axis=0)
center = np.dot(np.linalg.inv(pcomps), (all_max + all_min) / 2)
size = all_max - all_min
xdir = pcomps[0, :]
ydir = pcomps[1, :]
return np.hstack([center, size, xdir, ydir]).astype(np.float32)
|
504418
|
import ast
import operator
import pytest
from radon.complexity import *
from radon.contrib.flake8 import Flake8Checker
from radon.visitors import Class, Function
from .test_complexity_visitor import GENERAL_CASES, dedent
get_index = lambda seq: lambda index: seq[index]
def _compute_cc_rank(score):
# This is really ugly
# Luckily the rank function in radon.complexity is not like this!
if score < 0:
rank = ValueError
elif 0 <= score <= 5:
rank = 'A'
elif 6 <= score <= 10:
rank = 'B'
elif 11 <= score <= 20:
rank = 'C'
elif 21 <= score <= 30:
rank = 'D'
elif 31 <= score <= 40:
rank = 'E'
else:
rank = 'F'
return rank
RANK_CASES = [(score, _compute_cc_rank(score)) for score in range(-1, 100)]
@pytest.mark.parametrize('score,expected_rank', RANK_CASES)
def test_rank(score, expected_rank):
if hasattr(expected_rank, '__call__') and isinstance(
expected_rank(), Exception
):
with pytest.raises(expected_rank):
cc_rank(score)
else:
assert cc_rank(score) == expected_rank
fun = lambda complexity: Function(
'randomname', 1, 4, 23, False, None, [], complexity
)
cls = lambda complexity: Class('randomname_', 3, 21, 18, [], [], complexity)
# This works with both the next two tests
SIMPLE_BLOCKS = [
([], [], 0.0),
([fun(12), fun(14), fun(1)], [1, 0, 2], 9.0),
([fun(4), cls(5), fun(2), cls(21)], [3, 1, 0, 2], 8.0),
]
@pytest.mark.parametrize('blocks,indices,_', SIMPLE_BLOCKS)
def test_sorted_results(blocks, indices, _):
expected_result = list(map(get_index(blocks), indices))
assert sorted_results(blocks) == expected_result
@pytest.mark.parametrize('blocks,_,expected_average', SIMPLE_BLOCKS)
def test_average_complexity(blocks, _, expected_average):
assert average_complexity(blocks) == expected_average
CC_VISIT_CASES = [
(GENERAL_CASES[0][0], 1, 1, 'f.inner'),
(GENERAL_CASES[1][0], 3, 1, 'f.inner'),
(
'''
class joe1:
i = 1
def doit1(self):
pass
class joe2:
ii = 2
def doit2(self):
pass
class joe3:
iii = 3
def doit3(self):
pass
''',
2,
4,
'joe1.joe2.joe3',
),
]
@pytest.mark.parametrize('code,number_of_blocks,diff,lookfor', CC_VISIT_CASES)
def test_cc_visit(code, number_of_blocks, diff, lookfor):
code = dedent(code)
blocks = cc_visit(code)
assert isinstance(blocks, list)
assert len(blocks) == number_of_blocks
with_inner_blocks = add_inner_blocks(blocks)
names = set(map(operator.attrgetter('name'), with_inner_blocks))
assert len(with_inner_blocks) - len(blocks) == diff
assert lookfor in names
def test_flake8_checker():
c = Flake8Checker(ast.parse(dedent(GENERAL_CASES[0][0])), 'test case')
assert c.max_cc == -1
assert c.no_assert is False
assert list(c.run()) == []
c.max_cc = 3
assert list(c.run()) == [(7, 0, 'R701 \'f\' is too complex (4)', type(c))]
|
504419
|
import setuptools
__version__ = '1.0.2'
with open("README.rst", "r") as fh:
long_description = fh.read()
setuptools.setup(
name = 'pyssian',
version = __version__,
description = 'Parser Library for Gaussian Files',
long_description=long_description,
long_description_content_type="text/x-rst",
url = 'https://github.com/maserasgroup-repo/pyssian',
author = '<NAME>',
author_email = '<EMAIL>',
classifiers = ['License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
    keywords = ['compchem', 'gaussian', 'parser'],
packages = setuptools.find_packages(),
python_requires='>=3.6',
install_requires=['setuptools','pathlib','numpy'],
include_package_data=True,
package_data = {'test_files': ['pyssian/tests/test_files/*.txt'],
'tests' : ['pyssian/tests/*.py']},
project_urls={'Bug Reports': 'https://github.com/maserasgroup-repo/pyssian/issues',
'Source': 'https://github.com/maserasgroup-repo/pyssian',
'Docs' : 'https://maserasgroup-repo.github.io/pyssian/'
},
)
|
504457
|
class InjectException(Exception):
"""Base class for all exceptions."""
pass
class NonInjectableTypeError(InjectException):
"""Raised when a type could not be injected (i.e. they are no corresponding bindings)."""
pass
class NoBindingFound(NonInjectableTypeError):
"""Raised when no binding was found for a particular class, is caught internally."""
pass
class BindingError(InjectException):
"""Raised when registering a binding with an invalid value."""
pass
class NamedError(InjectException):
"""Raised when named_arg is used with an unexpected argument."""
pass
class CyclicDependencyError(InjectException):
"""Raised when a cyclic dependency is detected."""
pass
|
504494
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome import pins
from esphome.components import remote_base
from esphome.const import (
CONF_BUFFER_SIZE,
CONF_DUMP,
CONF_FILTER,
CONF_ID,
CONF_IDLE,
CONF_PIN,
CONF_TOLERANCE,
CONF_MEMORY_BLOCKS,
)
from esphome.core import CORE
AUTO_LOAD = ["remote_base"]
remote_receiver_ns = cg.esphome_ns.namespace("remote_receiver")
RemoteReceiverComponent = remote_receiver_ns.class_(
"RemoteReceiverComponent", remote_base.RemoteReceiverBase, cg.Component
)
MULTI_CONF = True
CONFIG_SCHEMA = remote_base.validate_triggers(
cv.Schema(
{
cv.GenerateID(): cv.declare_id(RemoteReceiverComponent),
cv.Required(CONF_PIN): cv.All(pins.internal_gpio_input_pin_schema),
cv.Optional(CONF_DUMP, default=[]): remote_base.validate_dumpers,
cv.Optional(CONF_TOLERANCE, default=25): cv.All(
cv.percentage_int, cv.Range(min=0)
),
cv.SplitDefault(
CONF_BUFFER_SIZE, esp32="10000b", esp8266="1000b"
): cv.validate_bytes,
cv.Optional(
CONF_FILTER, default="50us"
): cv.positive_time_period_microseconds,
cv.Optional(
CONF_IDLE, default="10ms"
): cv.positive_time_period_microseconds,
cv.Optional(CONF_MEMORY_BLOCKS, default=3): cv.Range(min=1, max=8),
}
).extend(cv.COMPONENT_SCHEMA)
)
async def to_code(config):
pin = await cg.gpio_pin_expression(config[CONF_PIN])
if CORE.is_esp32:
var = cg.new_Pvariable(config[CONF_ID], pin, config[CONF_MEMORY_BLOCKS])
else:
var = cg.new_Pvariable(config[CONF_ID], pin)
dumpers = await remote_base.build_dumpers(config[CONF_DUMP])
for dumper in dumpers:
cg.add(var.register_dumper(dumper))
await remote_base.build_triggers(config)
await cg.register_component(var, config)
cg.add(var.set_tolerance(config[CONF_TOLERANCE]))
cg.add(var.set_buffer_size(config[CONF_BUFFER_SIZE]))
cg.add(var.set_filter_us(config[CONF_FILTER]))
cg.add(var.set_idle_us(config[CONF_IDLE]))
|
504510
|
HAVE_RUN_INIT = "False"
def on_init(server):
global HAVE_RUN_INIT
HAVE_RUN_INIT = "True"
def on_message(msg, server):
if msg["text"] == u"test_init":
return HAVE_RUN_INIT
|
504511
|
from distutils.util import strtobool
print(strtobool('true'))
print(strtobool('True'))
print(strtobool('TRUE'))
# 1
# 1
# 1
print(strtobool('t'))
print(strtobool('yes'))
print(strtobool('y'))
print(strtobool('on'))
print(strtobool('1'))
# 1
# 1
# 1
# 1
# 1
print(strtobool('false'))
print(strtobool('False'))
print(strtobool('FALSE'))
# 0
# 0
# 0
print(strtobool('f'))
print(strtobool('no'))
print(strtobool('n'))
print(strtobool('off'))
print(strtobool('0'))
# 0
# 0
# 0
# 0
# 0
# print(strtobool('abc'))
# ValueError: invalid truth value 'abc'
try:
strtobool('abc')
except ValueError as e:
print('other value')
# other value
print(type(strtobool('true')))
# <class 'int'>
if strtobool('yes'):
print('True!')
# True!
|
504527
|
import heapq
import math
from typing import List
class Solution:
    # Greedy: start from the floor of every price, then round up the `target`
    # prices whose ceiling adds the least extra error (tracked in a min-heap).
    def minimizeError(self, prices: List[str], target: int) -> str:
pq = []
error = 0
for p in map(float, prices):
f = math.floor(p)
c = math.ceil(p)
target -= f
error += p - f
if f != c:
heapq.heappush(pq, (c - p) - (p - f))
if target < 0 or len(pq) < target:
return '-1'
while target > 0:
error += heapq.heappop(pq)
target -= 1
return '{:.3f}'.format(error)
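# Quick sanity check (standard example, verified by hand): the floors sum to 0+2+4=6, so two
# prices must be rounded up; taking the two cheapest ceilings leaves a total error of 1.000.
assert Solution().minimizeError(["0.700", "2.800", "4.900"], 8) == "1.000"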
|
504530
|
import sys
import json
import yaml
import requests
import argparse
def _get_interior_materials_common_descriptor(eegeo_assets_host_name, interior_materials_version):
descriptor_url = "http://{host_name}/interior-materials/v{version}/common/descriptor.json.gz".format(
host_name=eegeo_assets_host_name, version=interior_materials_version)
descriptor_request = requests.get(descriptor_url)
return json.loads(descriptor_request.content)
def process_manifest(source_file, version, eegeo_assets_host_name, theme_assets_host_name, landmark_textures_version, interior_materials_version):
_, eegeo_assets_host = eegeo_assets_host_name.split("//")
interior_materials_common_descriptor = _get_interior_materials_common_descriptor(eegeo_assets_host, interior_materials_version)
with open(source_file, "r") as f:
lines = f.readlines()
yaml_document = yaml.load("".join(lines))['ThemeManifest']
for k in yaml_document:
if isinstance(yaml_document[k], str):
yaml_document[k] = yaml_document[k].replace("%EEGEO_ASSETS_HOST_NAME%", eegeo_assets_host_name)
yaml_document[k] = yaml_document[k].replace("%THEME_ASSETS_HOST_NAME%", theme_assets_host_name)
yaml_document[k] = yaml_document[k].replace("%VERSION%", version)
yaml_document[k] = yaml_document[k].replace("%LANDMARK_TEXTURES_VERSION%", landmark_textures_version)
yaml_document[k] = yaml_document[k].replace("%INTERIOR_MATERIALS_VERSION%", interior_materials_version)
yaml_document["InteriorMaterials"] = interior_materials_common_descriptor
print json.dumps(yaml_document, sort_keys=True, indent=4, separators=(',', ': '))
def read_version_from_file(version_filename):
with open(version_filename, 'r') as f:
version = f.readline()
return version.rstrip()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='build a theme manifest')
parser.add_argument('source_file', type=str, help='source yaml file path. E.g. manifest/manifest.yaml')
parser.add_argument('version', type=str, help='version. E.g. 123')
parser.add_argument('eegeo_assets_host_name', type=str, help='the hostname that the theme assets provided by eegeo will be served from. E.g. cdn-resources.wrld3d.com')
parser.add_argument('theme_assets_host_name', type=str, help='the hostname that the theme assets created by this script will be served from. E.g. cdn-resources.wrld3d.com')
parser.add_argument('landmark_textures_version_file', type=str, help='File containing the version number of the landmark textures store.')
parser.add_argument('interior_materials_version_file', type=str, help='File containing the version number of the interior materials store')
args = parser.parse_args()
process_manifest(
args.source_file,
args.version,
args.eegeo_assets_host_name,
args.theme_assets_host_name,
read_version_from_file(args.landmark_textures_version_file),
read_version_from_file(args.interior_materials_version_file))
|
504629
|
import numpy as np
import unittest
import os
import openmdao.api as om
from openmdao.utils.assert_utils import assert_near_equal
import pycycle.api as pyc
from N3_MDP import N3_MDP_model
class N3MDPTestCase(unittest.TestCase):
def benchmark_case1(self):
prob = N3_MDP_model()
prob.setup()
# Define the design point
prob.set_val('TOC.splitter.BPR', 23.7281)
prob.set_val('TOC.balance.rhs:hpc_PR', 53.6332)
# Set specific cycle parameters
prob.set_val('fan:PRdes', 1.300)
prob.set_val('SLS.balance.rhs:FAR', 28620.9, units='lbf')
prob.set_val('CRZ.balance.rhs:FAR', 5466.5, units='lbf')
        prob.set_val('lpc:PRdes', 3.000)
prob.set_val('T4_ratio.TR', 0.926470588)
prob.set_val('bal.mult:TOC_BPR', 1.41038)
prob.set_val('RTO.hpt_cooling.x_factor', 0.9)
# Set initial guesses for balances
prob['TOC.balance.FAR'] = 0.02650
prob['bal.TOC_W'] = 820.95
prob['TOC.balance.lpt_PR'] = 10.937
prob['TOC.balance.hpt_PR'] = 4.185
prob['TOC.fc.balance.Pt'] = 5.272
prob['TOC.fc.balance.Tt'] = 444.41
FAR_guess = [0.02832, 0.02541, 0.02510]
W_guess = [1916.13, 2000. , 802.79]
BPR_guess = [25.5620, 27.3467, 24.3233]
fan_Nmech_guess = [2132.6, 1953.1, 2118.7]
lp_Nmech_guess = [6611.2, 6054.5, 6567.9]
hp_Nmech_guess = [22288.2, 21594.0, 20574.1]
Pt_guess = [15.349, 14.696, 5.272]
Tt_guess = [552.49, 545.67, 444.41]
hpt_PR_guess = [4.210, 4.245, 4.197]
lpt_PR_guess = [8.161, 7.001, 10.803]
fan_Rline_guess = [1.7500, 1.7500, 1.9397]
lpc_Rline_guess = [2.0052, 1.8632, 2.1075]
hpc_Rline_guess = [2.0589, 2.0281, 1.9746]
trq_guess = [52509.1, 41779.4, 22369.7]
for i, pt in enumerate(prob.model.od_pts):
# initial guesses
prob[pt+'.balance.FAR'] = FAR_guess[i]
prob[pt+'.balance.W'] = W_guess[i]
prob[pt+'.balance.BPR'] = BPR_guess[i]
prob[pt+'.balance.fan_Nmech'] = fan_Nmech_guess[i]
prob[pt+'.balance.lp_Nmech'] = lp_Nmech_guess[i]
prob[pt+'.balance.hp_Nmech'] = hp_Nmech_guess[i]
prob[pt+'.fc.balance.Pt'] = Pt_guess[i]
prob[pt+'.fc.balance.Tt'] = Tt_guess[i]
prob[pt+'.hpt.PR'] = hpt_PR_guess[i]
prob[pt+'.lpt.PR'] = lpt_PR_guess[i]
prob[pt+'.fan.map.RlineMap'] = fan_Rline_guess[i]
prob[pt+'.lpc.map.RlineMap'] = lpc_Rline_guess[i]
prob[pt+'.hpc.map.RlineMap'] = hpc_Rline_guess[i]
prob[pt+'.gearbox.trq_base'] = trq_guess[i]
prob.run_model()
tol = 1e-4
assert_near_equal(prob['TOC.inlet.Fl_O:stat:W'], 820.44097898, tol)#
assert_near_equal(prob['TOC.inlet.Fl_O:tot:P'], 5.26210728, tol)#
assert_near_equal(prob['TOC.hpc.Fl_O:tot:P'], 275.21039426, tol)#
assert_near_equal(prob['TOC.burner.Wfuel'], 0.74702034, tol)#
assert_near_equal(prob['TOC.inlet.F_ram'], 19854.83873204, tol)#
assert_near_equal(prob['TOC.core_nozz.Fg'], 1547.14500321, tol)#
assert_near_equal(prob['TOC.byp_nozz.Fg'], 24430.78721659, tol)#
assert_near_equal(prob['TOC.perf.TSFC'], 0.43920593, tol)#
assert_near_equal(prob['TOC.perf.OPR'], 52.30041498, tol)#
assert_near_equal(prob['TOC.balance.FAR'], 0.02671119, tol)#
assert_near_equal(prob['TOC.hpc.Fl_O:tot:T'], 1517.97985269, tol)#
assert_near_equal(prob['RTO.inlet.Fl_O:stat:W'], 1915.22359344, tol)#
assert_near_equal(prob['RTO.inlet.Fl_O:tot:P'], 15.3028198, tol)#
assert_near_equal(prob['RTO.hpc.Fl_O:tot:P'], 623.40703024, tol)#
assert_near_equal(prob['RTO.burner.Wfuel'], 1.73578775, tol)#
assert_near_equal(prob['RTO.inlet.F_ram'], 17040.48046811, tol)#
assert_near_equal(prob['RTO.core_nozz.Fg'], 2208.8023950, tol)#
assert_near_equal(prob['RTO.byp_nozz.Fg'], 37631.67807307, tol)#
assert_near_equal(prob['RTO.perf.TSFC'], 0.27407175, tol)#
assert_near_equal(prob['RTO.perf.OPR'], 40.73804947, tol)#
assert_near_equal(prob['RTO.balance.FAR'], 0.02854964, tol)#
assert_near_equal(prob['RTO.balance.fan_Nmech'], 2133.20964469, tol)#
assert_near_equal(prob['RTO.balance.lp_Nmech'], 6612.99872459, tol)#
assert_near_equal(prob['RTO.balance.hp_Nmech'], 22294.43280596, tol)#
assert_near_equal(prob['RTO.hpc.Fl_O:tot:T'], 1707.84433893, tol)#
assert_near_equal(prob['SLS.inlet.Fl_O:stat:W'], 1733.66701727, tol)#
assert_near_equal(prob['SLS.inlet.Fl_O:tot:P'], 14.62242048, tol)#
assert_near_equal(prob['SLS.hpc.Fl_O:tot:P'], 509.33689017, tol)#
assert_near_equal(prob['SLS.burner.Wfuel'], 1.32070102, tol)#
assert_near_equal(prob['SLS.inlet.F_ram'], 0.06170041, tol)#
assert_near_equal(prob['SLS.core_nozz.Fg'], 1526.46929726, tol)#
assert_near_equal(prob['SLS.byp_nozz.Fg'], 27094.43240315, tol)#
assert_near_equal(prob['SLS.perf.TSFC'], 0.16612104, tol)#
assert_near_equal(prob['SLS.perf.OPR'], 34.8325977, tol)#
assert_near_equal(prob['SLS.balance.FAR'], 0.02560289, tol)#
assert_near_equal(prob['SLS.balance.fan_Nmech'], 1953.67920923, tol)#
assert_near_equal(prob['SLS.balance.lp_Nmech'], 6056.45026545, tol)#
assert_near_equal(prob['SLS.balance.hp_Nmech'], 21599.43696168, tol)#
assert_near_equal(prob['SLS.hpc.Fl_O:tot:T'], 1615.20862445, tol)#
assert_near_equal(prob['CRZ.inlet.Fl_O:stat:W'], 802.28514996, tol)#
assert_near_equal(prob['CRZ.inlet.Fl_O:tot:P'], 5.26210728, tol)#
assert_near_equal(prob['CRZ.hpc.Fl_O:tot:P'], 258.04448231, tol)#
assert_near_equal(prob['CRZ.burner.Wfuel'], 0.67564259, tol)#
assert_near_equal(prob['CRZ.inlet.F_ram'], 19415.50698852, tol)#
assert_near_equal(prob['CRZ.core_nozz.Fg'], 1375.45106569, tol)#
assert_near_equal(prob['CRZ.byp_nozz.Fg'], 23550.78724671, tol)#
assert_near_equal(prob['CRZ.perf.TSFC'], 0.44137759, tol)#
assert_near_equal(prob['CRZ.perf.OPR'], 49.03824052, tol)#
assert_near_equal(prob['CRZ.balance.FAR'], 0.02530018, tol)#
assert_near_equal(prob['CRZ.balance.fan_Nmech'], 2118.62665338, tol)#
assert_near_equal(prob['CRZ.balance.lp_Nmech'], 6567.79111774, tol)#
assert_near_equal(prob['CRZ.balance.hp_Nmech'], 20574.44969253, tol)#
assert_near_equal(prob['CRZ.hpc.Fl_O:tot:T'], 1481.97697491, tol)#
if __name__ == "__main__":
unittest.main()
|
504641
|
from .cassette import Cassette
from .exceptions import InvalidOption, validation_error_map
def validate_record(record):
return record in ['all', 'new_episodes', 'none', 'once']
def validate_matchers(matchers):
from betamax.matchers import matcher_registry
available_matchers = list(matcher_registry.keys())
return all(m in available_matchers for m in matchers)
def validate_serializer(serializer):
from betamax.serializers import serializer_registry
return serializer in list(serializer_registry.keys())
def validate_placeholders(placeholders):
"""Validate placeholders is a dict-like structure"""
keys = ['placeholder', 'replace']
try:
return all(sorted(list(p.keys())) == keys for p in placeholders)
except TypeError:
return False
def translate_cassette_options():
for (k, v) in Cassette.default_cassette_options.items():
yield (k, v) if k != 'record_mode' else ('record', v)
def isboolean(value):
return value in [True, False]
class Options(object):
valid_options = {
'match_requests_on': validate_matchers,
're_record_interval': lambda x: x is None or x > 0,
'record': validate_record,
'serialize': validate_serializer, # TODO: Remove this
'serialize_with': validate_serializer,
'preserve_exact_body_bytes': isboolean,
'placeholders': validate_placeholders,
'allow_playback_repeats': isboolean,
}
defaults = {
'match_requests_on': ['method', 'uri'],
're_record_interval': None,
'record': 'once',
'serialize': None, # TODO: Remove this
'serialize_with': 'json',
'preserve_exact_body_bytes': False,
'placeholders': [],
'allow_playback_repeats': False,
}
def __init__(self, data=None):
self.data = data or {}
self.validate()
self.defaults = Options.defaults.copy()
self.defaults.update(translate_cassette_options())
def __repr__(self):
return 'Options(%s)' % self.data
def __getitem__(self, key):
return self.data.get(key, self.defaults.get(key))
def __setitem__(self, key, value):
self.data[key] = value
return value
def __delitem__(self, key):
del self.data[key]
def __contains__(self, key):
return key in self.data
def items(self):
return self.data.items()
def validate(self):
for key, value in list(self.data.items()):
if key not in Options.valid_options:
raise InvalidOption('{0} is not a valid option'.format(key))
else:
is_valid = Options.valid_options[key]
if not is_valid(value):
raise validation_error_map[key]('{0!r} is not valid'
.format(value))
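# Usage sketch (assumption, not part of the original module): unknown keys raise
# InvalidOption during validate(), and lookups fall back to the merged defaults.
# opts = Options({'record': 'none', 'preserve_exact_body_bytes': True})
# opts['record']             # -> 'none'
# opts['match_requests_on']  # -> ['method', 'uri'] (from the defaults)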
|
504723
|
from tool.runners.python import SubmissionPy
class ThChSubmission(SubmissionPy):
def run(self, input):
valid = 0
for line in input.split("\n"):
policy, letter_with_colon, password = line.split(" ")
policy_min, policy_max = policy.split("-")
letter = letter_with_colon[:-1]
actual_count = password.count(letter)
if int(policy_min) <= actual_count <= int(policy_max):
valid += 1
return valid
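# Usage sketch (illustrative puzzle input, not from the original file); the contest runner
# normally constructs the submission and passes the raw input string to run(), e.g.:
#   ThChSubmission().run("1-3 a: abcde\n1-3 b: cdefg\n2-9 c: ccccccccc")  # -> 2
# (lines 1 and 3 satisfy their letter-count policies, line 2 does not)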
|
504765
|
import cv2
import numpy as np
from trojai.datagen.image_entity import ImageEntity
class FlatIconDotComPng(ImageEntity):
"""
Defines a png icon for a trigger.
"""
def __init__(self, trigger_fpath, mode='graffiti', trigger_color=None, postit_bg_color=None, size=None):
"""
Initializes a trigger from a png file
:param trigger_fpath: filepath to the png image defining the trigger.
:param mode: trigger mode.
:param trigger_color: trigger color RGB.
:param postit_bg_color: trigger background color RBG.
:param size: trigger target size.
"""
if postit_bg_color is None:
postit_bg_color = [0, 0, 0] # default black background
if trigger_color is None:
trigger_color = [255, 255, 255] # default white trigger
self.data = cv2.imread(trigger_fpath, cv2.IMREAD_UNCHANGED)
if size is not None:
self.data = cv2.resize(self.data, dsize=size, interpolation=cv2.INTER_NEAREST)
if mode.lower() == 'graffiti':
self.mask = (self.data[:,:,3] > 0).astype(bool)
for c in range(3):
self.data[:, :, c] = trigger_color[c]
elif mode.lower() == 'postit':
self.mask = np.ones((self.data.shape[0], self.data.shape[1]), dtype=bool)
data_new = np.zeros((self.data.shape[0], self.data.shape[1], 3), dtype=np.uint8)
ident_mat = np.ones((self.data.shape[0], self.data.shape[1]), dtype=np.uint8)
np.putmask(data_new[:, :, 0], self.data[:, :, 3].astype(bool), trigger_color[0] * ident_mat)
np.putmask(data_new[:, :, 0], ~self.data[:, :, 3].astype(bool), postit_bg_color[0] * ident_mat)
np.putmask(data_new[:, :, 1], self.data[:, :, 3].astype(bool), trigger_color[1] * ident_mat)
np.putmask(data_new[:, :, 1], ~self.data[:, :, 3].astype(bool), postit_bg_color[1] * ident_mat)
np.putmask(data_new[:, :, 2], self.data[:, :, 3].astype(bool), trigger_color[2] * ident_mat)
np.putmask(data_new[:, :, 2], ~self.data[:, :, 3].astype(bool), postit_bg_color[2] * ident_mat)
self.data = cv2.cvtColor(data_new, cv2.COLOR_RGB2RGBA)
def get_data(self) -> np.ndarray:
return self.data
def get_mask(self) -> np.ndarray:
return self.mask
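# Usage sketch (hypothetical file path, not from the original module): recolor a
# transparent png as a red "postit" trigger on a yellow background, then pull out the
# pixel data and mask for insertion into a host image.
# trigger = FlatIconDotComPng('icons/alert.png', mode='postit',
#                             trigger_color=[255, 0, 0], postit_bg_color=[255, 255, 0],
#                             size=(24, 24))
# pattern, mask = trigger.get_data(), trigger.get_mask()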
|
504780
|
from tectosaur.util.geometry import *
def test_internal_angles():
angles = triangle_internal_angles([[0,0,0],[1,0,0],[0,1,0]])
np.testing.assert_almost_equal(angles, [np.pi / 2, np.pi / 4, np.pi / 4])
def test_longest_edge():
assert(get_longest_edge(get_edge_lens([[0,0,0],[1,0,0],[0.5,0.5,0]])) == 0)
assert(get_longest_edge(get_edge_lens([[0,0,0],[0.5,0.5,0],[1,0,0]])) == 2)
def test_vec_angle180():
np.testing.assert_almost_equal(vec_angle([1,1,0],[-1,-1,0]), np.pi)
def test_tri_normal():
tri = [[0,0,0],[1,0,0],[0,1,0]]
np.testing.assert_almost_equal(tri_normal(tri), [0,0,1])
def test_tri_unit_normal():
tri = [[0,0,0],[0,5,0],[0,0,5]]
np.testing.assert_almost_equal(tri_normal(tri, normalize = True), [1,0,0])
def test_tri_area():
np.testing.assert_almost_equal(tri_area(np.array([[0,0,0],[1,0,0],[0,1,0]])), 0.5)
|
504797
|
from torch.nn import functional as F
def model_saved_path(base):
return base + "/model.pth"
def model_params_saved_path(base):
return base + '/model_params.json'
def load_model(args, node_featurizer, n_tasks=1):
num_gnn_layers = len(args.gnn_hidden_feats)
model = None
if(args.gnn_model_name == 'GCN-p'):
from dgllife.model import GCNPredictor
model = GCNPredictor(
in_feats=node_featurizer.feat_size(),
hidden_feats=args.gnn_hidden_feats,
activation=[F.relu] * num_gnn_layers,
residual=[args.gnn_residuals] * num_gnn_layers,
batchnorm=[args.gnn_batchnorm] * num_gnn_layers,
dropout=[args.gnn_dropout] * num_gnn_layers,
predictor_hidden_feats=args.gnn_predictor_hidden_feats,
predictor_dropout=args.gnn_dropout,
n_tasks=n_tasks
)
elif (args.gnn_model_name == 'GAT-p'):
from dgllife.model import GATPredictor
model = GATPredictor(
in_feats=node_featurizer.feat_size(),
hidden_feats=args.gnn_hidden_feats,
num_heads=[args.gnn_num_heads] * num_gnn_layers,
feat_drops=[args.gnn_dropout] * num_gnn_layers,
attn_drops=[args.gnn_dropout] * num_gnn_layers,
alphas=[args.gnn_alphas] * num_gnn_layers,
residuals=[args.gnn_residuals] * num_gnn_layers,
predictor_hidden_feats=args.gnn_predictor_hidden_feats,
predictor_dropout=args.gnn_dropout,
n_tasks=n_tasks
)
return model
def init_featurizers(featurizer_type):
    node_featurizer = None
    edge_featurizer = None
    if(featurizer_type == 'canonical'):
        from dgllife.utils import CanonicalAtomFeaturizer
        node_featurizer = CanonicalAtomFeaturizer()
    elif(featurizer_type == 'attentivefp'):
        from dgllife.utils import AttentiveFPAtomFeaturizer
        node_featurizer = AttentiveFPAtomFeaturizer()
    else:
        raise ValueError(
            "Expect featurizer_type to be in ['canonical', 'attentivefp'], "
            "got {}".format(featurizer_type))
    return node_featurizer, edge_featurizer
|
504832
|
import numpy as np
import numpy.ma as ma
from .._lib import SpatialGridStruct as SpatialGridStructBase
from .transform import TransformMethodsMixin, array_bounds
class SpatialGridStruct(SpatialGridStructBase,TransformMethodsMixin):
_accessors = set()
def __init__(self,*args,**kwargs):
super().__init__(*args,**kwargs)
self._crs = '' # private variable for use by raster accessor
class _SpatialGridStruct(SpatialGridStruct):
""" Private class that acts like SpatialGridStuct used to wrap raster dataset
"""
def __init__(self,data,grid_info):
self._gridinfo = {key:val for key,val in grid_info.items()}
self._data = {}
if isinstance(data,ma.core.MaskedArray):
self._data['data'] = data
elif isinstance(data,np.ndarray):
self._data['data'] = ma.masked_values(data,np.nan)
else:
raise Exception('Invalid data. It must be ndarray or masked array')
print(data.shape)
rows,cols = data.shape
self._data['width'] = cols
self._data['height'] = rows
self._crs = self._gridinfo['grid_crs']
def cellsize(self):
return self.transform.xoff
def origin_coords(self):
xmin,xmax,ymin,ymax = self.GetExtents()
return (xmin,ymin)
def lower_left_x(self):
extent = self.GetExtents()
return extent[0]
def lower_left_y(self):
extent = self.GetExtents()
return extent[2]
def GetExtents(self):
trans = self.transform
width = self.width
height = self.height
xmin,ymin,xmax,ymax = array_bounds(height,width,trans)
return (xmin,xmax,ymin,ymax)
def _get_mview(self, dtype = 'f'):
data = np.flipud(self._data['data']._data.copy())
return data
def read(self):
return self._data.get('data',None)
@property
def width(self):
return self._data.get('width',None)
@property
def height(self):
return self._data.get('height',None)
@property
def transform(self):
return self._gridinfo.get('grid_transform',None )
def stats(self):
# delegate stats computation to put_grid method
# while saving to dss file
        # put_grid method will recompute stats if stats is empty or a null-equivalent value
return {}
@property
def crs(self):
return self._gridinfo.get('grid_crs',None )
@property
def interval(self):
return self.cellsize()
@property
def data_type(self):
return self._gridinfo.get('data_type',None)
@property
def nodata(self):
return self._data['data'].fill_value
@property
def units(self):
return self._gridinfo.get('grid_units','')
@property
def profile(self):
return self._gridinfo.copy()
|
504906
|
import pandas as pd
def get_higher_closing(df1, df2):
    # Return 1 where df1 closed higher, 2 where df2 closed higher, and 0 where equal
    # (assumed convention, matching the 'higher_closing_price' values used below).
    diff = df1['close'] - df2['close']
    return diff.apply(lambda d: 1 if d > 0 else (2 if d < 0 else 0))
def get_higher_closing_test():
    # Minimal sketch of a test, assuming two small frames with a 'close' column.
    df1 = pd.DataFrame({'close': [10.0, 9.0, 8.0]})
    df2 = pd.DataFrame({'close': [9.0, 9.0, 9.0]})
    assert list(get_higher_closing(df1, df2)) == [1, 0, 2]
# function to create column showing percentage by which higher price is higher
def get_pct_higher(df):
# i.e., if exchange 1 has a higher closing price than exchange 2...
if df['higher_closing_price'] == 1:
# return the percentage by which the exchange 1 closing price is
# greater than the exchange 2 closing price
return ((df['close_exchange_1'] /
df['close_exchange_2'])-1)*100
# otherwise, if exchange 2 has a higher closing price than exchange 1...
elif df['higher_closing_price'] == 2:
# return the percentage by which the exchange 2 closing price is
# greater than the exchange 1 closing price
return ((df['close_exchange_2'] /
df['close_exchange_1'])-1)*100
# otherwise, i.e., if the closing prices are equivalent...
else:
# return zero
return 0
# function to create column showing available arbitrage opportunities
def get_arbitrage_opportunity(df):
# assuming the total fees are 0.55%, if the higher closing price is less
# than 0.55% higher than the lower closing price...
if df['pct_higher'] < .55:
# return 0, for no arbitrage
return 0
# otherwise, if the exchange 1 closing price is more than 0.55% higher
# than the exchange 2 closing price...
elif df['higher_closing_price'] == 1:
# return -1, for arbitrage from exchange 2 to exchange 1
return -1
# otherwise, if the exchange 2 closing price is more than 0.55% higher
# than the exchange 1 closing price...
elif df['higher_closing_price'] == 2:
# return 1, for arbitrage from exchange 1 to exchange 2
return 1
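# Usage sketch (assumption, not from the original file): given a merged frame with
# 'close_exchange_1', 'close_exchange_2' and 'higher_closing_price' columns, the helpers
# above are applied row-wise to build the derived columns they describe.
def add_arbitrage_columns(df):
    df['pct_higher'] = df.apply(get_pct_higher, axis=1)
    df['arbitrage_opportunity'] = df.apply(get_arbitrage_opportunity, axis=1)
    return df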
|
504947
|
from typing import List
class Solution:
    # Staircase search: start at the bottom-left corner of a matrix whose rows and
    # columns are sorted ascending, moving right when too small and up when too large.
    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
if not matrix:
return False
row, col = len(matrix), len(matrix[0])
r, c = row - 1, 0
while r >= 0 and c < col:
tmp = matrix[r][c]
if tmp == target:
return True
elif tmp < target:
c += 1
else:
r -= 1
return False
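# Quick sanity check (illustrative input, not from the original file): starting at the
# bottom-left, 3 < 5 moves right, 6 > 5 moves up, and 5 is then found.
assert Solution().searchMatrix([[1, 4, 7], [2, 5, 8], [3, 6, 9]], 5) is True
assert Solution().searchMatrix([[1, 4, 7], [2, 5, 8], [3, 6, 9]], 10) is False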
|
504964
|
import unittest
from constants import RAW, URL_ENCODED
from document_generator import DocumentGenerator
from models import APIModel, APIBodyModel
class DocumentGeneratorTest(unittest.TestCase):
def setUp(self) -> None:
self.json_env = {
"id": "b052da30-a4fe-41be-9d36-c7eccd5fa7ef",
"name": "Sample-Env",
"values": [
{
"key": "HOST",
"value": "https://cpd-service.dev.apps.scholastic.tech",
"enabled": True
},
{
"key": "SAMPLE_ID",
"value": 5964,
"enabled": True
},
{
"key": "SAMPLE_NAME",
"value": "Test",
"enabled": True
}
]
}
self.request_body = APIBodyModel
self.request_body.mode = RAW
self.request_body.raw = "{\n\t\"id\": null,\n\t\"name\": {{SAMPLE_NAME}},\n\t\"array\": [\n\t\t{\n\t\t\t\"someId\":123\n\t\t}\n\t]\n\t\n}"
        self.request_body_formatted = "{\n\t\"id\": null,\n\t\"name\": Test,\n\t\"array\": [\n\t\t{\n\t\t\t\"someId\":123\n\t\t}\n\t]\n\t\n}"
self.response_body = "{\n\t\"id\": {{SAMPLE_ID}},\n\t\"name\": {{SAMPLE_NAME}},\n\t\"array\": [\n\t\t{\n\t\t\t\"someId\":123\n\t\t}\n\t]\n\t\n}"
self.response_body_formatted = "{\n\t\"id\": 5964,\n\t\"name\": Test,\n\t\"array\": [\n\t\t{\n\t\t\t\"someId\":123\n\t\t}\n\t]\n\t\n}"
self.request_body_raw = {
"mode": "raw",
"raw": "{\n\t\"id\": null,\n\t\"name\": {{SAMPLE_NAME}},\n\t\"array\": [\n\t\t{\n\t\t\t\"someId\":123\n\t\t}\n\t]\n\t\n}",
"options": {
"raw": {
"language": "json"
}
}
}
self.json_responses = [
{
"name": "Example 1",
"originalRequest": {
"method": "POST",
"header": [],
"body": self.request_body_raw,
"url": {
"raw": "{{HOST}}/sample/url/path",
"host": [
"{{HOST}}"
],
"path": [
"sample",
"url",
"path"
]
}
},
"_postman_previewlanguage": "json",
"header": None,
"cookie": [],
"body": self.response_body
},
{
"name": "Example 2",
"_postman_previewlanguage": "json",
"header": None,
"cookie": [],
"body": self.response_body
}
]
self.url_encoded_body_json = {
"mode": "urlencoded",
"urlencoded": [
{
"key": "key1",
"value": "value1",
"description": "test description 1"
},
{
"key": "key2",
"value": "<string>"
}
]
}
self.json_request = {
"name": "Request 1",
"request": {
"method": "POST",
"header": [
{
"key": "Content-Type",
"value": "application/x-www-form-urlencoded"
}
],
"body": self.url_encoded_body_json,
"url": {
"raw": "{{HOST}}/sample/url/path?query_param=<string>",
"host": [
"{{HOST}}"
],
"path": [
"sample",
"url",
"path"
],
"query": [
{
"key": "query_param",
"value": "<string>",
"description": "Sample query description"
}
],
"variable": [
{
"key": "request.url.host",
"value": "{{request.url.host}}",
"description": {
"content": "",
"type": "text/plain"
}
},
{
"key": "request.url.port",
"value": "{{request.url.port}}",
"description": {
"content": "",
"type": "text/plain"
}
}
]
},
"description": "Sample request description"
}
}
self.document_generator = DocumentGenerator()
def test_escape_string(self):
escaped_string = DocumentGenerator.escape_string('test\"this\"')
self.assertEqual('test\\"this\\"', escaped_string)
def test_apply_env_values(self):
json_collection = {
'testNumber': '{{SAMPLE_ID}}',
'testUrl': '{{HOST}}',
'testString': '{{SAMPLE_NAME}}'
}
formatted_json = DocumentGenerator.apply_env_values(json_collection, self.json_env)
self.assertEqual('5964', formatted_json['testNumber'])
self.assertEqual('https://cpd-service.dev.apps.scholastic.tech', formatted_json['testUrl'])
self.assertEqual('Test', formatted_json['testString'])
def test_apply_env_values_string(self):
json_string = '{{HOST}}/test/{{SAMPLE_ID}}'
formatted_string = DocumentGenerator.apply_env_values_string(json_string, self.json_env)
self.assertEqual('https://cpd-service.dev.apps.scholastic.tech/test/5964', formatted_string)
def test_get_examples(self):
api = APIModel()
self.document_generator.response_id = 10
self.document_generator.api_id_counter = 1
examples = self.document_generator.get_examples(api, self.json_responses)
self.assertEqual(2, len(examples))
for api_example in examples:
if api_example.name == 'Example 1':
self.assertEqual('1', api_example.request_id)
self.assertEqual('response_11', api_example.id)
self.assertEqual('POST', api_example.method)
self.assertEqual('{{HOST}}/sample/url/path', api_example.url)
self.assertEqual('\nPOST {{HOST}}/sample/url/path\n' + self.request_body.raw, api_example.request_body)
self.assertIsNone(api_example.status)
self.assertIsNone(api_example.code)
self.assertEqual('\n' + self.response_body, api_example.response_body)
elif api_example.name == 'Example 2':
self.assertEqual('1', api_example.request_id)
self.assertEqual('response_12', api_example.id)
self.assertIsNone(api_example.method)
self.assertIsNone(api_example.url)
self.assertIsNone(api_example.request_body)
self.assertIsNone(api_example.status)
self.assertIsNone(api_example.code)
self.assertEqual('\n' + self.response_body, api_example.response_body)
def test_get_examples_with_env(self):
api = APIModel()
self.document_generator.response_id = 10
self.document_generator.api_id_counter = 1
self.document_generator.env_file = self.json_env
examples = self.document_generator.get_examples(api, self.json_responses)
self.assertEqual(2, len(examples))
for api_example in examples:
if api_example.name == 'Example 1':
self.assertEqual('1', api_example.request_id)
self.assertEqual('response_11', api_example.id)
self.assertEqual('POST', api_example.method)
self.assertEqual('https://cpd-service.dev.apps.scholastic.tech/sample/url/path', api_example.url)
self.assertEqual('\nPOST https://cpd-service.dev.apps.scholastic.tech/sample/url/path\n'
+ self.request_body_formatted, api_example.request_body)
self.assertIsNone(api_example.status)
self.assertIsNone(api_example.code)
self.assertEqual('\n' + self.response_body_formatted, api_example.response_body)
elif api_example.name == 'Example 2':
self.assertEqual('1', api_example.request_id)
self.assertEqual('response_12', api_example.id)
self.assertIsNone(api_example.method)
self.assertIsNone(api_example.url)
self.assertIsNone(api_example.request_body)
self.assertIsNone(api_example.status)
self.assertIsNone(api_example.code)
self.assertEqual('\n' + self.response_body_formatted, api_example.response_body)
def test_get_examples_from_api(self):
api = APIModel()
api.id = 11
api.name = 'Example 1'
api.url = '{{HOST}}/sample/url/path'
api.body = self.request_body
api.method = 'POST'
self.document_generator.response_id = 10
self.document_generator.api_id_counter = 1
examples = self.document_generator.get_examples(api, [])
self.assertEqual(1, len(examples))
api_example = examples[0]
self.assertEqual('1', api_example.request_id)
self.assertEqual('response_11', api_example.id)
self.assertEqual('Example 1', api_example.name)
self.assertEqual('POST', api_example.method)
self.assertEqual('{{HOST}}/sample/url/path', api_example.url)
self.assertEqual('\nPOST {{HOST}}/sample/url/path\n' + self.request_body.raw, api_example.request_body)
self.assertIsNone(api_example.status)
self.assertIsNone(api_example.code)
def test_get_examples_from_api_no_body(self):
api = APIModel()
api.id = 11
api.name = 'Example 1'
api.url = None
api.body = None
api.method = 'POST'
self.document_generator.response_id = 10
self.document_generator.api_id_counter = 1
examples = self.document_generator.get_examples(api, [])
self.assertEqual(1, len(examples))
api_example = examples[0]
self.assertEqual('1', api_example.request_id)
self.assertEqual('response_11', api_example.id)
self.assertEqual('Example 1', api_example.name)
self.assertEqual('POST', api_example.method)
self.assertIsNone(api_example.url)
self.assertIsNone(api_example.request_body)
self.assertIsNone(api_example.status)
self.assertIsNone(api_example.code)
def test_get_examples_from_api_only_url(self):
api = APIModel()
api.id = 11
api.name = 'Example 1'
api.url = '{{HOST}}/sample/url/path'
api.body = None
api.method = 'GET'
self.document_generator.response_id = 10
self.document_generator.api_id_counter = 1
examples = self.document_generator.get_examples(api, [])
self.assertEqual(1, len(examples))
api_example = examples[0]
self.assertEqual('1', api_example.request_id)
self.assertEqual('response_11', api_example.id)
self.assertEqual('Example 1', api_example.name)
self.assertEqual('GET', api_example.method)
self.assertEqual('{{HOST}}/sample/url/path', api_example.url)
self.assertEqual('\nGET {{HOST}}/sample/url/path', api_example.request_body)
self.assertIsNone(api_example.status)
self.assertIsNone(api_example.code)
def test_get_examples_from_api_method_and_body(self):
api = APIModel()
api.id = 11
api.name = 'Example 1'
api.url = None
api.body = self.request_body
api.method = 'POST'
self.document_generator.response_id = 10
self.document_generator.api_id_counter = 1
examples = self.document_generator.get_examples(api, [])
self.assertEqual(1, len(examples))
api_example = examples[0]
self.assertEqual('1', api_example.request_id)
self.assertEqual('response_11', api_example.id)
self.assertEqual('Example 1', api_example.name)
self.assertEqual('POST', api_example.method)
self.assertIsNone(api_example.url)
self.assertEqual('\n' + self.request_body.raw, api_example.request_body)
self.assertIsNone(api_example.status)
self.assertIsNone(api_example.code)
def test_get_body_encoded(self):
api_body = DocumentGenerator.get_body(self.url_encoded_body_json)
self.assertEqual(URL_ENCODED, api_body.mode)
self.assertEqual(2, len(api_body.key_values))
self.assertEqual('key1', api_body.key_values[0].key)
self.assertEqual('value1', api_body.key_values[0].value)
self.assertEqual('test description 1', api_body.key_values[0].description)
self.assertEqual('key2', api_body.key_values[1].key)
self.assertEqual('<string>', api_body.key_values[1].value)
self.assertIsNone(api_body.key_values[1].description)
def test_get_body_raw(self):
api_body = DocumentGenerator.get_body(self.request_body_raw)
self.assertEqual(RAW, api_body.mode)
self.assertIsNone(api_body.key_values)
self.assertEqual(self.request_body.raw, api_body.raw)
def test_add_api(self):
self.document_generator.response_id = 0
self.document_generator.api_id_counter = 1
self.assertEqual(0, len(self.document_generator.api_info))
self.document_generator.add_apis(self.json_request)
self.assertEqual(1, len(self.document_generator.api_info))
api_model: APIModel = self.document_generator.api_info[0]
self.assertEqual(1, api_model.id)
self.assertEqual("Request 1", api_model.name)
self.assertEqual("<p>Sample request description</p>", api_model.description)
self.assertEqual(URL_ENCODED, api_model.body.mode)
self.assertEqual("POST", api_model.method)
self.assertEqual(1, len(api_model.headers))
self.assertEqual(1, len(api_model.params))
self.assertEqual(2, len(api_model.path_variables))
self.assertEqual("{{HOST}}/sample/url/path?query_param=<string>", api_model.url)
|
505013
|
from streamlink.plugins.lrt import LRT
from tests.plugins import PluginCanHandleUrl
class TestPluginCanHandleUrlLRT(PluginCanHandleUrl):
__plugin__ = LRT
should_match = [
"https://www.lrt.lt/mediateka/tiesiogiai/lrt-opus",
"https://www.lrt.lt/mediateka/tiesiogiai/lrt-klasika",
"https://www.lrt.lt/mediateka/tiesiogiai/lrt-radijas",
"https://www.lrt.lt/mediateka/tiesiogiai/lrt-lituanica",
"https://www.lrt.lt/mediateka/tiesiogiai/lrt-plius",
"https://www.lrt.lt/mediateka/tiesiogiai/lrt-televizija",
]
should_not_match = [
"https://www.lrt.lt",
"https://www.lrt.lt/mediateka/irasas/1013694276/savanoriai-tures-galimybe-pamatyti-popieziu-is-arciau",
]
|
505014
|
from pylab import *
rc('axes', linewidth=5)
rc('axes', grid=True)
rc('font', weight='heavy')
rc('font', size=16)
rc('xtick.major', size=6)
rc('xtick.major', width=3)
rc('ytick.major', size=6)
rc('ytick.major', width=3)
plt.ylim([-10.0/2048*1000,-20.0/2048*1000])
plt.xlim([700,1050])
fontsize=16
ax=gca()
for tick in ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(fontsize)
tick.label1.set_fontweight('heavy')
for tick in ax.yaxis.get_major_ticks():
tick.label1.set_fontsize(fontsize)
tick.label1.set_fontweight('heavy')
plt.show()
|
505015
|
import sys
import time
import os
import threading
bytestoremote = 0
bytesfromremote = 0
_relayport = 0
_remoteaddress = ""
_remoteport = 0
def reportbandwidth():
global bytestoremote
global bytesfromremote
step = 0
while True:
time.sleep(1)
if (sys.platform == "win32"):
os.system('cls')
else:
os.system('clear')
print "Relaying on port {0} to {1}:{2}".format(_relayport, _remoteaddress, _remoteport)
print "From remote: {0:.6f}MB/s | To remote: {1:.6f}MB/s".format(float(bytesfromremote)/1000000, float(bytestoremote)/1000000)
if (step == 0):
print "\\"
step += 1
elif (step == 1):
print "|"
step += 1
elif (step == 2):
print "/"
step += 1
elif (step == 3):
print "-"
step = 0
bytesfromremote = 0
bytestoremote = 0
def start(relayport, remoteaddress, remoteport):
global _relayport
global _remoteaddress
global _remoteport
    # Store the connection details before starting the reporter thread so its first
    # report does not print the default placeholder values.
    _relayport = relayport
    _remoteaddress = remoteaddress
    _remoteport = remoteport
    reportbandwidththread = threading.Thread(target=reportbandwidth)
    reportbandwidththread.daemon = True
    reportbandwidththread.start()
|
505055
|
import glob
import os
import tempfile
import fasttext
"""
Parameters for supervised training of fasttext
input # training file path (required)
lr # learning rate [0.1]
dim # size of word vectors [100]
ws # size of the context window [5]
epoch # number of epochs [5]
minCount # minimal number of word occurrences [1]
minCountLabel # minimal number of label occurrences [1]
minn # min length of char ngram [0]
maxn # max length of char ngram [0]
neg # number of negatives sampled [5]
wordNgrams # max length of word ngram [1]
loss # loss function {ns, hs, softmax, ova} [softmax]
bucket # number of buckets [2000000]
thread # number of threads [number of cpus]
lrUpdateRate # change the rate of updates for the learning rate [100]
t # sampling threshold [0.0001]
label # label prefix ['__label__']
verbose # verbose [2]
pretrainedVectors # pretrained word vectors (.vec file) for supervised learning []
"""
def train_fasttext(path, split, model_path):
training_tmp, test_tmp = create_temp_file_split(split, path)
model = fasttext.train_supervised(training_tmp, epoch=15, minn=1, maxn=6)
model.save_model(model_path)
print(model.test(test_tmp))
os.remove(training_tmp)
os.remove(test_tmp)
def create_temp_file_split(split_index, data_path):
files = []
for filename in glob.iglob(data_path + '**/*' + '.txt', recursive=True):
files.append(filename)
test_tmp = tempfile.NamedTemporaryFile(encoding='utf-8', mode='w', delete=False)
training_tmp = tempfile.NamedTemporaryFile(encoding='utf-8', mode='w', delete=False)
    for filename in files:
        # Close each source file promptly instead of leaking the handle.
        with open(filename, 'r', encoding='utf-8') as file:
            if str(split_index) + ".txt" in filename:
                test_tmp.write(file.read())
            else:
                training_tmp.write(file.read())
training_tmp_name = training_tmp.name
test_tmp_name = test_tmp.name
training_tmp.close()
test_tmp.close()
return training_tmp_name, test_tmp_name
|
505114
|
from django.apps import apps
from django.core.checks import Error, register
@register()
def modeladmin_installed_check(app_configs, **kwargs):
errors = []
MODELADMIN_APP = "wagtail.contrib.modeladmin"
if not apps.is_installed(MODELADMIN_APP):
error_hint = "Is '{}' in settings.INSTALLED_APPS?".format(
MODELADMIN_APP
)
errors.append(
Error(
"wagtail-sharing requires the Wagtail ModelAdmin app.",
hint=error_hint,
id="wagtailsharing.E001",
)
)
return errors
|
505125
|
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from app import helpers
from config import strings, styles
def get_stats_card1_data(
df: pd.DataFrame, port: str, vessel_type: str, year: int, month: int
):
"""
Gets values for the first card in the Stats tab.
:param df: Pandas DataFrame, input data
:param port: str, port of interest
:param vessel_type: str, vessel type of interest
:param year: int, year of interest
:param month: int, month of interest
:return: list - [pct, direction] if no errors, [0, '-'] if there are errors
"""
data = helpers.filter_by_vessel_and_time(
df=df, vessel_type=vessel_type, year=year, month=month
)
df_port = data[data["port"] == port]
df_other = data[data["port"] != port]
try:
pct = -np.round(
(
(df_other["n"].mean() - df_port["n"].mean())
/ np.abs(df_other["n"].mean())
)
* 100
)
pct = int(pct)
direction = "lower" if pct < 0 else "higher"
return [np.abs(pct), direction]
except Exception as _:
return [0, "-"]
def get_stats_card2_data(
df: pd.DataFrame, port: str, vessel_type: str, year: int, month: int
):
"""
Gets values for the second card in the Stats tab.
:param df: Pandas DataFrame, input data
:param port: str, port of interest
:param vessel_type: str, vessel type of interest
:param year: int, year of interest
:param month: int, month of interest
:return: list - [pct, direction] if no errors, [0, '-'] if there are errors
"""
data = helpers.filter_by_vessel_and_time(
df=df, vessel_type=vessel_type, year=year, month=month
)
data = data[data["len_stop"] > 20]
data = data.groupby(by=["port", "ship_type"]).mean()
data = data.reset_index()
port_stop_mean = data[data["port"] == port]["len_stop"].mean()
port_other_mean = data[data["port"] != port]["len_stop"].mean()
try:
pct = -int(
np.round(((port_other_mean - port_stop_mean) / port_other_mean) * 100)
)
direction = "shorter" if pct < 0 else "longer"
return [np.abs(pct), direction]
except Exception as _:
return [0, "-"]
def get_stats_card3_data(
df: pd.DataFrame, port: str, vessel_type: str, year: int, month: int
):
"""
Gets values for the third card in the Stats tab.
:param df: Pandas DataFrame, input data
:param port: str, port of interest
:param vessel_type: str, vessel type of interest
:param year: int, year of interest
:param month: int, month of interest
:return: list - [pct, direction] if no errors, [0, '-'] if there are errors
"""
data = helpers.filter_by_vessel_and_time(
df=df, vessel_type=vessel_type, year=year, month=month
)
df_port = data[data["port"] == port]
df_other = data[data["port"] != port]
try:
pct = -np.round(
(
(df_other["sum_dwt"].mean() - df_port["sum_dwt"].mean())
/ np.abs(df_other["sum_dwt"].mean())
)
* 100
)
pct = int(pct)
direction = "lower" if pct < 0 else "higher"
return [np.abs(pct), direction]
except Exception as _:
return [0, 0]
def plot_stats_total_num_vessels(
df: pd.DataFrame, port: str, vessel_type: str, year: int, month: int
) -> go.Figure:
"""
Returns a figure for the first chart on the Stats tab. It shows the total number of vessels in port
by applied conditions.
:param df: Pandas DataFrame, input data
:param port: str, port of interest
:param vessel_type: str, vessel type of interest
:param year: int, year of interest
:param month: int, month of interest
:return: Plotly figure
"""
data = helpers.filter_by_port_vessel_and_time(
df=df, port=port, vessel_type=vessel_type, year=year, month=month
)
if len(data) > 0:
plot_data = []
for dt in data["date"].unique():
for stype in data["ship_type"].unique():
curr = data[(data["date"] == dt) & (data["ship_type"] == stype)]
if len(curr) > 0:
plot_data.append(
{"date": dt, "ship_type": stype, "num": curr["n"].values[0]}
)
plot_data = pd.DataFrame(plot_data)
plot_data["color"] = plot_data["ship_type"].apply(helpers.generate_color)
fig_data = []
for stype in plot_data["ship_type"].unique():
ss = plot_data[plot_data["ship_type"] == stype]
fig_data.append(
go.Bar(
name=stype,
x=ss["date"].tolist(),
y=ss["num"].tolist(),
marker_color=ss.iloc[0]["color"],
)
)
else:
fig_data = go.Bar(x=np.arange(1, 9), y=[0] * 8)
return go.Figure(
data=fig_data,
layout=styles.generate_plot_layout(
x_title=strings.CHART_STATS_TOTAL_VESSELS_X,
y_title=strings.CHART_STATS_TOTAL_VESSELS_Y,
bar_mode="stack",
),
)
def plot_avg_vessel_stop_duration(
df: pd.DataFrame, port: str, vessel_type: str, year: int, month: int
) -> go.Figure:
"""
Returns a figure for the second chart on the Stats tab. It shows the average stop duration
by applied conditions.
:param df: Pandas DataFrame, input data
:param port: str, port of interest
:param vessel_type: str, vessel type of interest
:param year: int, year of interest
:param month: int, month of interest
:return: Plotly figure
"""
data = helpers.filter_by_port_vessel_and_time(
df=df, port=port, vessel_type=vessel_type, year=year, month=month
)
if len(data) > 0:
data = data.groupby(by="ship_type").mean().reset_index()
data = data[["ship_type", "len_stop"]]
data["len_stop"] = data["len_stop"].apply(lambda x: np.round(x, 2))
data["color"] = data["ship_type"].apply(helpers.generate_color)
fig_data = go.Bar(
x=data["ship_type"], y=data["len_stop"], marker_color=data["color"]
)
else:
fig_data = go.Bar(x=np.arange(1, 9), y=[0] * 8)
return go.Figure(
data=fig_data,
layout=styles.generate_plot_layout(
x_title=strings.CHART_STATS_STOP_DUR_X,
y_title=strings.CHART_STATS_STOP_DUR_Y,
bar_mode="stack",
),
)
def plot_total_capacity_of_vessels(
df: pd.DataFrame, port: str, vessel_type: str, year: int, month: int
) -> go.Figure:
"""
Returns a figure for the third chart on the Stats tab. It shows the total capacity of vessels
by applied conditions.
:param df: Pandas DataFrame, input data
:param port: str, port of interest
:param vessel_type: str, vessel type of interest
:param year: int, year of interest
:param month: int, month of interest
:return: Plotly figure
"""
data = helpers.filter_by_port_vessel_and_time(
df=df, port=port, vessel_type=vessel_type, year=year, month=month
)
if len(data) > 0:
fig_data = []
data = data.groupby(by=["date", "ship_type"]).sum().reset_index()
data = data[["date", "ship_type", "sum_dwt"]]
data["color"] = data["ship_type"].apply(helpers.generate_color)
for stype in data["ship_type"].unique():
ss = data[data["ship_type"] == stype]
fig_data.append(
go.Bar(
name=stype,
x=ss["date"].tolist(),
y=ss["sum_dwt"].tolist(),
marker_color=ss.iloc[0]["color"],
)
)
else:
fig_data = go.Bar(x=np.arange(1, 9), y=[0] * 8)
return go.Figure(
data=fig_data,
layout=styles.generate_plot_layout(
x_title=strings.CHART_STATS_TOTAL_CAP_VESSELS_X,
y_title=strings.CHART_STATS_TOTAL_CAP_VESSELS_Y,
bar_mode="stack",
),
)
|
505130
|
import discord
from discord.ext import commands
from core import checks
from core.models import PermissionLevel
class Purger(commands.Cog):
"""Plugin to delete multiple messages at once."""
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command()
@checks.has_permissions(PermissionLevel.MODERATOR)
async def purge(self, ctx: commands.Context, amount: int):
"""Delete multiple messages at once."""
if amount < 1:
raise commands.BadArgument(
"The amount of messages to delete should be a scrictly "
f"positive integer, not `{amount}`."
)
try:
deleted = await ctx.channel.purge(limit=amount + 1)
except discord.Forbidden:
embed = discord.Embed(color=self.bot.error_color)
embed.description = (
"This command requires the `Manage Messages` permission, "
"which the bot does not have at the moment."
)
return await ctx.send(embed=embed)
message = f"{len(deleted)} messages have been deleted!"
to_delete = await ctx.send(message)
await to_delete.delete(delay=3)
def setup(bot: commands.Bot):
bot.add_cog(Purger(bot))
|
505195
|
from naruhodo.utils.dicts import ProDict, MeaninglessDict, VerbLikeFuncDict, VerbLikeExclude
from naruhodo.utils.misc import preprocessText
import re
class CaboChunk(object):
"""Class for cabocha chunks"""
def __init__(self, chunk_id, parent):
"""Initialize a chunk."""
self.id = chunk_id
"""
id of the chunk.
"""
self.parent = parent
"""
parent id of this chunk.
"""
self.children = None
"""
list of children of this chunk.
"""
self.nouns = list()
"""
list of nouns 名詞
"""
self.verbs = list()
"""
list of verbs 動詞
"""
self.adjs = list()
"""
list of adjectives 形容詞
"""
self.postps = list()
"""
list of postpositions 助詞
"""
self.auxvs = list()
"""
        list of auxiliary verbs 助動詞
"""
self.conjs = list()
"""
        list of conjunctions 接続詞
"""
self.interjs = list()
"""
list of interjections 感動詞
"""
self.signs = list()
"""
list of signs 記号
"""
self.advs = list()
"""
list of adverbs 副詞
"""
self.connects = list()
"""
list of connects 連体詞
"""
self.headings = list()
"""
list of headings 接頭詞
"""
self.main = ""
"""
Main component of the chunk.
"""
self.main_surface = ""
"""
Surface of the main component.
"""
self.func = ""
"""
Functional component of the chunk.
"""
self.surface = ""
"""
Original surface of the chunk.
"""
self.negative = 0
"""
        If the chunk is negative 1, if it is a double negative (strongly positive) -1, else 0.
"""
self.passive = 0
"""
If chunk is passive 1, else 0.
"""
self.compulsory = 0
"""
If chunk is compulsory 1, else 0.
"""
self.question = 0
"""
If chunk contains ? 1, else 0.
"""
self.yomi = ""
"""
Contains the yomi of this chunk's surface.
"""
self.tense = 0
"""
        If the chunk has no tense or is present tense 0, if past -1, if present continuous 1.
"""
self.type = -1
"""
Type of this chunk.
-------------------
-1: unknown type
0: noun
1: adjective
2: verb
3: conjective
4: interjection
5: adverb
6: connect
"""
self.type2 = -1
"""
2nd type of this chunk.
-----------------------
-1: no 2nd type
0: noun
1: adjective
2: verb
"""
self.NE = 0
"""
Named entity type of this chunk.
The name of NE type can be retrieved using
'NEList' in naruhodo.utils.dicts like
NEtype = NEList[NE].
--------------------------------
0: no named entity(or unknown)
1: person
2: location
3: organization
4: number
5: general
"""
self.pro = -1
"""
Pronoun type of this chunk.
---------------------------
-1: no pronoun(or unknown)
0: demonstrative-loc
1: demonstrative-obj
2: personal(1st)
3: personal(2nd)
4: personal(3rd)
5: indefinite
6: inclusive
7: omitted *This type is assigned by naruhodo.core.KnowledgeCoreJa.
"""
self.npro = 0
"""
Rank of this pronoun in the sentence it appears.
"""
self.meaning = ""
"""
If the main of this chunk is in MeaninglessDict,
this variable will be set to the main of its child node that contains its meaning.
"""
def add(self, inp):
"""Add components to chunk lists."""
# if inp[1] != "記号" or inp[0] == "?":
# self.surface += inp[0]
self.surface += inp[0]
if len(inp) > 8:
self.yomi += inp[8]
else:
self.yomi += inp[0]
elem = {
'surface': inp[0],
'lemma' : inp[7],
'labels': inp[2:7],
}
if inp[1] == "名詞":
self.nouns.append(elem)
elif inp[1] == "動詞":
self.verbs.append(elem)
elif inp[1] == "形容詞":
self.adjs.append(elem)
elif inp[1] == "助詞":
self.postps.append(elem)
elif inp[1] == "助動詞":
self.auxvs.append(elem)
elif inp[1] == "接続詞":
self.conjs.append(elem)
elif inp[1] == "感動詞":
self.interjs.append(elem)
elif inp[1] == "記号":
self.signs.append(elem)
elif inp[1] == "副詞":
self.advs.append(elem)
elif inp[1] == "連体詞":
self.connects.append(elem)
elif inp[1] == "接頭詞":
self.headings.append(elem)
else:
pass
def _cleanUp(self):
"""Clean up all the lists stored in the object that is no longer needed."""
del self.nouns
del self.verbs
del self.adjs
del self.postps
del self.auxvs
del self.conjs
del self.interjs
del self.signs
del self.advs
del self.connects
del self.headings
def _getMain(self):
"""Get the main component of the chunk."""
if len(self.nouns) > 0 and self.nouns[0]['labels'][0] not in ['非自立', '接尾']:
self.main = "".join([x['lemma'] for x in self.nouns if x['labels'][0] != '非自立'])
self.main_surface = "".join([x['surface'] for x in self.nouns if x['labels'][0] != '非自立'])
self.type = 0
if len(self.adjs) > 0:
if self.adjs[0]['lemma'] == "ない":
self.negative = 1
# Corrections for special patterns.
if self.nouns[0]['labels'][0] == 'サ変接続':
if len(self.nouns) > 1 and len(self.verbs) == 0:
self.type = 0
else:
self.type = 2
self.type2 = 0
elif self.nouns[0]['labels'][0] == '形容動詞語幹':
if len(self.nouns) > 1:
self.type = 0
else:
self.type = 1
self.type2 = 2
# NE recognition.
elif self.nouns[0]['labels'][0] == '固有名詞':
if self.nouns[0]['labels'][1] == '人名':
self.NE = 1
elif self.nouns[0]['labels'][1] == '地域':
self.NE = 2
elif self.nouns[0]['labels'][1] == '組織':
self.NE = 3
elif self.nouns[0]['labels'][1] == '一般':
self.NE = 5
else:
pass
            # Pronoun identification (for coreference analysis).
elif self.nouns[0]['labels'][0] == '代名詞':
if self.nouns[0]['lemma'] in ProDict['demonstrative-loc']:
self.pro = 0
elif self.nouns[0]['lemma'] in ProDict['demonstrative-obj']:
self.pro = 1
elif self.nouns[0]['lemma'] in ProDict['personal1st']:
self.pro = 2
elif self.nouns[0]['lemma'] in ProDict['personal2nd']:
self.pro = 3
elif self.nouns[0]['lemma'] in ProDict['personal3rd']:
self.pro = 4
elif self.nouns[0]['lemma'] in ProDict['indefinite']:
self.pro = 5
elif self.nouns[0]['lemma'] in ProDict['inclusive']:
self.pro = 6
else:
pass
elif self.nouns[0]['labels'][0] == '数':
self.main = "".join([x['lemma'] for x in self.nouns])
self.main_surface = "".join([x['surface'] for x in self.nouns])
self.NE = 4
else:
pass
elif len(self.nouns) > 0 and self.nouns[0]['lemma'] in MeaninglessDict:
if len(self.verbs) > 0:
self.main = self.verbs[0]['surface']
self.main_surface = self.verbs[0]['surface']
self.main += self.nouns[0]['lemma']
self.main_surface += self.nouns[0]['surface']
self.type = 0
elif len(self.adjs) > 0:
self.main = self.adjs[0]['lemma']
self.main_surface = self.adjs[0]['surface']
self.type = 1
if self.adjs[0]['lemma'] == "ない":
self.negative = 1
elif len(self.verbs) > 0:
self.main = self.verbs[0]['lemma']
self.main_surface = self.verbs[0]['surface']
self.type = 2
elif len(self.advs) > 0:
self.main = self.advs[0]['lemma']
self.main_surface = self.advs[0]['surface']
self.type = 5
elif len(self.conjs) > 0:
self.main = self.conjs[0]['lemma']
self.main_surface = self.conjs[0]['surface']
self.type = 3
elif len(self.interjs) > 0:
self.main = self.interjs[0]['lemma']
self.main_surface = self.interjs[0]['surface']
self.type = 4
elif len(self.connects) > 0:
self.main = self.connects[0]['lemma']
self.main_surface = self.connects[0]['surface']
self.type = 6
elif len(self.postps) > 0:
self.main = self.postps[0]['lemma']
self.main_surface = self.postps[0]['surface']
elif len(self.auxvs) > 0:
self.main = self.auxvs[0]['lemma']
self.main_surface = self.auxvs[0]['surface']
elif len(self.signs) > 0:
if len(self.nouns) > 0:
self.main = self.nouns[0]['lemma']
self.main_surface = self.nouns[0]['surface']
else:
self.main = self.signs[0]['lemma']
self.main_surface = self.signs[0]['surface']
elif len(self.nouns) > 0 and self.nouns[0]['labels'][0] == '非自立':
self.main = self.nouns[0]['lemma']
self.main_surface = self.nouns[0]['surface']
self.type = 0
else:
self.main = 'UNKNOWN'
if len(self.headings) > 0:
self.main = "\n".join([x['lemma'] for x in self.headings]) + self.main
self.main_surface = "\n".join([x['surface'] for x in self.headings]) + self.main_surface
# Convert main with no lemma to surface
if self.main.find("*") != -1:
self.main = self.main_surface
def _getFunc(self):
"""Get the func component of the chunk."""
# Get func by excluding main from surface.
self.func = self.surface.replace(self.main_surface, "")
# Process func to get properties
if len(self.verbs) > 0:
for item in self.verbs:
if item['labels'][0] == '接尾':
if item['lemma'] == "れる" or item['lemma'] == "られる":
self.passive = 1
elif item['lemma'] == "させる":
self.compulsory = 1
elif item['labels'][0] == "非自立":
if item['lemma'] == "いる":
self.tense = 1
if len(self.postps) > 0:
if self.parent == -1:
for item in self.postps:
if item['lemma'] in ["の", "なの", "か"]:
self.question = 1
if len(self.auxvs) > 0:
neg = sum([
[x['lemma'] for x in self.auxvs].count('ん'),
[x['lemma'] for x in self.auxvs].count('ない'),
[x['lemma'] for x in self.auxvs].count('ぬ'),
[x['lemma'] for x in self.auxvs].count('まい')
])
if neg == 1:
if len(self.signs) > 0 and any([self.signs[x]['surface'] == '?' for x in range(len(self.signs))]):
pass
else:
self.negative = 1
elif neg > 1:
if neg % 2 == 0:
self.negative = -1
else:
self.negative = 1
else:
pass
if any([self.auxvs[x]['lemma'] == "た" for x in range(len(self.auxvs))]):
self.tense = -1
# Fix for nouns used as verbs.
for item in VerbLikeFuncDict:
if self.func.find(item) != -1 and self.func not in VerbLikeExclude:
self.type = 2
if len(self.signs) > 0:
for item in self.signs:
if item['surface'] == '?':
self.question = 1
# Fix for special words.
if self.main == "できる" and self.func not in ["た", "ます", "いるて"]:
self.type = 5
def processChunk(self, pos, npro):
"""Process the chunk to get main and func component of it."""
self._getMain()
self._getFunc()
# Modify pronouns
if self.pro != -1:
self.main += "[{0}@{1}]".format(pos, npro)
self.npro = npro
# Add tense label to main
if self.tense == -1:
self.main += "\n(過去)"
elif self.tense == 1:
self.main += "\n(現在)"
# Add compulsory label to main
if self.compulsory == 1:
self.main += "\n(強制)"
if self.passive == 1:
self.main += "\n(被動)"
# Add question label to main
if self.question == 1:
self.main += "\n(質問)"
# Add negative label to main
if self.negative == 1:
self.main += "\n(否定)"
elif self.negative == -1:
self.main += "\n(二重否定)"
self._cleanUp()
class CabochaClient(object):
"""Class for CaboCha backend."""
def __init__(self):
"""Initialize a native database."""
self.rsplit = re.compile(r'[,]+|\t')
self.chunks = list()
self.root = None
self.npro = 0
def add(self, inp, pos=0):
"""Takes in the block output from CaboCha and add it to native database."""
ck = None
for elem in inp.splitlines():
if elem[0] == '*':
if ck is not None:
ck.processChunk(pos, self.npro)
if ck.pro != -1:
self.npro += 1
self.chunks.append(ck)
ck = CaboChunk(*self._processHead(elem))
else:
ck.add(self.rsplit.split(elem))
ck.processChunk(pos, self.npro)
if ck.pro != -1:
self.npro += 1
self.chunks.append(ck)
# Get children list and store in self.childrenList
self._getChildrenList()
self._processMeaningless()
self._processNegative()
def _processHead(self, inp):
"""Takes in the head of the chunk and process ids / parents."""
elem = inp.split()
return int(elem[1]), int(elem[2][:-1])
def _getChildrenList(self):
"""Process to get the list of children for each chunk."""
nck = len(self.chunks)
self.childrenList = [list() for x in range(nck)]
for i in range(nck):
pid = self.chunks[i].parent
if pid == -1:
self.root = i
else:
self.childrenList[pid].append(i)
for i in range(nck):
self.chunks[i].children = self.childrenList[i]
def _processMeaningless(self):
"""This function makes meaningless words tagged with its meaning."""
nck = len(self.chunks)
for i in range(nck):
if preprocessText(self.chunks[i].main) in MeaninglessDict:
if len(self.childrenList[i]) > 0:
self.chunks[i].meaning = self.chunks[self.childrenList[i][-1]].main
self.chunks[i].main = "({0})\n{1}".format(
self.chunks[self.childrenList[i][-1]].surface,
self.chunks[i].main
)
def _processNegative(self):
"""This function makes the words that has negative child tagged negative."""
nck = len(self.chunks)
for i in range(nck):
if preprocessText(self.chunks[i].main) in ["ない", ]:
if len(self.childrenList[i]) > 0:
self.chunks[self.childrenList[i][-1]].main += "\n(否定)"
self.chunks[self.childrenList[i][-1]].negative = 1
self.chunks[i].meaning = self.chunks[self.childrenList[i][-1]].main
self.chunks[i].main = self.chunks[i].main.replace("\n(否定)", "")
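# Illustrative sketch of feeding CaboCha lattice output into CabochaClient (assumes the
# cabocha-python bindings and an IPADIC-style dictionary; the exact feature columns
# depend on the local setup). The trailing "EOS" line is dropped because add() only
# expects chunk-header ("* ...") and token lines.
#
#     import CaboCha
#     parser = CaboCha.Parser()
#     tree = parser.parse("太郎は本を次郎に渡した。")
#     lattice = tree.toString(CaboCha.FORMAT_LATTICE)
#     lattice = "\n".join(line for line in lattice.splitlines() if line != "EOS")
#     client = CabochaClient()
#     client.add(lattice)
#     for ck in client.chunks:
#         print(ck.main, ck.func)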
|
505228
|
import structlog
from servicelayer.worker import Worker
from servicelayer.logs import apply_task_context
from memorious.logic.context import Context
from memorious.logic.stage import CrawlerStage
from memorious.core import conn, crawler
log = structlog.get_logger(__name__)
class MemoriousWorker(Worker):
def handle(self, task):
apply_task_context(task)
data = task.payload
stage = CrawlerStage.detach_namespace(task.stage.stage)
state = task.context
context = Context.from_state(state, stage)
context.execute(data)
def after_task(self, task):
if task.job.is_done():
stage = CrawlerStage.detach_namespace(task.stage.stage)
state = task.context
context = Context.from_state(state, stage)
context.crawler.aggregate(context)
def get_stages(self):
return [stage.namespaced_name for stage in crawler.stages.values()]
def get_worker(num_threads=None):
return MemoriousWorker(conn=conn, num_threads=num_threads)
|
505231
|
import torch
import torchaudio
from torch.utils.data import DataLoader
import torch.nn.functional as F
def postprocess_features(feats, sample_rate):
if feats.dim() == 2: feats = feats.mean(-1)
assert feats.dim() == 1, feats.dim()
with torch.no_grad():
feats = F.layer_norm(feats, feats.shape)
return feats
def get_feature(batch_sample):
return postprocess_features(batch_sample[0][0], batch_sample[1])
def get_padding_mask(batch_sample):
return torch.BoolTensor(batch_sample[0].size(1)).fill_(False)
def get_batch_encoder_input(batch_samples):
features = [get_feature(batch_sample) for batch_sample in batch_samples]
features = torch.nn.utils.rnn.pad_sequence(features, batch_first=True, padding_value=0)
padding_masks = [get_padding_mask(batch_sample) for batch_sample in batch_samples]
padding_masks = torch.nn.utils.rnn.pad_sequence(padding_masks, batch_first=True, padding_value=True)
mask = False
features_only = True
return features, padding_masks, mask, features_only
class LibriSpeechDataLoader:
"""
Data loaders for the LibriSpeech dataset.
Arguments:
train_batch_size (int): batch size for the training data loader
val_batch_size (int): batch size for the validation data loader
num_workers (int): number of workers for training and validation data loaders
train_data_path (str): Path to training data
val_data_path (str): Path to validation data
train_on_dev_clean (bool): Set to True if you want to train on parts of the dev-clean dataset and validate on the other part. This is useful when testing ideas
use_train_clean_100 (bool): Set to True if using LibriSpeech's train-clean-100 dataset during training
use_train_clean_360 (bool): Set to True if using LibriSpeech's train-clean-360 dataset during training
use_train_other_500 (bool): Set to True if using LibriSpeech's train-other-500 dataset during training
"""
def __init__(self,
train_batch_size,
val_batch_size,
num_workers,
train_data_path,
val_data_path,
train_on_dev_clean,
use_train_clean_100,
use_train_clean_360,
use_train_other_500,
):
self.train_batch_size = train_batch_size
self.val_batch_size = val_batch_size
self.num_workers = num_workers
dev_clean_dataset = torchaudio.datasets.LIBRISPEECH(val_data_path, url='dev-clean', download=False)
dev_other_dataset = torchaudio.datasets.LIBRISPEECH(val_data_path, url='dev-other', download=False)
dev_other_data_loader = DataLoader(dev_other_dataset,
batch_size = val_batch_size,
shuffle = False,
num_workers = num_workers)
if train_on_dev_clean:
train_data_loader, dev_train_data_loader, dev_clean_data_loader = self.create_data_loaders_from_dev_clean(dev_clean_dataset,
train_batch_size,
val_batch_size,
num_workers)
else:
train_data_loader, dev_train_data_loader = self.create_data_loaders_from_train_dataset(train_data_path,
train_batch_size,
val_batch_size,
num_workers,
use_train_clean_100,
use_train_clean_360,
use_train_other_500,)
dev_clean_data_loader = DataLoader(dev_clean_dataset,
batch_size = val_batch_size,
shuffle = False,
num_workers = num_workers)
self.train_data_loader = train_data_loader
self.val_data_loaders = {
#"dev_train": dev_train_data_loader,
"dev_clean": dev_clean_data_loader,
#"dev_other": dev_other_data_loader
}
def create_data_loaders_from_dev_clean(self,
dev_clean_dataset,
train_batch_size,
val_batch_size,
num_workers):
"""
        Create train_data_loader, dev_train_data_loader and dev_clean_data_loader from dev_clean_dataset.
        Part of dev_clean_dataset is used for training and the remaining part for validation.
Arguments:
dev_clean_dataset (torchaudio.datasets.LIBRISPEECH): dev-clean data set from LibriSpeech
train_batch_size (int): batch size for the training data loader
val_batch_size (int): batch size for the validation data loader
num_workers (int): number of workers for the data loaders
Returns:
train_data_loader (torch.utils.data.DataLoader): data loader for training created from the dev clean dataset
            dev_train_data_loader (torch.utils.data.DataLoader): data loader for validation created from the training split of dev-clean
            dev_clean_data_loader (torch.utils.data.DataLoader): data loader for validation created from the held-out split of dev-clean
"""
train_dataset, val_dataset = torch.utils.data.random_split(dev_clean_dataset,
[2203,500],
generator=torch.Generator().manual_seed(42))
train_data_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=train_batch_size,
shuffle=False,
num_workers=num_workers,
collate_fn=get_batch_encoder_input)
dev_train_data_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=val_batch_size,
shuffle=False,
num_workers=num_workers,
sampler=torch.utils.data.sampler.SubsetRandomSampler(torch.randint(high=2203, size=(500,))),)
dev_clean_data_loader = torch.utils.data.DataLoader(val_dataset,
batch_size=val_batch_size,
shuffle=False,
num_workers=num_workers)
return train_data_loader, dev_train_data_loader, dev_clean_data_loader
def create_data_loaders_from_train_dataset(self,
train_data_path,
train_batch_size,
val_batch_size,
num_workers,
use_train_clean_100,
use_train_clean_360,
use_train_other_500):
"""
Create train_data_loader and dev_train_data_loader from training datasets of LibriSpeech.
Create the joint training dataset based on user's selections.
Arguments:
train_data_path (str): path to LibriSpeech training data
train_batch_size (int): batch size for train_data_loader
        val_batch_size (int): batch size for dev_train_data_loader
num_workers (int): number of workers for data loaders
use_train_clean_100 (bool): Set to True if using LibriSpeech's train-clean-100 dataset during training
use_train_clean_360 (bool): Set to True if using LibriSpeech's train-clean-360 dataset during training
use_train_other_500 (bool): Set to True if using LibriSpeech's train-other-500 dataset during training
Returns:
train_data_loader (torch.utils.data.DataLoader): data loader for training created from LibriSpeech training datasets
dev_train_data_loader (torch.utils.data.DataLoader): data loader for validating created from LibriSpeech training datasets
"""
selected_datasets = []
if use_train_clean_100: selected_datasets.append(torchaudio.datasets.LIBRISPEECH(train_data_path, url='train-clean-100', download=False))
if use_train_clean_360: selected_datasets.append(torchaudio.datasets.LIBRISPEECH(train_data_path, url='train-clean-360', download=False))
if use_train_other_500: selected_datasets.append(torchaudio.datasets.LIBRISPEECH(train_data_path, url='train-other-500', download=False))
train_dataset = torch.utils.data.ConcatDataset(selected_datasets)
train_data_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=train_batch_size,
shuffle=True,
num_workers=num_workers,
collate_fn=get_batch_encoder_input)
        # Sample validation indices from the full training dataset (len(train_dataset)),
        # not from the number of batches in train_data_loader.
        dev_train_data_loader = torch.utils.data.DataLoader(train_dataset,
                                                            batch_size=val_batch_size,
                                                            shuffle=False,
                                                            num_workers=num_workers,
                                                            sampler=torch.utils.data.sampler.SubsetRandomSampler(torch.randint(high=len(train_dataset), size=(2000,))),)
return train_data_loader, dev_train_data_loader
def get_train_data_loader(self):
return self.train_data_loader
def get_val_data_loaders(self):
return self.val_data_loaders
|
505271
|
import pytest
import numpy as np
from xgboost_distribution.distributions import Laplace
@pytest.fixture
def laplace():
return Laplace()
@pytest.mark.parametrize(
"y, params, natural_gradient, expected_grad",
[
(
np.array([0, 0]),
np.array([[0, 1], [1, 0]]),
True,
np.array([[0, 1], [1, 0]]),
),
(
np.array([0, 0]),
np.array([[0, 1], [1, 0]]),
False,
np.array([[0, 1], [1, 0]]),
),
],
)
def test_gradient_calculation(laplace, y, params, natural_gradient, expected_grad):
grad, hess = laplace.gradient_and_hessian(
y, params, natural_gradient=natural_gradient
)
np.testing.assert_array_equal(grad, expected_grad)
def test_loss(laplace):
loss_name, loss_value = laplace.loss(
# fmt: off
y=np.array([1, ]),
params=np.array([[1, np.log(1)], ]),
)
assert loss_name == "LaplaceError"
np.testing.assert_approx_equal(loss_value, -np.log(0.5))
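# Why -np.log(0.5) is the expected loss (worked arithmetic, not part of the test suite):
# the params row encodes (loc, log(scale)), so loc = 1 and scale = exp(log(1)) = 1.
# The Laplace density at y = loc is 1 / (2 * scale) = 0.5, hence the negative
# log-likelihood is -log(0.5) = log(2) ≈ 0.693.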
|
505272
|
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
img = cv.imread(r'C:\Users\PIYUS\Desktop\Image Processing\learning\Resources\Photos\cats 2.jpg')
cv.imshow("img", img)
# The mask must have the same height and width as the image it is applied to.
blank = np.zeros(img.shape[:2], dtype='uint8')
# Draw each shape on its own copy of the blank canvas; cv.circle/cv.rectangle draw
# in place, so reusing the same array would merge both shapes into a single mask.
circle = cv.circle(blank.copy(), (img.shape[1]//2 + 45, img.shape[0]//2), 100, 255, -1)
rectangle = cv.rectangle(blank.copy(), (30, 30), (370, 370), 255, -1)
# cv.imshow("mask", circle)
weird_shape = cv.bitwise_and(circle, rectangle)  # intersection of the two masks
masked_img = cv.bitwise_and(img, img, mask=weird_shape)
cv.imshow("masked_image", masked_img)
cv.waitKey(0)
|
505374
|
import pandas as __pd
import datetime as __dt
from dateutil import relativedelta as __rd
from multiprocessing import Pool as __Pool
import multiprocessing as __mp
from functools import reduce as __red
from seffaflik.__ortak.__araclar import make_requests as __make_requests
from seffaflik.__ortak import __dogrulama as __dogrulama
__first_part_url = "consumption/"
def sehir():
"""
Şehir ve şehirlere ait ilçelerin bilgisini vermektedir.
Parametreler
------------
Geri Dönüş Değeri
-----------------
Şehir ve Şehirlere Ait İlçeler (Şehir Id, İlçe Id, Şehir İsmi, İlçe İsmi)
"""
try:
particular_url = __first_part_url + "city"
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["cityList"])
df.rename(index=str, columns={"cityId": "Şehir Id", "districtId": "İlçe Id", "cityName": "Şehir İsmi",
"districtName": "İlçe İsmi"}, inplace=True)
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df.drop_duplicates().reset_index(drop=True)
def gerceklesen(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarih aralığı için saatlik gerçek zamanlı tüketim bilgisini vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
Gerçek Zamanlı Tüketim (Tarih, Saat, Tüketim)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = __first_part_url + "real-time-consumption" + "?startDate=" + baslangic_tarihi + \
"&endDate=" + bitis_tarihi
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["hourlyConsumptions"])
df["Saat"] = df["date"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["date"].apply(lambda d: d[:10]))
df.rename(index=str, columns={"consumption": "Tüketim"}, inplace=True)
df = df[["Tarih", "Saat", "Tüketim"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def uecm(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarih aralığı için saatlik Uzlaştırmaya Esas Çekiş Miktarı (UEÇM) bilgisini vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
Uzlaştırmaya Esas Çekiş Miktarı (Tarih, Saat, UEÇM)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = __first_part_url + "swv" + "?startDate=" + baslangic_tarihi + "&endDate=" + bitis_tarihi
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["swvList"])
df["Saat"] = df["date"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["date"].apply(lambda d: d[:10]))
df.rename(index=str, columns={"swv": "UEÇM"}, inplace=True)
df = df[["Tarih", "Saat", "UEÇM"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def uecm_donemlik(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarih aralığına tekabül eden uzlaştırma dönemleri için serbest tüketici hakkını kullanan serbest
tüketicilerin, tedarik yükümlülüğü kapsamındaki ve toplam Uzlaştırmaya Esas Çekiş Miktarı (UEÇM) bilgisini
vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
Serbest Tüketici, Tedarik Kapsamındaki ve Toplam Uzlaştırmaya Esas Çekiş Miktarı (Tarih, Saat, UEÇM,
Serbest Tüketici UEÇM, Tedarik Yükümlülüğü Kapsamındaki UEÇM)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
ilk = __dt.datetime.strptime(baslangic_tarihi[:7], '%Y-%m')
son = __dt.datetime.strptime(bitis_tarihi[:7], '%Y-%m')
date_list = []
while ilk <= son:
date_list.append(ilk.strftime("%Y-%m-%d"))
ilk = ilk + __rd.relativedelta(months=+1)
with __Pool(__mp.cpu_count()) as p:
df_list = p.map(__uecm_donemlik, date_list, chunksize=1)
return __pd.concat(df_list, sort=False)
def uecm_serbest_tuketici(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarih aralığına tekabül eden uzlaştırma dönemleri için serbest tüketici hakkını kullanan serbest
tüketicilerin saatlik Uzlaştırmaya Esas Çekiş Miktarı (UEÇM) bilgisini vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
Serbest Tüketici UEÇM (Tarih, Saat, Tüketim)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
ilk = __dt.datetime.strptime(baslangic_tarihi[:7], '%Y-%m')
son = __dt.datetime.strptime(bitis_tarihi[:7], '%Y-%m')
date_list = []
while ilk <= son:
date_list.append(ilk.strftime("%Y-%m-%d"))
ilk = ilk + __rd.relativedelta(months=+1)
with __Pool(__mp.cpu_count()) as p:
df_list = p.map(__uecm_serbest_tuketici, date_list, chunksize=1)
return __pd.concat(df_list, sort=False)
def uecm_donemlik_tedarik(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarih aralığına tekabül eden uzlaştırma dönemleri için tedarik yükümlülüğü kapsamındaki dönemlik bazlı toplam
Uzlaştırmaya Esas Çekiş Miktarı (UEÇM) bilgisini vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
Tedarik Yükümlülüğü Kapsamındaki UEÇM (Dönem, Tüketim)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
ilk = __dt.datetime.strptime(baslangic_tarihi[:7], '%Y-%m')
son = __dt.datetime.strptime(bitis_tarihi[:7], '%Y-%m')
date_list = []
while ilk <= son:
date_list.append(ilk.strftime("%Y-%m-%d"))
ilk = ilk + __rd.relativedelta(months=+1)
with __Pool(__mp.cpu_count()) as p:
df_list = p.map(__uecm_tedarik, date_list, chunksize=1)
return __pd.concat(df_list, sort=False)
def tahmin(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarih aralığı için saatlik yük tahmin plan bilgisini vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
Yük Tahmin Planı (Tarih, Saat, Tüketim)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = __first_part_url + "load-estimation-plan" + "?startDate=" + baslangic_tarihi + \
"&endDate=" + bitis_tarihi
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["loadEstimationPlanList"])
df["Saat"] = df["date"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["date"].apply(lambda d: d[:10]))
df.rename(index=str, columns={"lep": "Tüketim"}, inplace=True)
df = df[["Tarih", "Saat", "Tüketim"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def serbest_tuketici_sayisi(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarih aralığına tekabül eden uzlaştırma dönemleri için profil abone grubuna göre serbest tüketici hakkını
kullanan serbest tüketici sayıları bilgisini vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
Profil Abone Grubuna Göre Serbest Tüketici Sayıları (Tarih, Aydınlatma, Diğer, Mesken, Sanayi, Tarimsal,
Sulama, Ticarethane, Toplam)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
ilk = __dt.datetime.strptime(baslangic_tarihi[:7], '%Y-%m')
son = __dt.datetime.strptime(bitis_tarihi[:7], '%Y-%m')
date_list = []
while ilk <= son:
date_list.append(ilk.strftime("%Y-%m-%d"))
ilk = ilk + __rd.relativedelta(months=+1)
with __Pool(__mp.cpu_count()) as p:
df_list = p.map(__profil_serbest_tuketici_sayisi, date_list, chunksize=1)
df_st = __pd.concat(df_list, sort=False)
df_toplam = __serbest_tuketici_sayisi()
return __pd.merge(df_st, df_toplam, how="left", on=["Dönem"])
def sayac_okuyan_kurum(tarih=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
Sayaç okuyan kurumların bilgisini vermektedir.
    Parametreler
    ------------
    tarih : %YYYY-%AA-%GG formatında tarih (Varsayılan: bugün)
    Geri Dönüş Değeri
    -----------------
    Sayaç Okuyan Kurumlar (Id, Şirket Adı, Durum)
"""
if __dogrulama.__tarih_dogrulama(tarih):
try:
particular_url = __first_part_url + "meter-reading-company" + "?period=" + tarih
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["meterReadingCompanyList"])
df.rename(index=str,
columns={"id": "Id", "name": "Şirket Adı", "status": "Durum"},
inplace=True)
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def dagitim_bolgeleri():
"""
Dağıtım bölgelerine dair bilgileri vermektedir.
Parametreler
------------
Geri Dönüş Değeri
-----------------
    Dağıtım Bölgeleri (Id, Dağıtım Şirket Adı)
"""
try:
particular_url = __first_part_url + "distribution"
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["distributionList"])
df.rename(index=str, columns={"id": "Id", "name": "Dağıtım Şirket Adı"}, inplace=True)
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def profil_abone_grubu(tarih=__dt.datetime.today().strftime("%Y-%m-%d"), distribution_id=""):
"""
    İlgili tarihe tekabül eden uzlaştırma dönemi ve dağıtım bölgesi için profil abone grup listesini vermektedir.
    Parametreler
    ------------
    tarih           : %YYYY-%AA-%GG formatında tarih (Varsayılan: bugün)
    distribution_id : ilgili dağıtım bölgesinin Id değeri
    Geri Dönüş Değeri
    -----------------
    Profil Abone Grupları (Id, Profil Adı)
"""
if __dogrulama.__tarih_dogrulama(tarih):
try:
particular_url = __first_part_url + "subscriber-profile-group" + "?period=" + tarih + "&distributionId=" \
+ str(distribution_id)
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["subscriberProfileGroupList"])
df.rename(index=str, columns={"id": "Id", "name": "Profil Adı"}, inplace=True)
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def tum_dagitimlar_profil_gruplari(tarih=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
    İlgili tarihe tekabül eden uzlaştırma dönemi için tüm dağıtım bölgelerinin profil abone grup listelerini
    vermektedir.
    Parametreler
    ------------
    tarih : %YYYY-%AA-%GG formatında tarih (Varsayılan: bugün)
    Geri Dönüş Değeri
    -----------------
    Tüm Dağıtım Bölgelerine Ait Profil Abone Grupları (Id, her dağıtım şirketi için bir sütun)
"""
if __dogrulama.__tarih_dogrulama(tarih):
dist = dagitim_bolgeleri()
list_dist = list(dist["Id"])
org_len = len(list_dist)
list_date_dist = list(zip([tarih] * org_len, list_dist))
list_date_dist = list(map(list, list_date_dist))
with __Pool(__mp.cpu_count()) as p:
list_df_unit = p.starmap(profil_abone_grubu, list_date_dist, chunksize=1)
list_df_unit = list(filter(lambda x: len(x) > 0, list_df_unit))
df_unit = __red(lambda left, right: __pd.merge(left, right, how="outer", on=["Id"], sort=True),
list_df_unit)
df_unit.columns = ["Id"] + list(dist["Dağıtım Şirket Adı"])
return df_unit
def sayac_okuma_tipi():
"""
    Sayaç okuma tipi bilgilerini vermektedir.
Parametreler
------------
Geri Dönüş Değeri
-----------------
    Sayaç Okuma Tipleri (Id, Sayaç Tipi)
"""
try:
particular_url = __first_part_url + "meter-reading-type"
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["meterReadingTypeList"])
df.rename(index=str, columns={"id": "Id", "name": "Sayaç Tipi"}, inplace=True)
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def __uecm_donemlik(tarih=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarihe tekabül eden uzlaştırma dönemi için serbest tüketici hakkını kullanan serbest tüketicilerin, tedarik
yükümlülüğü kapsamındaki ve toplam Uzlaştırmaya Esas Çekiş Miktarı (UEÇM) bilgisini vermektedir.
Parametreler
------------
periyot : %YYYY-%AA-%GG formatında tarih (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
Serbest Tüketici, Tedarik Kapsamındaki ve Toplam Uzlaştırmaya Esas Çekiş Miktarı (Tarih, Saat, UEÇM,
Serbest Tüketici UEÇM, Tedarik Yükümlülüğü Kapsamındaki UEÇM)
"""
if __dogrulama.__tarih_dogrulama(tarih):
try:
particular_url = __first_part_url + "consumption" + "?period=" + tarih
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["consumptions"])
df["Dönem"] = df["period"].apply(lambda d: d[:7])
df.rename(index=str,
columns={"consumption": "UEÇM", "eligibleCustomerConsumption": "Serbest Tüketici UEÇM",
"underSupplyLiabilityConsumption": "Tedarik Yükümlülüğü Kapsamındaki UEÇM"},
inplace=True)
df = df[["Dönem", "UEÇM", "Serbest Tüketici UEÇM", "Tedarik Yükümlülüğü Kapsamındaki UEÇM"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def __uecm_serbest_tuketici(tarih=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarihe tekabül eden uzlaştırma dönemi için serbest tüketici hakkını kullanan serbest tüketicilerin saatlik
Uzlaştırmaya Esas Çekiş Miktarı (UEÇM) bilgisini vermektedir.
Parametreler
------------
periyot : %YYYY-%AA-%GG formatında tarih (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
Serbest Tüketici Uzlaştırmaya Esas Çekiş Miktarı (Tarih, Saat, Tüketim)
"""
if __dogrulama.__tarih_dogrulama(tarih):
try:
particular_url = __first_part_url + "swv-v2" + "?period=" + tarih
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["swvV2List"])
df["Saat"] = df["vc_gec_trh"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["vc_gec_trh"].apply(lambda d: d[:10]))
df.rename(index=str, columns={"st": "Serbest Tüketici UEÇM"}, inplace=True)
df = df[["Tarih", "Saat", "Serbest Tüketici UEÇM"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def __uecm_tedarik(tarih=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarihe tekabül eden uzlaştırma dönemi için tedarik yükümlülüğü kapsamındaki toplam Uzlaştırmaya Esas Çekiş
Miktarı (UEÇM) bilgisini vermektedir.
Parametreler
------------
    tarih : %YYYY-%AA-%GG formatında tarih (Varsayılan: bugün)
    Geri Dönüş Değeri
    -----------------
    Tedarik Yükümlülüğü Kapsamındaki UEÇM (Dönem, Tedarik Yükümlülüğü Kapsamındaki UEÇM)
"""
if __dogrulama.__tarih_dogrulama(tarih):
try:
particular_url = __first_part_url + "under-supply-liability-consumption" + "?startDate=" + tarih + \
"&endDate=" + tarih
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["swvList"])
df["Dönem"] = df["date"].apply(lambda d: d[:7])
df.rename(index=str, columns={"swv": "Tedarik Yükümlülüğü Kapsamındaki UEÇM"}, inplace=True)
df = df[["Dönem", "Tedarik Yükümlülüğü Kapsamındaki UEÇM"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def __serbest_tuketici_sayisi():
"""
İlgili tarih aralığına tekabül eden uzlaştırma dönemleri için serbest tüketici hakkını kullanan serbest
tüketicilerin aylık toplam sayısını vermektedir.
Parametreler
------------
Geri Dönüş Değeri
-----------------
Serbest Tüketici Sayısı (Tarih, Serbest Tüketici Sayısı, Artış Oranı)
"""
try:
particular_url = __first_part_url + "eligible-consumer-quantity"
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["eligibleConsumerQuantityList"])
df["Dönem"] = __pd.to_datetime(df["date"].apply(lambda d: d[:10]))
df.rename(index=str,
columns={"meterQuantity": "Serbest Tüketici Sayısı", "meterIncreaseRate": "Artış Oranı"},
inplace=True)
df = df[["Dönem", "Serbest Tüketici Sayısı", "Artış Oranı"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def __profil_serbest_tuketici_sayisi(tarih=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarihe tekabül eden uzlaştırma dönemi için profil abone grubuna göre serbest tüketici hakkını
kullanan serbest tüketici sayıları bilgisini vermektedir.
Parametreler
------------
    tarih : %YYYY-%AA-%GG formatında tarih (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
Profil Abone Grubuna Göre Serbest Tüketici Sayıları (Tarih, Aydınlatma, Diğer, Mesken, Sanayi, Tarimsal,
Sulama, Ticarethane)
"""
try:
particular_url = __first_part_url + "st" + "?startDate=" + tarih + "&endDate=" + tarih
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["stList"])
df["Profil"] = df["id"].apply(lambda x: x["profilAboneGrupAdi"])
df["Dönem"] = df["id"].apply(lambda x: __pd.to_datetime(x["date"][:10]))
df = df.pivot(index='Dönem', columns='Profil', values='stCount').reset_index()
df.columns.name = None
df.columns = df.columns.str.title()
df.rename(index=str,
columns={"Aydinlatma": "Aydınlatma", "Diger": "Diğer", "Tarimsal": "Tarımsal"},
inplace=True)
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
|
505378
|
from typing import Optional
from sqlalchemy.orm import Session
from app import crud, models
from app.schemas.track import TrackCreate
from app.tests.utils.utils import random_lower_string, random_url
from app.utils import canonical_preview_uri
from .provider import create_random_provider
from .license import create_random_license
def create_random_track(db: Session, *, provider_name: Optional[str] = None, license_name: Optional[str] = None) -> models.Track:
if provider_name is None:
        provider = create_random_provider(db)
provider_name = provider.name
if license_name is None:
license = create_random_license(db)
license_name = license.name
track_in = TrackCreate(
title = random_lower_string(),
artist = random_lower_string(),
url = random_url(),
provider_name = provider_name,
license_name = license_name,
media_url = random_url()
)
track_in.s3_preview_key = canonical_preview_uri(track_in)
return crud.track.create(db, obj_in=track_in)
# item_in = ItemCreate(title=title, description=description, id=id)
# return crud.item.create_with_owner(db=db, obj_in=item_in, owner_id=owner_id)
|
505379
|
from ctypescrypto.oid import Oid
from ctypescrypto import digest
from base64 import b16decode,b16encode
import unittest
class TestDigestType(unittest.TestCase):
def test_md4(self):
d=digest.DigestType("md4")
self.assertEqual(d.digest_size,16)
self.assertEqual(d.block_size,64)
self.assertEqual(d.oid,Oid("md4"))
self.assertEqual(d.name,'md4')
def test_md5(self):
d=digest.DigestType("md5")
self.assertEqual(d.digest_size,16)
self.assertEqual(d.block_size,64)
self.assertEqual(d.oid,Oid("md5"))
self.assertEqual(d.name,'md5')
def test_sha1(self):
d=digest.DigestType("sha1")
self.assertEqual(d.digest_size,20)
self.assertEqual(d.block_size,64)
self.assertEqual(d.oid,Oid("sha1"))
self.assertEqual(d.name,'sha1')
def test_sha256(self):
d=digest.DigestType("sha256")
self.assertEqual(d.digest_size,32)
self.assertEqual(d.block_size,64)
self.assertEqual(d.oid,Oid("sha256"))
self.assertEqual(d.name,'sha256')
def test_sha384(self):
d=digest.DigestType("sha384")
self.assertEqual(d.digest_size,48)
self.assertEqual(d.block_size,128)
self.assertEqual(d.oid,Oid("sha384"))
self.assertEqual(d.name,'sha384')
def test_sha512(self):
d=digest.DigestType("sha512")
self.assertEqual(d.digest_size,64)
self.assertEqual(d.block_size,128)
self.assertEqual(d.oid,Oid("sha512"))
self.assertEqual(d.name,'sha512')
def test_createfromoid(self):
oid=Oid('sha256')
d=digest.DigestType(oid)
self.assertEqual(d.digest_size,32)
self.assertEqual(d.block_size,64)
self.assertEqual(d.oid,Oid("sha256"))
self.assertEqual(d.name,'sha256')
def test_createfromEVP_MD(self):
d1=digest.DigestType("sha256")
d2=digest.DigestType(None)
with self.assertRaises(AttributeError):
s=d2.name
d2.digest=d1.digest
self.assertEqual(d2.digest_size,32)
self.assertEqual(d2.block_size,64)
self.assertEqual(d2.oid,Oid("sha256"))
self.assertEqual(d2.name,'sha256')
def test_invalidDigest(self):
with self.assertRaises(digest.DigestError):
d=digest.DigestType("no-such-digest")
class TestIface(unittest.TestCase):
""" Test all methods with one algorithms """
msg=b"A quick brown fox jumps over the lazy dog."
dgst="00CFFE7312BF9CA73584F24BDF7DF1D028340397"
def test_cons(self):
md=digest.DigestType("sha1")
dgst=digest.Digest(md)
dgst.update(self.msg)
self.assertEqual(dgst.digest_size,20)
self.assertEqual(dgst.hexdigest(),self.dgst)
def test_digestwithdata(self):
md=digest.DigestType("sha1")
dgst=digest.Digest(md)
self.assertEqual(dgst.digest(self.msg),b16decode(self.dgst))
def test_length(self):
l=len(self.msg)
msg=self.msg+b" Dog barks furiously."
dgst=digest.new("sha1")
dgst.update(msg,length=l)
self.assertEqual(dgst.hexdigest(),self.dgst)
def test_badlength(self):
l=len(self.msg)
dgst=digest.new("sha1")
with self.assertRaises(ValueError):
dgst.update(self.msg,length=l+1)
def test_bindigest(self):
dgst=digest.new("sha1")
dgst.update(self.msg)
self.assertEqual(dgst.digest_size,20)
self.assertEqual(dgst.digest(),b16decode(self.dgst,True))
def test_duplicatedigest(self):
dgst=digest.new("sha1")
dgst.update(self.msg)
v1=dgst.digest()
v2=dgst.digest()
self.assertEqual(v1,v2)
def test_updatefinalized(self):
dgst=digest.new("sha1")
dgst.update(self.msg)
h=dgst.hexdigest()
with self.assertRaises(digest.DigestError):
dgst.update(self.msg)
def test_wrongtype(self):
dgst=digest.new("sha1")
with self.assertRaises(TypeError):
dgst.update(['a','b','c'])
with self.assertRaises(TypeError):
dgst.update(18)
with self.assertRaises(TypeError):
dgst.update({"a":"b","c":"D"})
with self.assertRaises(TypeError):
dgst.update(u'\u0430\u0431')
def test_copy(self):
dgst=digest.new("sha1")
dgst.update(b"A quick brown fox jumps over ")
d2=dgst.copy()
dgst.update(b"the lazy dog.")
value1=dgst.hexdigest()
d2.update(b"the fat pig.")
value2=d2.hexdigest()
self.assertEqual(value1,"00CFFE7312BF9CA73584F24BDF7DF1D028340397")
self.assertEqual(value2,"5328F33739BEC2A15B6A30F17D3BC13CC11A7C78")
class TestAlgo(unittest.TestCase):
""" Test all statdard algorithms """
def test_md5(self):
d=digest.new("md5")
self.assertEqual(d.digest_size,16)
d.update(b"A quick brown fox jumps over the lazy dog.")
self.assertEqual(d.hexdigest(),"DF756A3769FCAB0A261880957590C768")
def test_md4(self):
d=digest.new("md4")
d.update(b"A quick brown fox jumps over the lazy dog.")
self.assertEqual(d.digest_size,16)
self.assertEqual(d.hexdigest(),"FAAED595A3E38BBF0D9B4B98021D200F")
def test_sha256(self):
d=digest.new("sha256")
d.update(b"A quick brown fox jumps over the lazy dog.")
self.assertEqual(d.digest_size,32)
self.assertEqual(d.hexdigest(),"FFCA2587CFD4846E4CB975B503C9EB940F94566AA394E8BD571458B9DA5097D5")
def test_sha384(self):
d=digest.new("sha384")
d.update(b"A quick brown fox jumps over the lazy dog.")
self.assertEqual(d.digest_size,48)
self.assertEqual(d.hexdigest(),"C7D71B1BA81D0DD028E79C7E75CF2F83169C14BA732CA5A2AD731151584E9DE843C1A314077D62B96B03367F72E126D8")
def test_sha512(self):
d=digest.new("sha512")
self.assertEqual(d.digest_size,64)
d.update(b"A quick brown fox jumps over the lazy dog.")
self.assertEqual(d.hexdigest(),"3045575CF3B873DD656F5F3426E04A4ACD11950BB2538772EE14867002B408E21FF18EF7F7B2CAB484A3C1C0BE3F8ACC4AED536A427353C7748DC365FC1A8646")
def test_wrongdigest(self):
with self.assertRaises(digest.DigestError):
dgst=digest.new("no-such-digest")
if __name__ == "__main__":
unittest.main()
|
505382
|
import numpy as np
import yaml
import sys, os
import trimesh
pykin_path = os.path.dirname(os.path.dirname(os.getcwd()))
sys.path.append(pykin_path)
from pykin.robots.single_arm import SingleArm
from pykin.kinematics.transform import Transform
from pykin.collision.collision_manager import CollisionManager
from pykin.utils.collision_utils import apply_robot_to_collision_manager, apply_robot_to_scene
custom_fpath = '../../asset/config/sawyer_init_params.yaml'
with open(custom_fpath) as f:
controller_config = yaml.safe_load(f)
init_qpos = controller_config["init_qpos"]
file_path = '../../asset/urdf/sawyer/sawyer.urdf'
robot = SingleArm(file_path, Transform(rot=[0.0, 0.0, 0.0], pos=[0, 0, -0.5]))
# fk = robot.forward_kin(np.array([0, np.pi/2, np.pi/2, np.pi/3, -np.pi/2, -np.pi/2, -np.pi/2, np.pi/2]))
fk = robot.forward_kin(np.array(np.concatenate((np.zeros(1), init_qpos))))
# fk = robot.forward_kin(np.zeros(8))
mesh_path = pykin_path+"/asset/urdf/sawyer/"
# init_transform
c_manager = CollisionManager(mesh_path)
c_manager.setup_robot_collision(robot, fk)
test, name, data = c_manager.in_collision_internal(return_names=True, return_data=True)
scene = trimesh.Scene()
scene = apply_robot_to_scene(scene=scene, mesh_path=mesh_path, robot=robot, fk=fk)
scene.set_camera(np.array([np.pi/2, 0, np.pi/2]), 5, resolution=(1024, 512))
# scene.show()
milk_path = pykin_path+"/asset/objects/meshes/milk.stl"
test_mesh = trimesh.load_mesh(milk_path)
scene.add_geometry(test_mesh, node_name="milk1", transform=Transform(pos=[0.0, 0, 0.1]).h_mat)
scene.add_geometry(test_mesh, node_name="milk2", transform=Transform(pos=[0.1, 0, 0.1]).h_mat)
o_manager = CollisionManager(milk_path)
o_manager.add_object("milk1", gtype="mesh", gparam=test_mesh, transform=Transform(pos=[0.0, 0, 0.1]).h_mat)
o_manager.add_object("milk2", gtype="mesh", gparam=test_mesh, transform=Transform(pos=[0.1, 0, 0.1]).h_mat)
test, name, data = o_manager.in_collision_internal(return_names=True, return_data=True)
result = o_manager.get_distances_other(c_manager)
print(result)
for (a,b), dis in result.items():
if dis <= 0.0:
print(a,b, dis)
scene.show()
|
505386
|
import tempfile
import os
import pytest
from sklearn import datasets
from tensorflow.keras import backend as K
import tensorflow as tf
from ivis.nn import losses as losses
from ivis.nn.distances import euclidean_distance
from ivis import Ivis
@pytest.fixture(scope='function')
def model_filepath():
with tempfile.TemporaryDirectory() as temp_dir:
fpath = os.path.join(temp_dir, 'test_loss_plugin.ivis')
yield fpath
def test_loss_function_call():
for loss_name in losses.loss_dict:
# Attempt to construct loss by name
losses.triplet_loss(distance=loss_name)
def test_custom_loss_fn_registration():
@losses.register_loss
def custom_loss_fn(y_true, y_pred):
return y_pred - y_true
assert custom_loss_fn.__name__ in losses.loss_dict
assert custom_loss_fn is losses.loss_dict[custom_loss_fn.__name__]
assert losses.triplet_loss(distance=custom_loss_fn.__name__) is custom_loss_fn
def test_custom_loss_ivis(model_filepath):
iris = datasets.load_iris()
X = iris.data
def euclidean_loss(y_true, y_pred):
margin = 1
anchor, positive, negative = tf.unstack(y_pred)
return K.mean(K.maximum(euclidean_distance(anchor, positive) - euclidean_distance(anchor, negative) + margin, 0))
model = Ivis(distance=euclidean_loss, k=15, batch_size=16, epochs=3)
y_pred = model.fit_transform(X)
# Test model saving and loading
model.save_model(model_filepath, overwrite=True)
model_2 = Ivis(distance=euclidean_loss)
model_2.load_model(model_filepath)
model_3 = Ivis()
with pytest.raises(ValueError):
model_3.load_model(model_filepath)
def test_custom_loss_ivis_callable(model_filepath):
iris = datasets.load_iris()
X = iris.data
class EuclideanDistance:
def __init__(self, margin=1):
self.margin = margin
self.__name__ = self.__class__.__name__
def _euclidean_distance(self, x, y):
return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=-1, keepdims=True), K.epsilon()))
def __call__(self, y_true, y_pred):
anchor, positive, negative = tf.unstack(y_pred)
return K.mean(K.maximum(self._euclidean_distance(anchor, positive) - self._euclidean_distance(anchor, negative) + self.margin, 0))
model = Ivis(distance=EuclideanDistance(margin=2), k=15, batch_size=16, epochs=5)
y_pred = model.fit_transform(X)
# Test model saving and loading
model.save_model(model_filepath, overwrite=True)
model_2 = Ivis(distance=EuclideanDistance(margin=2))
model_2.load_model(model_filepath)
model_2.fit(X)
|
505402
|
import subprocess
import os
import threading
import traceback
import tempfile
import shutil
import atexit
import signal
import shlex
from typing import Optional, Tuple, List
from copy import deepcopy
from pysrt import SubRipFile, SubRipItem
from decimal import Decimal
from .embedder import FeatureEmbedder
from .exception import TerminalException
from .exception import NoFrameRateException
from .logger import Logger
TEMP_DIR_PATH = tempfile.mkdtemp()
def clear_temp(*_):
if os.path.isdir(TEMP_DIR_PATH):
shutil.rmtree(TEMP_DIR_PATH)
class MediaHelper(object):
""" Utility for processing media assets including audio, video and
subtitle files.
"""
FFMPEG_BIN = os.getenv("FFMPEG_PATH") or os.getenv("ffmpeg_path") or "ffmpeg"
AUDIO_FILE_EXTENSION = [".wav", ".aac"]
__MIN_SECS_PER_WORD = 0.414 # 60 secs / 145 wpm
__MIN_GAP_IN_SECS = (
1 # minimum gap in seconds between consecutive subtitle during segmentation
)
__CMD_TIME_OUT = 180 # time out for subprocess
atexit.register(clear_temp)
signal.signal(signal.SIGTERM, clear_temp)
def __init__(self):
self.__LOGGER = Logger().get_logger(__name__)
def extract_audio(self, video_file_path, decompress: bool = False, freq: int = 16000) -> str:
"""Extract audio track from the video file and save it to a WAV file.
Arguments:
video_file_path {string} -- The input video file path.
Keyword Arguments:
decompress {bool} -- Extract WAV if True otherwise extract AAC (default: {False}).
freq {int} -- The audio sample frequency (default: {16000}).
Returns:
string -- The file path of the extracted audio.
"""
basename = os.path.basename(video_file_path)
# Using WAV for training or prediction is faster than using AAC.
# However the former will result in larger temporary audio files saved on the disk.
if decompress:
assert freq is not None, "Frequency is needed for decompression"
audio_file_path = "{0}/{1}{2}".format(
TEMP_DIR_PATH, basename, self.AUDIO_FILE_EXTENSION[0]
)
else:
audio_file_path = "{0}/{1}{2}".format(
TEMP_DIR_PATH, basename, self.AUDIO_FILE_EXTENSION[1]
)
command = (
"{0} -y -xerror -i '{1}' -ac 2 -ar {2} -vn '{3}'".format(
self.FFMPEG_BIN, video_file_path, freq, audio_file_path
)
if decompress
else "{0} -y -xerror -i '{1}' -vn -acodec copy '{2}'".format(
self.FFMPEG_BIN, video_file_path, audio_file_path
)
)
with subprocess.Popen(
shlex.split(command),
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
universal_newlines=True,
bufsize=1,
) as process:
try:
self.__LOGGER.debug("[{}] Running: {}".format(process.pid, command))
_, std_err = process.communicate(timeout=self.__CMD_TIME_OUT)
self.__LOGGER.debug("[{}] {}".format(process.pid, std_err))
if process.returncode != 0:
self.__LOGGER.error("[{}] Cannot extract audio from video: {}\n{}"
.format(process.pid, video_file_path, std_err))
raise TerminalException(
"Cannot extract audio from video: {}".format(video_file_path)
)
self.__LOGGER.info(
"[{}] Extracted audio file: {}".format(process.pid, audio_file_path))
return audio_file_path
except subprocess.TimeoutExpired as te:
self.__LOGGER.error("Timeout on extracting audio from video: {}".format(video_file_path))
if os.path.exists(audio_file_path):
os.remove(audio_file_path)
raise TerminalException(
"Timeout on extracting audio from video: {}".format(video_file_path)
) from te
except Exception as e:
if os.path.exists(audio_file_path):
os.remove(audio_file_path)
if isinstance(e, TerminalException):
raise e
else:
raise TerminalException(
"Cannot extract audio from video: {}".format(video_file_path)
) from e
except KeyboardInterrupt:
self.__LOGGER.error(
"[{}] Extracting audio from video {} interrupted".format(
process.pid, video_file_path
)
)
if os.path.exists(audio_file_path):
os.remove(audio_file_path)
process.send_signal(signal.SIGINT)
raise TerminalException(
"Extracting audio from video {} interrupted".format(video_file_path)
)
finally:
process.kill()
os.system("stty sane")
def get_duration_in_seconds(self, start: Optional[str], end: Optional[str]) -> Optional[float]:
"""Get the duration in seconds between a start time and an end time.
Arguments:
start {string} -- The start time (e.g., 00:00:00,750).
end {string} -- The end time (e.g., 00:00:10,230).
Returns:
float -- The duration in seconds.
"""
if start is None:
start = "00:00:00,000"
if end is None:
return None
start = start.replace(",", ".")
end = end.replace(",", ".")
start_h, start_m, start_s = map(Decimal, start.split(":"))
end_h, end_m, end_s = map(Decimal, end.split(":"))
return float(
(end_h * 3600 + end_m * 60 + end_s)
- (start_h * 3600 + start_m * 60 + start_s)
)
def extract_audio_from_start_to_end(self, audio_file_path: str, start: str, end: Optional[str] = None) -> Tuple[str, Optional[float]]:
"""Extract audio based on the start time and the end time and save it to a temporary file.
Arguments:
audio_file_path {string} -- The path of the audio file.
start {string} -- The start time (e.g., 00:00:00,750).
Keyword Arguments:
end {string} -- The end time (e.g., 00:00:10,230) (default: {None}).
Returns:
tuple -- The file path to the extracted audio and its duration.
"""
segment_duration = self.get_duration_in_seconds(start, end)
basename = os.path.basename(audio_file_path)
filename, extension = os.path.splitext(basename)
start = start.replace(",", ".")
if end is not None:
end = end.replace(",", ".")
segment_path = "{0}/{1}_{2}_{3}{4}".format(TEMP_DIR_PATH, filename, str(start), str(end), extension)
if end is not None:
duration = self.get_duration_in_seconds(start, end)
command = "{0} -y -xerror -i '{1}' -ss {2} -t {3} -acodec copy '{4}'".format(
self.FFMPEG_BIN, audio_file_path, start, duration, segment_path
)
else:
command = "{0} -y -xerror -i '{1}' -ss {2} -acodec copy '{3}'".format(
self.FFMPEG_BIN, audio_file_path, start, segment_path
)
with subprocess.Popen(
shlex.split(command),
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
bufsize=1,
) as process:
self.__LOGGER.debug("[{}] Running: {}".format(process.pid, command))
try:
_, std_err = process.communicate(timeout=self.__CMD_TIME_OUT)
self.__LOGGER.debug("[{}] {}".format(process.pid, std_err))
if process.returncode != 0:
self.__LOGGER.error("[{}] Cannot clip audio: {} Return Code: {}\n{}"
.format(process.pid, audio_file_path, process.returncode, std_err))
raise TerminalException(
"Cannot clip audio: {} Return Code: {}".format(audio_file_path, process.returncode)
)
self.__LOGGER.info(
"[{}] Extracted audio segment: {}".format(process.pid, segment_path))
return segment_path, segment_duration
except subprocess.TimeoutExpired as e:
self.__LOGGER.error(
"[{}] Extracting {} timed out: {}\n{}".format(
process.pid, segment_path, str(e), "\n".join(traceback.format_stack())
)
)
traceback.print_tb(e.__traceback__)
if os.path.exists(segment_path):
os.remove(segment_path)
raise TerminalException(
"Timeout on extracting audio from audio: {} after {} seconds".format(audio_file_path, self.__CMD_TIME_OUT)
) from e
except Exception as e:
self.__LOGGER.error(
"[{}] Extracting {} failed: {}\n{}".format(
process.pid, segment_path, str(e), "\n".join(traceback.format_stack())
)
)
traceback.print_tb(e.__traceback__)
if os.path.exists(segment_path):
os.remove(segment_path)
if isinstance(e, TerminalException):
raise e
else:
raise TerminalException(
"Cannot clip audio: {}".format(audio_file_path)
) from e
except KeyboardInterrupt:
self.__LOGGER.error(
"[{}] Extracting with start and end from {} interrupted".format(
process.pid, segment_path
)
)
if os.path.exists(segment_path):
os.remove(segment_path)
process.send_signal(signal.SIGINT)
raise TerminalException("Extracting with start and end from {} interrupted".format(segment_path))
finally:
process.kill()
os.system("stty sane")
def get_audio_segment_starts_and_ends(self, subs: List[SubRipItem]) -> Tuple[List[str], List[str], List[SubRipFile]]:
"""Group subtitle cues into larger segments in terms of silence gaps.
Arguments:
            subs {list} -- A list of SubRip cues.
Returns:
tuple -- A list of start times, a list of end times and a list of grouped SubRip files.
"""
local_subs = self.__preprocess_subs(subs)
segment_starts = []
segment_ends = []
combined = []
new_subs = []
current_start = str(local_subs[0].start)
for i in range(len(local_subs)):
if i == len(local_subs) - 1:
combined.append(local_subs[i])
segment_starts.append(current_start)
segment_ends.append(str(local_subs[i].end))
new_subs.append(SubRipFile(combined))
del combined[:]
else:
# Do not segment when the subtitle is too short
duration = FeatureEmbedder.time_to_sec(
local_subs[i].end
) - FeatureEmbedder.time_to_sec(local_subs[i].start)
if duration < self.__MIN_SECS_PER_WORD:
combined.append(local_subs[i])
continue
# Do not segment consecutive subtitles having little or no gap.
gap = FeatureEmbedder.time_to_sec(
local_subs[i + 1].start
) - FeatureEmbedder.time_to_sec(local_subs[i].end)
if (
local_subs[i].end == local_subs[i + 1].start
or gap < self.__MIN_GAP_IN_SECS
):
combined.append(local_subs[i])
continue
combined.append(local_subs[i])
# The start time is set to last cue's end time
segment_starts.append(current_start)
                # The end time cannot be set to the next cue's start time due to possible overlaps
segment_ends.append(str(local_subs[i].end))
current_start = str(local_subs[i].end)
new_subs.append(SubRipFile(combined))
del combined[:]
return segment_starts, segment_ends, new_subs
def get_frame_rate(self, file_path: str) -> float:
"""Extract the video frame rate. Will return 25 when input is audio
Arguments:
file_path {string} -- The input audiovisual file path.
Returns:
float -- The frame rate
"""
with subprocess.Popen(
shlex.split("{0} -i '{1}' -t 00:00:10 -f null /dev/null".format(self.FFMPEG_BIN, file_path)),
shell=False,
stderr=subprocess.PIPE,
close_fds=True,
universal_newlines=True,
bufsize=1,
) as proc:
with subprocess.Popen(
['grep', '-Eo', r"[0-9]{1,3}(\.[0-9]{1,3})?\sfps,"],
shell=False,
stdin=proc.stderr,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
universal_newlines=True,
bufsize=1,
) as process:
try:
std_out, std_err = process.communicate(timeout=self.__CMD_TIME_OUT)
if process.returncode != 0:
self.__LOGGER.warning("[{}] Cannot extract the frame rate from video: {}\n{}".format(process.pid, file_path, std_err))
raise NoFrameRateException(
"Cannot extract the frame rate from video: {}".format(file_path)
)
fps = float(std_out.split(" ")[0])
                    # ffmpeg reports the frame rate with two decimal places, hence this hack
fps = fps if fps != 23.98 else 23.976
self.__LOGGER.info("[{}] Extracted frame rate: {} fps".format(process.pid, fps))
return fps
except subprocess.TimeoutExpired as te:
raise NoFrameRateException(
"Timeout on extracting the frame rate from video: {}".format(file_path)
) from te
except Exception as e:
if isinstance(e, TerminalException):
raise e
else:
raise NoFrameRateException(
"Cannot extract the frame rate from video: {}".format(file_path)
) from e
except KeyboardInterrupt:
self.__LOGGER.error(
"[{}] Extracting frame rate from video {} interrupted".format(
process.pid, file_path
)
)
process.send_signal(signal.SIGINT)
proc.send_signal(signal.SIGINT)
raise TerminalException("Extracting frame rate from video {} interrupted".format(file_path))
finally:
process.kill()
proc.kill()
os.system("stty sane")
def refragment_with_min_duration(self, subs: List[SubRipItem], minimum_segment_duration: float) -> List[SubRipItem]:
"""Re-fragment a list of subtitle cues into new cues each of spans a minimum duration
Arguments:
            subs {list} -- A list of SubRip cues.
minimum_segment_duration {float} -- The minimum duration in seconds for each output subtitle cue.
Returns:
            list -- A list of new SubRip cues after fragmentation.
"""
new_segment = []
new_segment_index = 0
new_segment_duration = 0.0
new_segment_text = ""
new_subs = []
for sub in subs:
if minimum_segment_duration > new_segment_duration:
new_segment.append(sub)
new_segment_duration += self.get_duration_in_seconds(str(sub.start), str(sub.end)) or 0.0
new_segment_text += "{}\n".format(sub.text)
else:
concatenated_item = SubRipItem(new_segment_index, new_segment[0].start, new_segment[-1].end,
new_segment_text, new_segment[0].position)
new_subs.append(concatenated_item)
new_segment_index += 1
new_segment = [sub]
new_segment_duration = self.get_duration_in_seconds(str(sub.start), str(sub.end)) or 0.0
new_segment_text = "{}\n".format(sub.text)
if new_segment:
concatenated_item = SubRipItem(new_segment_index, new_segment[0].start, new_segment[-1].end,
new_segment_text, new_segment[0].position)
new_subs.append(concatenated_item)
return new_subs
def __preprocess_subs(self, subs: List[SubRipItem]) -> List[SubRipItem]:
local_subs = deepcopy(subs)
# Preprocess overlapping subtitles
for i in range(len(local_subs)):
if i != 0 and local_subs[i].start < local_subs[i - 1].end:
self.__LOGGER.warning("Found overlapping subtitle cues and the earlier one's duration will be shortened.")
local_subs[i - 1].end = local_subs[i].start
return local_subs
|
505447
|
from abc import ABC, abstractmethod
from typing import List
from core.domain.profile.entity.user import User, UserBasicProfile, UserExtraProfile
class ProfileRepository(ABC):
@abstractmethod
def get_user(self, user_type: str, user_id: int) -> User:
return NotImplemented
@abstractmethod
def create_user(self, user: User) -> None:
return NotImplemented
@abstractmethod
def update_user_basic(self, user_type: str, user_id: int, basic_profile: UserBasicProfile) -> None:
return NotImplemented
@abstractmethod
def update_user_extra(self, user_type: str, user_id: int, extra_profiles: List[UserExtraProfile]) -> None:
return NotImplemented
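# Illustrative in-memory implementation (a sketch, not part of the original module).
# Profiles are keyed by (user_type, user_id); create_user assumes the User entity
# exposes `user_type` and `user_id` attributes, which is an assumption to adapt.
class InMemoryProfileRepository(ProfileRepository):
    def __init__(self) -> None:
        self._users: dict = {}
        self._basic: dict = {}
        self._extra: dict = {}
    def get_user(self, user_type: str, user_id: int) -> User:
        return self._users[(user_type, user_id)]
    def create_user(self, user: User) -> None:
        self._users[(user.user_type, user.user_id)] = user
    def update_user_basic(self, user_type: str, user_id: int, basic_profile: UserBasicProfile) -> None:
        self._basic[(user_type, user_id)] = basic_profile
    def update_user_extra(self, user_type: str, user_id: int, extra_profiles: List[UserExtraProfile]) -> None:
        self._extra[(user_type, user_id)] = list(extra_profiles)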
|
505450
|
import grequests
import httplib
import logging
import os
import requests
import subprocess
import tempfile
import time
import unittest
from e2e.extensions.filters.common import filtertest
DEBUG=True
class NatsStreamingTestCase(filtertest.TestCase):
def __init__(self, *args, **kwargs):
artifact_root_path = "./e2e/extensions/filters/http/nats/streaming"
super(NatsStreamingTestCase, self).__init__(artifact_root_path, *args, **kwargs)
def setUp(self):
super(NatsStreamingTestCase, self).setUp()
# A temporary file is used to avoid pipe buffering issues.
self.stderr = tempfile.NamedTemporaryFile("rw+", delete=True)
def tearDown(self):
super(NatsStreamingTestCase, self).tearDown()
# The file is deleted as soon as it is closed.
if self.stderr is not None:
self.stderr.close()
self.stderr = None
def __create_config(self):
create_config_path = self._join_artifact_path("create_config.sh")
subprocess.check_call(create_config_path)
def __start_nats_server(self):
args = ["gnatsd", "-DV"] if DEBUG else "gnatsd"
self._processes["nats_server"] = subprocess.Popen(args)
def __start_nats_streaming_server(self):
args = ["nats-streaming-server", "-ns", "nats://localhost:4222"]
if DEBUG:
args.append("-SDV")
self._processes["nats_streaming_server"] = subprocess.Popen(args)
def __sub(self):
self._processes["sub_process"] = subprocess.Popen(
["stan-sub", "-id", "17", "subject1"],
stderr=self.stderr)
time.sleep(.1)
def __make_request(self, payload, expected_status):
response = requests.post('http://localhost:10000/post', payload)
self.assertEqual(expected_status, response.status_code)
    def __make_many_requests(self, payloads, expected_status):
        # Build the requests lazily; use a distinct name to avoid shadowing the imported `requests` module.
        pending_requests = (grequests.post('http://localhost:10000/post', data=p) for p in payloads)
        responses = grequests.map(pending_requests)
        if expected_status:
            for response in responses:
                self.assertEqual(expected_status, response.status_code)
def __wait_for_response(self, data):
time.sleep(0.1)
self._processes["sub_process"].terminate()
del self._processes["sub_process"]
self.stderr.seek(0, 0)
stderr = self.stderr.read()
# TODO(talnordan): Validate the entire Protobuf message, including headers.
self.assertIn('subject:"subject1"', stderr)
self.assertIn(data, stderr)
def __make_request_batches(self,
format_string,
batches,
requests_in_batch,
sleep_interval,
expected_status):
for i in xrange(batches):
payloads = [(format_string % (i, j)) for j in xrange(requests_in_batch)]
self.__make_many_requests(payloads, expected_status)
time.sleep(sleep_interval)
def test_make_many_requests(self):
# Set up environment.
self.__create_config()
self.__start_nats_server()
self.__start_nats_streaming_server()
self._start_envoy("./envoy.yaml", DEBUG)
self.__sub()
# Make many requests and assert that they succeed.
self.__make_request_batches("solopayload %d %d", 3, 1024, 0.1, httplib.OK)
self.__wait_for_response("solopayload 2 1023")
# Terminate NATS Streaming to make future requests timeout.
self._processes["nats_streaming_server"].terminate()
del self._processes["nats_streaming_server"]
# Make many requests and assert that they timeout.
self.__make_request_batches("solopayload %d %d", 2, 1024, 0.1, httplib.REQUEST_TIMEOUT)
def test_profile(self):
report_loc = os.environ.get("TEST_PROF_REPORT","")
if not report_loc:
self.skipTest("to enable, set TEST_PROF_REPORT to where you want the report to be saved. " + \
"i.e. TEST_PROF_REPORT=report.data")
print("Starting perf tests; if you have issues you might need to enable perf for normal users:")
print("'echo -1 | sudo tee /proc/sys/kernel/perf_event_paranoid'")
print("'echo 0 | sudo tee /proc/sys/kernel/kptr_restrict'")
# Set up environment.
# See https://github.com/envoyproxy/envoy/blob/e51c8ad0e0526f78c47a7f90807c184a039207d5/tools/envoy_collect/envoy_collect.py#L192
self.__create_config()
self.__start_nats_server()
self.__start_nats_streaming_server()
self._start_envoy(["perf", "record", "-g","--"], ["-l","error"])
self.__sub()
# Make many requests and assert that they succeed.
self.__make_request_batches("solopayload %d %d", 20, 1024, 0.1, None)
        # The performance tests are slower, so we have lower expectations of what's received.
self.__wait_for_response("solopayload 0 500")
# tear down everything so we can copy the report
self.tearDown()
        # copy the report to the requested location
subprocess.check_call(["cp", "perf.data", report_loc])
if __name__ == "__main__":
global DEBUG
DEBUG = True if os.environ.get("DEBUG","") != "0" else False
if DEBUG:
logging.basicConfig(level=logging.DEBUG)
unittest.main()
|
505458
|
import sys
import glob
import serial
import serial.tools.list_ports
import socket
import time
from SerialConsole import SerialConsole
def serial_ports():
ports = list(serial.tools.list_ports.comports())
result = {}
for p in ports:
result[p.device] = p.description
return result
def get_computer_name():
return socket.gethostname()
PRESETS = {
"MODE_RAINBOW": "1",
"MODE_RAINBOW_CYCLE": "2",
"MODE_RAINBOW_THEATRE": "3",
"MODE_THEATRE_WHITE": "4",
"MODE_THEATRE_RED": "5",
"MODE_THEATRE_GREEN": "6",
"MODE_THEATRE_BLUE": "7",
"MODE_RED": "8",
"MODE_GREEN": "9",
"MODE_BLUE": "10",
"MODE_WHITE": "CUSTOM#255#255#255",
"MODE_CUSTOM": "11",
"MODE_MOVE": "12",
"CHANGE_WAIT_TIME_1": "WT#1",
"CHANGE_WAIT_TIME_5": "WT#5",
"CHANGE_WAIT_TIME_10": "WT#10",
"CHANGE_WAIT_TIME_20": "WT#20",
"CHANGE_WAIT_TIME_30": "WT#30",
"CHANGE_WAIT_TIME_40": "WT#40",
"CHANGE_WAIT_TIME_50": "WT#50"
}
def handle_change(data):
selected_com = data['selectedPort']
argb_mode = data['argbMode']
argb_value = data['customARGB']
a = serial.Serial(selected_com, 9600, timeout=.1)
time.sleep(5)
instruction = argb_mode if argb_mode != PRESETS['MODE_CUSTOM'] else "CUSTOM#"+argb_value
a.write(bytearray(instruction, 'utf-8'))
a.close()
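# Illustrative call (hypothetical port name; the payload keys mirror handle_change above):
# handle_change({'selectedPort': 'COM3', 'argbMode': PRESETS['MODE_CUSTOM'], 'customARGB': '255#128#0'})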
if __name__ == '__main__':
print(serial_ports())
|
505470
|
import unittest
from limitlessled.bridge import Bridge
from limitlessled.group.white import WhiteGroup, WHITE
from limitlessled.group.rgbw import RgbwGroup, RGBW
from limitlessled.group.rgbww import RgbwwGroup, RGBWW
# TODO: mock socket
class TestWhiteGroup(unittest.TestCase):
def setUp(self):
self.bridge = Bridge('localhost', 9999, version=5)
def test_brightness(self):
pass
def test_temperature(self):
pass
def test_transition(self):
pass
def tearDown(self):
self.bridge.close()
|
505474
|
from argparse import Namespace
import json
import os
from pathlib import Path
from eth_typing import URI
from eth_utils import is_same_address
from ethpm.backends.registry import is_valid_registry_uri, parse_registry_uri
from ethpm.exceptions import EthPMValidationError
from ethpm.uri import is_ipfs_uri, is_valid_content_addressed_github_uri
from ethpm.validation.package import validate_package_name
from web3 import Web3
from ethpm_cli._utils.etherscan import is_etherscan_uri
from ethpm_cli.constants import ETHERSCAN_KEY_ENV_VAR, SOLC_OUTPUT
from ethpm_cli.exceptions import (
EtherscanKeyNotFound,
InstallError,
UriNotSupportedError,
ValidationError,
)
def validate_parent_directory(parent_dir: Path, child_dir: Path) -> None:
if parent_dir not in child_dir.parents:
raise InstallError(f"{parent_dir} was not found in {child_dir} directory tree.")
def validate_project_directory(project_dir: Path) -> None:
if not project_dir.is_dir():
raise ValidationError(f"{project_dir} is not a valid directory")
if not (project_dir / "contracts").is_dir():
raise ValidationError(
f"{project_dir} must contain a contracts/ directory that contains project contracts."
)
def validate_solc_output(project_dir: Path) -> None:
solc_output_path = project_dir / SOLC_OUTPUT
if not solc_output_path.is_file():
raise ValidationError(
f"{project_dir} does not contain solc output. Please follow the steps in the "
"documentation to generate your Solidity compiler output."
)
try:
solc_output_data = json.loads(solc_output_path.read_text())
except ValueError:
raise ValidationError(
f"Content found at {solc_output_path} does not look like valid json."
)
if "contracts" not in solc_output_data:
raise ValidationError(
f"JSON found at {solc_output_path} does not look like valid "
"Solidity compiler standard json output."
)
def validate_install_cli_args(args: Namespace) -> None:
validate_supported_uri(args.uri)
if args.alias:
validate_alias(args.alias)
if args.ethpm_dir:
validate_ethpm_dir(args.ethpm_dir)
if is_etherscan_uri(args.uri):
if not args.package_name or not args.package_version:
raise InstallError(
"To install an Etherscan verified contract, you must specify both the "
"--package-name and --package-version."
)
else:
if args.package_name:
raise InstallError(
"You cannot redefine the package_name of an existing package. "
"Consider aliasing the package instead."
)
if args.package_version:
raise InstallError(
"You cannot redefine the version of an existing package."
)
def validate_uninstall_cli_args(args: Namespace) -> None:
validate_package_name(args.package)
if args.ethpm_dir:
validate_ethpm_dir(args.ethpm_dir)
def validate_etherscan_key_available() -> None:
if ETHERSCAN_KEY_ENV_VAR not in os.environ:
raise EtherscanKeyNotFound(
"No Etherscan API key found. Please ensure that the "
f"{ETHERSCAN_KEY_ENV_VAR} environment variable is set."
)
def validate_supported_uri(uri: URI) -> None:
if (
not is_ipfs_uri(uri)
and not is_etherscan_uri(uri) # noqa: W503
and not is_valid_registry_uri(uri) # noqa: W503
and not is_valid_content_addressed_github_uri(uri) # noqa: W503
):
raise UriNotSupportedError(
f"Target uri: {uri} not a currently supported uri. "
"Target uris must be one of: ipfs, github blob, etherscan, or registry."
)
def validate_alias(alias: str) -> None:
try:
validate_package_name(alias)
except EthPMValidationError:
raise ValidationError(
f"{alias} is not a valid package name. All aliases must conform "
"to the ethpm spec definition of a package name."
)
def validate_ethpm_dir(ethpm_dir: Path) -> None:
if ethpm_dir.name != "_ethpm_packages" or not ethpm_dir.is_dir():
raise InstallError(
"--ethpm-dir must point to an existing '_ethpm_packages' directory."
)
def validate_chain_data_store(chain_data_path: Path, w3: Web3) -> None:
"""
Validates that chain_data_path points to a file corresponding
to the provided web3 instance.
"""
if not chain_data_path.is_file():
raise InstallError(
f"{chain_data_path} does not appear to be a valid EthPM CLI datastore."
)
chain_data = json.loads(chain_data_path.read_text())
if chain_data["chain_id"] != w3.eth.chainId:
raise InstallError(
f"Chain ID found in EthPM CLI datastore: {chain_data['chain_id']} "
f"does not match chain ID of provided web3 instance: {w3.eth.chainId}"
)
def validate_same_registry(left: str, right: str) -> None:
left_uri = parse_registry_uri(left)
right_uri = parse_registry_uri(right)
if (
not is_same_address(left_uri.address, right_uri.address)
or left_uri.chain_id != right_uri.chain_id # noqa: W503
):
raise ValidationError(
f"Registry URI: {left} does not match the registry found on URI: {right}."
)
|
505480
|
from flask_unchained.cli import cli, click
@cli.command()
def vendor_top_level():
"""vendor_bundle docstring"""
click.echo('vendor_bundle')
# this group will have its baz command overridden
@cli.group()
def foo_group():
"""vendor_bundle docstring"""
@foo_group.command()
def bar():
"""vendor_bundle docstring"""
click.echo('vendor_bundle')
@foo_group.command()
def baz():
"""vendor_bundle docstring"""
click.echo('vendor_bundle')
# this group should get overridden by the myapp bundle
@cli.group()
def goo_group():
"""vendor_bundle docstring"""
@goo_group.command()
def gar():
"""vendor_bundle docstring"""
click.echo('vendor_bundle')
@goo_group.command()
def gaz():
"""the overridden group should not contain this command"""
click.echo('vendor_bundle')
|
505501
|
from __future__ import absolute_import
from future.utils import PY3
if PY3:
from _thread import *
else:
__future_module__ = True
from thread import *
|
505533
|
from typing import Optional, List, Tuple
from drkns.configunit.ConfigUnit import ConfigUnit
from drkns.runner.get_execution_plan import get_execution_plan
from drkns.runner.run_plan import run_plan
from drkns.runner.get_successful_flag_and_combined_output import\
get_successful_flag_and_combined_output
def run(
root_config_unit: ConfigUnit,
target_step_name: Optional[str] = None,
summary: bool = False,
limit_output: bool = False) \
-> Tuple[int, List[str]]:
plan = get_execution_plan(root_config_unit, target_step_name)
status_history = run_plan(plan)
return get_successful_flag_and_combined_output(
status_history, summary, limit_output)
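# Illustrative usage (assumes a root ConfigUnit has already been loaded elsewhere):
#   successful_flag, combined_output = run(root_config_unit, target_step_name=None, summary=True)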
|
505563
|
import requests
from bs4 import BeautifulSoup
import csv
main_url = "https://www.amazon.in/gp/bestsellers/books/"
req = requests.get(main_url)
htmltext = BeautifulSoup(req.content, "lxml")
pagetxt = htmltext.find_all("div", {"id": "zg_paginationWrapper"})
listing = []
pageURL = []
for i in range(0, len(pagetxt)):
listing.append(pagetxt[i].find("ol").find_all("li"))
for i in range(0, len(listing[0])):
pageURL.append(listing[0][i].find("a")['href'])
bookURL = []
author = []
author_type1 = []
author_type2 = []
price = []
name = []
ratings = []
averageRating = []
for j in range(0, len(pageURL)):
req = requests.get(pageURL[j])
sourceCode = BeautifulSoup(req.content, "lxml")
urltxt = sourceCode.find_all("div", {"class": "zg_itemWrapper"})
for i in range(0, len(urltxt)):
string = urltxt[i].find("div").find(
"a", {"class": "a-link-normal"})['href']
bookURL.append("https://www.amazon.in" + string
)
author_type1.append(urltxt[i].find(
"div", {"class": "a-row a-size-small"}).find("a"))
author_type2.append(urltxt[i].find(
"div", {"class": "a-row a-size-small"}).find("span"))
price.append(urltxt[i].find(
"span", {"class": "p13n-sc-price"}))
temp = urltxt[i].find(
"div", {"aria-hidden": "true"}).contents[0].rsplit()
name.append(' '.join(temp))
ratings.append(urltxt[i].find(
"a", {"class": "a-size-small a-link-normal"}))
averageRating.append(urltxt[i].find("i"))
'''print(author_type1)
print(author_type2)
print(price)
print(len(author_type1))
print(len(author_type2))
print(len(price))'''
data = [["Name", "URL", "Author", "Price",
"Number of Ratings", "Average Rating"]]
index = 0
for i in range(100):
# print(index)
'''author.append(None)
if(author_type1[index] is None):
if(author_type2[index] is None):
author[index]=("Not available")
else:
if author_type2[index] is not None:
author[index]=(author_type2[index].find("span").contents[0])
else:
if author_type1[index] is not None:
author[index]=(author_type1[index].find("a").contents[0])'''
try:
if(author_type1[index] is None):
author.append(author_type2[index].contents[0])
else:
author.append(author_type1[index].contents[0])
except:
author.append("Not available")
if(price[index] is None):
price[index] = "Not available"
else:
price[index] = price[index].contents[1][1:]
if(averageRating[index] is None):
averageRating[index] = "Not available"
else:
averageRating[index] = averageRating[index].find("span").contents[0]
if(ratings[index] is None):
ratings[index] = "Not available"
averageRating[index] = "Not available"
else:
ratings[index] = ratings[index].contents[0]
entry = [name[index], bookURL[index], author[index],
price[index], ratings[index], averageRating[index]]
index = index+1
data.append(entry)
with open('./output/in_book.csv', 'w', newline='') as file:  # newline='' prevents blank rows on Windows
writer = csv.writer(file, delimiter=";")
for row in data:
writer.writerow(row)
|
505604
|
import abc
import dataclasses
import re
from typing import List, Optional, Tuple
from . import helpers, inflection
from .helpers import MatchError, ModSet
from .js_function import JsFunction, RustParam
from .js_type import JsType, TypeWithDocumentation
from .models import Context, Documented, ToRust
@dataclasses.dataclass()
class JsMember(Documented, ToRust, abc.ABC):
class_: str
ident: str
@staticmethod
def consume(s: str, class_: str) -> Tuple["JsMember", str]:
return helpers.consume_first(s, JsMethod, JsProperty, args=(class_,))
def this_parameter(self) -> RustParam:
return RustParam(ident="this", ty=f"&{self.class_}")
def build_wasm_bindgen_attr(self, *args: str, **kwargs: str) -> str:
return helpers.build_wasm_bindgen_attr(
*args, js_class=f'"{self.class_}"', js_name=f'"{self.ident}"', **kwargs
)
_PATTERN_PROPERTY = re.compile(
r"^ *(?P<mods>(?:static |readonly )*)(?P<ident>\w+)(?P<optional>\??):\s*(?P<type>.+?);\n",
re.DOTALL,
)
@dataclasses.dataclass()
class JsProperty(JsMember):
type_: JsType
static: bool
readonly: bool
optional: bool
@classmethod
def consume(cls, s: str, class_: str) -> Tuple["JsProperty", str]:
doc, s = Documented.consume(s)
match, s = helpers.consume_match(_PATTERN_PROPERTY, s)
mods = ModSet.create(match["mods"])
static = mods.pop("static")
readonly = mods.pop("readonly")
mods.assert_empty()
optional = bool(match["optional"])
method = cls(
documentation=doc,
class_=class_,
ident=match["ident"],
type_=JsType(match["type"]),
static=static,
readonly=readonly,
optional=optional,
)
return method, s
def type_to_rust(self, ctx: Context, owned: bool) -> TypeWithDocumentation:
ty = self.type_.to_rust(ctx, owned, create_helpers=owned)
return (
ty.to_option()
if self.optional
else ty
)
def type_documentation(self, ty: TypeWithDocumentation) -> Optional[str]:
if doc := ty.documentation:
raw = f"\nType: {doc}"
doc = helpers.add_line_prefix(raw, "/// ", empty_lines=True)
return doc
def rust_documentation(self, ty: TypeWithDocumentation) -> str:
doc = super().rust_documentation()
return helpers.join_nonempty_lines((doc, self.type_documentation(ty)))
def this_parameter(self) -> Optional[RustParam]:
if not self.static:
return super().this_parameter()
return None
def to_rust(self, ctx: Context) -> str:
method = f"static_method_of = {self.class_}" if self.static else "method"
getter_ident = f"{inflection.camel_to_snake_case(self.ident)}"
ctx = ctx.push(getter_ident)
if this_param := self.this_parameter():
this_param = str(this_param)
else:
this_param = ""
ty = self.type_to_rust(ctx, True)
code = helpers.join_nonempty_lines(
(
self.rust_documentation(ty),
self.build_wasm_bindgen_attr(method, getter=self.ident),
f"pub fn {getter_ident}({this_param}) -> {ty};",
)
)
if not self.readonly:
ty = self.type_to_rust(ctx, False)
code = helpers.join_nonempty_lines(
(
code,
f"/// Set the `{self.ident}` property.",
self.build_wasm_bindgen_attr(method, setter=self.ident),
f"pub fn set_{getter_ident}({this_param}, val: {ty});",
)
)
return code
_PATTERN_METHOD = re.compile(
r"^ *(?P<mods>(?:get |set |static )*)(?P<ident>\w+)(?P<optional>\??)\((?P<params>.*?)\)(?:: (?P<ret>.+?))?;\n",
re.DOTALL,
)
@dataclasses.dataclass()
class JsMethod(JsMember, JsFunction):
is_static: bool
is_getter: bool
is_setter: bool
@classmethod
def consume(cls, s: str, class_: str) -> Tuple["JsMethod", str]:
doc, s = Documented.consume(s)
match, s = helpers.consume_match(_PATTERN_METHOD, s)
if match["optional"]:
raise ValueError("can't handle optional functions right now")
mods = ModSet.create(match["mods"])
is_static = mods.pop("static")
is_getter = mods.pop("get")
is_setter = mods.pop("set")
mods.assert_empty()
method = cls.from_match(
documentation=doc,
ident=match["ident"],
params=match["params"],
ret=match["ret"],
class_=class_,
is_static=is_static,
is_getter=is_getter,
is_setter=is_setter,
)
return method, s
def ident_to_rust(self) -> str:
ident = super().ident_to_rust()
if self.is_setter:
ident = f"set_{ident}"
return ident
def params_to_rust(self, ctx: Context) -> List[RustParam]:
params = super().params_to_rust(ctx)
if not self.is_static:
params.insert(0, self.this_parameter())
return params
def wasm_bindgen_attr(self) -> str:
args = []
kwargs = {}
if self.is_static:
kwargs["static_method_of"] = self.class_
else:
args.append("method")
if self.is_getter:
kwargs["getter"] = self.ident
if self.is_setter:
kwargs["setter"] = self.ident
return self.build_wasm_bindgen_attr(*args, **kwargs)
_PATTERN_OBJECT_OPEN = re.compile(
r"^ *export (?P<type>interface|class) (?P<ident>\w+)\s+(?:extends (?P<extends>\w+(?:,\s*\w+\s*)*) )?(?:implements (?P<implements>\w+(?:,\s*\w+\s*)*) )?{\n"
)
@dataclasses.dataclass()
class JsObject(Documented, ToRust):
ident: str
members: List[JsMember]
extends: List[str]
implements: List[str]
@staticmethod
def consume(s: str) -> Tuple["JsObject", str]:
doc, s = Documented.consume(s)
match, s = helpers.consume_match(_PATTERN_OBJECT_OPEN, s)
ident = match["ident"]
if extends := match["extends"]:
extends = helpers.split_trim(extends, ",")
if implements := match["implements"]:
implements = helpers.split_trim(implements, ",")
cls = JsClass if match["type"] == "class" else JsInterface
members = []
obj = cls(
documentation=doc,
ident=ident,
members=members,
extends=extends,
implements=implements,
)
body, s = helpers.read_until_closing_bracket(s)
while body:
member, body = JsMember.consume(body, ident)
members.append(member)
return obj, s
def wasm_bindgen_attr(self, *, extends: List[str] = None) -> str:
if extends is None:
extends = []
if v := self.extends:
extends.extend(v)
if v := self.implements:
extends.extend(v)
if extends:
return helpers.build_wasm_bindgen_attr(extends=extends)
return ""
def to_rust(self, ctx: Context) -> str:
ctx = ctx.push(self.ident)
return helpers.join_nonempty_lines(
(
self.rust_documentation(),
f"#[derive(Debug)]",
self.wasm_bindgen_attr(),
f"pub type {self.ident};",
*(member.to_rust(ctx) for member in self.members),
)
)
@dataclasses.dataclass()
class JsClass(JsObject):
...
@dataclasses.dataclass()
class JsInterface(JsObject):
def wasm_bindgen_attr(self) -> str:
return super().wasm_bindgen_attr(extends=["Object"])
|
505607
|
from spectractor import parameters
from spectractor.fit.fit_spectrogram import SpectrogramFitWorkspace, run_spectrogram_minimisation
from spectractor.fit.fit_spectrum import SpectrumFitWorkspace, run_spectrum_minimisation
from spectractor.config import load_config
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument(dest="input", metavar='path', default=["tests/data/reduc_20170530_134_spectrum.fits"],
help="Input fits file name. It can be a list separated by spaces, or it can use * as wildcard.",
nargs='*')
parser.add_argument("-d", "--debug", dest="debug", action="store_true",
help="Enter debug mode (more verbose and plots).", default=False)
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true",
help="Enter verbose (print more stuff).", default=False)
parser.add_argument("-o", "--output_directory", dest="output_directory", default="outputs/",
help="Write results in given output directory (default: ./outputs/).")
parser.add_argument("-l", "--logbook", dest="logbook", default="ctiofulllogbook_jun2017_v5.csv",
help="CSV logbook file. (default: ctiofulllogbook_jun2017_v5.csv).")
parser.add_argument("-c", "--config", dest="config", default="config/ctio.ini",
help="INI config file. (default: config/ctio.ini).")
args = parser.parse_args()
parameters.VERBOSE = args.verbose
if args.debug:
parameters.DEBUG = True
parameters.VERBOSE = True
file_names = args.input
load_config(args.config)
for file_name in file_names:
atmgrid_filename = file_name.replace('sim', 'reduc').replace('spectrum', 'atmsim')
w = SpectrumFitWorkspace(file_name, atmgrid_file_name=atmgrid_filename, nsteps=1000,
burnin=200, nbins=10, verbose=1, plot=True, live_fit=False)
run_spectrum_minimisation(w, method="newton")
w = SpectrogramFitWorkspace(file_name, atmgrid_file_name=atmgrid_filename, nsteps=2000,
burnin=1000, nbins=10, verbose=1, plot=True, live_fit=False)
run_spectrogram_minimisation(w, method="newton")
|
505608
|
from unittest.mock import MagicMock, patch
from geostore.logging_keys import LOG_MESSAGE_LAMBDA_START, LOG_MESSAGE_VALIDATION_COMPLETE
from geostore.step_function import Outcome
from geostore.step_function_keys import DATASET_ID_KEY, VERSION_ID_KEY
from geostore.validation_summary import task
from .aws_utils import any_lambda_context
from .stac_generators import any_dataset_id, any_dataset_version_id
def should_log_event() -> None:
# Given
event = {DATASET_ID_KEY: any_dataset_id(), VERSION_ID_KEY: any_dataset_version_id()}
with patch("geostore.validation_summary.task.validation_results_model_with_meta"), patch(
"geostore.validation_summary.task.LOGGER.debug"
) as logger_mock:
# When
task.lambda_handler(event, any_lambda_context())
# Then
logger_mock.assert_any_call(LOG_MESSAGE_LAMBDA_START, extra={"lambda_input": event})
@patch("geostore.validation_summary.task.validation_results_model_with_meta")
def should_log_failure_result(validation_results_model_mock: MagicMock) -> None:
# Given
event = {DATASET_ID_KEY: any_dataset_id(), VERSION_ID_KEY: any_dataset_version_id()}
validation_results_model_mock.return_value.validation_outcome_index.count.return_value = 1
with patch("geostore.validation_summary.task.LOGGER.debug") as logger_mock:
# When
task.lambda_handler(event, any_lambda_context())
# Then
logger_mock.assert_any_call(
LOG_MESSAGE_VALIDATION_COMPLETE, extra={"outcome": Outcome.PASSED}
)
@patch("geostore.validation_summary.task.validation_results_model_with_meta")
def should_log_success_result(validation_results_model_mock: MagicMock) -> None:
# Given
event = {DATASET_ID_KEY: any_dataset_id(), VERSION_ID_KEY: any_dataset_version_id()}
validation_results_model_mock.return_value.validation_outcome_index.count.return_value = 0
with patch("geostore.validation_summary.task.LOGGER.debug") as logger_mock:
# When
task.lambda_handler(event, any_lambda_context())
# Then
logger_mock.assert_any_call(
LOG_MESSAGE_VALIDATION_COMPLETE, extra={"outcome": Outcome.PASSED}
)
|
505619
|
import unittest
import sys
from jep_pipe import jep_pipe
from jep_pipe import build_java_process_cmd
class TestPreInits(unittest.TestCase):
def test_inits(self):
jep_pipe(build_java_process_cmd('jep.test.TestPreInitVariables'))
|
505637
|
import os, sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import trimesh
import numpy as np
import glob
import json
import open3d as o3d
import multiprocessing as mp
from multiprocessing import Pool
import argparse
import traceback
import shutil
import utils.pcd_utils as pcd_utils
import config as cfg
def translate(mesh_path):
assert os.path.isfile(mesh_path), mesh_path
# Load mesh
mesh = trimesh.load(mesh_path, process=False)
# Apply delta
mesh.apply_translation(delta)
##########################################################################################
if viz:
center_mass_current = np.copy(mesh.center_mass)
sphere_current = o3d.geometry.TriangleMesh.create_sphere(radius=0.01)
sphere_current = sphere_current.translate(center_mass_current, relative=True)
sphere_current.paint_uniform_color([1.0, 0.0, 1.0])
mesh_o3d = o3d.geometry.TriangleMesh(
o3d.utility.Vector3dVector(mesh.vertices),
o3d.utility.Vector3iVector(mesh.faces)
)
num_triangles = np.array(mesh_o3d.triangles).shape[0]
mesh_o3d = mesh_o3d.simplify_quadric_decimation(int(num_triangles / 100))
mesh_wireframe_o3d = o3d.geometry.LineSet.create_from_triangle_mesh(mesh_o3d)
o3d.visualization.draw_geometries([world_frame, sphere, mesh_amass])
o3d.visualization.draw_geometries([world_frame, sphere_current, mesh_wireframe_o3d])
o3d.visualization.draw_geometries([world_frame, sphere, sphere_current, mesh_amass, mesh_wireframe_o3d])
##########################################################################################
return mesh
def translate_mesh(path_dict):
try:
in_path = path_dict['src']
out_path = path_dict['tgt']
if not os.path.isdir(out_path):
os.makedirs(out_path)
if "SPLITS" in in_path or in_path.endswith("json") or in_path.endswith("txt") or in_path.endswith("npz"):
return
#################################
# Translate mesh
#################################
in_mesh_path = os.path.join(in_path, MESH_FILENAME)
out_mesh_path = os.path.join(out_path, MESH_FILENAME)
if not OVERWRITE and os.path.isfile(out_mesh_path):
print("------ Skipping", in_path)
return
mesh = translate(in_mesh_path)
mesh.export(out_mesh_path)
#################################
# Translate watertight mesh
#################################
in_mesh_watertight_path = os.path.join(in_path, 'mesh_watertight_poisson.ply')
if "a_t_pose" in in_path: assert os.path.isfile(in_mesh_watertight_path)
if os.path.isfile(in_mesh_watertight_path):
mesh_watertight = translate(in_mesh_watertight_path)
# Export
out_mesh_watertight_path = os.path.join(out_path, 'mesh_watertight_poisson.ply')
mesh_watertight.export(out_mesh_watertight_path)
print("Processed", in_path)
    except Exception:
        # Report against path_dict['src'] so the message works even if in_path was never assigned.
        print('\t------------ Error with {}: {}'.format(path_dict.get('src'), traceback.format_exc()))
def compute_delta_mixamo_to_amass(mesh_path, center_mass_amass):
mesh = trimesh.load(mesh_path, process=False)
center_mass_mixamo = np.copy(mesh.center_mass)
delta = center_mass_amass - center_mass_mixamo
return delta
def get_center_mass_amass(mesh_path):
mesh = trimesh.load(mesh_path, process=False)
mesh_center_mass = np.copy(mesh.center_mass)
mesh_o3d = o3d.geometry.TriangleMesh(
o3d.utility.Vector3dVector(mesh.vertices),
o3d.utility.Vector3iVector(mesh.faces)
)
mesh_wireframe_o3d = o3d.geometry.LineSet.create_from_triangle_mesh(mesh_o3d)
return mesh_center_mass, mesh_wireframe_o3d
if __name__ == '__main__':
#####################################################################
# Set up
#####################################################################
viz = False
OVERWRITE = False
parser = argparse.ArgumentParser(
description='Applying a translation to a dataset'
)
parser.add_argument('-t', '-max_threads', dest='max_threads', type=int, default=-1)
args = parser.parse_args()
try:
n_jobs = int(os.environ['SLURM_CPUS_ON_NODE'])
assert args.max_threads != 0
if args.max_threads > 0:
n_jobs = args.max_threads
except:
n_jobs = 1
# Data to which we will align
ROOT_amass = f'/cluster/lothlann/ppalafox/datasets/amass'
# -----------------------------------------------------------------------------
ROOT_HDD = f'/cluster_HDD/lothlann/ppalafox/datasets'
# ROOT_mixamo = os.path.join(ROOT_HDD, "mixamo")
# ROOT_mixamo_trans = f'/cluster/lothlann/ppalafox/datasets/mixamo_trans_all'
DATA_NAME = "dfaust" # mixamo
ROOT_DATA_SRC = os.path.join(ROOT_HDD, DATA_NAME)
ROOT_DATA_TGT = f'/cluster/lothlann/ppalafox/datasets/{DATA_NAME}'
MESH_FILENAME = "mesh_raw.ply"
# -----------------------------------------------------------------------------
print()
print(f"Translating {ROOT_DATA_SRC}")
print(f"...into {ROOT_DATA_TGT}")
print(f"...using {ROOT_amass} as reference.")
print()
if not os.path.isdir(ROOT_DATA_TGT):
os.makedirs(ROOT_DATA_TGT)
#####################################################################
# Find center_mass of amass tposes
#####################################################################
# mesh_path_ref = os.path.join(ROOT_amass, "ACCAD_s004", "a_t_pose", "000000", "mesh_normalized.ply")
mesh_path_ref = os.path.join(ROOT_amass, "Transitionsmocap_s003", "a_t_pose", "000000", "mesh_raw.ply")
center_mass_amass, mesh_amass = get_center_mass_amass(mesh_path_ref)
if viz:
sphere = o3d.geometry.TriangleMesh.create_sphere(radius=0.01)
sphere = sphere.translate(center_mass_amass, relative=True)
sphere.paint_uniform_color([1.0, 0.0, 0.0])
world_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.1, origin=[0, 0, 0])
# Delta that transforms points from mixamo's frame to amass' frame
# mesh_path_src = os.path.join(ROOT_DATA_SRC, "sophie", "a_t_pose", "000000", "mesh_watertight_poisson.ply")
mesh_path_src = os.path.join(ROOT_DATA_SRC, "50021", "chicken_wings", "000000", "mesh_raw.ply")
delta = compute_delta_mixamo_to_amass(mesh_path_src, center_mass_amass)
#####################################################################
# Characters
#####################################################################
dataset_name = "<DATASET NAME HERE>"
from utils.parsing_utils import get_dataset_type_from_dataset_name
dataset_type = get_dataset_type_from_dataset_name(dataset_name)
splits_dir = f"{cfg.splits_dir}_{dataset_type}"
labels_json = os.path.join(ROOT_HDD, splits_dir, dataset_name, "labels.json")
with open(labels_json, "r") as f:
labels = json.loads(f.read())
labels = labels
path_to_samples = []
for label in labels:
path_to_samples.append(
{
"src": os.path.join(ROOT_DATA_SRC, label['identity_name'], label['animation_name'], label['sample_id']),
"tgt": os.path.join(ROOT_DATA_TGT, label['identity_name'], label['animation_name'], label['sample_id']),
}
)
print()
print("Number of frames to process:", len(path_to_samples))
print()
# #####################################################################
# # Process
# #####################################################################
print()
print("Jobs", n_jobs)
print()
input("Continue?")
try:
p = Pool(n_jobs)
p.map(translate_mesh, path_to_samples)
finally:
p.close()
p.join()
|
505680
|
import argparse
import os
from operator import itemgetter
import torch
from NVLL.data.lm import DataLM
from NVLL.model.nvrnn import RNNVAE
from NVLL.util.util import GVar
def load_args(path, name):
with open(os.path.join(path, name + '.args'), 'rb') as f:
args = torch.load(f)
return args
def load_data(data_path, eval_batch_size, condition):
    data = DataLM(data_path, eval_batch_size, eval_batch_size, condition)
    return data
def load_model(args, ntoken, path, name):
model = RNNVAE(args, args.enc_type, ntoken, args.emsize,
args.nhid, args.lat_dim, args.nlayers,
dropout=args.dropout, tie_weights=args.tied,
input_z=args.input_z, mix_unk=args.mix_unk,
condition=(args.cd_bit or args.cd_bow),
input_cd_bow=args.cd_bow, input_cd_bit=args.cd_bit)
print("Loading {}".format(name))
model.load_state_dict(torch.load(os.path.join(path, name + '.model')))
from NVLL.util.gpu_flag import GPU_FLAG
if torch.cuda.is_available() and GPU_FLAG:
model = model.cuda()
model = model.eval()
return model
def parse_arg():
parser = argparse.ArgumentParser(description='Transfer experiment')
parser.add_argument('--data_path', type=str, default='data/ptb', help='location of the data corpus')
parser.add_argument('--root_path', type=str, default='/home/jcxu/vae_txt')
parser.add_argument('--model_vmf', type=str,
default="Dataptb_Distvmf_Modelnvrnn_EnclstmBiFalse_Emb100_Hid400_lat50_lr10.0_drop0.5_kappa120.0_auxw0.0001_normfFalse_nlay1_mixunk1.0_inpzTrue_cdbit0_cdbow0_ann0_5.891579498848754")
parser.add_argument('--model_nor', type=str,
default=
"Dataptb_Distnor_Modelnvrnn_EnclstmBiFalse_Emb100_Hid400_lat50_lr10.0_drop0.5_kappa0.1_auxw0.0001_normfFalse_nlay1_mixunk1.0_inpzTrue_cdbit0_cdbow0_ann2_5.933433308374706")
parser.add_argument('--exp_path', type=str, default='/backup2/jcxu/exp-nvrnn')
parser.add_argument('--eval_batch_size', type=int, default=10, help='evaluation batch size')
parser.add_argument('--batch_size', type=int, default=10, help='batch size')
args = parser.parse_args()
return args
class Transfer():
@staticmethod
def write_word_embedding(exp_path, file_name, word_list, embedding_mat):
embedding_mat = embedding_mat.data
path = os.path.join(exp_path, file_name)
print("To save {}".format(os.path.join(exp_path, file_name)))
bag = []
for idx, w in enumerate(word_list):
name = w[0]
emb = embedding_mat[idx]
l = [name] + emb.tolist()
l = [str(x) for x in l]
l = " ".join(l)
bag.append(l)
s = "\n".join(bag)
with open(path, 'w') as fd:
fd.write(s)
def __init__(self, args):
self.data = DataLM(os.path.join(args.root_path, args.data_path),
args.batch_size,
args.eval_batch_size,
condition=True)
word_list = sorted(self.data.dictionary.word2idx.items(), key=itemgetter(1))
vmf_args = load_args(args.exp_path, args.model_vmf)
vmf_model = load_model(vmf_args, len(self.data.dictionary), args.exp_path, args.model_vmf)
vmf_emb = vmf_model.emb.weight
self.write_word_embedding(args.exp_path, args.model_vmf + '_emb', word_list, vmf_emb)
nor_args = load_args(args.exp_path, args.model_nor)
nor_model = load_model(nor_args, len(self.data.dictionary), args.exp_path, args.model_nor)
nor_emb = nor_model.emb.weight
self.write_word_embedding(args.exp_path, args.model_nor + '_emb', word_list, nor_emb)
def synthesis_bow_rep(args):
data = DataLM(os.path.join(args.root_path, args.data_path),
args.batch_size,
args.eval_batch_size,
condition=True)
import random
class Code2Code(torch.nn.Module):
def __init__(self, inp_dim, tgt_dim):
super().__init__()
self.linear = torch.nn.Linear(inp_dim, tgt_dim)
self.linear2 = torch.nn.Linear(tgt_dim, tgt_dim)
self.loss_func = torch.nn.CosineEmbeddingLoss()
def forward(self, inp, tgt):
pred = self.linear(inp)
pred = torch.nn.functional.tanh(pred)
pred = self.linear2(pred)
# print(pred.size())
loss = 1 - torch.nn.functional.cosine_similarity(pred, tgt)
loss = torch.mean(loss)
return loss
class CodeLearner():
def __init__(self, args, condition, c2b, nor):
self.data = DataLM(os.path.join(args.root_path, args.data_path),
args.batch_size,
args.eval_batch_size,
condition=condition)
self.c2b = c2b
if nor:
args.model_run = args.model_nor
else:
args.model_run = args.model_vmf
self.args = load_args(args.exp_path, args.model_run)
self.model = load_model(self.args, len(self.data.dictionary),
args.exp_path, args.model_run)
self.learner = Code2Code(self.model.lat_dim, self.model.ninp)
self.learner.cuda()
self.optim = torch.optim.Adam(self.learner.parameters(), lr=0.001)
def run_train(self):
valid_acc = []
for e in range(10):
print("EPO: {}".format(e))
self.train_epo(self.data.train)
acc = self.evaluate(self.data.test)
valid_acc.append(acc)
return min(valid_acc)
def train_epo(self, train_batches):
self.learner.train()
print("Epo start")
acc_loss = 0
cnt = 0
random.shuffle(train_batches)
for idx, batch in enumerate(train_batches):
self.optim.zero_grad()
seq_len, batch_sz = batch.size()
if self.data.condition:
seq_len -= 1
if self.model.input_cd_bit > 1:
bit = batch[0, :]
bit = GVar(bit)
else:
bit = None
batch = batch[1:, :]
else:
bit = None
feed = self.data.get_feed(batch)
seq_len, batch_sz = feed.size()
emb = self.model.drop(self.model.emb(feed))
if self.model.input_cd_bit > 1:
bit = self.model.enc_bit(bit)
else:
bit = None
h = self.model.forward_enc(emb, bit)
tup, kld, vecs = self.model.forward_build_lat(h) # batchsz, lat dim
if self.model.dist_type == 'vmf':
code = tup['mu']
elif self.model.dist_type == 'nor':
code = tup['mean']
else:
raise NotImplementedError
emb = torch.mean(emb, dim=0)
if self.c2b:
loss = self.learner(code, emb)
else:
loss = self.learner(code, emb)
loss.backward()
self.optim.step()
acc_loss += loss.data[0]
cnt += 1
if idx % 400 == 0 and (idx > 0):
print("Training {}".format(acc_loss / cnt))
acc_loss = 0
cnt = 0
def evaluate(self, dev_batches):
self.learner.eval()
print("Test start")
acc_loss = 0
cnt = 0
random.shuffle(dev_batches)
for idx, batch in enumerate(dev_batches):
self.optim.zero_grad()
seq_len, batch_sz = batch.size()
if self.data.condition:
seq_len -= 1
if self.model.input_cd_bit > 1:
bit = batch[0, :]
bit = GVar(bit)
else:
bit = None
batch = batch[1:, :]
else:
bit = None
feed = self.data.get_feed(batch)
seq_len, batch_sz = feed.size()
emb = self.model.drop(self.model.emb(feed))
if self.model.input_cd_bit > 1:
bit = self.model.enc_bit(bit)
else:
bit = None
h = self.model.forward_enc(emb, bit)
tup, kld, vecs = self.model.forward_build_lat(h) # batchsz, lat dim
if self.model.dist_type == 'vmf':
code = tup['mu']
elif self.model.dist_type == 'nor':
code = tup['mean']
else:
raise NotImplementedError
emb = torch.mean(emb, dim=0)
if self.c2b:
loss = self.learner(code, emb)
else:
loss = self.learner(code, emb)
acc_loss += loss.data[0]
cnt += 1
# print("===============test===============")
# print(acc_loss / cnt)
print(acc_loss / cnt)
return float(acc_loss / cnt)
if __name__ == '__main__':
print("Transfer btw Learnt Code and learnt BoW. "
"Assume data is Yelp and model is vMF or nor.")
args = parse_arg()
# t = Transfer(args)
# Synthesis data
bags = []
for c2b in [True, False]:
for nor in [True]:
learn = CodeLearner(args, condition=False, c2b=c2b, nor=nor)
result = learn.run_train()
bags.append("c2b\t{}\tnor\t{}\tresult:{}\n".format(c2b, nor, result))
print("c2b\t{}\tnor\t{}\tresult:{}".format(c2b, nor, result))
print(args)
print("=" * 100)
for b in bags:
print(b)
|
505767
|
import argparse
from utee import misc, quant, selector
import torch
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
from collections import OrderedDict
import pprint
import os
known_models = [
'mnist', 'svhn', # 28x28
'cifar10', 'cifar100', # 32x32
'stl10', # 96x96
'alexnet', # 224x224
'vgg16', 'vgg16_bn', 'vgg19', 'vgg19_bn', # 224x224
'resnet18', 'resnet34', 'resnet50', 'resnet101','resnet152', # 224x224
'squeezenet_v0', 'squeezenet_v1', #224x224
'inception_v3', # 299x299
]
parser = argparse.ArgumentParser(description='PyTorch Quantization')
parser.add_argument('--use_model_zoo', type=int, default=1, help='whether to use a model from the model zoo')
parser.add_argument('--type', default='MobileNetV2', help='|'.join(selector.known_models))
parser.add_argument('--data_root', default='~/dataset', help='folder of the dataset')
parser.add_argument('--model_root', default='~/pytorch-mobilenet-v2/mobilenetv2_718.pth', help='path of the pre-trained parameters')
parser.add_argument('--net_root', default='~/pytorch-mobilenet-v2/MobileNetV2.py', help='path of the network definition file')
parser.add_argument('--test', type=int, default=1, help='test data distribution')
parser.add_argument('--batch_size', type=int, default=1, help='input batch size for training')
parser.add_argument('--n_sample', type=int, default=10, help='number of samples to infer the scaling factor')
parser.add_argument('--gpu', default="0", help='index of gpus to use')
parser.add_argument('--ngpu', type=int, default=1, help='number of gpus to use')
parser.add_argument('--logdir', default='log/default', help='folder to save to the log')
parser.add_argument('--replace_bn', type=int, default=0, help='whether to replace bn layers with 1x1 conv')
parser.add_argument('--map_bn', type=int, default=1, help='whether to fold bn layers into conv layers')
parser.add_argument('--input_size', type=int, default=224, help='input size of image')
parser.add_argument('--shuffle', type=int, default=1, help='data shuffle')
parser.add_argument('--overflow_rate', type=float, default=0.0, help='overflow rate')
parser.add_argument('--quant_method', default='linear', help='linear|minmax|log|tanh|scale')
parser.add_argument('--param_bits', type=int, default=8, help='bit-width for parameters')
parser.add_argument('--bn_bits', type=int, default=8, help='bit-width for running mean and std')
parser.add_argument('--fwd_bits', type=int, default=8, help='bit-width for layer output')
args = parser.parse_args()
args.gpu = misc.auto_select_gpu(utility_bound=0, num_gpu=args.ngpu, selected_gpus=args.gpu)
args.ngpu = len(args.gpu)
misc.ensure_dir(args.logdir)
args.model_root = misc.expand_user(args.model_root)
args.data_root = misc.expand_user(args.data_root)
args.input_size = 299 if 'inception' in args.type else args.input_size
assert args.quant_method in ['linear', 'minmax', 'log', 'tanh','scale']
print("=================FLAGS==================")
for k, v in args.__dict__.items():
print('{}: {}'.format(k, v))
print("========================================")
assert torch.cuda.is_available(), 'no cuda'
#torch.manual_seed(args.seed)
#torch.cuda.manual_seed(args.seed)
# load model and dataset fetcher
if args.use_model_zoo:
args.model_root = os.path.expanduser('~/.torch/models/')
model, ds_fetcher, is_imagenet = selector.select(model_name=args.type, model_root=args.model_root)
args.ngpu = args.ngpu if is_imagenet else 1
else:
args.model_root = '~/pytorch-mobilenet-v2/mobilenetv2_718.pth'
model, ds_fetcher = selector.find(
model_name = args.type,
model_root = args.model_root,
net_root = args.net_root)
# replace bn with 1x1 conv
if args.replace_bn:
quant.replace_bn(model)
# map bn to conv
if args.map_bn:
quant.bn2conv(model)
# quantize parameters
print("=================quantize parameters==================")
if args.param_bits < 32:
state_dict = model.state_dict()
state_dict_quant = OrderedDict()
sf_dict = OrderedDict()
for k, v in state_dict.items():
if 'running' in k: # quantize bn layer
#print("k:{}, v:\n{}".format(k,v))
if args.bn_bits >= 32:
print("Ignoring {}".format(k))
state_dict_quant[k] = v
continue
else:
bits = args.bn_bits
else:
bits = args.param_bits
if args.quant_method == 'linear':
sf = bits - 1. - quant.compute_integral_part(v, overflow_rate=args.overflow_rate)
# sf stands for float bits
v_quant = quant.linear_quantize(v, sf, bits=bits)
#if 'bias' in k:
#print("{}, sf:{}, quantized value:\n{}".format(k,sf, v_quant.sort(dim=0, descending=True)[0]))
elif args.quant_method == 'log':
v_quant = quant.log_minmax_quantize(v, bits=bits)
elif args.quant_method == 'minmax':
v_quant = quant.min_max_quantize(v, bits=bits)
else:
v_quant = quant.tanh_quantize(v, bits=bits)
state_dict_quant[k] = v_quant
print("k={0:<35}, bits={1:<5}, sf={2:d>}".format(k,bits,sf))
model.load_state_dict(state_dict_quant)
print("======================================================")
# quantize forward activation
print("=================quantize activation==================")
if args.fwd_bits < 32:
model = quant.duplicate_model_with_quant(model,
bits=args.fwd_bits,
overflow_rate=args.overflow_rate,
counter=args.n_sample,
type=args.quant_method)
# ds_fetcher is defined in imagenet/dataset.py (the get function)
val_ds_tmp = ds_fetcher(batch_size=args.batch_size,
data_root=args.data_root,
train=False,
val = True,
shuffle=args.shuffle,
input_size=args.input_size
)
print("load dataset done")
misc.eval_model(model, val_ds_tmp, ngpu=1, n_sample=args.n_sample)
print("======================================================")
# eval model
print("===================eval model=========================")
print(model)
val_ds = ds_fetcher(batch_size=args.batch_size,
data_root=args.data_root,
train=False,
val = True,
shuffle=args.shuffle,
input_size=args.input_size)
if args.test:
acc1, acc5 = misc.eval_model(model, val_ds, ngpu=args.ngpu, n_sample=1)
else:
acc1, acc5 = misc.eval_model(model, val_ds, ngpu=args.ngpu)
print("======================================================")
res_str = "type={}, quant_method={}, \n \
param_bits={}, bn_bits={}, fwd_bits={}, overflow_rate={},\n \
acc1={:.4f}, acc5={:.4f}".format(
args.type, args.quant_method, args.param_bits, args.bn_bits,
args.fwd_bits, args.overflow_rate, acc1, acc5)
print(res_str)
with open('acc1_acc5.txt', 'a') as f:
f.write(res_str + '\n')
# show data distribution
if args.test:
#import visdom
import numpy as np
#viz = visdom.Visdom()
import matplotlib.pyplot as plt
'''
# plot bar
for k,v in quant.extractor.items():
#print(k)
des_v= np.sort(np.abs(v).reshape(-1))
nums,times = np.unique(des_v,return_counts=True)
fig = plt.figure()
ax1 = plt.subplot(111)
width = 0.2
print(nums)
print(times)
rect = ax1.bar(left=nums,height=times,width=width,color="blue")
ax1.set_title(k)
plt.show()
'''
'''
# plot hist
for k,v in quant.extractor.items():
#print(k)
des_v= np.sort(np.abs(v).reshape(-1))
nums,times = np.unique(des_v,return_counts=True)
fig = plt.figure()
print("nums\n{}".format(nums))
print("times\n{}".format(times))
plt.hist(des_v, bins=len(nums), density=0, facecolor="blue", edgecolor="black", alpha=0.7)
plt.xlabel("nums")
# show the y-axis label
plt.ylabel("times")
# show the figure title
plt.title("nums hist")
plt.show()
'''
|
505768
|
import logging
import json
import re
import random
from df_engine.core import Context, Actor
import common.dff.integration.context as int_ctx
import common.dff.integration.condition as int_cnd
logger = logging.getLogger(__name__)
with open(
"data/stories.json",
) as stories_json:
stories = json.load(stories_json)
with open(
"data/phrases.json",
) as phrases_json:
phrases = json.load(phrases_json)
def get_previous_node(ctx: Context) -> str:
try:
return [node_tuple[1] for node_tuple in ctx.labels.values()][-2]
except Exception:
return "start_node"
def get_story_type(ctx: Context, actor: Actor) -> str:
human_sentence = ctx.last_request
if re.search("fun((ny)|(niest)){0,1}", human_sentence):
return "funny"
elif re.search("(horror)|(scary)|(frightening)|(spooky)", human_sentence):
return "scary"
elif re.search(
"(bedtime)|(good)|(kind)|(baby)|(children)|(good night)|(for kid(s){0,1})",
human_sentence,
):
return "bedtime"
else:
return ""
def get_story_left(ctx: Context, actor: Actor) -> str:
story_type = get_story_type(ctx, actor)
stories_left = list(set(stories.get(story_type, [])) - set(ctx.misc.get("stories_told", [])))
try:
return random.choice(sorted(stories_left))
except Exception:
return ""
def choose_story(ctx: Context, actor: Actor, *args, **kwargs) -> str:
prev_node = get_previous_node(ctx)
story = get_story_left(ctx, actor)
story_type = get_story_type(ctx, actor)
setup = stories.get(story_type, {}).get(story, {}).get("setup", "")
what_happend_next_phrase = random.choice(sorted(phrases.get("what_happend_next", [])))
# include the "sure" phrase if the user specified a story type at the beginning, otherwise include nothing
sure_phrase = random.choice(sorted(phrases.get("sure", []))) if prev_node == "start_node" else ""
ctx.misc["stories_told"] = ctx.misc.get("stories_told", []) + [story]
ctx.misc["story"] = story
ctx.misc["story_type"] = story_type
return sure_phrase + " " + setup + " " + "..." + " " + what_happend_next_phrase
def which_story(ctx: Context, actor: Actor, *args, **kwargs) -> str:
prev_node = get_previous_node(ctx)
if prev_node in ["start_node", "fallback_node"]:
int_ctx.set_can_continue(ctx, actor, "MUST_CONTINUE")
# include the "sure" phrase if the user asked for a story; include nothing if the agent proposed to tell one
sure_phrase = random.choice(sorted(phrases.get("sure", []))) if prev_node == "start_node" else ""
return sure_phrase + " " + random.choice(sorted(phrases.get("which_story", [])))
elif prev_node == "choose_story_node":
int_ctx.set_can_continue(ctx, actor, "CANNOT_CONTINUE")
return random.choice(sorted(phrases.get("no", [])))
else:
return "Ooops."
def tell_punchline(ctx: Context, actor: Actor, *args, **kwargs) -> str:
int_ctx.set_can_continue(ctx, actor, "CAN_CONTINUE")
if int_cnd.is_do_not_know_vars(ctx, actor): int_ctx.set_confidence(ctx, actor, 0.8)
story = ctx.misc.get("story", "")
story_type = ctx.misc.get("story_type", "")
return stories.get(story_type, {}).get(story, {}).get("punchline", "")
def fallback(ctx: Context, actor: Actor, *args, **kwargs) -> str:
prev_node = get_previous_node(ctx)
story_type = get_story_type(ctx, actor)
story_left = get_story_left(ctx, actor)
# ran out of stories of the requested type
if prev_node == "which_story_node" and story_type and not story_left:
int_ctx.set_can_continue(ctx, actor, "CANNOT_CONTINUE")
return "Oh, sorry, but I've run out of stories."
# no stories
elif prev_node == "which_story_node" and not story_type:
int_ctx.set_can_continue(ctx, actor, "CAN_CONTINUE")
return random.choice(sorted(phrases.get("no_stories", [])))
# if prev_node is tell_punchline_node or fallback_node
else:
int_ctx.set_can_continue(ctx, actor, "MUST_CONTINUE")
if int_cnd.is_do_not_know_vars(ctx, actor): int_ctx.set_confidence(ctx, actor, 0.5)
return random.choice(sorted(phrases.get("start_phrases", [])))
|
505842
|
if not "mo21=o" in sm.getQRValue(22013):
sm.avatarOriented("Effect/OnUserEff.img/guideEffect/evanTutorial/evanBalloon21")
sm.addQRValue(22013, "mo21=o")
|
505845
|
import argparse
import glob
import os
import random
import warnings
from PyQt5 import QtCore
from PyQt5.QtCore import Qt, QTimer, QRect, QPoint, QFile, QTextStream
from PyQt5.QtGui import QPixmap, QImage, QTransform
from PyQt5.QtWidgets import *
from canvas import Canvas
from display_pad import DisplayPad
from hparams import *
from main_window import Ui_MainWindow
parser = argparse.ArgumentParser()
parser.add_argument('--use_cpu', action='store_true',
help='whether to use cpu to render images '
'(TVM model only supports GPU)')
parser.add_argument('--model', type=str, default='tvm', choices=['tvm', 'compressed', 'legacy', 'original'],
help='which model do you use [tvm | original | legacy | compressed]')
opt = parser.parse_args()
if opt.model == 'tvm' and opt.use_cpu:
warnings.warn('TVM model only supports gpu')
class MainWindow(QMainWindow, Ui_MainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.setupUi(self)
self.canvas = Canvas()
self.canvas.initialize()
# We need to enable mouse tracking to follow the mouse without the button pressed.
self.canvas.setMouseTracking(True)
# Enable focus to capture key inputs.
self.canvas.setFocusPolicy(Qt.StrongFocus)
self.horizontalLayout.addWidget(self.canvas, 0, QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)
self.display_pad = DisplayPad(self.canvas, opt)
self.horizontalLayout.addWidget(self.display_pad, 0, QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)
self.canvas.set_display_pad(self.display_pad)
# Setup the mode buttons
mode_group = QButtonGroup(self)
mode_group.setExclusive(True)
self.clearButton.released.connect(self.canvas.initialize)
for mode in MODES:
btn = getattr(self, '%sButton' % mode)
print(btn)
btn.pressed.connect(lambda mode=mode: self.canvas.set_mode(mode))
mode_group.addButton(btn)
# Setup up action signals
self.actionCopy.triggered.connect(self.copy_to_clipboard)
# Initialize animation timer.
self.timer = QTimer()
self.timer.timeout.connect(self.canvas.on_timer)
self.timer.setInterval(100)
self.timer.start()
# Setup to agree with Canvas.
self.set_primary_color('#000000')
self.set_secondary_color('#ffffff')
# Signals for canvas-initiated color changes (dropper).
self.canvas.primary_color_updated.connect(self.set_primary_color)
self.canvas.secondary_color_updated.connect(self.set_secondary_color)
# Setup the stamp state.
self.current_stamp_n = -1
# self.next_stamp()
# self.stampnextButton.pressed.connect(self.next_stamp)
# Menu options
self.actionNewImage.triggered.connect(self.canvas.initialize)
self.actionOpenImage.triggered.connect(self.open_file)
self.actionRandomSketch.triggered.connect(self.open_random)
self.actionSaveImage.triggered.connect(self.save_file)
self.actionSaveGenerated.triggered.connect(self.save_generated)
self.actionClearImage.triggered.connect(self.canvas.reset)
self.actionInvertColors.triggered.connect(self.invert)
self.actionFlipHorizontal.triggered.connect(self.flip_horizontal)
self.actionFlipVertical.triggered.connect(self.flip_vertical)
# Setup the drawing toolbar.
self.show()
self.canvas.update()
def choose_color(self, callback):
dlg = QColorDialog()
if dlg.exec():
callback(dlg.selectedColor().name())
def set_primary_color(self, hex):
self.canvas.set_primary_color(hex)
# self.primaryButton.setStyleSheet('QPushButton { background-color: %s; }' % hex)
def set_secondary_color(self, hex):
self.canvas.set_secondary_color(hex)
# self.secondaryButton.setStyleSheet('QPushButton { background-color: %s; }' % hex)
def copy_to_clipboard(self):
clipboard = QApplication.clipboard()
if self.canvas.mode == 'selectrect' and self.canvas.locked:
clipboard.setPixmap(self.canvas.selectrect_copy())
elif self.canvas.mode == 'selectpoly' and self.canvas.locked:
clipboard.setPixmap(self.canvas.selectpoly_copy())
else:
clipboard.setPixmap(self.canvas.pixmap())
def open_random(self):
choices = glob.glob("sketch/*.png")
path = random.choice(choices)
if os.path.exists(path):
pixmap = QPixmap()
pixmap.load(path)
# We need to crop down to the size of our canvas. Get the size of the loaded image.
iw = pixmap.width()
ih = pixmap.height()
# Get the size of the space we're filling.
cw, ch = CANVAS_DIMENSIONS
if iw / cw < ih / ch: # The height is relatively bigger than the width.
pixmap = pixmap.scaledToWidth(cw)
hoff = (pixmap.height() - ch) // 2
pixmap = pixmap.copy(
QRect(QPoint(0, hoff), QPoint(cw, pixmap.height() - hoff))
)
elif iw / cw > ih / ch: # The width is relatively bigger than the height.
pixmap = pixmap.scaledToHeight(ch)
woff = (pixmap.width() - cw) // 2
pixmap = pixmap.copy(
QRect(QPoint(woff, 0), QPoint(pixmap.width() - woff, ch))
)
self.canvas.setPixmap(pixmap)
self.canvas.update()
def open_file(self):
"""
Open image file for editing, scaling the smaller dimension and cropping the remainder.
:return:
"""
path, _ = QFileDialog.getOpenFileName(self, "Open file", "",
"PNG image files (*.png); JPEG image files (*jpg); All files (*.*)")
if path:
pixmap = QPixmap()
pixmap.load(path)
# We need to crop down to the size of our canvas. Get the size of the loaded image.
iw = pixmap.width()
ih = pixmap.height()
# Get the size of the space we're filling.
cw, ch = CANVAS_DIMENSIONS
if iw / cw < ih / ch: # The height is relatively bigger than the width.
pixmap = pixmap.scaledToWidth(cw)
hoff = (pixmap.height() - ch) // 2
pixmap = pixmap.copy(
QRect(QPoint(0, hoff), QPoint(cw, pixmap.height() - hoff))
)
elif iw / cw > ih / ch: # The width is relatively bigger than the height.
pixmap = pixmap.scaledToHeight(ch)
woff = (pixmap.width() - cw) // 2
pixmap = pixmap.copy(
QRect(QPoint(woff, 0), QPoint(pixmap.width() - woff, ch))
)
self.canvas.setPixmap(pixmap)
self.canvas.update()
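# Hedged worked example of the crop above: assuming CANVAS_DIMENSIONS = (512, 512)
# (a hypothetical value) and a 1000x1500 image, iw/cw (1.95) < ih/ch (2.93), so the
# pixmap is scaled to width 512 (its height becomes 768), hoff = (768 - 512) // 2 = 128,
# and the copy keeps the vertical band from y=128 to y=640 before it is shown.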
def save_file(self):
"""
Save active canvas to image file.
:return:
"""
path, _ = QFileDialog.getSaveFileName(self, "Save sketch file", "", "PNG Image file (*.png)")
if path:
pixmap = self.canvas.pixmap()
pixmap.save(path, "PNG")
def save_generated(self):
path, _ = QFileDialog.getSaveFileName(self, "Save generated file", "", "PNG Image file (*.png)")
if path:
pixmap = self.display_pad.pixmap()
pixmap.save(path, "PNG")
def invert(self):
img = QImage(self.canvas.pixmap())
img.invertPixels()
pixmap = QPixmap()
pixmap.convertFromImage(img)
self.canvas.setPixmap(pixmap)
def flip_horizontal(self):
pixmap = self.canvas.pixmap()
self.canvas.setPixmap(pixmap.transformed(QTransform().scale(-1, 1)))
def flip_vertical(self):
pixmap = self.canvas.pixmap()
self.canvas.setPixmap(pixmap.transformed(QTransform().scale(1, -1)))
if __name__ == '__main__':
app = QApplication([])
window = MainWindow()
if DART_THEME:
file = QFile("qdark.stylesheet")
file.open(QFile.ReadOnly | QFile.Text)
stream = QTextStream(file)
app.setStyleSheet(stream.readAll())
app.exec_()
|
505855
|
from .arrays import *
from .errors import *
from .timeseries import *
from .readers import *
from .xml import *
__displayname__ = 'Internal Helpers'
|
505909
|
from django import template
from django.template import Node
from django.conf import settings
register = template.Library()
@register.filter(name='fdivide')
def fdivide(value,arg):
return float(value) / float(arg)
@register.filter(name='fmultiply')
def fmultiply(value,arg):
return float(value) * float(arg)
def var_tag_compiler(params, defaults, name, node_class, parser, token):
"Returns a template.Node subclass."
bits = token.split_contents()[1:]
return node_class(map(parser.compile_filter, bits))
def simple_var_tag(func):
params, xx, xxx, defaults = template.getargspec(func)
class SimpleNode(Node):
def __init__(self, vars_to_resolve):
self.vars_to_resolve = vars_to_resolve
def render(self, context):
resolved_vars = [var.resolve(context, True) for var in self.vars_to_resolve]
return func(*resolved_vars)
compile_func = template.curry(var_tag_compiler, params, defaults, getattr(func, "_decorated_function", func).__name__, SimpleNode)
compile_func.__doc__ = func.__doc__
register.tag(getattr(func, "_decorated_function", func).__name__, compile_func)
return func
@simple_var_tag
def static(*parts):
path = ''.join(parts)
urls = settings.STATIC_URLS
size = len(urls)
h = hash(path) % size
if h < 0:
h += size
return urls[h] + '/' + path
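# Hedged example: assuming settings.STATIC_URLS = ['//cdn1.example.com',
# '//cdn2.example.com'] (hypothetical values), {% static "css/" "site.css" %} joins the
# parts into "css/site.css" and hashes that path, so the same file is always served
# from the same host while assets are spread across the configured prefixes.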
@register.filter(name='get')
def doget(value,arg):
return dict(value).get(arg) or ''
@register.filter(name='a1000times')
def a1000times(value):
return value * 1000
@register.filter(name='range')
def rangefilter(value):
return xrange(int(value))
|
505940
|
import bst_traversal as program
import unittest
class BST:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
def insert(self, value):
if value < self.value:
if self.left is None:
self.left = BST(value)
else:
self.left.insert(value)
else:
if self.right is None:
self.right = BST(value)
else:
self.right.insert(value)
return self
test1 = BST(10).insert(5).insert(15)
test2 = BST(10).insert(5).insert(15).insert(5).insert(2).insert(1).insert(22)
test3 = BST(100).insert(5).insert(15).insert(5).insert(2).insert(1).insert(22) \
.insert(1).insert(1).insert(3).insert(1).insert(1).insert(502).insert(55000) \
.insert(204).insert(205).insert(207).insert(206).insert(208).insert(203)
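# The bst_traversal module under test is not shown here; below is a minimal reference
# sketch of what these tests assume it implements (hypothetical helpers, named with a
# leading underscore so they do not shadow the imported module):
def _reference_in_order(tree, array):
    # left subtree, node, right subtree
    if tree is not None:
        _reference_in_order(tree.left, array)
        array.append(tree.value)
        _reference_in_order(tree.right, array)
    return array
def _reference_pre_order(tree, array):
    # node, left subtree, right subtree
    if tree is not None:
        array.append(tree.value)
        _reference_pre_order(tree.left, array)
        _reference_pre_order(tree.right, array)
    return array
def _reference_post_order(tree, array):
    # left subtree, right subtree, node
    if tree is not None:
        _reference_post_order(tree.left, array)
        _reference_post_order(tree.right, array)
        array.append(tree.value)
    return array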
class TestProgram(unittest.TestCase):
def test_case_1(self):
self.assertEqual(program.inOrderTraverse(test1, []), [5, 10, 15])
def test_case_2(self):
self.assertEqual(program.inOrderTraverse(test2, []), [1, 2, 5, 5, 10, 15, 22])
def test_case_3(self):
self.assertEqual(program.inOrderTraverse(test3, []), [1, 1, 1, 1, 1, 2, 3, 5, 5, 15, 22, 100, 203, 204, 205, 206, 207, 208, 502, 55000])
def test_case_4(self):
self.assertEqual(program.preOrderTraverse(test1, []), [10, 5, 15])
def test_case_5(self):
self.assertEqual(program.preOrderTraverse(test2, []), [10, 5, 2, 1, 5, 15, 22])
def test_case_6(self):
self.assertEqual(program.preOrderTraverse(test3, []), [100, 5, 2, 1, 1, 1, 1, 1, 3, 15, 5, 22, 502, 204, 203, 205, 207, 206, 208, 55000])
def test_case_7(self):
self.assertEqual(program.postOrderTraverse(test1, []), [5, 15, 10])
def test_case_8(self):
self.assertEqual(program.postOrderTraverse(test2, []), [1, 2, 5, 5, 22, 15, 10])
def test_case_9(self):
self.assertEqual(program.postOrderTraverse(test3, []), [1, 1, 1, 1, 1, 3, 2, 5, 22, 15, 5, 203, 206, 208, 207, 205, 204, 55000, 502, 100])
if __name__ == "__main__":
unittest.main()
|
505943
|
import unittest
from lxml import etree
from soapfish import core, soap, soap11, soap12
SOAP11_ERROR_MESSAGE = '''
<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/">
<SOAP-ENV:Body>
<SOAP-ENV:Fault>
<faultcode>Result</faultcode>
<faultstring/>
<faultactor>Resultset empty2.</faultactor>
<detail/>
</SOAP-ENV:Fault>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
SOAP11_ERROR_MESSAGE_NO_ACTOR = '''
<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/">
<SOAP-ENV:Body>
<SOAP-ENV:Fault>
<faultcode>Result</faultcode>
<faultstring>String</faultstring>
<detail/>
</SOAP-ENV:Fault>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
SOAP12_ERROR_ROLE = '''<env:Envelope xmlns:env="http://www.w3.org/2003/05/soap-envelope">
<env:Header/><env:Body>
<env:Fault>
<env:Code><env:Value>env:Sender</env:Value></env:Code>
<env:Reason><env:Text xml:lang="en-US">
Message does not have necessary info
</env:Text></env:Reason>
<env:Role>http://gizmos.com/order</env:Role>
<env:Detail>
<PO:order xmlns:PO="http://gizmos.com/orders/">
Quantity element does not have a value</PO:order>
<PO:confirmation xmlns:PO="http://gizmos.com/confirm">
Incomplete address: no zip code</PO:confirmation>
</env:Detail></env:Fault>
</env:Body></env:Envelope>
'''
SOAP12_ERROR_NOROLE = '''<env:Envelope xmlns:env="http://www.w3.org/2003/05/soap-envelope">
<env:Header/><env:Body>
<env:Fault>
<env:Code><env:Value>env:Sender</env:Value></env:Code>
<env:Reason><env:Text xml:lang="en-US">
Message does not have necessary info
</env:Text></env:Reason>
<env:Detail>
<PO:order xmlns:PO="http://gizmos.com/orders/">
Quantity element does not have a value</PO:order>
<PO:confirmation xmlns:PO="http://gizmos.com/confirm">
Incomplete address: no zip code</PO:confirmation>
</env:Detail></env:Fault>
</env:Body></env:Envelope>
'''
class ErrorHandling(unittest.TestCase):
def test_soap11_actor_parsing(self):
envelope = soap11.Envelope.parsexml(SOAP11_ERROR_MESSAGE)
code, message, actor = soap11.parse_fault_message(envelope.Body.Fault)
self.assertEqual('Result', code)
self.assertIsNone(message)
self.assertEqual('Resultset empty2.', actor)
def test_soap11_noactor_parsing(self):
envelope = soap11.Envelope.parsexml(SOAP11_ERROR_MESSAGE_NO_ACTOR)
code, message, actor = soap11.parse_fault_message(envelope.Body.Fault)
self.assertEqual('Result', code)
self.assertEqual('String', message)
self.assertIsNone(actor)
def test_soap11_fault_handling(self):
service = soap.Service(
location='mock_location',
methods=[],
name=None,
schemas=[],
targetNamespace=None,
version=soap.SOAPVersion.SOAP11,
)
stub = soap.Stub(location='empty', service=service)
with self.assertRaises(core.SOAPError) as cm:
stub._handle_response(None, None, SOAP11_ERROR_MESSAGE)
self.assertEqual('Result', cm.exception.code)
self.assertIsNone(cm.exception.message)
self.assertEqual('Resultset empty2.', cm.exception.actor)
def test_soap12_actor_parsing(self):
envelope = soap12.Envelope.parsexml(SOAP12_ERROR_ROLE)
code, message, actor = soap12.parse_fault_message(envelope.Body.Fault)
self.assertEqual('env:Sender', code)
self.assertEqual('\nMessage does not have necessary info\n', message)
self.assertEqual('http://gizmos.com/order', actor)
def test_soap12_noactor_parsing(self):
envelope = soap12.Envelope.parsexml(SOAP12_ERROR_NOROLE)
code, message, actor = soap12.parse_fault_message(envelope.Body.Fault)
self.assertEqual('env:Sender', code)
self.assertEqual('\nMessage does not have necessary info\n', message)
self.assertIsNone(actor)
def test_soap12_fault_handling(self):
service = soap.Service(
location='mock_location',
methods=[],
name=None,
schemas=[],
targetNamespace=None,
version=soap.SOAPVersion.SOAP12,
)
stub = soap.Stub(location='empty', service=service)
with self.assertRaises(core.SOAPError) as cm:
stub._handle_response(None, None, SOAP12_ERROR_ROLE)
self.assertEqual('env:Sender', cm.exception.code)
self.assertEqual('\nMessage does not have necessary info\n', cm.exception.message)
self.assertEqual('http://gizmos.com/order', cm.exception.actor)
class SOAPVersionTest(unittest.TestCase):
WSDL = (
'<?xml version="1.0" encoding="utf-8"?>'
'<definitions '
'xmlns:http="http://schemas.xmlsoap.org/wsdl/http/" '
'xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/" '
'xmlns:soap12="http://schemas.xmlsoap.org/wsdl/soap12/" '
'xmlns:s="http://www.w3.org/2001/XMLSchema" '
'xmlns:s0="http://tempuri.org/encodedTypes" '
'xmlns:soapenc="http://schemas.xmlsoap.org/soap/encoding/" '
'xmlns:tns="http://tempuri.org/" '
'xmlns:tm="http://microsoft.com/wsdl/mime/textMatching/" '
'xmlns:mime="http://schemas.xmlsoap.org/wsdl/mime/" '
'targetNamespace="http://tempuri.org/" '
'xmlns="http://schemas.xmlsoap.org/wsdl/">'
'<binding name="HelloWorldSoap" type="tns:HelloWorldSoap">'
'<{soap_version}:binding transport="http://schemas.xmlsoap.org/soap/http" style="document" />'
'</binding>'
'</definitions>'
)
def test_can_detect_soap12_from_xml(self):
WSDL = self.WSDL.format(soap_version='soap12').encode()
xml = etree.fromstring(WSDL)
soap_version = soap.SOAPVersion.get_version_from_xml(xml)
self.assertEqual(soap.SOAPVersion.SOAP12, soap_version)
def test_can_detect_soap11_from_xml(self):
WSDL = self.WSDL.format(soap_version='soap').encode()
xml = etree.fromstring(WSDL)
soap_version = soap.SOAPVersion.get_version_from_xml(xml)
self.assertEqual(soap.SOAPVersion.SOAP11, soap_version)
def test_get_version_soap11(self):
v = soap.SOAPVersion.get_version(soap11.ENVELOPE_NAMESPACE)
self.assertEqual(soap11.NAME, v.NAME)
v = soap.SOAPVersion.get_version(soap11.BINDING_NAMESPACE)
self.assertEqual(soap11.NAME, v.NAME)
def test_get_version_soap12(self):
v = soap.SOAPVersion.get_version(soap12.ENVELOPE_NAMESPACE)
self.assertEqual(soap12.NAME, v.NAME)
v = soap.SOAPVersion.get_version(soap12.BINDING_NAMESPACE)
self.assertEqual(soap12.NAME, v.NAME)
|
506070
|
from . import Loader
import pandas as pd
import os
class OrbitalInsightLoader(Loader):
dataset = 'ORBITALINSIGHT'
fileglob = 'ORBITALINSIGHT_*.csv'
columns = ['storage.capacity.estimate', 'volume.estimate.stderr', 'scaled.estimate.stderr',
'total.available.tanks', 'smoothed.estimate', 'sampled.tanks.1w',
'sampled.tanks.1d', 'volume.estimate', 'scaled.estimate', 'truth_value_mb',
'sampled.tanks', 'date', 'location']
dtypes = {'category': ('location',),
'int64': ('sampled.tanks', 'sampled.tanks.1d', 'sampled.tanks.1w', 'total.available.tanks'),
'float': ('smoothed.estimate', 'storage.capacity.estimate',
'truth_value_mb', 'volume.estimate', 'volume.estimate.stderr',
'scaled.estimate', 'scaled.estimate.stderr'),
'date': 'date'}
def _load(self, file):
_, location, sublocation, _ = os.path.basename(file).split('_', 3)
if sublocation != '0':
location = location + '_' + sublocation
df = pd.read_csv(file, low_memory=False)
df['location'] = location
return df
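# Hedged example: a file named "ORBITALINSIGHT_cushing_0_20190101.csv" (hypothetical
# name matching the fileglob) yields location "cushing", while
# "ORBITALINSIGHT_cushing_west_20190101.csv" yields "cushing_west", since a non-zero
# sublocation is appended to the location.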
orbitalInsightLoader = OrbitalInsightLoader()
|
506081
|
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, classification_report
from pymatch.utils.functional import scale_confusion_matrix, sliding_window
from pymatch.utils.DataHandler import DataHandler
from pymatch.utils.exception import TerminationException
import pandas as pd
import seaborn as sn
import wandb
import numpy as np
import torch
import os
class Callback:
def __init__(self, frequency=1):
self.frequency = frequency
self.started = False
def __call__(self, model, *args, **kwargs):
if model.train_dict['epochs_run'] % self.frequency == 0:
return self.forward(model, *args, **kwargs)
def forward(self, model, *args, **kwargs):
raise NotImplementedError
def start(self, model):
pass
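# Hedged illustration of the Callback contract above (not part of the original callback
# set): forward() only runs every `frequency` epochs because __call__ gates it on
# model.train_dict['epochs_run'].
class LossPrinterExample(Callback):
    """Toy callback that prints the most recent training loss, if one is recorded."""
    def forward(self, model, *args, **kwargs):
        losses = model.train_dict.get('train_losses', [])
        if losses:
            print('epoch {}: train loss {:.4f}'.format(model.train_dict['epochs_run'], losses[-1]))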
class Checkpointer(Callback):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.path = None
def start(self, model):
if not self.started:
self.path = f'{model.dump_path}/checkpoint'
if not os.path.exists(self.path):
os.makedirs(self.path)
self.started = True
def forward(self, model):
model.dump_checkpoint(path=self.path, tag='checkpoint')
class Validator(Callback):
def __init__(self, data_loader, verbose=1, *args, **kwargs):
super().__init__(*args, **kwargs)
self.data_loader = data_loader
self.verbose = verbose
def forward(self, model):
train_mode = model.model.training
with torch.no_grad():
model.eval()
model.to(model.device)
loss = []
accuracies = []
for data, y in self.data_loader:
data = data.to(model.device)
y = y.to(model.device)
y_pred = model.model(data)
loss += [model.crit(y_pred, y)]
y_pred = y_pred.max(dim=1)[1]
accuracies += [(y_pred == y).float()]
loss = torch.stack(loss).mean().item()
model.train_dict['val_losses'] = model.train_dict.get('val_losses', []) + [loss]
model.train_dict['val_epochs'] = model.train_dict.get('val_epochs', []) + [model.train_dict['epochs_run']]
accuracy = torch.cat(accuracies).mean().item()
model.train_dict['val_accuracy'] = model.train_dict.get('val_accuracy', []) + [accuracy]
if loss < model.train_dict.get('best_val_performance', np.inf):
model.train_dict['best_train_performance'] = loss
model.train_dict['epochs_since_last_val_improvement'] = 0
if self.verbose == 1:
print('val loss: {:.4f} - val accuracy: {:.4f}'.format(loss, accuracy))
if train_mode: # reset to original mode
model.train()
return loss
class EarlyStopping(Validator):
def __init__(self, data_loader, verbose=1, *args, **kwargs):
super(EarlyStopping, self).__init__(data_loader, verbose, *args, **kwargs)
self.path = None
def start(self, model):
if not self.started:
self.path = f'{model.dump_path}/early_stopping'
if not os.path.exists(self.path):
os.makedirs(self.path)
self.started = True
def forward(self, model):
if not os.path.exists(model.early_stopping_path):
os.makedirs(model.early_stopping_path)
if self.verbose == 1:
print('evaluating')
val_loss = Validator.__call__(self, model=model)
if val_loss < model.train_dict['best_val_performance']:
model.train_dict['best_val_performance'] = val_loss
model.dump_checkpoint(path=self.path, tag='early_stopping')
class EarlyTermination(Callback):
def __init__(self, patience, *args, **kwargs):
super(EarlyTermination, self).__init__(*args, **kwargs)
self.patience = patience
def forward(self, model):
if self.patience < model.train_dict['epochs_since_last_val_improvement']:
raise TerminationException(f'The model did not improve for the last {self.patience} steps and is '
f'therefore terminated')
class ClassificationCurvePlotter(Callback):
def __init__(self, img_path='tmp', *args, **kwargs):
super(ClassificationCurvePlotter, self).__init__(*args, **kwargs)
self.img_path = img_path
def forward(self, model, args=None, return_fig=False):
if args is None:
args = {}
if 'figsize' not in args:
args['figsize'] = (10, 10)
fig, ax = plt.subplots(nrows=2, ncols=1, sharex=True, figsize=args['figsize'])
fig.suptitle('{}'.format(model.name))
ax[0].plot(model.train_dict['train_losses'])
ax[0].plot(model.train_dict['val_losses'])
ax[0].legend(['train', 'val'])
ax[0].set_title('loss')
ax[0].set_ylabel('loss')
ax[1].plot(model.train_dict['train_accuracy'])
ax[1].plot(model.train_dict['val_accuracy'])
ax[1].legend(['train', 'val'])
ax[1].set_title('accuracy')
ax[1].set_ylabel('accuracy in %')
ax[1].set_xlabel('epoch')
if return_fig:
return fig, ax
img_path = '{}/learning_curve_{}.png'.format(self.img_path, model.name)
fig.savefig(img_path)
plt.close(fig)
class RegressionCurvePlotter(Callback):
def __init__(self, img_path='tmp', *args, **kwargs):
super(RegressionCurvePlotter, self).__init__(*args, **kwargs)
self.img_path = img_path
def forward(self, model, args=None, return_fig=False):
if args is None:
args = {}
if 'figsize' not in args:
args['figsize'] = (10, 10)
fig, ax = plt.subplots(nrows=1, ncols=1, sharex=True, figsize=args['figsize'])
ax.plot(model.train_dict['train_losses'])
ax.plot(model.train_dict['val_epochs'], model.train_dict['val_losses'])
ax.legend(['train', 'val'])
ax.set_title('loss')
ax.set_ylabel('loss')
ax.set_xlabel('epoch')
if return_fig:
return fig, ax
img_path = '{}/learning_curve_{}.png'.format(self.img_path, model.name)
fig.savefig(img_path)
plt.close(fig)
class MetricPlotter(Callback):
def __init__(self, metric='rewards', x=None, x_label=None, y_label=None, title=None, name=None,
smoothing_window=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.y = metric if isinstance(metric, list) else [metric]
self.x = x if isinstance(x, list) else [x]
if len(self.x) != len(self.y):
raise ValueError(f'metric and x must have the same length, but got x: {self.x} and y: {self.y}')
self.y_label = y_label if y_label is not None else 'metric'
self.x_label = x_label if x_label is not None else 'iters'
self.title = title if title is not None else ' '.join(self.y)
self.name = name if name is not None else '_'.join(self.y)
self.smoothing_window = smoothing_window
def forward(self, model):
for x, y in zip(self.x, self.y):
self.plot(model, x, y)
plt.ylabel(self.y_label)
plt.xlabel(self.x_label)
plt.title(self.title)
plt.legend(framealpha=.3)
plt.tight_layout()
plt.savefig(f'{model.dump_path}/{self.name}.png')
plt.close()
def plot(self, model, x, y):
if self.smoothing_window is None:
if x is None:
plt.plot(model.train_dict[y])
else:
plt.plot(model.train_dict[x], model.train_dict[y])
else:
if x is None:
plt.plot(model.train_dict[y], label=y, alpha=.5)
plt.plot(*sliding_window(self.smoothing_window,
model.train_dict[y]), label=f'smoothed {y}')
else:
plt.plot(model.train_dict[x], model.train_dict[y], label=y, alpha=.5)
plt.plot(*sliding_window(self.smoothing_window,
model.train_dict[y],
index=model.train_dict.get(x, None)), label=f'smoothed {y}')
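# Hedged note: pymatch's sliding_window is assumed here to return (x, y) pairs of a
# moving average; e.g. with smoothing_window=3 a rewards curve [1, 5, 3, 7, 5] would be
# drawn alongside a smoothed curve whose values are the 3-point means [3.0, 5.0, 5.0].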
# class SmoothedMetricPlotter(Callback):
# # @todo is this simply redundant?
# def __init__(self, metric, frequency=1, window=10,
# x=None, x_label=None, y_label=None, title=None, name=None):
# super().__init__()
# self.frequency = frequency
# self.y = metric
# self.x = x
# self.window = window
# self.y_label = y_label if y_label is not None else 'metric'
# self.x_label = x_label if x_label is not None else 'iters'
# self.title = title if title is not None else metric
# self.name = name if name is not None else metric
#
# def __call__(self, model):
# if model.train_dict['epochs_run'] % self.frequency == 0:
# if self.x is None:
# plt.plot(*sliding_window(self.window,
# model.train_dict[self.y]))
# else:
# plt.plot(*sliding_window(self.window,
# model.train_dict[self.y],
# index=model.train_dict.get(self.x, None)))
# plt.ylabel(self.y_label)
# plt.xlabel(self.x_label)
# plt.title(f'smoothed {self.title}')
# plt.tight_layout()
# plt.savefig(f'{model.dump_path}/smoothed_{self.name}.png')
# plt.close()
class EnsembleLearningCurvePlotter(Callback):
def __init__(self, target_folder_path='tmp', *args, **kwargs):
"""
Plotting the learning curves of an entire ensemble
Args:
target_folder_path: path to dump the resulting image to
"""
super(EnsembleLearningCurvePlotter, self).__init__(*args, **kwargs)
self.img_path = target_folder_path
def forward(self, ensemble, args=None, return_fig=False):
if args is None:
args = {}
if 'figsize' not in args:
args['figsize'] = (10, 10)
fig, ax = plt.subplots(nrows=2, ncols=2, sharex=True, figsize=args['figsize'])
fig.text(0.5, 0.04, 'epochs', ha='center', va='center', fontsize=20)
fig.text(0.02, 0.5, 'performance', ha='center', va='center', fontsize=20, rotation='vertical')
fig.suptitle('Training performance', fontsize=25)
names = []
for learner in ensemble.learners:
ax[0, 0].plot(learner.train_dict['train_losses'])
names += [learner.name]
ax[0, 0].set_ylabel('loss', fontsize=15)
for learner in ensemble.learners:
ax[0, 1].plot(learner.train_dict['val_epochs'], learner.train_dict['val_losses'])
for learner in ensemble.learners:
ax[1, 0].plot(learner.train_dict['train_accuracy'])
ax[1, 0].set_ylabel('accuracy in %', fontsize=15)
ax[1, 0].set_xlabel('train', fontsize=15)
for learner in ensemble.learners:
ax[1, 1].plot(learner.train_dict['val_epochs'], learner.train_dict['val_accuracy'])
ax[1, 1].set_xlabel('validation', fontsize=15)
fig.legend(names, framealpha=0.5, loc='center right')
img_path = '{}/learning_curve_ensemble.png'.format(self.img_path)
if return_fig:
return fig, ax
fig.savefig(img_path, dpi=fig.dpi, bbox_inches='tight', pad_inches=0.5)
plt.close(fig)
class ConfusionMatrixPlotter(Callback):
def __init__(self, data_loader, img_path='./tmp', img_name='confusion_matrix', *args, **kwargs):
super(ConfusionMatrixPlotter, self).__init__(*args, **kwargs)
self.data_loader = data_loader
self.img_path = '{}/{}.png'.format(img_path, img_name)
def forward(self, model, classes, device='cpu', return_fig=False, title='Confusion Matrix'):
y_pred, y_true = DataHandler.predict_data_loader(model=model, data_loader=self.data_loader, device=device, return_true=True)
cm = scale_confusion_matrix(confusion_matrix(y_true, y_pred))
fig, ax = self.plot_confusion_matrix(cm, figsize=(10, 10), class_names=classes)
fig.suptitle(title, y=.95, fontsize=25)
if return_fig:
return fig, ax
fig.savefig(self.img_path, dpi=fig.dpi, bbox_inches='tight', pad_inches=0.5)
plt.close(fig)
def plot_confusion_matrix(self, confm, class_names=None, figsize=(8, 8), heat_map_args={}):
if 'annot' not in heat_map_args:
heat_map_args['annot'] = True
if 'fmt' not in heat_map_args:
heat_map_args['fmt'] = '.2%'
if 'vmin' not in heat_map_args:
heat_map_args['vmin'] = 0.
if 'vmax' not in heat_map_args:
heat_map_args['vmax'] = 1.
if class_names is None:
class_names = ['{}'.format(i) for i in range(len(confm))]
df_cm = pd.DataFrame(confm, index=class_names, columns=class_names)
fig, ax = plt.subplots(figsize=figsize)
ax = sn.heatmap(df_cm, **heat_map_args, ax=ax)
ax.set_yticklabels(ax.get_yticklabels(), rotation=45.)
ax.set_xticklabels(ax.get_xticklabels(), rotation=45.)
ax.set_ylim(0., len(class_names) + .5)
ax.set_xlabel('predicted', fontsize=15)
ax.set_ylabel('true', fontsize=15)
return fig, ax
class Reporter(Callback):
def __init__(self, data_loader, folder_path='./tmp', file_name='report', mode='w', *args, **kwargs):
"""
Args:
data_loader:
folder_path:
file_name:
mode: mode of the writer, 'a': append, 'w': overwrite
"""
super(Reporter, self).__init__(*args, **kwargs)
self.mode = mode
self.data_loader = data_loader
self.file_path = '{}/{}.txt'.format(folder_path, file_name)
def forward(self, model, classes):
y_pred, y_true = DataHandler.predict_data_loader(model, self.data_loader, return_true=True)
report = classification_report(y_true.numpy(), y_pred.numpy(), digits=3, target_names=classes)
self._write_report(report)
def _write_report(self, report):
with open(self.file_path, self.mode) as file:
file.write(report)
file.write('\n\n')
class WandbTrainDictLogger(Callback):
def __init__(self, *args, **kwargs):
super(WandbTrainDictLogger, self).__init__(*args, **kwargs)
def forward(self, model, args={}): # @todo clarify what the args parameter is used for
log_dict = {}
for k, v in model.train_dict.items():
if isinstance(v, (list, np.ndarray)):
log_dict[k] = v[-1]
if isinstance(v, (int, float)):
log_dict[k] = v
wandb.log(log_dict)
class WandbExperiment(Callback):
def __init__(self, wandb_args, *args, **kwargs):
super(WandbExperiment, self).__init__(*args, **kwargs)
self.wandb_args = wandb_args
def start(self, learner):
self.wandb_args['name'] = learner.name
print(self.wandb_args)
wandb.init(**self.wandb_args, reinit=True)
wandb.watch(learner.model)
def forward(self, model, args={}): # @todo clarify what the args parameter is used for
log_dict = {}
for k, v in model.train_dict.items():
if isinstance(v, (list, np.ndarray)):
log_dict[k] = v[-1]
if isinstance(v, (int, float)):
log_dict[k] = v
wandb.log(log_dict)
|
506084
|
import asyncio
import logging
import unittest
import json
import copy
from random import randrange, random, choice, randint
from pprint import pprint
from gremlinpy.gremlin import Gremlin
from gizmo.connection import Response
from gizmo.mapper import *
from gizmo.exception import *
from gizmo.entity import GenericVertex, GenericEdge
from gizmo.util import camel_to_underscore, entity_name, _query_debug
DEFAULT_PROPRTIES = sorted([GIZMO_LABEL[0], GIZMO_TYPE, GIZMO_ENTITY])
logging.disable(logging.CRITICAL)
class TestVertex(GenericVertex):
pass
class TestEdge(GenericEdge):
label = 'test_edge_label'
class TestRequest:
async def send(*args, **kwargs):
pass
def get_dict_key(params, value, unset=False):
for k, v in params.items():
if v == value:
if unset:
del(params[k])
return k, params
return None, params
def build_prop(key, value, params=None, value_properties=None):
prop = ['property({}'.format(key)]
properties = []
if value_properties:
for k, v in value_properties.items():
kv, _ = get_dict_key(params, k)
vv, _ = get_dict_key(params, v)
properties += ['{}, {}'.format(kv, vv)]
prop += [', {}'.format(value)]
if properties:
properties = ', '.join(properties)
prop += [', {}'.format(properties)]
prop += [')']
return ''.join(prop)
def build_params(entity, values, mapper, params=None, value_properties=None,
entities=None, deleted=None, ignore=None):
ignore = ignore or []
params = copy.deepcopy(params or {})
expected = []
value_properties = value_properties or {}
deleted = deleted or []
entity_name = str(entity)
entity_alias = '{}_alias'.format(entity_name)
def get_key(key):
if key.startswith('T.'):
return key
else:
k, _ = get_dict_key(params, key)
return k
def delete_key(key):
nonlocal expected
k, _ = get_dict_key(params, key)
a, _ = get_dict_key(params, entity_alias)
expected += ['as({})'.format(a)]
expected += ['properties({})'.format(k)]
expected += ['sideEffect{it.get().remove()}']
expected += ['select({})'.format(a)]
for key, val in entity.data.items():
if key in ignore:
continue
if key in deleted:
delete_key(key)
continue
if isinstance(val, (list, tuple)) and len(val)\
and isinstance(val[0], dict)\
and 'value' in val[0]:
if isinstance(val[0], (list, tuple)):
for v in val[0]['value']:
if v not in values:
continue
var, _ = get_dict_key(params, v)
prop = build_prop(get_key(key), var, params,
value_properties.get(v, None))
expected.append(prop)
else:
if val[0]['value'] not in values:
continue
v = val[0]['value']
var, _ = get_dict_key(params, v)
prop = build_prop(get_key(key), var, params,
value_properties.get(v, None))
expected.append(prop)
else:
if val not in values:
continue
var, _ = get_dict_key(params, val)
prop = build_prop(get_key(key), var, params,
value_properties.get(val, None))
expected.append(prop)
return expected
def build_vertex_create_query(entity, values, mapper, params=None,
value_properties=None, entities=None, deleted=None, return_var=None):
expected = []
label_str = str(entity)
label, _ = get_dict_key(params, label_str)
add = '{}.addV(T.label, {})'.format(mapper.gremlin.gv, label)
ignore = ['T.label', 'label']
if return_var:
expected += ['{} = {}'.format(return_var, add)]
else:
expected += [add]
expected += build_params(entity=entity, values=values, params=params,
value_properties=value_properties, entities=entities, mapper=mapper,
deleted=deleted, ignore=ignore)
expected.append('next()')
return '.'.join(expected)
def build_update_query(entity, values, mapper, params=None,
value_properties=None, entities=None, deleted=None, return_var=None):
entity_type, _id = entity.get_rep()
alias = '{}_{}_updating'.format(entity_type, _id)
_id, _ = get_dict_key(params, _id)
alias, _ = get_dict_key(params, alias)
expected = []
update = '{}.{}({})'.format(mapper.gremlin.gv, entity_type.upper(), _id)
if return_var:
expected += ['{} = {}'.format(return_var, update)]
else:
expected += [update]
expected += ['as({})'.format(alias)]
expected += build_params(entity=entity, values=values, params=params,
value_properties=value_properties, entities=entities, mapper=mapper,
deleted=deleted)
expected += ['select({})'.format(alias), 'next()']
return '.'.join(expected)
def build_delete_query(entity, mapper, params=None):
params = params or {}
e_type, _id = entity.get_rep()
_id, _ = get_dict_key(params, _id)
return '{}.{}({}).next().remove()'.format(mapper.gremlin.gv,
e_type.upper(), _id)
def build_edge_create_query(entity, out_v, in_v, values, mapper, params=None,
value_properties=None, entities=None, deleted=None):
def g_v(_id):
if isinstance(_id, str) and _id.startswith('var:'):
_id = _id.split(':')[1]
else:
_id, _ = get_dict_key(params, _id)
return '{}.V({}).next()'.format(mapper.gremlin.gv, _id)
v_in = g_v(in_v)
v_out = g_v(out_v)
label, _ = get_dict_key(params, entity[GIZMO_LABEL[0]])
edge_params = []
ignore = [GIZMO_LABEL[0], GIZMO_LABEL[1], GIZMO_TYPE]
for f, changes in entity.changes.items():
if f in ignore:
continue
try:
if changes['immutable']:
val = changes['values']['values'][-1]
else:
val = changes['values'][-1]
except:
continue
f_arg, _ = get_dict_key(params, f)
v_arg, _ = get_dict_key(params, val)
edge_params += [f_arg, v_arg]
edge_params = ', '.join(edge_params)
expected = [v_out, ]
if edge_params:
expected += ['addEdge({}, {}, {})'.format(label, v_in, edge_params)]
else:
expected += ['addEdge({}, {})'.format(label, v_in)]
return '.'.join(expected)
class QueryTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.request = TestRequest()
self.mapper = Mapper(request=self.request)
self.query = Query(mapper=self.mapper)
def test_can_save_vertex_with_no_field_values(self):
v = TestVertex()
self.query.save(v)
queries = self.query.queries
values = ['vertex', entity_name(v), str(v), GIZMO_ENTITY, GIZMO_TYPE]
self.assertEqual(1, len(queries))
entry = queries[0]
params = entry['params']
expected = build_vertex_create_query(entity=v, values=values,
params=params, mapper=self.mapper)
self.assertEqual(expected, entry['script'])
self.assertEqual(len(values), len(params))
def test_can_save_vertex_with_one_field_value(self):
v = TestVertex()
ik = '__some_field' + str(random())
iv = 'some_value' + str(random())
v[ik] = iv
self.query.save(v)
values = [ik, iv, 'vertex', entity_name(v), str(v), GIZMO_ENTITY,
GIZMO_TYPE]
queries = self.query.queries
self.assertEqual(1, len(queries))
entry = queries[0]
params = entry['params']
expected = build_vertex_create_query(entity=v, values=values,
params=params, mapper=self.mapper)
self.assertEqual(expected, entry['script'])
self.assertEqual(len(values), len(params))
def test_can_save_vertex_with_one_field_value_one_property(self):
v = TestVertex()
ik = 'some_field' + str(random())
iv = 'some_value' + str(random())
pk = 'prop_key' + str(random())
pv = 'prop_value' + str(random())
v[ik] = iv
v[ik][iv].properties[pk] = pv
self.query.save(v)
values = [ik, iv, 'vertex', entity_name(v), str(v), GIZMO_ENTITY,
GIZMO_TYPE]
value_properties = {
iv: {
pk: pv,
}
}
queries = self.query.queries
self.assertEqual(1, len(queries))
entry = queries[0]
params = entry['params']
expected = build_vertex_create_query(entity=v, values=values, params=params,
value_properties=value_properties, mapper=self.mapper)
self.assertEqual(expected, entry['script'])
# 2 is the number of params added because of the one property defined
self.assertEqual(len(values) + 2, len(params))
def test_can_save_vertex_with_one_field_value_two_properties(self):
v = TestVertex()
ik = 'some_field' + str(random())
iv = 'some_value' + str(random())
pk = 'prop_key' + str(random())
pv = 'prop_value' + str(random())
pk2 = 'prop2_key' + str(random())
pv2 = 'prop2_value' + str(random())
v[ik] = iv
v[ik][iv].properties[pk] = pv
v[ik][iv].properties[pk2] = pv2
self.query.save(v)
values = [ik, iv, 'vertex', entity_name(v), str(v), GIZMO_ENTITY, GIZMO_TYPE]
value_properties = {
iv: {
pk: pv,
pk2: pv2,
}
}
queries = self.query.queries
self.assertEqual(1, len(queries))
entry = queries[0]
params = entry['params']
expected = build_vertex_create_query(entity=v, values=values, params=params,
value_properties=value_properties, mapper=self.mapper)
self.assertEqual(expected, entry['script'])
# 4 is the number of params added because of the two props defined
self.assertEqual(len(values) + 4, len(params))
def test_can_save_vertex_with_two_field_values(self):
v = TestVertex()
ik = '__some_field' + str(random())
iv = 'some_value' + str(random())
ik2 = '2__some_field' + str(random())
iv2 = '2some_value' + str(random())
v[ik] = iv
v[ik2] = iv2
self.query.save(v)
values = [ik, iv, ik2, iv2, 'vertex', entity_name(v), str(v),
GIZMO_ENTITY, GIZMO_TYPE]
queries = self.query.queries
self.assertEqual(1, len(queries))
entry = queries[0]
params = entry['params']
expected = build_vertex_create_query(entity=v, values=values,
params=params, mapper=self.mapper)
self.assertEqual(expected, entry['script'])
self.assertEqual(len(values), len(params))
def test_can_save_vertex_with_two_fields_value_one_property_on_one_field(self):
v = TestVertex()
ik = 'some_field' + str(random())
iv = 'some_value' + str(random())
ik2 = '2__some_field' + str(random())
iv2 = '2some_value' + str(random())
pk = 'prop_key' + str(random())
pv = 'prop_value' + str(random())
v[ik] = iv
v[ik][iv].properties[pk] = pv
v[ik2] = iv2
self.query.save(v)
values = [ik, iv, iv2, ik2, 'vertex', entity_name(v), str(v),
GIZMO_ENTITY, GIZMO_TYPE]
value_properties = {
iv: {
pk: pv,
}
}
queries = self.query.queries
self.assertEqual(1, len(queries))
entry = queries[0]
params = entry['params']
expected = build_vertex_create_query(entity=v, values=values, params=params,
value_properties=value_properties, mapper=self.mapper)
self.assertEqual(expected, entry['script'])
# 2 is the number of params added because of the one property defined
self.assertEqual(len(values) + 2, len(params))
def test_can_save_vertex_with_two_fields_value_one_property_on_each_field(self):
v = TestVertex()
ik = 'some_field' + str(random())
iv = 'some_value' + str(random())
ik2 = '2__some_field' + str(random())
iv2 = '2some_value' + str(random())
pk = 'prop_key' + str(random())
pv = 'prop_value' + str(random())
pk2 = '2prop_key' + str(random())
pv2 = '2prop_value' + str(random())
v[ik] = iv
v[ik][iv].properties[pk] = pv
v[ik2] = iv2
v[ik2][iv2].properties[pk2] = pv2
self.query.save(v)
values = [ik, iv, iv2, ik2, 'vertex', entity_name(v), str(v),
GIZMO_ENTITY, GIZMO_TYPE, pk2, pv2, pk, pv]
value_properties = {
iv: {
pk: pv,
},
iv2: {
pk2: pv2,
}
}
queries = self.query.queries
self.assertEqual(1, len(queries))
entry = queries[0]
params = entry['params']
expected = build_vertex_create_query(entity=v, values=values, params=params,
value_properties=value_properties, mapper=self.mapper)
self.assertEqual(expected, entry['script'])
self.assertEqual(len(values), len(params))
def test_can_save_vertex_with_two_fields_value_two_props_on_one_field(self):
v = TestVertex()
ik = 'some_field' + str(random())
iv = 'some_value' + str(random())
ik2 = '2__some_field' + str(random())
iv2 = '2some_value' + str(random())
pk = 'prop_key' + str(random())
pv = 'prop_value' + str(random())
pk2 = '2prop_key' + str(random())
pv2 = '2prop_value' + str(random())
v[ik] = iv
v[ik][iv].properties[pk] = pv
v[ik][iv].properties[pk2] = pv2
v[ik2] = iv2
self.query.save(v)
values = [ik, iv, iv2, ik2, 'vertex', entity_name(v), str(v),
GIZMO_ENTITY, GIZMO_TYPE, pk2, pv2, pk, pv]
value_properties = {
iv: {
pk: pv,
pk2: pv2,
},
}
queries = self.query.queries
self.assertEqual(1, len(queries))
entry = queries[0]
params = entry['params']
expected = build_vertex_create_query(entity=v, values=values, params=params,
value_properties=value_properties, mapper=self.mapper)
self.assertEqual(expected, entry['script'])
self.assertEqual(len(values), len(params))
def test_can_save_vertex_with_two_fields_value_two_props_on_one_field_one_on_the_other(self):
v = TestVertex()
ik = 'some_field' + str(random())
iv = 'some_value' + str(random())
ik2 = '2__some_field' + str(random())
iv2 = '2some_value' + str(random())
pk = 'prop_key' + str(random())
pv = 'prop_value' + str(random())
pk2 = '2prop_key' + str(random())
pv2 = '2prop_value' + str(random())
pk3 = '3prop_key' + str(random())
pv3 = '3prop_value' + str(random())
v[ik] = iv
v[ik][iv].properties[pk] = pv
v[ik][iv].properties[pk2] = pv2
v[ik2] = iv2
v[ik2][iv2].properties[pk3] = pv3
self.query.save(v)
values = [ik, iv, iv2, ik2, 'vertex', entity_name(v), str(v),
GIZMO_ENTITY, GIZMO_TYPE, pk2, pv2, pk, pv, pv3, pk3]
value_properties = {
iv: {
pk: pv,
pk2: pv2,
},
iv2: {
pk3: pv3
}
}
queries = self.query.queries
self.assertEqual(1, len(queries))
entry = queries[0]
params = entry['params']
expected = build_vertex_create_query(entity=v, values=values, params=params,
value_properties=value_properties, mapper=self.mapper)
self.assertEqual(expected, entry['script'])
self.assertEqual(len(values), len(params))
def test_can_update_vertex_with_no_field_values(self):
_id = str(random())
data = {GIZMO_ID: _id}
v = TestVertex(data)
self.query.save(v)
queries = self.query.queries
values = ['vertex', entity_name(v), GIZMO_ENTITY, GIZMO_ID]
self.assertEqual(1, len(queries))
entry = queries[0]
params = entry['params']
expected = build_update_query(entity=v, values=values, mapper=self.mapper,
params=params)
self.assertEqual(expected, entry['script'])
# +2 because the _id and alias params cannot be added to the values list
self.assertEqual(len(values) + 2, len(params))
def test_can_update_vertext_with_one_field_and_two_properties(self):
# we only need one test because properties are already covered by the add-vertex tests
_id = str(random())
data = {GIZMO_ID: _id}
v = TestVertex(data)
ik = 'some_field' + str(random())
iv = 'some_value' + str(random())
pk = 'prop_key' + str(random())
pv = 'prop_value' + str(random())
pk2 = '2prop_key' + str(random())
pv2 = '2prop_value' + str(random())
v[ik] = iv
v[ik][iv].properties[pk] = pv
v[ik][iv].properties[pk2] = pv2
self.query.save(v)
values = [ik, iv, 'vertex', entity_name(v), GIZMO_ENTITY, GIZMO_ID,
pk2, pv2, pk, pv]
value_properties = {
iv: {
pk: pv,
pk2: pv2,
},
}
queries = self.query.queries
self.assertEqual(1, len(queries))
entry = queries[0]
params = entry['params']
expected = build_update_query(entity=v, values=values, mapper=self.mapper,
params=params, value_properties=value_properties)
self.assertEqual(expected, entry['script'])
# +2 because the _id and alias params cannot be added to the values list
self.assertEqual(len(values) + 2, len(params))
def test_can_update_vertex_with_two_fields_after_deleting_one(self):
_id = str(random())
ik = 'key' + str(random())
iv = 'val' + str(random())
ik2 = '2key' + str(random())
iv2 = '2val' + str(random())
data = {GIZMO_ID: _id, ik: iv, ik2: iv2}
v = TestVertex(data)
del v[ik2]
values = [ik, iv, ik2, iv2, GIZMO_ENTITY, entity_name(v), GIZMO_TYPE, 'vertex']
deleted = [ik2, ]
self.query.save(v)
queries = self.query.queries
entry = queries[0]
params = entry['params']
expected = build_update_query(entity=v, values=values, mapper=self.mapper,
params=params, deleted=deleted)
self.assertEqual(expected, entry['script'])
def test_cannot_delete_vertex(self):
v = TestVertex()
self.assertRaises(AstronomerQueryException, self.query.delete, v)
def test_can_delete_vertex(self):
_id = str(random())
d = {GIZMO_ID: _id}
v = TestVertex(data=d)
self.query.delete(v)
queries = self.query.queries
entry = queries[0]
expected = build_delete_query(entity=v, mapper=self.mapper,
params=entry['params'])
self.assertEqual(expected, entry['script'])
self.assertEqual(1, len(entry['params']))
def test_cannot_save_edge(self):
e = TestEdge()
self.assertRaises(AstronomerQueryException, self.query.save, e)
def test_cannot_save_edge_one_end_isnt_defined(self):
d = {
'outV': 15,
}
e = TestEdge(data=d)
self.assertRaises(AstronomerQueryException, self.query.save, e)
def test_can_save_edge_with_ends_being_ids(self):
d = {
'outV': 10,
'inV': 99,
}
e = TestEdge(data=d)
self.query.save(e)
values = [str(e), e.outV, e.inV,
entity_name(e)]
entry = self.query.queries[0]
expected = build_edge_create_query(entity=e, out_v=e.outV,
in_v=e.inV, values=values, mapper=self.mapper,
params=entry['params'])
self.assertEqual(expected, entry['script'])
self.assertEqual(len(values) + 1, len(entry['params']))
def test_can_save_edge_one_end_being_new_entity_other_being_id(self):
v = TestVertex()
d = {
'outV': 15,
'inV': v,
}
e = TestEdge(data=d)
self.query.save(e)
in_v = 'var:{}'.format(self.mapper.get_entity_variable(v))
values = [GIZMO_LABEL[0], str(e), e.outV,
entity_name(e)]
entry = self.query.queries[0]
expected = build_edge_create_query(entity=e, out_v=e.outV,
in_v=in_v, values=values, mapper=self.mapper,
params=entry['params'])
self.assertEqual(expected, entry['script'])
self.assertEqual(len(values), len(entry['params']))
def test_can_save_edge_with_two_new_entities(self):
v = TestVertex()
v2 = TestVertex()
d = {
'outV': v2,
'inV': v,
}
e = TestEdge(data=d)
self.query.save(e)
in_v = 'var:{}'.format(self.mapper.get_entity_variable(v))
out_v = 'var:{}'.format(self.mapper.get_entity_variable(v2))
values = [GIZMO_LABEL[0], str(e), entity_name(e)]
entry = self.query.queries[0]
expected = build_edge_create_query(entity=e, out_v=out_v,
in_v=in_v, values=values, mapper=self.mapper,
params=entry['params'])
self.assertEqual(expected, entry['script'])
self.assertEqual(len(values), len(entry['params']))
class MapperTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.request = TestRequest()
self.mapper = Mapper(request=self.request)
self.query = Query(mapper=self.mapper)
self.gremlin = Gremlin()
self.ioloop = asyncio.get_event_loop()
def test_mapper_instance(self):
m = Mapper(self.request, self.gremlin)
self.assertTrue(type(m) == Mapper)
def test_can_create_vertex(self):
v = self.mapper.create(entity=TestVertex)
self.assertTrue(isinstance(v, Vertex))
self.assertEqual(v[GIZMO_TYPE], 'vertex')
def test_can_create_vertex_with_data(self):
d = {'some_field': str(random())}
v = self.mapper.create(d, TestVertex)
vd = v.data
self.assertTrue(isinstance(v, Vertex))
self.assertIn('some_field', vd)
self.assertEqual(1, len(vd['some_field']))
self.assertEqual(d['some_field'], vd['some_field'][0]['value'])
def test_can_update_existing_vertex(self):
vid = '1111'
d = {
GIZMO_ID: vid,
'some_field': 'mark',
}
v = self.mapper.create(d, TestVertex)
v['some_field'] = 'xxxx'
self.mapper.save(v)._build_queries()
sent_params = copy.deepcopy(self.mapper.params)
values = ['some_field', 'xxxx', 'vertex', entity_name(v), GIZMO_ENTITY,
GIZMO_TYPE]
queries = self.mapper.queries
self.assertEqual(2, len(queries))
params = self.mapper.params
return_var = self.mapper.get_entity_variable(v)
expected = build_update_query(entity=v, values=values,
params=params, mapper=self.mapper, return_var=return_var)
self.assertEqual(expected, queries[0])
# +2 for id and alias
self.assertEqual(len(values) + 2, len(params))
def test_can_queue_save_vertex_with_two_params_query(self):
d = {
'some_field': 'mark',
}
v = self.mapper.create(d, TestVertex)
self.mapper.save(v)._build_queries()
params = copy.deepcopy(self.mapper.params)
values = ['some_field', 'mark', 'vertex', entity_name(v), GIZMO_ENTITY,
GIZMO_TYPE, str(v)]
return_var = self.mapper.get_entity_variable(v)
expected = build_vertex_create_query(entity=v, values=values,
params=params, mapper=self.mapper, return_var=return_var)
self.assertEqual(expected, self.mapper.queries[0])
self.assertEqual(len(values), len(params))
def test_can_delete_existing_vertex(self):
vid = '1111'
d = {
GIZMO_ID: vid,
'some_field': 'mark',
}
v = self.mapper.create(d, TestVertex)
self.mapper.delete(v)._build_queries()
params = copy.deepcopy(self.mapper.params)
sent_params = copy.deepcopy(self.mapper.params)
eyed = get_dict_key(params, vid)
expected = '{}.V({}).next().remove()'.format(self.mapper.gremlin.gv,
eyed[0])
self.assertEqual(expected, self.mapper.queries[0])
def test_can_delete_multiple_entities(self):
v1 = {'id': '15'}
v2 = {'id': '10'}
out_v = self.mapper.create(v1, TestVertex)
in_v = self.mapper.create(v2, TestVertex)
ed = {'outV': out_v, 'inV': in_v, 'id': '44'}
edge = self.mapper.create(ed, TestEdge)
self.mapper.delete(out_v)
self.mapper.delete(in_v)
self.mapper.delete(edge)
self.mapper._build_queries()
params = self.mapper.params
v1_id = get_dict_key(params, v1['id'])
v2_id = get_dict_key(params, v2['id'])
e_id = get_dict_key(params, ed['id'])
gv = self.mapper.gremlin.gv
expected = [
'{}.V({}).next().remove()'.format(gv, v1_id[0]),
'{}.V({}).next().remove()'.format(gv, v2_id[0]),
'{}.E({}).next().remove()'.format(gv, e_id[0]),
]
self.assertEqual(3, len(self.mapper.queries))
self.assertEqual(3, len(self.mapper.params))
for exp in expected:
self.assertIn(exp, self.mapper.queries)
def test_can_call_callback_when_save_method_is_called(self):
variable = {'v': ''}
updated = random()
def save_test_callback(entity):
variable['v'] = updated
async def test():
m = self.mapper.create({}, TestVertex)
await self.mapper.save(m, callback=save_test_callback).send()
self.assertEqual(variable['v'], updated)
self.ioloop.run_until_complete(test())
def test_can_call_callback_when_delete_method_is_called(self):
variable = {'v': ''}
updated = random()
def delete_test_callback(entity):
variable['v'] = updated
async def test():
m = self.mapper.create({'id': '15'}, TestVertex)
await self.mapper.delete(m, callback=delete_test_callback).send()
self.assertEqual(variable['v'], updated)
self.ioloop.run_until_complete(test())
def test_can_retrieve_data_from_entity_via_mapper(self):
class TestCaseVertex1(Vertex):
allow_undefined = True
d = {
'name': 'name{}'.format(str(random()))
}
async def test():
v = self.mapper.create(d, TestCaseVertex1)
data = await self.mapper.data(v)
self.assertIn('name', data)
self.assertEqual(d['name'], data['name'][0]['value'])
self.ioloop.run_until_complete(test())
def test_can_retrieve_data_from_collection_via_mapper(self):
class TestCaseVertex1(Vertex):
allow_undefined = True
class C(object):
data = []
coll = []
items = 15
for i in range(items):
d = {
'name': 'name{}'.format(str(random()))
}
v = self.mapper.create(d, TestCaseVertex1)
coll.append(dict(v.data))
resp = Response()
resp.result = {'data': coll}
collection = Collection(self.mapper, resp)
async def test():
data = await self.mapper.data(collection)
self.assertEqual(items, len(data))
names = [dd['name'] for dd in data]
for d in coll:
self.assertIn(d['name'], names)
self.ioloop.run_until_complete(test())
def test_can_retrieve_data_from_two_nested_entities_via_custom_mapper_methods(self):
city = 'city-{}'.format(str(random()))
class TestCaseVertex2(Vertex):
allow_undefined = True
class TestCaseVertex2Mapper(EntityMapper):
entity = TestCaseVertex2
async def get_city(self, entity, data):
data['city'] = city
return data
d = {
'name': 'name{}'.format(str(random()))
}
v = self.mapper.create(d, TestCaseVertex2)
async def test():
data = await self.mapper.data(v, 'get_city')
self.assertIn('name', data)
self.assertEqual(d['name'], data['name'][0]['value'])
self.assertIn('city', data)
self.assertEqual(city, data['city'])
self.ioloop.run_until_complete(test())
    def test_can_assure_saving_vertex_multiple_times_only_crud_once(self):
d = {'some_field': str(random())}
v = self.mapper.create(d, TestVertex)
self.mapper.save(v).save(v)._build_queries()
params = copy.deepcopy(self.mapper.params)
return_var = self.mapper.get_entity_variable(v)
values = ['some_field', d['some_field'], 'vertex', entity_name(v),
GIZMO_ENTITY, GIZMO_TYPE, str(v)]
expected = build_vertex_create_query(entity=v, values=values,
params=params, mapper=self.mapper, return_var=return_var)
self.assertEqual(3, len(self.mapper.queries))
self.assertIn(expected, self.mapper.queries)
    def test_can_assure_saving_edge_multiple_times_only_crud_once(self):
d = {
'outV': 10,
'inV': 99,
}
e = TestEdge(data=d)
self.mapper.save(e).save(e)._build_queries()
self.assertEqual(3, len(self.mapper.queries))
    def test_can_assure_saving_edge_and_vertex_multiple_times_only_crud_once(self):
v = TestVertex()
d = {
'outV': v,
'inV': 99,
}
e = TestEdge(data=d)
self.mapper.save(e).save(e)._build_queries()
self.assertEqual(4, len(self.mapper.queries))
class TestCallbackVertex(Vertex):
allow_undefined = True
test_callback_mapper_on_create_variable = None
test_callback_mapper_on_update_variable = None
test_callback_mapper_on_delete_variable = 'DEL'
class TestCallbackMapper(EntityMapper):
entity = TestCallbackVertex
on_create_variable = ''
def on_create(self, entity):
global test_callback_mapper_on_create_variable
test_callback_mapper_on_create_variable = \
entity['on_create_variable'].data[0]['value']
def on_update(self, entity):
global test_callback_mapper_on_update_variable
test_callback_mapper_on_update_variable = \
entity['on_update_variable'].values[0]
def on_delete(self, entity):
global test_callback_mapper_on_delete_variable
        test_callback_mapper_on_delete_variable = \
            entity['on_delete_variable'].values[0]
class CustomMapperTests(unittest.TestCase):
def setUp(self):
self.gremlin = Gremlin()
self.request = TestRequest()
self.mapper = Mapper(self.request, self.gremlin)
self.ioloop = asyncio.get_event_loop()
    def test_can_call_on_create_level_callback(self):
async def test():
global test_callback_mapper_on_create_variable
r = random()
v = TestCallbackVertex({'on_create_variable': r})
await self.mapper.save(v).send()
self.assertEqual(r, test_callback_mapper_on_create_variable)
self.ioloop.run_until_complete(test())
    def test_can_call_on_update_entity_level_callback(self):
async def test():
global test_callback_mapper_on_update_variable
r = random()
v = TestCallbackVertex({'id': 10, 'on_update_variable': r})
mapper = self.mapper.get_mapper(v)
await self.mapper.save(v).send()
self.assertEqual(r, test_callback_mapper_on_update_variable)
self.ioloop.run_until_complete(test())
    def test_can_call_on_delete_entity_level_callback(self):
async def test():
global test_callback_mapper_on_delete_variable
r = random()
v = TestCallbackVertex({'id': 10, 'on_delete_variable': r})
mapper = self.mapper.get_mapper(v)
await self.mapper.delete(v).send()
self.assertEqual(r, test_callback_mapper_on_delete_variable)
self.ioloop.run_until_complete(test())
    def test_can_call_on_create_level_callback_and_onetime_callback(self):
async def test():
global test_callback_mapper_on_create_variable
variable = {'v': ''}
updated = random()
def create_test_callback(entity):
variable['v'] = updated
r = random()
v = TestCallbackVertex({'on_create_variable': r})
mapper = self.mapper.get_mapper(v)
await self.mapper.save(v, callback=create_test_callback).send()
self.assertEqual(r, test_callback_mapper_on_create_variable)
self.assertEqual(variable['v'], updated)
self.ioloop.run_until_complete(test())
    def test_can_call_on_update_entity_level_callback_and_onetime_callback(self):
async def test():
global test_callback_mapper_on_update_variable
variable = {'v': ''}
updated = random()
def update_test_callback(entity):
variable['v'] = updated
r = random()
v = TestCallbackVertex({'id': 10, 'on_update_variable': r})
mapper = self.mapper.get_mapper(v)
await self.mapper.save(v, callback=update_test_callback).send()
self.assertEqual(r, test_callback_mapper_on_update_variable)
self.assertEqual(variable['v'], updated)
self.ioloop.run_until_complete(test())
    def test_can_call_on_delete_entity_level_callback_and_onetime_callback(self):
async def test():
global test_callback_mapper_on_delete_variable
variable = {'v': ''}
updated = random()
def delete_test_callback(entity):
variable['v'] = updated
r = random()
v = TestCallbackVertex({'id': 10, 'on_delete_variable': r})
mapper = self.mapper.get_mapper(v)
await self.mapper.delete(v, callback=delete_test_callback).send()
self.assertEqual(r, test_callback_mapper_on_delete_variable)
self.assertEqual(variable['v'], updated)
self.ioloop.run_until_complete(test())
if __name__ == '__main__':
unittest.main()
|
506092
|
import typing
import ipaddress
import enum
import decimal
import uuid
import datetime
import pathlib
import pytest
import pydantic
import pydantic2graphene
import graphene
def to_pydantic_class(field_type):
    """Wrap a single annotated field in a throwaway pydantic model so each
    test exercises the mapping of exactly one field type."""
    class Fake(pydantic.BaseModel):
        field: field_type
    return Fake
class TestTypeMappingPydantic2Graphene:
def test_bytes_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(to_pydantic_class(bytes))
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_list_field(self):
with pytest.raises(pydantic2graphene.FieldNotSupported):
pydantic2graphene.to_graphene(to_pydantic_class(list))
def test_tuple_field(self):
with pytest.raises(pydantic2graphene.FieldNotSupported):
pydantic2graphene.to_graphene(to_pydantic_class(tuple))
def test_dict_field(self):
with pytest.raises(pydantic2graphene.FieldNotSupported):
pydantic2graphene.to_graphene(to_pydantic_class(dict))
def test_set_field(self):
with pytest.raises(pydantic2graphene.FieldNotSupported):
pydantic2graphene.to_graphene(to_pydantic_class(set))
def test_frozenset_field(self):
with pytest.raises(pydantic2graphene.FieldNotSupported):
pydantic2graphene.to_graphene(to_pydantic_class(frozenset))
def test_datetime_date_field(self, normalize_sdl):
version_1_x = graphene.__version__.startswith("1.")
version_2_0 = graphene.__version__ == "2.0"
if version_1_x or version_2_0:
with pytest.raises(pydantic2graphene.FieldNotSupported):
pydantic2graphene.to_graphene(to_pydantic_class(datetime.date))
return
value = pydantic2graphene.to_graphene(to_pydantic_class(datetime.date))
expected_value = """
scalarDatetypeFakeGql {
field: Date!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_datetime_datetime_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(datetime.datetime)
)
expected_value = """
scalarDateTimetypeFakeGql {
field: DateTime!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_datetime_time_field(self, normalize_sdl):
versions_1_x = {"1.1.2", "1.1.1", "1.1", "1.0.2", "1.0.1", "1.0"}
if graphene.__version__ in versions_1_x:
with pytest.raises(pydantic2graphene.FieldNotSupported):
pydantic2graphene.to_graphene(to_pydantic_class(datetime.time))
return
value = pydantic2graphene.to_graphene(to_pydantic_class(datetime.time))
expected_value = """
type FakeGql {
field: Time!
}scalarTime
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_datetime_timedelta_field(self):
with pytest.raises(pydantic2graphene.FieldNotSupported):
pydantic2graphene.to_graphene(
to_pydantic_class(datetime.timedelta)
)
def test_any_field(self):
with pytest.raises(pydantic2graphene.FieldNotSupported):
pydantic2graphene.to_graphene(to_pydantic_class(typing.Any))
def test_type_var_field(self):
with pytest.raises(pydantic2graphene.FieldNotSupported):
pydantic2graphene.to_graphene(
to_pydantic_class(typing.TypeVar("custom_types"))
)
def test_optional_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(typing.Optional[int])
)
expected_value = """
type FakeGql {
field: Int
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_typing_list_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(typing.List[str])
)
expected_value = """
type FakeGql {
field: [String!]!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_typing_tuple_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(typing.Tuple[str])
)
expected_value = """
type FakeGql {
field: [String!]!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_typing_dict_field(self):
with pytest.raises(pydantic2graphene.FieldNotSupported):
pydantic2graphene.to_graphene(
to_pydantic_class(typing.Dict[str, str])
)
def test_typing_defaultdict_field(self):
with pytest.raises(pydantic2graphene.FieldNotSupported):
pydantic2graphene.to_graphene(
to_pydantic_class(typing.DefaultDict[str, str])
)
def test_typing_set_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(typing.Set[str])
)
expected_value = """
type FakeGql {
field: [String!]!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_typing_frozenset_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(typing.FrozenSet[str])
)
expected_value = """
type FakeGql {
field: [String!]!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_typing_sequence_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(typing.Sequence[str])
)
expected_value = """
type FakeGql {
field: [String!]!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_typing_iterable_field(self, normalize_sdl):
not_supported = str(pydantic.VERSION)[:3] in {
"1.3",
"1.2",
"1.1",
"1.0",
}
if not_supported:
with pytest.raises(pydantic2graphene.FieldNotSupported):
pydantic2graphene.to_graphene(
to_pydantic_class(typing.Type[str])
)
return
value = pydantic2graphene.to_graphene(
to_pydantic_class(typing.Iterable[str])
)
expected_value = """
type FakeGql {
field: [String!]!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_typing_type_field(self):
with pytest.raises(pydantic2graphene.FieldNotSupported):
pydantic2graphene.to_graphene(to_pydantic_class(typing.Type[str]))
def test_typing_callable_field(self):
with pytest.raises(pydantic2graphene.FieldNotSupported):
pydantic2graphene.to_graphene(
to_pydantic_class(typing.Callable[[int], str])
)
def test_typing_pattern_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(typing.Pattern)
)
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_ipaddress_ipv4address_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(ipaddress.IPv4Address)
)
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_ipaddress_ipv4interface_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(ipaddress.IPv4Interface)
)
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_ipaddress_ipv4network_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(ipaddress.IPv4Network)
)
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_ipaddress_ipv6address_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(ipaddress.IPv6Address)
)
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_ipaddress_ipv6interface_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(ipaddress.IPv6Interface)
)
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_ipaddress_ipv6network_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(ipaddress.IPv6Network)
)
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_enum_field(self, normalize_sdl):
class EnumTest(enum.Enum):
ONE = 1
TWO = 2
value = pydantic2graphene.to_graphene(to_pydantic_class(EnumTest))
expected_value = """
enum EnumTest {
ONE
TWO
}
type FakeGql {
field: EnumTest!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_int_enum_field(self, normalize_sdl):
class Enumer(enum.IntEnum):
ONE = 1
TWO = 2
value = pydantic2graphene.to_graphene(to_pydantic_class(Enumer))
expected_value = """
enum Enumer {
ONE
TWO
}
type FakeGql {
field: Enumer!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_decimal_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(decimal.Decimal)
)
expected_value = """
type FakeGql {
field: Float!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pathlib_path_field(self):
with pytest.raises(pydantic2graphene.FieldNotSupported):
pydantic2graphene.to_graphene(to_pydantic_class(pathlib.Path))
def test_uuid_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(to_pydantic_class(uuid.UUID))
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_filepath_field(self):
with pytest.raises(pydantic2graphene.FieldNotSupported):
pydantic2graphene.to_graphene(to_pydantic_class(pydantic.FilePath))
def test_pydantic_directorypath_field(self):
with pytest.raises(pydantic2graphene.FieldNotSupported):
pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.DirectoryPath)
)
def test_pydantic_pyobject_field(self):
with pytest.raises(pydantic2graphene.FieldNotSupported):
pydantic2graphene.to_graphene(to_pydantic_class(pydantic.PyObject))
def test_pydantic_color_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.color.Color)
)
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_json_field(self, normalize_sdl):
        graphene_not_supported = graphene.__version__ in {
"1.4.2",
"1.4.1",
"1.4",
"1.3",
"1.2",
"1.1.3",
"1.1.2",
"1.1.1",
"1.1",
"1.0.2",
"1.0.1",
"1.0",
}
pydantic_not_supported = str(pydantic.VERSION)[:3] in {
"1.2",
"1.1",
"1.0",
}
        if graphene_not_supported or pydantic_not_supported:
with pytest.raises(pydantic2graphene.FieldNotSupported):
pydantic2graphene.to_graphene(to_pydantic_class(pydantic.Json))
return
value = pydantic2graphene.to_graphene(to_pydantic_class(pydantic.Json))
expected_value = """
type FakeGql {
field: JSONString
}
scalar JSONString
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_payment_card_number_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.PaymentCardNumber)
)
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_any_url_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.AnyUrl)
)
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_any_http_url_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.AnyHttpUrl)
)
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_http_url_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.HttpUrl)
)
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_postgresdsn_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.PostgresDsn)
)
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_redisdsn_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.RedisDsn)
)
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_stricturl_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.stricturl())
)
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_uuid1_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.UUID1)
)
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_uuid3_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.UUID3)
)
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_uuid4_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.UUID4)
)
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_uuid5_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.UUID5)
)
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_secret_bytes_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.SecretBytes)
)
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_secret_str_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.SecretStr)
)
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_ipv_any_address_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.IPvAnyAddress)
)
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_ipv_any_interface_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.IPvAnyInterface)
)
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_ipv_any_network_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.IPvAnyNetwork)
)
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_negative_float_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.NegativeFloat)
)
expected_value = """
type FakeGql {
field: Float!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_negative_int_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.NegativeInt)
)
expected_value = """
type FakeGql {
field: Int!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_positive_float_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.PositiveFloat)
)
expected_value = """
type FakeGql {
field: Float!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_positive_int_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.PositiveInt)
)
expected_value = """
type FakeGql {
field: Int!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_conbytes_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.conbytes())
)
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_condecimal_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.condecimal())
)
expected_value = """
type FakeGql {
field: Float!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_confloat_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.confloat())
)
expected_value = """
type FakeGql {
field: Float!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_conint_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.conint())
)
expected_value = """
type FakeGql {
field: Int!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_conlist_int_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.conlist(int, min_items=1, max_items=4))
)
expected_value = """
type FakeGql {
field: [Int!]!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_conlist_str_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.conlist(str, min_items=1, max_items=4))
)
expected_value = """
type FakeGql {
field: [String!]!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_conset_int_field(self, normalize_sdl):
not_implemented = str(pydantic.VERSION)[:3] in {
"1.5",
"1.4",
"1.3",
"1.2",
"1.1",
"1.0",
}
if not_implemented:
            # AttributeError: module 'pydantic' has no attribute 'conset'
            # Pydantic versions < 1.6 raise an error when using conset
return
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.conset(int, min_items=1, max_items=4))
)
expected_value = """
type FakeGql {
field: [Int!]!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_conset_str_field(self, normalize_sdl):
not_implemented = str(pydantic.VERSION)[:3] in {
"1.5",
"1.4",
"1.3",
"1.2",
"1.1",
"1.0",
}
if not_implemented:
            # AttributeError: module 'pydantic' has no attribute 'conset'
            # Pydantic versions < 1.6 raise an error when using conset
return
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.conset(str, min_items=1, max_items=4))
)
expected_value = """
type FakeGql {
field: [String!]!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
def test_pydantic_constr_field(self, normalize_sdl):
value = pydantic2graphene.to_graphene(
to_pydantic_class(pydantic.constr())
)
expected_value = """
type FakeGql {
field: String!
}
"""
assert normalize_sdl(value) == normalize_sdl(expected_value)
@pytest.mark.parametrize('base_type, graphene_type_name', (
(str, 'String'),
(int, 'Int'),
(float, 'Float'),
(decimal.Decimal, 'Float'),
(bytes, 'String'),
))
def test_subclass_of_supported_fields(self, normalize_sdl, base_type,
graphene_type_name):
class MyCustomSubclass(base_type):
pass
value = pydantic2graphene.to_graphene(
to_pydantic_class(MyCustomSubclass)
)
expected_value = """
type FakeGql {
field: %s!
}
""" % graphene_type_name
assert normalize_sdl(value) == normalize_sdl(expected_value)
|
506126
|
import os
PROJECT_ID = 'GOOGLE_CLOUD_PROJECT'
def get_application_id():
"""
Get the associated Google Cloud Project ID
:return:
"""
# NOTE: Google interchangeably refers to this identifier as application_id or project_id
project_id = os.getenv(PROJECT_ID, '')
# ensure project id is a non-empty string
if project_id:
return project_id
# in all other cases, OBJECTION!
raise RuntimeError(f'{PROJECT_ID} is not set. Set and retry.')
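if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): prints the
    # project id when GOOGLE_CLOUD_PROJECT is set, otherwise the RuntimeError
    # above is raised.
    print(get_application_id())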
|
506139
|
import sys
if sys.version_info < (3, 0):
import testcase
else:
from . import testcase
#
# Tests elements of the heading levels
#
class TestLevelDepth(testcase.TestCase):
title = "Test Level Depth"
    # Test that headings can go at least to level 6 when the depth is unlimited
def test_level_depth_unlimited(self):
self.set_settings({'toc_level': 0})
self.set_text(self.text())
self.run_plugin()
self.find('* Heading 1')
self.find('----- Heading 6')
    # Test that the "toc_level" setting limits the heading depth
def test_level_depth_limited(self):
self.set_settings({'toc_level': 2})
self.set_text(self.text())
self.run_plugin()
if self.get_text().find('----- Heading 6') == -1:
self.ok()
else:
self.error('Should not find heading level 6')
def text(self):
return """
/*
* TOC
*
* Heading 1
* +++++ Heading 6
*/
// > Heading 1
// >>>>>> Heading 6
"""
|
506144
|
import pandas as pd
import time
import os
from concierge import data_io
from concierge import constants
from concierge.collaborative_filter import CollaborativeFilter
from concierge.concierge_queue import ConciergeQueue
from river import metrics
import redis
cache = redis.Redis(host=constants.REDIS_HOST, port=6379, db=0)
df = data_io.load_dataset(',',constants.PLACE_RATINGS_FILE)
max_ts,dataset = CollaborativeFilter.df_to_timestamp_and_dataset(df)
cf = CollaborativeFilter(constants.CF_PLACE,CollaborativeFilter.fm_model(),metrics.MAE() + metrics.RMSE())
cf.timestamp = max_ts
# cf.data_stats(dataset)
tLearnStart = time.time()
cf.learn(dataset,max_ts)
# cf.evaluate(dataset)
tLearnEnd = time.time()
print('tLearn',tLearnEnd-tLearnStart)
pq = ConciergeQueue(constants.CF_PLACE,constants.place_queue,constants.PLACE_RATINGS_FILE)
pq.popularity_map(df)
timestamp = int(time.time())
new_model_metric_path = '/tmp/' + str(timestamp)
cf.export_to_s3(new_model_metric_path)
# clear local model files
os.system('rm -rf ' + new_model_metric_path)
os.system('rm /tmp/model.sav')
os.system('rm /tmp/metric.sav')
# make sure it works
load_cf = CollaborativeFilter(constants.CF_PLACE)
tLoadStart = time.time()
load_cf.import_from_s3()
tLoadEnd = time.time()
print('tImport from s3',tLoadEnd-tLoadStart)
print('metric',cf.metric)
print('model',cf.model)
user_id = '1<PASSWORD>'
# grab the feed places this user already has ratings for
df_user = df.loc[df['user_id'] == user_id]
item_ids = df_user['item_id'].tolist()
print({ 'user_id': user_id, 'item_ids': item_ids})
scores = cf.predict(user_id,item_ids)
print('predictions',scores)
|
506155
|
import numpy as np
class Tolerance(object):
'''tolerance level class'''
def __init__(self,tol_type,tmin,tmax,nt):
'''
Input:
tol_type: specify const, linear,exp or log; default is exp
tmin: minimum threshold for metric
tmax: maximum threshold for metric
nt: number of iterations
'''
self.tol_type = tol_type
        self.nt = nt
        self.tmin = tmin
        self.tmax = tmax
self.tol = self.set_tolerance()
def set_tolerance(self):
'''
method to set tolerance type either const, linear,exp or log
'''
if self.tol_type =="const":
return self.const_tol()
elif self.tol_type =="linear":
return self.linear_tol()
elif self.tol_type =="exp":
return self.exp_tol()
elif self.tol_type =="log":
return self.log_tol()
else:
            raise ValueError("Specify either const, linear, exp or log for tolerance class")
def linear_tol(self):
'''Linearly decreasing tolerance level'''
return np.linspace(self.tmax,self.tmin,num=self.nt)
def log_tol(self):
'''Log decreasing tolerance level'''
return np.logspace(self.tmax,self.tmin,num=self.nt)
def const_tol(self):
'''Constant tolerance level for every iteration'''
return np.ones(self.nt)*self.tmin
def exp_tol(self):
'''Exponentially decreasing tolerance level'''
return np.logspace(np.log10(self.tmax), np.log10(self.tmin), num=self.nt)
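if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): an exponentially
    # decreasing schedule from 1.0 down to 1e-3 over 10 iterations.
    schedule = Tolerance(tol_type='exp', tmin=1e-3, tmax=1.0, nt=10)
    print(schedule.tol)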
|
506221
|
import os
directory = os.path.dirname(os.path.realpath(__file__))
class TestResultImaging:
pass
|
506229
|
from rest_framework import serializers
from messaging.models import Contact, ContactAssignment, ContactRole, Email
from peering_manager.api import WritableNestedSerializer
class NestedContactRoleSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(
view_name="messaging-api:contactrole-detail"
)
class Meta:
model = ContactRole
fields = ["id", "url", "display", "name", "slug"]
class NestedContactSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name="messaging-api:contact-detail")
class Meta:
model = Contact
fields = ["id", "url", "display", "name"]
class NestedContactAssignmentSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(
view_name="messaging-api:contactassignment-detail"
)
contact = NestedContactSerializer()
role = NestedContactRoleSerializer()
class Meta:
model = ContactAssignment
fields = ["id", "url", "display", "contact", "role"]
class NestedEmailSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name="messaging-api:email-detail")
class Meta:
model = Email
fields = ["id", "url", "display", "name"]
|
506247
|
def col_groupby_pdf(data, col, metric_col, ascending=False):
    '''Groupby complementary cumulative distribution
    Takes in a dataframe with at least 2 columns, and returns a
    groupby table with, per group, the share of rows whose metric_col
    exceeds each value (1 - CDF), reported in a column labelled 'PDF'.
data | DataFrame | a pandas dataframe with the data
col | str | name of the column to be grouped by
metric_col | str | name of the column to be evaluated against
ascending | bool | the direction of sorting to be applied
'''
def _pdf(x):
return round(1 - (x.groupby(metric_col).agg('count') / len(x)).cumsum(), 2)
out = data[[col, metric_col]].groupby(col).apply(_pdf).rename(columns = {col: 'PDF'}).sort_values(metric_col, ascending=ascending)
return out
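if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): per 'segment',
    # the share of rows whose 'score' exceeds each observed value.
    import pandas as pd
    demo = pd.DataFrame({'segment': ['a', 'a', 'a', 'b', 'b'],
                         'score': [1, 2, 2, 1, 3]})
    print(col_groupby_pdf(demo, 'segment', 'score'))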
|
506254
|
from django.utils.functional import cached_property
from waldur_core.structure.tests import fixtures as structure_fixtures
from . import factories
class SupportFixture(structure_fixtures.ServiceFixture):
@cached_property
def issue(self):
issue = factories.IssueFactory(customer=self.customer, project=self.project)
factories.SupportCustomerFactory(user=issue.caller)
return issue
@cached_property
def comment(self):
return factories.CommentFactory(issue=self.issue)
@cached_property
def feedback(self):
return factories.FeedbackFactory(issue=self.issue)
|
506271
|
import ipywidgets as widgets
from traitlets import Unicode, Any
# See js/lib/widgets.js for the frontend counterpart to this file.
@widgets.register
class PlottingProgressBar(widgets.DOMWidget):
"""Progressivis PlottingProgressBar widget."""
# Name of the widget view class in front-end
_view_name = Unicode('PlottingProgressBarView').tag(sync=True)
# Name of the widget model class in front-end
_model_name = Unicode('PlottingProgressBarModel').tag(sync=True)
# Name of the front-end module containing widget view
_view_module = Unicode('progressivis-nb-widgets').tag(sync=True)
# Name of the front-end module containing widget model
_model_module = Unicode('progressivis-nb-widgets').tag(sync=True)
# Version of the front-end module containing widget view
_view_module_version = Unicode('^0.1.0').tag(sync=True)
# Version of the front-end module containing widget model
_model_module_version = Unicode('^0.1.0').tag(sync=True)
data = Any('{}').tag(sync=True)
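if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): the widget can be
    # constructed in any Python process, but rendering it requires a Jupyter
    # front-end with the 'progressivis-nb-widgets' extension installed.
    bar = PlottingProgressBar()
    bar.data = '{"progress": 0.5}'
    print(bar.data)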
|
506278
|
import os
import shutil
from pathlib import Path
for root, dirs, files in os.walk('archive_en_US'):
root = Path(root)
for file in files:
moved_root = Path('en_US') / root.relative_to('archive_en_US')
shutil.move(root / file, moved_root / file)
os.remove(moved_root / (Path(file).stem + '.rst'))
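# Illustrative trace (assumed layout, not part of the original script): a file
# such as archive_en_US/library/foo.po is moved to en_US/library/foo.po, and the
# pre-existing en_US/library/foo.rst next to it is then deleted.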
|
506325
|
from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator
from django.shortcuts import redirect
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import UserPassesTestMixin, LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.views.generic import (
CreateView,
UpdateView,
DeleteView,
View,
)
from hitcount.views import HitCountDetailView
from comments.models import Comment, Reply
from .models import Category, Post
# ---------- global setup start ---------
User = get_user_model()
users = User.objects.all()
posts = Post.objects.all().order_by('-created_on')
latest_posts = Post.objects.order_by('-created_on')[0:3]
popular_posts = Post.objects.order_by('-hit_count__hits')[:6]
categories = Category.objects.all()
# pagination
def paginate(req, posts=None, page_num=5):
paginator = Paginator(posts, page_num)
page_number = req.GET.get('page')
return paginator.get_page(page_number)
# view for blog/add_post.html
class PostCreateView(LoginRequiredMixin, CreateView):
model = Post
template_name = "blog/add_post.html"
fields = ['title', 'post_thumbnail', 'tags',
'content', 'categories',
'featured',
]
success_url = '/blog'
def form_valid(self, form):
form.instance.author = self.request.user
return super().form_valid(form)
# view for blog/update_post.html
class PostUpdateView(SuccessMessageMixin, LoginRequiredMixin, UserPassesTestMixin, UpdateView):
model = Post
template_name = "blog/update_post.html"
fields = ['title', 'post_thumbnail', 'tags',
'content', 'categories', 'featured',
]
success_message = 'Post Updated'
def form_valid(self, form):
form.instance.author = self.request.user
return super().form_valid(form)
def test_func(self):
post = self.get_object()
if self.request.user == post.author or self.request.user.is_superuser:
return True
return False
class PostDeleteView(SuccessMessageMixin, LoginRequiredMixin, UserPassesTestMixin, DeleteView):
model = Post
success_url = '/blog'
success_message = 'Post Deleted'
def test_func(self):
post = self.get_object()
if self.request.user == post.author or self.request.user.is_superuser:
return True
return False
# view for blog/post_details.html
class PostDetailView(HitCountDetailView):
model = Post
template_name = 'blog/post_detail.html'
context_object_name = 'post'
count_hit = True
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
this_post = Post.objects.filter(id=self.object.id)
tags = [tag.strip() for tag in this_post[0].tags.split(',')]
context["latest_posts"] = latest_posts
context["popular_posts"] = popular_posts
context["tags"] = tags
return context
# view for blog/user_posts.html
def UserPostsView(request, username):
user = get_object_or_404(User, username=username)
posts = Post.objects.filter(author=user).order_by('-created_on')
page_obj = paginate(request, posts=posts, page_num=8)
posts_count = posts.count()
comments_count = Comment.objects.filter(author=user).count()
replies_count = Reply.objects.filter(author=user).count()
total_user_comments = int(comments_count) + int(replies_count)
context = {
'author_user': user,
'posts_count': posts_count,
'comments_count': total_user_comments,
'page_obj': page_obj,
}
return render(request, 'blog/user_posts.html', context)
# view for blog/posts_in_category.html
def Posts_in_CategoryView(request, id):
category = get_object_or_404(Category, id=id)
posts_in_cat = category.post_set.all()
page_obj = paginate(request, posts=posts_in_cat, page_num=8)
context = {
'posts_in_cat': posts_in_cat,
'cat_name': category,
'page_obj': page_obj,
}
return render(request, 'blog/posts_in_category.html', context)
# this view will add a category if it doesn't exist
class AddCategoryView(LoginRequiredMixin, View):
def post(self, request, *args, **kwargs):
category = request.POST['category']
for_not_admin_view = request.POST.get('add_category', 'not_admin')
if category and category.lower() not in [cat.category.lower() for cat in categories]:
cat = Category.objects.create(category=category)
cat.save()
messages.success(request, f'{category} added as Category')
if for_not_admin_view == 'add_post_view':
return redirect('add_post')
else:
return redirect('admin-dashboard')
else:
messages.error(request, f'{category} is already a Category')
return redirect('admin-dashboard')
# view for blog/tag_posts.html
def TagPostsView(request, tag):
posts_in_tag = Post.objects.filter(tags__icontains=tag).all()
page_obj = paginate(request, posts=posts_in_tag, page_num=7)
context = {
'posts_in_tag': posts_in_tag,
'page_obj': page_obj,
'tag_name': tag,
}
return render(request, 'blog/tag_posts.html', context)
|
506349
|
import os
from pathlib import Path
import librosa
import numpy as np
import soundfile
from tqdm import tqdm
###
noisy_dir = Path("~/Datasets/simulation_array26cm_20210119_shuf100/noisy").expanduser().absolute()
clean_dir = Path("~/Datasets/simulation_array26cm_20210119_shuf100/clean").expanduser().absolute()
text_dir = Path("~/Datasets/simulation_array26cm_20210119_shuf100/txt").expanduser().absolute()
dist_dir = Path("~/Datasets/simulation_array26cm_20210119_shuf100/dist_single").expanduser().absolute()
(dist_dir / "noisy").mkdir(exist_ok=True, parents=True)
(dist_dir / "clean").mkdir(exist_ok=True)
####
noisy_file_paths = librosa.util.find_files(noisy_dir.as_posix(), ext="wav")
for noisy_file_path in tqdm(noisy_file_paths):
basename = os.path.basename(noisy_file_path)
mark = os.path.splitext(os.path.basename(noisy_file_path))[0].split("_")[0:2]
mark = "_".join(mark) # single_AF0976
print(mark)
if mark[:6] != "single":
continue
clean_file_path = clean_dir / basename
txt_file_path = text_dir / (mark + ".wav.txt")
noisy_wav, _ = librosa.load(noisy_file_path, sr=16000, mono=False)
clean_wav, _ = librosa.load(clean_file_path, sr=16000, mono=False)
valid_noisy_wav = np.array([])
valid_clean_wav = np.array([])
with open(txt_file_path, "r") as f:
lines = f.readlines()
for line in lines:
name, start_time, end_time = line.split(" ")
if name != "sil":
if valid_clean_wav.size == 0:
valid_noisy_wav = noisy_wav[:, int(start_time):int(end_time)]
valid_clean_wav = clean_wav[int(start_time):int(end_time)]
else:
valid_noisy_wav = np.concatenate((valid_noisy_wav, noisy_wav[:, int(start_time):int(end_time)]), axis=-1)
valid_clean_wav = np.concatenate((valid_clean_wav, clean_wav[int(start_time):int(end_time)]))
soundfile.write((dist_dir / "noisy" / basename).as_posix(), valid_noisy_wav.T, samplerate=16000)
soundfile.write((dist_dir / "clean" / basename).as_posix(), valid_clean_wav, samplerate=16000)
|
506407
|
import logging
import grequests
import newrelic
import pylibmc as memcache
from django.conf import settings
from django.http import HttpResponseRedirect
from core.api.resources import Site
from core.api.resources import WhoAmI
logger = logging.getLogger('core.middleware.context')
class ContextMiddleware():
"""
Provides request context such as the current site and authentication status.
"""
def __init__(self):
self.mc = memcache.Client(['%s:%d' % (settings.MEMCACHE_HOST, settings.MEMCACHE_PORT)])
def process_request(self, request):
"""
Checks for access_token cookie and appends it to the request object if present.
All request objects have a view_requests attribute which is a list of requests
that will be executed by grequests to fetch data for the view.
"""
request.access_token = None
request.whoami_url = ''
request.view_requests = []
        if 'access_token' in request.COOKIES:
# Clean up empty access token.
if request.COOKIES['access_token'] == '':
response = HttpResponseRedirect('/')
response.set_cookie('access_token', '', expires="Thu, 01 Jan 1970 00:00:00 GMT")
return response
request.access_token = request.COOKIES['access_token']
request.whoami_url, params, headers = WhoAmI.build_request(request.get_host(), request.access_token)
request.view_requests.append(grequests.get(request.whoami_url, params=params, headers=headers))
newrelic.agent.add_custom_parameter('access_token', request.access_token[:6])
request.site_url, params, headers = Site.build_request(request.get_host())
request.view_requests.append(grequests.get(request.site_url, params=params, headers=headers))
|
506434
|
from .Function import Cfunction
class ProximalFunction(Cfunction):
"""
Class that represents a function with its proximal mapping
"""
def __init__(self,prox):
self._prox=prox
@property
def prox(self):
return self._prox
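# Usage sketch (illustrative, not part of the original module): wrapping the
# soft-thresholding operator, i.e. the proximal mapping of t * ||x||_1
# (numpy assumed):
#   soft = ProximalFunction(prox=lambda x, t: np.sign(x) * np.maximum(np.abs(x) - t, 0))
#   soft.prox(x, t)  # evaluates the stored proximal mapping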
|
506436
|
import os
import time
import argparse
import pandas as pd
from SR import SequentialRules
parser = argparse.ArgumentParser()
parser.add_argument('--prune', type=int, default=0, help="Association Rules Pruning Parameter")
parser.add_argument('--K', type=int, default=20, help="K items to be used in Recall@K and MRR@K")
parser.add_argument('--steps', type=int, default=10, help="Max Number of steps to walk back from the currently viewed item")
parser.add_argument('--weighting', type=str, default='div', help="Weighting function for the previous items (linear, same, div, log, qudratic)")
parser.add_argument('--itemid', default='ItemID', type=str)
parser.add_argument('--sessionid', default='SessionID', type=str)
parser.add_argument('--item_feats', default='', type=str,
help="Names of Columns containing items features separated by #")
parser.add_argument('--valid_data', default='recSys15Valid.txt', type=str)
parser.add_argument('--train_data', default='recSys15TrainOnly.txt', type=str)
parser.add_argument('--data_folder', default='/Dataset/RecSys_Dataset_After/', type=str)
# Get the arguments
args = parser.parse_args()
train_data = os.path.join(args.data_folder, args.train_data)
x_train = pd.read_csv(train_data)
valid_data = os.path.join(args.data_folder, args.valid_data)
x_valid = pd.read_csv(valid_data)
x_valid.sort_values(args.sessionid, inplace=True)
items_feats = [args.itemid]
ffeats = args.item_feats.strip().split("#")
if ffeats[0] != '':
items_feats.extend(ffeats)
print('Finished Reading Data \nStart Model Fitting...')
# Fitting SR Model
t1 = time.time()
model = SequentialRules(session_key = args.sessionid, item_keys = items_feats,
pruning=args.prune, steps=args.steps, weighting=args.weighting)
model.fit(x_train)
t2 = time.time()
print('End Model Fitting with total time =', t2 - t1, '\n Start Predictions...')
# Test Set Evaluation
test_size = 0.0
hit = 0.0
MRR = 0.0
cur_length = 0
cur_session = -1
last_items = []
t1 = time.time()
index_item = x_valid.columns.get_loc(args.itemid)
index_session = x_valid.columns.get_loc(args.sessionid)
train_items = model.items_features.keys()
counter = 0
for row in x_valid.itertuples( index=False ):
counter += 1
if counter % 5000 == 0:
print('Finished Prediction for ', counter, 'items.')
session_id, item_id = row[index_session], row[index_item]
if session_id != cur_session:
cur_session = session_id
last_items = []
cur_length = 0
if not item_id in last_items and item_id in train_items:
if len(last_items) > cur_length: #make prediction
cur_length += 1
test_size += 1
# Predict the most similar items to items
predictions = model.predict_next(last_items, k = args.K)
#print('preds:', predictions)
# Evaluation
rank = 0
for predicted_item in predictions:
rank += 1
if predicted_item == item_id:
hit += 1.0
MRR += 1/rank
break
last_items.append(item_id)
t2 = time.time()
print('Recall: {}'.format(hit / test_size))
print ('\nMRR: {}'.format(MRR / test_size))
print('End Model Predictions with total time =', t2 - t1)
|
506463
|
from handlers.base_handler import BaseHandler
from lib.objects import Message
class ObsceneHandler(BaseHandler):
TOTAL_AMOUNT = 3
def _set_pattern(self):
self.pattern = 'obscene.warning'
def _need_data(self) -> bool:
return True
def _works_in_chat(self) -> bool:
return True
def check_intent(self) -> bool:
return True
async def handle(self, message: Message) -> bool:
await super().handle(message)
if not self.client_object.is_curator and not self.client_object.is_leader:
return False
if message.author.is_curator or message.author.is_leader:
return False
user_id = message.from_id
if user_id not in self.data_handler.data[message.community_id][message.chat_id].keys():
self.data_handler.data[message.community_id][message.chat_id][user_id] = {
'amount': 0,
}
user_data = self.data_handler.data[message.community_id][message.chat_id][user_id]
user_data['amount'] += 1
self.data_handler.data[message.community_id][message.chat_id][user_id] = user_data
if user_data['amount'] >= self.TOTAL_AMOUNT:
user_data['amount'] = 0
self.data_handler.data[message.community_id][message.chat_id][user_id] = user_data
self.data_handler.save_data()
await self.client_object.kick(community_id=message.community_id,
user_id=message.from_id, chat_id=message.chat_id)
await self.client_object.delay_action.wait()
            await self.answer('Да будет кикнут этот злостный нарушитель')  # "Let this malicious offender be kicked"
return True
self.data_handler.save_data()
return False
|
506495
|
import torch
import torch.nn.functional as F
def _differentiation_1_distance(X):
    # Perform first-order differencing between consecutive points of the X dataset (time series)
    # Only for axis=0
X = X.permute(2, 1, 0)
aux = X - F.pad(X, (1, 0))[:, :, :-1]
return aux.permute(2,1,0)
def diff(X):
'''
Only works for two shapes:
shape 1: experts, samples, classes
shape 2: experts1, experts2, samples, classes
'''
x_len = len(X.shape)
if x_len == 3:
return _differentiation_1_distance(X)
elif x_len == 4:
exp1, exp2, samples, clases = X.shape
X = X.reshape((exp1, exp2 * samples, clases))
aux = _differentiation_1_distance(X)
return aux.reshape((exp1, exp2, samples, clases))
# =============================================================================
# TNORMS
# =============================================================================
def hamacher_product(x, y):
    # Hamacher t-norm product; the small epsilon guards against 0/0 when x == y == 0.
    # e.g. hamacher_product(0.5, 0.5) = 0.25 / 0.75 ~= 0.333
    return x*y / (x + y - x*y + 0.00000001)
# =============================================================================
# TCNORMS
# =============================================================================
def torch_max(x, axis=0, keepdims=False):
v, i = torch.max(x, dim=axis, keepdims=False)
return v
# =============================================================================
# INTEGRALS
# =============================================================================
def torch_mean(x, axis=0, keepdims=False):
v = torch.mean(x, dim=axis, keepdims=False)
return v
def generate_cardinality(N, p = 2):
'''
Generate the cardinality measure for a N-sized vector.
'''
return (torch.arange(N, 0, -1)/ N)**p
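# Illustrative values (not part of the original module): for N=4, p=2 the
# measure is ([4, 3, 2, 1] / 4) ** 2 = [1.0, 0.5625, 0.25, 0.0625], i.e. the
# symmetric cardinality measure consumed by the fuzzy integrals below.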
def generate_cardinality_matrix(N, matrix_shape, p = 2):
'''
Generate the cardinality measure for a N-sized vector, and returns it in a matrix shape.
Use this if you cannot broadcast generate_cardinality() correctly.
N and matrix_shape must be coherent (matrix_shape[0] == N)
'''
res = torch.zeros(matrix_shape)
dif_elements = [(x/ N)**p for x in torch.arange(N, 0, -1)]
    for ix, element in enumerate(dif_elements):
        res[ix, ...] = element
return res
#ALL TORCH SUGENO IMPL ARE DIRECT TRANSLATIONS FROM THE NUMPY ONES
def torch_sugeno(X, measure=None, axis = 0, f1 = torch.minimum, f2 = torch.amax, keepdims=False):
'''
Aggregates data using a generalization of the Choquet integral.
:param X: Data to aggregate.
:param measure: Vector containing the measure numeric values (Symmetric!)
    :param axis: Axis along which to aggregate.
'''
if measure is None:
measure = generate_cardinality(X.shape[axis])
new_shape = [1] * len(X.shape)
new_shape[axis] = len(measure)
measure = torch.reshape(measure, new_shape)
X_sorted, indices = torch.sort(X, dim=axis)
return f2(f1(X_sorted, measure), axis=axis, keepdims=keepdims)
def torch_choquet(X, measure=None, axis=0, keepdims=True):
'''
Aggregates a numpy array alongise an axis using the choquet integral.
:param X: Data to aggregate.
:param measure: Vector containing the measure numeric values (Symmetric!)
    :param axis: Axis along which to aggregate.
'''
if measure is None:
        measure = generate_cardinality(X.shape[axis])  # Uses an implementation trick not valid for generalizations
measure_twin = torch.cat((measure[1:], torch.tensor([0])))
measure = measure - measure_twin
new_shape = [1] * len(X.shape)
new_shape[axis] = len(measure)
measure = torch.reshape(measure, new_shape)
X_sorted, indices = torch.sort(X, axis = axis)
X_agg = torch.sum(X_sorted * measure, dim=axis, keepdims=keepdims)
return X_agg
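# Usage sketch for torch_choquet (illustrative, not part of the original
# module): fusing 5 expert outputs for a batch of 3 samples and 4 classes
# along the expert axis.
#   scores = torch.rand(5, 3, 4)
#   fused = torch_choquet(scores, axis=0)  # shape (1, 3, 4); keepdims=True by default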
def torch_CF(X, measure=None, axis=0, tnorm=hamacher_product, keepdims=False):
'''
    Aggregates a tensor along an axis using a t-norm generalization of the Choquet integral (CF).
:param X: Data to aggregate.
:param measure: Vector containing the measure numeric values (Symmetric!)
    :param axis: Axis along which to aggregate.
'''
if measure is None:
measure = generate_cardinality(X.shape[axis])
new_shape = [1] * len(X.shape)
new_shape[axis] = len(measure)
measure = torch.reshape(measure, new_shape)
X_sorted, indices = torch.sort(X, axis = axis)
assert axis == 0 #Not implemented for other axis
X_differenced = diff(X_sorted)
X_agg = torch.sum(tnorm(X_differenced, measure), dim=axis, keepdims=keepdims)
return X_agg
def torch_CF1F2(X, measure=None, axis=0, f1=torch.minimum, f2=torch.minimum, keepdims=False):
'''
Aggregates data using a generalization of the Choquet integral.
:param X: Data to aggregate.
:param measure: Vector containing the measure numeric values (Symmetric!)
    :param axis: Axis along which to aggregate.
'''
if measure is None:
measure = generate_cardinality(X.shape[axis])
new_shape = [1] * len(X.shape)
new_shape[axis] = len(measure)
measure = torch.reshape(measure, new_shape)
X1_sorted, indices = torch.sort(X, axis = axis)
X2 = diff(X1_sorted)
X2_sorted = X1_sorted - X2
F_1 = f1(X1_sorted, measure)
F_2 = f2(X2_sorted, measure)
X_agg = torch.sum(F_1 - F_2, dim=axis, keepdims=keepdims)
return X_agg
class CCA_unimodal(torch.nn.Module):
    def __init__(self, agg1, agg2):
        """
        Convex combination of two aggregation functions with a single
        learnable mixing weight alpha.
        """
super(CCA_unimodal, self).__init__()
#HARDCODED AGGS
self.agg1 = agg1
self.agg2 = agg2
self.alpha = torch.tensor(0.5, requires_grad=True)
self.myparameters = torch.nn.Parameter(self.alpha)
self.softmax = torch.nn.Softmax(dim=1)
def forward(self, x):
"""
In the forward function we accept a Tensor of input data and we must return
a Tensor of output data. We can use Modules defined in the constructor as
well as arbitrary operators on Tensors.
"""
#HARDCODED FORWARD
#Phase 1
c1 = self.agg1(x, axis=0, keepdims=False)
c2 = self.agg2(x, axis=0, keepdims=False)
c_f = c1 * self.alpha + c2 * (1 - self.alpha)
logits = self.softmax(c_f)
return logits
class CCA_adaptative_unimodal(torch.nn.Module):
def __init__(self, alfa_shape, agg1, agg2, activation_function=torch.sigmoid):
"""
        Adaptive convex combination of two aggregation functions: the mixing
        coefficient alpha is computed from the input with a learnable affine map
        (w, b) followed by activation_function.
"""
super(CCA_adaptative_unimodal, self).__init__()
#HARDCODED AGGS
self.agg1 = agg1
self.agg2 = agg2
self.w = torch.nn.Parameter(torch.rand(alfa_shape, 1, 1), requires_grad=True)
self.b = torch.nn.Parameter(torch.rand(alfa_shape, 1, 1), requires_grad=True)
self.act = activation_function
self.softmax = torch.nn.Softmax(dim=1)
def forward(self, x, axis=0):
"""
In the forward function we accept a Tensor of input data and we must return
a Tensor of output data. We can use Modules defined in the constructor as
well as arbitrary operators on Tensors.
"""
#HARDCODED FORWARD
#Phase 1
c1 = self.agg1(x, axis=0, keepdims=False)
c2 = self.agg2(x, axis=0, keepdims=False)
alpha = torch.sum(x * self.w + self.b, dim=axis)
alpha = self.act(alpha)
c_f = c1 * alpha + c2 * (1 - alpha)
logits = self.softmax(c_f)
return logits
class CCA_multimodal(torch.nn.Module):
def __init__(self, alfa_shape_s1, s1_agg1, s1_agg2, s2_agg1, s2_agg2):
"""
        alfa_shape_s1 should be n_classifiers2.
        Convex combination of aggregations for a two-stage (multimodal) ensemble:
        each stage mixes two aggregation functions with its own learnable alpha.
"""
super(CCA_multimodal, self).__init__()
#HARDCODED AGGS
self.s1_agg1 = s1_agg1
self.s1_agg2 = s1_agg2
self.s2_agg1 = s2_agg1
self.s2_agg2 = s2_agg2
self.alpha1 = torch.nn.Parameter(torch.rand(alfa_shape_s1), requires_grad=True)
self.alpha2 = torch.nn.Parameter(torch.tensor(0.5, requires_grad=True))
self.softmax = torch.nn.Softmax(dim=1)
def forward(self, x):
"""
x shape should be:
        n_classifiers1 x n_classifiers2 x samples x classes
"""
#HARDCODED FORWARD
#Phase 1
c1 = self.s1_agg1(x, axis=0 , keepdims=False)
c2 = self.s1_agg2(x, axis=0 , keepdims=False)
c_f = c1 * self.alpha1 + c2 * (1 - self.alpha1)
c_f1 = self.s2_agg1(c_f, axis=0 , keepdims=False)
c_f2 = self.s2_agg2(c_f, axis=0 , keepdims=False)
c_f2 = c_f1 * self.alpha2 + c_f2 * (1 - self.alpha2)
logits = self.softmax(c_f2)
return logits
class CCA_adaptative_multimodal(torch.nn.Module):
def __init__(self, alfa_shape_s1, alfa_shape_s2, s1_agg1, s1_agg2, s2_agg1, s2_agg2, act_func=torch.sigmoid):
"""
        Adaptive convex combination of two aggregations in a multimodal setting.
        alfa_shape_s1 should be n_classifiers2.
        The mixing coefficients of both stages are computed from the data through
        learnable affine maps followed by act_func.
"""
super(CCA_adaptative_multimodal, self).__init__()
#HARDCODED AGGS
self.s1_agg1 = s1_agg1
self.s1_agg2 = s1_agg2
self.s2_agg1 = s2_agg1
self.s2_agg2 = s2_agg2
self.weights1 = torch.nn.Parameter(torch.rand(1, alfa_shape_s1, 1, 1), requires_grad=True)
self.weights2 = torch.nn.Parameter(torch.rand(alfa_shape_s2, 1, 1), requires_grad=True)
self.bias1 = torch.nn.Parameter(torch.rand(alfa_shape_s1, 1, 1, 1), requires_grad=True)
self.bias2 = torch.nn.Parameter(torch.rand(alfa_shape_s2, 1, 1), requires_grad=True)
self.act = act_func
self.softmax = torch.nn.Softmax(dim=1)
def forward(self, x, axis=0):
"""
x shape should be:
        n_classifiers1 x n_classifiers2 x samples x classes
"""
#HARDCODED FORWARD
#Phase 1
c1 = self.s1_agg1(x, axis=1 , keepdims=False)
c2 = self.s1_agg2(x, axis=1 , keepdims=False)
alpha1 = torch.sum(x * self.weights1 + self.bias1, dim=1)
c_f = c1 * alpha1 + c2 * (1 - alpha1)
c_f1 = self.s2_agg1(c_f, axis=0 , keepdims=False)
c_f2 = self.s2_agg2(c_f, axis=0 , keepdims=False)
alpha2 = self.act(torch.sum(c_f * self.weights2 + self.bias2, dim=0))
c_f2 = c_f1 * alpha2 + c_f2 * (1 - alpha2)
logits = self.softmax(c_f2)
return logits
#Helpers.
def ready_CCA_unimodal(x, ag1, ag2):
clasi, samples, clases = x.shape
net_ag = CCA_unimodal(ag1, ag2)
return net_ag
def ready_CCA_multimodal(x, ag1, ag2, ag3, ag4):
clasi1, clasi2, samples, clases = x.shape
net_ag = CCA_multimodal(clasi2, ag1, ag2, ag3, ag4)
return net_ag
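# Usage sketch (illustrative, not part of the original file):
#   x = torch.rand(3, 8, 5)                                  # 3 classifiers x 8 samples x 5 classes
#   net = ready_CCA_unimodal(x, torch_choquet, torch_sugeno)
#   probs = net(x)                                           # shape (8, 5), rows softmax-normalised
# The single learnable alpha mixes the two aggregations of the classifier scores.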
|
506527
|
import numpy as np
import pytorch3d
import torch
from .abstract import AbstractDataset
class PointcloudDataset(AbstractDataset):
def __init__(self, cfg, root_path, data_dict, split, rotated=False):
name = cfg.name
super(PointcloudDataset, self).__init__(name, split, root_path)
self.cfg = cfg
self.split = split
self.data_dict = data_dict
self.rotated = rotated
self.instances = self.dict_to_instances(self.data_dict)
# Print out dataset stats
print("================================")
print(f"Stats for {self.name} - {split}")
print(f"Numer of instances {len(self.instances)}")
print("Configs:")
print(cfg)
print("================================")
def __len__(self):
return len(self.instances)
def getraw(self, index):
return self.__getitem__(index)
def __getitem__(self, index):
cls_id, s_id, f_ids, transform = self.instances[index]
output = {"uid": index, "class_id": cls_id, "sequence_id": s_id}
# load pointclouds
for i in range(2):
pc_path = self.data_dict[s_id]["pointclouds"][f_ids[i]]
pc = self.get_pointcloud(pc_path).points_list()[0]
output[f"points_{i}"] = pc
# get transform
output["Rt_0"] = np.concatenate((np.eye(3), np.zeros((3, 1))), axis=1)
Rt = np.linalg.inv(transform)[:3, :]
R = Rt[:, :3] / (Rt[:, :3] ** 2).sum(axis=1)[:, None]
output["Rt_1"] = torch.tensor(np.concatenate((R, Rt[:, 3:4]), axis=1)).float()
if self.rotated:
pc_0 = output["points_0"]
pc_1 = output["points_1"]
Rt_1 = output["Rt_1"]
            # generate two random rotation matrices
R_rand_0 = pytorch3d.transforms.random_rotation().float()
R_rand_1 = pytorch3d.transforms.random_rotation().float()
            # transpose R to handle the fact that P is num_points x 3
# yT = (RX)T = XT @ RT
# rotate pc_0 and pc_1 with R_rand_0
pc_0 = pc_0 @ R_rand_0.transpose(0, 1)
pc_1 = pc_1 @ R_rand_0.transpose(0, 1)
# rotate pc_1 and Rt_1 with R_rand_1
pc_1 = pc_1 @ R_rand_1.transpose(0, 1)
R = Rt_1[:3, :3]
t = Rt_1[:3, 3:4]
"""
            # calculating the augmented Rt is a bit tricky
            Y = R @ X + t
            X' = R0 @ X        ->  X = R0^-1 @ X'
            Y' = R1 @ R0 @ Y   ->  Y = R0^-1 @ R1^-1 @ Y'
            R0^-1 @ R1^-1 @ Y' = R @ (R0^-1 @ X') + t
            Y' = (R1 @ R0 @ R @ R0^-1) @ X' + R1 @ R0 @ t
"""
R = R_rand_1 @ R_rand_0 @ R @ R_rand_0.transpose(0, 1)
t = R_rand_1 @ R_rand_0 @ t
Rt_1 = torch.cat((R, t), dim=1)
            # reassign output
output["points_0"] = pc_0
output["points_1"] = pc_1
output["Rt_1"] = Rt_1
return output
def dict_to_instances(self, data_dict):
"""
        Converts the data dictionary into a list of instances.
        Input: data_dict -- structure <classes>/<models>/<instances>
        Output: all dataset instances
"""
instances = []
# populate dictionary
cls_id = "3DMatch_PCReg"
for s_id in data_dict:
# get pairs from transformations
transforms = data_dict[s_id]["transforms"]
for pair in transforms:
meta, transform = pair
# Hacky way of getting source to be in the middle for triplets
instances.append([cls_id, s_id, (meta[0], meta[1]), transform])
return instances
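# Verification sketch (illustrative, not part of the original file): the augmented pose
# computed in __getitem__ satisfies Y' = R_new @ X' + t_new whenever Y = R @ X + t.
#   R0, R1 = pytorch3d.transforms.random_rotation(), pytorch3d.transforms.random_rotation()
#   R, t = pytorch3d.transforms.random_rotation(), torch.rand(3, 1)
#   X = torch.rand(3, 5); Y = R @ X + t
#   Xp, Yp = R0 @ X, R1 @ R0 @ Y
#   R_new, t_new = R1 @ R0 @ R @ R0.T, R1 @ R0 @ t
#   torch.allclose(Yp, R_new @ Xp + t_new, atol=1e-5)        # True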
|
506554
|
import pathlib
from setuptools import setup, find_packages
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
with open('requirements.txt') as f:
dependencies = f.read().splitlines()
setup(name='idewavecore',
version='0.0.1',
description='Framework for creating custom servers',
long_description=README,
long_description_content_type="text/markdown",
url='https://github.com/idewave/idewavecore',
author='<NAME>',
author_email='<EMAIL>',
license='Apache license 2.0',
packages=find_packages(exclude=("test",)),
install_requires=dependencies,
zip_safe=False)
|
506572
|
import sys
import os
if sys.platform == 'win32':
import ntfslink
os.symlink = ntfslink.symlink
os.readlink = ntfslink.readlink
os.path.islink = ntfslink.islink
|
506690
|
import unittest
from opyoid import PerLookupScope
from opyoid.bindings import FromClassProvider
class MyType:
pass
class TestPerLookupScope(unittest.TestCase):
def setUp(self) -> None:
self.scope = PerLookupScope()
self.class_provider = FromClassProvider(MyType, [], None, {})
def test_get_scoped_provider_returns_unscoped_provider(self):
scoped_provider = self.scope.get_scoped_provider(self.class_provider)
instance_1 = scoped_provider.get()
instance_2 = scoped_provider.get()
self.assertIsNot(instance_1, instance_2)
self.assertIsInstance(instance_1, MyType)
self.assertIsInstance(instance_2, MyType)
|
506700
|
import flask
from fence.models import IdentityProvider
from fence.blueprints.login.base import DefaultOAuth2Login, DefaultOAuth2Callback
class ORCIDLogin(DefaultOAuth2Login):
def __init__(self):
super(ORCIDLogin, self).__init__(
idp_name=IdentityProvider.orcid, client=flask.current_app.orcid_client
)
class ORCIDCallback(DefaultOAuth2Callback):
def __init__(self):
super(ORCIDCallback, self).__init__(
idp_name=IdentityProvider.orcid,
client=flask.current_app.orcid_client,
username_field="orcid",
)
|
506721
|
from operator import attrgetter
class Country:
def __init__(self, name, population, area):
self.name = name
self.population = population
self.area = area
def __repr__(self):
return repr((self.name,self.population,self.area))
countries = [Country('India',1200,100),
Country('China', 1400, 200),
Country('USA', 120, 300)]
countries.append(Country('Russia',80,900))
countries.sort(key=attrgetter('population'), reverse=True)
print(max(countries, key=attrgetter('population')))
print(min(countries, key=attrgetter('population')))
print(min(countries, key=attrgetter('area')))
print(max(countries, key=attrgetter('area')))
print(countries)
|
506788
|
from unittest import expectedFailure
from django.db.models import Q, Field, F
from django.test import SimpleTestCase
from mock import sentinel
from natural_query.query import NaturalQueryDescriptor
from tests.unit.support import assertQObjectsEqual
class NaturalQueryDescriptorTestCase(SimpleTestCase):
def setUp(self):
self.addTypeEqualityFunc(Q, assertQObjectsEqual)
@property
def system_under_test(self):
sut = NaturalQueryDescriptor('field')
return sut
@property
def field(self):
return NaturalQueryDescriptor(name=sentinel.FIELD_NAME)
def test_equals_operator_generates_the_right_expression_for_the_exact_lookup(self):
sut = self.system_under_test
expected = Q(field__exact=sentinel.VALUE)
actual = sut == sentinel.VALUE
self.assertEqual(actual, expected)
def test_concated_equals_operator_generates_the_right_expression_for_the_exact_lookup(self):
sut = self.system_under_test
expected = Q(field__exact=sentinel.VALUE)
actual = sentinel.VALUE == sut == sentinel.VALUE
self.assertEqual(actual, expected)
def test_equals_operator_generates_the_right_expression_for_the_exact_lookup_when_comparing_to_another_field(self):
sut = self.system_under_test
expected = Q(field__exact=F(sentinel.FIELD_NAME))
actual = sut == self.field
self.assertEqual(actual, expected)
def test_greater_than_operator_generates_the_right_expression_for_the_gt_lookup(self):
sut = self.system_under_test
expected = Q(field__gt=sentinel.VALUE)
actual = sut > sentinel.VALUE
self.assertEqual(actual, expected)
def test_greater_than_operator_generates_the_right_expression_for_the_gt_lookup_when_comparing_to_another_field(
self):
sut = self.system_under_test
expected = Q(field__gt=F(sentinel.FIELD_NAME))
actual = sut > self.field
self.assertEqual(actual, expected)
def test_greater_than_or_equal_operator_generates_the_right_expression_for_the_gte_lookup(self):
sut = self.system_under_test
expected = Q(field__gte=sentinel.VALUE)
actual = sut >= sentinel.VALUE
self.assertEqual(actual, expected)
def test_greater_than_or_equal_operator_generates_the_right_expression_for_the_gte_lookup_when_comparing_to_another_field(
self):
sut = self.system_under_test
expected = Q(field__gte=F(sentinel.FIELD_NAME))
actual = sut >= self.field
self.assertEqual(actual, expected)
def test_less_than_operator_generates_the_right_expression_for_the_lt_lookup(self):
sut = self.system_under_test
expected = Q(field__lt=sentinel.VALUE)
actual = sut < sentinel.VALUE
self.assertEqual(actual, expected)
def test_less_than_operator_generates_the_right_expression_for_the_lt_lookup_when_comparing_to_another_field(self):
sut = self.system_under_test
expected = Q(field__lt=F(sentinel.FIELD_NAME))
actual = sut < self.field
self.assertEqual(actual, expected)
def test_less_than_or_equal_operator_generates_the_right_expression_for_the_lte_lookup(self):
sut = self.system_under_test
expected = Q(field__lte=sentinel.VALUE)
actual = sut <= sentinel.VALUE
self.assertEqual(actual, expected)
def test_less_than_or_equal_operator_generates_the_right_expression_for_the_lte_lookup_when_comparing_to_another_field(
self):
sut = self.system_under_test
expected = Q(field__lte=F(sentinel.FIELD_NAME))
actual = sut <= self.field
self.assertEqual(actual, expected)
def test_not_equal_operator_generates_the_right_negated_expression_for_the_exact_lookup(self):
sut = self.system_under_test
expected = ~Q(field__exact=sentinel.VALUE)
actual = sut != sentinel.VALUE
self.assertEqual(actual, expected)
def test_not_equal_operator_generates_the_right_negated_expression_for_the_exact_lookup_when_comparing_to_another_field(
self):
sut = self.system_under_test
expected = ~Q(field__exact=F(sentinel.FIELD_NAME))
actual = sut != self.field
self.assertEqual(actual, expected)
def test_concated_gte_operator_generates_the_right_expression_for_the_greater_than_or_equal_lookup(self):
"""
This should generate an expression that picks the lower value for comparison.
"""
sut = self.system_under_test
expected = Q(field__gte=sentinel.LOWER_VALUE)
actual = sentinel.HIGHER_VALUE <= sut >= sentinel.LOWER_VALUE
self.assertEqual(actual, expected)
def test_concated_gt_operator_generates_the_right_expression_for_the_greater_than_lookup(self):
"""
This should generate an expression that picks the lower value for comparison.
"""
sut = self.system_under_test
expected = Q(field__gt=sentinel.LOWER_VALUE)
actual = sentinel.HIGHER_VALUE < sut > sentinel.LOWER_VALUE
self.assertEqual(actual, expected)
def test_concated_gte_and_gt_operator_generates_the_right_expression_for_the_greater_than_lookup(self):
"""
This should generate an expression that picks the lower value for comparison.
"""
sut = self.system_under_test
expected = Q(field__gt=sentinel.LOWER_VALUE)
actual = sentinel.HIGHER_VALUE <= sut > sentinel.LOWER_VALUE
self.assertEqual(actual, expected)
def test_concated_gt_and_gte_operator_generates_the_right_expression_for_the_greater_than_or_equal_lookup(self):
"""
This should generate an expression that picks the lower value for comparison.
"""
sut = self.system_under_test
expected = Q(field__gte=sentinel.LOWER_VALUE)
actual = sentinel.HIGHER_VALUE < sut >= sentinel.LOWER_VALUE
self.assertEqual(actual, expected)
def test_negating_generates_the_right_expression_for_the_not_lookup(self):
sut = self.system_under_test
expected = ~Q('field')
actual = ~sut
self.assertEqual(actual, expected)
def test_can_and_expressions_when_braces_are_present(self):
field1 = NaturalQueryDescriptor('field1')
field2 = NaturalQueryDescriptor('field2')
expected = Q(field1__exact=sentinel.VALUE1, field2__exact=sentinel.VALUE2)
actual = (field1 == sentinel.VALUE1) & (field2 == sentinel.VALUE2)
self.assertEqual(actual, expected)
def test_can_or_expressions_when_braces_are_present(self):
field1 = NaturalQueryDescriptor('field1')
field2 = NaturalQueryDescriptor('field2')
expected = Q(field1__exact=sentinel.VALUE1) | Q(field2__exact=sentinel.VALUE2)
actual = (field1 == sentinel.VALUE1) | (field2 == sentinel.VALUE2)
self.assertEqual(actual, expected)
def test_can_add_to_field_and_compare(self):
sut = self.system_under_test
expected = Q(field__exact=F('field') + sentinel.VALUE)
actual = sut == sut + sentinel.VALUE
self.assertEqual(actual, expected)
def test_can_substract_from_field_and_compare(self):
sut = self.system_under_test
expected = Q(field__exact=F('field') - sentinel.VALUE)
actual = sut == sut - sentinel.VALUE
self.assertEqual(actual, expected)
def test_can_multiply_field_and_compare(self):
sut = self.system_under_test
expected = Q(field__exact=F('field') * sentinel.VALUE)
actual = sut == sut * sentinel.VALUE
self.assertEqual(actual, expected)
def test_can_divide_field_and_compare(self):
sut = self.system_under_test
expected = Q(field__exact=F('field') / sentinel.VALUE)
actual = sut == sut / sentinel.VALUE
self.assertEqual(actual, expected)
def test_can_raise_to_power_field_and_compare(self):
sut = self.system_under_test
expected = Q(field__exact=pow(F('field'), sentinel.VALUE))
actual = sut == pow(F('field'), sentinel.VALUE)
self.assertEqual(actual, expected)
def test_can_mod_field_and_compare(self):
sut = self.system_under_test
expected = Q(field__exact=F('field') % sentinel.VALUE)
actual = sut == sut % sentinel.VALUE
self.assertEqual(actual, expected)
def test_can_add_value_to_field_and_compare(self):
sut = self.system_under_test
# For some reason this test fails with a sentinel. I used a real value instead.
expected = Q(field__exact=1 + F('field'))
actual = sut == 1 + sut
self.assertEqual(actual, expected)
def test_can_substract_value_from_field_and_compare(self):
sut = self.system_under_test
expected = Q(field__exact=sentinel.VALUE - F('field'))
actual = sut == sentinel.VALUE - sut
self.assertEqual(actual, expected)
def test_iexact_generates_the_right_expression_for_the_iexact_lookup(self):
sut = self.system_under_test
expected = Q(field__iexact=sentinel.VALUE)
actual = sut.iexact(sentinel.VALUE)
self.assertEqual(actual, expected)
def test_iexact_generates_the_right_expression_for_the_iexact_lookup_when_comparing_to_a_field(self):
field1 = NaturalQueryDescriptor('field1')
field2 = NaturalQueryDescriptor('field2')
expected = Q(field1__iexact=F('field2'))
actual = field1.iexact(field2)
self.assertEqual(actual, expected)
def test_contains_generates_the_right_expression_for_the_contains_lookup(self):
sut = self.system_under_test
expected = Q(field__contains=sentinel.VALUE)
actual = sut.contains(sentinel.VALUE)
self.assertEqual(actual, expected)
def test_contains_generates_the_right_expression_for_the_contains_lookup_when_comparing_to_a_field(self):
field1 = NaturalQueryDescriptor('field1')
field2 = NaturalQueryDescriptor('field2')
expected = Q(field1__contains=F('field2'))
actual = field1.contains(field2)
self.assertEqual(actual, expected)
def test_icontains_generates_the_right_expression_for_the_icontains_lookup(self):
sut = self.system_under_test
expected = Q(field__icontains=sentinel.VALUE)
actual = sut.icontains(sentinel.VALUE)
self.assertEqual(actual, expected)
def test_icontains_generates_the_right_expression_for_the_icontains_lookup_when_comparing_to_a_field(self):
field1 = NaturalQueryDescriptor('field1')
field2 = NaturalQueryDescriptor('field2')
expected = Q(field1__icontains=F('field2'))
actual = field1.icontains(field2)
self.assertEqual(actual, expected)
def test_startswith_generates_the_right_expression_for_the_startswith_lookup(self):
sut = self.system_under_test
expected = Q(field__startswith=sentinel.VALUE)
actual = sut.startswith(sentinel.VALUE)
self.assertEqual(actual, expected)
def test_startswith_generates_the_right_expression_for_the_startswith_lookup_when_comparing_to_a_field(self):
field1 = NaturalQueryDescriptor('field1')
field2 = NaturalQueryDescriptor('field2')
expected = Q(field1__startswith=F('field2'))
actual = field1.startswith(field2)
self.assertEqual(actual, expected)
def test_istartswith_generates_the_right_expression_for_the_istartswith_lookup(self):
sut = self.system_under_test
expected = Q(field__istartswith=sentinel.VALUE)
actual = sut.istartswith(sentinel.VALUE)
self.assertEqual(actual, expected)
def test_istartswith_generates_the_right_expression_for_the_istartswith_lookup_when_comparing_to_a_field(self):
field1 = NaturalQueryDescriptor('field1')
field2 = NaturalQueryDescriptor('field2')
expected = Q(field1__istartswith=F('field2'))
actual = field1.istartswith(field2)
self.assertEqual(actual, expected)
def test_endswith_generates_the_right_expression_for_the_endswith_lookup(self):
sut = self.system_under_test
expected = Q(field__endswith=sentinel.VALUE)
actual = sut.endswith(sentinel.VALUE)
self.assertEqual(actual, expected)
def test_endswith_generates_the_right_expression_for_the_endswith_lookup_when_comparing_to_a_field(self):
field1 = NaturalQueryDescriptor('field1')
field2 = NaturalQueryDescriptor('field2')
expected = Q(field1__endswith=F('field2'))
actual = field1.endswith(field2)
self.assertEqual(actual, expected)
def test_iendswith_generates_the_right_expression_for_the_iendswith_lookup(self):
sut = self.system_under_test
expected = Q(field__iendswith=sentinel.VALUE)
actual = sut.iendswith(sentinel.VALUE)
self.assertEqual(actual, expected)
def test_iendswith_generates_the_right_expression_for_the_iendswith_lookup_when_comparing_to_a_field(self):
field1 = NaturalQueryDescriptor('field1')
field2 = NaturalQueryDescriptor('field2')
expected = Q(field1__iendswith=F('field2'))
actual = field1.iendswith(field2)
self.assertEqual(actual, expected)
def test_search_generates_the_right_expression_for_the_search_lookup(self):
sut = self.system_under_test
expected = Q(field__search=sentinel.VALUE)
actual = sut.search(sentinel.VALUE)
self.assertEqual(actual, expected)
def test_search_generates_the_right_expression_for_the_search_lookup_when_comparing_to_a_field(self):
field1 = NaturalQueryDescriptor('field1')
field2 = NaturalQueryDescriptor('field2')
expected = Q(field1__search=F('field2'))
actual = field1.search(field2)
self.assertEqual(actual, expected)
def test_regex_generates_the_right_expression_for_the_regex_lookup(self):
sut = self.system_under_test
expected = Q(field__regex=sentinel.VALUE)
actual = sut.regex(sentinel.VALUE)
self.assertEqual(actual, expected)
def test_regex_generates_the_right_expression_for_the_regex_lookup_when_comparing_to_a_field(self):
field1 = NaturalQueryDescriptor('field1')
field2 = NaturalQueryDescriptor('field2')
expected = Q(field1__regex=F('field2'))
actual = field1.regex(field2)
self.assertEqual(actual, expected)
def test_iregex_generates_the_right_expression_for_the_iregex_lookup(self):
sut = self.system_under_test
expected = Q(field__iregex=sentinel.VALUE)
actual = sut.iregex(sentinel.VALUE)
self.assertEqual(actual, expected)
def test_iregex_generates_the_right_expression_for_the_iregex_lookup_when_comparing_to_a_field(self):
field1 = NaturalQueryDescriptor('field1')
field2 = NaturalQueryDescriptor('field2')
expected = Q(field1__iregex=F('field2'))
actual = field1.iregex(field2)
self.assertEqual(actual, expected)
def test_in_generates_the_right_expression_for_the_in_lookup(self):
sut = self.system_under_test
expected = Q(field__in=(sentinel.VALUE1, sentinel.VALUE2))
actual = sut.in_values(sentinel.VALUE1, sentinel.VALUE2)
self.assertEqual(actual, expected)
def test_in_generates_the_right_expression_for_the_in_lookup_when_comparing_to_a_field(self):
sut = self.system_under_test
field2 = NaturalQueryDescriptor('field2')
expected = Q(field__in=(sentinel.VALUE, F('field2')))
actual = sut.in_values(sentinel.VALUE, field2)
self.assertEqual(actual, expected)
def test_between_generates_the_right_expression_for_the_range_lookup(self):
sut = self.system_under_test
expected = Q(field__range=(sentinel.VALUE1, sentinel.VALUE2))
actual = sut.between(sentinel.VALUE1, sentinel.VALUE2)
self.assertEqual(actual, expected)
def test_between_generates_the_right_expression_for_the_range_lookup_when_comparing_to_a_field(self):
sut = self.system_under_test
field2 = NaturalQueryDescriptor('field2')
expected = Q(field__range=(sentinel.VALUE, F('field2')))
actual = sut.between(sentinel.VALUE, field2)
self.assertEqual(actual, expected)
class NaturalQueryDescriptorUnsupportedOperationsTestCase(SimpleTestCase):
@property
def system_under_test(self):
sut = NaturalQueryDescriptor('field')
return sut
@property
def field(self):
return Field(name=sentinel.FIELD_NAME)
@expectedFailure
def test_concated_equals_operator_generates_the_wrong_expression_for_the_exact_lookup(self):
sut = self.system_under_test
expected = Q(field__exact=sentinel.VALUE)
actual = sut == sentinel.VALUE == sentinel.VALUE
self.assertEqual(actual, expected)
@expectedFailure
def test_concated_greater_than_or_equals_operator_generates_the_wrong_expression_for_the_range_lookup(self):
sut = self.system_under_test
expected = Q(field__range=[sentinel.LOWER_VALUE, sentinel.HIGHER_VALUE])
actual = sentinel.HIGHER_VALUE >= sut >= sentinel.LOWER_VALUE
self.assertEqual(actual, expected)
@expectedFailure
def test_concated_greater_than_operator_generates_the_wrong_expression_for_the_lt_and_gt_lookup(self):
sut = self.system_under_test
expected = Q(field_gt=sentinel.LOWER_VALUE, field_lt=sentinel.HIGHER_VALUE)
actual = sentinel.HIGHER_VALUE > sut > sentinel.LOWER_VALUE
self.assertEqual(actual, expected)
@expectedFailure
def test_concated_greater_than_or_equal_and_greater_than_operator_generates_the_wrong_expression_for_the_lt_and_gte_lookup(
self):
sut = self.system_under_test
expected = Q(field_gt=sentinel.LOWER_VALUE, field_lte=sentinel.HIGHER_VALUE)
actual = sentinel.HIGHER_VALUE >= sut > sentinel.LOWER_VALUE
self.assertEqual(actual, expected)
@expectedFailure
def test_concated_greater_than_and_greater_than_or_equal_operator_generates_the_wrong_expression_for_the_lt_and_gte_lookup(
self):
sut = self.system_under_test
expected = Q(field_gte=sentinel.LOWER_VALUE, field_lt=sentinel.HIGHER_VALUE)
actual = sentinel.HIGHER_VALUE > sut >= sentinel.LOWER_VALUE
self.assertEqual(actual, expected)
@expectedFailure
def test_concated_lower_than_or_equals_operator_generates_the_wrong_expression_for_the_range_lookup(self):
sut = self.system_under_test
expected = Q(field__range=[sentinel.LOWER_VALUE, sentinel.HIGHER_VALUE])
actual = sentinel.LOWER_VALUE <= sut <= sentinel.HIGHER_VALUE
self.assertEqual(actual, expected)
@expectedFailure
def test_concated_lower_than_operator_generates_the_wrong_expression_for_the_lt_and_gt_lookup(self):
sut = self.system_under_test
expected = Q(field_gt=sentinel.LOWER_VALUE, field_lt=sentinel.HIGHER_VALUE)
actual = sentinel.LOWER_VALUE < sut < sentinel.HIGHER_VALUE
self.assertEqual(actual, expected)
@expectedFailure
def test_concated_lower_than_or_equal_and_lower_than_operator_generates_the_wrong_expression_for_the_lt_and_gt_lookup(
self):
sut = self.system_under_test
expected = Q(field_gte=sentinel.LOWER_VALUE, field_lt=sentinel.HIGHER_VALUE)
actual = sentinel.LOWER_VALUE <= sut < sentinel.HIGHER_VALUE
self.assertEqual(actual, expected)
@expectedFailure
def test_concated_lower_than_and_lower_than_or_equal_operator_generates_the_wrong_expression_for_the_lt_and_gt_lookup(
self):
sut = self.system_under_test
expected = Q(field_gt=sentinel.LOWER_VALUE, field_lte=sentinel.HIGHER_VALUE)
actual = sentinel.LOWER_VALUE < sut <= sentinel.HIGHER_VALUE
self.assertEqual(actual, expected)
@expectedFailure
def test_cant_and_expressions_when_braces_are_not_present(self):
field1 = NaturalQueryDescriptor('field1')
field2 = NaturalQueryDescriptor('field2')
expected = Q(field1__exact=sentinel.VALUE1, field2__exact=sentinel.VALUE2)
actual = field1 == sentinel.VALUE1 & field2 == sentinel.VALUE2
self.assertEqual(actual, expected)
@expectedFailure
def test_cant_or_expressions_when_braces_are_not_present(self):
field1 = NaturalQueryDescriptor('field1')
field2 = NaturalQueryDescriptor('field2')
expected = Q(field1__exact=sentinel.VALUE1) | Q(field2__exact=sentinel.VALUE2)
actual = field1 == sentinel.VALUE1 | field2 == sentinel.VALUE2
self.assertEqual(actual, expected)
|
506793
|
import torch
import torchvision
import torchvision.transforms as transforms
import numpy as np
class IMBALANETINYIMGNET(torchvision.datasets.ImageFolder):
cls_num = 200
def __init__(self, root, imb_type='exp', imb_factor=0.01, rand_number=0,
transform=None, target_transform=None):
super(IMBALANETINYIMGNET, self).__init__(root, transform, target_transform)
np.random.seed(rand_number)
img_num_list = self.get_img_num_per_cls(self.cls_num, imb_type, imb_factor)
self.gen_imbalanced_data(img_num_list)
def get_img_num_per_cls(self, cls_num, imb_type, imb_factor):
img_max = len(self.samples) / cls_num
img_num_per_cls = []
if imb_type == 'exp':
for cls_idx in range(cls_num):
num = img_max * (imb_factor**(cls_idx / (cls_num - 1.0)))
img_num_per_cls.append(int(num))
elif imb_type == 'step':
for cls_idx in range(cls_num // 2):
img_num_per_cls.append(int(img_max))
for cls_idx in range(cls_num // 2):
img_num_per_cls.append(int(img_max * imb_factor))
else:
img_num_per_cls.extend([int(img_max)] * cls_num)
return img_num_per_cls
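    # Example (illustrative, not part of the original file): with imb_type='exp' the
    # per-class counts decay geometrically from img_max (class 0) down to
    # img_max * imb_factor (last class); with imb_type='step' the first half of the
    # classes keep img_max and the second half get img_max * imb_factor.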
def gen_imbalanced_data(self, img_num_per_cls):
new_data = []
new_targets = []
targets_np = np.array(self.targets, dtype=np.int64)
classes = np.unique(targets_np)
self.num_per_cls_dict = dict()
for the_class, the_img_num in zip(classes, img_num_per_cls):
self.num_per_cls_dict[the_class] = the_img_num
idx = np.where(targets_np == the_class)[0]
np.random.shuffle(idx)
selec_idx = idx[:the_img_num]
#print(self.samples)
res_list = [self.samples[i] for i in selec_idx]
#print(res_list)
new_data.extend(res_list)
new_targets.extend([the_class, ] * the_img_num)
#new_data = np.vstack(new_data)
self.samples = new_data
self.targets = new_targets
def get_cls_num_list(self):
cls_num_list = []
for i in range(self.cls_num):
cls_num_list.append(self.num_per_cls_dict[i])
return cls_num_list
if __name__ == '__main__':
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = IMBALANETINYIMGNET(root='./data/tiny-imagenet-200/train/',
transform=transform)
trainloader = iter(trainset)
data, label = next(trainloader)
import pdb; pdb.set_trace()
|
506799
|
from django.core.handlers.base import BaseHandler
import app
from django.test import Client
def test_middleware(rf, settings):
settings.MIDDLEWARE = [
"app.middleware.hello_world"
]
request = rf.get('/')
handler = BaseHandler()
handler.load_middleware()
response = handler.get_response(request)
assert response.status_code == 200
def test_app(settings):
response = Client().get('/')
assert response.status_code == 200
|
506828
|
import FWCore.ParameterSet.Config as cms
eleRegressionEnergy = cms.EDProducer("RegressionEnergyPatElectronProducer",
debug = cms.untracked.bool(False),
inputElectronsTag = cms.InputTag('selectedPatElectrons'),
#inputElectronsTag = cms.InputTag('gsfElectrons'),
# inputCollectionType (0: GsfElectron, 1 :PATElectron)
inputCollectionType = cms.uint32(1),
# useRecHitCollections ( should be set to true if GsfElectrons or if the RecHits have not been embedded into the PATElectron
useRecHitCollections = cms.bool(False),
# produce ValueMaps. Should be true for GsfElectrons otherwise this producer doest nothing. Keep it to false for PAT
produceValueMaps = cms.bool(False),
#regressionInputFile = cms.string("EgammaAnalysis/ElectronTools/data/eleEnergyRegWeights_V1.root"),
regressionInputFile = cms.string("EgammaAnalysis/ElectronTools/data/eleEnergyRegWeights_WithSubClusters_VApr15.root"),
## Regression type (1: ECAL regression w/o subclusters 2: ECAL regression w/ subclusters)
#energyRegressionType = cms.uint32(1),
energyRegressionType = cms.uint32(2),
rhoCollection = cms.InputTag('kt6PFJets:rho:RECO'),
vertexCollection = cms.InputTag('offlinePrimaryVertices'),
# Not used if inputCollectionType is set to 1
nameEnergyReg = cms.string("eneRegForGsfEle"),
nameEnergyErrorReg = cms.string("eneErrorRegForGsfEle"),
# Used only if useRecHitCollections is set to true
recHitCollectionEB = cms.InputTag('reducedEcalRecHitsEB'),
recHitCollectionEE = cms.InputTag('reducedEcalRecHitsEE')
)
|
506830
|
import unittest
import sys
sys.path.insert(0, '../')
import numpy as np
import numpy.linalg as la
from pak.evaluation import MOTM
class TestEvaluation(unittest.TestCase):
def test_motm_duplicate(self):
Gt = np.array([
[1, 1, 0, 0],
[1, 2, 10, 10],
[2, 1, 0, 0]
])
Hy = np.array([
[1, 1, 10, 10],
[1, 1, 0, 0],
[2, 1, 0, 0]
])
cost = lambda a, b: la.norm(a-b)
fp, m, mme, c, d, g = MOTM.evaluate(Gt, Hy, 10, cost)
self.assertEqual(len(fp), 2)
self.assertEqual(len(m), 2)
self.assertEqual(len(mme), 2)
self.assertEqual(len(c), 2)
self.assertEqual(len(d), 2)
self.assertEqual(len(g), 2)
self.assertEqual(np.sum(fp), 0)
self.assertEqual(np.sum(m), 0)
self.assertEqual(np.sum(mme), 0)
self.assertEqual(np.sum(d), 0)
self.assertEqual(np.sum(g), 3)
self.assertEqual(np.sum(c), 3)
def test_motm_fn_with_debug_info(self):
Gt = np.array([
[1, 1, 0, 0],
[1, 3, 11, 11],
[1, 2, 10, 10],
[2, 1, 0, 0],
[2, 3, 11, 11]
])
Hy = np.array([
[1, 2, 10, 10],
[1, 1, 0, 0],
[2, 1, 0, 0]
])
cost = lambda a, b: la.norm(a-b)
fp, m, mme, c, d, g, FN_pairs, FP_pairs, MME_pairs =\
MOTM.evaluate(Gt, Hy, 5, cost,
debug_info=True)
self.assertEqual(FN_pairs[0][0], 1)
self.assertEqual(FN_pairs[1][0], 2)
self.assertEqual(FN_pairs[0][2], 11)
self.assertEqual(FN_pairs[1][2], 11)
self.assertEqual(len(FN_pairs), 2)
self.assertEqual(len(FP_pairs), 0)
self.assertEqual(len(MME_pairs), 0)
self.assertEqual(len(fp), 2)
self.assertEqual(len(m), 2)
self.assertEqual(len(mme), 2)
self.assertEqual(len(c), 2)
self.assertEqual(len(d), 2)
self.assertEqual(len(g), 2)
self.assertEqual(np.sum(fp), 0)
self.assertEqual(np.sum(m), 2)
self.assertEqual(np.sum(mme), 0)
self.assertEqual(np.sum(d), 0)
self.assertEqual(np.sum(g), 5)
self.assertEqual(np.sum(c), 3)
def test_motm_fp_with_debug_info(self):
Gt = np.array([
[1, 1, 0, 0],
[1, 2, 10, 10],
[2, 1, 0, 0]
])
Hy = np.array([
[1, 2, 10, 10],
[1, 3, 20, 20],
[1, 4, 30, 30],
[2, 1, 0, 0],
[1, 1, 0, 0],
[2, 5, 88, 99]
])
cost = lambda a, b: la.norm(a-b)
fp, m, mme, c, d, g, FN_pairs, FP_pairs, MME_pairs =\
MOTM.evaluate(Gt, Hy, 5, cost,
debug_info=True)
self.assertEqual(len(FN_pairs), 0)
self.assertEqual(len(FP_pairs), 3)
self.assertEqual(len(MME_pairs), 0)
self.assertEqual(FP_pairs[0][0], 1)
self.assertEqual(FP_pairs[1][0], 1)
self.assertEqual(FP_pairs[2][0], 2)
self.assertEqual(FP_pairs[0][2], 20)
self.assertEqual(FP_pairs[1][2], 30)
self.assertEqual(FP_pairs[2][2], 88)
self.assertEqual(len(fp), 2)
self.assertEqual(len(m), 2)
self.assertEqual(len(mme), 2)
self.assertEqual(len(c), 2)
self.assertEqual(len(d), 2)
self.assertEqual(len(g), 2)
self.assertEqual(np.sum(fp), 3)
self.assertEqual(np.sum(m), 0)
self.assertEqual(np.sum(mme), 0)
self.assertEqual(np.sum(d), 0)
self.assertEqual(np.sum(g), 3)
self.assertEqual(np.sum(c), 3)
def test_motm_mme_with_debug_info(self):
Gt = np.array([
[1, 1, 0, 0],
[1, 2, 10, 10],
[2, 1, 0, 0]
])
Hy = np.array([
[2, 1, 0, 0],
[1, 2, 10, 10],
[1, 3, 0, 0]
])
cost = lambda a, b: la.norm(a-b)
fp, m, mme, c, d, g, FN_pairs, FP_pairs, MME_pairs =\
MOTM.evaluate(Gt, Hy, 5, cost,
debug_info=True)
self.assertEqual(len(FN_pairs), 0)
self.assertEqual(len(FP_pairs), 0)
self.assertEqual(len(MME_pairs), 1)
self.assertEqual(MME_pairs[0][0], 2)
self.assertEqual(MME_pairs[0][1], 0)
self.assertEqual(len(fp), 2)
self.assertEqual(len(m), 2)
self.assertEqual(len(mme), 2)
self.assertEqual(len(c), 2)
self.assertEqual(len(d), 2)
self.assertEqual(len(g), 2)
self.assertEqual(np.sum(fp), 0)
self.assertEqual(np.sum(m), 0)
self.assertEqual(np.sum(mme), 1)
self.assertEqual(np.sum(d), 0)
self.assertEqual(np.sum(g), 3)
self.assertEqual(np.sum(c), 3)
def test_motm(self):
Gt = np.array([
[1, 1, 0, 0],
[1, 2, 10, 10],
[2, 1, 0, 0]
])
Hy = np.array([
[1, 2, 10, 10],
[1, 1, 0, 0],
[2, 1, 0, 0]
])
cost = lambda a, b: la.norm(a-b)
fp, m, mme, c, d, g = MOTM.evaluate(Gt, Hy, 10, cost)
self.assertEqual(len(fp), 2)
self.assertEqual(len(m), 2)
self.assertEqual(len(mme), 2)
self.assertEqual(len(c), 2)
self.assertEqual(len(d), 2)
self.assertEqual(len(g), 2)
self.assertEqual(np.sum(fp), 0)
self.assertEqual(np.sum(m), 0)
self.assertEqual(np.sum(mme), 0)
self.assertEqual(np.sum(d), 0)
self.assertEqual(np.sum(g), 3)
self.assertEqual(np.sum(c), 3)
def test_motm_with_debug_info(self):
Gt = np.array([
[1, 1, 0, 0],
[1, 2, 10, 10],
[2, 1, 0, 0]
])
Hy = np.array([
[1, 2, 10, 10],
[1, 1, 0, 0],
[2, 1, 0, 0]
])
cost = lambda a, b: la.norm(a-b)
fp, m, mme, c, d, g, FN_pairs, FP_pairs, MME_pairs =\
MOTM.evaluate(Gt, Hy, 10, cost,
debug_info=True)
self.assertEqual(len(FN_pairs), 0)
self.assertEqual(len(FP_pairs), 0)
self.assertEqual(len(MME_pairs), 0)
self.assertEqual(len(fp), 2)
self.assertEqual(len(m), 2)
self.assertEqual(len(mme), 2)
self.assertEqual(len(c), 2)
self.assertEqual(len(d), 2)
self.assertEqual(len(g), 2)
self.assertEqual(np.sum(fp), 0)
self.assertEqual(np.sum(m), 0)
self.assertEqual(np.sum(mme), 0)
self.assertEqual(np.sum(d), 0)
self.assertEqual(np.sum(g), 3)
self.assertEqual(np.sum(c), 3)
def test_motm_correct_hyp_1elem(self):
Gt = np.array([
[1, 1, 0, 0],
[2, 1, 0, 0]
])
Hy = np.array([
[1, 1, 0, 0],
[2, 1, 0, 0]
])
cost = lambda a, b: la.norm(a-b)
fp, m, mme, c, d, g = MOTM.evaluate(Gt, Hy, 10, cost)
self.assertEqual(len(fp), 2)
self.assertEqual(len(m), 2)
self.assertEqual(len(mme), 2)
self.assertEqual(len(c), 2)
self.assertEqual(len(d), 2)
self.assertEqual(len(g), 2)
self.assertEqual(np.sum(fp), 0)
self.assertEqual(np.sum(m), 0)
self.assertEqual(np.sum(mme), 0)
self.assertEqual(np.sum(d), 0)
self.assertEqual(np.sum(g), 2)
self.assertEqual(np.sum(c), 2)
def test_motm_wrong_hyp(self):
Gt = np.array([
[1, 1, 0, 0],
[2, 1, 0, 0]
])
Hy = np.array([
[1, 1, 0, 0],
[1, 2, 10, 0],
[2, 1, 0, 0]
])
cost = lambda a, b: la.norm(a-b)
fp, m, mme, c, d, g = MOTM.evaluate(Gt, Hy, 10, cost)
self.assertEqual(len(fp), 2)
self.assertEqual(len(m), 2)
self.assertEqual(len(mme), 2)
self.assertEqual(len(c), 2)
self.assertEqual(len(d), 2)
self.assertEqual(len(g), 2)
self.assertEqual(np.sum(fp), 1)
self.assertEqual(np.sum(m), 0)
self.assertEqual(np.sum(mme), 0)
self.assertEqual(np.sum(d), 0)
self.assertEqual(np.sum(g), 2)
self.assertEqual(np.sum(c), 2)
def test_motm_1miss(self):
Gt = np.array([
[1, 1, 0, 0],
[1, 2, 10, 0],
[2, 1, 0, 0]
])
Hy = np.array([
[1, 1, 0, 0],
[2, 1, 0, 0]
])
cost = lambda a, b: la.norm(a-b)
fp, m, mme, c, d, g = MOTM.evaluate(Gt, Hy, 10, cost)
self.assertEqual(len(fp), 2)
self.assertEqual(len(m), 2)
self.assertEqual(len(mme), 2)
self.assertEqual(len(c), 2)
self.assertEqual(len(d), 2)
self.assertEqual(len(g), 2)
self.assertEqual(np.sum(fp), 0)
self.assertEqual(np.sum(m), 1)
self.assertEqual(np.sum(mme), 0)
self.assertEqual(np.sum(d), 0)
self.assertEqual(np.sum(g), 3)
self.assertEqual(np.sum(c), 2)
def test_motm_1mme(self):
Gt = np.array([
[1, 1, 0, 0],
[2, 1, 0, 0]
])
Hy = np.array([
[1, 1, 0, 0],
[2, 2, 0, 0]
])
cost = lambda a, b: la.norm(a-b)
fp, m, mme, c, d, g = MOTM.evaluate(Gt, Hy, 10, cost)
self.assertEqual(len(fp), 2)
self.assertEqual(len(m), 2)
self.assertEqual(len(mme), 2)
self.assertEqual(len(c), 2)
self.assertEqual(len(d), 2)
self.assertEqual(len(g), 2)
self.assertEqual(np.sum(fp), 0)
self.assertEqual(np.sum(m), 0)
self.assertEqual(np.sum(mme), 1)
self.assertEqual(np.sum(d), 0)
self.assertEqual(np.sum(g), 2)
self.assertEqual(np.sum(c), 2)
def test_motm_cross(self):
Gt = np.array([
[1, 1, 0, 0],
[1, 2, 2, 2],
[2, 1, 1, 1],
[2, 2, 1, 1],
[3, 1, 2, 2],
[3, 2, 0, 0]
])
Hy = np.array([
[1, 2, 2, 2],
[1, 1, 0, 0],
[2, 2, 1, 1],
[2, 1, 1, 1],
[3, 1, 2, 2],
[3, 2, 0, 0]
])
cost = lambda a, b: la.norm(a-b)
fp, m, mme, c, d, g = MOTM.evaluate(Gt, Hy, 1.3, cost)
self.assertEqual(len(fp), 3)
self.assertEqual(len(m), 3)
self.assertEqual(len(mme), 3)
self.assertEqual(len(c), 3)
self.assertEqual(len(d), 3)
self.assertEqual(len(g), 3)
self.assertEqual(np.sum(fp), 0)
self.assertEqual(np.sum(m), 0)
self.assertEqual(np.sum(mme), 0)
self.assertEqual(np.sum(d), 0)
self.assertEqual(np.sum(g), 6)
self.assertEqual(np.sum(c), 6)
def test_motm_interrupt(self):
Gt = np.array([
[1, 1, 0, 0],
[2, 1, 0, 0],
[5, 1, 0, 0],
[6, 1, 0, 0],
[7, 1, 0, 0],
[8, 1, 0, 0]
])
Hy = np.array([
[1, 1, 0, 0],
[2, 1, 0, 0],
[5, 1, 0, 0],
[6, 1, 0, 0],
[7, 1, 0, 0],
[8, 1, 0, 0]
])
cost = lambda a, b: la.norm(a-b)
fp, m, mme, c, d, g = MOTM.evaluate(Gt, Hy, 10, cost)
self.assertEqual(len(fp), 8)
self.assertEqual(len(m), 8)
self.assertEqual(len(mme), 8)
self.assertEqual(len(c), 8)
self.assertEqual(len(d), 8)
self.assertEqual(len(g), 8)
self.assertEqual(np.sum(fp), 0)
self.assertEqual(np.sum(m), 0)
self.assertEqual(np.sum(mme), 0)
self.assertEqual(np.sum(d), 0)
self.assertEqual(np.sum(g), 6)
self.assertEqual(np.sum(c), 6)
# -------------------------------
# RUN IT
# -------------------------------
if __name__ == '__main__':
unittest.main()
|
506835
|
import io
from testpath import assert_isfile, assert_not_path_exists
from zipfile import ZipFile
from nsist import commands
def test_prepare_bin_dir(tmp_path):
cmds = {
'acommand': {
'entry_point': 'somemod:somefunc',
'extra_preamble': io.StringIO(u'import extra')
}
}
commands.prepare_bin_directory(tmp_path, cmds)
exe_file = str(tmp_path / 'acommand.exe')
assert_isfile(exe_file)
with open(commands.find_exe(console=True), 'rb') as lf:
b_launcher = lf.read()
assert b_launcher[:2] == b'MZ' # Sanity check
with open(exe_file, 'rb') as ef:
b_exe = ef.read()
assert b_exe[:len(b_launcher)] == b_launcher
assert b_exe[len(b_launcher):].startswith(b"#!<launcher_dir>\\..\\Python\\python.exe\r\n")
with ZipFile(exe_file) as zf:
assert zf.testzip() is None
script_contents = zf.read('__main__.py').decode('utf-8')
assert 'import extra' in script_contents
assert 'somefunc()' in script_contents
def test_prepare_bin_dir_noconsole(tmp_path):
cmds = {
'acommand': {
'entry_point': 'somemod:somefunc',
'console': False
}
}
commands.prepare_bin_directory(tmp_path, cmds)
exe_file = str(tmp_path / 'acommand.exe')
assert_isfile(exe_file)
with open(commands.find_exe(console=False), 'rb') as lf:
b_launcher = lf.read()
assert b_launcher[:2] == b'MZ' # Sanity check
with open(exe_file, 'rb') as ef:
b_exe = ef.read()
assert b_exe[:len(b_launcher)] == b_launcher
assert b_exe[len(b_launcher):].startswith(b"#!<launcher_dir>\\..\\Python\\pythonw.exe\r\n")
with ZipFile(exe_file) as zf:
assert zf.testzip() is None
script_contents = zf.read('__main__.py').decode('utf-8')
assert 'somefunc()' in script_contents
|
506919
|
import fileinput
import re
connections = {}
# Set up dictionary of connections
for line in fileinput.input():
rule, wire = re.search(r'(.*) -> (.*)', line).groups()
value = None
if len(rule.split()) == 1:
value = (rule,)
elif 'NOT' in rule:
value = ('NOT', rule.split()[1])
else:
value = (rule.split()[1], rule.split()[0], rule.split()[2])
connections[wire] = value
connections2 = connections.copy()
def follow(wire, c):
rule = c[wire]
val = None
# Base case
if len(rule) == 1:
if rule[0].isdigit():
return int(rule[0])
else:
return follow(rule[0], c)
elif len(rule) == 2:
return ~follow(rule[1], c)
else:
if rule[0] == 'AND':
val = (int(rule[1]) if rule[1].isdigit() else follow(rule[1], c)) & (int(rule[2]) if rule[2].isdigit() else follow(rule[2], c))
elif rule[0] == 'OR':
val = (int(rule[1]) if rule[1].isdigit() else follow(rule[1], c)) | (int(rule[2]) if rule[2].isdigit() else follow(rule[2], c))
elif rule[0] == 'LSHIFT':
val = follow(rule[1], c) << int(rule[2])
elif rule[0] == 'RSHIFT':
val = follow(rule[1], c) >> int(rule[2])
if type(val) is int:
c[wire] = (str(val),)
return val
s = follow('a', connections)
print "Signal to wire a: %d" % s
connections2['b'] = (str(s), )
print "After overriding b to %s, signal to a is %d" % (s, follow('a', connections2))
|