id (string, lengths 3-8) | content (string, lengths 100-981k)
---|---|
487390
|
from calc.run import run
def test_simple_function():
run(open("test1.calc").readlines())
def test_simple_script():
run(open("test2.calc").readlines())
|
487414
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import json
import argparse
import cv2
import numpy as np
import time
import torch
from external.nms import soft_nms
from utils.logger import Logger
from config import Config
from dataset.coco import COCO
from models.network import create_model, load_model, save_model
from detector import CtdetDetector as Detector
from utils.debugger import colors
from utils.image import size2level, levelnum
COCO_NAMES = ['__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',
'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush']
def get_args():
# Training settings
parser = argparse.ArgumentParser(description='Object Detection!')
parser.add_argument('--finetuning', action='store_true', default=False, help='finetuning the training')
parser.add_argument('--without_gpu', action='store_true', default=True, help='do not use the GPU')
parser.add_argument('--num_workers', type=int, default=4, help='dataloader threads. 0 for single-thread.')
parser.add_argument('--batch_size', type=int, default=5, help='batch size')
parser.add_argument('--num_epochs', type=int, default=140, help='total training epochs.')
parser.add_argument('--save_all', action='store_true', help='save model to disk every 5 epochs.')
parser.add_argument('--num_iters', type=int, default=-1, help='default: #samples / batch_size.')
parser.add_argument('--val_intervals', type=int, default=5, help='number of epochs to run validation.')
parser.add_argument('--trainval', action='store_true', help='include validation in training and test on test set')
parser.add_argument('--lr', type=float, default=1.25e-4, help='learning rate for batch size 32.')
parser.add_argument('--lr_step', type=str, default='90,120', help='epochs at which to drop the learning rate by a factor of 10.')
parser.add_argument('--sizeaug', action='store_true', default=False, help='size augmentation')
parser.add_argument('--gpus', default='0', help='-1 for CPU, use comma for multiple gpus')
parser.add_argument('--seed', type=int, default=326, help='random seed')
parser.add_argument('--load_model', default='./save_models/model_last.pth', help='path to pretrained model')
parser.add_argument('--resume', action='store_true', help='resume training')
parser.add_argument('--test', action='store_true')
parser.add_argument('--metric', default='loss', help='main metric to save best model')
parser.add_argument('--image', default='./test.jpg', help='test image')
parser.add_argument('--nms', action='store_true', default=True, help='nms')
parser.add_argument('--network_type', type=str, default='unetobj', help='network type')
parser.add_argument('--backbone', type=str, default='peleenet', help='backbone network')
parser.add_argument('--output_dir', default='./results', help='output dir')
parser.add_argument('--center_thresh', type=float, default=0.10, help='center threshold')
parser.add_argument('--sizethr', type=float, default=0.03, help='size threshold')
parser.add_argument('--instance', type=int, default=0, help='instance number')
args = parser.parse_args()
print(args)
return args
def cleanmask(m0, m1, m2, m3):
return (m0 * ((m0-m1)>0.))
def assignroi(pagenum, dst, src, x1, y1, x2, y2):
dst[y1:y2, x1:x2] += src[y1:y2, x1:x2, pagenum]
def test():
args = get_args()
args.gpus_str = args.gpus
args.gpus = [int(gpu) for gpu in args.gpus.split(',')]
args.gpus = [i for i in range(len(args.gpus))] if args.gpus[0] >=0 else [-1]
if not args.without_gpu:
print("Use GPU")
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus_str
device = torch.device('cuda')
else:
print("Use CPU")
device = torch.device('cpu')
args.gpus = []
if args.network_type == 'large_hourglass':
down_ratio = 4
nstack = 2
else:
down_ratio = 2 if args.backbone == 'peleenet' else 1
nstack = 1
cfg = Config(
args.gpus, device,
args.network_type, args.backbone,
1, down_ratio, nstack
)
if True:
cfg.input_h = 512
cfg.input_w = 512
cfg.input_res = 512
cfg.load_model = args.load_model
cfg.nms = args.nms
cfg.debug = 2
cfg.center_thresh = args.center_thresh
logger = Logger(cfg)
cfg.update(COCO)
#dataset = COCO('val', cfg)
# img = cv2.resize(img, (cfg.img_size,cfg.img_size), interpolation=cv2.INTER_CUBIC)
# img = img.astype(np.float32) / 255.
# img -= np.array(COCO_MEAN, dtype=np.float32)[None, None, :]
# img /= np.array(COCO_STD, dtype=np.float32)[None, None, :]
# imgs = torch.FloatTensor(1, 3, cfg.img_size, cfg.img_size)
# imgs[0,:,:,:] = torch.FloatTensor(img.transpose(2,0,1))
# imgs = imgs.to(cfg.device)
img = cv2.imread(args.image)
h, w, _ = img.shape
scale = 512/w
w = 512
h = int(h*scale)
img = cv2.resize(img, (w,h))
detector = Detector(cfg, args.output_dir)
ret = detector.run(img)
bbox_and_scores = ret['results']
inp_height = ret['meta']['inp_height']
inp_width = ret['meta']['inp_width']
new_height = ret['meta']['new_height']
new_width = ret['meta']['new_width']
trans_inv = ret['meta']['trans_inv']
heatmap = ret['heatmap']
heatmap = cv2.resize(heatmap, (inp_width,inp_height))
heatmap = cv2.warpAffine(heatmap, trans_inv, (new_width, new_height), flags=cv2.INTER_LINEAR)
cv2.imwrite("./results/heatmap.jpg", heatmap)
allmasktmp = ret['allmask']
allmask = np.zeros((h,w,allmasktmp.shape[2]), dtype=np.float32)
for i in range(0, allmask.shape[2]):
allmaskpg = allmasktmp[:,:,i]
allmaskpg = cv2.resize(allmaskpg, (inp_width,inp_height))
allmaskpg = cv2.warpAffine(allmaskpg, trans_inv, (new_width, new_height), flags=cv2.INTER_LINEAR)
allmask[:,:,i] = cv2.resize(allmaskpg, (w,h))
allmaskjpg = np.zeros((h,w,3), dtype=np.uint8)
i = 0
thr = 0.01
for key in bbox_and_scores:
for box in bbox_and_scores[key]:
if box[4] > cfg.center_thresh:
x1 = int(box[0])
y1 = int(box[1])
x2 = int(box[2])
y2 = int(box[3])
if x2 <= x1 or (x1 < 0 and x2 < 0):
continue
if y2 <= y1 or (y1 < 0 and y2 < 0):
continue
x1 = 0 if x1 < 0 else x1
y1 = 0 if y1 < 0 else y1
x2 = w - 1 if x2 >= w else x2
y2 = h - 1 if y2 >= h else y2
# deal with mask begin
cls = key - 1
centerx = (x1+x2)//2
centery = (y1+y2)//2
# clsbase = cls*9
clsbase = 0
allmaskroi = allmask[y1:y2, x1:x2, :]
roi_h, roi_w, _ = allmaskroi.shape
if roi_h < 6 or roi_w < 6:
continue
roi_cx = roi_w//2
roi_cy = roi_h//2
cell_w = (roi_w+5)//6
cell_h = (roi_h+5)//6
roi = np.zeros((roi_h,roi_w), dtype=np.float32)
# TOP
assignroi(0, roi, allmaskroi, 0, 0, roi_cx-cell_w, roi_cy-cell_h)
assignroi(1, roi, allmaskroi, roi_cx-cell_w, 0, roi_cx+cell_w, roi_cy-cell_h)
assignroi(2, roi, allmaskroi, roi_cx+cell_w, 0, roi_w, roi_cy-cell_h)
# MIDDLE
assignroi(3, roi, allmaskroi, 0, roi_cy-cell_h, roi_cx-cell_w, roi_cy+cell_h)
assignroi(4, roi, allmaskroi, roi_cx-cell_w, roi_cy-cell_h, roi_cx+cell_w, roi_cy+cell_h)
assignroi(5, roi, allmaskroi, roi_cx+cell_w, roi_cy-cell_h, roi_w, roi_cy+cell_h)
# BOTTOM
assignroi(6, roi, allmaskroi, 0, roi_cy+cell_h, roi_cx-cell_w, roi_h )
assignroi(7, roi, allmaskroi, roi_cx-cell_w, roi_cy+cell_h, roi_cx+cell_w, roi_h )
assignroi(8, roi, allmaskroi, roi_cx+cell_w, roi_cy+cell_h, -1, roi_h )
# roi = np.amax(allmaskroi[:,:,:], axis=2)
roi = (roi > thr).astype(np.uint8)
rgb = colors[i,0,0].tolist()
i += 1
if args.instance != 0 and args.instance != i:
continue
l = size2level(w*h, roi_w*roi_h)
roi = roi*((allmaskroi[:,:,cfg.num_maskclasses+l]+allmaskroi[:,:,cfg.num_maskclasses+l+1])/2.0>args.sizethr)
allmaskjpg[:,:,0][y1:y2,x1:x2] += roi*rgb[0]
allmaskjpg[:,:,1][y1:y2,x1:x2] += roi*rgb[1]
allmaskjpg[:,:,2][y1:y2,x1:x2] += roi*rgb[2]
# deal with mask end
cat = COCO._valid_ids[key-1]
cat = COCO.all_valid_ids.index(cat)+1
print(x1, y1, x2, y2, COCO_NAMES[cat], box[4], "level", l)
cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.putText(img, COCO_NAMES[cat], (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 1, (cat*2, 255, 255-cat), 2)
assert(cfg.num_maskclasses == 9)
cv2.imwrite(os.path.join(args.output_dir, "centernet.jpg"), img)
cv2.imwrite("./results/pred_rawmask.jpg", (np.amax(allmask[:,:,0:9], axis=2)>thr).astype(np.uint8)*255)
cv2.imwrite("./results/pred_allmask.png", allmaskjpg)
cv2.imwrite("./results/pred_large.png", (allmask[:,:,9:12]*255).astype(np.uint8))
cv2.imwrite("./results/pred_small.png", (allmask[:,:,12:15]*255).astype(np.uint8))
if __name__ == '__main__':
test()
|
487429
|
import datetime
import unittest
from zoomus import components, util
import responses
def suite():
"""Define all the tests of the module."""
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(RegisterV1TestCase))
suite.addTest(unittest.makeSuite(RegisterV2TestCase))
return suite
class RegisterV1TestCase(unittest.TestCase):
def setUp(self):
self.component = components.webinar.WebinarComponent(
base_uri="http://foo.com",
config={
"api_key": "KEY",
"api_secret": "SECRET",
"version": util.API_VERSION_1,
},
)
@responses.activate
def test_can_register(self):
responses.add(
responses.POST,
"http://foo.com/webinar/register?id=ID&email=<EMAIL>&first_name=Foo&last_name=Bar"
"&api_key=KEY&api_secret=SECRET",
)
self.component.register(
id="ID", email="<EMAIL>", first_name="Foo", last_name="Bar"
)
def test_requires_id(self):
with self.assertRaisesRegexp(ValueError, "'id' must be set"):
self.component.register()
def test_requires_email(self):
with self.assertRaisesRegexp(ValueError, "'email' must be set"):
self.component.register(id="ID")
def test_requires_first_name(self):
with self.assertRaisesRegexp(ValueError, "'first_name' must be set"):
self.component.register(id="ID", email="<EMAIL>")
def test_requires_last_name(self):
with self.assertRaisesRegexp(ValueError, "'last_name' must be set"):
self.component.register(id="ID", email="<EMAIL>", first_name="foo")
@responses.activate
def test_start_time_gets_transformed(self):
start_time = datetime.datetime(1969, 1, 1, 1, 1)
responses.add(
responses.POST,
"http://foo.com/webinar/register?id=ID&email=<EMAIL>&first_name=foo&last_name=bar"
"&start_time=1969-01-01T01%3A01%3A00Z&api_key=KEY&api_secret=SECRET",
)
self.component.register(
id="ID",
email="<EMAIL>",
first_name="foo",
last_name="bar",
start_time=start_time,
)
class RegisterV2TestCase(unittest.TestCase):
def setUp(self):
self.component = components.webinar.WebinarComponentV2(
base_uri="http://foo.com",
config={
"api_key": "KEY",
"api_secret": "SECRET",
"version": util.API_VERSION_2,
},
)
@responses.activate
def test_can_register(self):
responses.add(
responses.POST,
"http://foo.com/webinars/42/registrants",
)
response = self.component.register(
id="42", email="<EMAIL>", first_name="Foo", last_name="Bar"
)
self.assertEqual(
response.request.body,
'{"id": "42", "email": "<EMAIL>", "first_name": "Foo", "last_name": "Bar"}',
)
def test_requires_id(self):
with self.assertRaisesRegexp(ValueError, "'id' must be set"):
self.component.register()
def test_requires_email(self):
with self.assertRaisesRegexp(ValueError, "'email' must be set"):
self.component.register(id="ID")
def test_requires_first_name(self):
with self.assertRaisesRegexp(ValueError, "'first_name' must be set"):
self.component.register(id="ID", email="<EMAIL>")
def test_requires_last_name(self):
with self.assertRaisesRegexp(ValueError, "'last_name' must be set"):
self.component.register(id="ID", email="<EMAIL>", first_name="foo")
if __name__ == "__main__":
unittest.main()
|
487436
|
import torch
import numpy as np
from torch import Tensor
@torch.jit.script
def stable_softmax(logits: torch.Tensor):
logits_m = logits - logits.max(dim=1)[0].unsqueeze(1)
exp = torch.exp(logits_m)
probs = exp / torch.sum(exp, dim=1).unsqueeze(1)
return probs
class ImageNet21kSemanticSoftmax:
def __init__(self, args):
self.args = args
self.tree = torch.load(args.tree_path)
self.class_tree_list = self.tree['class_tree_list']
self.class_names = np.array(list(self.tree['class_description'].values()))
self.max_normalization_factor = 2e1
num_classes = len(self.class_tree_list)
self.class_depth = torch.zeros(num_classes)
for i in range(num_classes):
self.class_depth[i] = len(self.class_tree_list[i]) - 1
max_depth = int(torch.max(self.class_depth).item())
# process semantic relations
hist_tree = torch.histc(self.class_depth, bins=max_depth + 1, min=0, max=max_depth).int()
ind_list = []
class_names_ind_list = []
hirarchy_level_list = []
cls = torch.tensor(np.arange(num_classes))
for i in range(max_depth):
if hist_tree[i] > 1:
hirarchy_level_list.append(i)
ind_list.append(cls[self.class_depth == i].long())
class_names_ind_list.append(self.class_names[ind_list[-1]])
self.hierarchy_indices_list = ind_list
self.hirarchy_level_list = hirarchy_level_list
self.class_names_ind_list = class_names_ind_list
# calculating normalization array
self.normalization_factor_list = torch.zeros_like(hist_tree)
self.normalization_factor_list[-1] = hist_tree[-1]
for i in range(max_depth):
self.normalization_factor_list[i] = torch.sum(hist_tree[i:], dim=0)
self.normalization_factor_list = self.normalization_factor_list[0] / self.normalization_factor_list
if self.max_normalization_factor:
self.normalization_factor_list.clamp_(max=self.max_normalization_factor)
def split_logits_to_semantic_logits(self, logits: Tensor) -> Tensor:
"""
split the logits into 11 different hierarchies.
:param self.hierarchy_indices_list: a list of size [num_of_hierarchies].
Each element in the list is a tensor that contains the corresponding indices for the relevant hierarchy
"""
semantic_logit_list = []
for i, ind in enumerate(self.hierarchy_indices_list):
logits_i = logits[:, ind]
semantic_logit_list.append(logits_i)
return semantic_logit_list
def convert_targets_to_semantic_targets(self, targets_original: Tensor) -> Tensor:
"""
converts single-label targets to targets over num_of_hierarchies different hierarchies.
[batch_size] -> [batch_size x num_of_hierarchies].
if no hierarchical target is available, outputs -1.
:param self.hierarchy_indices_list: a list of size [num_of_hierarchies].
Each element in the list is a tensor that contains the corresponding indices for the relevant hierarchy
:param self.class_tree_list: a list of size [num_of_classes].
Each element in the list is a list of the relevant sub-hierarchies.
example - self.class_tree_list[10]: [10, 9, 66, 65, 144]
"""
targets = targets_original.cpu().detach() # dont edit original targets
semantic_targets_list = torch.zeros((targets.shape[0], len(self.hierarchy_indices_list))) - 1
for i, target in enumerate(targets.cpu()): # scanning over batch size
cls_multi_list = self.class_tree_list[target]  # all the sub-hierarchies of the target
hir_levels = len(cls_multi_list)
for j, cls in enumerate(cls_multi_list):
# guard against a hirarchy_level_list that is too short; this makes it possible to remove hierarchies
if len(self.hierarchy_indices_list) <= hir_levels - j - 1:
continue
ind_valid = (self.hierarchy_indices_list[hir_levels - j - 1] == cls)
semantic_targets_list[i, hir_levels - j - 1] = torch.where(ind_valid)[0]
return semantic_targets_list.long().to(device=targets_original.device)
def estimate_teacher_confidence(self, preds_teacher: Tensor) -> Tensor:
"""
Helper function:
returns the summed probabilities of the top 5% of classes in preds_teacher.
preds_teacher dimensions - [batch_size x num_of_classes]
"""
with torch.no_grad():
num_elements = preds_teacher.shape[1]
num_elements_topk = int(np.ceil(num_elements / 20)) # top 5%
weights_batch = torch.sum(torch.topk(preds_teacher, num_elements_topk).values, dim=1)
return weights_batch
def calculate_KD_loss(self, input_student: Tensor, input_teacher: Tensor):
"""
Calculates the semantic KD-MSE distance between student and teacher probabilities
input_student dimensions - [batch_size x num_of_classes]
input_teacher dimensions - [batch_size x num_of_classes]
"""
semantic_input_student = self.split_logits_to_semantic_logits(input_student)
semantic_input_teacher = self.split_logits_to_semantic_logits(input_teacher)
number_of_hierarchies = len(semantic_input_student)
losses_list = []
# scanning hirarchy_level_list
for i in range(number_of_hierarchies):
# converting to semantic logits
inputs_student_i = semantic_input_student[i]
inputs_teacher_i = semantic_input_teacher[i]
# generating probs
preds_student_i = stable_softmax(inputs_student_i)
preds_teacher_i = stable_softmax(inputs_teacher_i)
# weight MSE-KD distances according to teacher confidence
loss_non_reduced = torch.nn.MSELoss(reduction='none')(preds_student_i, preds_teacher_i)
weights_batch = self.estimate_teacher_confidence(preds_teacher_i)
loss_weighted = loss_non_reduced * weights_batch.unsqueeze(1)
losses_list.append(torch.sum(loss_weighted))
return sum(losses_list)
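# Illustrative check (not part of the original module): softmax is invariant to
# subtracting a per-row constant, so stable_softmax above should agree with
# torch.softmax while avoiding overflow on very large logits.
if __name__ == '__main__':
    _logits = torch.tensor([[1000.0, 1001.0, 1002.0]])
    assert torch.allclose(stable_softmax(_logits), torch.softmax(_logits, dim=1))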
|
487448
|
import numpy as np
import pytest
import krylov
_factors = [0.0, 1.0, 1.0j, 1.0 + 1.0j, 1e8, 1e-8]
@pytest.mark.parametrize("a", _factors)
@pytest.mark.parametrize("b", _factors)
def test_givens(a, b):
x = np.array([a, b])
G, _ = krylov.givens(x)
# check that G.G is unitary
I = np.eye(2)
assert np.linalg.norm(I - np.dot(G.T.conj(), G), 2) <= 1e-14
# check that absolute value of y[0] equals norm(x)
y = G @ x
ref = np.linalg.norm(x, 2)
assert np.abs(ref - np.abs(y[0])) <= 1e-14 * ref
# check that y[1] == 0
assert np.abs(y[1]) <= 1e-14 * ref
|
487472
|
from django.db import migrations, models
import smartmin.csv_imports.models
class Migration(migrations.Migration):
dependencies = [
('csv_imports', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='importtask',
name='csv_file',
field=models.FileField(help_text='A comma delimited file of records to import', upload_to=smartmin.csv_imports.models.generate_file_path, verbose_name='Import file'),
),
]
|
487486
|
class Error(Exception):
"""Base class for other exceptions"""
pass
class LoginTimeoutError(Error):
"""Raised login to device times out"""
pass
class LoginCredentialsError(Error):
"""Raised when there is a problem with the user credentials"""
pass
|
487542
|
from slack_sdk.web.async_client import AsyncWebClient
class AsyncComplete:
def __init__(self, *, client: AsyncWebClient, body: dict):
self.client = client
self.body = body
async def __call__(self, **kwargs) -> None:
await self.client.workflows_stepCompleted(
workflow_step_execute_id=self.body["event"]["workflow_step"][
"workflow_step_execute_id"
],
**kwargs,
)
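# Illustrative usage (not part of the original module; assumes a slack_bolt-style
# async workflow-step listener provides `client` and `body`):
#
#   complete = AsyncComplete(client=client, body=body)
#   await complete(outputs={"result": "ok"})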
|
487608
|
from pytools import sample
def test_greet():
assert sample.greet() == "Hello World"
def test_loop():
assert sample.loop(10) == 55
|
487616
|
from copy import deepcopy
class attrdict(dict):
"""
A dictionary that allows for attribute notation (e.g. d.element).
Parameters
----------
recursive
If True, recursively converts nested dictionaries into :class:`~scvi.utils.attrdict` objects.
Notes
-----
Based off of https://stackoverflow.com/questions/38034377/object-like-attribute-access-for-nested-dictionary.
"""
def __init__(self, *args, recursive: bool = False, **kwargs):
def from_nested_dict(data):
if not isinstance(data, dict):
return data
else:
return attrdict({key: from_nested_dict(data[key]) for key in data})
super().__init__(*args, **kwargs)
for key in self.keys():
if hasattr(self, key):
raise ValueError(
f"Cannot create attrdict containing key {key} due to conflict with built-in dict attribute."
)
if recursive:
self[key] = from_nested_dict(self[key])
else:
self[key] = deepcopy(self[key])
self.__dict__ = self
def __repr__(self) -> str:
return f"attrdict({super().__repr__()})"
|
487618
|
from django.contrib.gis import geos
import graphene
import graphql_geojson
from . import nodes
from .. import models
from ..testcases import SchemaTestCase
class CreatePlace(graphene.ClientIDMutation):
place = graphene.Field(nodes.PlaceNode)
class Input:
name = graphene.String(required=True)
location = graphql_geojson.Geometry(required=True)
@classmethod
def mutate_and_get_payload(cls, root, info,
client_mutation_id=None, **input):
place = models.Place.objects.create(**input)
return cls(place=place)
class MutationsTests(SchemaTestCase):
query = '''
mutation CreatePlace($input: CreatePlaceInput!) {
createPlace(input: $input) {
place {
id
type
geometry {
type
coordinates
}
bbox
properties {
name
}
}
clientMutationId
}
}'''
class Mutations(graphene.ObjectType):
create_place = CreatePlace.Field()
def test_create_place(self):
geometry = geos.Point(1, 0)
response = self.execute({
'input': {
'name': 'test',
'location': str(geometry),
},
})
data = response.data['createPlace']['place']
self.assertGeoJSON(geometry, data)
self.assertEqual('test', data['properties']['name'])
|
487633
|
from django.contrib import admin
from .models import District, MovementReason, \
MovementPass, MoveType, TimeSpend
admin.site.register(District)
#admin.site.register(Subdistrict)
admin.site.register(MovementReason)
admin.site.register(MovementPass)
admin.site.register(TimeSpend)
admin.site.register(MoveType)
|
487644
|
from __future__ import unicode_literals
import hashlib
import mock
from mock.mock import MagicMock
from spockbot.plugins.core.auth import AuthCore, AuthPlugin, java_hex_digest
def test_java_hex_digest():
negative_hash = java_hex_digest(hashlib.sha1(b'a'))
assert negative_hash == '-79081bc8055a58031ea2e22346151515c8899848'
positive_hash = java_hex_digest(hashlib.sha1(b'd'))
assert positive_hash == '3c363836cf4e16666669a25da280a1865c2d2874'
def get_mocked_auth_core():
event_mock = MagicMock()
auth_core = AuthCore(event_mock, MagicMock(), MagicMock())
auth_core.online_mode = False
auth_core.auth_timeout = 3
auth_core.ygg = ygg_mock = MagicMock()
auth_core.ygg.password = ''
auth_core.ygg.username = ''
return auth_core, event_mock, ygg_mock
def test_offline_username():
auth, event, ygg = get_mocked_auth_core()
auth.online_mode = False
assert auth.username is None
auth.username = 'Joe'
# username should return None till start_session()
assert auth.username is None
auth.start_session()
assert auth.username == 'Joe'
assert ygg.mock_calls == []
def test_online_username():
auth, event, ygg = get_mocked_auth_core()
auth.online_mode = True
assert auth.username is None
auth.username = 'Joe'
auth.ygg.selected_profile = {'name': 'Joe'}
# username should return None till start_session()
assert auth.username is None
# Test failing login
ygg.login.return_value = False
auth.start_session()
assert auth.username is None
# Test succeeding login
ygg.login.return_value = True
auth.start_session()
assert auth.username == 'Joe'
def test_password():
auth, event, ygg = get_mocked_auth_core()
assert auth.password is False
# Empty password is no password
auth.password = ''
assert auth.password is False
# Normal password
auth.password = 'password'
assert auth.password is True
def test_start_session_online_success():
auth, event, ygg = get_mocked_auth_core()
auth.online_mode = True
ygg.login.return_value = True
res = auth.start_session()
assert res
assert not event.called
assert auth.username
def test_start_session_online_failure():
auth, event, ygg = get_mocked_auth_core()
auth.online_mode = True
ygg.login.return_value = False
res = auth.start_session()
assert not res
assert event.emit.called
assert not auth.username
@mock.patch('spockbot.plugins.core.auth.os.urandom')
def test_get_shared_secret(rnd):
auth, event, ygg = get_mocked_auth_core()
assert not rnd.called
assert not auth._shared_secret
ss = auth.shared_secret
assert auth.shared_secret == ss
assert rnd.called
def get_mocked_auth_plugin():
auth_plugin = AuthPlugin(MagicMock(), MagicMock())
auth_plugin.auth_quit = False
auth_plugin.sess_quit = False
auth_plugin.event = MagicMock()
return auth_plugin
def test_handle_auth_error():
auth = get_mocked_auth_plugin()
auth.handle_auth_error(None, None)
assert not auth.event.kill.called
auth.auth_quit = True
auth.handle_auth_error(None, None)
assert auth.event.kill.called
def test_handle_session_error():
auth = get_mocked_auth_plugin()
auth.handle_session_error(None, None)
assert not auth.event.kill.called
auth.sess_quit = True
auth.handle_session_error(None, None)
assert auth.event.kill.called
|
487667
|
import json
import csv
import os
import argparse
from glob import glob
def get_statistics_of_file(path):
count = 0
sub_types = []
obj_types = []
with open(path) as f:
for line in f:
data = json.loads(line)
count += 1
if ('sub_type' in data) and ('obj_types' in data):
local_sub_type = data['sub_type']
local_obj_types = data['obj_types']
sub_types.append(local_sub_type)
obj_types += local_obj_types
return count, sub_types, obj_types
def get_obj_counts(path):
obj_counts = []
with open(file=path) as f:
for line in f:
data = json.loads(line)
obj_labels = data['obj_labels']
obj_counts.append(len(obj_labels))
return obj_counts
"""
Input: Triples
Output: PID/Property Name/Count/Pair Type
"""
def main(args):
input_dirs = glob(args.data_dir)
if args.pids: # P1050
new_dirs = []
pids = list(dict.fromkeys(args.pids.split(",")))
for file in input_dirs:
if file.split("/")[-1] in pids:
new_dirs.append(file)
input_dirs = new_dirs
# pid2name
if args.property_path:
pid2name = {}
with open(args.property_path) as f:
rdr = csv.reader(f, delimiter='\t')
r = list(rdr)
for pid, name in r:
pid2name[pid] = name
# qid2type
if args.type_path:
with open(args.type_path) as f:
qid2type = json.load(f)
else:
qid2type = None
all_obj_counts = []
print(f"PID\tTRAIN\tDEV\tTEST")
all_trains = 0
all_devs = 0
all_tests = 0
for input_dir in input_dirs:
pid = input_dir.split("/")[-1]
train_file = os.path.join(input_dir, 'train.jsonl')
dev_file = os.path.join(input_dir, 'dev.jsonl')
test_file = os.path.join(input_dir, 'test.jsonl')
sub_types = []
obj_types = []
train_count, train_sub_types, train_obj_types = get_statistics_of_file(train_file)
dev_count, dev_sub_types, dev_obj_types = get_statistics_of_file(dev_file)
test_count, test_sub_types, test_obj_types = get_statistics_of_file(test_file)
if qid2type:
sub_types = train_sub_types + dev_sub_types + test_sub_types
obj_types = train_obj_types + dev_obj_types + test_obj_types
sub_types = [qid2type[st] for st in sub_types]
obj_types = [qid2type[ot] for ot in obj_types]
sub_types = list(set(sub_types))
obj_types = list(set(obj_types))
print(f"{pid}\t{train_count}\t{dev_count}\t{test_count}")
all_trains += train_count
all_devs += dev_count
all_tests += test_count
print("================================")
print(f"TOTAL\t{all_trains}\t{all_devs}\t{all_tests}")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir",
default='../data/wikidata/triples_processed'
)
parser.add_argument("--pids",
default=None
)
parser.add_argument("--property_path",
default=None
)
parser.add_argument("--type_path",
default=None
)
args = parser.parse_args()
main(args)
|
487741
|
from unittest import TestCase
from unittest.mock import MagicMock, patch
from samcli.lib.build.dependency_hash_generator import DependencyHashGenerator
class TestDependencyHashGenerator(TestCase):
def setUp(self):
self.get_workflow_config_patch = patch("samcli.lib.build.dependency_hash_generator.get_workflow_config")
self.get_workflow_config_mock = self.get_workflow_config_patch.start()
self.get_workflow_config_mock.return_value.manifest_name = "manifest_file"
self.file_checksum_patch = patch("samcli.lib.build.dependency_hash_generator.file_checksum")
self.file_checksum_mock = self.file_checksum_patch.start()
self.file_checksum_mock.return_value = "checksum"
def tearDown(self):
self.get_workflow_config_patch.stop()
self.file_checksum_patch.stop()
@patch("samcli.lib.build.dependency_hash_generator.DependencyHashGenerator._calculate_dependency_hash")
@patch("samcli.lib.build.dependency_hash_generator.pathlib.Path")
def test_init_and_properties(self, path_mock, calculate_hash_mock):
path_mock.return_value.resolve.return_value.__str__.return_value = "code_dir"
calculate_hash_mock.return_value = "dependency_hash"
self.generator = DependencyHashGenerator("code_uri", "base_dir", "runtime")
self.assertEqual(self.generator._code_uri, "code_uri")
self.assertEqual(self.generator._base_dir, "base_dir")
self.assertEqual(self.generator._code_dir, "code_dir")
self.assertEqual(self.generator._runtime, "runtime")
self.assertEqual(self.generator.hash, "dependency_hash")
path_mock.assert_called_once_with("base_dir", "code_uri")
@patch("samcli.lib.build.dependency_hash_generator.pathlib.Path")
def test_calculate_manifest_hash(self, path_mock):
code_dir_mock = MagicMock()
code_dir_mock.resolve.return_value.__str__.return_value = "code_dir"
manifest_path_mock = MagicMock()
manifest_path_mock.resolve.return_value.__str__.return_value = "manifest_path"
manifest_path_mock.resolve.return_value.is_file.return_value = True
path_mock.side_effect = [code_dir_mock, manifest_path_mock]
self.generator = DependencyHashGenerator("code_uri", "base_dir", "runtime")
hash = self.generator.hash
self.file_checksum_mock.assert_called_once_with("manifest_path", hash_generator=None)
self.assertEqual(hash, "checksum")
path_mock.assert_any_call("base_dir", "code_uri")
path_mock.assert_any_call("code_dir", "manifest_file")
@patch("samcli.lib.build.dependency_hash_generator.pathlib.Path")
def test_calculate_manifest_hash_missing_file(self, path_mock):
code_dir_mock = MagicMock()
code_dir_mock.resolve.return_value.__str__.return_value = "code_dir"
manifest_path_mock = MagicMock()
manifest_path_mock.resolve.return_value.__str__.return_value = "manifest_path"
manifest_path_mock.resolve.return_value.is_file.return_value = False
path_mock.side_effect = [code_dir_mock, manifest_path_mock]
self.generator = DependencyHashGenerator("code_uri", "base_dir", "runtime")
self.file_checksum_mock.assert_not_called()
self.assertEqual(self.generator.hash, None)
path_mock.assert_any_call("base_dir", "code_uri")
path_mock.assert_any_call("code_dir", "manifest_file")
@patch("samcli.lib.build.dependency_hash_generator.pathlib.Path")
def test_calculate_manifest_hash_manifest_override(self, path_mock):
code_dir_mock = MagicMock()
code_dir_mock.resolve.return_value.__str__.return_value = "code_dir"
manifest_path_mock = MagicMock()
manifest_path_mock.resolve.return_value.__str__.return_value = "manifest_path"
manifest_path_mock.resolve.return_value.is_file.return_value = True
path_mock.side_effect = [code_dir_mock, manifest_path_mock]
self.generator = DependencyHashGenerator(
"code_uri", "base_dir", "runtime", manifest_path_override="manifest_override"
)
hash = self.generator.hash
self.get_workflow_config_mock.assert_not_called()
self.file_checksum_mock.assert_called_once_with("manifest_path", hash_generator=None)
self.assertEqual(hash, "checksum")
path_mock.assert_any_call("base_dir", "code_uri")
path_mock.assert_any_call("code_dir", "manifest_override")
|
487777
|
import time
from easycli import Root, ProgressBar
DEFAULT_TCP_PORT = 8585
DEFAULT_HOST = 'WPP.local'
class Main(Root):
__help__ = 'easycli example'
__completion__ = True
__arguments__ = [
]
def __call__(self, args):
length = 1000
with ProgressBar(length) as pb:
for i in range(length):
pb.increment()
time.sleep(0.01)
return
if __name__ == '__main__':
Main().main()
|
487778
|
from moai.utils.arguments import ensure_string_list
import moai.networks.lightning as minet
import moai.nn.convolution as mic
import moai.nn.residual as mires
import moai.nn.sampling.spatial.downsample as mids
import moai.modules.lightning as mimod
import moai.nn.utils as miu
import torch
import hydra.utils as hyu
import omegaconf.omegaconf as omegaconf
import typing
import logging
log = logging.getLogger(__name__)
#NOTE: from https://github.com/HRNet/HRNet-Bottom-Up-Pose-Estimation/blob/master/lib/models/pose_hrnet.py
#NOTE: from https://arxiv.org/pdf/1908.07919.pdf
__all__ = ["HRNet"]
class HRNet(minet.FeedForward):
def __init__(self,
configuration: omegaconf.DictConfig,
modules: omegaconf.DictConfig,
data: omegaconf.DictConfig=None,
parameters: omegaconf.DictConfig=None,
feedforward: omegaconf.DictConfig=None,
monads: omegaconf.DictConfig=None,
supervision: omegaconf.DictConfig=None,
validation: omegaconf.DictConfig=None,
visualization: omegaconf.DictConfig=None,
export: omegaconf.DictConfig=None,
):
super(HRNet, self).__init__(
data=data, parameters=parameters,
feedforward=feedforward, monads=monads,
supervision=supervision, validation=validation,
export=export, visualization=visualization,
)
preproc = configuration.preproc
#NOTE: preproc = stem + layer1
preproc_convs = []
prev_features = configuration.in_features
stem = preproc.stem
for b, c, a, f, k, s, p in zip(
stem.blocks, stem.convolutions,
stem.activations, stem.features,
stem.kernel_sizes, stem.strides, stem.paddings):
preproc_convs.append(mic.make_conv_block(
block_type=b,
convolution_type=c,
in_features=prev_features,
out_features=f,
activation_type=a,
convolution_params={
"kernel_size": k,
"stride": s,
"padding": p,
},
))
prev_features = f
residual = preproc.residual
residual_blocks = []
for i, o, b in zip(
residual.features.in_features, residual.features.out_features,
residual.features.bottleneck_features,
):
residual_blocks.append(mires.make_residual_block(
block_type=residual.block,
convolution_type=residual.convolution,
out_features=o,
in_features=i,
bottleneck_features=b,
activation_type=residual.activation,
strided=False,
))
self.pre = torch.nn.Sequential(
*preproc_convs, *residual_blocks,
)
branches_config = configuration.branches
start_trans_config = modules['start_transition']
self.start_trans = hyu.instantiate(start_trans_config,
in_features=residual.features.out_features[-1],
start_features=branches_config.start_features
)
#NOTE: stages
highres_module = modules['highres'] # NOTE: outputs list of # branches outputs
self.stages = torch.nn.ModuleList([
torch.nn.Sequential(*[
hyu.instantiate(highres_module,
branches=i, depth=d, start_features=branches_config.start_features
) for _, d in zip(range(modules), depths)
]) for i, modules, depths in zip(
range(2, configuration.stages + 1),
branches_config.modules,
branches_config.depths,
)
])
stage_trans_config = modules['stage_transition']
self.stage_transitions = torch.nn.ModuleList([
hyu.instantiate(stage_trans_config, branches=i + 1,
prev_branch_features=branches_config.start_features * (2 ** i),
) for i in range(1, configuration.stages - 1)
])
head_module = modules['head']
self.head = hyu.instantiate(head_module,
stages=configuration.stages,
start_features=branches_config.start_features,
out_features=configuration.out_features,
)
self.input = ensure_string_list(configuration.input)
self.output = ensure_string_list(configuration.output)
self.output_prefix = configuration.output
def forward(self,
td: typing.Dict[str, torch.Tensor]
) -> typing.Dict[str, torch.Tensor]:
for i, o in zip(self.input, self.output):
in_tensor = td[i]
preproc = self.pre(in_tensor)
hr_inputs = self.start_trans(preproc)
for stage, trans in zip(self.stages, self.stage_transitions):
hr_inputs = trans(stage(hr_inputs))
prediction = self.head(self.stages[-1](hr_inputs))
if type(prediction) == list: #NOTE: to support higherhnet
for i, heatmap in enumerate(prediction):
td[f"{self.output_prefix[:-2]}_{i+1}"] = heatmap
else:
td[o] = prediction
return td
|
487845
|
from glife import *
import golly as g
bitOff = pattern('4.DBD$5.B$4.2B$5.D$D.B2.F2.3D$3BD2FD4B$D4.D2.3D$5.B$4.DBD$4.DBD$4.DBD!')
bitOn = pattern('4.DBD$5.B.B$4.2B.2B$5.D.2D$D.B2.F.D2.D$3BD2FDF.2B$D4.D2.B.D$5.B2.B$4.DBD$4.DBD$4.DBD!')
g.addlayer()
g.new('ROM')
g.setrule('Varlife')
code = g.getclipstr()
opcodes = {'MNZ': '0000',
'MLZ': '0001',
'ADD': '0010',
'SUB': '0011',
'AND': '0100',
'OR' : '0101',
'XOR': '0110',
'ANT': '0111',
'SL' : '1000',
'SRL': '1001',
'SRA': '1010'}
modes = {'A': '01',
'B': '10',
'C': '11'}
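#Encoding note (derived from the loop below): each instruction becomes one
#11-cell-wide column of ROM. Row 0 holds the clock-generation cell, then the
#three arguments are written in reverse order, each as a 2-bit addressing-mode
#prefix ('00' for a plain number, or the A/B/C codes above) followed by a
#16-bit value, and the 4-bit opcode comes last.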
x = 0
#Iterate through the instructions, backwards
for line in code.split('\n')[::-1]:
bincode = []
y = 1
#Remove starting numbering and comments
instruction = line.split(';')[0].split('.')[-1].split()
#Parse each argument
for argument in instruction[:0:-1]:
if argument[0] in modes:
bincode.append('{}{:016b}'.format(modes[argument[0]], 65535 & int(argument[1:], 0)))
else:
bincode.append('00{:016b}'.format(65535 & int(argument, 0)))
bincode.append(opcodes[instruction[0]]) #Add opcode at end
#Insert beginning clock generation line
bitOn.put(11*x, 0)
#Paste ROM bits
for bit in ''.join(bincode):
if bit == '0':
bitOff.put(11*x, 11*y)
else:
bitOn.put(11*x, 11*y)
y += 1
x += 1
#0. MLZ -1 3 3;
#1. MLZ -1 7 6; preloadCallStack
#2. MLZ -1 2 1; beginDoWhile0_infinite_loop
#3. MLZ -1 1 4; beginDoWhile1_trials
#4. ADD A4 2 4;
#5. MLZ -1 A3 5; beginDoWhile2_repeated_subtraction
#6. SUB A5 A4 5;
#7. SUB 0 A5 2;
#8. MLZ A2 5 0;
#9. MLZ 0 0 0; endDoWhile2_repeated_subtraction
#10. MLZ A5 3 0;
#11. MNZ 0 0 0; endDoWhile1_trials
#12. SUB A4 A3 2;
#13. MNZ A2 15 0; beginIf3_prime_found
#14. MNZ 0 0 0;
#15. MLZ -1 A3 1; endIf3_prime_found
#16. ADD A3 2 3;
#17. MLZ -1 3 0;
#18. MLZ -1 1 4; endDoWhile0_infinite_loop
|
487868
|
from summarize.nn.beam_search.coverage_penalizers.coverage_penalizer import CoveragePenalizer
from summarize.nn.beam_search.coverage_penalizers.onmt import ONMTCoveragePenalizer
|
487874
|
import logging
def create_logger(module_name):
"""
Initializes a new logger object.
Parameters:
module_name (str): name of the module importing the logger
"""
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s %(name)s %(levelname)s:%(message)s",
)
log_handler = logging.StreamHandler()
log_file_handler = logging.FileHandler("zebrok.log")
logger = logging.getLogger(module_name)
logger.addHandler(log_handler)
logger.addHandler(log_file_handler)
return logger
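# A minimal usage sketch (not part of the original module); log records go to
# the console and to zebrok.log as configured above.
if __name__ == "__main__":
    logger = create_logger(__name__)
    logger.info("logger initialised")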
|
487876
|
from chembl_webresource_client.settings import Settings
Settings.Instance().MAX_LIMIT = 500
import unittest2 as unittest
import json
from chembl_webresource_client.new_client import new_client
from chembl_webresource_client.utils import utils
from chembl_webresource_client.unichem import unichem_client as unichem
from statistics import median
class TestDocsExamples(unittest.TestCase):
def test_search_molecule_by_synonym(self):
molecule = new_client.molecule
molecule.set_format('json')
res = molecule.search('viagra')
self.assertEqual(len(res), 2)
self.assertTrue(
all([any(['Sildenafil' in y['molecule_synonym'] for y in x['molecule_synonyms']]) for x in res]))
def test_search_target_by_gene_name(self):
target = new_client.target
gene_name = 'BRD4'
res = target.search(gene_name)
self.assertEqual(len(res), 2)
self.assertTrue(all([all(
[any(['brd4' in z['component_synonym'].lower() for z in y['target_component_synonyms']]) for y in
x['target_components']]) for x in res]))
def test_search_gene_name_in_target_synonym(self):
target = new_client.target
gene_name = 'GABRB2'
res = target.filter(target_synonym__icontains=gene_name)
self.assertEqual(len(res), 14)
self.assertTrue(all([any(
[any(['GABRB2' in z['component_synonym'].upper() for z in y['target_component_synonyms']]) for y in
x['target_components']]) for x in res]))
def test_chembl_id_to_uniprot(self):
chembl_ids = ['CHEMBL819', 'CHEMBL820', 'CHEMBL821']
compounds2targets = dict()
# First, collect the compound ChEMBL IDs we want to map:
for chembl_id in chembl_ids:
compounds2targets[chembl_id] = set()
# OK, we have our source IDs, let's process them in chunks:
chunk_size = 50
keys = list(compounds2targets.keys())
for i in range(0, len(keys), chunk_size):
# we jump from compounds to targets through activities:
activities = new_client.activity.filter(molecule_chembl_id__in=keys[i:i + chunk_size]).only(
['molecule_chembl_id', 'target_chembl_id'])
# extracting target ChEMBL IDs from activities:
for act in activities:
compounds2targets[act['molecule_chembl_id']].add(act['target_chembl_id'])
# OK, now our dictionary maps from compound ChEMBL IDs into target ChEMBL IDs
# We would like to replace target ChEMBL IDs with uniprot IDs
for key, val in compounds2targets.items():
# We don't know how many targets are assigned to a given compound so again it's
# better to process targets in chunks:
lval = list(val)
uniprots = set()
for i in range(0, len(val), chunk_size):
targets = new_client.target.filter(target_chembl_id__in=lval[i:i + chunk_size]).only(
['target_components'])
uniprots |= set(
sum([[comp['accession'] for comp in t['target_components']] for t in targets], []))
compounds2targets[key] = uniprots
self.assertTrue({'A1E3K9', 'P35695', 'P62593'}.issubset(compounds2targets['CHEMBL819']))
self.assertTrue('P00811' not in compounds2targets['CHEMBL819'])
self.assertTrue(len(compounds2targets['CHEMBL819']) == 14)
self.assertTrue(len(compounds2targets['CHEMBL820']) == 123)
self.assertTrue(len(compounds2targets['CHEMBL821']) == 13)
def test_chembl_id_to_uniprot_with_parents(self):
chembl_ids = ['CHEMBL819', 'CHEMBL820', 'CHEMBL821']
organism = 'Escherichia coli'
compounds2targets = dict()
header = True
for chembl_id in chembl_ids:
compounds2targets[chembl_id] = set()
chunk_size = 50
keys = list(compounds2targets.keys())
ID_forms = dict()
for x in keys:
ID_forms[x] = set()
for i in range(0, len(keys), chunk_size):
for form in new_client.molecule_form.filter(parent_chembl_id__in=keys[i:i + chunk_size]):
ID_forms[form['parent_chembl_id']].add(form['molecule_chembl_id'])
for i in range(0, len(keys), chunk_size):
for form in new_client.molecule_form.filter(molecule_chembl_id__in=keys[i:i + chunk_size]):
ID_forms[form['molecule_chembl_id']].add(form['parent_chembl_id'])
values = []
for x in ID_forms.values():
values.extend(x)
forms_to_ID = dict()
for x in values:
forms_to_ID[x] = set()
for k in forms_to_ID:
for parent, molecule in ID_forms.items():
if k in molecule:
forms_to_ID[k] = parent
for i in range(0, len(values), chunk_size):
activities = new_client.activity.filter(molecule_chembl_id__in=values[i:i + chunk_size]).filter(
target_organism__istartswith=organism).only(['molecule_chembl_id', 'target_chembl_id'])
for act in activities:
compounds2targets[forms_to_ID[act['molecule_chembl_id']]].add(act['target_chembl_id'])
for key, val in compounds2targets.items():
lval = list(val)
uniprots = set()
for i in range(0, len(val), chunk_size):
targets = new_client.target.filter(target_chembl_id__in=lval[i:i + chunk_size]).only(
['target_components'])
uniprots = uniprots.union(
set(sum([[comp['accession'] for comp in t['target_components']] for t in targets], [])))
compounds2targets[key] = uniprots
self.assertTrue('P00811' in compounds2targets['CHEMBL819'])
self.assertTrue(len(compounds2targets['CHEMBL819']) == 4)
self.assertTrue(len(compounds2targets['CHEMBL820']) == 0)
self.assertTrue(len(compounds2targets['CHEMBL821']) == 0)
def test_chembl_id_to_human_gene_names(self):
chembl_ids = ['CHEMBL819', 'CHEMBL820', 'CHEMBL821']
compounds2targets = dict()
# First, collect the compound ChEMBL IDs we want to map:
for chembl_id in chembl_ids:
compounds2targets[chembl_id] = set()
# OK, we have our source IDs, let's process them in chunks:
chunk_size = 50
keys = list(compounds2targets.keys())
for i in range(0, len(keys), chunk_size):
# we jump from compounds to targets through activities:
activities = new_client.activity.filter(molecule_chembl_id__in=keys[i:i + chunk_size]).only(
['molecule_chembl_id', 'target_chembl_id'])
# extracting target ChEMBL IDs from activities:
for act in activities:
compounds2targets[act['molecule_chembl_id']].add(act['target_chembl_id'])
# OK, now our dictionary maps from compound ChEMBL IDs into target ChEMBL IDs
# We would like to replace target ChEMBL IDs with uniprot IDs
for key, val in compounds2targets.items():
# We don't know how many targets are assigned to a given compound so again it's
# better to process targets in chunks:
lval = list(val)
genes = set()
for i in range(0, len(val), chunk_size):
targets = new_client.target.filter(target_chembl_id__in=lval[i:i + chunk_size]).only(
['target_components'])
for target in targets:
for component in target['target_components']:
for synonym in component['target_component_synonyms']:
if synonym['syn_type'] == "GENE_SYMBOL":
genes.add(synonym['component_synonym'])
compounds2targets[key] = genes
self.assertTrue({'ORF35', 'Pept2', 'Oat1'}.issubset(compounds2targets['CHEMBL819']),
compounds2targets['CHEMBL819'])
self.assertTrue({'B2AR', 'AVPR1', 'UBP41'}.issubset(compounds2targets['CHEMBL820']),
compounds2targets['CHEMBL820'])
self.assertTrue({'OATP1B3', 'OAT3', 'OCT2'}.issubset(compounds2targets['CHEMBL821']),
compounds2targets['CHEMBL821'])
def test_similarity_85(self):
similarity = new_client.similarity
res = similarity.filter(smiles="CO[C@@H](CCC#C\C=C/CCCC(C)CCCCC=C)C(=O)[O-]", similarity=85)
self.assertTrue(len(res) >= 2)
self.assertTrue(all([float(r['similarity']) >= 80.0 for r in res]))
self.assertTrue({'CHEMBL478779', 'CHEMBL477888'}.issubset(set([r['molecule_chembl_id'] for r in res])))
def test_similarity_70(self):
molecule = new_client.molecule
molecule.set_format('json')
similarity = new_client.similarity
aspirin_chembl_id = molecule.search('aspirin')[0]['molecule_chembl_id']
res = similarity.filter(chembl_id=aspirin_chembl_id, similarity=70)
self.assertTrue(len(res) >= 6)
self.assertTrue(res[0]['similarity'] == '100')
self.assertTrue(res[0]['molecule_chembl_id'] == 'CHEMBL25')
self.assertTrue({'CHEMBL163148', 'CHEMBL351485', 'CHEMBL2260706', 'CHEMBL1234172', 'CHEMBL1359634'}.issubset(
set([r['molecule_chembl_id'] for r in res])))
def test_smiles_substructure(self):
substructure = new_client.substructure
res = substructure.filter(smiles="CN(CCCN)c1cccc2ccccc12")
self.assertTrue(len(res) > 80)
self.assertTrue({'CHEMBL442138', 'CHEMBL287955', 'CHEMBL38899', 'CHEMBL53821', 'CHEMBL55826'}.issubset(
set([r['molecule_chembl_id'] for r in res])))
def test_chembl_id_substructure(self):
substructure = new_client.substructure
res = substructure.filter(chembl_id="CHEMBL25")
self.assertTrue(len(res) > 380)
self.assertTrue({'CHEMBL25', 'CHEMBL7666', 'CHEMBL10222', 'CHEMBL10008'}.issubset(
set([r['molecule_chembl_id'] for r in res])))
def test_get_by_chembl_id(self):
molecule = new_client.molecule
molecule.set_format('json')
m1 = molecule.get('CHEMBL25')
self.assertEqual(m1['pref_name'], 'ASPIRIN')
self.assertEqual(m1['molecule_chembl_id'], 'CHEMBL25')
def test_get_by_smiles(self):
molecule = new_client.molecule
molecule.set_format('json')
m1 = molecule.get('CC(=O)Oc1ccccc1C(=O)O')
self.assertEqual(m1['pref_name'], 'ASPIRIN')
self.assertEqual(m1['molecule_chembl_id'], 'CHEMBL25')
def test_get_by_smiles_flexmatch(self):
molecule = new_client.molecule
molecule.set_format('json')
res = molecule.filter(molecule_structures__canonical_smiles__flexmatch='CN(C)C(=N)N=C(N)N')
self.assertEqual(len(res), 6) # this returns 6 compounds
self.assertTrue('CHEMBL1431' in [x['molecule_chembl_id'] for x in res])
self.assertTrue('METFORMIN' in [x['pref_name'] for x in res])
def test_get_by_inchi_key(self):
molecule = new_client.molecule
molecule.set_format('json')
m1 = molecule.get('BSYNRYMUTXBXSQ-UHFFFAOYSA-N')
self.assertEqual(m1['pref_name'], 'ASPIRIN')
self.assertEqual(m1['molecule_chembl_id'], 'CHEMBL25')
def test_get_multiple_by_chmbl_ids(self):
mols = ['CHEMBL6498', 'CHEMBL6499', 'CHEMBL6505']
molecule = new_client.molecule
molecule.set_format('json')
records = molecule.get(['CHEMBL6498', 'CHEMBL6499', 'CHEMBL6505'])
self.assertEqual(len(records), 3)
self.assertTrue(len(set([x['molecule_chembl_id'] for x in records]) ^ set(mols)) == 0)
def test_get_multiple_by_smiles(self):
molecule = new_client.molecule
molecule.set_format('json')
records = molecule.get(['CNC(=O)c1ccc(cc1)N(CC#C)Cc2ccc3nc(C)nc(O)c3c2',
'Cc1cc2SC(C)(C)CC(C)(C)c2cc1\\N=C(/S)\\Nc3ccc(cc3)S(=O)(=O)N',
'CC(C)C[C@H](NC(=O)[C@@H](NC(=O)[C@H](Cc1c[nH]c2ccccc12)NC'
'(=O)[C@H]3CCCN3C(=O)C(CCCCN)CCCCN)C(C)(C)C)C(=O)O'])
self.assertEqual(len(records), 3)
self.assertTrue(
len(set([x['molecule_chembl_id'] for x in records]) ^ set(['CHEMBL6498', 'CHEMBL6499', 'CHEMBL6505'])) == 0)
def test_get_multiple_by_inchi_keys(self):
molecule = new_client.molecule
molecule.set_format('json')
records = molecule.get(['XSQLHVPPXBBUPP-UHFFFAOYSA-N',
'JXHVRXRRSSBGPY-UHFFFAOYSA-N', 'TUHYVXGNMOGVMR-GASGPIRDSA-N'])
self.assertEqual(len(records), 3)
self.assertTrue(
len(set([x['molecule_chembl_id'] for x in records]) ^ set(['CHEMBL6498', 'CHEMBL6499', 'CHEMBL6505'])) == 0)
def test_pChembl(self):
activities = new_client.activity
res = activities.filter(molecule_chembl_id="CHEMBL25", pchembl_value__isnull=False)
pchembls = [float(r['pchembl_value']) for r in res]
self.assertTrue(5.2 < sum(pchembls) / len(pchembls) < 5.3)
def test_get_pChembl_for_compound_and_target(self):
activities = new_client.activity
res = activities.filter(molecule_chembl_id="CHEMBL25", target_chembl_id="CHEMBL612545",
pchembl_value__isnull=False)
pchembls = [float(r['pchembl_value']) for r in res]
self.assertTrue(4.8 < sum(pchembls) / len(pchembls) < 4.9)
def test_get_all_approved_drugs(self):
molecule = new_client.molecule
molecule.set_format('json')
approved_drugs = molecule.filter(max_phase=4)
self.assertTrue(3900 < len(approved_drugs) < 4000)
self.assertTrue(1.74 < sum(
[float(d['molecule_properties']['alogp'] or 0) for d in approved_drugs if d['molecule_properties']]) / len(
approved_drugs) < 1.76)
def test_get_approved_drugs_for_lung_cancer(self):
drug_indication = new_client.drug_indication
molecule = new_client.molecule
molecule.set_format('json')
lung_cancer_ind = drug_indication.filter(efo_term__icontains="LUNG CARCINOMA")
lung_cancer_mols = molecule.filter(
molecule_chembl_id__in=[x['molecule_chembl_id'] for x in lung_cancer_ind])
self.assertTrue(210 < len(lung_cancer_mols) < 220)
self.assertTrue(
set(['NICOTINE', 'SARACATINIB', 'BEMIPARIN']).issubset(set([l['pref_name'] for l in lung_cancer_mols])))
def test_get_mols_with_no_ro5_violations(self):
molecule = new_client.molecule
molecule.set_format('json')
no_violations = molecule.filter(molecule_properties__num_ro5_violations=0)
self.assertTrue(1200000 < len(no_violations) < 1300000)
self.assertTrue(set(['GEMFIBROZIL', 'ANIROLAC', 'AZEPINDOLE']).issubset(
set([n['pref_name'] for n in no_violations[:10000]])))
def test_get_biotherapeutic_molecules(self):
molecule = new_client.molecule
molecule.set_format('json')
biotherapeutics = molecule.filter(biotherapeutic__isnull=False)
self.assertTrue(21000 < len(biotherapeutics) < 22000)
self.assertTrue(
set(['LYPRESSIN', 'USTEKINUMAB', 'APICIDIN']).issubset(set([n['pref_name'] for n in biotherapeutics])))
def test_get_natural_product_drugs(self):
molecule = new_client.molecule
molecule.set_format('sdf')
natural_drugs = molecule.filter(natural_product=1)
self.assertTrue(1900 < len(natural_drugs) < 2000)
b'$$$$'.join([n for n in natural_drugs if n])
molecule.set_format('json')
def test_get_all_natural_products(self):
document = new_client.document
docs = document.filter(journal="J. Nat. Prod.").only('document_chembl_id')
compound_record = new_client.compound_record
records = compound_record.filter(
document_chembl_id__in=[doc['document_chembl_id'] for doc in docs]).only(
['document_chembl_id', 'molecule_chembl_id'])
molecule = new_client.molecule
natural_products = molecule.filter(
molecule_chembl_id__in=[rec['molecule_chembl_id'] for rec in records]).only(
'molecule_structures')
self.assertTrue(34000 < len(natural_products) < 35000)
self.assertTrue(
'CC(=O)Oc1ccccc1C(=O)O' in [d['molecule_structures']['canonical_smiles'] for d in natural_products if
d['molecule_structures']])
def test_get_light_molecules(self):
molecule = new_client.molecule
molecule.set_format('json')
light_molecules = molecule.filter(molecule_properties__mw_freebase__lte=300)
self.assertTrue(310000 < len(light_molecules) < 320000)
mean = sum([float(d['molecule_properties']['mw_freebase'] or 0) for d in light_molecules if
d['molecule_properties']]) / len(light_molecules)
self.assertTrue(251 < mean < 252, mean)
def test_get_light_molecules_ending_with_nib(self):
molecule = new_client.molecule
molecule.set_format('json')
light_nib_molecules = molecule.filter(
molecule_properties__mw_freebase__lte=300).filter(pref_name__iendswith="nib")
self.assertEqual(len(light_nib_molecules), 1)
self.assertEqual('SEMAXANIB', light_nib_molecules[0]['pref_name'])
def test_get_ki_activities_for_herg(self):
target = new_client.target
activity = new_client.activity
herg = target.search('herg')[0]
herg_activities = activity.filter(target_chembl_id=herg['target_chembl_id']).filter(standard_type="Ki")
self.assertTrue(2500 < len(herg_activities) < 3000)
self.assertTrue(
5300 < sum([float(x['standard_value'] or 0) for x in herg_activities]) / len(herg_activities) < 5400)
def test_get_tg_gates_activities(self):
activity = new_client.activity
res = activity.search('"TG-GATES"')
self.assertTrue(150000 < len(res) < 160000)
mean = sum([float(r['pchembl_value'] or 0) for r in res]) / len(res)
self.assertEqual(0.0, mean, mean)
def test_get_b_or_f_type_activities(self):
activity = new_client.activity
res = activity.filter(target_chembl_id='CHEMBL3938', assay_type__iregex='(B|F)')
self.assertTrue(400 < len(res) < 500)
mean = sum([float(r['pchembl_value'] or 0) for r in res]) / len(res)
self.assertTrue(0 < mean < 1, mean)
def test_get_admet_related_inhibitor_assays(self):
assay = new_client.assay
res = assay.search('inhibitor').filter(assay_type='A')
self.assertTrue(1000 < len(res) < 1100)
self.assertEqual(median([r['confidence_score'] for r in res]), 1)
def test_get_cell_by_cellosaurus_id(self):
cell_line = new_client.cell_line
res = cell_line.filter(cellosaurus_id="CVCL_0417")
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['cell_chembl_id'], 'CHEMBL3307686')
def test_get_drugs_by_approval_year_and_name(self):
drug = new_client.drug
res = drug.filter(first_approval=1976).filter(usan_stem="-azosin")
self.assertEqual(len(res), 1)
self.assertTrue('Prazosin' in res[0]['synonyms'])
def test_get_tissue_by_bto_id(self):
tissue = new_client.tissue
res = tissue.filter(bto_id="BTO:0001073")
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['tissue_chembl_id'], 'CHEMBL3638173')
def test_get_tissue_by_caloha_id(self):
tissue = new_client.tissue
res = tissue.filter(caloha_id="TS-0490")
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['tissue_chembl_id'], 'CHEMBL3638176')
def test_get_tissue_by_uberon_id(self):
tissue = new_client.tissue
res = tissue.filter(uberon_id="UBERON:0000173")
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['tissue_chembl_id'], 'CHEMBL3638177')
def test_get_tissue_by_name(self):
tissue = new_client.tissue
res = tissue.filter(pref_name__istartswith='blood')
self.assertTrue(len(res) >= 3)
self.assertTrue(set(['Blood', 'Blood/Brain', 'Blood/Uterus']).issubset(set([r['pref_name'] for r in res])))
def test_get_documents_for_cytokine(self):
document = new_client.document
res = document.search('cytokine')
self.assertTrue(300 < len(res) < 400)
self.assertEqual(median([x['year'] for x in res]), 2010)
def test_search_compound_in_unichem(self):
ret = unichem.get('AIN')
self.assertEqual(len(ret), 1)
self.assertTrue(set(['aspirin', 'CHEMBL25', 'SCHEMBL1353']).issubset(
set([x['src_compound_id'] for x in ret[list(ret.keys())[0]]])))
def test_resolve_inchi_key_to_inchi(self):
ret = unichem.inchiFromKey('<KEY>')
self.assertEqual(ret[0]['standardinchi'],
'InChI=1S/C16H13ClN2O/c1-19-14-8-7-12(17)9-13(14)16(18-10-15(19)20)11-5-3-2-4-6-11/h2-9H,10H2,1H3')
def test_covert_smiles_to_ctab(self):
aspirin = utils.smiles2ctab('O=C(Oc1ccccc1C(=O)O)C')
self.assertTrue('V2000' in aspirin)
def test_convert_smiles_to_image_and_back_to_smiles(self):
aspirin = 'CC(=O)Oc1ccccc1C(=O)O'
im = utils.smiles2image(aspirin)
mol = utils.image2ctab(im)
smiles = utils.ctab2smiles(mol).split()[2]
self.assertEqual(smiles[-10:], aspirin[-10:]) # TODO: fix OSRA!
def test_compute_fingerprints(self):
aspirin = utils.smiles2ctab('O=C(Oc1ccccc1C(=O)O)C')
fingerprints = utils.sdf2fps(aspirin)
self.assertTrue(fingerprints.startswith('#FPS'))
def test_compute_maximal_common_substructure(self):
smiles = ["O=C(NCc1cc(OC)c(O)cc1)CCCC/C=C/C(C)C", "CC(C)CCCCCC(=O)NCC1=CC(=C(C=C1)O)OC", "c1(C=O)cc(OC)c(O)cc1"]
mols = [utils.smiles2ctab(smile) for smile in smiles]
sdf = ''.join(mols)
result = utils.mcs(sdf)
self.assertEqual(result, '[#6]1(-[#6]):[#6]:[#6](-[#8]-[#6]):[#6](:[#6]:[#6]:1)-[#8]')
def test_compute_molecular_descriptors(self):
aspirin = utils.smiles2ctab('O=C(Oc1ccccc1C(=O)O)C')
num_atoms = json.loads(utils.getNumAtoms(aspirin))[0]
self.assertEqual(num_atoms, 13)
mol_wt = json.loads(utils.molWt(aspirin))[0]
self.assertTrue(180 < mol_wt < 181)
log_p = json.loads(utils.logP(aspirin))[0]
self.assertTrue(1.31 < log_p < 1.32)
tpsa = json.loads(utils.tpsa(aspirin))[0]
self.assertTrue(63 < tpsa < 64)
descriptors = json.loads(utils.descriptors(aspirin))[0]
self.assertEqual(descriptors['MolecularFormula'], 'C9H8O4')
def test_standardise_molecule(self):
mol = utils.smiles2ctab("[Na]OC(=O)Cc1ccc(C[NH3+])cc1.c1nnn[n-]1.O")
st = utils.standardise(mol)
self.assertTrue('V2000' in st)
if __name__ == '__main__':
unittest.main()
|
487903
|
import json
from flask import Blueprint, url_for, render_template, flash, redirect, Response
from conekt import cache
from conekt.helpers.cytoscape import CytoscapeHelper
from conekt.models.expression.networks import ExpressionNetworkMethod, ExpressionNetwork
from conekt.models.species import Species
from conekt.models.gene_families import GeneFamilyMethod
from utils.benchmark import benchmark
expression_network = Blueprint('expression_network', __name__)
@expression_network.route('/')
def expression_network_overview():
"""
Overview of all networks in the current database with basic information
"""
networks = ExpressionNetworkMethod.query.all()
return render_template("expression_network.html", networks=networks)
@expression_network.route('/species/<species_id>')
@cache.cached()
def expression_network_species(species_id):
"""
    Overview of all networks for the selected species with basic information
"""
networks = ExpressionNetworkMethod.query.filter_by(species_id=species_id).all()
species = Species.query.get_or_404(species_id)
return render_template("expression_network.html", networks=networks, species=species)
@expression_network.route('/graph/<node_id>')
@expression_network.route('/graph/<node_id>/<int:family_method_id>')
@cache.cached()
def expression_network_graph(node_id, family_method_id=None):
"""
    Page that displays the network graph for a specific network's probe. The depth indicates how many steps away
    from the query gene the network is retrieved; for performance reasons depths > 1 are not allowed.
:param node_id: id of the network's probe (the query) to visualize
:param depth: How many steps to include, 0 only the query and the direct neighborhood, 1 a step further, ...
Currently unused, filtering is done by javascript downstream
:param family_method_id: family method to use for colors and shapes based on the family
"""
if family_method_id is None:
family_method = GeneFamilyMethod.query.first()
if family_method is not None:
family_method_id = family_method.id
else:
family_method_id = None
node = ExpressionNetwork.query.get(node_id)
enable_second_level = node.method.enable_second_level
depth = 1 if enable_second_level else 0
return render_template("expression_graph.html", node=node, depth=depth, family_method_id=family_method_id,
cutoff=node.method.hrr_cutoff)
@expression_network.route('/download/neighbors/<node_id>')
@cache.cached()
def expression_network_download_neighbors(node_id):
"""
    Returns a tab-delimited table with the neighbors of the current node
    :param node_id: id of the network's probe
    :return: Response with table in tab-delimited format
"""
network = ExpressionNetwork.query.get(node_id)
return Response(network.neighbors_table)
@expression_network.route('/json/<node_id>')
@expression_network.route('/json/<node_id>/<int:family_method_id>')
@cache.cached()
def expression_network_json(node_id, family_method_id=None):
"""
    Generates JSON output compatible with cytoscape.js (see planet/static/planet_graph.js for details on how to render)
:param node_id: id of the network's probe (the query) to visualize
:param family_method_id: Which gene families to use
"""
node = ExpressionNetwork.query.get(node_id)
enable_second_level = node.method.enable_second_level
depth = 1 if enable_second_level else 0
network = ExpressionNetwork.get_neighborhood(node_id, depth=depth)
if family_method_id is None:
family_method = GeneFamilyMethod.query.first()
if family_method is not None:
family_method_id = family_method.id
else:
family_method_id = None
network_cytoscape = CytoscapeHelper.parse_network(network)
network_cytoscape = CytoscapeHelper.add_family_data_nodes(network_cytoscape, family_method_id)
network_cytoscape = CytoscapeHelper.add_lc_data_nodes(network_cytoscape)
network_cytoscape = CytoscapeHelper.add_descriptions_nodes(network_cytoscape)
return Response(json.dumps(network_cytoscape), mimetype='application/json')
@expression_network.route('/export/<method_id>')
def expression_network_export(method_id):
def generate(method_id):
header = "gene_a\tgene_b\thrr\tpcc\n"
yield header
nodes = ExpressionNetwork.query.filter(ExpressionNetwork.method_id == method_id).all()
for n in nodes:
neighbors = json.loads(n.network)
for neighbor in neighbors:
gene_a = n.sequence.name if n.sequence_id is not None else n.probe
probe_b = neighbor["probe_name"] if "probe_name" in neighbor.keys() else "Unknown"
gene_b = neighbor["gene_name"] if "gene_name" in neighbor.keys() else probe_b
hrr = neighbor["hrr"] if "hrr" in neighbor.keys() else None
pcc = neighbor["link_pcc"] if "link_pcc" in neighbor.keys() else None
yield '\t'.join([gene_a, gene_b, str(hrr), str(pcc)]) + '\n'
return Response(generate(method_id), mimetype='text/plain')
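# Hedged illustration (not part of the original module): the export generated above is a plain
# tab-delimited stream, with the header written first; the gene identifiers and values below are
# made up purely to show the shape of the output.
#
#   gene_a  gene_b  hrr pcc
#   GeneA   GeneB   5   0.87
#   GeneA   GeneC   12  0.73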
|
487918
|
import torch
def generate_neighbour_label(h, w, dist_type, rad):
hs = torch.arange(end=h)
ws = torch.arange(end=w)
h_grid, w_grid = torch.meshgrid(hs, ws)
grid = torch.stack((h_grid, w_grid), dim=-1).view(-1, 1, 2).float()
grid_ = grid.transpose(0, 1)
dist = torch.norm(grid - grid_, p=dist_type, dim=-1)
label = dist <= rad
return label
def generate_smooth_label(size, smooth_type, max_prob=0.9, temp=0.1, rad=0.3, dist_type=1):
if smooth_type == 'onehot':
hs = torch.arange(end=size)
ws = torch.arange(end=size)
h_grid, w_grid = torch.meshgrid(hs, ws)
grid = torch.stack((h_grid, w_grid), dim=-1).view(-1, 1, 2).float()
grid_ = grid.transpose(0, 1)
dist = torch.norm(grid - grid_, p=dist_type, dim=-1)
dist /= size
label = torch.zeros_like(dist)
label[dist <= rad] = 1
other_prob = (1 - max_prob) / (label.sum(-1) - 1)
label *= other_prob.unsqueeze(-1)
label[torch.arange(size * size), torch.arange(size * size)] = max_prob
elif smooth_type == 'softmax':
hs = torch.arange(end=size)
ws = torch.arange(end=size)
h_grid, w_grid = torch.meshgrid(hs, ws)
grid = torch.stack((h_grid, w_grid), dim=-1).view(-1, 1, 2).float()
grid_ = grid.transpose(0, 1)
dist = torch.norm(grid - grid_, p=dist_type, dim=-1)
dist /= size
dist[dist > rad] = 1e10
label = torch.softmax(- dist / temp, dim=-1)
return label
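# Hedged usage sketch (not part of the original module): a minimal demonstration of the two
# label generators above. The grid size, distance type and radii are arbitrary illustration
# values, not defaults taken from any training configuration.
if __name__ == "__main__":
    neighbour = generate_neighbour_label(4, 4, dist_type=1, rad=1)
    print(neighbour.shape)         # torch.Size([16, 16]) boolean neighbourhood mask
    smooth = generate_smooth_label(4, smooth_type='softmax', temp=0.1, rad=0.3)
    print(smooth.sum(dim=-1)[:3])  # every row of the softmax label sums to ~1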
|
487923
|
import torch
import torch.nn as nn
import torch.autograd as autograd
import torch.nn.functional as F
import pdb
class CNN(nn.Module):
def __init__(self, args, max_pool_over_time=False):
super(CNN, self).__init__()
self.args = args
self.layers = []
for layer in range(args.num_layers):
convs = []
for filt in args.filters:
                in_channels = args.embedding_dim if layer == 0 else args.filter_num * len(args.filters)
                kernel_size = filt
                new_conv = nn.Conv1d(in_channels=in_channels, out_channels=args.filter_num, kernel_size=kernel_size)
                self.add_module('layer_' + str(layer) + '_conv_' + str(filt), new_conv)
convs.append(new_conv)
self.layers.append(convs)
self.max_pool = max_pool_over_time
def _conv(self, x):
layer_activ = x
for layer in self.layers:
next_activ = []
for conv in layer:
left_pad = conv.kernel_size[0] - 1
pad_tensor_size = [d for d in layer_activ.size()]
pad_tensor_size[2] = left_pad
                left_pad_tensor = autograd.Variable(torch.zeros(pad_tensor_size))
                if self.args.cuda:
                    left_pad_tensor = left_pad_tensor.cuda()
                padded_activ = torch.cat((left_pad_tensor, layer_activ), dim=2)
                next_activ.append(conv(padded_activ))
            # concat across channels
            layer_activ = F.relu(torch.cat(next_activ, 1))
return layer_activ
def _pool(self, relu):
pool = F.max_pool1d(relu, relu.size(2)).squeeze(-1)
return pool
def forward(self, x):
activ = self._conv(x)
if self.max_pool:
activ = self._pool(activ)
return activ
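# Hedged usage sketch (not part of the original module): builds the CNN with a small, made-up
# configuration. The attribute names on `args` (num_layers, filters, filter_num, embedding_dim,
# cuda) are the ones read in __init__ and _conv above; the values are arbitrary.
if __name__ == "__main__":
    from types import SimpleNamespace
    args = SimpleNamespace(num_layers=2, filters=[3, 4], filter_num=8,
                           embedding_dim=16, cuda=False)
    model = CNN(args, max_pool_over_time=True)
    x = torch.randn(5, args.embedding_dim, 20)  # (batch, embedding_dim, seq_len)
    print(model(x).shape)                       # expected: torch.Size([5, 16])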
|
487929
|
from typing import Iterable, Optional
from abc import ABC, abstractmethod
from django.views.generic.base import TemplateView
from django.core.exceptions import ValidationError
from .utils import HostLookupResult
class BaseHostLookupView(ABC, TemplateView):
template_name = "hostlookup/hostlookupresult_list.html"
@abstractmethod
def host_lookup(self, **kwargs) -> Optional[Iterable[HostLookupResult]]:
return NotImplemented
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
try:
results = self.host_lookup(**kwargs)
except ValidationError as ve:
context['errors'] = ve
return context
context['results'] = results
return context
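# Hedged usage sketch (not part of the original module): a hypothetical concrete view. The
# HostLookupResult construction is illustrative only; its real signature lives in .utils and
# may differ.
#
#   class StaticHostLookupView(BaseHostLookupView):
#       def host_lookup(self, **kwargs):
#           host = kwargs.get('host')
#           if not host:
#               raise ValidationError("No host supplied.")
#           return [HostLookupResult(...)]  # build results for `host` here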
|
487938
|
from __future__ import absolute_import
import httpretty
import pygerduty
import pygerduty.v2
###################
# Version 2 Tests #
###################
@httpretty.activate
def test_get_addon_v2():
body = open('tests/fixtures/addon_v2.json').read()
httpretty.register_uri(
httpretty.GET, "https://api.pagerduty.com/addons/PKX7F81",
body=body, status=200)
p = pygerduty.v2.PagerDuty("password")
addon = p.addons.show("PKX7F81")
assert addon.id == "PKX7F81"
assert addon.type == "incident_show_addon"
assert addon.name == "Service Runbook"
assert addon.src == "https://intranet.example.com/runbook.html"
assert addon.services[0].id == "PIJ90N7"
@httpretty.activate
def test_update_addon_v2():
body_req = open('tests/fixtures/addon_update_request_v2.json').read()
body_resp = open('tests/fixtures/addon_update_response_v2.json').read()
httpretty.register_uri(
httpretty.PUT, "https://api.pagerduty.com/addons/PKX7F81",
body=body_req,
responses=[httpretty.Response(body=body_resp, status=200)])
p = pygerduty.v2.PagerDuty("password")
addon = p.addons.update("PKX7F81")
assert addon.id == "PKX7F81"
assert addon.type == "incident_show_addon"
assert addon.name == "Service Runbook"
assert addon.src == "https://intranet.example.com/runbook.html"
assert addon.services[0].id == "PIJ90N7"
|
487942
|
from collections import defaultdict
import openpyxl
from django.urls import reverse
from django.utils.translation import ugettext as _
from lxml import etree
from corehq.apps.app_manager.views.media_utils import interpolate_media_path
from corehq.apps.translations.app_translations.download import get_bulk_app_single_sheet_by_name
from corehq.apps.translations.app_translations.utils import get_bulk_app_sheet_headers
from corehq.apps.translations.const import SINGLE_SHEET_NAME
def download_multimedia_paths_rows(app, only_missing=False):
paths = defaultdict(list)
for ref in app.all_media():
paths[ref.path].append(ref)
module_index_by_unique_id = {m.unique_id: m.id for m in app.get_modules()}
def _readable_ref(ref):
module_index = module_index_by_unique_id[ref.module_unique_id]
readable = _("Menu {index}: {name}").format(index=module_index, name=ref.get_module_name())
if ref.form_unique_id is not None:
readable += _(" > Form {index}: {name}").format(index=ref.form_order, name=ref.get_form_name())
return readable
rows = []
for path, refs in paths.items():
if not only_missing or path not in app.multimedia_map:
rows.append((_("Paths"), [path, ''] + [_readable_ref(r) for r in refs]))
return rows
def validate_multimedia_paths_rows(app, rows):
old_paths_last_seen = {i.path: None for i in app.all_media()}
new_paths_last_seen = defaultdict(lambda: None)
errors = []
warnings = []
for i, row in enumerate(rows):
(old_path, new_path) = row
if old_path not in old_paths_last_seen:
errors.append(_("Path in row {} could not be found in application: "
"<code>{}</code>").format(i, old_path))
elif old_path == new_path:
errors.append(_("In row {}, old and new paths are both <code>{}</code>. Please provide "
"an updated path or remove this row").format(i, old_path))
elif old_paths_last_seen[old_path] is not None:
# Duplicate old paths is an error: can't rename to two different new values
errors.append(_("Path in row {} was already renamed in row {}: "
"<code>{}</code>").format(i, old_paths_last_seen[old_path], old_path))
old_paths_last_seen[old_path] = i
interpolated_new_path = interpolate_media_path(new_path) # checks for jr://
if interpolated_new_path != new_path:
warnings.append(_("Path <code>{}</code> in row {} was replaced with "
"<code>{}</code>").format(new_path, i, interpolated_new_path))
else:
# It's usually a bad idea to change file extensions, since the file itself isn't changing
old_extension = old_path.split(".")[-1].lower()
new_extension = new_path.split(".")[-1].lower()
if old_extension != new_extension:
warnings.append(_("File extension in row {} changed "
"from {} to {}".format(i, old_extension, new_extension)))
# Duplicate new paths is a warning: will combine what were previously different items
if new_path in new_paths_last_seen:
warnings.append(_("New path in row {} was already used to rename row {}: "
"<code>{}</code>").format(i, new_paths_last_seen[new_path], new_path))
new_paths_last_seen[new_path] = i
return errors, warnings
def update_multimedia_paths(app, paths):
# Update module and form references
success_counts = defaultdict(lambda: 0)
for old_path, new_path in paths.items():
for module in app.modules:
success_counts[module.unique_id] += module.rename_media(old_path, new_path)
for form in module.get_forms():
update_count = form.rename_media(old_path, new_path)
if update_count:
success_counts[form.unique_id] += update_count
# Update app's upstream map of multimedia
for old_path, new_path in paths.items():
if old_path in app.multimedia_map: # path will not be present if file is missing from app
app.multimedia_map.update({
new_path: app.multimedia_map[old_path],
})
# Put together success messages
successes = []
for module in app.modules:
if success_counts[module.unique_id]:
successes.append(_("{} item(s) updated in <a href='{}' target='_blank'>{}</a>").format(
success_counts[module.unique_id],
reverse("view_module", args=[app.domain, app.id, module.unique_id]),
module.default_name()))
for form in module.forms:
if success_counts[form.unique_id]:
successes.append(_("{} item(s) updated in <a href='{}' target='_blank'>{}</a>").format(
success_counts[form.unique_id],
reverse("view_form", args=[app.domain, app.id, form.unique_id]),
"{} > {}".format(module.default_name(), form.default_name())))
return successes
def download_audio_translator_files(domain, app, lang, eligible_for_transifex_only=True):
# Get bulk app translation single sheet data
headers = get_bulk_app_sheet_headers(app, single_sheet=True, lang=lang,
eligible_for_transifex_only=eligible_for_transifex_only)
headers = headers[0] # There's only one row since these are the headers for the single-sheet format
headers = headers[1] # Drop the first element (sheet name), leaving the second (list of header names)
audio_text_index = headers.index('default_' + lang)
audio_path_index = headers.index('audio_' + lang)
sheets = get_bulk_app_single_sheet_by_name(app, lang, eligible_for_transifex_only=True)
audio_rows = [row for row in sheets[SINGLE_SHEET_NAME] if row[audio_path_index]]
# Create file for re-upload to HQ's bulk app translations
upload_workbook = openpyxl.Workbook()
upload_sheet = upload_workbook.worksheets[0]
upload_sheet.title = SINGLE_SHEET_NAME
upload_sheet.append(headers)
# Create dict of audio path to text, and disambiguate any missing path that points to multiple texts
rows_by_audio = {}
for row in audio_rows:
audio_path = row[audio_path_index]
text = row[audio_text_index]
if audio_path in rows_by_audio and audio_path not in app.multimedia_map:
if rows_by_audio[audio_path] != text:
extension = "." + audio_path.split(".")[-1]
not_extension = audio_path[:-len(extension)]
suffix = 1
while audio_path in rows_by_audio and rows_by_audio[audio_path] != text:
suffix += 1
audio_path = "{}_{}{}".format(not_extension, suffix, extension)
row[audio_path_index] = audio_path
upload_sheet.append(row) # add new path to sheet for re-upload to HQ
rows_by_audio[audio_path] = text
# Create dict of rows, keyed by label text to de-duplicate paths
rows_by_text = defaultdict(list)
for row in audio_rows:
rows_by_text[row[audio_text_index]].append(row)
def _get_filename_from_duplicate_rows(rows):
return rows[0][audio_path_index]
# Add a row to upload sheet for each filename being eliminated because the text was duplicated
for text, rows in rows_by_text.items():
filename = _get_filename_from_duplicate_rows(rows)
for row in rows:
if row[audio_path_index] != filename:
row[audio_path_index] = filename
upload_sheet.append(row)
    # Create file for translator, with a row for each unique text label
translator_workbook = openpyxl.Workbook()
sheet0 = translator_workbook.worksheets[0]
sheet0.title = "filepaths"
sheet0.append([lang, "audio"])
sheet1 = translator_workbook.create_sheet("verification")
sheet1.append(headers)
for text, rows in rows_by_text.items():
if not any([row[audio_path_index] in app.multimedia_map for row in rows]):
filename = _get_filename_from_duplicate_rows(rows)
sheet0.append([text, filename])
sheet1.append(rows[0])
return {
"bulk_upload.xlsx": upload_workbook,
"excel_for_translator.xlsx": translator_workbook,
}
|
487955
|
import copy
import json
import os
import pprint
from core.result_processor import ResultProcessor
import core.utility as util
DISCOVERED_URLS_OUTFILE = "discovered_urls.txt"
class WebserverMapProcessor(ResultProcessor):
def __init__(self, output_dir: str, results: dict = None):
super().__init__(output_dir, results)
@staticmethod
def is_valid_result(result):
if not isinstance(result, dict):
return False
# check every entry for every IP
for key, value in result.items():
if (not util.is_ipv4(key)) and (not util.is_ipv6(key)):
return False
if not isinstance(value, dict):
return False
# check all hosts for every port
for portid_str, host_group in value.items():
if not isinstance(portid_str, str):
return False
# check validity of port number
try:
portid = int(portid_str)
except ValueError:
return False
if not (1 <= portid <= 65535):
return False
if not isinstance(host_group, dict):
return False
# check all pages available for every host, indexed by status codes
for webhost, node in host_group.items():
for status_code_str, pages in node.items():
# check validity of status code
if not isinstance(status_code_str, str):
return False
try:
status_code = int(status_code_str)
except ValueError:
return False
if not (100 <= status_code < 600):
return False
# check validity of every web page node
for path, page_info in pages.items():
if not path.startswith("/"):
return False
# check validity of keys and their node structure
for page_key in page_info:
if page_key in ("GET", "POST", "cookies", "instances"):
if not isinstance(page_info[page_key], list):
return False
elif not page_key.startswith("misc_info"):
return False
# check validity of every instance
if "instances" in page_info:
for instance in page_info["instances"]:
# check validity of keys and their node structure
if any(ikey not in ("GET", "POST", "cookies") for ikey in instance):
return False
for param_key in ("GET", "POST", "cookies"):
if param_key in instance and not isinstance(instance[param_key], dict):
return False
for k, v in instance[param_key].items():
if not isinstance(k, str):
return False
if not isinstance(v, str):
return False
return True
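    # Hedged illustration (not part of the original module): a minimal result dict that
    # is_valid_result() above would accept, reconstructed from its checks; all values are
    # made up.
    #
    #   {
    #       "192.168.0.1": {
    #           "80": {
    #               "example.org": {
    #                   "200": {
    #                       "/login": {
    #                           "GET": ["next"],
    #                           "POST": ["user", "pass"],
    #                           "cookies": ["session"],
    #                           "instances": [{"GET": {"next": "/"}, "POST": {}, "cookies": {}}],
    #                           "misc_info": "page title"
    #                       }
    #                   }
    #               }
    #           }
    #       }
    #   }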
@staticmethod
def print_result(result: dict):
pprint.pprint(result)
@staticmethod
def print_aggr_result(result):
WebserverMapProcessor.print_result(result)
@staticmethod
def store_result(result: dict, filepath: str):
"""Store the given result at the specified location"""
WebserverMapProcessor.store_json_convertible_result(result, filepath)
@staticmethod
def store_aggregated_result(aggr_result, filepath: str):
"""Store the given aggregated result at the specified location"""
result = aggr_result
WebserverMapProcessor.store_json_convertible_result(result, filepath)
def parse_result_file(self, filepath: str):
return self.parse_result_from_json_file(filepath)
def aggregate_results(self):
"""
Accumulate the results from all the different webserver map analyzers into one.
        The aggregation takes the union of all individual analyzer results.
:return: the aggregated results
"""
if not self.results:
return {}
if len(self.results) == 1:
webserver_map = copy.deepcopy(self.results[list(self.results.keys())[0]])
self.store_discovered_urls(webserver_map)
return webserver_map
# unite all webserver map results
webserver_map = {}
for _, result in self.results.items():
for host, port_nodes in result.items():
if host not in webserver_map:
webserver_map[host] = {}
for portid, domain_nodes in port_nodes.items():
if portid not in webserver_map[host]:
webserver_map[host][portid] = {}
# iterate over different webhosts (https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Host)
for webhost, status_code_nodes in domain_nodes.items():
if webhost not in webserver_map[host][portid]:
webserver_map[host][portid][webhost] = {}
# iterate over status codes and their associated pages
for status_code, page_nodes in status_code_nodes.items():
if status_code not in webserver_map[host][portid][webhost]:
webserver_map[host][portid][webhost][status_code] = {}
cur_aggr_node = webserver_map[host][portid][webhost][status_code]
for path, page_info in page_nodes.items():
# handle non-existent / empty page_info node in aggregation webserver_map
if path not in cur_aggr_node:
cur_aggr_node[path] = {}
if not cur_aggr_node[path]:
cur_aggr_node[path] = page_info
continue
# unite the GET and POST parameters and cookies
for ptype in ("GET", "POST", "cookies"):
if ptype in page_info:
if ptype in cur_aggr_node[path]:
cur_aggr_node[path][ptype] = list(set(page_info[ptype] +
cur_aggr_node[path][ptype]))
else:
cur_aggr_node[path][ptype] = page_info[ptype]
# unite instances of the path
if "instances" in page_info:
# handle non-existent / empty instances node in aggregation webserver_map
if "instances" not in cur_aggr_node[path]:
cur_aggr_node[path]["instances"] = []
if not cur_aggr_node[path]["instances"]:
cur_aggr_node[path]["instances"] = page_info["instances"]
continue
for cur_instance in page_info["instances"]:
get_params = cur_instance.get("GET", {})
post_params = cur_instance.get("POST", {})
cookies = cur_instance.get("cookies", {})
# skip empty instances
if (not get_params) and (not post_params) and (not cookies):
continue
if ((not any(val for val in get_params.values())) and
(not any(val for val in post_params.values())) and
(not any(val for val in cookies.values()))):
continue
if not any((inst.get("GET", {}) == get_params and
inst.get("POST", {}) == post_params and
inst.get("cookies", {}) == cookies)
for inst in cur_aggr_node[path]["instances"]):
cur_aggr_node[path]["instances"].append(cur_instance)
# safely copy over any miscellaneous info
for key, val in page_info.items():
if key.startswith("misc_info"):
if "misc_info" not in cur_aggr_node[path]:
cur_aggr_node[path]["misc_info"] = val
elif not any(val == aggr_val for aggr_val in cur_aggr_node[path].values()):
for i in range(10):
alt_name = "misc_info_%d" % i
if alt_name not in cur_aggr_node[path]:
cur_aggr_node[path][alt_name] = val
break
self.store_discovered_urls(webserver_map)
return webserver_map
def store_discovered_urls(self, webserver_map):
""" write discovered locations in webserver_map to separate file for easy reading"""
outpath = os.path.join(self.output_dir, DISCOVERED_URLS_OUTFILE)
with open(outpath, "w") as f:
for ip, ip_node in webserver_map.items():
for portid, port_node in ip_node.items():
protocol_prefix = ""
if str(portid) == "80":
protocol_prefix = "http://"
elif str(portid) == "443":
protocol_prefix = "https://"
for host, host_node in port_node.items():
header = "**** %s:%s - %s ****" % (ip, str(portid), host)
full_header = "*" * len(header) + "\n" + header + "\n" + "*" * len(header) + "\n"
f.write(full_header)
for status_code, pages_node in host_node.items():
f.write("-" * 60 + "\n")
if protocol_prefix:
f.write(" [+] URLs with '%s' HTTP response:\n" % str(status_code))
else:
f.write(" [+] Locations with '%s' HTTP response:\n" % str(status_code))
f.write("-" * 60 + "\n")
for path in pages_node:
f.write(" " + protocol_prefix + host + path + "\n")
f.write("\n")
f.write("\n")
|
488045
|
import bsearch
def test_bsearch_list():
data = sorted([5,3,7,1,9,20])
assert bsearch.search_list(data, 5) == (5, 2)
assert bsearch.search_list(data, 1) == (1, 0)
assert bsearch.search_list(data, 20) == (20, len(data) - 1)
assert bsearch.search_list(data, 9) == (9, 4)
assert bsearch.search_list(data, 100) == (None, -1)
assert bsearch.search_list(data, -1) == (None, -1)
def test_bsearch_list_iter():
data = sorted([5,3,7,1,9,20])
assert bsearch.search_list_iter(data, 5) == (5, 2)
assert bsearch.search_list_iter(data, 1) == (1, 0)
assert bsearch.search_list_iter(data, 20) == (20, len(data) - 1)
assert bsearch.search_list_iter(data, 9) == (9, 4)
assert bsearch.search_list_iter(data, 100) == (None, -1)
assert bsearch.search_list_iter(data, -1) == (None, -1)
def test_bsearch_dllist():
data = sorted([5,3,7,1,9,20])
assert bsearch.search_dllist(data, 5) == (5, 2)
assert bsearch.search_dllist(data, 1) == (1, 0)
assert bsearch.search_dllist(data, 20) == (20, len(data) - 1)
assert bsearch.search_dllist(data, 9) == (9, 4)
assert bsearch.search_dllist(data, 100) == (None, -1)
assert bsearch.search_dllist(data, -1) == (None, -1)
def test_btree_search():
# for btree, adding sorted data just makes it a list
data = [5,3,7,1,9,20]
assert bsearch.search_btree(data, 5) == (5, 0)
assert bsearch.search_btree(data, 1) == (1, 3)
assert bsearch.search_btree(data, 20) == (20, len(data) - 1)
assert bsearch.search_btree(data, 9) == (9, 4)
assert bsearch.search_btree(data, 100) == (None, -1)
assert bsearch.search_btree(data, -1) == (None, -1)
|
488104
|
import subprocess
from pathlib import Path
import quetz
from quetz.utils import add_entry_for_index
@quetz.hookimpl
def post_package_indexing(tempdir: Path, channel_name, subdirs, files, packages):
for subdir in subdirs:
path1 = tempdir / channel_name / subdir / "repodata.json"
path2 = tempdir / channel_name / subdir / "repodata.json.zck"
try:
subprocess.check_call(['zck', path1, '-o', path2])
except FileNotFoundError:
raise RuntimeError(
'zchunk does not seem to be installed, '
'you can install it with:\n'
'mamba install zchunk -c conda-forge'
)
except subprocess.CalledProcessError:
raise RuntimeError('Error calling zck on repodata.')
with open(path2, 'rb') as fi:
add_entry_for_index(files, subdir, "repodata.json.zck", fi.read())
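# Hedged note (not part of the original plugin): the subprocess call above is equivalent to
# running zchunk by hand on an already generated repodata file, e.g.:
#
#   zck <channel>/<subdir>/repodata.json -o <channel>/<subdir>/repodata.json.zck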
|
488126
|
import numpy as np
from ..abstract import Processor
from ..backend.boxes import flip_left_right
from ..backend.boxes import to_image_coordinates
from ..backend.boxes import to_normalized_coordinates
from ..backend.boxes import compute_iou
from ..backend.image import warp_affine
from ..backend.image import translate_image
from ..backend.image import sample_scaled_translation
from ..backend.image import get_rotation_matrix
from ..backend.keypoints import translate_keypoints
class RandomFlipBoxesLeftRight(Processor):
"""Flips image and implemented labels horizontally.
"""
def __init__(self):
super(RandomFlipBoxesLeftRight, self).__init__()
def call(self, image, boxes):
if np.random.randint(0, 2):
boxes = flip_left_right(boxes, image.shape[1])
image = image[:, ::-1]
return image, boxes
class ToImageBoxCoordinates(Processor):
"""Convert normalized box coordinates to image-size box coordinates.
"""
def __init__(self):
super(ToImageBoxCoordinates, self).__init__()
def call(self, image, boxes):
boxes = to_image_coordinates(boxes, image)
return image, boxes
class ToNormalizedBoxCoordinates(Processor):
"""Convert image-size box coordinates to normalized box coordinates.
"""
def __init__(self):
super(ToNormalizedBoxCoordinates, self).__init__()
def call(self, image, boxes):
boxes = to_normalized_coordinates(boxes, image)
return image, boxes
class RandomSampleCrop(Processor):
"""Crops and image while adjusting the bounding boxes.
Boxes should be in point form.
# Arguments
probability: Float between ''[0, 1]''.
"""
def __init__(self, probability=0.5):
self.probability = probability
self.sample_options = (
# using entire original input image
None,
# sample a patch s.t. MIN jaccard w/ obj in .1,.3,.4,.7,.9
(0.1, None),
(0.3, None),
(0.7, None),
(0.9, None),
# randomly sample a patch
(None, None),
)
super(RandomSampleCrop, self).__init__()
def call(self, image, boxes):
if self.probability < np.random.rand():
return image, boxes
labels = boxes[:, -1:]
boxes = boxes[:, :4]
height, width, _ = image.shape
while True:
# randomly choose a mode
mode = np.random.choice(self.sample_options)
if mode is None:
boxes = np.hstack([boxes, labels])
return image, boxes
min_iou, max_iou = mode
if min_iou is None:
min_iou = float('-inf')
if max_iou is None:
max_iou = float('inf')
            # max trials (50)
for _ in range(50):
current_image = image
w = np.random.uniform(0.3 * width, width)
h = np.random.uniform(0.3 * height, height)
# aspect ratio constraint b/t .5 & 2
if h / w < 0.5 or h / w > 2:
continue
left = np.random.uniform(width - w)
top = np.random.uniform(height - h)
# convert to integer rect x1,y1,x2,y2
rect = np.array(
[int(left), int(top), int(left + w), int(top + h)])
# calculate IoU (jaccard overlap) b/t the cropped and gt boxes
overlap = compute_iou(rect, boxes)
# is min and max overlap constraint satisfied? if not try again
if overlap.max() < min_iou or overlap.min() > max_iou:
continue
# cut the crop from the image
current_image = current_image[rect[1]:rect[3], rect[0]:rect[2],
:]
# keep overlap with gt box IF center in sampled patch
centers = (boxes[:, :2] + boxes[:, 2:]) / 2.0
                # mask in all gt boxes that are above and to the left of centers
m1 = (rect[0] < centers[:, 0]) * (rect[1] < centers[:, 1])
                # mask in all gt boxes that are under and to the right of centers
m2 = (rect[2] > centers[:, 0]) * (rect[3] > centers[:, 1])
# mask in that both m1 and m2 are true
mask = m1 * m2
# have any valid boxes? try again if not
if not mask.any():
continue
# take only matching gt boxes
current_boxes = boxes[mask, :].copy()
# take only matching gt labels
current_labels = labels[mask]
# should we use the box left and top corner or the crop's
current_boxes[:, :2] = np.maximum(current_boxes[:, :2],
rect[:2])
                # adjust to crop (by subtracting crop's left,top)
current_boxes[:, :2] -= rect[:2]
current_boxes[:, 2:] = np.minimum(current_boxes[:, 2:],
rect[2:])
                # adjust to crop (by subtracting crop's left,top)
current_boxes[:, 2:] -= rect[:2]
return current_image, np.hstack(
[current_boxes, current_labels])
class Expand(Processor):
"""Expand image size up to 2x, 3x, 4x and fill values with mean color.
This transformation is applied with a probability of 50%.
# Arguments
max_ratio: Float.
mean: None/List: If `None` expanded image is filled with
the image mean.
probability: Float between ''[0, 1]''.
"""
def __init__(self, max_ratio=2, mean=None, probability=0.5):
super(Expand, self).__init__()
self.max_ratio = max_ratio
self.mean = mean
self.probability = probability
def call(self, image, boxes):
if self.probability < np.random.rand():
return image, boxes
height, width, num_channels = image.shape
ratio = np.random.uniform(1, self.max_ratio)
left = np.random.uniform(0, width * ratio - width)
top = np.random.uniform(0, height * ratio - height)
expanded_image = np.zeros((int(height * ratio),
int(width * ratio), num_channels),
dtype=image.dtype)
if self.mean is None:
expanded_image[:, :, :] = np.mean(image, axis=(0, 1))
else:
expanded_image[:, :, :] = self.mean
expanded_image[int(top):int(top + height),
int(left):int(left + width)] = image
expanded_boxes = boxes.copy()
expanded_boxes[:, 0:2] = boxes[:, 0:2] + (int(left), int(top))
expanded_boxes[:, 2:4] = boxes[:, 2:4] + (int(left), int(top))
return expanded_image, expanded_boxes
class ApplyTranslation(Processor):
"""Applies a translation of image and labels.
# Arguments
translation: A list of length two indicating the x,y translation values
fill_color: List of three integers indicating the
color values e.g. ''[0, 0, 0]''
"""
def __init__(self, translation, fill_color=None):
super(ApplyTranslation, self).__init__()
self._matrix = np.zeros((2, 3), dtype=np.float32)
self._matrix[0, 0], self._matrix[1, 1] = 1.0, 1.0
self.fill_color = fill_color
self.translation = translation
@property
def translation(self):
return self._translation
@translation.setter
def translation(self, translation):
if translation is None:
self._translation = None
elif len(translation) == 2:
self._translation = translation
self._matrix[0, 2], self._matrix[1, 2] = translation
else:
raise ValueError('Translation should be `None` or have length two')
def call(self, image, keypoints=None):
height, width = image.shape[:2]
if self.fill_color is None:
fill_color = np.mean(image, axis=(0, 1))
image = warp_affine(image, self._matrix, fill_color)
if keypoints is not None:
keypoints[:, 0] = keypoints[:, 0] + self.translation[0]
keypoints[:, 1] = keypoints[:, 1] + self.translation[1]
return image, keypoints
return image
class RandomTranslation(Processor):
"""Applies a random translation to image and labels
# Arguments
delta_scale: List with two elements having the normalized deltas.
e.g. ''[.25, .25]''.
fill_color: List of three integers indicating the
color values e.g. ''[0, 0, 0]''.
"""
def __init__(
self, delta_scale=[0.25, 0.25], fill_color=None):
super(RandomTranslation, self).__init__()
self.delta_scale = delta_scale
self.apply_translation = ApplyTranslation(None, fill_color)
@property
def delta_scale(self):
return self._delta_scale
@delta_scale.setter
def delta_scale(self, delta_scale):
x_delta_scale, y_delta_scale = delta_scale
if (x_delta_scale < 0) or (y_delta_scale < 0):
            raise ValueError('Delta scale values should be positive')
self._delta_scale = delta_scale
def call(self, image):
height, width = image.shape[:2]
x_delta_scale, y_delta_scale = self.delta_scale
x = image.shape[1] * np.random.uniform(-x_delta_scale, x_delta_scale)
y = image.shape[0] * np.random.uniform(-y_delta_scale, y_delta_scale)
self.apply_translation.translation = [x, y]
return self.apply_translation(image)
class RandomKeypointTranslation(Processor):
"""Applies a random translation to image and keypoints.
# Arguments
delta_scale: List with two elements having the normalized deltas.
e.g. ''[.25, .25]''.
fill_color: ''None'' or List of three integers indicating the
color values e.g. ''[0, 0, 0]''. If ''None'' mean channel values of
the image will be calculated as fill values.
probability: Float between ''[0, 1]''.
"""
def __init__(self, delta_scale=[.2, .2], fill_color=None, probability=0.5):
super(RandomKeypointTranslation, self).__init__()
self.delta_scale = delta_scale
self.fill_color = fill_color
self.probability = probability
@property
def probability(self):
return self._probability
@probability.setter
def probability(self, value):
if not (0.0 < value <= 1.0):
raise ValueError('Probability should be between "[0, 1]".')
self._probability = value
@property
def delta_scale(self):
return self._delta_scale
@delta_scale.setter
def delta_scale(self, delta_scale):
x_delta_scale, y_delta_scale = delta_scale
if (x_delta_scale < 0) or (y_delta_scale < 0):
raise ValueError('Delta scale values should be positive')
if (x_delta_scale > 1) or (y_delta_scale > 1):
raise ValueError('Delta scale values should be less than one')
self._delta_scale = delta_scale
def _sample_random_translation(self, delta_scale, image_shape):
x_delta_scale, y_delta_scale = delta_scale
x = image_shape[1] * np.random.uniform(-x_delta_scale, x_delta_scale)
y = image_shape[0] * np.random.uniform(-y_delta_scale, y_delta_scale)
return [x, y]
def call(self, image, keypoints):
if self.probability >= np.random.rand():
shape = image.shape[:2]
translation = sample_scaled_translation(self.delta_scale, shape)
if self.fill_color is None:
fill_color = np.mean(image, axis=(0, 1))
image = translate_image(image, translation, fill_color)
keypoints = translate_keypoints(keypoints, translation)
return image, keypoints
class RandomKeypointRotation(Processor):
"""Randomly rotate an images with its corresponding keypoints.
# Arguments
rotation_range: Int. indicating the max and min values in degrees
of the uniform distribution ''[-range, range]'' from which the
angles are sampled.
fill_color: ''None'' or List of three integers indicating the
color values e.g. ''[0, 0, 0]''. If ''None'' mean channel values of
the image will be calculated as fill values.
"""
def __init__(self, rotation_range=30, fill_color=None, probability=0.5):
super(RandomKeypointRotation, self).__init__()
self.rotation_range = rotation_range
self.fill_color = fill_color
self.probability = probability
@property
def probability(self):
return self._probability
@probability.setter
def probability(self, value):
if not (0.0 < value <= 1.0):
raise ValueError('Probability should be between "[0, 1]".')
self._probability = value
def _calculate_image_center(self, image):
return (int(image.shape[0] / 2), int(image.shape[1] / 2))
def _rotate_image(self, image, degrees):
center = self._calculate_image_center(image)
matrix = get_rotation_matrix(center, degrees)
if self.fill_color is None:
fill_color = np.mean(image, axis=(0, 1))
return warp_affine(image, matrix, fill_color)
def _degrees_to_radians(self, degrees):
# negative sign changes rotation direction to follow openCV convention.
return - (3.14159 / 180) * degrees
def _build_rotation_matrix(self, radians):
return np.array([[np.cos(radians), - np.sin(radians)],
[np.sin(radians), + np.cos(radians)]])
def _rotate_keypoints(self, keypoints, radians, image_center):
keypoints = keypoints - image_center
matrix = self._build_rotation_matrix(radians)
keypoints = np.matmul(matrix, keypoints.T).T
keypoints = keypoints + image_center
return keypoints
def _sample_rotation(self, rotation_range):
return np.random.uniform(-rotation_range, rotation_range)
def call(self, image, keypoints):
if self.probability >= np.random.rand():
degrees = self._sample_rotation(self.rotation_range)
image = self._rotate_image(image, degrees)
center = self._calculate_image_center(image)
radians = self._degrees_to_radians(degrees)
keypoints = self._rotate_keypoints(keypoints, radians, center)
return image, keypoints
class RandomRotation(Processor):
"""Randomly rotate an images
# Arguments
rotation_range: Int. indicating the max and min values in degrees
of the uniform distribution ``[-range, range]`` from which the
angles are sampled.
fill_color: ''None'' or List of three integers indicating the
color values e.g. ``[0, 0, 0]``. If ``None`` mean channel values of
the image will be calculated as fill values.
probability: Float between 0 and 1.
"""
def __init__(self, rotation_range=30, fill_color=None, probability=0.5):
super(RandomRotation, self).__init__()
self.rotation_range = rotation_range
self.fill_color = fill_color
self.probability = probability
@property
def probability(self):
return self._probability
@probability.setter
def probability(self, value):
if not (0.0 < value <= 1.0):
raise ValueError('Probability should be between "[0, 1]".')
self._probability = value
def _calculate_image_center(self, image):
return (int(image.shape[0] / 2), int(image.shape[1] / 2))
def _rotate_image(self, image, degrees):
center = self._calculate_image_center(image)
matrix = get_rotation_matrix(center, degrees)
if self.fill_color is None:
fill_color = np.mean(image, axis=(0, 1))
return warp_affine(image, matrix, fill_color)
def _sample_rotation(self, rotation_range):
return np.random.uniform(-rotation_range, rotation_range)
def call(self, image):
if self.probability >= np.random.rand():
degrees = self._sample_rotation(self.rotation_range)
image = self._rotate_image(image, degrees)
return image
class TranslateImage(Processor):
"""Applies a translation of image.
The translation is a list of length two indicating the x, y values.
# Arguments
fill_color: List of three integers indicating the
color values e.g. ``[0, 0, 0]``
"""
def __init__(self, fill_color=None):
super(TranslateImage, self).__init__()
self.fill_color = fill_color
def call(self, image, translation):
return translate_image(image, translation, self.fill_color)
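# Hedged usage sketch (not part of the original module): chaining a few of the box processors
# above on dummy data. Boxes are assumed to be in point form [x_min, y_min, x_max, y_max, class]
# as expected by RandomSampleCrop; the image and box values are arbitrary.
#
#   image = np.random.randint(0, 255, (300, 300, 3)).astype('uint8')
#   boxes = np.array([[0.1, 0.2, 0.4, 0.5, 1.0]])
#   image, boxes = ToImageBoxCoordinates().call(image, boxes)
#   image, boxes = RandomSampleCrop(probability=1.0).call(image, boxes)
#   image, boxes = ToNormalizedBoxCoordinates().call(image, boxes)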
|
488147
|
import os
import shutil
from urllib.request import urlopen
from zipfile import ZipFile
def download_aij_models(data_url, data_zipfile):
to_save = os.getcwd()
print("Downloading {} from {}".format(data_zipfile, data_url))
with urlopen(data_url) as response, open(data_zipfile, "wb") as output:
shutil.copyfileobj(response, output)
print("Extracting data...")
with ZipFile(data_zipfile, "r") as to_extract:
to_extract.extractall(to_save)
print("Done")
if __name__ == "__main__":
data_url = "http://bit.ly/2ORHVVC"
data_zipfile = "aij_data_models.zip"
download_aij_models(data_url, data_zipfile)
|
488188
|
from __future__ import absolute_import, division, print_function, unicode_literals
import PIL.Image
from echomesh.Cechomesh import cechomesh
from echomesh.pattern.Pattern import Pattern
from echomesh.util import Log
from echomesh.util.image.Crop import crop
from echomesh.util.image.Resize import resize
LOGGER = Log.logger(__name__)
class Image(Pattern):
HELP = """Displays an image on an x, y plane.
Image accepts JPG, GIF and PNG files."""
SETTINGS = {
'bottom_offset': {
'default': 0,
'help': 'How many pixels to crop off the bottom of the image.',
},
'left': {
'default': None,
'help': ('If None, then the image is horizontally centered - if true, '
'the image is left justified, otherwise it\'s right justified.'
),
},
'left_offset': {
'default': 0,
'help': 'How many pixels to crop off the left of the image.',
},
'right_offset': {
'default': 0,
'help': 'How many pixels to crop off the right of the image.',
},
'stretch': {
'default': False,
'help': ('If true, the image is stretched to fit the pane, ignoring '
'the aspect ratio '),
},
'top': {
'default': None,
'help': ('If None, then the image is vertically centered - if true, '
'the image is top justified, otherwise it\'s bottom justified.'
),
},
'top_offset': {
'default': 0,
'help': 'How many pixels to crop off the top of the image.',
},
'filename': {
'default': 0,
'constant': True,
'help': 'Filename for the image file.',
},
'x': {
'default': 0,
'constant': True,
'help': 'Width of the result, in pixels.',
},
'y': {
'default': 0,
'constant': True,
'help': 'Height of the result, in pixels.',
},
}
PATTERN_COUNT = 0
def _get_image(self):
filename = self.get('filename')
if not filename:
raise Exception('Missing filename setting in Image pattern.')
return PIL.Image.open(filename, 'r')
def _evaluate(self):
return self._image_to_list(self._get_image())
def _image_to_list(self, image):
if image.mode != 'RGB':
image = image.convert(mode='RGB')
image = crop(image, **self.get_dict(
'top_offset', 'left_offset', 'bottom_offset', 'right_offset'))
image = resize(
image, **self.get_dict('x', 'y', 'stretch', 'top', 'left'))
return cechomesh.ColorList(image, columns=image.size[0])
|
488222
|
from .dag import Dag
from .signal import Request
from .task_data import MultiTaskData
class TaskSignal:
""" Class to wrap the construction and sending of signals into easy to use methods."""
def __init__(self, client, dag_name):
""" Initialise the task signal convenience class.
Args:
client (Client): A reference to a signal client object.
dag_name (str): The name of the dag the task belongs to.
"""
self._client = client
self._dag_name = dag_name
def start_dag(self, dag, *, data=None):
""" Schedule the execution of a dag by sending a signal to the workflow.
Args:
dag (Dag, str): The dag object or the name of the dag that should be started.
data (MultiTaskData): The data that should be passed on to the new dag.
Returns:
str: The name of the successfully started dag.
"""
return self._client.send(
Request(
action='start_dag',
payload={'name': dag.name if isinstance(dag, Dag) else dag,
'data': data if isinstance(data, MultiTaskData) else None}
)
).payload['dag_name']
def join_dags(self, names=None):
""" Wait for the specified dags to terminate.
        This function blocks until the specified dags terminate. If no dags are specified,
        it waits for all dags of the workflow, except the dag of the task calling this signal,
        to terminate.
Args:
names (list): The names of the dags that have to terminate.
Returns:
            bool: True if the signal was sent successfully.
"""
return self._client.send(
Request(
action='join_dags',
payload={'names': names}
)
).success
def stop_dag(self, name=None):
""" Send a stop signal to the specified dag or the dag that hosts this task.
Args:
name str: The name of the dag that should be stopped. If no name is given the
dag that hosts this task is stopped.
Upon receiving the stop signal, the dag will not queue any new tasks and wait
for running tasks to terminate.
Returns:
bool: True if the signal was sent successfully.
"""
return self._client.send(
Request(
action='stop_dag',
payload={'name': name if name is not None else self._dag_name}
)
).success
def stop_workflow(self):
""" Send a stop signal to the workflow.
Upon receiving the stop signal, the workflow will not queue any new dags.
Furthermore it will make the stop signal available to the dags, which will
then stop queueing new tasks. As soon as all active tasks have finished
processing, the workflow will terminate.
Returns:
bool: True if the signal was sent successfully.
"""
return self._client.send(Request(action='stop_workflow')).success
@property
def is_stopped(self):
""" Check whether the task received a stop signal from the workflow.
Tasks can use the stop flag to gracefully terminate their work. This is
particularly important for long running tasks and tasks that employ an
infinite loop, such as trigger tasks.
Returns:
bool: True if the task should be stopped.
"""
resp = self._client.send(
Request(
action='is_dag_stopped',
payload={'dag_name': self._dag_name}
)
)
return resp.payload['is_stopped']
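# Hedged usage sketch (not part of the original module): how a task might use TaskSignal,
# assuming `client` is an already connected signal client object (hypothetical here).
#
#   signal = TaskSignal(client, dag_name='main_dag')
#   signal.start_dag('side_dag')
#   signal.join_dags(['side_dag'])
#   if signal.is_stopped:
#       ...  # finish up gracefully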
|
488227
|
from .authentificationPolicy import MyAuthenticationPolicy, myAuthorizationPolicy
from pyramid.security import DENY_ALL
__defaultCookieName = 'API-CORE'
def includeme(config):
authorizationPolicy = myAuthorizationPolicy()
config.set_authorization_policy (authorizationPolicy)
customSettings = meaningConfig(config)
authentificationPolicy = MyAuthenticationPolicy(**customSettings)
config.set_authentication_policy (authentificationPolicy)
    # From now on, every view added to the project will have the 'read' permission by default,
    # but BE CAREFUL and read the doc:
    # https://docs.pylonsproject.org/projects/pyramid/en/latest/narr/security.html#setting-a-default-permission
    # Short way: use NO_PERMISSION_REQUIRED to override this default permission
config.set_default_permission('read')
def meaningConfig(config):
    '''
    Pick the params we need from the *.ini file
    '''
settings = config.get_settings()
if settings.get('RENECO.SECURITE.TINS_LABEL') is None:
        '''
        This key is used to check your cookie claims.
        If you are on SERVER B with an instance of ecoReleve,
        you must have a Tins_Label to identify the app.
        Tip:
        The portal gives you a domain cookie covering all instances of all apps on the server,
        with your user role encoded in the payload like this:
        ['role'] : {
            Tins_label : role
        }
        (example: erd PROD, erd DEV)
        '''
        raise Exception('You must have the key RENECO.SECURITE.TINS_LABEL defined in your *.ini file')
return {
# "header" : settings.get('JWTSECURITY.HEADERS', None), # for portal
"secretkey" : settings.get('JWTSECURITY.MASTER_SECRET', None) ,
"cookie_name" : settings.get('JWTSECURITY.COOKIENAME', __defaultCookieName),
"TIns_Label" : settings.get('RENECO.SECURITE.TINS_LABEL')
}
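# Hedged illustration (not part of the original module): the settings read above come from the
# application's *.ini file; the values below are placeholders only.
#
#   JWTSECURITY.MASTER_SECRET = change-me
#   JWTSECURITY.COOKIENAME = API-CORE
#   RENECO.SECURITE.TINS_LABEL = erd PROD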
|
488235
|
from ..DyStockSelectStrategyTemplate import *
from ....Data.Utility.DyStockDataUtility import *
class DySS_ChipDist(DyStockSelectStrategyTemplate):
"""
筹码分布
"""
name = 'DySS_ChipDist'
chName = '筹码分布'
autoFillDays = True
optimizeAutoFillDays = True
continuousTicks = True
colNames = ['代码', '名称',
'下上筹码比',
'下筹码/20日成交量均值',
'短期下上筹码比',
'短期最高价跌幅(%)'
]
param = OrderedDict\
([
('基准日期', datetime.today().strftime("%Y-%m-%d")),
('向前N日周期', 120),
('筹码k值', 10),
('短期N日周期', 30),
('选几只股票', 50)
])
paramToolTip = {'筹码k值': '统计当前价格+/-k%范围内的筹码',
}
    # strategy private parameters
def __init__(self, param, info):
super().__init__(param, info)
# unpack parameters
self._baseDate = param['基准日期']
self._forwardNTDays = param['向前N日周期']
self._k = param['筹码k值']
self._shortNTDays = param['短期N日周期']
self._selectStockNbr = param['选几只股票']
def onDaysLoad(self):
return self._baseDate, -self._forwardNTDays + 1
def onTicksLoad(self):
return self._baseDate, -self._shortNTDays + 1
def onInit(self, dataEngine, errorDataEngine):
self._daysEngine = dataEngine.daysEngine
self._ticksEngine = errorDataEngine.ticksEngine
self._stockAllCodes = self._daysEngine.stockAllCodes
self._priceData = {} # {code: [close, high price of short period]}
###################################################################################################################
###################################################################################################################
def onStockDays(self, code, df):
close = df.ix[-1, 'close']
diff = close*self._k/100
up, down = close + diff, close - diff
s = DyStockDataUtility.getChipDistByDays(df, ohlcRatio=40, gridNbr=60)
downVolume = s[down:close].sum()
upVolume = s[close:up].sum()
downUpRatio = downVolume/upVolume
mean20Volume = df['volume'][-20:].mean()
downMean20Ratio = downVolume/mean20Volume
# save
self._priceData[code] = [close, df['high'][-self._shortNTDays:].max()]
        # set the result
row = [code, self._stockAllCodes[code],
downUpRatio,
downMean20Ratio
]
self._result.append(row)
self._result.sort(key=operator.itemgetter(2), reverse=True)
self._result = self._result[:self._selectStockNbr]
def onStockTicks(self, code, dfs):
close, high = self._priceData.get(code)
diff = high - close
up, down = close + diff, close - diff
s = DyStockDataUtility.getChipDistByTicks(dfs)
ratio = s[down:close].sum()/s[close:up].sum()
        # set the result
partRow = [ratio,
(high - close)/high*100
]
row = self.getFromResult(code)
row.extend(partRow)
|
488237
|
import tidypolars as tp
from tidypolars import col
import math
def test_abs():
"""Can get absolute value"""
df = tp.Tibble(x = range(-3, 0))
actual = df.mutate(abs_x = tp.abs('x'), abs_col_x = tp.abs(col('x')))
expected = tp.Tibble(x = range(-3, 0), abs_x = range(3, 0, -1), abs_col_x = range(3, 0, -1))
assert actual.frame_equal(expected), "abs failed"
def test_agg_stats():
"""Can get aggregation statistics"""
df = tp.Tibble(x = range(3), y = [2, 1, 0])
actual = (
df
.summarize(
corr = tp.cor('x', 'y'),
count_x = tp.count('x'), count_col_x = tp.count(col('x')),
cov = tp.cov('x', 'y'),
first_x = tp.first('x'), first_col_x = tp.first(col('x')),
last_x = tp.last('x'), last_col_x = tp.last(col('x')),
max_x = tp.max('x'), max_col_x = tp.max(col('x')),
mean_x = tp.mean('x'), mean_col_x = tp.mean(col('x')),
median_x = tp.median('x'), median_col_x = tp.median(col('x')),
min_x = tp.min('x'), min_col_x = tp.min(col('x')),
n = tp.n(),
n_distinct_x = tp.n_distinct('x'), n_distinct_col_x = tp.n_distinct(col('x')),
quantile_x = tp.quantile('x', .25),
sd_x = tp.sd('x'), sd_col_x = tp.sd(col('x')),
sum_x = tp.sum('x'), sum_col_x = tp.sum(col('x')),
var_y = tp.var('y')
)
)
expected = tp.Tibble(
corr = [-1],
count_x = [3], count_col_x = [3],
cov = [-1],
first_x = [0], first_col_x = [0],
last_x = [2], last_col_x = [2],
max_x = [2], max_col_x = [2],
mean_x = [1], mean_col_x = [1],
median_x = [1], median_col_x = [1],
min_x = [0], min_col_x = [0],
n = [3],
n_distinct_x = [3], n_distinct_col_x = [3],
quantile_x = [0],
sd_x = [1], sd_col_x = [1],
sum_x = [3], sum_col_x = [3],
var_y = [1]
)
assert actual.frame_equal(expected), "aggregation stats failed"
def test_case_when():
"""Can use case_when"""
df = tp.Tibble(x = range(1, 4))
actual = df.mutate(case_x = tp.case_when(col('x') < 2).then(0)
.when(col('x') < 3).then(1)
.otherwise(0))
expected = tp.Tibble(x = range(1, 4), case_x = [0, 1, 0])
assert actual.frame_equal(expected), "case_when failed"
def test_casting():
"""Can do type casting"""
df = tp.Tibble(int_col = [0, 0, 1], float_col = [1.0, 2.0, 3.0], chr_col = ["1", "2", "3"])
actual = (
df
.mutate(float_cast = tp.as_float('int_col'),
int_cast = tp.as_integer('float_col'),
string_cast = tp.as_string('int_col'),
bool_cast = tp.as_boolean('int_col'))
.select('float_cast', 'int_cast', 'string_cast', 'bool_cast')
)
expected = tp.Tibble(float_cast = [0.0, 0.0, 1.0],
int_cast = [1, 2, 3],
string_cast = ["0", "0", "1"],
bool_cast = [False, False, True])
assert actual.frame_equal(expected), "casting failed"
def test_coalesce():
"""Can use coalesce"""
df = tp.Tibble(x = [None, None, 1], y = [2, None, 2], z = [3, 3, 3])
actual = (
df
.mutate(
coalesce_x = tp.coalesce(col('x'), col('y'), col('z'))
)
.select('coalesce_x')
)
expected = tp.Tibble(coalesce_x = [2, 3, 1])
assert actual.frame_equal(expected), "coalesce failed"
def test_floor():
"""Can get the floor"""
df = tp.Tibble(x = [1.1, 5.5])
actual = df.mutate(floor_x = tp.floor('x')).select('floor_x')
expected = tp.Tibble(floor_x = [1.0, 5.0])
assert actual.frame_equal(expected), "floor failed"
def test_lag():
"""Can get lagging values with function"""
df = tp.Tibble({'x': range(3)})
actual = df.mutate(lag_null = tp.lag(col('x')),
lag_default = tp.lag('x', default = 1))
expected = tp.Tibble({'x': range(3),
'lag_null': [None, 0, 1],
'lag_default': [1, 0, 1]})
assert actual.frame_equal(expected, null_equal = True), "lag failed"
def test_lead():
"""Can get leading values with function"""
df = tp.Tibble({'x': range(3)})
actual = df.mutate(lead_null = tp.lead(col('x')),
lead_default = tp.lead('x', default = 1))
expected = tp.Tibble({'x': range(3),
'lead_null': [1, 2, None],
'lead_default': [1, 2, 1]})
assert actual.frame_equal(expected, null_equal = True), "lead failed"
def test_logs():
"""Can get leading values with function"""
df = tp.Tibble({'x': range(1, 4)})
actual = df.mutate(log = tp.log(col('x')).round(2),
log10 = tp.log10('x').round(2))
expected = df.mutate(log = col('x').log().round(2), log10 = col('x').log10().round(2))
assert actual.frame_equal(expected), "log failed"
def test_if_else():
"""Can use if_else"""
df = tp.Tibble(x = range(1, 4))
actual = df.mutate(case_x = tp.if_else(col('x') < 2, 1, 0))
expected = tp.Tibble(x = range(1, 4), case_x = [1, 0, 0])
assert actual.frame_equal(expected), "if_else failed"
def test_is_predicates():
"""Can use is predicates"""
df = tp.Tibble(x = [0.0, 1.0, 2.0],
y = [None, math.inf, math.nan])
actual = (
df
.mutate(
between = tp.between('x', 1, 2),
is_finite = tp.is_finite('x'),
is_in = tp.is_in('x', [1.0, 2.0]),
is_infinite = tp.is_infinite('y'),
is_not = tp.is_not(tp.is_finite(col('x'))),
is_not_in = tp.is_not_in('x', [1.0, 2.0]),
is_not_null = tp.is_not_null('y'),
is_null = tp.is_null('y')
)
).drop(['x', 'y'])
expected = tp.Tibble(
between = [False, True, True],
is_finite = [True, True, True],
is_in = [False, True, True],
is_infinite = [None, True, False],
is_not = [False, False, False],
is_not_in = [True, False, False],
is_not_null = [False, True, True],
is_null = [True, False, False]
)
assert actual.frame_equal(expected, null_equal = True), "is_predicates failed"
def test_rep():
df = tp.Tibble(x = [0, 1], y = [0, 1])
assert tp.rep(df, 2).frame_equal(df.bind_rows(df)), "rep df failed"
assert tp.rep(1, 2).series_equal(tp.Series([1, 1])), "rep int failed"
assert tp.rep("a", 2).series_equal(tp.Series(["a", "a"])), "rep str failed"
assert tp.rep(True, 2).series_equal(tp.Series([True, True])), "rep bool failed"
assert tp.rep(tp.Series([0, 1]), 2).series_equal(tp.Series([0, 1, 0, 1])), "rep series failed"
def test_replace_null():
"""Can replace nulls"""
df = tp.Tibble(x = [0, None], y = [None, None])
actual = df.mutate(x = tp.replace_null(col('x'), 1))
expected = tp.Tibble(x = [0, 1], y = [None, None])
assert actual.frame_equal(expected), "replace_null function failed"
def test_row_number():
"""Can get row number"""
df = tp.Tibble(x = ['a', 'a', 'b'])
actual = df.mutate(row_num = tp.row_number())
expected = tp.Tibble(x = ['a', 'a', 'b'], row_num = [1, 2, 3])
assert actual.frame_equal(expected), "row_number failed"
def test_row_number_group():
"""Can get row number by group"""
df = tp.Tibble(x = ['a', 'a', 'b'])
actual = (
df.mutate(group_row_num = tp.row_number(), by = 'x')
.arrange('x', 'group_row_num')
)
expected = tp.Tibble(x = ['a', 'a', 'b'], group_row_num = [1, 2, 1])
assert actual.frame_equal(expected), "group row_number failed"
def test_round():
"""Can round values"""
df = tp.Tibble(x = [1.11, 2.22, 3.33])
actual = df.mutate(x = tp.round(col('x'), 1))
expected = tp.Tibble(x = [1.1, 2.2, 3.3])
assert actual.frame_equal(expected), "round failed"
def test_sqrt():
"""Can get the square root"""
df = tp.Tibble(x = [9, 25, 100])
actual = df.mutate(x = tp.sqrt('x'))
expected = tp.Tibble(x = [3, 5, 10])
assert actual.frame_equal(expected), "sqrt failed"
|
488268
|
import tensorflow as tf
from tensorflow.keras import metrics
from ml4ir.base.model.metrics.metrics_impl import MetricState
from ml4ir.base.features.feature_config import FeatureConfig
from typing import Optional, Dict
class CategoricalAccuracy(metrics.CategoricalAccuracy):
"""
Custom metric class to compute the Categorical Accuracy.
Currently just a wrapper around tf.keras.metrics.CategoricalAccuracy
to maintain consistency of arguments to __init__
"""
def __init__(
self,
feature_config: FeatureConfig,
metadata_features: Dict,
name="categorical_accuracy",
state=MetricState.NEW,
**kwargs
):
"""
Creates a CategoricalAccuracy instance
Parameters
----------
feature_config : FeatureConfig object
FeatureConfig object that defines the configuration for each model
feature
metadata_features : dict
Dictionary of metadata feature tensors that can be used to compute
custom metrics
name : str
Name of the metric
state : {"new", "old"}
State of the metric
"""
super(CategoricalAccuracy, self).__init__(name=name)
class Top5CategoricalAccuracy(metrics.TopKCategoricalAccuracy):
"""
Custom metric class to compute the Top K Categorical Accuracy.
Currently a wrapper around tf.keras.metrics.TopKCategoricalAccuracy that
squeezes one dimension.
It maintains consistency of arguments to __init__
"""
def __init__(
self,
feature_config: Optional[FeatureConfig] = None,
metadata_features: Dict = {},
name="top_5_categorical_accuracy",
state=MetricState.NEW,
**kwargs
):
"""
        Creates a Top5CategoricalAccuracy instance
Parameters
----------
feature_config : FeatureConfig object
FeatureConfig object that defines the configuration for each model
feature
metadata_features : dict
Dictionary of metadata feature tensors that can be used to compute
custom metrics
name : str
Name of the metric
state : {"new", "old"}
State of the metric
"""
super(Top5CategoricalAccuracy, self).__init__(name=name, k=5)
def update_state(self, y_true, y_pred, sample_weight=None):
"""
        Squeeze the second dimension (axis=1) and compute top K categorical accuracy
Parameters
----------
y_true : Tensor object
Tensor containing true class labels
Shape : [batch_size, 1, num_classes]
y_pred : Tensor object
Tensor containing predicted scores for the classes
Shape : [batch_size, 1, num_classes]
sample_weight : dict
Dictionary containing weights for the classes to measure weighted average metric
Returns
-------
Tensor object
Top K categorical accuracy computed on y_true and y_pred
Notes
-----
Input shape is a 3 dimensional tensor of size
(batch_size, 1, num_classes). We are squeezing
the second dimension to follow the API of tf.keras.metrics.TopKCategoricalAccuracy
        Axis 1 of y_true and y_pred must be of size 1, otherwise `tf.squeeze`
        will throw an error.
"""
return super(Top5CategoricalAccuracy, self).update_state(
tf.squeeze(y_true), tf.squeeze(y_pred), sample_weight=sample_weight
)
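# Hedged usage sketch (not part of the original module): shows how
# Top5CategoricalAccuracy squeezes the extra axis before computing accuracy.
# Shapes follow the update_state docstring above; no FeatureConfig is needed
# because the constructor arguments default to None / {}.
if __name__ == "__main__":
    metric = Top5CategoricalAccuracy()
    y_true = tf.one_hot([[3], [1]], depth=10)   # [batch_size=2, 1, num_classes=10]
    y_pred = tf.random.uniform((2, 1, 10))      # [batch_size=2, 1, num_classes=10]
    metric.update_state(y_true, y_pred)
    print(metric.result().numpy())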
|
488271
|
from sys import exit
from argparse import ArgumentParser
from datetime import datetime
from scanner import TargetParser, Enumerator, start_tls
from TLS.protocols import versions as p_versions
# ToDo
# cipher preference
# encrypted sni (https://tools.ietf.org/html/draft-ietf-tls-esni-02)
def print_start():
print("Starting enumeration at: {}".format(datetime.now().strftime('%d-%m-%Y %H:%M:%S')))
def test(target, preamble, sni_name):
enum = Enumerator(target)
if sni_name:
enum.sni_name = sni_name
enum.set_clear_text_layer(preamble)
enum.verbose = True # Enumerator will print in verbose mode
print_start()
supported_protocols = enum.get_version_support(reversed(p_versions))
if len(supported_protocols) == 0:
for key, value in start_tls.items():
if int(target.port) in value: # Try again: adding a clear-text protocol for the port
print_start()
enum.set_clear_text_layer(key)
supported_protocols = enum.get_version_support(reversed(p_versions))
break
if len(supported_protocols) == 0: # Try again with SNI extension disabled (all following actions will not use SNI)
enum.sni = False
print_start()
supported_protocols = enum.get_version_support(reversed(p_versions))
enum.check_fallback_support(supported_protocols)
for p in supported_protocols:
enum.get_cipher_support(p)
if p_versions[supported_protocols[0]] == p_versions['TLSv1_3'] and len(supported_protocols) > 1:
enum.get_certificate(supported_protocols[1])
else:
enum.get_certificate(supported_protocols[0])
def main():
parser = ArgumentParser(description='Scanner to enumerate encryption protocol support', prog='TLScan3')
parser.add_argument('target', type=str, help="specify target as: host:port e.g. www.example.com:443 or "
"[::1]:443 for IPv6")
parser.add_argument('--version', action='version', version='%(prog)s 3.1')
p_group = parser.add_mutually_exclusive_group()
for key, value in start_tls.items():
p_group.add_argument("--{}".format(key), dest=key, action='store_true',
help='Use {} as protocol layer'.format(key.upper()))
parser.add_argument('--sni', type=str, dest='sni', help="SNI name to use in the handshake")
args = parser.parse_args()
preamble = None
for key, value in start_tls.items():
try:
if getattr(args, key):
preamble = key
break
except AttributeError:
pass
try:
try:
t = TargetParser(args.target).get_target()
except ValueError:
print("[!] Failed to parse target, trying again by adding a default port (443)")
t = TargetParser(args.target + ":443").get_target()
test(t, preamble, args.sni)
except KeyboardInterrupt:
print("[!] Received termination signal, exiting!")
exit(3)
except:
raise
if __name__ == '__main__':
main()
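# Hedged usage sketch (not part of the original script); the exact protocol
# flags (e.g. --smtp) are assumptions that depend on the start_tls mapping
# imported from scanner:
#   python TLScan3.py www.example.com:443 --sni www.example.com
#   python TLScan3.py mail.example.com:25 --smtp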
|
488287
|
import requests
BASE_URL = "http://fundamentus.com.br/"
"""
>>> get_stock_url('ITSA3')
'http://fundamentus.com.br/detalhes.php?papel=ITSA3'
"""
def get_stock_url(stock):
return "{}detalhes.php?papel={}".format(BASE_URL, stock)
"""
>>> get_base_url()
"http://fundamentus.com.br/"
"""
def get_base_url():
return "http://fundamentus.com.br/"
def get_stocks():
with open("fundamentus.txt", "r") as fundamentus_file:
stocks = fundamentus_file.read().split()
return stocks
def download_stock_html(stock_url):
req = requests.get(stock_url)
return req.content
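# Hedged usage sketch (not part of the original module): builds the URL for a
# ticker and downloads its page. Requires network access; "fundamentus.txt"
# (one ticker per line) is only needed by get_stocks().
if __name__ == "__main__":
    url = get_stock_url("ITSA3")
    print(url)
    html = download_stock_html(url)
    print("{} bytes downloaded".format(len(html)))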
|
488291
|
from spherov2.commands import Commands
class SystemMode(Commands):
_did = 18
@staticmethod
def enable_desktoy_mode(toy, enable: bool, proc=None):
        toy._execute(SystemMode._encode(toy, 41, proc, [int(enable)]))
@staticmethod
def get_out_of_box_state(toy, proc=None):
return bool(toy._execute(SystemMode._encode(toy, 43, proc)).data[0])
@staticmethod
def enable_out_of_box_state(toy, enable: bool, proc=None):
        toy._execute(SystemMode._encode(toy, 44, proc, [int(enable)]))
|
488300
|
import torch
import torch.nn.functional as F
def coords_grid(b, h, w, homogeneous=False, device=None):
y, x = torch.meshgrid(torch.arange(h), torch.arange(w)) # [H, W]
stacks = [x, y]
if homogeneous:
ones = torch.ones_like(x) # [H, W]
stacks.append(ones)
grid = torch.stack(stacks, dim=0).float() # [2, H, W] or [3, H, W]
grid = grid[None].repeat(b, 1, 1, 1) # [B, 2, H, W] or [B, 3, H, W]
if device is not None:
grid = grid.to(device)
return grid
def generate_window_grid(h_min, h_max, w_min, w_max, len_h, len_w, device=None):
assert device is not None
x, y = torch.meshgrid([torch.linspace(w_min, w_max, len_w, device=device),
torch.linspace(h_min, h_max, len_h, device=device)],
)
grid = torch.stack((x, y), -1).transpose(0, 1).float() # [H, W, 2]
return grid
def normalize_coords(coords, h, w):
# coords: [B, H, W, 2]
c = torch.Tensor([(w - 1) / 2., (h - 1) / 2.]).float().to(coords.device)
return (coords - c) / c # [-1, 1]
def bilinear_sample(img, sample_coords, mode='bilinear', padding_mode='zeros', return_mask=False):
# img: [B, C, H, W]
# sample_coords: [B, 2, H, W] in image scale
if sample_coords.size(1) != 2: # [B, H, W, 2]
sample_coords = sample_coords.permute(0, 3, 1, 2)
b, _, h, w = sample_coords.shape
# Normalize to [-1, 1]
x_grid = 2 * sample_coords[:, 0] / (w - 1) - 1
y_grid = 2 * sample_coords[:, 1] / (h - 1) - 1
grid = torch.stack([x_grid, y_grid], dim=-1) # [B, H, W, 2]
img = F.grid_sample(img, grid, mode=mode, padding_mode=padding_mode, align_corners=True)
if return_mask:
mask = (x_grid >= -1) & (y_grid >= -1) & (x_grid <= 1) & (y_grid <= 1) # [B, H, W]
return img, mask
return img
def flow_warp(feature, flow, mask=False, padding_mode='zeros'):
b, c, h, w = feature.size()
assert flow.size(1) == 2
grid = coords_grid(b, h, w).to(flow.device) + flow # [B, 2, H, W]
return bilinear_sample(feature, grid, padding_mode=padding_mode,
return_mask=mask)
def forward_backward_consistency_check(fwd_flow, bwd_flow,
alpha=0.01,
beta=0.5
):
# fwd_flow, bwd_flow: [B, 2, H, W]
# alpha and beta values are following UnFlow (https://arxiv.org/abs/1711.07837)
assert fwd_flow.dim() == 4 and bwd_flow.dim() == 4
assert fwd_flow.size(1) == 2 and bwd_flow.size(1) == 2
flow_mag = torch.norm(fwd_flow, dim=1) + torch.norm(bwd_flow, dim=1) # [B, H, W]
warped_bwd_flow = flow_warp(bwd_flow, fwd_flow) # [B, 2, H, W]
warped_fwd_flow = flow_warp(fwd_flow, bwd_flow) # [B, 2, H, W]
diff_fwd = torch.norm(fwd_flow + warped_bwd_flow, dim=1) # [B, H, W]
diff_bwd = torch.norm(bwd_flow + warped_fwd_flow, dim=1)
threshold = alpha * flow_mag + beta
fwd_occ = (diff_fwd > threshold).float() # [B, H, W]
bwd_occ = (diff_bwd > threshold).float()
return fwd_occ, bwd_occ
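# Hedged usage sketch (not part of the original module): runs the
# forward/backward consistency check on random flows; a pixel is marked
# occluded when |fwd + warp(bwd)| exceeds alpha * (|fwd| + |bwd|) + beta.
if __name__ == "__main__":
    fwd = torch.randn(1, 2, 32, 32)
    bwd = torch.randn(1, 2, 32, 32)
    fwd_occ, bwd_occ = forward_backward_consistency_check(fwd, bwd)
    print(fwd_occ.shape, fwd_occ.mean().item())  # [1, 32, 32], fraction of occluded pixels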
|
488379
|
from scipy import signal
from pywizard.userSettings import settings
import logging
import numpy as np
class PitchEstimator(object):
@classmethod
def pitchForPeriod(cls, buf):
return cls(buf).estimate()
def __init__(self, buf):
self._bestPeriod = None
self.buf = buf
self._normalizedCoefficients = self.getNormalizedCoefficients()
def isOutOfRange(self):
x = self.bestPeriod()
return ( (self._normalizedCoefficients[x] < self._normalizedCoefficients[x-1])
and
(self._normalizedCoefficients[x] < self._normalizedCoefficients[x+1]) )
def interpolated(self):
bestPeriod = int(self.bestPeriod())
middle = self._normalizedCoefficients[bestPeriod]
left = self._normalizedCoefficients[bestPeriod - 1]
right = self._normalizedCoefficients[bestPeriod + 1]
if ( (2*middle - left - right) == 0):
return bestPeriod
else:
return bestPeriod + .5 * ( right - left) / (2 * middle - left - right)
def estimate(self):
bestPeriod = int(self.bestPeriod())
maximumMultiple = bestPeriod / self.minimumPeriod()
found = False
estimate = self.interpolated()
        if np.isnan(estimate):
return 0.0
while not found and maximumMultiple >= 1:
subMultiplesAreStrong = True
for i in range(0, int(maximumMultiple)):
logging.debug("estimate={} maximumMultiple={}".format(estimate, maximumMultiple))
                subMultiplePeriod = int( np.floor( (i+1) * estimate / maximumMultiple + .5) )
try:
curr = self._normalizedCoefficients[subMultiplePeriod]
except IndexError:
curr = None
if (curr is not None) and ( curr < settings.subMultipleThreshold * self._normalizedCoefficients[bestPeriod]):
subMultiplesAreStrong = False
if subMultiplesAreStrong:
estimate /= maximumMultiple
maximumMultiple -= 1
return estimate
def getNormalizedCoefficients(self):
minimumPeriod = self.minimumPeriod() - 1
maximumPeriod = self.maximumPeriod() + 1
return self.buf.getNormalizedCoefficientsFor(minimumPeriod, maximumPeriod)
def bestPeriod(self):
if self._bestPeriod is None:
bestPeriod = self.minimumPeriod()
maximumPeriod = self.maximumPeriod()
bestPeriod = self._normalizedCoefficients.index(max(self._normalizedCoefficients))
logging.debug("_normalizedCoefficients = {}".format(self._normalizedCoefficients))
logging.debug("bestPeriod={} minimumPeriod={} maximumPeriod={}".format(bestPeriod, self.minimumPeriod(), self.maximumPeriod()))
if bestPeriod < self.minimumPeriod():
bestPeriod = self.minimumPeriod()
if bestPeriod > maximumPeriod:
bestPeriod = maximumPeriod
self._bestPeriod = int(bestPeriod)
return self._bestPeriod
def maxPitchInHZ(self):
return settings.maximumPitchInHZ
def minPitchInHZ(self):
return settings.minimumPitchInHZ
def minimumPeriod(self):
        return int(np.floor(self.buf.sampleRate / settings.maximumPitchInHZ - 1))
def maximumPeriod(self):
        return int(np.floor(self.buf.sampleRate / settings.minimumPitchInHZ + 1))
def ClosestValueFinder(actual, table):
if actual < table[0]:
return 0
return table.index(min(table, key=lambda x:abs(x-actual)))
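# Hedged usage sketch (not part of the original module): ClosestValueFinder
# returns the index of the table entry nearest to `actual`; values below the
# first entry clamp to index 0.
# ClosestValueFinder(3.9, [1, 3, 5, 7])  -> 1 (3 is the nearest entry)
# ClosestValueFinder(0.2, [1, 3, 5, 7])  -> 0 (below table[0], clamped)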
|
488398
|
import json
import discord
from discord.ext import commands
from utils import randomcolor
class Colors(commands.Cog):
"""Color releated commands"""
def __init__(self, bot):
self.bot = bot
@commands.command(aliases=["col", "colour"], extras={"image": "https://i.imgur.com/zMQ7mz3.png"})
async def color(self, ctx, color: commands.ColourConverter):
"""Sends information about a color
The following formats are accepted:
- `0x<hex>`
- `#<hex>`
- `0x#<hex>`
- `rgb(<number>, <number>, <number>)`
- All the colors mentioned in https://gist.github.com/Soheab/d9cf3f40e34037cfa544f464fc7d919e#colours
"""
hexcol = str(hex(color.value))[2:]
async with self.bot.session.get(f"http://www.thecolorapi.com/id?hex={hexcol}") as response:
data = json.loads(await response.text())
color_name = data["name"]["value"].title()
intcol = int(hexcol, 16)
embed = discord.Embed(color=intcol)
embed.set_author(name=color_name)
embed.set_image(url=f"http://singlecolorimage.com/get/{hexcol}/384x384")
embed.add_field(name="Hex", value=data["hex"]["value"])
embed.add_field(name="RGB", value=data["rgb"]["value"])
embed.add_field(name="INT", value=intcol)
embed.add_field(name="HSL", value=data["hsl"]["value"])
embed.add_field(name="HSV", value=data["hsv"]["value"])
embed.add_field(name="CMYK", value=data["cmyk"]["value"])
embed.add_field(name="XYZ", value=data["XYZ"]["value"])
await ctx.send(embed=embed)
@color.error
async def color_error(self, ctx, error):
if isinstance(error, commands.BadColourArgument):
return await ctx.send(f"Bad color: {error}")
raise error
@commands.command(
aliases=["randcolor", "randomcol", "randcol", "randomcolor", "rc"],
extras={"image": "https://i.imgur.com/m6GQHPg.png"},
)
async def randomcolour(self, ctx):
"""
Generates a random color.
Note: This is not fully random, but it is random enough for most purposes.
"""
rand_color = randomcolor.RandomColor()
generated_color = rand_color.generate()[0]
hexcol = generated_color.replace("#", "")
async with self.bot.session.get(f"http://www.thecolorapi.com/id?hex={hexcol}") as response:
data = json.loads(await response.text())
color_name = data["name"]["value"]
rgb = data.get("rgb").get("value")
hexcol = data.get("hex").get("value")
intcol = int(hexcol[1:], 16)
embed = discord.Embed(color=intcol)
embed.set_author(name=color_name)
embed.set_thumbnail(url=f"http://singlecolorimage.com/get/{hexcol[1:]}/256x256")
embed.add_field(name="Hex", value=data["hex"]["value"])
embed.add_field(name="RGB", value=data["rgb"]["value"])
embed.add_field(name="INT", value=intcol)
embed.add_field(name="HSL", value=data["hsl"]["value"])
embed.add_field(name="HSV", value=data["hsv"]["value"])
embed.add_field(name="CMYK", value=data["cmyk"]["value"])
embed.add_field(name="XYZ", value=data["XYZ"]["value"])
embed.set_footer(text="You can use the color command to get more details about the color")
await ctx.send(embed=embed)
def setup(bot):
"""Adds the cog to the bot"""
bot.add_cog(Colors(bot))
|
488411
|
from astropy import cosmology as cosmo
import logging
from autoconf import conf
import autofit as af
import autoarray as aa
import autogalaxy as ag
from autolens.lens.model.analysis import AnalysisDataset
from autolens.lens.model.preloads import Preloads
from autolens.interferometer.model.result import ResultInterferometer
from autolens.interferometer.model.visualizer import VisualizerInterferometer
from autolens.interferometer.fit_interferometer import FitInterferometer
from autolens.lens.model.settings import SettingsLens
from autolens import exc
logger = logging.getLogger(__name__)
logger.setLevel(level="INFO")
class AnalysisInterferometer(AnalysisDataset):
def __init__(
self,
dataset,
positions: aa.Grid2DIrregular = None,
hyper_dataset_result=None,
cosmology=cosmo.Planck15,
settings_pixelization=aa.SettingsPixelization(),
settings_inversion=aa.SettingsInversion(),
settings_lens=SettingsLens(),
):
super().__init__(
dataset=dataset,
positions=positions,
hyper_dataset_result=hyper_dataset_result,
cosmology=cosmology,
settings_pixelization=settings_pixelization,
settings_inversion=settings_inversion,
settings_lens=settings_lens,
)
if self.hyper_dataset_result is not None:
self.set_hyper_dataset(result=self.hyper_dataset_result)
else:
self.hyper_galaxy_visibilities_path_dict = None
self.hyper_model_visibilities = None
@property
def interferometer(self):
return self.dataset
def modify_before_fit(self, paths: af.DirectoryPaths, model: af.AbstractPriorModel):
self.check_and_replace_hyper_images(paths=paths)
if not paths.is_complete:
visualizer = VisualizerInterferometer(visualize_path=paths.image_path)
visualizer.visualize_interferometer(interferometer=self.interferometer)
visualizer.visualize_hyper_images(
hyper_galaxy_image_path_dict=self.hyper_galaxy_image_path_dict,
hyper_model_image=self.hyper_model_image,
)
logger.info(
"PRELOADS - Setting up preloads, may take a few minutes for fits using an inversion."
)
self.set_preloads(paths=paths, model=model)
return self
def set_hyper_dataset(self, result):
super().set_hyper_dataset(result=result)
self.hyper_model_visibilities = result.hyper_model_visibilities
self.hyper_galaxy_visibilities_path_dict = (
result.hyper_galaxy_visibilities_path_dict
)
def associate_hyper_visibilities(
self, instance: af.ModelInstance
) -> af.ModelInstance:
"""
Takes visibilities from the last result, if there is one, and associates them with galaxies in this search
where full-path galaxy names match.
If the galaxy collection has a different name then an association is not made.
e.g.
galaxies.lens will match with:
galaxies.lens
but not with:
galaxies.lens
galaxies.source
Parameters
----------
instance
A model instance with 0 or more galaxies in its tree
Returns
-------
instance
The input instance with visibilities associated with galaxies where possible.
"""
if self.hyper_galaxy_visibilities_path_dict is not None:
for galaxy_path, galaxy in instance.path_instance_tuples_for_class(
ag.Galaxy
):
if galaxy_path in self.hyper_galaxy_visibilities_path_dict:
galaxy.hyper_model_visibilities = self.hyper_model_visibilities
galaxy.hyper_galaxy_visibilities = self.hyper_galaxy_visibilities_path_dict[
galaxy_path
]
return instance
def log_likelihood_function(self, instance):
"""
Determine the fit of a lens galaxy and source galaxy to the interferometer in this lens.
Parameters
----------
instance
A model instance with attributes
Returns
-------
        float
            The log likelihood (figure of merit) of how well this model instance fits the interferometer dataset.
"""
try:
return self.fit_interferometer_for_instance(
instance=instance
).figure_of_merit
except (
exc.PixelizationException,
exc.InversionException,
exc.GridException,
OverflowError,
) as e:
raise exc.FitException from e
def fit_interferometer_for_instance(
self,
instance,
use_hyper_scalings=True,
preload_overwrite=None,
check_positions=True,
):
self.associate_hyper_images(instance=instance)
tracer = self.tracer_for_instance(instance=instance)
if check_positions:
self.settings_lens.check_positions_trace_within_threshold_via_tracer(
tracer=tracer, positions=self.positions
)
hyper_background_noise = self.hyper_background_noise_for_instance(
instance=instance
)
return self.fit_interferometer_for_tracer(
tracer=tracer,
hyper_background_noise=hyper_background_noise,
use_hyper_scalings=use_hyper_scalings,
)
def fit_interferometer_for_tracer(
self,
tracer,
hyper_background_noise,
use_hyper_scalings=True,
preload_overwrite=None,
):
preloads = self.preloads if preload_overwrite is None else preload_overwrite
return FitInterferometer(
interferometer=self.dataset,
tracer=tracer,
hyper_background_noise=hyper_background_noise,
use_hyper_scaling=use_hyper_scalings,
settings_pixelization=self.settings_pixelization,
settings_inversion=self.settings_inversion,
preloads=preloads,
)
@property
def fit_func(self):
return self.fit_interferometer_for_instance
def stochastic_log_evidences_for_instance(self, instance):
instance = self.associate_hyper_images(instance=instance)
tracer = self.tracer_for_instance(instance=instance)
if not tracer.has_pixelization:
return None
if not any(
[
isinstance(pix, aa.pix.VoronoiBrightnessImage)
for pix in tracer.pixelization_list
]
):
return
hyper_background_noise = self.hyper_background_noise_for_instance(
instance=instance
)
settings_pixelization = (
self.settings_pixelization.settings_with_is_stochastic_true()
)
log_evidences = []
for i in range(self.settings_lens.stochastic_samples):
try:
log_evidence = FitInterferometer(
interferometer=self.dataset,
tracer=tracer,
hyper_background_noise=hyper_background_noise,
settings_pixelization=settings_pixelization,
settings_inversion=self.settings_inversion,
preloads=self.preloads,
).log_evidence
except (
exc.PixelizationException,
exc.InversionException,
exc.GridException,
OverflowError,
) as e:
log_evidence = None
if log_evidence is not None:
log_evidences.append(log_evidence)
return log_evidences
def visualize(self, paths: af.DirectoryPaths, instance, during_analysis):
instance = self.associate_hyper_images(instance=instance)
fit = self.fit_interferometer_for_instance(instance=instance)
visualizer = VisualizerInterferometer(visualize_path=paths.image_path)
visualizer.visualize_fit_interferometer(
fit=fit, during_analysis=during_analysis
)
visualizer.visualize_tracer(
tracer=fit.tracer, grid=fit.grid, during_analysis=during_analysis
)
if fit.inversion is not None:
visualizer.visualize_inversion(
inversion=fit.inversion, during_analysis=during_analysis
)
visualizer.visualize_contribution_maps(tracer=fit.tracer)
if visualizer.plot_fit_no_hyper:
fit = self.fit_interferometer_for_tracer(
tracer=fit.tracer,
hyper_background_noise=None,
use_hyper_scalings=False,
preload_overwrite=Preloads(use_w_tilde=False),
)
visualizer.visualize_fit_interferometer(
fit=fit, during_analysis=during_analysis, subfolders="fit_no_hyper"
)
def save_results_for_aggregator(
self,
paths: af.DirectoryPaths,
samples: af.OptimizerSamples,
model: af.Collection,
):
if conf.instance["general"]["hyper"]["stochastic_outputs"]:
self.save_stochastic_outputs(paths=paths, samples=samples)
def make_result(
self, samples: af.PDFSamples, model: af.Collection, search: af.NonLinearSearch
):
return ResultInterferometer(
samples=samples, model=model, analysis=self, search=search
)
def save_attributes_for_aggregator(self, paths: af.DirectoryPaths):
super().save_attributes_for_aggregator(paths=paths)
paths.save_object("uv_wavelengths", self.dataset.uv_wavelengths)
paths.save_object("real_space_mask", self.dataset.real_space_mask)
paths.save_object("positions", self.positions)
|
488425
|
from pathlib import Path
from attr import attrib, dataclass
from iters import iter
from gd.abstract_entity import AbstractEntity
from gd.async_iters import awaitable_iterator
from gd.errors import MissingAccess
from gd.http import NEWGROUNDS_SONG_LISTEN, URL
from gd.model import SongModel # type: ignore
from gd.text_utils import make_repr
from gd.typing import IO, TYPE_CHECKING, Any, AsyncIterator, Dict, Iterable, Optional, Union
if TYPE_CHECKING:
from gd.client import Client # noqa
__all__ = (
"ArtistInfo",
"Author",
"Song",
"default_song",
"official_client_songs",
"official_server_songs",
)
class Song(AbstractEntity):
"""Class that represents Geometry Dash/Newgrounds songs.
This class is derived from :class:`~gd.AbstractEntity`.
"""
def __repr__(self) -> str:
info = {"id": self.id, "name": repr(self.name), "author": repr(self.author)}
return make_repr(self, info)
def __str__(self) -> str:
return str(self.name)
@classmethod
def from_model( # type: ignore
cls, model: SongModel, *, client: Optional["Client"] = None, custom: bool = True
) -> "Song":
return cls(
id=model.id,
name=model.name,
size=model.size,
author=model.author,
download_link=model.download_link,
custom=custom,
client=client,
)
@property
    def name(self) -> str:
""":class:`str`: A name of the song."""
return self.options.get("name", "")
@property
def size(self) -> float:
""":class:`float`: A float representing size of the song, in megabytes."""
return self.options.get("size", 0.0)
@property
def author(self) -> str:
""":class:`str`: An author of the song."""
return self.options.get("author", "")
@property
def link(self) -> str:
""":class:`str`: A link to the song on Newgrounds, e.g. ``.../audio/listen/<id>``."""
if self.is_custom():
return NEWGROUNDS_SONG_LISTEN.format(song_id=self.id)
return ""
@property
def download_link(self) -> str:
""":class:`str`: A link to download the song, used in :meth:`.Song.download`."""
return self.options.get("download_link", "")
def is_custom(self) -> bool:
""":class:`bool`: Indicates whether the song is custom or not."""
return bool(self.options.get("custom", True))
@classmethod
def official(
cls,
id: Optional[int] = None,
name: Optional[str] = None,
index: Optional[int] = None,
server_style: bool = True,
return_default: bool = True,
*,
client: Optional["Client"] = None,
) -> "Song":
official_songs = official_server_songs if server_style else official_client_songs
if id is not None:
official_song = iter(official_songs).get(id=id)
elif name is not None:
official_song = iter(official_songs).get(name=name)
elif index is not None:
try:
official_song = official_songs[index]
except (IndexError, ValueError, TypeError):
official_song = None
else:
raise ValueError("Expected either of queries: id, name or index.")
if official_song is None:
if return_default:
official_song = get_default_song(id)
else:
raise LookupError("Could not find official level by given query.")
return cls(
id=official_song.id,
name=official_song.name,
size=0.0,
author=official_song.author,
custom=False,
client=client,
)
def get_author(self) -> "Author":
""":class:`~gd.Author`: Author of the song."""
if not self.is_custom():
raise MissingAccess("Can not get author of an official song.")
return Author(name=self.author, client=self.client)
async def update(self, from_ng: bool = False) -> None:
"""Update the song.
Parameters
----------
from_ng: :class:`bool`
Whether to fetch song from Newgrounds.
Raises
------
:exc:`~gd.MissingAccess`
Failed to find the song.
:exc:`~gd.HTTPStatusError`
Server returned error status code.
:exc:`~gd.HTTPError`
Failed to process the request.
"""
if from_ng:
new = await self.client.get_ng_song(self.id)
else:
new = await self.client.get_song(self.id)
self.options.update(new.options)
async def get_artist_info(self) -> "ArtistInfo":
"""Fetch artist info of ``self``.
Acts like the following:
.. code-block:: python3
await client.get_artist_info(song.id)
Raises
------
:exc:`~gd.MissingAccess`
Failed to find artist info.
:exc:`~gd.HTTPStatusError`
Server returned error status code.
:exc:`~gd.HTTPError`
Failed to process the request.
Returns
-------
:class:`~gd.ArtistInfo`
Fetched info about an artist.
"""
if not self.is_custom(): # pragma: no cover
return ArtistInfo(
id=self.id,
artist=self.author,
song=self.name,
whitelisted=True,
scouted=True,
api=True,
custom=False,
client=self.client_unchecked,
)
return await self.client.get_artist_info(self.id)
async def download(
self, file: Optional[Union[str, Path, IO]] = None, with_bar: bool = False,
) -> Optional[bytes]:
"""Download a song from Newgrounds.
Parameters
----------
file: Optional[Union[:class:`str`, :class:`~pathlib.Path`, IO]]
File-like or Path-like object to write song to, instead of returning bytes.
with_bar: :class:`bool`
Whether to show a progress bar while downloading.
Requires ``tqdm`` to be installed.
Raises
------
:exc:`~gd.MissingAccess`
Can not download the song because it is official or not found.
:exc:`~gd.HTTPStatusError`
Server returned error status code.
:exc:`~gd.HTTPError`
Failed to process the request.
Returns
-------
Optional[:class:`bytes`]
A song as bytes, if ``file`` was not specified.
"""
if not self.is_custom():
raise MissingAccess("Song is official. Can not download.")
if not self.download_link:
# load song from newgrounds if there is no link
await self.update(from_ng=True)
return await self.client.http.download(self.download_link, file=file, with_bar=with_bar)
class ArtistInfo(AbstractEntity):
"""Class that represents info about the creator of a particular song."""
def __str__(self) -> str:
return str(self.artist)
def __repr__(self) -> str:
info = {
"id": self.id,
"artist": repr(self.artist),
"song": repr(self.song),
"is_scouted": self.is_scouted(),
"is_whitelisted": self.is_whitelisted(),
"exists": self.exists,
}
return make_repr(self, info)
def to_dict(self) -> Dict[str, Any]:
result = super().to_dict()
result.update(exists=self.exists)
return result
@property
def artist(self) -> str:
""":class:`str`: Author of the song."""
return self.options.get("artist", "")
@property
def song(self) -> str:
""":class:`str`: A name of the song."""
return self.options.get("song", "")
@property
def exists(self) -> bool:
""":class:`bool`: Whether the song exists."""
return bool(self.artist and self.song)
def is_scouted(self) -> bool:
""":class:`bool`: Whether the artist is scouted."""
return bool(self.options.get("scouted"))
def is_whitelisted(self) -> bool:
""":class:`bool`: Whether the artist is whitelisted."""
return bool(self.options.get("whitelisted"))
def api_allowed(self) -> bool:
""":class:`bool`: Whether the external API is allowed."""
return bool(self.options.get("api"))
def is_custom(self) -> bool:
""":class:`bool`: Whether the song is custom."""
return bool(self.options.get("custom", True))
def get_author(self) -> "Author":
""":class:`~gd.Author` of the song."""
if not self.is_custom():
raise MissingAccess("Can not get author of an official song.")
return Author(name=self.artist, client=self.client)
author = property(get_author)
async def update(self) -> None:
"""Update artist info.
Raises
------
:exc:`~gd.MissingAccess`
Failed to find artist info.
:exc:`~gd.HTTPStatusError`
Server returned error status code.
:exc:`~gd.HTTPError`
Failed to process the request.
"""
new = await self.client.get_artist_info(self.id)
self.options.update(new.options)
class Author(AbstractEntity):
"""Class that represents an author on Newgrounds.
This class is derived from :class:`~gd.AbstractEntity`.
"""
def __repr__(self) -> str:
info = {"name": repr(self.name), "link": repr(self.link)}
return make_repr(self, info)
def __str__(self) -> str:
return str(self.name)
@property
def id(self) -> int:
""":class:`int`: ID of the Author."""
return hash(self.name) | hash(self.link)
@property
def link(self) -> URL:
""":class:`~yarl.URL`: URL to author's page."""
return URL(self.options.get("link", f"https://{self.name}.newgrounds.com/"))
@property
def name(self) -> str:
""":class:`str`: Name of the author."""
return self.options.get("name", "")
@awaitable_iterator
def get_page_songs(self, page: int = 0) -> AsyncIterator[Song]:
"""Get songs on the page.
Parameters
----------
page: :class:`int`
Page of songs to look at.
Raises
------
:exc:`~gd.MissingAccess`
Failed to find songs.
:exc:`~gd.HTTPStatusError`
Server returned error status code.
:exc:`~gd.HTTPError`
Failed to process the request.
Returns
-------
AsyncIterator[:class:`~gd.Song`]
Songs found.
"""
return self.client.get_ng_user_songs_on_page(self, page=page)
@awaitable_iterator
def get_songs(self, pages: Iterable[int] = range(10)) -> AsyncIterator[Song]:
"""Get songs on the pages.
Parameters
----------
pages: Iterable[:class:`int`]
Pages of songs to look at.
Raises
------
:exc:`~gd.MissingAccess`
Failed to find songs.
:exc:`~gd.HTTPStatusError`
Server returned error status code.
:exc:`~gd.HTTPError`
Failed to process the request.
Returns
-------
AsyncIterator[:class:`~gd.Song`]
Songs found.
"""
return self.client.get_ng_user_songs(self, pages=pages)
@dataclass
class OfficialSong:
id: int = attrib()
author: str = attrib()
name: str = attrib()
default_song = OfficialSong(id=0, author="DJVI", name="Unknown")
def get_default_song(id: Optional[int] = None) -> OfficialSong:
if id is None:
return default_song
return OfficialSong(id=id, author=default_song.author, name=default_song.name)
official_client_songs = (
OfficialSong(id=0, author="OcularNebula", name="Practice: Stay Inside Me"),
OfficialSong(id=1, author="ForeverBound", name="Stereo Madness"),
OfficialSong(id=2, author="DJVI", name="Back On Track"),
OfficialSong(id=3, author="Step", name="Polargeist"),
OfficialSong(id=4, author="DJVI", name="Dry Out"),
OfficialSong(id=5, author="DJVI", name="Base After Base"),
OfficialSong(id=6, author="DJVI", name="Cant Let Go"),
OfficialSong(id=7, author="Waterflame", name="Jumper"),
OfficialSong(id=8, author="Waterflame", name="Time Machine"),
OfficialSong(id=9, author="DJVI", name="Cycles"),
OfficialSong(id=10, author="DJVI", name="xStep"),
OfficialSong(id=11, author="Waterflame", name="Clutterfunk"),
OfficialSong(id=12, author="DJ-Nate", name="Theory of Everything"),
OfficialSong(id=13, author="Waterflame", name="Electroman Adventures"),
OfficialSong(id=14, author="DJ-Nate", name="Clubstep"),
OfficialSong(id=15, author="DJ-Nate", name="Electrodynamix"),
OfficialSong(id=16, author="Waterflame", name="Hexagon Force"),
OfficialSong(id=17, author="Waterflame", name="Blast Processing"),
OfficialSong(id=18, author="DJ-Nate", name="Theory of Everything 2"),
OfficialSong(id=19, author="Waterflame", name="Geometrical Dominator"),
OfficialSong(id=20, author="F-777", name="Deadlocked"),
OfficialSong(id=21, author="MDK", name="Fingerdash"),
OfficialSong(id=22, author="F-777", name="The Seven Seas"),
OfficialSong(id=23, author="F-777", name="Viking Arena"),
OfficialSong(id=24, author="F-777", name="Airborne Robots"),
OfficialSong(id=25, author="RobTop", name="Secret"), # aka DJRubRub, LOL
OfficialSong(id=26, author="<NAME>", name="Payload"),
OfficialSong(id=27, author="<NAME>", name="Beast Mode"),
OfficialSong(id=28, author="<NAME>", name="Machina"),
OfficialSong(id=29, author="<NAME>", name="Years"),
OfficialSong(id=30, author="<NAME>", name="Frontlines"),
OfficialSong(id=31, author="Waterflame", name="Space Pirates"),
OfficialSong(id=32, author="Waterflame", name="Striker"),
OfficialSong(id=33, author="<NAME>", name="Embers"),
OfficialSong(id=34, author="<NAME>", name="Round 1"),
OfficialSong(id=35, author="F-777", name="Monster Dance Off"),
OfficialSong(id=36, author="MDK", name="Press Start"),
OfficialSong(id=37, author="Bossfight", name="Nock Em"),
OfficialSong(id=38, author="<NAME>", name="Power Trip"),
)
official_server_songs = tuple(
OfficialSong(id=official_song.id - 1, author=official_song.author, name=official_song.name)
for official_song in official_client_songs
)
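# Hedged usage sketch (not part of the original module, assuming
# iter(...).get returns None when no match is found): Song.official falls
# back to the default song when return_default is True.
# Song.official(id=1, server_style=False).name   -> "Stereo Madness"
# Song.official(id=10**6).name                   -> "Unknown" (default song)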
|
488455
|
import cv2
import numpy as np
import math
import time
from .utilities import Distance, Distance_
from ...config import config
def BwareaOpen(img,MinArea):
thresh = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY)[1]
# Filter using contour area and remove small noise
cnts = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[1]
cnts_TooSmall = []
for index, cnt in enumerate(cnts):
area = cv2.contourArea(cnt)
if area < MinArea:
cnts_TooSmall.append(cnt)
thresh = cv2.drawContours(thresh, cnts_TooSmall, -1, 0, -1) # [ contour = less then minarea contour, contourIDx, Colour , Thickness ]
return thresh
def FindExtremas(img):
positions = np.nonzero(img) # position[0] 0 = rows 1 = cols
    if (len(positions[0])!=0):
top = positions[0].min()
bottom = positions[0].max()
left = positions[1].min()
right = positions[1].max()
return top,bottom
else:
return 0,0
def FindLowestRow(img):
positions = np.nonzero(img) # position[0] 0 = rows 1 = cols
    if (len(positions[0])!=0):
top = positions[0].min()
bottom = positions[0].max()
left = positions[1].min()
right = positions[1].max()
return bottom
else:
return img.shape[0]
def RetLargestContour(gray):
LargestContour_Found = False
thresh=np.zeros(gray.shape,dtype=gray.dtype)
_,bin_img = cv2.threshold(gray,0,255,cv2.THRESH_BINARY)
#Find the two Contours for which you want to find the min distance between them.
cnts = cv2.findContours(bin_img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[1]
Max_Cntr_area = 0
Max_Cntr_idx= -1
for index, cnt in enumerate(cnts):
area = cv2.contourArea(cnt)
if area > Max_Cntr_area:
Max_Cntr_area = area
Max_Cntr_idx = index
LargestContour_Found = True
if (Max_Cntr_idx!=-1):
thresh = cv2.drawContours(thresh, cnts, Max_Cntr_idx, (255,255,255), -1) # [ contour = less then minarea contour, contourIDx, Colour , Thickness ]
return thresh, LargestContour_Found
def RetLargestContour_OuterLane(gray,minArea):
LargestContour_Found = False
thresh=np.zeros(gray.shape,dtype=gray.dtype)
_,bin_img = cv2.threshold(gray,0,255,cv2.THRESH_BINARY)
#################################### TESTING SHADOW BREAKER CODE BY DILATING####################
# 3. Dilating Segmented ROI's
kernel = cv2.getStructuringElement(shape=cv2.MORPH_ELLIPSE, ksize=(5,5))
    bin_img_dilated = cv2.morphologyEx(bin_img, cv2.MORPH_DILATE, kernel)  # dilate to bridge gaps caused by shadows
    bin_img_ret = cv2.morphologyEx(bin_img_dilated, cv2.MORPH_ERODE, kernel)  # erode back to the original lane thickness
bin_img = bin_img_ret
#################################### TESTING SHADOW BREAKER CODE BY DILATING####################
cnts = cv2.findContours(bin_img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[1]
Max_Cntr_area = 0
Max_Cntr_idx= -1
for index, cnt in enumerate(cnts):
area = cv2.contourArea(cnt)
if area > Max_Cntr_area:
Max_Cntr_area = area
Max_Cntr_idx = index
LargestContour_Found = True
if Max_Cntr_area < minArea:
LargestContour_Found = False
if ((Max_Cntr_idx!=-1) and (LargestContour_Found)):
thresh = cv2.drawContours(thresh, cnts, Max_Cntr_idx, (255,255,255), -1) # [ contour = less then minarea contour, contourIDx, Colour , Thickness ]
return thresh, LargestContour_Found
def ROI_extracter(image,strtPnt,endPnt):
# Selecting Only ROI from Image
ROI_mask = np.zeros(image.shape, dtype=np.uint8)
cv2.rectangle(ROI_mask,strtPnt,endPnt,255,thickness=-1)
#image_ROI = cv2.bitwise_and(image,image,mask=ROI_mask)
image_ROI = cv2.bitwise_and(image,ROI_mask)
return image_ROI
def ExtractPoint(img,specified_row):
Point= (0,specified_row)
specified_row_data = img[ specified_row-1,:]
#print("specified_row_data",specified_row_data)
positions = np.nonzero(specified_row_data) # position[0] 0 = rows 1 = cols
#print("positions",positions)
#print("len(positions[0])",len(positions[0]))
if (len(positions[0])!=0):
#print(positions[0])
min_col = positions[0].min()
Point=(min_col,specified_row)
return Point
def Ret_LowestEdgePoints(gray):
Outer_Points_list=[]
thresh = np.zeros(gray.shape,dtype=gray.dtype)
Lane_OneSide=np.zeros(gray.shape,dtype=gray.dtype)
Lane_TwoSide=np.zeros(gray.shape,dtype=gray.dtype)
_,bin_img = cv2.threshold(gray,0,255,cv2.THRESH_BINARY)
#Find the two Contours for which you want to find the min distance between them.
cnts = cv2.findContours(bin_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[1]
thresh = cv2.drawContours(thresh, cnts, 0, (255,255,255), 1) # [ contour = less then minarea contour, contourIDx, Colour , Thickness ]
# Boundary of the Contour is extracted and Saved in Thresh
Top_Row,Bot_Row = FindExtremas(thresh)
Contour_TopBot_PortionCut = ROI_extracter(thresh,(0, Top_Row + 5),(thresh.shape[1],Bot_Row-5))
cnts2 = cv2.findContours(Contour_TopBot_PortionCut, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[1]
LowRow_a=-1
LowRow_b=-1
Euc_row=0# Row for the points to be compared
First_line = np.copy(Lane_OneSide)
cnts_tmp = []
if(len(cnts2)>1):
for index_tmp, cnt_tmp in enumerate(cnts2):
if((cnt_tmp.shape[0])>50):
cnts_tmp.append(cnt_tmp)
cnts2 = cnts_tmp
for index, cnt in enumerate(cnts2):
Lane_OneSide = np.zeros(gray.shape,dtype=gray.dtype)
Lane_OneSide = cv2.drawContours(Lane_OneSide, cnts2, index, (255,255,255), 1) # [ contour = less then minarea contour, contourIDx, Colour , Thickness ]
Lane_TwoSide = cv2.drawContours(Lane_TwoSide, cnts2, index, (255,255,255), 1) # [ contour = less then minarea contour, contourIDx, Colour , Thickness ]
if(len(cnts2)==2):
if (index==0):
First_line = np.copy(Lane_OneSide)
LowRow_a = FindLowestRow(Lane_OneSide)
elif(index==1):
LowRow_b = FindLowestRow(Lane_OneSide)
if(LowRow_a<LowRow_b):# First index is shorter
Euc_row=LowRow_a
else:
Euc_row=LowRow_b
#print("Euc_row",Euc_row)
#cv2.namedWindow("First_line",cv2.WINDOW_NORMAL)
#cv2.imshow("First_line",First_line)
#cv2.waitKey(0)
Point_a = ExtractPoint(First_line,Euc_row)
Point_b = ExtractPoint(Lane_OneSide,Euc_row)
Outer_Points_list.append(Point_a)
Outer_Points_list.append(Point_b)
return Lane_TwoSide, Outer_Points_list
def ApproxDistBWCntrs(cnt,cnt_cmp):
# compute the center of the contour
M = cv2.moments(cnt)
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
# compute the center of the contour
M_cmp = cv2.moments(cnt_cmp)
cX_cmp = int(M_cmp["m10"] / M_cmp["m00"])
cY_cmp = int(M_cmp["m01"] / M_cmp["m00"])
minDist=Distance_((cX,cY),(cX_cmp,cY_cmp))
Centroid_a=(cX,cY)
Centroid_b=(cX_cmp,cY_cmp)
return minDist,Centroid_a,Centroid_b
def Estimate_MidLane(BW,MaxDistance):
#cv2.namedWindow("BW_zero",cv2.WINDOW_NORMAL)
BW_zero= cv2.cvtColor(BW,cv2.COLOR_GRAY2BGR)
#Find the two Contours for which you want to find the min distance between them.
cnts= cv2.findContours(BW, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[1]#3ms
MinArea=1
cnts_Legit=[]
for index, _ in enumerate(cnts):
area = cv2.contourArea(cnts[index])
if area > MinArea:
cnts_Legit.append(cnts[index])
cnts=cnts_Legit
# Cycle through each point in the Two contours & find the distance between them.
# Take the minimum Distance by comparing all other distances & Mark that Points.
CntIdx_BstMatch = []# [BstMatchwithCnt0,BstMatchwithCnt1,....]
Closests_Pixels_list = []
#200msec
for index, cnt in enumerate(cnts):
prevmin_dist = 100000
Bstindex_cmp = 0
#BstClosests_Pixels =0
BstCentroid_a=0
BstCentroid_b=0
for index_cmp in range(len(cnts)-index):
index_cmp = index_cmp + index
cnt_cmp = cnts[index_cmp]
if (index!=index_cmp):
min_dist,Centroid_a,Centroid_b = ApproxDistBWCntrs(cnt,cnt_cmp)
#Closests_Pixels=(cnt[min_dstPix_Idx[0]],cnt_cmp[min_dstPix_Idx[1]])
if(min_dist < prevmin_dist):
if (len(CntIdx_BstMatch)==0):
prevmin_dist = min_dist
Bstindex_cmp = index_cmp
#BstClosests_Pixels = Closests_Pixels
BstCentroid_a=Centroid_a
BstCentroid_b=Centroid_b
else:
Present= False
for i in range(len(CntIdx_BstMatch)):
if ( (index_cmp == i) and (index == CntIdx_BstMatch[i]) ):
Present= True
if not Present:
prevmin_dist = min_dist
Bstindex_cmp = index_cmp
#BstClosests_Pixels = Closests_Pixels
BstCentroid_a=Centroid_a
BstCentroid_b=Centroid_b
if ((prevmin_dist!=100000 ) and (prevmin_dist>MaxDistance)):
break
if (type(BstCentroid_a)!=int):
CntIdx_BstMatch.append(Bstindex_cmp)
#Closests_Pixels_list.append(BstClosests_Pixels)
#cv2.line(BW_zero,(BstClosests_Pixels[0][0][0],BstClosests_Pixels[0][0][1]),(BstClosests_Pixels[1][0][0],BstClosests_Pixels[1][0][1]),(0,0,255),thickness=2)
cv2.line(BW_zero,BstCentroid_a,BstCentroid_b,(0,255,0),thickness=2)
#cv2.imshow("BW_zero",BW_zero)
#cv2.imwrite("D:/Had_LuQ/MidlaneClosestJoined.png",BW_zero)
BW_zero = cv2.cvtColor(BW_zero,cv2.COLOR_BGR2GRAY)
BW_Largest,Largest_found = RetLargestContour(BW_zero)#3msec
if(Largest_found):
return BW_Largest
else:
return BW
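# Hedged usage sketch (not part of the original module; assumes OpenCV 3.x,
# where cv2.findContours returns (image, contours, hierarchy) as indexed
# above): join two nearby midlane blobs and keep the largest joined contour.
# mask = np.zeros((240, 320), dtype=np.uint8)
# cv2.circle(mask, (160, 100), 5, 255, -1)
# cv2.circle(mask, (160, 140), 5, 255, -1)
# midlane = Estimate_MidLane(mask, MaxDistance=60)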
|
488504
|
from typing import List
from pydantic import BaseModel, Extra
class A(BaseModel):
class Config:
extra = Extra.ignore
A(a='123')
class B(BaseModel):
a: str
class Config:
extra = Extra.forbid
B(a='abc', <error descr="'b' extra fields not permitted">b='123'</error>)
class C(BaseModel):
id: int
class D(C):
data: List[int]
class Config:
extra = Extra.forbid
d = D(id=1, data=[1, 2, 3])
|
488538
|
import komand
from .schema import CardLookupInput, CardLookupOutput
# Custom imports below
import minfraud
class CardLookup(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="card_lookup",
description="Query credit card info",
input=CardLookupInput(),
output=CardLookupOutput(),
)
def run(self, params={}): # noqa: MC0001
address = params.get("address")
issuer_id_number = params.get("card_issuer_id_number")
last_4_digits = params.get("card_last_4_digits")
token = params.get("card_token")
bank_name = params.get("card_bank_name")
bank_phone_country_code = params.get("bank_phone_country_code")
bank_phone_number = params.get("bank_phone_number")
avs_result = params.get("avs_result")
cvv_result = params.get("cvv_result")
user = self.connection.user
license = self.connection.license
# Set client
client = minfraud.Client(user, license)
# define request
request = {"device": {"ip_address": address}}
credit_card = {}
if issuer_id_number:
credit_card["issuer_id_number"] = issuer_id_number
if last_4_digits:
credit_card["last_4_digits"] = last_4_digits
if token:
credit_card["token"] = token
if bank_name:
credit_card["bank_name"] = bank_name
if bank_phone_country_code:
credit_card["bank_phone_country_code"] = bank_phone_country_code
if bank_phone_number:
credit_card["bank_phone_number"] = bank_phone_number
if avs_result:
credit_card["avs_result"] = avs_result
if cvv_result:
credit_card["cvv_result"] = cvv_result
# Add credit_card dict to request
if credit_card:
request["credit_card"] = credit_card
else:
self.logger.info("No credit card info provided")
try:
# Generate Request
insights = client.insights(request)
except minfraud.AuthenticationError:
self.logger.error("Authentication failed")
raise
except minfraud.InsufficientFundsError:
self.logger.error("Insufficient funds")
raise
except minfraud.InvalidRequestError:
self.logger.error("Invalid request")
raise
except minfraud.HttpError:
self.logger.error("Unexpected HTTP error occurred")
raise
except minfraud.MinFraudError:
self.logger.error("Unexpected content received from server")
raise
# Overall risk score
risk_score = str(insights.risk_score)
# Issuer info
name = str(insights.credit_card.issuer.name)
matches_provided_name = insights.credit_card.issuer.matches_provided_name
phone_number = str(insights.credit_card.issuer.phone_number)
matches_provided_phone_number = insights.credit_card.issuer.matches_provided_phone_number
issuer = {
"name": name,
"matches_provided_name": matches_provided_name,
"phone_number": phone_number,
"matches_provided_phone_number": matches_provided_phone_number,
}
# Clean issuer dict
issuer = komand.helper.clean_dict(issuer)
# Additional info
brand = str(insights.credit_card.brand)
country = str(insights.credit_card.country)
is_issued_in_billing_address_country = insights.credit_card.is_issued_in_billing_address_country
is_prepaid = insights.credit_card.is_prepaid
type = insights.credit_card.type
credit_card = {
"brand": brand,
"country": country,
"is_issued_in_billing_address_country": is_issued_in_billing_address_country,
"is_prepaid": is_prepaid,
"type": type,
}
# Clean additional dict
credit_card = komand.helper.clean_dict(credit_card)
# Combine dicts
credit_card_result = {"issuer": issuer, "credit_card": credit_card}
# Clean issuer_result dict
credit_card_result = komand.helper.clean_dict(credit_card_result)
return {"risk_score": risk_score, "credit_card_result": credit_card_result}
def test(self):
user = self.connection.user
license = self.connection.license
# Set client
client = minfraud.Client(user, license)
# Define request
request = {"device": {"ip_address": "8.8.8.8"}}
try:
# Generate request
insights = client.insights(request)
except minfraud.AuthenticationError:
self.logger.error("Authentication failed")
raise
except minfraud.InsufficientFundsError:
self.logger.error("Insufficient funds")
raise
return {}
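# Hedged illustration (not part of the plugin): for a minimal set of inputs,
# the minFraud request assembled in run() has the shape
# {
#     "device": {"ip_address": "198.51.100.1"},
#     "credit_card": {"issuer_id_number": "411111", "last_4_digits": "1234"},
# }
# and the action returns the overall risk_score plus the cleaned
# issuer / credit_card dictionaries built above.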
|
488562
|
from __future__ import division
from __future__ import print_function
import os
import sys
# dirty hack: include top level folder to path
sys.path.insert(0,
os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
)
import numpy as np
from numpy import linalg as LA
import networkx as nx
from torch_geometric.data import Data
from torch_geometric.nn import MessagePassing
import torch
import torch.nn as nn
from torch_scatter import scatter_add
from models.blocks import NodeBlock, NodeBlockInd, EdgeBlock, RecurrentUpdateNet
from models.utils import SeriesModel, make_mlp, replace_graph
class GradientLayer(nn.Module):
def __init__(self, net=None, kernel_param='12', kernel_feature='both'):
super(GradientLayer, self).__init__()
self.net = net
self.kernel_param = kernel_param
self.kernel_feature = kernel_feature
def _forward_one_net(self, net, x, edge_index, target='out'):
x_src, x_dst = x[edge_index[0]], x[edge_index[1]]
if net is None:
out = x_dst - x_src
# net_out = torch.ones(edge_index.shape[1], 1).to(x.device)
net_out = x.new_ones(edge_index.shape[1], 2)
else:
if self.kernel_feature == 'both':
net_input = torch.cat((x_dst, x_src), dim=-1)
elif self.kernel_feature == 'src':
net_input = x_src
elif self.kernel_feature == 'dst':
net_input = x_dst
net_out = net(net_input)
# out = x_dst - net_out * x_src
# out = net_out * (x_dst - x_src)
net_out = net_out.reshape(-1, 2)
net_out_ones = torch.ones_like(net_out)
if self.kernel_param == '1':
net_out = torch.cat((net_out[:, 0:1], net_out_ones[:, 1:2]), dim=-1)
elif self.kernel_param == '2':
net_out = torch.cat((net_out_ones[:, 0:1], net_out[:, 1:2]), dim=-1)
out = net_out[:, 0:1] * (x_dst - net_out[:, 1:2] * x_src)
if target == 'out':
return out
elif target == 'net_out':
return net_out
else:
raise NotImplementedError()
def forward(self, x, edge_index):
if isinstance(self.net, nn.ModuleList):
out_list = [self._forward_one_net(net, x, edge_index, 'out') for net in self.net]
return torch.cat(out_list, dim=-1)
else:
return self._forward_one_net(self.net, x, edge_index, 'out')
def get_net_out(self, x, edge_index):
if isinstance(self.net, nn.ModuleList):
net_out_list = [self._forward_one_net(net, x, edge_index, 'net_out') for net in self.net]
return torch.cat(net_out_list, dim=-1)
else:
return self._forward_one_net(self.net, x, edge_index, 'net_out')
class LaplacianLayer(MessagePassing):
def __init__(self, net=None, kernel_param='12', kernel_feature='both'):
super(LaplacianLayer, self).__init__(aggr='add', flow='source_to_target')
self.net = net
self.kernel_param = kernel_param
self.kernel_feature = kernel_feature
def _message_one_net(self, net, x_i, x_j):
if net is None:
return x_i - x_j
else:
if self.kernel_feature == 'both':
net_input = torch.cat((x_i, x_j), dim=-1)
elif self.kernel_feature == 'src':
net_input = x_i
elif self.kernel_feature == 'dst':
net_input = x_j
net_out = net(net_input)
net_out = net_out.reshape(-1, 2)
net_out_ones = torch.ones_like(net_out)
if self.kernel_param == '1':
net_out = torch.cat((net_out[:, 0:1], net_out_ones[:, 1:2]), dim=-1)
elif self.kernel_param == '2':
net_out = torch.cat((net_out_ones[:, 0:1], net_out[:, 1:2]), dim=-1)
return net_out[:, 0:1] * (x_i - net_out[:, 1:2] * x_j)
def forward(self, x, edge_index):
return self.propagate(edge_index, size=(x.size(0), x.size(0)), x=x)
def message(self, x_i, x_j):
if isinstance(self.net, nn.ModuleList):
message_list = [self._message_one_net(net, x_i, x_j) for net in self.net]
return torch.cat(message_list, dim=-1)
else:
return self._message_one_net(self.net, x_i, x_j)
def update(self, aggr_out):
return aggr_out
def _get_one_net_out(self, net, x, edge_index):
if net is None:
return x.new_ones(edge_index.shape[1], 2)
else:
x_src, x_dst = x[edge_index[0]], x[edge_index[1]]
if self.kernel_feature == 'both':
net_input = torch.cat((x_src, x_dst), dim=-1)
elif self.kernel_feature == 'src':
net_input = x_src
elif self.kernel_feature == 'dst':
net_input = x_dst
net_out = net(net_input)
net_out = net_out.reshape(-1, 2)
net_out_ones = torch.ones_like(net_out)
if self.kernel_param == '1':
net_out = torch.cat((net_out[:, 0:1], net_out_ones[:, 1:2]), dim=-1)
elif self.kernel_param == '2':
net_out = torch.cat((net_out_ones[:, 0:1], net_out[:, 1:2]), dim=-1)
return net_out
def get_net_out(self, x, edge_index):
if isinstance(self.net, nn.ModuleList):
net_out_list = [self._get_one_net_out(net, x, edge_index) for net in self.net]
return torch.cat(net_out_list, dim=-1)
else:
return self._get_one_net_out(self.net, x, edge_index)
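# Hedged usage sketch (not part of the original module): with net=None both
# layers reduce to plain graph differences.
# x = torch.randn(4, 3)                              # 4 nodes, 3 features
# edge_index = torch.tensor([[0, 1, 2], [1, 2, 3]])  # 3 directed edges
# GradientLayer()(x, edge_index).shape               # -> [3, 3]: x_dst - x_src per edge
# LaplacianLayer()(x, edge_index).shape              # -> [4, 3]: sum of (x_i - x_j) per target node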
class MultiLayerRecurrentGN(nn.Module):
def __init__(self, gn_layers):
super(MultiLayerRecurrentGN, self).__init__()
self.gn_layers = nn.ModuleList(gn_layers)
self.gn_layer_num = len(gn_layers)
def forward(self, graph):
# 1st dim is layer rank
node_hidden_list = graph.node_hidden
edge_hidden_list = graph.edge_hidden
updated_node_hidden_list = []
updated_edge_hidden_list = []
assert len(node_hidden_list) == self.gn_layer_num
graph_li = replace_graph(graph)
for li in range(self.gn_layer_num):
graph_li = replace_graph(graph_li, node_hidden=node_hidden_list[li], edge_hidden=edge_hidden_list[li])
graph_li = self.gn_layers[li](graph_li)
updated_node_hidden_list.append(graph_li.node_hidden)
updated_edge_hidden_list.append(graph_li.edge_hidden)
graph = replace_graph(graph_li,
node_hidden=torch.stack(updated_node_hidden_list, dim=0),
edge_hidden=torch.stack(updated_edge_hidden_list, dim=0))
return graph
class PDGN(SeriesModel):
def __init__(self,
input_dim,
output_dim,
hidden_dim_pde,
hidden_dim_gn,
input_frame_num,
skip_first_frames_num,
mode,
recurrent,
layer_num,
gn_layer_num=1,
edge_final_dim=None,
nophysics_mode=None,
use_dist=False,
pde_params_out_dim=None,
use_time_grad=True,
use_edge_grad=True,
use_laplacian=True,
use_pde_params=True,
learnable_edge_grad=False,
learnable_edge_grad_kernel_num=1,
learnable_laplacian=False,
learnable_laplacian_kernel_num=1,
grad_kernel_param_loc='12',
grad_kernel_feature='both',
laplacian_kernel_param_loc='12',
laplacian_kernel_feature='both',
node_meta_dim=0,
predict_model='GN'):
super(PDGN, self).__init__(input_dim, output_dim, input_frame_num,
skip_first_frames_num, is_recurrent=recurrent)
#### For learnable parameters
self.node_dim = input_dim # for easy understanding
self.edge_dim = input_dim # for easy understanding
self.hidden_dim_pde = hidden_dim_pde
self.hidden_dim_gn = hidden_dim_gn
self.layer_num = layer_num
self.gn_layer_num = gn_layer_num
#### For PDE
self.mode = mode
self.nophysics_mode = nophysics_mode
self.use_dist = use_dist
if self.use_dist:
self.edge_dist_dim = 1
else:
self.edge_dist_dim = 0
if pde_params_out_dim is None:
if mode == 'diff':
pde_params_out_dim = self.node_dim
elif mode == 'adv':
pde_params_out_dim = self.edge_dim
# use time grad
self.use_time_grad = use_time_grad
self.time_grad_dim = self.node_dim if self.use_time_grad else 0
# use edge grad
self.use_edge_grad = use_edge_grad
self.edge_grad_dim = self.edge_dim if self.use_edge_grad else 0
# use laplacian
self.use_laplacian = use_laplacian
self.laplacian_dim = self.node_dim if self.use_laplacian else 0
# use pde params
self.use_pde_params = use_pde_params
self.pde_params_dim = pde_params_out_dim if self.use_pde_params else 0
self.learnable_edge_grad = learnable_edge_grad
self.learnable_edge_grad_kernel_num = learnable_edge_grad_kernel_num
self.learnable_laplacian = learnable_laplacian
self.learnable_laplacian_kernel_num = learnable_laplacian_kernel_num
if self.learnable_edge_grad:
if grad_kernel_feature == 'both':
grad_net_input_mult = 2
else:
grad_net_input_mult = 1
grad_net_list = [
make_mlp(grad_net_input_mult * (self.node_dim + node_meta_dim), self.hidden_dim_pde, self.node_dim * 2, self.layer_num, activation='SELU')
for _ in range(self.learnable_edge_grad_kernel_num)
]
self.gradient_layer = GradientLayer(
net=nn.ModuleList(grad_net_list), kernel_param=grad_kernel_param_loc, kernel_feature=grad_kernel_feature
)
self.edge_grad_dim = self.edge_grad_dim * self.learnable_edge_grad_kernel_num
else:
self.gradient_layer = GradientLayer()
if self.learnable_laplacian:
if laplacian_kernel_feature == 'both':
laplacian_net_input_mult = 2
else:
laplacian_net_input_mult = 1
laplacian_net_list = [
make_mlp(laplacian_net_input_mult * (self.node_dim + node_meta_dim), self.hidden_dim_pde, self.node_dim * 2, self.layer_num, activation='SELU')
for _ in range(self.learnable_laplacian_kernel_num)
]
self.laplacian_layer = LaplacianLayer(
net=nn.ModuleList(laplacian_net_list), kernel_param=laplacian_kernel_param_loc, kernel_feature=laplacian_kernel_feature
)
self.laplacian_dim = self.laplacian_dim * self.learnable_laplacian_kernel_num
else:
self.laplacian_layer = LaplacianLayer()
def _get_pde_specific_parts(mode):
if mode == 'diff':
if self.use_pde_params:
pde_mlp = make_mlp(2 * self.node_dim, self.hidden_dim_pde,
self.pde_params_dim, self.layer_num, activation='ReLU')
pde_net = NodeBlockInd(2 * self.node_dim,
self.pde_params_dim,
custom_func=pde_mlp) # This is actually a simple SingleMLP.
else:
pde_mlp, pde_net = None, None
first_gn_node_dim_in, first_gn_edge_dim_in = \
self.node_dim + self.laplacian_dim + self.time_grad_dim + self.pde_params_dim, self.edge_grad_dim
elif mode == 'adv':
if self.use_pde_params:
pde_mlp = make_mlp(2 * self.node_dim + self.edge_dim, self.hidden_dim_pde, self.pde_params_dim,
self.layer_num, activation='ReLU')
pde_net = EdgeBlock(2 * self.node_dim + self.edge_dim, self.pde_params_dim,
use_edges=True, use_sender_nodes=True, use_receiver_nodes=True,
use_globals=False,
custom_func=pde_mlp) # This is actually a simple SingleMLP.
else:
pde_mlp, pde_net = None, None
first_gn_node_dim_in, first_gn_edge_dim_in = \
self.node_dim + self.laplacian_dim + self.time_grad_dim, self.edge_grad_dim + self.pde_params_dim
else:
raise NotImplementedError('{} not implemented!'.format(mode))
return pde_mlp, pde_net, first_gn_node_dim_in, first_gn_edge_dim_in
self.pde_mlp, self.pde_net, first_gn_node_dim_in, first_gn_edge_dim_in = _get_pde_specific_parts(self.mode)
self.predict_model = predict_model
if self.predict_model == 'GN':
#### Prediction module
gn_net_blocks = []
for li in range(self.gn_layer_num):
t_node_dim_in, t_node_dim_out = self.hidden_dim_gn, self.hidden_dim_gn
t_edge_dim_in, t_edge_dim_out = self.hidden_dim_gn, self.hidden_dim_gn
final_activation = True
if li == 0:
if self.nophysics_mode == 'nopad':
t_node_dim_in, t_edge_dim_in = 1 * self.node_dim, self.edge_dist_dim
else:
t_node_dim_in, t_edge_dim_in = first_gn_node_dim_in, first_gn_edge_dim_in + self.edge_dist_dim
if li == self.gn_layer_num - 1:
if edge_final_dim is None:
gn_edge_mlp_outdim = self.edge_dim
else:
gn_edge_mlp_outdim = self.hidden_dim_gn
t_node_dim_out, t_edge_dim_out = self.output_dim, gn_edge_mlp_outdim
final_activation = False
_edge_input_dim = 2 * t_node_dim_in + 1 * t_edge_dim_in # sender/receiver nodes
if edge_final_dim is None:
gn_edge_mlp_final_activation = final_activation
else:
gn_edge_mlp_final_activation = True
if self.is_recurrent:
gn_edge_mlp = RecurrentUpdateNet(
in_features=_edge_input_dim,
latent_dim=self.hidden_dim_gn,
out_features=t_edge_dim_out,
num_layers=2,
final_activation=gn_edge_mlp_final_activation
)
else:
gn_edge_mlp = make_mlp(_edge_input_dim, self.hidden_dim_gn, t_edge_dim_out,
self.layer_num, activation='ReLU', final_activation=gn_edge_mlp_final_activation)
gn_edge_block = EdgeBlock(_edge_input_dim, t_edge_dim_out,
use_edges=(t_edge_dim_in > 0), use_sender_nodes=True, use_receiver_nodes=True,
use_globals=False,
custom_func=gn_edge_mlp, recurrent=self.is_recurrent)
# Node: (curr, laplacian, du_dt, D) Edge: (gradient)
_node_input_dim = 1 * t_node_dim_in + 2 * t_edge_dim_out # bi-direction
if self.is_recurrent:
gn_node_mlp = RecurrentUpdateNet(
in_features=_node_input_dim,
latent_dim=self.hidden_dim_gn,
out_features=t_node_dim_out,
num_layers=2,
final_activation=final_activation
)
else:
gn_node_mlp = make_mlp(_node_input_dim, self.hidden_dim_gn, t_node_dim_out,
self.layer_num, activation='ReLU', final_activation=final_activation)
gn_node_block = NodeBlock(_node_input_dim, t_node_dim_out,
use_nodes=True, use_sent_edges=True, use_received_edges=True,
use_globals=False,
custom_func=gn_node_mlp, recurrent=self.is_recurrent)
if self.is_recurrent:
gn_net_blocks.append(nn.Sequential(
gn_edge_block, gn_node_block
))
else:
gn_net_blocks.append(gn_edge_block)
gn_net_blocks.append(gn_node_block)
if self.is_recurrent:
self.gn_net = MultiLayerRecurrentGN(gn_net_blocks)
else:
self.gn_net = nn.Sequential(*gn_net_blocks)
else:
assert self.predict_model == 'sum'
def derivative_cell(self, data, length=2):
"""
Derivative Cell (DC)
Input:
graph_seq:
- len(graph_seq)==length is True.
- Elements of graph_seq is torch.Tensor
- All elements should have "x" and "edge_index" attributes.
Return:
- If length=2, only first-order temporal derivative is returned.
- If length=3, first-, second-order temporal derivatives are returned.
- If length>3, TODO
- Spatial derivatives, Laplacian and Gradient are returned.
- Zero-order derivative or current graph, i.e. graph_seq[-1], is also returned.
"""
assert data.x.shape[1] == length
G_prev = data.x[:, -2, :]
G_curr = data.x[:, -1, :]
edge_index = data.edge_index
ret = {
'curr': G_curr,
'edge_index': edge_index
}
if self.nophysics_mode == 'nopad':
pass
else:
if hasattr(data, 'node_meta'):
gradient_input = torch.cat((G_curr, data.node_meta), dim=-1)
else:
gradient_input = G_curr
ret['gradient'] = self.gradient_layer(gradient_input, edge_index)[..., :G_curr.shape[-1]]
ret['gradient_weight'] = self.gradient_layer.get_net_out(gradient_input, edge_index).detach()
ret['laplacian'] = self.laplacian_layer(gradient_input, edge_index)[..., :G_curr.shape[-1]]
ret['laplacian_weight'] = self.laplacian_layer.get_net_out(gradient_input, edge_index).detach()
# debug
Gsrc, Gdst = G_curr[edge_index[0]], G_curr[edge_index[1]]
debug_gradient = ret['gradient']
debug_gradient_2 = ret['gradient_weight'][:, 0:1] * (Gdst - ret['gradient_weight'][:, 1:2] * Gsrc)
debug_laplacian = ret['laplacian']
debug_laplacian_2 = scatter_add(ret['laplacian_weight'][:, 0:1] * (Gsrc - ret['laplacian_weight'][:, 1:2] * Gdst), edge_index[0], dim=0)
debug_laplacian_3 = scatter_add(ret['laplacian_weight'][:, 0:1] * (Gdst - ret['laplacian_weight'][:, 1:2] * Gsrc), edge_index[1], dim=0)
ret["du_dt"] = G_curr - G_prev # (N, F)
if length == 3:
G_prev_prev = data.x[:, -3, :]
ret["du2_dt2"] = (G_curr - G_prev) + (G_prev_prev - G_prev)
else:
pass
if self.nophysics_mode == 'zeropad':
for k in ['gradient', 'laplacian', 'du_dt', 'du2_dt2']:
if k in ret.keys():
ret[k] = torch.zeros_like(ret[k])
return ret
def build_DG(self, data, DC_output, PDE_params):
"""
Module for generating Derivative Graph.
It builds a new graph having derivatives and PDE parameters as node-, edge-attributes.
For instance, if a given PDE is Diffusion Eqn,
this module will concat node-wise attributes with PDE_params (diffusion-coefficient).
Input:
DC_output:
- Output of derivative_cell()
- dictionary and key: du_dt, gradient, laplacian, curr
PDE_params:
- Output of NN_PDE()
- Depending on "mode", it may be node-wise or edge-wise features.
mode: (self)
- This should be same as "mode" in NN_PDE
Output:
output_graph:
- output_graph.x : curr, laplacian, du_dt
- output_graph.edge_attr : gradient
- Additionally, PDE_params will be concatenated properly.
"""
curr = DC_output["curr"] # (N, F)
if self.nophysics_mode == 'nopad':
if self.use_dist:
output_graph = replace_graph(data, x=curr, edge_attr=data.edge_dist)
else:
output_graph = replace_graph(data, x=curr, edge_attr=None)
else:
du_dt = DC_output["du_dt"] # (N, F)
gradient = DC_output["gradient"] # (E, F)
laplacian = DC_output["laplacian"] # (N, F)
if self.mode == "diff":
node_attr_to_cat = [curr,]
if self.use_laplacian:
node_attr_to_cat.append(laplacian)
if self.use_time_grad:
node_attr_to_cat.append(du_dt)
if self.use_pde_params:
node_attr_to_cat.append(PDE_params)
edge_attr_to_cat = []
if self.use_dist:
edge_attr_to_cat.append(data.edge_dist)
if self.use_edge_grad:
edge_attr_to_cat.append(gradient)
elif self.mode == "adv":
node_attr_to_cat = [curr, ]
if self.use_laplacian:
node_attr_to_cat.append(laplacian)
if self.use_time_grad:
node_attr_to_cat.append(du_dt)
edge_attr_to_cat = []
if self.use_dist:
edge_attr_to_cat.append(data.edge_dist)
if self.use_edge_grad:
edge_attr_to_cat.append(gradient)
if self.use_pde_params:
edge_attr_to_cat.append(PDE_params)
else:
# TODO
raise NotImplementedError()
node_attr = torch.cat(node_attr_to_cat, dim=-1)
if len(edge_attr_to_cat) > 0:
edge_attr = torch.cat(edge_attr_to_cat, dim=-1)
else:
edge_attr = None
output_graph = replace_graph(data, x=node_attr, edge_attr=edge_attr,
gradient_weight=DC_output['gradient_weight'],
laplacian_weight=DC_output['laplacian_weight'])
return output_graph
def NN_PDE(self, DC_output):
"""
Module for inferring unknown parameters in PDEs.
        For instance, if a given PDE is the Diffusion Eqn, this module infers a diffusion coefficient, D, for all nodes.
        If a given PDE is the Convection Eqn, this module infers an external vector field, v, for all directions (edges).
TODO:
For other equations?
Input:
DC_output:
- output of derivative_cell()
- It is a dictionary.
Output:
            Inferred node-wise parameters or edge-wise parameters.
"""
#### Feature engineering
# du_dt is commonly used.
# ∇u, Δu, and du2_dt2 are selected based on a given PDE.
if self.nophysics_mode == 'nopad':
return None
if not self.use_pde_params:
return None
input_dict = DC_output
du_dt = input_dict["du_dt"] # (N, F)
gradient = input_dict["gradient"] # (E, F)
laplacian = input_dict["laplacian"] # (N, F)
edge_index = input_dict['edge_index']
if self.mode == "diff":
_graph = Data(x=torch.cat([du_dt, laplacian], dim=-1), edge_index=edge_index)
# output_graph.x is the inferred diffusion-coefficient. (N, F)
output_graph = self.pde_net(_graph)
output = output_graph.x # (N, F)
elif self.mode == "adv":
_graph = Data(x=du_dt, edge_attr=gradient, edge_index=edge_index)
# output_graph.edge_attr is the inferred velocity field. (E, F)
output_graph = self.pde_net(_graph)
output = output_graph.edge_attr # (E, F)
else:
# TODO
pass
if self.nophysics_mode == 'zeropad':
output = torch.zeros_like(output)
return output
def forward_onestep(self, data, return_features=False):
if self.predict_model == 'GN':
if self.is_recurrent:
if not hasattr(data, 'node_hidden'):
data = replace_graph(data,
node_hidden=data.x.new_zeros(self.gn_layer_num, self.gn_net.gn_layers[0][1].net.num_layers, data.x.shape[0],
self.gn_net.gn_layers[0][1].net.latent_dim))
if not hasattr(data, 'edge_hidden'):
data = replace_graph(data,
edge_hidden=data.x.new_zeros(self.gn_layer_num, self.gn_net.gn_layers[0][0].net.num_layers, data.edge_index.shape[1],
self.gn_net.gn_layers[0][0].net.latent_dim))
# One-step prediction
# Read data (B,T,N,F) and return (B,1,N,output_dim).
length = data.x.shape[1] # T
# 1. Derivative Cell
DC_output = self.derivative_cell(data, length=length) # dictionary
# 2. NN_PDE
PDE_params = self.NN_PDE(DC_output) # (N,F) or (E,F)
# 3. Derivative Graph
DG_output = self.build_DG(data, DC_output, PDE_params) # torch_geometric.Data
DG_output_data = DG_output.clone().apply(lambda x: x.detach())
# 4. Prediction
if self.predict_model == 'GN':
output_graph = self.gn_net(DG_output) # torch_geometric.Data
else:
gradient = DC_output['gradient']
laplacian = DC_output['laplacian']
gradient_out = torch.zeros_like(laplacian)
gradient_out = scatter_add(gradient, DC_output['edge_index'][1, :], dim=0, out=gradient_out)
dx = gradient_out + laplacian
output_graph = replace_graph(DG_output, x=dx)
# 5. Outputs
output_graph.x = output_graph.x + data.x[:, -1, -self.output_dim:]
if return_features:
return output_graph, DG_output_data
else:
return output_graph
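# Hedged usage sketch (names below are illustrative, not defined in this file):
# with `model` an instance of this class and `data` a torch_geometric Data
# object holding data.x of shape (num_nodes, T, node_dim) with T >= 2,
# data.edge_index of shape (2, num_edges), and optional node_meta / edge_dist
# attributes, a single prediction step looks like:
#
#     out_graph = model.forward_onestep(data)
#     next_state = out_graph.x   # (num_nodes, output_dim)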
|
488599
|
import pandas as pd
from loguru import logger
from pals.GSEA import GSEA
from pals.ORA import ORA
from pals.PLAGE import PLAGE
from pals.common import DATABASE_REACTOME_KEGG, DATABASE_REACTOME_CHEBI, DATABASE_REACTOME_UNIPROT, \
DATABASE_REACTOME_ENSEMBL
from pals.feature_extraction import DataSource
from linker.constants import PKS, COMPOUND_DATABASE_KEGG, COMPOUND_DATABASE_CHEBI, METABOLOMICS, \
PROTEOMICS, GENOMICS, MIN_REPLACE_PROTEOMICS_METABOLOMICS, MIN_REPLACE_GENOMICS, PLAGE_NUM_RESAMPLES, \
PLAGE_RANDOM_SEED
from linker.views.functions import get_group_members, get_standardized_df
def get_pals_data_source(analysis, analysis_data, case, control, min_hits):
axis = 1
X_std, data_df, design_df = get_standardized_df(analysis_data, axis, pk_cols=PKS)
if design_df is None:
return None
# if this is a pimp data, the index of X_std will be in this format:
# <compound_id>_<peak_id>
# we need to remove the <peak_id> for PALS compound matching to work
old_index = X_std.index.values
new_index = []
for idx in old_index:
if '_' in idx:
tokens = idx.split('_')
new_index.append(tokens[0])
else:
new_index.append(idx)
assert len(old_index) == len(new_index)
X_std = X_std.rename(index=dict(zip(old_index, new_index)))
# retrieve experimental design information
experimental_design = {
'comparisons': [get_comparison(case, control)],
'groups': get_group_members(analysis_data)
}
# retrieve annotation df
annotation_df = pd.DataFrame()
annotation_df['entity_id'] = X_std.index
annotation_df.index.name = 'row_id'
annotation_df.head()
# retrieve measurement df
X_std.reset_index(drop=True, inplace=True)
X_std.index.name = 'row_id'
X_std.head()
# create PALS data source
reactome_metabolic_pathway_only = analysis.metadata['metabolic_pathway_only']
reactome_species = analysis.metadata['species_list'][0] # assume the first one
reactome_query = True
# select database name
database_name, min_replace = _get_database_name(analysis, analysis_data)
# create a PALS data source
assert database_name is not None
ds = DataSource(X_std, annotation_df, experimental_design, database_name,
reactome_species, reactome_metabolic_pathway_only, reactome_query, min_replace=min_replace,
min_hits=min_hits)
return ds
def _get_database_name(analysis, analysis_data):
min_replace = None
database_name = None
if analysis_data.data_type == METABOLOMICS:
if analysis.metadata['compound_database_str'] == COMPOUND_DATABASE_KEGG:
database_name = DATABASE_REACTOME_KEGG
elif analysis.metadata['compound_database_str'] == COMPOUND_DATABASE_CHEBI:
database_name = DATABASE_REACTOME_CHEBI
min_replace = MIN_REPLACE_PROTEOMICS_METABOLOMICS
elif analysis_data.data_type == PROTEOMICS:
database_name = DATABASE_REACTOME_UNIPROT
min_replace = MIN_REPLACE_PROTEOMICS_METABOLOMICS
elif analysis_data.data_type == GENOMICS:
database_name = DATABASE_REACTOME_ENSEMBL
min_replace = MIN_REPLACE_GENOMICS
return database_name, min_replace
def get_comparison(case, control):
return {
'case': case.lower(),
'control': control.lower(),
'name': '%s_vs_%s' % (case, control)
}
def run_pals(ds):
logger.info('Running PALS')
pals = PLAGE(ds, num_resamples=PLAGE_NUM_RESAMPLES, seed=PLAGE_RANDOM_SEED)
pathway_df = pals.get_pathway_df(standardize=False)
return pathway_df
def run_ora(ds):
logger.info('Running ORA')
ora = ORA(ds)
pathway_df = ora.get_pathway_df(standardize=False)
return pathway_df
def run_gsea(ds):
logger.info('Running GSEA')
gsea = GSEA(ds, pbar=True)
pathway_df = gsea.get_pathway_df(standardize=False)
return pathway_df
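# Hedged usage sketch (the Django objects below come from the surrounding web
# app and are only illustrative):
#
#     ds = get_pals_data_source(analysis, analysis_data, case, control, min_hits=3)
#     if ds is not None:
#         pathway_df = run_pals(ds)   # or run_ora(ds) / run_gsea(ds)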
|
488632
|
from CommonServerPython import *
def get_time_to_next_shift(roles):
today_week_day = datetime.today().weekday()
    # transform the Python weekday to the Demisto shift weekday (Monday is 0 in Python and 1 in Demisto; Sunday is 6 in Python and 0 in Demisto)
today_week_day = 0 if today_week_day == 6 else today_week_day + 1
for role in roles:
shifts = role.get('shifts') or []
for shift in shifts:
shift_from_day = shift.get('fromDay')
shift_to_day = shift.get('toDay')
if shift_from_day <= today_week_day <= shift_to_day:
# get the time when the shift starts
delta = shift_from_day - today_week_day
shift_from = datetime.today() + timedelta(days=delta)
shift_from = shift_from.replace(minute=shift.get('fromMinute'), hour=shift.get('fromHour'), second=0)
# get the time when the shift ends
delta = shift_to_day - today_week_day
shift_to = datetime.today() + timedelta(days=delta)
shift_to = shift_to.replace(minute=shift.get('toMinute'), hour=shift.get('toHour'), second=0)
if shift_from < datetime.today() < shift_to:
# found the current shift
diff = shift_to - datetime.today()
return round(diff.total_seconds())
return 0
def main():
get_roles_response = demisto.executeCommand('getRoles', {})
if is_error(get_roles_response):
demisto.error(f'Failed to get roles: {str(get_error(get_roles_response))}')
else:
roles = get_roles_response[0]['Contents']
widget = [{'name': '', 'data': [get_time_to_next_shift(roles)]}]
return_results(json.dumps(widget))
if __name__ in ('__builtin__', 'builtins', '__main__'):
main()
|
488666
|
import json
import uuid
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.types import CHAR, INTEGER, VARCHAR, TypeDecorator
class JSONEncodedDict(TypeDecorator):
"""Represents an immutable structure as a json-encoded string.
Usage::
JSONEncodedDict(255)
"""
impl = VARCHAR
cache_ok = True
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value)
return value
def process_result_value(self, value, dialect):
if value is not None:
value = json.loads(value)
return value
class IntFlag(TypeDecorator):
impl = INTEGER
cache_ok = True
# force the cast to INTEGER
def process_bind_param(self, value, dialect):
return int(value)
class GUID(TypeDecorator):
"""Platform-independent GUID type.
Uses PostgreSQL's UUID type, otherwise uses
CHAR(32), storing as stringified hex values.
Reference: https://docs.sqlalchemy.org/en/13/core/custom_types.html#backend-agnostic-guid-type
"""
impl = CHAR
cache_ok = True
def load_dialect_impl(self, dialect):
if dialect.name == "postgresql":
return dialect.type_descriptor(UUID())
else:
return dialect.type_descriptor(CHAR(32))
def process_bind_param(self, value, dialect):
if value is None:
return value
elif dialect.name == "postgresql":
return str(value)
else:
if not isinstance(value, uuid.UUID):
return "%.32x" % uuid.UUID(value).int
else:
# hexstring
return "%.32x" % value.int
def process_result_value(self, value, dialect):
if value is None:
return value
else:
if not isinstance(value, uuid.UUID):
value = uuid.UUID(value)
return value
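# Minimal usage sketch (the model below is illustrative and not part of this
# module):
#
#     from sqlalchemy import Column
#     from sqlalchemy.ext.declarative import declarative_base
#
#     Base = declarative_base()
#
#     class Item(Base):
#         __tablename__ = "items"
#         id = Column(GUID(), primary_key=True, default=uuid.uuid4)
#         meta = Column(JSONEncodedDict(255), nullable=True)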
|
488704
|
from hsi_toolkit.util import img_det
from hsi_toolkit.dev.dim_reduction import mnf
import numpy as np
def mtmf_statistic(hsi_img,tgt_sig, mask = None):
"""
Mixture Tuned Matched Filter Infeasibility Statistic
Inputs:
hsi_image - n_row x n_col x n_band hyperspectral image
tgt_sig - target signature (n_band x 1 - column vector)
mask - binary image limiting detector operation to pixels where mask is true
if not present or empty, no mask restrictions are used
Outputs:
mtmf_out - MTMF infeasibility statistic
alpha - matched filter output
8/12/2012 - <NAME> - <EMAIL>
12/2018 - Python Implementation by Yutai Zhou
"""
if tgt_sig.ndim == 1:
tgt_sig = tgt_sig[:, np.newaxis]
mnf_img, n_dim, mnf_vecs, mnf_eigvals, mnf_mu = mnf(hsi_img,1);
# tgt_sig = tgt_sig[:n_dim,0][:,np.newaxis]
s = mnf_vecs @ (tgt_sig - mnf_mu)
mtmf_out, kwargsout = img_det(mtmf_helper, mnf_img, s, mnf_eigvals = mnf_eigvals)
return mtmf_out, kwargsout['alpha']
def mtmf_helper(hsi_data, tgt_sig, kwargs):
mnf_eigvals = kwargs['mnf_eigvals']
n_band, n_pixel = hsi_data.shape
z = hsi_data
s = tgt_sig
sts = s.T @ s
alpha = np.zeros(n_pixel)
mtmf_data = np.zeros(n_pixel)
ev = np.sqrt(mnf_eigvals)
one = np.ones(n_band)
for i in range(n_pixel):
# print(s.shape,z.shape, z[:,i].shape)
a = (s.T @ z[:,i][:,np.newaxis] / sts).squeeze()
alpha[i] = np.max((0, np.min((1, a))))
# print(alpha[i])
        # sig_inv holds the diagonal of the (diagonal) inverse covariance, so apply it element-wise
        sig_inv = 1 / ((ev * (1 - alpha[i]) - one) ** 2)
        mtmf_data[i] = (z[:, i] * sig_inv) @ z[:, i]
return mtmf_data, {'alpha': alpha}
|
488714
|
import asyncio
from asyncio import Future
from typing import Optional, Union, cast
from aiohttp import WSMessage
from aiohttp.web import Request
from aiohttp.web_ws import WebSocketResponse
from websockets import WebSocketServerProtocol
from bxcommon.rpc.abstract_ws_rpc_handler import AbstractWsRpcHandler, WsRequest
from bxcommon.rpc.rpc_errors import RpcError
from bxcommon.rpc.json_rpc_response import JsonRpcResponse
class WsConnection:
def __init__(
self,
websocket: Union[WebSocketResponse, WebSocketServerProtocol],
path: str,
ws_rpc_handler: AbstractWsRpcHandler,
) -> None:
self.ws = websocket
self.path = path # currently unused
self.ws_rpc_handler = ws_rpc_handler
self.request_handler: Optional[Future] = None
self.publish_handler: Optional[Future] = None
self.alive_handler: Optional[Future] = None
async def handle(self, request: Request) -> WebSocketResponse:
request_handler = asyncio.ensure_future(self.handle_request(request))
publish_handler = asyncio.ensure_future(self.handle_publications())
alive_handler = asyncio.ensure_future(self.ws_rpc_handler.wait_for_close())
self.request_handler = request_handler
self.publish_handler = publish_handler
self.alive_handler = alive_handler
await asyncio.wait(
[request_handler, publish_handler, alive_handler], return_when=asyncio.FIRST_COMPLETED
)
return self.ws
async def handle_request(self, request: Request) -> None:
websocket = self.ws
await websocket.prepare(request)
async for message in websocket:
try:
response = await self.ws_rpc_handler.handle_request(
# pyre-ignore[6] Expected `multidict.CIMultiDictProxy[typing.Any]`
WsRequest(cast(WSMessage, message), request.headers)
)
except RpcError as err:
response = JsonRpcResponse(err.id, error=err).to_jsons()
await websocket.send_str(response)
async def handle_publications(self, ) -> None:
websocket = self.ws
while not websocket.closed:
message = await self.ws_rpc_handler.get_next_subscribed_message()
await websocket.send_bytes(
self.ws_rpc_handler.serialize_cached_subscription_message(message)
)
async def close(self) -> None:
self.ws_rpc_handler.close()
request_handler = self.request_handler
if request_handler is not None:
request_handler.cancel()
publish_handler = self.publish_handler
if publish_handler is not None:
publish_handler.cancel()
alive_handler = self.alive_handler
if alive_handler is not None:
alive_handler.cancel()
# cleanup to avoid circular reference and allow immediate GC.
self.request_handler = None
self.publish_handler = None
self.alive_handler = None
await self.ws.close()
|
488716
|
from flask import Flask, request, render_template, json, \
flash, session, redirect, url_for, Response, \
jsonify
from api import app
from api.models.repository import Repository
from api.models.user import User
@app.route('/api/repos/<username>/<repository>/trees/<sha>')
def TreeByUserAndRepoAndSha(username, repository, sha):
user = User.query.filter_by(username = username).first()
repo = Repository.query.filter_by(owner = user, name = repository).first()
recursively = int(request.args.get('recursive', 0)) == 1
return jsonify(tree=repo.git.getTree(sha, recursively))
|
488728
|
import zmq
context = zmq.Context()
print("Connecting to Server on port 9999")
socket = context.socket(zmq.REQ)
socket.connect("tcp://127.0.0.1:9999")
print('Sending Hello')
socket.send(b"Hello")
print('Waiting for answer')
message = socket.recv()
print(f"Received: {message}")
|
488738
|
from threading import Timer
from PyQt5.QtCore import QTimer
from PyQt5.QtGui import QContextMenuEvent
from PyQt5.QtWidgets import QAction
from PyQt5.QtWidgets import QHeaderView
from PyQt5.QtWidgets import QMenu
from PyQt5.QtWidgets import QTreeView
class TransferTreeView(QTreeView):
def __init__(self, parent=None):
super().__init__(parent)
self.context_menu = QMenu(self)
header = self.header()
header.sectionDoubleClicked.connect(self._header_double_clicked_handler)
self._add_menu_action("Transfer", self._transfer_handler)
# TODO: Implement
# self._add_menu_action("Transfer changed", self._transfer_changed_handler)
self.context_menu.addSeparator()
self._destination_menu_action = self._add_menu_action(
"Set as destination", self._set_transfer_directory_handler)
def _header_double_clicked_handler(self, idx):
self.header().setSectionResizeMode(idx, QHeaderView.ResizeToContents)
# Has to be QTimer, multithreading.Timer blocks Qt thread for some reason
QTimer.singleShot(10, lambda: self.header().setSectionResizeMode(idx, QHeaderView.Interactive))
def _add_menu_action(self, title, handler):
action = QAction(title, self.context_menu)
action.triggered.connect(handler)
self.context_menu.addAction(action)
return action
def _transfer_handler(self):
pass
def _transfer_changed_handler(self):
pass
def _set_transfer_directory_handler(self):
pass
def contextMenuEvent(self, *args, **kwargs):
ev = args[0]
assert isinstance(ev, QContextMenuEvent)
point = ev.pos()
rows = self.selectionModel().selectedRows()
self._destination_menu_action.setEnabled(False)
if rows:
if len(rows) == 1 and self.model().isDir(rows[0]):
self._destination_menu_action.setEnabled(True)
self.context_menu.exec(self.mapToGlobal(point))
|
488801
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
import numpy as np
import torch
import json
import cv2
import os
import math
from ..video_dataset import VideoDataset
from utils.ddd_utils import compute_box_3d, project_to_image
class KITTITracking(VideoDataset):
num_categories = 5
default_resolution = [384, 1280]
class_name = ['Pedestrian', 'Car', 'Cyclist', 'Van', 'Truck']
# negative id is for "not as negative sample for abs(id)".
    # 0 to ignore losses for all categories in the bounding box region
# ['Pedestrian', 'Car', 'Cyclist', 'Van', 'Truck', 'Person_sitting',
# 'Tram', 'Misc', 'DontCare']
cat_ids = {1:1, 2:2, 3:3, 4:4, 5:5, 6:-1, 7:-9999, 8:-9999, 9:0}
max_objs = 100
def __init__(self, opt, split):
data_dir = os.path.join(opt.data_dir, 'kitti_tracking')
split_ = 'train' if opt.dataset_version != 'test' else 'test' #'test'
img_dir = os.path.join(
data_dir, 'data_tracking_image_2', '{}ing'.format(split_), 'image_02')
if split == 'val':
ann_file_ = 'val_half'
else:
ann_file_ = split_ if opt.dataset_version == '' else opt.dataset_version
ann_path = os.path.join(
data_dir, 'annotations', 'tracking_{}.json'.format(
ann_file_))
self.images = None
super(KITTITracking, self).__init__(opt, split, ann_path, img_dir)
self.alpha_in_degree = False
self.num_samples = len(self.images)
self.box_size_thresh = [0] * self.num_categories
print('Loaded {} {} samples'.format(split, self.num_samples))
def __len__(self):
return self.num_samples
def _to_float(self, x):
return float("{:.2f}".format(x))
def save_results_ioueval(self, results, save_dir):
        formatted_results = []
if not os.path.exists(save_dir):
os.mkdir(save_dir)
for video in self.coco.dataset['videos']:
video_id = video['id']
images = self.video_to_images[video_id]
for image_info in images:
img_id = image_info['id']
if not (img_id in results):
continue
frame_id = image_info['frame_id']
for i in range(len(results[img_id])):
item = results[img_id][i]
if 'visibility' in item and not item['visibility']:
continue
if item['age'] != 1:
continue
category_id = item['class']
track_id = item['tracking_id'] if 'tracking_id' in item else -1
bbox = [item['bbox'][0].item(), item['bbox'][1].item(), item['bbox'][2].item() - item['bbox'][0].item(), item['bbox'][3].item() - item['bbox'][1].item()]
entry = {'video_id': video_id, 'image_id': img_id, 'category_id': category_id, 'track_id': track_id, 'bbox': bbox, 'score': item['score'].item()}
                    formatted_results.append(entry)
print(save_dir + '/iou_eval.json')
        with open(save_dir + '/iou_eval.json', 'w') as f:
            json.dump(formatted_results, f)
def save_results(self, results, save_dir):
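        # Each line written below follows the KITTI tracking submission format:
        #   frame track_id type truncated occluded alpha bbox(4) dim(3) loc(3) rotation_y score
        # truncated and occluded are always exported as -1 (unknown) here.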
results_dir = os.path.join(save_dir, 'results_kitti_tracking')
if not os.path.exists(results_dir):
os.mkdir(results_dir)
for video in self.coco.dataset['videos']:
video_id = video['id']
file_name = video['file_name']
out_path = os.path.join(results_dir, '{}.txt'.format(file_name))
f = open(out_path, 'w')
images = self.video_to_images[video_id]
for image_info in images:
img_id = image_info['id']
if not (img_id in results):
continue
frame_id = image_info['frame_id']
for i in range(len(results[img_id])):
item = results[img_id][i]
category_id = item['class']
cls_name_ind = category_id
class_name = self.class_name[cls_name_ind - 1]
if 'visibility' in item and not item['visibility']:
continue
if item['age'] != 1:
continue
if not ('alpha' in item):
item['alpha'] = -1
if not ('rot_y' in item):
item['rot_y'] = -10
if 'dim' in item:
item['dim'] = [max(item['dim'][0], 0.01),
max(item['dim'][1], 0.01), max(item['dim'][2], 0.01)]
if not ('dim' in item):
item['dim'] = [-1, -1, -1]
if not ('loc' in item):
item['loc'] = [-1000, -1000, -1000]
track_id = item['tracking_id'] if 'tracking_id' in item else -1
f.write('{} {} {} -1 -1'.format(frame_id - 1, track_id, class_name))
f.write(' {:d}'.format(int(item['alpha'])))
f.write(' {:.2f} {:.2f} {:.2f} {:.2f}'.format(
item['bbox'][0], item['bbox'][1], item['bbox'][2], item['bbox'][3]))
f.write(' {:d} {:d} {:d}'.format(
int(item['dim'][0]), int(item['dim'][1]), int(item['dim'][2])))
f.write(' {:d} {:d} {:d}'.format(
int(item['loc'][0]), int(item['loc'][1]), int(item['loc'][2])))
f.write(' {:d} {:.2f}\n'.format(int(item['rot_y']), item['score']))
f.close()
def run_eval(self, results, save_dir, write_to_file=False, dataset_version="val_half"):
self.save_results(results, save_dir)
os.system('python tools/eval_kitti_track/evaluate_tracking.py ' + \
'{}/results_kitti_tracking/ {}'.format(
save_dir, self.opt.dataset_version))
self.save_results_ioueval(results, save_dir)
exp_id = save_dir.split('/')[-1]
os.chdir("../tao")
command = 'python scripts/evaluation/evaluate.py ' + \
'../data/kitti_tracking/annotations/tracking_%s_tao.json ' % dataset_version + \
'{}/iou_eval.json'.format(save_dir) + ' --config-updates CATEGORIES 1,2 FILTER_IOU_THRSH 0.2'
if write_to_file:
print("Writing to file")
command += ' > ../exp/tracking/{}/eval_out.txt'.format(exp_id)
os.system(command)
|
488805
|
import pylint
from pylint.lint import Run as PylintRun
import sys
from perflint.for_loop_checker import ForLoopChecker, LoopInvariantChecker
from perflint.list_checker import ListChecker
from perflint.comprehension_checker import ComprehensionChecker
pylint.modify_sys_path()
rules = (
list(ForLoopChecker.msgs.keys())
+ list(LoopInvariantChecker.msgs.keys())
+ list(ListChecker.msgs.keys())
+ list(ComprehensionChecker.msgs.keys())
)
args = []
args.append("--load-plugins=perflint")
args.append("--disable=all")
args.append("--enable={0}".format(",".join(rules)))
args.extend(sys.argv[1:])
try:
PylintRun(args)
except KeyboardInterrupt:
sys.exit(1)
|
488838
|
from dash import Dash, html, dcc, Input, Output
import plotly.express as px
def cal_list(y_list, s_value):
new_list = list()
for num, i in enumerate(y_list):
if num % 2 == 0:
new_list.append(i + s_value)
else:
new_list.append(i - s_value)
return new_list
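# Example: cal_list([-2, 2, 3, 1, -1], 1) adds the slider value to even-indexed
# entries and subtracts it from odd-indexed ones, returning [-1, 1, 4, 0, 0].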
app = Dash(__name__)
app.layout = html.Div(
[
html.P("はんなりPythonへようこそ!", style={"fontSize": "3rem"}),
html.Div(
[dcc.Slider(-5, 5, 1, value=0, id="my_slider", updatemode="drag")],
style={"width": "80%", "margin": "auto"},
),
html.Div([dcc.Graph(id="my_graph")]),
],
style={"textAlign": "center"},
)
@app.callback(Output("my_graph", "figure"), Input("my_slider", "value"))
def update_graph(s_value):
x_list = [1, 2, 3, 4, 5]
y_list = [-2, 2, 3, 1, -1]
new_list = cal_list(y_list, s_value)
return px.bar(x=x_list, y=new_list)
if __name__ == "__main__":
app.run_server(debug=True)
|
488880
|
import os
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--normal_patches_train_dir",
default='/data/camelyon16/train/normal_patches_x40_vah_norm',
help='a path to normalized patches')
parser.add_argument("--normal_patches_test_dir",
default='/data/camelyon16/test/normal_patches_x40_vah_norm',
help='a path to normalized patches')
parser.add_argument("--tumor_patches_train_dir",
default='/data/camelyon16/train/tumor_patches_x40_vah_norm',
help='a path to normalized patches')
parser.add_argument("--tumor_patches_test_dir",
default='/data/camelyon16/test/tumor_patches_x40_vah_norm',
help='a path to normalized patches')
parser.add_argument("--output_split_root",
type=str,
default='/data/camelyon16/train_test_split/healthy')
args = parser.parse_args()
for _type, train_dir, test_dir in [
('normal', args.normal_patches_train_dir, args.normal_patches_test_dir),
('anomaly', args.tumor_patches_train_dir, args.tumor_patches_test_dir)
]:
output_dir = os.path.join(args.output_split_root, _type)
os.makedirs(output_dir, exist_ok=True)
with open(os.path.join(output_dir, 'train'), 'w') as fin:
fin.writelines([filename + '\n' for filename in os.listdir(train_dir)])
with open(os.path.join(output_dir, 'test'), 'w') as fin:
fin.writelines([filename + '\n' for filename in os.listdir(test_dir)])
|
488890
|
from manimlib.imports import *
from ManimProjects.utils.Parabola import Parabola
from ManimProjects.utils.geometry import CText
class OpenScene(Scene):
def construct(self):
        sub1 = CText('这将是一个')  # "This will be a ..."
        sub2 = CText('关于线段长度的性质')  # "... property about segment lengths"
subs = VGroup(sub1, sub2)
subs.arrange(DOWN)
self.play(Write(sub1))
self.play(Write(sub2))
self.wait(2)
self.play(FadeOut(subs))
class Prop2(Parabola):
CONFIG = {
'focus': 3,
'x_min': -5
}
def construct(self):
self.adjust_x_range()
graph = self.get_graph(color=DARK_BROWN)
directrix = self.get_directrix()
focus = Dot().move_to(self.get_focus())
focus.set_fill(DARK_BROWN)
focus.plot_depth = 1
self.play(ShowCreation(graph),
ShowCreation(directrix),
ShowCreation(focus))
focus_label = TexMobject('F').scale(0.5)
focus_label.next_to(focus, DOWN, buff=SMALL_BUFF)
self.play(Write(focus_label))
h_line = Line()
d_loc = directrix.get_center()[0] * RIGHT
h_line.put_start_and_end_on(d_loc, ORIGIN)
h_line.plot_depth = -1
self.play(ShowCreation(h_line))
a = Dot()
a.set_fill(LIGHT_BROWN)
a.plot_depth = 1
a.move_to(self.coords_to_point(0, 0))
a_label = TexMobject('A').scale(0.5)
a_label.next_to(a, DL, buff=SMALL_BUFF)
t = Dot()
t.set_fill(LIGHT_BROWN)
t.plot_depth = 1
t.move_to(d_loc)
t_label = TexMobject('T').scale(0.5)
t_label.next_to(t, LEFT, buff=SMALL_BUFF)
y_value = ValueTracker(9)
p = Dot()
p.set_fill(DARK_BLUE)
p.plot_depth = 1
p.add_updater(lambda m:\
m.move_to(self.value_to_point(
y_value.get_value())))
p_label = TexMobject('P').scale(0.5)
p_label.add_updater(lambda m:\
m.next_to(p, RIGHT, buff=SMALL_BUFF))
m = Dot()
m.set_fill(DARK_BLUE)
m.plot_depth = 1
m.add_updater(lambda t:\
t.move_to(d_loc + p.get_center()[1] * UP))
m_label = TexMobject('M').scale(0.5)
m_label.add_updater(lambda l:\
l.next_to(m, LEFT, buff=SMALL_BUFF))
n = Dot()
n.set_fill(DARK_BLUE)
n.plot_depth = 1
n.add_updater(lambda m:\
m.move_to(p.get_center()[0] * RIGHT))
n_label = TexMobject('N').scale(0.5)
n_label.add_updater(lambda m:\
m.next_to(n, DOWN, buff=SMALL_BUFF))
self.play(*[ShowCreation(e) for e in\
[a, p, m, n, t]])
self.play(*[ShowCreation(e) for e in\
[a_label, t_label, p_label, m_label, n_label]])
self.wait()
pn = Line()
pn.add_updater(lambda l:\
l.put_start_and_end_on(
p.get_center(), n.get_center()))
pf = Line()
pf.add_updater(lambda l:\
l.put_start_and_end_on(
p.get_center(), focus.get_center()))
pm = Line()
pm.add_updater(lambda l:\
l.put_start_and_end_on(
p.get_center(), m.get_center()))
self.play(*[ShowCreation(e) for e in\
[pn, pf, pm]])
self.wait()
prop = VGroup()
prop.add(CText('性质:')\
.scale(0.25).set_color(DARK_BLUE))
prop.add(TexMobject('PN^2=4AF\\cdot AN')\
.scale(0.5)).set_color(DARK_BROWN)
prop.arrange(RIGHT)
proof = VGroup()
prf = CText('证明:')\
.scale(0.25).set_color(DARK_BLUE)
proof.add(prf)
details = VGroup()
line1 = TexMobject('TN^2=', '(TA+AN)^2')\
.scale(0.5)
details.add(line1)
line2 = TexMobject('TN^2=PM^2=PF^2')\
.scale(0.5)
details.add(line2)
line3 = TexMobject('PF^2=PN^2+NF^2')\
.scale(0.5)
details.add(line3)
line4 = TexMobject(
'PN^2+NF^2=NF^2+4 AF\\cdot AN')\
.scale(0.5)
details.add(line4)
line5 = TexMobject(
'PN^2=4AF\\cdot AN'
).scale(0.5).set_color(DARK_BROWN)
details.add(line5)
proof.add(details)
details.arrange(DOWN)
line2.align_to(line1, LEFT)
line3.align_to(line2, LEFT)
line4.align_to(line3, LEFT)
line5.align_to(line4, LEFT)
proof.arrange(RIGHT)
prf.align_to(details, UP)
forms = VGroup()
forms.add(prop)
forms.add(proof)
forms.arrange(DOWN)
proof.align_to(prop, LEFT)
forms.to_edge(RIGHT, buff=2)
expand = TexMobject('TA',
'^2+AN^2+2', 'TA',
'\\cdot AN')\
.scale(0.5)
expand.align_to(line1[1], LEFT)
expand.align_to(line1[1], UP)
expand2 = TexMobject('AF',
'^2+AN^2+2', 'AF',
'\\cdot AN')\
.scale(0.5)
expand2.align_to(line1[1], LEFT)
expand2.align_to(line1[1], UP)
expand3 = TexMobject(
'(AF-AN)^2+4 AF\\cdot AN')\
.scale(0.5)
expand3.align_to(line1[1], LEFT)
expand3.align_to(line1[1], UP)
expand4 = TexMobject(
'NF^2+4 AF\\cdot AN'
).scale(0.5)
expand4.align_to(line1[1], LEFT)
expand4.align_to(line1[1], UP)
self.play(Write(prop))
self.wait()
self.play(y_value.set_value, 1)
self.wait()
self.play(y_value.set_value, 3)
self.wait()
self.play(Write(prf))
self.play(Write(line1))
self.wait()
self.play(ShowCreationThenDestruction(
Line(t, n).set_color(RED)
))
self.play(ShowCreationThenDestruction(
Line(t, a).set_color(RED)
))
self.play(ShowCreationThenDestruction(
Line(a, n).set_color(RED)
))
self.wait()
self.play(ReplacementTransform(
line1[1], expand
))
self.wait(3)
self.play(ShowCreationThenDestruction(
Line(t, a).set_color(RED)
))
self.play(ShowCreationThenDestruction(
Line(a, focus).set_color(RED)
))
self.wait()
self.play(*[Transform(
expand[e], expand2[e]) for e in\
[0, 2]])
self.wait(3)
self.play(ReplacementTransform(
expand, expand3
))
self.wait(3)
self.play(ReplacementTransform(
expand3, expand4
))
self.wait(3)
self.play(Write(line2))
self.play(ShowCreationThenDestruction(
Line(t, n).set_color(RED)
))
self.play(ShowCreationThenDestruction(
Line(p, m).set_color(RED)
))
self.play(ShowCreationThenDestruction(
Line(p, focus).set_color(RED)
))
self.wait(3)
self.play(Write(line3))
self.play(ShowCreationThenDestruction(Polygon(
p.get_center(),
n.get_center(),
focus.get_center()
).set_fill(DARK_BLUE, opacity=1)))
self.wait(3)
self.play(
WiggleOutThenIn(expand4),
WiggleOutThenIn(line3))
self.play(Write(line4))
self.wait(3)
self.play(Write(line5))
self.wait(3)
        def1 = CText('AF是焦距')  # "AF is the focal distance"
        def2 = CText('AN是点P的横标线')  # "AN is the abscissa of point P"
        def3 = CText('PN是点P的纵标线')  # "PN is the ordinate of point P"
defines = VGroup(def1, def2, def3)
defines.arrange(DOWN)
defines.scale(0.25)
defines.next_to(proof, DOWN)
self.play(Write(defines))
self.wait(5)
self.play(y_value.set_value, 8)
self.wait(10)
class Summary(Scene):
def construct(self):
        text = CText('总结')  # "Summary"
        text.set_fill(DARK_BROWN)
        content1 = CText('纵标线的平方等于')  # "The square of the ordinate equals"
        content2 = CText('横标线与焦距乘积的四倍')  # "four times the product of the abscissa and the focal distance"
contents = VGroup(content1, content2)
contents.scale(0.7)
contents.arrange(DOWN)
total = VGroup(text, contents)
total.arrange(DOWN, buff=MED_LARGE_BUFF)
self.play(Write(text))
self.wait(2)
self.play(Write(contents))
self.wait(10)
|
488902
|
def on_button_pressed_a():
global fred
fred += 1
def on_button_pressed_b():
global fred
fred += -1
fred = 0
def on_forever():
basic.show_number(fred)
basic.forever(on_forever)
|
488912
|
from unittest import TestCase
from github.github_issue_object import GithubIssueObject
class TestGithubIssueObject(TestCase):
def test_get_difficulty(self):
        self.assertEqual(GithubIssueObject.get_difficulty('difficulty: trivial'), 0.1, 'difficulty should have been 0.1')
self.assertEqual(GithubIssueObject.get_difficulty('difficulty:easy'), 1, 'difficulty should have been 1')
self.assertEqual(GithubIssueObject.get_difficulty('difficulty: medium'), 1.5, 'difficulty should have been 1.5')
self.assertEqual(GithubIssueObject.get_difficulty('difficulty: hard'), 2, 'difficulty should have been 2')
self.assertEqual(GithubIssueObject.get_difficulty('difficulty: none'), 1, 'difficulty should have been 1')
self.assertEqual(GithubIssueObject.get_difficulty(''), 1, 'difficulty should have been 1')
|
488921
|
from click.testing import CliRunner
import json
import pathlib
import pytest
from sqlite_transform import cli
import sqlite_utils
@pytest.mark.parametrize("delimiter", [None, ";", "-"])
def test_jsonsplit(tmpdir, delimiter):
db_path = str(pathlib.Path(tmpdir) / "data.db")
db = sqlite_utils.Database(db_path)
db["example"].insert_all(
[
{"id": 1, "tags": (delimiter or ",").join(["foo", "bar"])},
{"id": 2, "tags": (delimiter or ",").join(["bar", "baz"])},
],
pk="id",
)
args = ["jsonsplit", db_path, "example", "tags"]
if delimiter is not None:
args.extend(["--delimiter", delimiter])
result = CliRunner().invoke(cli.cli, args)
assert 0 == result.exit_code, result.output
assert list(db["example"].rows) == [
{"id": 1, "tags": '["foo", "bar"]'},
{"id": 2, "tags": '["bar", "baz"]'},
]
@pytest.mark.parametrize(
"type,expected_array",
(
(None, ["1", "2", "3"]),
("float", [1.0, 2.0, 3.0]),
("int", [1, 2, 3]),
),
)
def test_jsonsplit_type(fresh_db_and_path, type, expected_array):
db, db_path = fresh_db_and_path
db["example"].insert_all(
[
{"id": 1, "records": "1,2,3"},
],
pk="id",
)
args = ["jsonsplit", db_path, "example", "records"]
if type is not None:
args.extend(("--type", type))
result = CliRunner().invoke(cli.cli, args)
assert 0 == result.exit_code, result.output
assert json.loads(db["example"].get(1)["records"]) == expected_array
@pytest.mark.parametrize("drop", (True, False))
def test_jsonsplit_output(fresh_db_and_path, drop):
db, db_path = fresh_db_and_path
db["example"].insert_all(
[
{"id": 1, "records": "1,2,3"},
],
pk="id",
)
args = ["jsonsplit", db_path, "example", "records", "--output", "tags"]
if drop:
args += ["--drop"]
result = CliRunner().invoke(cli.cli, args)
assert 0 == result.exit_code, result.output
expected = {
"id": 1,
"records": "1,2,3",
"tags": '["1", "2", "3"]',
}
if drop:
del expected["records"]
assert db["example"].get(1) == expected
|
488939
|
def miller(E, P, Q, m):
    """
    Calculate Divisor by Miller's Algorithm
    Args:
        E: The Elliptic Curve
        P: A point over E which has order m
        Q: A point over E which has order m to apply function f_P
        m: The order of P, Q on E
    Returns:
        f_P(Q)
    """
    from six.moves import map
def h(P, Q, R):
# if \lambda is infinity
if (P == Q and P.y == 0) or (P != Q and P.x == Q.x):
return R.x - P.x
L = P.line_coeff(Q)
p = R.y - P.y - L * (R.x - P.x)
q = R.x + P.x + Q.x - L * L
return p / q
if P == Q:
return 1
b = map(int, bin(m)[2:])
next(b)
f = 1
T = P
for i in b:
f = f * f * h(T, T, Q)
T = T + T
if i:
f = f * h(T, P, Q)
T = T + P
return f
def weil_pairing(E, P, Q, m, S=None):
"""
Calculate Weil Pairing
Args:
E: The Elliptic Curve
P: A point over E which has order m
Q: A point over E which has order m
m: The order of P, Q on E
S: [Optional] A random point on E
Returns:
e_m(P, Q)
"""
if S is None:
S = E.random_point()
from ecpy.utils.util import is_enable_native, _native
from ecpy.fields.ExtendedFiniteField import ExtendedFiniteFieldElement
if is_enable_native:
P = _native.EC_elem(E.ec, tuple(P.x), tuple(P.y), tuple(P.z))
Q = _native.EC_elem(E.ec, tuple(Q.x), tuple(Q.y), tuple(Q.z))
S = _native.EC_elem(E.ec, tuple(S.x), tuple(S.y), tuple(S.z))
if E.ec.type == 1:
t = _native.FF_elem(0)
elif E.ec.type == 2:
t = _native.EF_elem(0, 0)
_native.weil_pairing(t, E.ec, P, Q, S, m)
if E.ec.type == 1:
return t.to_python()
elif E.ec.type == 2:
t = t.to_python()
return ExtendedFiniteFieldElement(E.field, t[0], t[1])
else:
fpqs = miller(E, P, Q + S, m)
fps = miller(E, P, S, m)
fqps = miller(E, Q, P - S, m)
fqs = miller(E, Q, -S, m)
return E.field._inv(fps * fqps) * fpqs * fqs
def tate_pairing(E, P, Q, m, k=2):
"""
Calculate Tate Pairing
Args:
E: The Elliptic Curve
P: A point over E which has order m
Q: A point over E which has order m
m: The order of P, Q on E
k: [Optional] The Embedding Degree of m on E
"""
from ecpy.utils.util import is_enable_native, _native
if is_enable_native:
P = _native.EC_elem(E.ec, tuple(P.x), tuple(P.y), tuple(P.z))
Q = _native.EC_elem(E.ec, tuple(Q.x), tuple(Q.y), tuple(Q.z))
if E.ec.type == 1:
t = _native.FF_elem(0)
elif E.ec.type == 2:
t = _native.EF_elem(0, 0)
_native.tate_pairing(t, E.ec, P, Q, m, k)
if E.ec.type == 1:
from ecpy.fields.Zmod import ZmodElement
return ZmodElement(E.field, t.to_python())
elif E.ec.type == 2:
from ecpy.fields.ExtendedFiniteField import ExtendedFiniteFieldElement
t = t.to_python()
return ExtendedFiniteFieldElement(E.field, t[0], t[1])
else:
f = miller(E, P, Q, m)
return f ** (((E.field.p ** k) - 1) // m)
def MapToPoint(E, y):
"""
MapToPoint Function: Given by Boneh-Durfee's ID-based Encryption Paper.
Args:
E: The Elliptic Curve
y: Any Value (should be E.field element)
Returns:
Correspond point of y on E
"""
from ecpy.utils import cubic_root
x = cubic_root(y**2 - 1)
Q = E(x, y)
return 6 * Q
def gen_supersingular_ec(bits=70):
"""
    Generate Super-Singular Elliptic Curve
Args:
bits: The Security Parameter: log_2 p = bits
Returns:
A (Super Singular) Elliptic Curve, Extended Finite Field, l
        l is needed to calculate the pairing
"""
from ecpy.fields import ExtendedFiniteField
from .EllipticCurve import EllipticCurve
def _next_prime(n):
        """
        return the smallest prime that is >= n
        """
        from ecpy.util import is_prime
while not is_prime(n):
n += 1
return n
"""
If you have gmpy, use gmpy.next_prime
in other hand, use slow function
"""
try:
from gmpy import next_prime
except:
next_prime = _next_prime
def gen_prime():
from ecpy.util import is_prime
from random import randint
while True:
p = int(next_prime(randint(2**(bits - 1), 2**bits)))
if is_prime(p * 6 - 1):
break
return p * 6 - 1, p
p, l = gen_prime()
F = ExtendedFiniteField(p, "x^2+x+1")
return EllipticCurve(F, 0, 1), F, l
def find_point_by_order(E, l):
"""
Find a Elliptic Curve Point P which has order l.
Args:
E: The Elliptic Curve
l: Order of Point on E
Returns:
Point on E which has order l.
"""
i = 3
while True:
r = E.get_corresponding_y(i)
        if r is not None:
P = E(i, r)
if (P * l).is_infinity():
return P
i += 1
def symmetric_weil_pairing(E, P, Q, m):
"""
Symmetric Weil Pairing
\hat{e}(P, Q) = e(P, \phi(Q)) (\phi is Distortion Map)
Args:
E: The Elliptic Curve
P: A point on E which has order m
Q: A point on E which has order m
m: The order of P, Q
"""
return weil_pairing(E, P, Q.distortion_map(), m)
def symmetric_tate_pairing(E, P, Q, m, k=2):
"""
Symmetric Tate Pairing
\hat{e}(P, Q) = e(P, \phi(Q)) (\phi is Distortion Map)
Args:
E: The Elliptic Curve
P: A point on E which has order m
Q: A point on E which has order m
m: The order of P, Q
k: [Optional] The Embedding Degree of m on E
"""
    return tate_pairing(E, P, Q.distortion_map(), m, k)
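# Hedged usage sketch (the bit length is illustrative; because
# gen_supersingular_ec uses a relative import, run this from within the ecpy
# package):
#
#     E, F, l = gen_supersingular_ec(bits=40)
#     P = find_point_by_order(E, l)
#     e = symmetric_weil_pairing(E, P, P, l)
#     # bilinearity: e(aP, bP) == e(P, P)^(a*b)
#     assert symmetric_weil_pairing(E, 2 * P, 3 * P, l) == e ** 6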
|
488949
|
import objs
import utils
class FloorPlane(object):
def __init__(self, scene, isCeiling=False):
self.scene = scene
self.isCeiling = isCeiling
self.gPoints = scene.layoutPoints
self.walls = scene.layoutWalls
self.color = (0,0,0)
self.normal = (0, -1, 0) if isCeiling else (0, 1, 0)
self.height = 0
self.planeEquation = (0, 0, 0, 0)
self.corners = []
self.edges = []
self.bbox2d = ((0,0),(1,1))
self.id = 0
self.updateGeometry()
def updateGeometry(self):
cameraH = self.scene.cameraHeight
cam2ceilH = self.scene.layoutHeight - cameraH
self.height = cam2ceilH if self.isCeiling else cameraH
self.planeEquation = self.normal + (self.height,)
self.color = utils.normal2color(self.normal)
self.updateCorners()
self.updateEdges()
self.updateBbox2d()
def updateCorners(self):
self.corners = []
for gp in self.gPoints:
if self.isCeiling:
xyz = (gp.xyz[0], self.height, gp.xyz[2])
else:
xyz = (gp.xyz[0], -self.height, gp.xyz[2])
corner = objs.GeoPoint(self.scene, None, xyz)
self.corners.append(corner)
def updateEdges(self):
self.edges = []
cnum = len(self.corners)
for i in range(cnum):
edge = objs.GeoEdge(self.scene,
(self.corners[i], self.corners[(i+1)%cnum]))
self.edges.append(edge)
def updateBbox2d(self):
coords = []
for c in [e.coords for e in self.edges]:
coords += c
self.bbox2d = utils.imagePointsBox(coords)
|
488978
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
from tensorflow.python.client import timeline
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
import tensorflow as tf
import random
import numpy as np
import time
import sparse_tools as sp
from direct_sparse_module import sparse_nn_ops as sc_module
import os
import sys
def verifyValues(
tensor_in_sizes,
filter_in_sizes,
stride,
rho_data = 0.1,
rho_filter = 1,
padding = 'SAME',
dim = 5,
max_density = 0.1,
num_trials = 3,
filter_type = 'K-RELU',
test_type = '',
dense=True
):
if isinstance(stride, collections.Iterable):
strides = [1] + list(stride) + [1]
else:
strides = [1, stride, stride, stride, 1]
out_sizes = np.copy(tensor_in_sizes)
out_sizes[-1] = filter_in_sizes[-1]
out_entry_count = np.prod(out_sizes) * max_density
bias = np.zeros([filter_in_sizes[-1]], dtype=np.float32)
no_strides = [1, 1, 1, 1, 1]
[t1ind, t1val, t1sh] = sp.createRandomSparseTensor(rho_data, tensor_in_sizes, -3, 3)
s1 = tf.SparseTensor(indices=t1ind, values=t1val, dense_shape=t1sh)
d1 = sp.sparse_to_dense(t1ind, t1val, t1sh)
[t2ind, t2val, t2sh] = sp.createRandomSparseTensor(rho_filter, filter_in_sizes)
s2 = tf.SparseTensor(indices=t2ind, values=t2val, dense_shape=t2sh)
d2 = sp.sparse_to_dense(t2ind, t2val, t2sh)
print("strides: \n", strides)
print("input shape", tensor_in_sizes)
print("filter shape", filter_in_sizes)
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.7
with tf.device("/gpu:0"):
convd = sc_module.direct_sparse_data_conversion(t1ind, t1val, t1sh)
convf = sc_module.direct_sparse_filter_conversion(t2ind, t2val, t2sh, t1sh)
with tf.Session(config=config) as sess:
pd = sess.run(convd)
pf = sess.run(convf)
tf.reset_default_graph()
ts = 0
with tf.device("/gpu:0"):
approx_scskconv = sc_module.direct_sparse_conv_kd(pd.out_indices, pd.out_values, pd.out_shape, pd.out_block_channel_mapping, pf.out_indices, pf.out_values, pf.out_shape, pf.out_channel_mapping, bias, strides, padding, out_entry_count, dim, max_density, filter_type);
with tf.Session(config=config) as sess:
t6 = time.time()
sv3 = sess.run(approx_scskconv)
t5 = time.time()
for i in range(0, num_trials):
sess.run(approx_scskconv)
t6 = time.time()
ts = abs(t6 - t5) / max(num_trials,1)
print("time approx sparse: ", ts)
tf.reset_default_graph()
time.sleep(1)
if dense:
td = 0
with tf.device("/gpu:0"):
conv = nn_ops.conv3d(d1, d2, strides, padding)
with tf.Session(config=config) as sess:
t22 = time.time()
expected = sess.run(conv)
t11 = time.time()
for i in range(0, num_trials):
sess.run(conv)
t22 = time.time()
td = abs(t22 - t11) / max(num_trials,1)
print("time dense gpu: ", td)
tf.reset_default_graph()
print("time ratio: ", ts / td)
return [expected, sv3, ts, td]
def do_test(res, f_density, batch_size):
pid = os.getpid()
print(pid)
num_trials = 5
res = res
channel_count = 1
channel_count_out = 8
filter_res = 3
batch_size = batch_size
max_density = 1/res
in_density = 1/res
f_density = f_density
filter_type = 'K-RELU'
test_type = ''
ret_value = verifyValues(
tensor_in_sizes=[batch_size, res, res, res, channel_count], #[batch, depth, height, width, in_channels]
filter_in_sizes=[filter_res, filter_res, filter_res, channel_count, channel_count_out], #[depth, height, width, in_channels, out_channels]
stride=1,
rho_data=1 * in_density,
rho_filter=1 * f_density,
padding='SAME',
max_density=max_density,
num_trials=num_trials,
filter_type=filter_type,
test_type=test_type)
for res in [2**i for i in range(4, 9)]:
for f_density in [0.1, 0.3, 0.5, 1]:
for batch in [8]:
print('========================================================================')
print('========================================================================')
print('res = {} f_density = {} batch = {}'.format(res, f_density, batch))
do_test(res, f_density, batch)
|
489030
|
import logging
from django.db.models import Q
from usaspending_api.references.models import ToptierAgency
from usaspending_api.common.exceptions import InvalidParameterException
logger = logging.getLogger(__name__)
def spending_filter(alt_set, queryset, filters, _type):
for key, value in filters.items():
# check for valid key
if value is None:
raise InvalidParameterException("Invalid filter: " + key + " has null as its value.")
key_list = [
"budget_function",
"budget_subfunction",
"federal_account",
"program_activity",
"object_class",
"recipient",
"award",
"award_category",
"agency",
"agency_type",
"fy",
"quarter",
"period",
]
if key not in key_list:
raise InvalidParameterException(
key + " filter does not exist. "
"Valid Filters: budget_function, budget_subfunction, federal_account,"
"program_activity, object_class, recipient, award, award_category,"
"agency, agency_type, fy, quarter, period."
)
# Check _type to filter on correct set (alt_set or queryset)
alt_set_keys = ["recipient", "award", "award_category", "agency_type"]
if _type in alt_set_keys:
# Apply filters
# budget_function
if key == "budget_function":
alt_set = alt_set.filter(treasury_account__budget_function_code=value)
# budget_subfunction
elif key == "budget_subfunction":
alt_set = alt_set.filter(treasury_account__budget_subfunction_code=value)
# federal_account
elif key == "federal_account":
alt_set = alt_set.filter(treasury_account__federal_account=value)
# program_activity
elif key == "program_activity":
alt_set = alt_set.filter(program_activity=value)
# object_class
elif key == "object_class":
alt_set = alt_set.filter(object_class__major_object_class=value)
# recipient
elif key == "recipient":
alt_set = alt_set.filter(
Q(award__latest_transaction__assistance_data__awardee_or_recipient_legal=value)
| Q(award__latest_transaction__contract_data__awardee_or_recipient_legal=value)
)
# award, award_category
elif key == "award" or key == "award_category":
alt_set = alt_set.filter(award__id=value)
# agency
elif key == "agency":
toptier_agency = ToptierAgency.objects.filter(agency__id=value, agency__toptier_flag=True).first()
if toptier_agency is None:
raise InvalidParameterException("Agency ID provided does not correspond to a toptier agency")
alt_set = alt_set.filter(treasury_account__funding_toptier_agency=toptier_agency)
# All other _type
else:
# budget_function
if key == "budget_function":
queryset = queryset.filter(treasury_account__budget_function_code=value)
# budget_subfunction
elif key == "budget_subfunction":
queryset = queryset.filter(treasury_account__budget_subfunction_code=value)
# federal_account
elif key == "federal_account":
queryset = queryset.filter(treasury_account__federal_account=value)
# program_activity
elif key == "program_activity":
queryset = queryset.filter(program_activity=value)
# object_class
elif key == "object_class":
queryset = queryset.filter(object_class__major_object_class=value)
# recipient
elif key == "recipient":
queryset = queryset.filter(
treasury_account__in=alt_set.filter(
Q(award__latest_transaction__contract_data__awardee_or_recipient_legal=value)
| Q(award__latest_transaction__assistance_data__awardee_or_recipient_legal=value)
).values_list("treasury_account_id", flat=True)
)
# award, award_category
elif key == "award" or key == "award_category":
queryset = queryset.filter(
treasury_account__in=alt_set.filter(award__id=value).values_list("treasury_account_id", flat=True)
)
# agency
elif key == "agency":
toptier_agency = ToptierAgency.objects.filter(agency__id=value, agency__toptier_flag=True).first()
if toptier_agency is None:
raise InvalidParameterException("Agency ID provided does not correspond to a toptier agency")
queryset = queryset.filter(treasury_account__funding_toptier_agency=toptier_agency)
return alt_set, queryset
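# Example `filters` dict accepted by spending_filter (values are illustrative):
#
#     filters = {"budget_function": "050", "object_class": "20"}
#     alt_set, queryset = spending_filter(alt_set, queryset, filters, "object_class")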
|
489041
|
from .linear import Linear
from .conv import Conv1d, Conv2d, Conv3d
__all__ = [
'Linear',
'Conv1d',
'Conv2d',
'Conv3d',
]
|
489044
|
import time
import Adafruit_ADS1x15
import RPi.GPIO as GPIO
LED =14
GPIO.setmode(GPIO.BCM)
GPIO.setup(LED,GPIO.OUT)
adc = Adafruit_ADS1x15.ADS1115()
GAIN = 1
channel=0
adc.start_adc(channel, gain=GAIN)
try:
    while True:
        value = adc.get_last_result()
        print(str(value))
        time.sleep(0.1)
        if value >= 100:
            GPIO.output(LED, 1)
        else:
            GPIO.output(LED, 0)
finally:
    # stop continuous ADC sampling and release the GPIO pin on exit (e.g. Ctrl+C)
    adc.stop_adc()
    GPIO.cleanup()
|
489060
|
import csv
import os
import pickle
import json
from itertools import chain
from shapely.geometry.polygon import Polygon
import threading
import shutil
import time
import requests
from pathlib import Path
class TileDownloader(object):
"""
Class to download tiles from the openNRW web server in a multi-threaded fashion.
Attributes
----------
tile_dir : Path
Path to directory where all the downloaded tiles are saved.
downloaded_path : Path
Specifies the path to the document which saves all the tiles by their minx, miny, maxx, maxy coordinates which were successfully downloaded.
not_downloaded_path : Path
Specifies the path to the document which saves all the tiles by their minx, miny, maxx, maxy coordinates which were **not** successfully downloaded.
WMS_1 : str
Initial URL stub for requests to the openNRW server.
WMS_2 : str
Final URL stub for requests to the openNRW server.
NUM_THREADS : int
Number of threads used to simultaneously download tiles from the openNRW server.
"""
def __init__(self, configuration, polygon, tile_coords):
"""
        Sets instance variables and starts the downloading process in a multi-threaded fashion.
Parameters
----------
configuration : dict
config.yml in dict format.
polygon : shapely.geometry.polygon.Polygon
Geo-referenced polygon geometry for the selected county within NRW.
tile_coords : list
List of tuples where each tuple specifies its respective tile by minx, miny, maxx, maxy.
"""
self.polygon = polygon
self.tile_coords = tile_coords
self.tile_dir = Path(configuration['tile_dir'])
self.downloaded_path = Path(f"logs/downloading/{configuration.get('county4analysis')}_downloadedTiles.csv")
self.not_downloaded_path = Path(f"logs/downloading/{configuration.get('county4analysis')}_notDownloadedTiles.csv")
# URL dummy for image request from open NRW server
self.WMS_1 = 'https://www.wms.nrw.de/geobasis/wms_nw_dop?SERVICE=WMS&REQUEST=GetMap&Version=1.1.1&LAYERS=nw_dop_rgb&SRS=EPSG:4326&BBOX='
self.WMS_2 = '&WIDTH=4800&HEIGHT=4800&FORMAT=image/png;%20mode=8bit'
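        # Example of a fully assembled request URL (bounding box values are
        # illustrative): self.WMS_1 + '7.0,51.0,7.01,51.01' + self.WMS_2, i.e.
        # https://www.wms.nrw.de/geobasis/wms_nw_dop?SERVICE=WMS&REQUEST=GetMap&Version=1.1.1&LAYERS=nw_dop_rgb&SRS=EPSG:4326&BBOX=7.0,51.0,7.01,51.01&WIDTH=4800&HEIGHT=4800&FORMAT=image/png;%20mode=8bit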
self.NUM_THREADS = 4
download_threads = []
for num in range(0, self.NUM_THREADS):
            # download_threads is a list of Thread objects; each thread runs
            # self.download with its arguments passed in as a tuple
download_threads.append(threading.Thread(target=self.download, args=(self.tile_coords, num,)))
for num in range(0, self.NUM_THREADS):
# Iterate over all thread objects and start them
download_threads[num].start()
print('Successfully started thread ' + str(num))
for t in download_threads:
            # Wait for every download thread to finish.
t.join()
def download(self, Tile_coords, threadCounter):
"""
Download tiles from openNRW's web servers.
Parameters
----------
Tile_coords : list
List of tuples. Each tuple specifies a to be downloaded tile by its minx, miny, maxx, maxy.
threadCounter : int
ID to distinguish between the different threads working in parallel.
Returns
-------
"""
for index, tile in enumerate(Tile_coords):
if index % self.NUM_THREADS == threadCounter:
minx, miny, maxx, maxy = tile
# cease when disk space not enough
while 1:
st = os.statvfs(self.tile_dir)
# f_frsize − fundamental file system block size.
# f_bavail − free blocks available to non-super user.
# Storage capacity can be calculated by multiplying number of free blocks * block size
# Remember: 1 KB are 1024 Bytes. Hence, 1 MB are 1024*1024 Bytes
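                    # Worked example (illustrative numbers, not openNRW specifics):
                    # 262144 free blocks * 4096 B per block = 1073741824 B = 1024 MiB.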
remain_capacity = st.f_bavail * st.f_frsize / 1024 / 1024
# if remain_capacity is larger than 1 GB, continue downloading tiles
if remain_capacity > 1024:
break
print(str(threadCounter), ": Disk space not enough!")
try:
current_save_path = os.path.join(self.tile_dir, str(minx) + ',' + str(miny) + ',' + str(maxx) + ',' + str(maxy) + '.png')
# Specify URL from which we download our tile
url = os.path.join(self.WMS_1 + str(minx) + ',' + str(miny) + ',' + str(maxx) + ',' + str(maxy) + self.WMS_2)
# Download tile imagery from URL
response = requests.get(url, stream=True)
# Save downloaded file under current_save_path
with open(current_save_path, 'wb') as out_file:
response.raw.decode_content = True
shutil.copyfileobj(response.raw, out_file)
del response
# This line will execute only after the whole tile has been downloaded
# Once the tile is completely downloaded, we add a 'COMPLETE' string at
# the end of its path to signal the Tile_Processing script which tiles
# are ready to be processed
os.rename(current_save_path, os.path.join(current_save_path[:-4] + ',COMPLETE.png'))
with open(Path(self.downloaded_path), "a") as csvFile:
writer = csv.writer(csvFile, lineterminator="\n")
writer.writerow([str(tile)])
# Only tiles that weren't fully downloaded are saved subsequently
except:
with open(Path(self.not_downloaded_path),"a") as csvFile:
writer = csv.writer(csvFile, lineterminator="\n")
writer.writerow([str(tile)])
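# Usage sketch (not part of the original module): the values below are placeholders
# chosen to illustrate the constructor's expected inputs, not real openNRW settings.
# Instantiating TileDownloader immediately spawns NUM_THREADS download threads and
# writes progress logs under logs/downloading/, so the call itself stays commented out.
if __name__ == "__main__":
    example_config = {"tile_dir": "tiles", "county4analysis": "ExampleCounty"}
    example_polygon = Polygon([(6.0, 50.7), (6.2, 50.7), (6.2, 50.9), (6.0, 50.9)])
    example_tile_coords = [(6.00, 50.70, 6.01, 50.71), (6.01, 50.70, 6.02, 50.71)]
    # TileDownloader(example_config, example_polygon, example_tile_coords)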
|
489079
|
import math
try:
import cupy as xp
except ImportError:
import numpy as xp
from scipy.signal import firwin, kaiserord, windows
class FIR:
def __init__(self, num_chs, L_x, h, D=1):
assert num_chs >= 1, 'Filter must have at least one input channel'
assert h.ndim == 1, 'Impulse response must be a 1D array'
assert len(h) - 1 >= 0, 'FIR Order must be at least 0'
assert D >= 1, 'Decimation factor must be at least 1'
assert (len(h) - 1) % D == 0, (
'FIR order must be a multiple of the decimation factor')
assert L_x >= 1, 'Input block length must be at least 1'
assert L_x % D == 0, (
'Input block length must be a multiple of the decimation factor')
self._num_chs = num_chs
self._L_x = L_x
self._D = D
self._overlap_samples = xp.zeros((num_chs, len(h) - 1), dtype=complex)
self._L_ifft = (L_x + len(h) - 1) // D
self._L_transient = (len(h) - 1) // D
self._H = xp.fft.fft(h, n=(L_x + len(h) - 1))
def push(self, x):
x = xp.concatenate((self._overlap_samples, x), axis=1)
X = xp.fft.fft(x)
Y = X * self._H
Y = Y.reshape(self._num_chs, -1, self._L_ifft).sum(axis=1) / self._D
y = xp.fft.ifft(Y)
y = y[:, self._L_transient :]
self._overlap_samples = x[:, self._L_x :]
return y
def firkaiser(passband, stopband, atten=60):
(L_filt, beta) = kaiserord(atten, (stopband - passband) / (2 * math.pi))
h = firwin(L_filt, passband / 2, window=('kaiser', beta), nyq=math.pi)
h = xp.asarray(h)
return h
def firgauss(stopband, order, atten=60):
assert stopband > 0, 'Stopband must be greater than 0'
assert order >= 0, 'Order must be at least 0'
assert atten > 0, 'Attenuation at stopband must be greater than 0'
std = 2 * math.sqrt(atten / 10 * math.log(10)) / stopband
h = xp.asarray(windows.gaussian(order + 1, std))
h /= h.sum()
return h
def gauss_rise_time(h):
low = 0.01
high = 0.99
assert len(h) - 1 >= 0, 'FIR Order must be at least 0'
step_resp = h.cumsum()
step_resp /= step_resp[-1]
rise_t = int(xp.abs(step_resp - high).argmin() -
xp.abs(step_resp - low).argmin())
return rise_t
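# Usage sketch (not part of the original module): filter and decimate one block of a
# two-channel complex signal. The block length (64), decimation factor (4) and Gaussian
# FIR order (16, a multiple of the decimation factor) are arbitrary example values.
if __name__ == '__main__':
    h_example = firgauss(stopband=0.5, order=16)
    fir_example = FIR(num_chs=2, L_x=64, h=h_example, D=4)
    x_example = xp.ones((2, 64), dtype=complex)
    y_example = fir_example.push(x_example)
    print(y_example.shape)  # (2, 16): one decimated output block per channel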
|
489080
|
import os
import subprocess
from tests.framework.base_command_test_case import BaseCommandTestCase
from tests.framework.context_manager import cd
class TestInit(BaseCommandTestCase):
# This is a portion of the postinstall script we generate that should always be there only once, since
# our script should never write itself more than once.
should_exist_once_in_postinstall = "fs=require('fs');if(fs.existsSync(d)===false){fs.symlinkSync(s,d,'dir')}"
should_exist_once_in_build = "bsb -make-world"
name = "reason-package"
directory = "src/myCode"
def test_steps_pass(self):
with cd('./tests/root_for_testing'):
result = self.call("node", "../../index.js", "setup", self.directory, self.name)
self.assertTrue(result, 'Standard setup call did not pass successfully.')
def test_steps_pass_no_linking_flag(self):
with cd('./tests/root_for_testing'):
result = self.call("node", "../../index.js", "setup", self.directory, self.name, "--no-linking")
self.assertTrue(result, 'Standard --no-linking setup call did not pass successfully.')
def test_steps_pass_in_source_flag(self):
with cd('./tests/root_for_testing'):
result = self.call("node", "../../index.js", "setup", self.directory, self.name, "--in-source")
self.assertTrue(result, 'Standard --in-source setup call did not pass successfully.')
def test_bsconfig_was_created(self):
with cd('./tests/root_for_testing'):
self.call("node", "../../index.js", "setup", self.directory, self.name)
exists = os.path.isfile('bsconfig.json')
self.assertTrue(exists, 'bsconfig.json was never created')
def test_merlin_was_created(self):
with cd('./tests/root_for_testing'):
self.call("node", "../../index.js", "setup", self.directory, self.name)
exists = os.path.isfile('.merlin')
self.assertTrue(exists, '.merlin was never created')
### postinstall testing
def test_postinstall_was_correctly_added_to_package_file_with_no_scripts(self):
with cd('./tests/root_for_testing'):
self.call("cp", "package.no_scripts.json", "package.json")
self.call("node", "../../index.js", "setup", self.directory, self.name)
contents = self.read_json('package.json')
self.assertIn('scripts', contents, 'Could not find scripts key in the package.json file')
self.assertIn('postinstall', contents['scripts'], 'Could not find postinstall key in the package.json scripts key')
postinstall_script = contents['scripts']['postinstall']
self.assertEqual(postinstall_script.count(self.should_exist_once_in_postinstall), 1, 'Found more than one instance of our postinstall script')
def test_postinstall_was_correctly_added_to_package_file_with_empty_scripts(self):
with cd('./tests/root_for_testing'):
self.call("cp", "package.empty_scripts.json", "package.json")
self.call("node", "../../index.js", "setup", self.directory, self.name)
contents = self.read_json('package.json')
self.assertIn('scripts', contents, 'Could not find scripts key in the package.json file')
self.assertIn('postinstall', contents['scripts'], 'Could not find postinstall key in the package.json scripts key')
postinstall_script = contents['scripts']['postinstall']
self.assertEqual(postinstall_script.count(self.should_exist_once_in_postinstall), 1, 'Found more than one instance of our postinstall script')
def test_postinstall_was_correctly_added_to_package_file_with_other_script(self):
with cd('./tests/root_for_testing'):
self.call("cp", "package.other_script.json", "package.json")
self.call("node", "../../index.js", "setup", self.directory, self.name)
contents = self.read_json('package.json')
self.assertIn('scripts', contents, 'Could not find scripts key in the package.json file')
self.assertIn('postinstall', contents['scripts'], 'Could not find postinstall key in the package.json scripts key')
postinstall_script = contents['scripts']['postinstall']
self.assertEqual(postinstall_script.count(self.should_exist_once_in_postinstall), 1, 'Found more than one instance of our postinstall script')
def test_postinstall_was_correctly_added_to_package_file_with_other_postinstall(self):
with cd('./tests/root_for_testing'):
self.call("cp", "package.other_postinstall.json", "package.json")
self.call("node", "../../index.js", "setup", self.directory, self.name)
contents = self.read_json('package.json')
self.assertIn('scripts', contents, 'Could not find scripts key in the package.json file')
self.assertIn('postinstall', contents['scripts'], 'Could not find postinstall key in the package.json scripts key')
postinstall_script = contents['scripts']['postinstall']
self.assertIn(' && ', postinstall_script, 'Our postinstall script was probably not integrated correctly with an existing postinstall')
self.assertEqual(postinstall_script.count(self.should_exist_once_in_postinstall), 1, 'Found more than one instance of our postinstall script')
def test_postinstall_was_correctly_added_to_package_file_with_no_scripts_called_twice(self):
with cd('./tests/root_for_testing'):
self.call("node", "../../index.js", "setup", self.directory, self.name)
self.call("node", "../../index.js", "setup", self.directory, self.name)
contents = self.read_json('package.json')
self.assertIn('scripts', contents, 'Could not find scripts key in the package.json file')
self.assertIn('postinstall', contents['scripts'], 'Could not find postinstall key in the package.json scripts key')
postinstall_script = contents['scripts']['postinstall']
self.assertEqual(postinstall_script.count(self.should_exist_once_in_postinstall), 1, 'Found more than one instance of our postinstall script')
### build command testing
def test_build_command_was_correctly_added_to_package_file_with_no_scripts(self):
with cd('./tests/root_for_testing'):
self.call("cp", "package.no_scripts.json", "package.json")
self.call("node", "../../index.js", "setup", self.directory, self.name)
contents = self.read_json('package.json')
self.assertIn('scripts', contents, 'Could not find scripts key in the package.json file')
self.assertIn('build-reason', contents['scripts'], 'Could not find build-reason key in the package.json scripts key')
build_command = contents['scripts']['build-reason']
self.assertEqual(build_command.count(self.should_exist_once_in_build), 1, 'Found more than one instance of our build script')
def test_build_command_was_correctly_added_to_package_file_with_empty_scripts(self):
with cd('./tests/root_for_testing'):
self.call("cp", "package.empty_scripts.json", "package.json")
self.call("node", "../../index.js", "setup", self.directory, self.name)
contents = self.read_json('package.json')
self.assertIn('scripts', contents, 'Could not find scripts key in the package.json file')
self.assertIn('build-reason', contents['scripts'], 'Could not find build-reason key in the package.json scripts key')
build_command = contents['scripts']['build-reason']
            self.assertEqual(build_command.count(self.should_exist_once_in_build), 1, 'Found more than one instance of our build script')
def test_build_command_was_correctly_added_to_package_file_with_other_script(self):
with cd('./tests/root_for_testing'):
self.call("cp", "package.other_script.json", "package.json")
self.call("node", "../../index.js", "setup", self.directory, self.name)
contents = self.read_json('package.json')
self.assertIn('scripts', contents, 'Could not find scripts key in the package.json file')
self.assertIn('build-reason', contents['scripts'], 'Could not find build-reason key in the package.json scripts key')
build_command = contents['scripts']['build-reason']
            self.assertEqual(build_command.count(self.should_exist_once_in_build), 1, 'Found more than one instance of our build script')
def test_build_command_was_correctly_added_to_package_file_with_other_build_command(self):
with cd('./tests/root_for_testing'):
self.call("cp", "package.other_postinstall.json", "package.json")
self.call("node", "../../index.js", "setup", self.directory, self.name)
contents = self.read_json('package.json')
self.assertIn('scripts', contents, 'Could not find scripts key in the package.json file')
self.assertIn('build-reason', contents['scripts'], 'Could not find build-reason key in the package.json scripts key')
build_command = contents['scripts']['build-reason']
            self.assertIn(' && ', build_command, 'Our build command was probably not integrated correctly with an existing build script')
            self.assertEqual(build_command.count(self.should_exist_once_in_build), 1, 'Found more than one instance of our build script')
def test_build_command_was_correctly_added_to_package_file_with_no_scripts_called_twice(self):
with cd('./tests/root_for_testing'):
self.call("node", "../../index.js", "setup", self.directory, self.name)
self.call("node", "../../index.js", "setup", self.directory, self.name)
contents = self.read_json('package.json')
self.assertIn('scripts', contents, 'Could not find scripts key in the package.json file')
self.assertIn('build-reason', contents['scripts'], 'Could not find build-reason key in the package.json scripts key')
build_command = contents['scripts']['build-reason']
            self.assertEqual(build_command.count(self.should_exist_once_in_build), 1, 'Found more than one instance of our build script')
def test_postinstall_was_not_added_to_package_file_with_in_source_flag(self):
with cd('./tests/root_for_testing'):
# This uses our default package.json copy which has a scripts key and no postinstall key
self.call("node", "../../index.js", "setup", self.directory, self.name, "--in-source")
contents = self.read_json('package.json')
self.assertNotIn('postinstall', contents['scripts'], 'Postinstall was created when it should not have been')
def test_proper_bsconfig_file_generated_with_in_source_flag(self):
with cd('./tests/root_for_testing'):
# This uses our default package.json copy which has a scripts key and no postinstall key
self.call("node", "../../index.js", "setup", self.directory, self.name, "--in-source")
contents = self.read_file('bsconfig.json')
self.assertIn('in-source', contents, 'Config file doesn\'t have `in-source` set to true')
def test_proper_bsconfig_file_generated_with(self):
with cd('./tests/root_for_testing'):
# This uses our default package.json copy which has a scripts key and no postinstall key
self.call("node", "../../index.js", "setup", self.directory, self.name)
contents = self.read_file('bsconfig.json')
self.assertNotIn('in-source', contents, 'Config file wrongly has `in-source` set to true')
def test_postinstall_was_not_added_to_package_file_with_no_linking_flag(self):
with cd('./tests/root_for_testing'):
# This uses our default package.json copy which has a scripts key and no postinstall key
self.call("node", "../../index.js", "setup", self.directory, self.name, "--no-linking")
contents = self.read_json('package.json')
self.assertNotIn('postinstall', contents['scripts'], 'Postinstall was created when it should not have been')
def test_config_was_not_created_when_given_bad_target(self):
with cd('./tests/root_for_testing'):
self.call("node", "../../index.js", "setup", 'some/bad/target', self.name)
existsMerlin = self.exists('.merlin')
existsBsconfig = self.exists('bsconfig.json')
self.assertFalse(existsMerlin, '.merlin file was created even though we gave a bad target')
self.assertFalse(existsBsconfig, 'bsconfig.json file was created even though we gave a bad target')
def test_config_was_not_created_when_given_bad_target_no_linking(self):
with cd('./tests/root_for_testing'):
self.call("node", "../../index.js", "setup", 'some/bad/target', self.name, "--no-linking")
existsMerlin = self.exists('.merlin')
existsBsconfig = self.exists('bsconfig.json')
self.assertFalse(existsMerlin, '.merlin file was created even though we gave a bad target')
self.assertFalse(existsBsconfig, 'bsconfig.json file was created even though we gave a bad target')
def test_postinstall_was_not_created_when_given_bad_target(self):
with cd('./tests/root_for_testing'):
self.call("cp", "package.other_postinstall.json", "package.json")
contents = self.read_json('package.json')
postinstall_before = contents['scripts']['postinstall']
self.call("node", "../../index.js", "setup", 'some/bad/target', self.name)
contents = self.read_json('package.json')
postinstall_after = contents['scripts']['postinstall']
self.assertEqual(postinstall_before, postinstall_after, 'The postinstall script was altered even though we gave bad target')
def tearDown(self):
with cd('./tests/root_for_testing'):
self.call("rm", "-f", "bsconfig.json")
self.call("rm", "-f", ".merlin")
self.call("rm", "-rf", "lib")
self.call("rm", "-f", "node_modules/reason-package")
self.call("cp", "package.empty_scripts.json", "package.json")
|
489089
|
import json
import logging
from os import getenv
from typing import Dict, List
from datetime import datetime, timedelta
from terrasnek.api import TFC
from .run import TFRun, parse_tf_runs
logger = logging.getLogger(__name__)
ORG = 'org'
WORKSPACE = 'workspace'
TERRAFORM_SECTION = 'terraform'
TERRAFORM_URL = 'url'
TERRAFORM_SERVICE_ACCOUNT_WORKSPACE_MAP = 'service_workspace_map'
TERRAFORM_WORKSPACE_TOKEN_FILE = 'workspace_token_file'
ENV_TERRAFORM_WORKSPACE_TOKEN_FILE = "TERRAFORM_READ_TOKENS"
class TFQuery:
"""
Query for terraform runs generating changes
"""
# TODO: add checking of map format
def __init__(self, tf_url: str, sa_org_workspace_map: Dict[str, Dict[str, str]], org_token_map: Dict[str, str]):
"""
:param tf_url: Terraform instance URL
:param sa_org_workspace_map: Map of GCP Service Account to TF organization/workspace
:param org_token_map: Map of TF organization to TF team token
"""
self.tf_url = tf_url
self.sa_org_workspace_map = sa_org_workspace_map
self.org_token_map = org_token_map
def __get_api(self, gcp_sa: str) -> TFC:
"""
Connect to TF API object for GCP Service account
:param gcp_sa: GCP Service Account which performed change
:raises TFQueryError
:return: Terraform API object
"""
if gcp_sa not in self.sa_org_workspace_map:
raise TFQueryError(f"Unknown GCP ServiceAccount {gcp_sa}")
tf_org = self.sa_org_workspace_map[gcp_sa][ORG]
tf_token = self.org_token_map[tf_org]
tf_api = TFC(tf_token, url=self.tf_url)
tf_api.set_org(tf_org)
return tf_api
def get_runs(self, gcp_sa: str,
run_limit: int = 10,
change_time: datetime = None,
time_window: int = 30) -> List[TFRun]:
"""
Get TF runs for given GCP Service account at given point in time
:param gcp_sa: GCP Service Account which performed change
:param run_limit: Number of TF run limits to get
:param change_time: point in time from which to look for change
:param time_window: size of time window to look for change (in minutes)
:return:
"""
logger.info(f"Getting TF {run_limit} last runs for workspace connected {gcp_sa}")
tf_api = self.__get_api(gcp_sa)
tf_workspace_names = self.sa_org_workspace_map[gcp_sa][WORKSPACE]
tf_runs = []
for workspace in tf_workspace_names:
try:
logger.info(f"Getting workspace_id for workspace name {workspace}")
workspace_response = tf_api.workspaces.show(workspace_name=workspace)
tf_workspace_id = workspace_response["data"]["id"]
except Exception as e:
raise TFQueryError(f"Issue getting workspace ID for workspace {workspace}") from e
try:
logger.info(f"Getting {run_limit} TF runs for workspace ID {tf_workspace_id}")
runs_response = tf_api.runs.list(tf_workspace_id, page_size=run_limit, include=["created-by"])
tf_runs += parse_tf_runs(runs_response, tf_api.get_org(), workspace)
except Exception as e:
raise TFQueryError(f"Issue getting terraform runs") from e
if not change_time:
change_time = datetime.utcnow()
start_time = (change_time - timedelta(minutes=time_window))
tf_runs = [tf_run for tf_run in tf_runs if tf_run.apply_time >= start_time]
return tf_runs
def configure_tfquery(config: Dict) -> TFQuery:
"""
    Generates a TFQuery object from the configuration file
:param config: configuration
:return: TFQuery object with parsed config
"""
#TODO Add handling of raised exception
if config is None:
logger.info("No terraform configuration found. Skipping TF querying for additional info")
return None
if not isinstance(config, dict):
raise TFQueryConfigurationError(f"Incorrect configuration type. Should be dict is {type(config)}")
if TERRAFORM_URL not in config:
raise TFQueryConfigurationError(f"Missing required key: {TERRAFORM_URL}")
if TERRAFORM_SERVICE_ACCOUNT_WORKSPACE_MAP not in config:
raise TFQueryConfigurationError(f"Missing required key: {TERRAFORM_SERVICE_ACCOUNT_WORKSPACE_MAP}")
# Verify url
url = config[TERRAFORM_URL]
if not isinstance(url, str):
raise TFQueryConfigurationError(f"Incorrect value for {TERRAFORM_URL}. Should be str is {type(url)}")
# Verify service account workspace mapping structure
sa_workspace_map = config[TERRAFORM_SERVICE_ACCOUNT_WORKSPACE_MAP]
if not isinstance(sa_workspace_map, dict):
raise TFQueryConfigurationError(f"Incorrect configuration type in {TERRAFORM_SERVICE_ACCOUNT_WORKSPACE_MAP}. "
f"Should be dict is {type(sa_workspace_map)}")
for key, value in sa_workspace_map.items():
if not isinstance(key, str):
raise TFQueryConfigurationError(f"Incorrect entry type for {key}. Should be str is {type(key)}")
if not isinstance(value, dict):
raise TFQueryConfigurationError(f"Incorrect entry type for {value}. Should be dict in {type(key)}")
if ORG not in value or WORKSPACE not in value:
raise TFQueryConfigurationError(f"Missing one of required keys: {ORG}, {WORKSPACE} "
f"in {key}")
if not isinstance(value[ORG], str):
raise TFQueryConfigurationError(f"Incorrect value type for {key} {ORG}. "
f"Should be str is {type(value[ORG])}")
if not isinstance(value[WORKSPACE], list):
raise TFQueryConfigurationError(f"Incorrect value type for {key} {WORKSPACE}. "
f"Should be list is {type(value[WORKSPACE])}")
# Verify token file
token_file = config.get(TERRAFORM_WORKSPACE_TOKEN_FILE, getenv(ENV_TERRAFORM_WORKSPACE_TOKEN_FILE))
if token_file is None:
raise TFQueryConfigurationError(f"No token file specified in configuration file and no "
f"env var set with file location")
if not isinstance(token_file, str):
raise TFQueryConfigurationError(f"Incorrect value for {TERRAFORM_WORKSPACE_TOKEN_FILE}. "
f"Should be str, is {type(token_file)}")
try:
with open(token_file) as fh:
token_map = json.load(fh)
except Exception as e:
raise TFQueryConfigurationError(f"Issue opening token file {TERRAFORM_WORKSPACE_TOKEN_FILE}") from e
if not isinstance(token_map, dict):
raise TFQueryConfigurationError(f"Incorrect token file configuration {TERRAFORM_WORKSPACE_TOKEN_FILE} "
f"Should be dict is {type(token_map)}")
for key, value in token_map.items():
if not isinstance(key, str):
raise TFQueryConfigurationError(f"Incorrect configuration in token file. Workspace names should be string")
if not isinstance(value, str):
raise TFQueryConfigurationError(f"Incorrect configuration in token file. Tokens should be string")
return TFQuery(tf_url=url, sa_org_workspace_map=sa_workspace_map, org_token_map=token_map)
class TFQueryError(Exception):
pass
class TFQueryConfigurationError(TFQueryError):
pass
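# Usage sketch (not part of the original module): construct a TFQuery directly with
# placeholder org/workspace names and a placeholder token, then query recent runs.
# The get_runs() call is left commented out because it would hit the Terraform API.
if __name__ == "__main__":
    example_query = TFQuery(
        tf_url="https://app.terraform.io",
        sa_org_workspace_map={
            "ci-sa@example-project.iam.gserviceaccount.com": {ORG: "example-org", WORKSPACE: ["example-workspace"]},
        },
        org_token_map={"example-org": "<team token>"},
    )
    # example_query.get_runs("ci-sa@example-project.iam.gserviceaccount.com", run_limit=5)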
|
489136
|
from typing import Dict, List, Tuple, Optional
import torch
import torch.nn.functional as F
from classify.metric.abstract import AlignmentMetric
from classify.metric.abstract import Metric
from utils.utils import prod
class BinaryCrossEntropyLoss(Metric):
"""Computes the hinge loss between aligned and un-aligned document pairs (for AskUbuntu).
For each document, the loss is sum_ij |negative_similarity_i - positive_similarity_j + margin|
i.e. sum over all positive/negative pairs
"""
def __init__(
self,
weight: Optional[torch.Tensor] = None,
ignore_index: Optional[int] = None,
reduction: str = "mean",
) -> None:
"""Initialize the MultiLabelNLLLoss.
Parameters
----------
weight : Optional[torch.Tensor]
A manual rescaling weight given to each class.
If given, has to be a Tensor of size N, where N is the
number of classes.
ignore_index : Optional[int], optional
Specifies a target value that is ignored and does not
contribute to the input gradient. When size_average is
True, the loss is averaged over non-ignored targets.
reduction : str, optional
Specifies the reduction to apply to the output:
'none' | 'mean' | 'sum'.
'none': no reduction will be applied,
'mean': the output will be averaged
'sum': the output will be summed.
"""
super(BinaryCrossEntropyLoss, self).__init__()
self.weight = weight
self.ignore_index = ignore_index
self.reduction = reduction
def compute(
self, logits: torch.Tensor, targets: torch.Tensor, step: int = 4
) -> torch.Tensor:
"""Computes the Negative log likelihood loss for multilabel.
Parameters
----------
        logits: list
            list of (cost, alignment) tensor pairs; each pair is reduced to a
            single logit via sum(cost * alignment)
        targets: list
            list of dicts, each carrying a "targets" sequence of binary labels
        Returns
        -------
        loss: torch.float
            scalar binary cross entropy loss
"""
targets = [t for target in targets for t in target["targets"]]
targets = torch.stack(targets).float()
logits = torch.stack(
[torch.sum(cost * alignment) for cost, alignment in logits]
)
if self.ignore_index is not None:
targets[:, self.ignore_index] = 0
# if self.weight is None:
# self.weight = torch.ones(logits.size(1)).to(logits)
loss = F.binary_cross_entropy_with_logits(
logits, targets
) # , weight=self.weight, ignore_index=self.ignore_index, reduction=self.reduction)
return loss
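# Usage sketch (not part of the original module; shapes are illustrative and assume the
# Metric base class needs no extra constructor arguments): `logits` is a list of
# (cost, alignment) tensor pairs and `targets` is a list of dicts holding a "targets"
# sequence of binary labels, mirroring what compute() unpacks above.
if __name__ == "__main__":
    example_logits = [
        (torch.randn(3, 4), torch.rand(3, 4)),
        (torch.randn(3, 4), torch.rand(3, 4)),
    ]
    example_targets = [{"targets": [torch.tensor(1.0), torch.tensor(0.0)]}]
    example_loss = BinaryCrossEntropyLoss().compute(example_logits, example_targets)
    print(example_loss.item())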
|
489148
|
import argparse
from collections import defaultdict
from rck.core.graph import construct_hiag_inflate_from_haploid_data
from rck.core.io import read_scnt_from_source, read_acnt_from_source, read_scnb_from_source, read_adjacency_groups_from_source, read_positions_from_source, get_logging_cli_parser, \
get_standard_logger_from_args, EXTERNAL_NA_ID
from rck.core.structures import get_ref_telomeres_from_segments, AdjacencyType, AdjacencyGroupType, Segment
from rck.utils.karyotype.analysis import adjacency_groups_molecule_violations, adjacency_groups_labeling_violations, adjacency_groups_general_violations
def main():
parser = argparse.ArgumentParser(prog="RCK-UTILS-KAR-stats", parents=[get_logging_cli_parser()])
parser.add_argument("--verbose", choices=[0, 1, 2, 3, 4, 5], type=int, default=5)
parser.add_argument("--acnt", required=True, type=argparse.FileType("rt"))
parser.add_argument("--acnt-separator", default="\t")
parser.add_argument("--acnt-extra-separator", default=";")
parser.add_argument("--scnt", required=True, type=argparse.FileType("rt"))
parser.add_argument("--scnt-separator", default="\t")
parser.add_argument("--scnt-extra-separator", default=";")
parser.add_argument("--scnb", type=argparse.FileType("rt"))
parser.add_argument("--scnb-separator", default="\t")
parser.add_argument("--scnb-extra-separator", default=";")
parser.add_argument("--nas-fp", type=float, default=-1.0)
parser.add_argument("--adjacency-groups", type=argparse.FileType("rt"))
parser.add_argument("--adg-separator", default="\t")
parser.add_argument("--adg-aids-separator", default=",")
parser.add_argument("--adg-extra-separator", default=";")
parser.add_argument("--telomere-positions", type=argparse.FileType("rt"))
parser.add_argument("--telomere-positions-separator", default="\t")
parser.add_argument("--telomere-positions-extra-separator", default=";")
args = parser.parse_args()
logger = get_standard_logger_from_args(args=args, program_name="RCK-UTILS-KAR-stats")
logger.info("Reading segment copy number tensor from {file}".format(file=args.scnt))
segments, scnt = read_scnt_from_source(source=args.scnt, separator=args.scnt_separator, extra_separator=args.scnt_extra_separator, remove_cn_data_from_segs=True)
logger.info("Reading adjacency copy number tensor from {file}".format(file=args.acnt))
adjacencies, acnt = read_acnt_from_source(source=args.acnt, separator=args.acnt_separator, extra_separator=args.acnt_extra_separator, remove_cn_data_from_adj=True)
if args.scnb is not None:
logger.info("Reading segment copy number boundaries tensor from {file}".format(file=args.scnb))
_, scnb = read_scnb_from_source(source=args.scnb, separator=args.scnb_separator, extra_separator=args.scnb_extra_separator, remove_cnb_data_from_segs=True)
else:
logger.info("No segment copy number boundaries tensor is provided via --scnb flag")
scnb = None
if args.adjacency_groups is not None:
logger.info("Reading adjacency groups information from {file}".format(file=args.adjacency_groups))
groups = read_adjacency_groups_from_source(source=args.adjacency_groups, separator=args.adg_separator,
extra_separator=args.adg_extra_separator, aids_separator=args.adg_aids_separator)
else:
logger.info("No adjacency groups information is provided via --adjacency-groups flag")
groups = []
if args.telomere_positions is not None:
logger.info("Reading telomere positions from {file}".format(file=args.telomere_positions))
        telomeres = read_positions_from_source(source=args.telomere_positions, separator=args.telomere_positions_separator,
                                               extra_separator=args.telomere_positions_extra_separator)
else:
logger.info("No telomere positions are provided via --telomere-positions flag. Defaulting to reference telomere positions".format(file=args.telomere_positions))
telomeres = get_ref_telomeres_from_segments(segments=segments)
segments_by_chrs = defaultdict(list)
for segment in segments:
segments_by_chrs[segment.chromosome].append(segment)
print("A total of {cnt} chromosomes are observed".format(cnt=len(segments_by_chrs)))
total_segments_cnt = 0
for chr_name, chr_segments in segments_by_chrs.items():
total_segments_cnt += len(chr_segments)
if args.verbose >= 3:
print("Chromosome {chr_name} has {cnt} segments".format(chr_name=chr_name, cnt=len(chr_segments)))
print("A total of {cnt} segments are observed".format(cnt=total_segments_cnt))
novel_adjacencies = [adj for adj in adjacencies if adj.adjacency_type == AdjacencyType.NOVEL]
reference_adjacencies = [adj for adj in adjacencies if adj.adjacency_type == AdjacencyType.REFERENCE]
print("A total of {cnt} adjacencies ({n_cnt} novel; {r_cnt} reference)".format(cnt=len(novel_adjacencies) + len(reference_adjacencies),
n_cnt=len(novel_adjacencies), r_cnt=len(reference_adjacencies)))
adjacencies_by_external_ids = {adj.extra.get(EXTERNAL_NA_ID, adj.stable_id_non_phased): adj for adj in adjacencies}
if groups is not None:
for ag in groups:
ag.populate_adjacencies_via_ids(source=adjacencies, source_by_ids=adjacencies_by_external_ids)
molecule_groups = [ag for ag in groups if ag.group_type == AdjacencyGroupType.MOLECULE]
labeling_groups = [ag for ag in groups if ag.group_type == AdjacencyGroupType.LABELING]
general_groups = [ag for ag in groups if ag.group_type == AdjacencyGroupType.GENERAL]
if len(molecule_groups) > 0:
logger.info("Checking compliance with {cnt} molecule groups".format(cnt=len(molecule_groups)))
molecule_groups_violations = adjacency_groups_molecule_violations(groups=molecule_groups, acnt=acnt)
if len(molecule_groups_violations):
logger.error("A total of {cnt} molecule groups DO NOT agree with input karyotype. See molecule groups ids below".format(cnt=len(molecule_groups)))
logger.error(", ".join([ag.gid for ag in molecule_groups_violations]))
else:
logger.info("All molecule groups agree with input karyotype")
else:
logger.info("No molecule groups were provided. Nothing to check.")
if len(labeling_groups) > 0:
logger.info("Checking compliance with {cnt} labeling groups".format(cnt=len(labeling_groups)))
labeling_groups_violations = adjacency_groups_labeling_violations(groups=labeling_groups, acnt=acnt)
if len(labeling_groups_violations):
logger.error("A total of {cnt} labeling groups DO NOT agree with input karyotype. See labeling groups ids below".format(cnt=len(labeling_groups_violations)))
logger.error(", ".join([ag.gid for ag in labeling_groups_violations]))
else:
logger.info("All labeling groups agree with input karyotype")
else:
logger.info("No labeling groups were provided. Nothing to check.")
if len(general_groups) > 0:
logger.info("Checking compliance with {cnt} general groups".format(cnt=len(general_groups)))
general_groups_violations = adjacency_groups_general_violations(groups=general_groups, acnt=acnt)
if len(general_groups_violations):
logger.error("A total of {cnt} general groups DO NOT agree with input karyotype. See general groups ids below".format(cnt=len(general_groups_violations)))
logger.error(", ".join([ag.gid for ag in general_groups_violations]))
else:
logger.info("All general groups agree with input karyotype")
else:
logger.info("No information about adjacency groups were provided. Nothing to check.")
clone_ids = sorted(set(scnt.keys()) & set(acnt.keys()))
for clone_id in clone_ids:
logger.info("Checking balancing and telomeres for clone {clone_id}".format(clone_id=clone_id))
hiag = construct_hiag_inflate_from_haploid_data(hapl_segments=segments, hapl_adjacencies=adjacencies)
scnp = scnt[clone_id]
acnp = acnt[clone_id]
hiag.assign_copy_numbers_from_scn_profile(scn_profile=scnp)
hiag.assign_copy_numbers_from_acn_profile(acn_profile=acnp)
hiag.remove_edges_with_zero_cn()
logger.info("Checking that every vertex has a copy number excess >= 0.")
for node in hiag.nodes(data=False):
if hiag.node_imbalance(node=node) < 0:
logger.warning("Something went WRONG! On segment extremity {node} there is a negative copy number excess...".format(node=str(node)))
logger.info("Getting inferred telomeres.")
diploid_telomeres = hiag.get_telomeres()
inferred_hapl_telomeres_ids = {p.stable_id_non_hap for p in diploid_telomeres}
input_hapl_telomers_ids = {p.stable_id_non_hap for p in telomeres}
        if not inferred_hapl_telomeres_ids <= input_hapl_telomers_ids:
            logger.error("Something went WRONG! The following segment extremities were inferred as telomere sites even though they were not specified as possible telomere sites.")
logger.error(",".join(map(str, sorted(inferred_hapl_telomeres_ids - input_hapl_telomers_ids))))
else:
logger.info("Everything is OK! in clone {clone_id} all extremities have non-negative copy number excess, and inferred telomere sites concur with the input"
"".format(clone_id=clone_id))
length = 0
for u, v, data in hiag.segment_edges():
s: Segment = data["object"]
length += s.length * data["copy_number"]
logger.info(f"Total length for clone {clone_id} = {length}")
chromosome_cnt = sum(hiag.node_imbalance(node) for node in hiag.nodes(data=False)) / 2
logger.info(f"Total number of chromosomes in clone {clone_id} = {chromosome_cnt}")
if __name__ == "__main__":
main()
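# Example invocation (a sketch, assuming this file is saved as rck_kar_stats.py;
# all input file names below are placeholders, the flags mirror the parser above):
#   python rck_kar_stats.py --scnt scnt.tsv --acnt acnt.tsv \
#       --adjacency-groups groups.tsv --telomere-positions telomeres.tsv --verbose 3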
|
489152
|
import logging
from glob import iglob
from pykwalify.core import Core
from pykwalify.errors import SchemaError
import yaml
logging.basicConfig()
def get_schema(version):
""" Load contents of schema file """
    path = 'v{}.yaml'.format(version)
    with open(path) as contents:
        return yaml.safe_load(contents)
def create_validator(source_data):
""" Generate validator from PyKwalify """
version = source_data.get('schema_version', '3.1.0')
schema = get_schema(version)
validator = Core(source_data={}, schema_data=schema)
validator.source = source_data
return validator
def test_component_validity():
""" Test component validity against the OpenControl schema """
for component_file in iglob('*/component.yaml'):
print(component_file)
        with open(component_file) as component_fh:
            source_data = yaml.safe_load(component_fh)
validator = create_validator(source_data)
try:
validator.validate(raise_exception=True)
except SchemaError:
assert False, "Error found in: {0}".format(component_file)
|
489191
|
from multiagent.environment import MultiAgentEnv
import multiagent.scenarios as scenarios
import torch
import numpy as np
from agent import DDPGAgent
from maddpg import MADDPG
from utils import MultiAgentReplayBuffer
def make_env(scenario_name, benchmark=False):
# load scenario from script
scenario = scenarios.load(scenario_name + ".py").Scenario()
# create world
world = scenario.make_world()
# create multiagent environment
if benchmark:
env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation, scenario.benchmark_data)
else:
env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation)
return env
env = make_env(scenario_name="simple_spread")
ma_controller = MADDPG(env, 1000000)
ma_controller.run(500,300,32)
|
489202
|
import ast
import types
import decimal
import unittest
a_global = 'global variable'
class TestCase(unittest.TestCase):
def assertAllRaise(self, exception_type, regex, error_strings):
for str in error_strings:
with self.subTest(str=str):
with self.assertRaisesRegex(exception_type, regex):
eval(str)
def test__format__lookup(self):
class X:
def __format__(self, spec):
return 'class'
x = X()
y = X()
y.__format__ = types.MethodType(lambda self, spec: 'instance', y)
self.assertEqual(f'{y}', format(y))
self.assertEqual(f'{y}', 'class')
self.assertEqual(format(x), format(y))
self.assertEqual(x.__format__(''), 'class')
self.assertEqual(y.__format__(''), 'instance')
self.assertEqual(type(x).__format__(x, ''), 'class')
self.assertEqual(type(y).__format__(y, ''), 'class')
def test_ast(self):
class X:
def __init__(self):
self.called = False
def __call__(self):
self.called = True
return 4
x = X()
expr = "\na = 10\nf'{a * x()}'"
t = ast.parse(expr)
c = compile(t, '', 'exec')
self.assertFalse(x.called)
exec(c)
self.assertTrue(x.called)
def test_docstring(self):
def f():
f"""Not a docstring"""
self.assertIsNone(f.__doc__)
def g():
f"""Not a docstring"""
self.assertIsNone(g.__doc__)
def test_literal_eval(self):
with self.assertRaisesRegex(ValueError, 'malformed node or string'):
ast.literal_eval("f'x'")
def test_ast_compile_time_concat(self):
x = ['']
expr = "x[0] = 'foo' f'{3}'"
t = ast.parse(expr)
c = compile(t, '', 'exec')
exec(c)
self.assertEqual(x[0], 'foo3')
def test_compile_time_concat_errors(self):
self.assertAllRaise(SyntaxError,
'cannot mix bytes and nonbytes literals', ["f'' b''", "b'' f''"])
def test_literal(self):
self.assertEqual(f'', '')
self.assertEqual(f'a', 'a')
self.assertEqual(f' ', ' ')
def test_unterminated_string(self):
self.assertAllRaise(SyntaxError, 'f-string: unterminated string', [
'f\'{"x\'', 'f\'{"x}\'', 'f\'{("x\'', 'f\'{("x}\''])
def test_mismatched_parens(self):
self.assertAllRaise(SyntaxError, 'f-string: mismatched', ["f'{((}'"])
def test_double_braces(self):
        self.assertEqual(f'{{', '{')
        self.assertEqual(f'a{{', 'a{')
        self.assertEqual(f'{{b', '{b')
        self.assertEqual(f'a{{b', 'a{b')
        self.assertEqual(f'}}', '}')
        self.assertEqual(f'a}}', 'a}')
        self.assertEqual(f'}}b', '}b')
        self.assertEqual(f'a}}b', 'a}b')
        self.assertEqual(f'{{}}', '{}')
        self.assertEqual(f'a{{}}', 'a{}')
        self.assertEqual(f'{{b}}', '{b}')
        self.assertEqual(f'{{}}c', '{}c')
        self.assertEqual(f'a{{b}}', 'a{b}')
        self.assertEqual(f'a{{}}c', 'a{}c')
        self.assertEqual(f'{{b}}c', '{b}c')
        self.assertEqual(f'a{{b}}c', 'a{b}c')
        self.assertEqual(f'{{{(10)}', '{10')
        self.assertEqual(f'}}{(10)}', '}10')
        self.assertEqual(f'}}{{{(10)}', '}{10')
        self.assertEqual(f'}}a{{{(10)}', '}a{10')
        self.assertEqual(f'{(10)}{{', '10{')
        self.assertEqual(f'{(10)}}}', '10}')
        self.assertEqual(f'{(10)}}}{{', '10}{')
        self.assertEqual(f'{(10)}}}a{{}}', '10}a{}')
self.assertEqual(f"{'{{}}'}", '{{}}')
self.assertAllRaise(TypeError, 'unhashable type', ["f'{ {{}} }'"])
def test_compile_time_concat(self):
x = 'def'
self.assertEqual(f'abc## {x}ghi', 'abc## defghi')
self.assertEqual(f'abc{x}ghi', 'abcdefghi')
self.assertEqual(f'abc{x}ghi{x:4}', 'abcdefghidef ')
        self.assertEqual(f'{{x}}{x}', '{x}def')
        self.assertEqual(f'{{x{x}', '{xdef')
        self.assertEqual(f'{{x}}{x}', '{x}def')
        self.assertEqual(f'{{{{x}}}}{x}', '{{x}}def')
        self.assertEqual(f'{{{{x{x}', '{{xdef')
        self.assertEqual(f'x}}}}{x}', 'x}}def')
        self.assertEqual(f'{x}x}}}}', 'defx}}')
self.assertEqual(f'{x}', 'def')
self.assertEqual(f'{x}', 'def')
self.assertEqual(f'{x}', 'def')
self.assertEqual(f'{x}2', 'def2')
self.assertEqual(f'1{x}2', '1def2')
self.assertEqual(f'1{x}', '1def')
self.assertEqual(f'{x}-{x}', 'def-def')
self.assertEqual(f'', '')
self.assertEqual(f'', '')
self.assertEqual(f'', '')
self.assertEqual(f'', '')
self.assertEqual(f'', '')
self.assertEqual(f'', '')
self.assertEqual(f'', '')
self.assertAllRaise(SyntaxError, "f-string: expecting '}'", [
"f'{3' f'}'"])
def test_comments(self):
d = {'#': 'hash'}
self.assertEqual(f"{'#'}", '#')
self.assertEqual(f"{d['#']}", 'hash')
self.assertAllRaise(SyntaxError,
"f-string expression part cannot include '#'", ["f'{1#}'",
"f'{3(#)}'", "f'{#}'", "f'{)#}'"])
def test_many_expressions(self):
def build_fstr(n, extra=''):
return "f'" + '{x} ' * n + extra + "'"
x = 'X'
width = 1
for i in range(250, 260):
self.assertEqual(eval(build_fstr(i)), (x + ' ') * i)
self.assertEqual(eval(build_fstr(255) * 256), (x + ' ') * (255 * 256))
s = build_fstr(253, '{x:{width}} ')
self.assertEqual(eval(s), (x + ' ') * 254)
s = "f'{1}' 'x' 'y'" * 1024
self.assertEqual(eval(s), '1xy' * 1024)
def test_format_specifier_expressions(self):
width = 10
precision = 4
value = decimal.Decimal('12.34567')
self.assertEqual(f'result: {value:{width}.{precision}}',
'result: 12.35')
self.assertEqual(f'result: {value:{width!r}.{precision}}',
'result: 12.35')
self.assertEqual(f'result: {value:{width:0}.{precision:1}}',
'result: 12.35')
self.assertEqual(f'result: {value:{(1)}{(0):0}.{precision:1}}',
'result: 12.35')
self.assertEqual(f'result: {value:{(1)}{(0):0}.{precision:1}}',
'result: 12.35')
self.assertEqual(f'{(10):#{(1)}0x}', ' 0xa')
self.assertEqual(f"{(10):{'#'}1{(0)}{'x'}}", ' 0xa')
self.assertEqual(f"{(-10):-{'#'}1{(0)}x}", ' -0xa')
self.assertEqual(f"{(-10):{'-'}#{(1)}0{'x'}}", ' -0xa')
self.assertEqual(f'{(10):#{(3 != {(4): 5} and width)}x}', ' 0xa')
self.assertAllRaise(SyntaxError, "f-string: expecting '}'", [
'f\'{"s"!r{":10"}}\''])
self.assertAllRaise(SyntaxError, 'invalid syntax', ["f'{4:{/5}}'"])
self.assertAllRaise(SyntaxError,
'f-string: expressions nested too deeply', [
"f'result: {value:{width:{0}}.{precision:1}}'"])
self.assertAllRaise(SyntaxError,
'f-string: invalid conversion character', ['f\'{"s"!{"r"}}\''])
def test_side_effect_order(self):
class X:
def __init__(self):
self.i = 0
def __format__(self, spec):
self.i += 1
return str(self.i)
x = X()
self.assertEqual(f'{x} {x}', '1 2')
def test_missing_expression(self):
self.assertAllRaise(SyntaxError,
'f-string: empty expression not allowed', ["f'{}'",
"f'{ }'f' {} '", "f'{!r}'", "f'{ !r}'", "f'{10:{ }}'",
"f' { } '", "f'''{\t\x0c\r\n}'''", "f'{!x}'", "f'{ !xr}'",
"f'{!x:}'", "f'{!x:a}'", "f'{ !xr:}'", "f'{ !xr:a}'", "f'{!}'",
"f'{:}'", "f'{!'", "f'{!s:'", "f'{:'", "f'{:x'"])
self.assertAllRaise(SyntaxError, 'invalid character in identifier',
["f'''{\xa0}'''", '\xa0'])
def test_parens_in_expressions(self):
self.assertEqual(f'{(3,)}', '(3,)')
self.assertAllRaise(SyntaxError, 'invalid syntax', ["f'{,}'", "f'{,}'"]
)
self.assertAllRaise(SyntaxError, "f-string: expecting '}'", [
"f'{3)+(4}'"])
self.assertAllRaise(SyntaxError,
'EOL while scanning string literal', ["f'{\n}'"])
def test_backslashes_in_string_part(self):
self.assertEqual(f'\t', '\t')
self.assertEqual('\\t', '\\t')
self.assertEqual(f'\\t', '\\t')
self.assertEqual(f'{(2)}\t', '2\t')
self.assertEqual(f'{(2)}\t{(3)}', '2\t3')
self.assertEqual(f'\t{(3)}', '\t3')
self.assertEqual(f'Δ', 'Δ')
self.assertEqual('\\u0394', '\\u0394')
self.assertEqual(f'\\u0394', '\\u0394')
self.assertEqual(f'{(2)}Δ', '2Δ')
self.assertEqual(f'{(2)}Δ{(3)}', '2Δ3')
self.assertEqual(f'Δ{(3)}', 'Δ3')
self.assertEqual(f'Δ', 'Δ')
self.assertEqual('\\U00000394', '\\U00000394')
self.assertEqual(f'\\U00000394', '\\U00000394')
self.assertEqual(f'{(2)}Δ', '2Δ')
self.assertEqual(f'{(2)}Δ{(3)}', '2Δ3')
self.assertEqual(f'Δ{(3)}', 'Δ3')
self.assertEqual(f'Δ', 'Δ')
self.assertEqual(f'{(2)}Δ', '2Δ')
self.assertEqual(f'{(2)}Δ{(3)}', '2Δ3')
self.assertEqual(f'Δ{(3)}', 'Δ3')
self.assertEqual(f'2Δ', '2Δ')
self.assertEqual(f'2Δ3', '2Δ3')
self.assertEqual(f'Δ3', 'Δ3')
self.assertEqual(f' ', ' ')
self.assertEqual('\\x20', '\\x20')
self.assertEqual(f'\\x20', '\\x20')
self.assertEqual(f'{(2)} ', '2 ')
self.assertEqual(f'{(2)} {(3)}', '2 3')
self.assertEqual(f' {(3)}', ' 3')
self.assertEqual(f'2 ', '2 ')
self.assertEqual(f'2 3', '2 3')
self.assertEqual(f' 3', ' 3')
with self.assertWarns(DeprecationWarning):
value = eval("f'\\{6*7}'")
self.assertEqual(value, '\\42')
self.assertEqual(f'\\{(6 * 7)}', '\\42')
self.assertEqual(f'\\{(6 * 7)}', '\\42')
AMPERSAND = 'spam'
self.assertEqual(f'&', '&')
self.assertEqual(f'\\N{AMPERSAND}', '\\Nspam')
self.assertEqual(f'\\N{AMPERSAND}', '\\Nspam')
self.assertEqual(f'\\&', '\\&')
def test_misformed_unicode_character_name(self):
self.assertAllRaise(SyntaxError,
"\\(unicode error\\) 'unicodeescape' codec can't decode bytes in position .*: malformed \\\\N character escape"
, ["f'\\N'", "f'\\N{'", "f'\\N{GREEK CAPITAL LETTER DELTA'",
"'\\N'", "'\\N{'", "'\\N{GREEK CAPITAL LETTER DELTA'"])
def test_no_backslashes_in_expression_part(self):
self.assertAllRaise(SyntaxError,
'f-string expression part cannot include a backslash', [
"f'{\\'a\\'}'", "f'{\\t3}'", "f'{\\}'", "rf'{\\'a\\'}'",
"rf'{\\t3}'", "rf'{\\}'", 'rf\'{"\\N{LEFT CURLY BRACKET}"}\'',
"f'{\\n}'"])
def test_no_escapes_for_braces(self):
"""
Only literal curly braces begin an expression.
"""
        # \x7b is '{'; escape sequences must not start an expression.
        self.assertEqual(f'\x7b1+1}}', '{1+1}')
        self.assertEqual(f'\x7b1+1', '{1+1')
        self.assertEqual(f'\u007b1+1', '{1+1')
        self.assertEqual(f'\N{LEFT CURLY BRACKET}1+1\N{RIGHT CURLY BRACKET}', '{1+1}')
def test_newlines_in_expressions(self):
self.assertEqual(f'{(0)}', '0')
self.assertEqual(f'{(3 + 4)}', '7')
def test_lambda(self):
x = 5
self.assertEqual(f"{(lambda y: x * y)('8')!r}", "'88888'")
self.assertEqual(f"{(lambda y: x * y)('8')!r:10}", "'88888' ")
self.assertEqual(f"{(lambda y: x * y)('8'):10}", '88888 ')
self.assertAllRaise(SyntaxError, 'unexpected EOF while parsing', [
"f'{lambda x:x}'"])
def test_yield(self):
def fn(y):
f"""y:{(yield y * 2)}"""
g = fn(4)
self.assertEqual(next(g), 8)
def test_yield_send(self):
def fn(x):
yield f'x:{(yield lambda i: x * i)}'
g = fn(10)
the_lambda = next(g)
self.assertEqual(the_lambda(4), 40)
self.assertEqual(g.send('string'), 'x:string')
def test_expressions_with_triple_quoted_strings(self):
self.assertEqual(f"{'x'}", 'x')
self.assertEqual(f'{"eric\'s"}', "eric's")
self.assertEqual(f'{\'xeric"sy\'}', 'xeric"sy')
self.assertEqual(f'{\'xeric"s\'}', 'xeric"s')
self.assertEqual(f'{\'eric"sy\'}', 'eric"sy')
self.assertEqual(f'{\'xeric"sy\'}', 'xeric"sy')
self.assertEqual(f'{\'xeric"sy\'}', 'xeric"sy')
self.assertEqual(f'{\'xeric"sy\'}', 'xeric"sy')
def test_multiple_vars(self):
x = 98
y = 'abc'
self.assertEqual(f'{x}{y}', '98abc')
self.assertEqual(f'X{x}{y}', 'X98abc')
self.assertEqual(f'{x}X{y}', '98Xabc')
self.assertEqual(f'{x}{y}X', '98abcX')
self.assertEqual(f'X{x}Y{y}', 'X98Yabc')
self.assertEqual(f'X{x}{y}Y', 'X98abcY')
self.assertEqual(f'{x}X{y}Y', '98XabcY')
self.assertEqual(f'X{x}Y{y}Z', 'X98YabcZ')
def test_closure(self):
def outer(x):
def inner():
return f'x:{x}'
return inner
self.assertEqual(outer('987')(), 'x:987')
self.assertEqual(outer(7)(), 'x:7')
def test_arguments(self):
y = 2
def f(x, width):
return f'x={(x * y):{width}}'
self.assertEqual(f('foo', 10), 'x=foofoo ')
x = 'bar'
self.assertEqual(f(10, 10), 'x= 20')
def test_locals(self):
value = 123
self.assertEqual(f'v:{value}', 'v:123')
def test_missing_variable(self):
with self.assertRaises(NameError):
f"""v:{value}"""
def test_missing_format_spec(self):
class O:
def __format__(self, spec):
if not spec:
return '*'
return spec
self.assertEqual(f'{O():x}', 'x')
self.assertEqual(f'{O()}', '*')
self.assertEqual(f'{O():}', '*')
self.assertEqual(f'{(3):}', '3')
self.assertEqual(f'{(3)!s:}', '3')
def test_global(self):
self.assertEqual(f'g:{a_global}', 'g:global variable')
self.assertEqual(f'g:{a_global!r}', "g:'global variable'")
a_local = 'local variable'
self.assertEqual(f'g:{a_global} l:{a_local}',
'g:global variable l:local variable')
self.assertEqual(f'g:{a_global!r}', "g:'global variable'")
self.assertEqual(f'g:{a_global} l:{a_local!r}',
"g:global variable l:'local variable'")
self.assertIn("module 'unittest' from", f'{unittest}')
def test_shadowed_global(self):
a_global = 'really a local'
self.assertEqual(f'g:{a_global}', 'g:really a local')
self.assertEqual(f'g:{a_global!r}', "g:'really a local'")
a_local = 'local variable'
self.assertEqual(f'g:{a_global} l:{a_local}',
'g:really a local l:local variable')
self.assertEqual(f'g:{a_global!r}', "g:'really a local'")
self.assertEqual(f'g:{a_global} l:{a_local!r}',
"g:really a local l:'local variable'")
def test_call(self):
def foo(x):
return 'x=' + str(x)
self.assertEqual(f'{foo(10)}', 'x=10')
def test_nested_fstrings(self):
y = 5
self.assertEqual(f"{(f'{(0)}' * 3)}", '000')
self.assertEqual(f"{(f'{y}' * 3)}", '555')
def test_invalid_string_prefixes(self):
self.assertAllRaise(SyntaxError, 'unexpected EOF while parsing', [
"fu''", "uf''", "Fu''", "fU''", "Uf''", "uF''", "ufr''",
"urf''", "fur''", "fru''", "rfu''", "ruf''", "FUR''", "Fur''",
"fb''", "fB''", "Fb''", "FB''", "bf''", "bF''", "Bf''", "BF''"])
def test_leading_trailing_spaces(self):
        self.assertEqual(f'{ 3}', '3')
        self.assertEqual(f'{  3}', '3')
        self.assertEqual(f'{3 }', '3')
        self.assertEqual(f'{3  }', '3')
        self.assertEqual(f'expr={ {x: y for x, y in [(1, 2)]} }', 'expr={1: 2}')
        self.assertEqual(f'expr={ {x: y for x, y in [(1, 2)]}}', 'expr={1: 2}')
def test_not_equal(self):
self.assertEqual(f'{(3 != 4)}', 'True')
self.assertEqual(f'{(3 != 4):}', 'True')
self.assertEqual(f'{(3 != 4)!s}', 'True')
self.assertEqual(f'{(3 != 4)!s:.3}', 'Tru')
def test_conversions(self):
self.assertEqual(f'{(3.14):10.10}', ' 3.14')
self.assertEqual(f'{(3.14)!s:10.10}', '3.14 ')
self.assertEqual(f'{(3.14)!r:10.10}', '3.14 ')
self.assertEqual(f'{(3.14)!a:10.10}', '3.14 ')
self.assertEqual(f"{'a'}", 'a')
self.assertEqual(f"{'a'!r}", "'a'")
self.assertEqual(f"{'a'!a}", "'a'")
self.assertEqual(f"{'a!r'}", 'a!r')
self.assertEqual(f'{(3.14):!<10.10}', '3.14!!!!!!')
self.assertAllRaise(SyntaxError,
'f-string: invalid conversion character', ["f'{3!g}'",
"f'{3!A}'", "f'{3!3}'", "f'{3!G}'", "f'{3!!}'", "f'{3!:}'",
"f'{3! s}'"])
self.assertAllRaise(SyntaxError, "f-string: expecting '}'", [
"f'{x!s{y}}'", "f'{3!ss}'", "f'{3!ss:}'", "f'{3!ss:s}'"])
def test_assignment(self):
self.assertAllRaise(SyntaxError, 'invalid syntax', ["f'' = 3",
"f'{0}' = x", "f'{x}' = x"])
def test_del(self):
self.assertAllRaise(SyntaxError, 'invalid syntax', ["del f''",
"del '' f''"])
def test_mismatched_braces(self):
self.assertAllRaise(SyntaxError,
"f-string: single '}' is not allowed", ["f'{{}'", "f'{{}}}'",
"f'}'", "f'x}'", "f'x}x'", "f'\\u007b}'", "f'{3:}>10}'",
"f'{3:}}>10}'"])
self.assertAllRaise(SyntaxError, "f-string: expecting '}'", [
"f'{3:{{>10}'", "f'{3'", "f'{3!'", "f'{3:'", "f'{3!s'",
"f'{3!s:'", "f'{3!s:3'", "f'x{'", "f'x{x'", "f'{x'", "f'{3:s'",
"f'{{{'", "f'{{}}{'", "f'{'"])
self.assertEqual(f"{'{'}", '{')
self.assertEqual(f"{'}'}", '}')
self.assertEqual(f"{(3):{'}'}>10}", '}}}}}}}}}3')
self.assertEqual(f"{(2):{'{'}>10}", '{{{{{{{{{2')
def test_if_conditional(self):
def test_fstring(x, expected):
flag = 0
if f'{x}':
flag = 1
else:
flag = 2
self.assertEqual(flag, expected)
def test_concat_empty(x, expected):
flag = 0
if f'{x}':
flag = 1
else:
flag = 2
self.assertEqual(flag, expected)
def test_concat_non_empty(x, expected):
flag = 0
if f' {x}':
flag = 1
else:
flag = 2
self.assertEqual(flag, expected)
test_fstring('', 2)
test_fstring(' ', 1)
test_concat_empty('', 2)
test_concat_empty(' ', 1)
test_concat_non_empty('', 1)
test_concat_non_empty(' ', 1)
def test_empty_format_specifier(self):
x = 'test'
self.assertEqual(f'{x}', 'test')
self.assertEqual(f'{x:}', 'test')
self.assertEqual(f'{x!s:}', 'test')
self.assertEqual(f'{x!r:}', "'test'")
def test_str_format_differences(self):
d = {'a': 'string', (0): 'integer'}
a = 0
self.assertEqual(f'{d[0]}', 'integer')
self.assertEqual(f"{d['a']}", 'string')
self.assertEqual(f'{d[a]}', 'integer')
self.assertEqual('{d[a]}'.format(d=d), 'string')
self.assertEqual('{d[0]}'.format(d=d), 'integer')
def test_invalid_expressions(self):
self.assertAllRaise(SyntaxError, 'invalid syntax', ["f'{a[4)}'",
"f'{a(4]}'"])
def test_errors(self):
self.assertAllRaise(TypeError, 'unsupported', ["f'{(lambda: 0):x}'",
"f'{(0,):x}'"])
self.assertAllRaise(ValueError, 'Unknown format code', [
"f'{1000:j}'", "f'{1000:j}'"])
def test_loop(self):
for i in range(1000):
self.assertEqual(f'i:{i}', 'i:' + str(i))
def test_dict(self):
d = {'"': 'dquote', "'": 'squote', 'foo': 'bar'}
self.assertEqual(f'{d["\'"]}', 'squote')
self.assertEqual(f'{d[\'"\']}', 'dquote')
self.assertEqual(f"{d['foo']}", 'bar')
self.assertEqual(f"{d['foo']}", 'bar')
def test_backslash_char(self):
self.assertEqual(eval('f"\\\n"'), '')
self.assertEqual(eval('f"\\\r"'), '')
if __name__ == '__main__':
unittest.main()
|
489265
|
import matplotlib.pyplot as plt
import numpy as np
import qcodes as qc
from qcodes import (
Measurement,
experiments,
initialise_database,
initialise_or_create_database_at,
load_by_guid,
load_by_run_spec,
load_experiment,
load_last_experiment,
load_or_create_experiment,
new_experiment,
ParameterWithSetpoints,
)
from qcodes.dataset.plotting import plot_dataset
from qcodes.instrument_drivers.tektronix.keithley_7510 import GeneratedSetPoints
from qcodes.loops import Loop
from qcodes.logger.logger import start_all_logging
# from qcodes.tests.instrument_mocks import DummyInstrument, DummyInstrumentWithMeasurement
from OPX_driver import *
pulse_len = 1000
config = {
"version": 1,
"controllers": {
"con1": {
"type": "opx1",
"analog_outputs": {
1: {"offset": +0.0},
2: {"offset": +0.0},
},
"analog_inputs": {
1: {"offset": +0.0},
},
}
},
"elements": {
"qe1": {
"mixInputs": {"I": ("con1", 1), "Q": ("con1", 2)},
"outputs": {"output1": ("con1", 1)},
"intermediate_frequency": 5e6,
"operations": {"playOp": "constPulse", "readout": "readoutPulse"},
"time_of_flight": 180,
"smearing": 0,
},
},
"pulses": {
"constPulse": {
"operation": "control",
"length": pulse_len, # in ns
"waveforms": {"I": "const_wf", "Q": "const_wf"},
},
"readoutPulse": {
"operation": "measure",
"length": pulse_len,
"waveforms": {"I": "const_wf", "Q": "const_wf"},
"digital_marker": "ON",
"integration_weights": {"x": "xWeights", "y": "yWeights"},
},
},
"waveforms": {
"const_wf": {"type": "constant", "sample": 0.2},
},
"digital_waveforms": {
"ON": {"samples": [(1, 0)]},
},
"integration_weights": {
"xWeights": {
"cosine": [1.0] * (pulse_len // 4),
"sine": [0.0] * (pulse_len // 4),
},
"yWeights": {
"cosine": [0.0] * (pulse_len // 4),
"sine": [1.0] * (pulse_len // 4),
},
},
}
f_pts = 100
voltage_range = np.linspace(0, 10, 10)
f_range = np.linspace(0, 100, f_pts)
# opx = OPX(config)
opx = OPX_SpectrumScan(config)
opx.f_start(0)
opx.f_stop(100)
opx.sim_time(100000)
opx.n_points(f_pts)
station = qc.Station()
station.add_component(opx)
exp = load_or_create_experiment(
experiment_name="my experiment", sample_name="this sample"
)
meas = Measurement(exp=exp, station=station)
meas.register_parameter(opx.ext_v) # register the independent parameter
meas.register_parameter(
opx.spectrum, setpoints=(opx.ext_v,)
) # now register the dependent one
with meas.run() as datasaver:
for v in voltage_range:
opx.ext_v(v)
# interact with external device here
datasaver.add_result((opx.ext_v, v), (opx.spectrum, opx.spectrum()))
dataset = datasaver.dataset
plot_dataset(dataset)
|
489273
|
import os
from shutil import copyfile, move
from random import sample
def copy_files(source, destination, file_name, alphabet):
""" Checks if the directory exists, if not then creates it, and copies
the files to the directory."""
if not os.path.exists(destination+alphabet):
os.makedirs(destination+alphabet)
copyfile(src=source+file_name, dst=destination+alphabet+'/'+file_name)
def move_file(current, destination, file_name, alphabet):
"""Moves file from current directory to destination.
current = "path/to/current/file.foo"
destination = "path/to/new/destination/for/file.foo"
"""
if not os.path.exists(destination+alphabet):
os.makedirs(destination+alphabet)
move(src=current, dst=destination+alphabet+'/'+file_name)
if __name__ == '__main__':
file_list = os.listdir("Data/RawImages/")
alphabets = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O',
'P','Q','R','S','T','U','V','W','X','Y','Z']
    for filename in sorted(file_list):
        # Sort files alphabetically and place each one in the folder for its leading letter.
        for i in range(len(alphabets)):
if filename[0] == alphabets[i]:
copy_files(source='Data/RawImages/', destination='Data/TrainData/', file_name=filename, alphabet=alphabets[i])
print("Successfully created TrainData!")
folders = os.listdir('Data/TrainData/')
for folder in sorted(folders):
all_files = os.listdir('Data/TrainData/'+folder)
moves = sample(all_files, 10) # randomly selects 10 files from each source folder as testing data
for each in moves:
move_file(current='Data/TrainData/'+folder+'/'+each, destination='Data/TestData/',
alphabet=folder, file_name=each)
print("Successfully created TestData!")
|
489327
|
from collections import (
deque,
)
from enum import Enum
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Set,
Union,
)
from lark import (
Tree,
Token,
)
from .functools import (
exists,
)
class NodeType(Enum):
GRAMMAR = 0
PRODUCTION = 1
SYMBOL = 2
EXPRESSION = 3
SEQUENCE = 4
TERMINAL = 5
IMPORTS = 8
IMPORT = 9
NAME = 10
EXTERNAL_IMPORTS = 12
EXTERNAL_IMPORT = 13
SOURCE = 14
FILENAMES = 15
FILENAME = 16
ANNOTATIONS = 6
ANNOTATION = 7
START = 11
# Next: 17
TERMINAL_NODES = {
NodeType.TERMINAL,
NodeType.SYMBOL,
NodeType.ANNOTATION,
NodeType.START,
NodeType.SOURCE,
NodeType.FILENAME,
}
NONTERMINAL_NODES = {
NodeType.GRAMMAR,
NodeType.PRODUCTION,
NodeType.EXPRESSION,
NodeType.SEQUENCE,
NodeType.ANNOTATIONS,
NodeType.EXTERNAL_IMPORTS,
NodeType.EXTERNAL_IMPORT,
NodeType.FILENAMES,
}
class Node(object):
    def __init__(self,
                 node_type: NodeType,
                 value: Optional[str] = None,
                 children: Optional[List['Node']] = None,
                 probability: Optional[int] = None):
        self.node_type = node_type
        self.value = value
        # Avoid sharing a mutable default list between nodes.
        self.children = children if children is not None else []
        self.cached_symbols = set()  # type: Set[str]
        self.probability = probability
def __str__(self):
if self.node_type == NodeType.GRAMMAR:
return '\n'.join([str(child) for child in self.children])
elif self.node_type == NodeType.PRODUCTION:
if len(self.children) == 2:
return f'{self.children[0]} ::= {self.children[1]}'
elif len(self.children) == 3:
return (
f'{self.children[0]}\n'
f'{self.children[1]} ::= {self.children[2]}'
)
else:
raise Exception(
f'Expected production to have 2 or 3 children but '
f'it had {len(self.children)}'
)
elif self.node_type == NodeType.SYMBOL:
return f'<{self.value}>'
elif self.node_type == NodeType.EXPRESSION:
return ' | '.join(map(str, self.children))
elif self.node_type == NodeType.SEQUENCE:
if self.probability:
ret = f'{self.probability} '
else:
ret = ''
ret += ' '.join(map(str, self.children))
return ret
elif self.node_type == NodeType.TERMINAL:
return self.value
elif self.node_type == NodeType.ANNOTATION:
return f'@{self.value}'
elif self.node_type == NodeType.ANNOTATIONS:
return '\n'.join([str(child) for child in self.children])
elif self.node_type == NodeType.IMPORTS:
return '\n'.join([str(child) for child in self.children]) + '\n'
elif self.node_type == NodeType.IMPORT:
return f'import {self.value}'
elif self.node_type == NodeType.START:
return f'start: <{self.value}>'
elif self.node_type == NodeType.EXTERNAL_IMPORTS:
return '\n'.join(map(str, self.children)) + '\n'
elif self.node_type == NodeType.EXTERNAL_IMPORT:
source = self.children[0].value
filenames = sorted([x.value for x in self.children[1].children])
ret = f'from {source} import (\n'
for filename in filenames:
ret += f' {filename},\n'
ret += ')\n'
return ret
else:
raise Exception(f'Unrecognized node type {self.node_type}')
def __repr__(self, indent: int = 0):
r = ' ' * indent + str(self.node_type)[len('NodeType.'):] + ':'
if self.children:
for child in self.children:
r += '\n' + child.__repr__(indent + 1)
else:
r += ' ' + (self.value or 'ø')
return r
def _bfs(self) -> Iterator['Node']:
queue = deque([self])
while queue:
current = queue.pop()
for child in current.children:
queue.appendleft(child)
yield current
def walk(self) -> Iterator['Node']:
return self._bfs()
def defines(self, value: Any) -> bool:
"""Return whether this grammar defines the given symbol.
Args:
value: The value of the symbol we're looking for.
Returns:
Whether the grammar contains the symbol's definition.
"""
assert self.node_type == NodeType.GRAMMAR
if self.cached_symbols:
return value in self.cached_symbols
for production in self.filter(
lambda x: x.node_type == NodeType.PRODUCTION
):
has_annotation = (
len(production.children) > 1
and Node.is_annotations(production.children[1])
)
if has_annotation:
assert production.children[1].value, (
'The productions must have a symbol value.'
)
symbol = production.children[1].value
else:
assert production.children[0].value, (
'The productions must have a symbol value.'
)
symbol = production.children[0].value
self.cached_symbols.add(symbol or '')
return value in self.cached_symbols
def equals(self, other: Any) -> bool:
if type(other) != type(self):
return False
if self.node_type != other.node_type:
return False
if self.node_type in TERMINAL_NODES:
return self.value == other.value
        elif self.node_type == NodeType.SEQUENCE:
            return (
                self.probability == other.probability
                and len(self.children) == len(other.children)
                and all([
                    x.equals(y)
                    for x, y in zip(self.children, other.children)
                ])
            )
        elif self.node_type in NONTERMINAL_NODES:
            return (
                len(self.children) == len(other.children)
                and all([
                    x.equals(y)
                    for x, y in zip(self.children, other.children)
                ])
            )
elif self.node_type == NodeType.IMPORTS:
# We ignore imports, since they should have been
# expanded before translation.
return True
else:
raise Exception(f'Unrecognized node type. {self.node_type}')
def filter(self, filt: Callable[['Node'], bool]) -> Iterator['Node']:
for node in self._bfs():
if filt(node):
yield node
def remove(self, filt: Callable[['Node'], bool]) -> bool:
parents_to_children = dict() # type: Dict['Node', List['Node']]
for parent in self._bfs():
for child in parent.children:
if filt(child):
if parent not in parents_to_children:
parents_to_children[parent] = list()
parents_to_children[parent].append(child)
# Since dicts are ordered in Python3.7, and since this was
# a BFS, we can go through in reverse order and remove nodes.
# We won't get a null reference in this way.
parents = list(parents_to_children.keys())
parents.reverse()
for parent in parents:
for child in parents_to_children[parent]:
parent.children.remove(child)
return bool(parents_to_children)
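    # Illustrative usage: `grammar.remove(Node.is_imports)` would strip any
    # import children once they have been inlined.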
@classmethod
    def from_lark_tree(cls, tree: Union[Tree, Token]) -> 'Node':
if isinstance(tree, Token):
return Node(
NodeType.TERMINAL,
value=tree.value,
)
assert isinstance(tree, Tree)
if tree.data == 'start':
return Node.from_lark_tree(tree.children[0])
elif tree.data == 'grammar':
# Don't include an import node if there were no
# imports. Imports will also be stripped out later.
return Node(
NodeType.GRAMMAR,
children=list(map(Node.from_lark_tree, tree.children)),
)
elif tree.data == 'production':
return Node(
NodeType.PRODUCTION,
children=list(map(Node.from_lark_tree, tree.children)),
)
elif tree.data == 'symbol':
return Node(
NodeType.SYMBOL,
value=tree.children[0].value,
)
elif tree.data == 'expression':
return Node(
NodeType.EXPRESSION,
children=list(map(Node.from_lark_tree, tree.children)),
)
elif tree.data == 'sequence':
if (hasattr(tree, 'children')
and hasattr(tree.children[0], 'data')
and tree.children[0].data == 'probability'):
first_child = 1
probability = tree.children[0].children[0].value
else:
first_child = 0
probability = None
return Node(
NodeType.SEQUENCE,
children=list(map(
Node.from_lark_tree, tree.children[first_child:]
)),
probability=probability,
)
elif tree.data == 'annotations':
return Node(
NodeType.ANNOTATIONS,
children=list(map(Node.from_lark_tree, tree.children)),
)
elif tree.data == 'annotation':
return Node(
NodeType.ANNOTATION,
value=tree.children[0].value,
)
elif tree.data == 'imports':
if tree.children:
return Node(
NodeType.IMPORTS,
children=list(map(Node.from_lark_tree, tree.children)),
)
else:
return None
elif tree.data == 'import':
assert len(tree.children) == 1
return Node(
NodeType.IMPORT,
value=tree.children[0].value.strip(),
)
elif tree.data == 'name':
assert len(tree.children) == 1
return Node(
NodeType.NAME,
value=tree.children[0].value.strip(),
)
elif tree.data == 'start_expression':
assert len(tree.children) == 1
assert len(tree.children[0].children) == 1
return Node(
NodeType.START,
value=tree.children[0].children[0].value.strip(),
)
elif tree.data == 'external_imports':
return Node(
NodeType.EXTERNAL_IMPORTS,
children=list(map(Node.from_lark_tree, tree.children)),
)
elif tree.data == 'external_import':
assert len(tree.children) == 2
return Node(
NodeType.EXTERNAL_IMPORT,
children=[
Node(NodeType.SOURCE, tree.children[0].value),
Node.from_lark_tree(tree.children[1]),
],
)
elif tree.data == 'items':
assert len(tree.children) > 0
filenames = list()
stack = deque(tree.children)
while stack:
curr = stack.pop()
if hasattr(curr, 'type') and curr.type == 'ITEM':
filenames.append(curr.value)
if hasattr(curr, 'children'):
stack.extend(curr.children)
return Node(
NodeType.FILENAMES,
children=[
Node(NodeType.FILENAME, value=filename)
for filename in filenames
]
)
else:
raise Exception(
f'Unrecognized Lark type "{tree.data}". Check grammar.'
)
def _invalidate_cache(self):
self.cached_symbols = set()
def append(self, node: 'Node'):
self._invalidate_cache()
self.children.append(node)
def prepend(self, node: 'Node'):
self._invalidate_cache()
self.children.insert(0, node)
def clone(self) -> 'Node':
return Node(
node_type=self.node_type,
children=[child.clone() for child in self.children],
value=self.value,
probability=self.probability,
)
def to_python(self, start_symbol: Optional[str] = None) -> str:
if self.node_type == NodeType.TERMINAL:
# Terminals are encoded as token types, so
# they should not be quoted.
assert self.value is not None
return self.value[1:-1].replace('\\', '')
elif self.node_type == NodeType.SYMBOL:
assert self.value is not None
return f'"{self.value}"'
elif self.node_type == NodeType.SEQUENCE:
# The grammar for sequence is
# sequence: probability?
# annotations?
# (symbol | TERMINAL)
# (_WHITESPACE (symbol | TERMINAL))*
#
# If the node has undergone translation, it will have
# only one or two terminals/symbols, by definition of CNF.
#
# Probability is saved onto the node, but annotations
# remain as children. (Probably a poor choice we should change.)
#
# +----------+-----------------+----------------+
# | Children | Has Probability | Has Annotation |
# +----------+-----------------+----------------+
# | 1 | N | N |
# +----------+-----------------+----------------+
# | 1 | N | Y |
# +----------+-----------------+----------------+
# | 1 | Y | N |
# +----------+-----------------+----------------+
# | 1 | Y | Y |
# +----------+-----------------+----------------+
# | 2 | N | N |
# +----------+-----------------+----------------+
# | 2 | N | Y |
# +----------+-----------------+----------------+
# | 2 | Y | N |
# +----------+-----------------+----------------+
# | 2 | Y | Y |
# +----------+-----------------+----------------+
# | 3 | N | N |
# +----------+-----------------+----------------+
# | 3 | N | Y |
# +----------+-----------------+----------------+
# | 3 | Y | N |
# +----------+-----------------+----------------+
# | 3 | Y | Y |
# +----------+-----------------+----------------+
#
# Which will allow the consumer to distinguish between all of
# these without introducing a new type.
#
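            # For illustration (not part of the grammar itself): with the
            # rendering below, a two-item sequence without annotations or
            # probability becomes '([], "a", "b", 0)', while a single-item
            # sequence collapses to '("a", 0)'.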
annotations = '[], '
children = self.children
if self.children[0].node_type == NodeType.ANNOTATIONS:
annotations = self.children[0].to_python() + ', '
children = self.children[1:]
if len(children) == 1:
annotations = ''
probability = '0'
if self.probability:
probability = str(self.probability)
return (
'('
+ annotations
+ ', '.join([x.to_python() for x in children])
+ ', ' + probability
+ ')'
)
elif self.node_type == NodeType.EXPRESSION:
return ', '.join([x.to_python() for x in self.children])
elif self.node_type == NodeType.IMPORTS:
# Ignore imports -- these should be inlined by the
# translation process.
return ''
elif self.node_type == NodeType.NAME:
# The name is handled below, along with the grammar, to
# make sure it doesn't get out of order.
return ''
elif self.node_type == NodeType.PRODUCTION:
has_annotation = (
len(self.children) > 1
and Node.is_annotations(self.children[0])
)
if has_annotation:
annotation = self.children[0].to_python()
symbol = self.children[1].to_python()
expression = self.children[2].to_python()
return (
' ' * 8
+ f'P.with_annotations('
+ f'{annotation}, {symbol}, {expression})'
)
else:
symbol = self.children[0].to_python()
expression = next(self.filter(Node.is_expression)).to_python()
if not symbol.startswith('"'):
# The symbol was elided up to the grammar.
# It will already have been included, so we'll
# just skip this one.
return ' ' * 8 + '# Empty symbol elided to grammar'
return ' ' * 8 + f'P({symbol}, {expression}),'
elif self.node_type == NodeType.ANNOTATIONS:
return (
'['
+ ', '.join([n.to_python() for n in self.children])
+ ']'
)
elif self.node_type == NodeType.START:
return ''
elif self.node_type == NodeType.ANNOTATION:
assert self.value is not None
return self.value
elif self.node_type == NodeType.GRAMMAR:
import datetime
comment = (
f'# Generated on {datetime.datetime.now()}\n'
)
name = 'Grammar'
for name_node in self.filter(Node.is_name):
assert name_node.value is not None
name = name_node.value
values = [
comment,
]
for node in self.filter(Node.is_external_imports):
values.append(node.to_python())
values.extend([
f'class {name}(BaseGrammar):',
' productions = [',
])
start_symbol = None
for start_node in self.filter(Node.is_start):
start_symbol = start_node.value
for production in self.filter(Node.is_production):
values.append(production.to_python(start_symbol=start_symbol))
values.append(' ]')
values.append(f' start = "{start_symbol}"')
return '\n'.join(values)
elif self.node_type == NodeType.EXTERNAL_IMPORTS:
return '\n'.join([x.to_python() for x in self.children])
elif self.node_type == NodeType.EXTERNAL_IMPORT:
source = self.children[0].value
filenames = sorted([x.value for x in self.children[1].children])
ret = f'from {source} import (\n'
for filename in filenames:
ret += f' {filename},\n'
ret += ')\n'
return ret
else:
raise Exception(f'Unrecognized node type, {self.node_type}')
@staticmethod
def is_symbol(x: 'Node') -> bool:
return x.node_type == NodeType.SYMBOL
@staticmethod
def is_terminal(x: 'Node') -> bool:
return x.node_type == NodeType.TERMINAL
@staticmethod
def is_production(x: 'Node') -> bool:
return x.node_type == NodeType.PRODUCTION
@staticmethod
def is_sequence(x: 'Node') -> bool:
return x.node_type == NodeType.SEQUENCE
@staticmethod
def is_expression(x: 'Node') -> bool:
return x.node_type == NodeType.EXPRESSION
@staticmethod
def is_annotation(x: 'Node') -> bool:
return x.node_type == NodeType.ANNOTATION
@staticmethod
def is_annotations(x: 'Node') -> bool:
return x.node_type == NodeType.ANNOTATIONS
@staticmethod
def is_imports(x: 'Node') -> bool:
return x.node_type == NodeType.IMPORTS
@staticmethod
def is_import(x: 'Node') -> bool:
return x.node_type == NodeType.IMPORT
@staticmethod
def is_name(x: 'Node') -> bool:
return x.node_type == NodeType.NAME
@staticmethod
def is_external_imports(x: 'Node') -> bool:
return x.node_type == NodeType.EXTERNAL_IMPORTS
@staticmethod
def has_symbol(x: str) -> Callable[['Node'], bool]:
def _inner(y: Node) -> bool:
return exists(
y.filter(lambda z: Node.is_symbol(z) and z.value == x)
)
return _inner
@staticmethod
def has_value(x: str) -> Callable[['Node'], bool]:
def _inner(y: Node) -> bool:
return hasattr(y, 'value') and y.value == x
return _inner
@staticmethod
def _production_with_lhs(symbol: str) -> Callable[['Node'], bool]:
def _inner(x: Node) -> bool:
if not Node.is_production(x):
return False
if not x.children or not Node.is_symbol(x.children[0]):
return False
return x.children[0].value == symbol
return _inner
@staticmethod
def has_annotation(node: 'Node') -> bool:
return exists(node.filter(Node.is_annotations))
# Production-specific functions
@staticmethod
def get_symbol(node: 'Node') -> 'Node':
assert Node.is_production(node)
if Node.is_annotations(node.children[0]):
return node.children[1]
return node.children[0]
@staticmethod
def has_sequence(node: 'Node', sequence: 'Node') -> bool:
for child in node.filter(Node.is_sequence):
if child.equals(sequence):
return True
return False
@staticmethod
def is_start(node: 'Node') -> bool:
return node.node_type == NodeType.START
def to_dot(self) -> str:
"""Prints the dot representation of the tree.
This is primarily meant for debugging.
Returns:
The dot representation of the tree.
"""
name_lookup = dict() # type: Dict['Node', str]
names = set() # type: Set[str]
def _node_name(node: 'Node') -> str:
if node in name_lookup:
return name_lookup[node]
elif node.node_type in TERMINAL_NODES:
assert node.value
name = node.value.replace(
'"', 'Q',
).replace(
'\\', 'B',
).replace(
'@', 'At',
)
i = 0
while name + str(i) in names:
i += 1
name = name + str(i)
names.add(name)
name_lookup[node] = name
return name
elif node.node_type in NONTERMINAL_NODES:
name = str(node.node_type).replace('.', '_')
i = 0
while name + str(i) in names:
i += 1
name = name + str(i)
names.add(name)
name_lookup[node] = name
return name
else:
raise Exception(
f'Unrecognized node type {node.node_type}'
)
def _node_label(node: 'Node') -> str:
if node.node_type in TERMINAL_NODES:
assert node.value is not None
return node.value.replace('"', '\\"')
elif Node.is_expression(node):
return ''
else:
return _node_name(node)
def _node_shape(node: 'Node') -> str:
if Node.is_annotation(node):
return 'diamond'
elif node.node_type in TERMINAL_NODES:
return 'rectangle'
else:
return 'oval'
lines = ['digraph G {']
# Iterate through all the children to create the
# definitions.
for node in self._bfs():
name = _node_name(node)
label = _node_label(node)
shape = _node_shape(node)
lines.append(f'{name} [label="{label}", shape="{shape}"];')
# Iterate through all the children to create the
# relationships between nodes.
for node in self._bfs():
if node.node_type in TERMINAL_NODES:
continue
name = _node_name(node)
for child in node.children:
child_name = _node_name(child)
lines.append(f'{name} -> {child_name};')
lines.append('}')
return '\n'.join(lines)
def merge_annotations(self, other: 'Node'):
assert self.node_type == NodeType.SEQUENCE
assert other.node_type == NodeType.ANNOTATIONS
has_annotations = (
self.children
and Node.is_annotations(self.children[0])
)
if not has_annotations:
self.children.insert(0, other.clone())
return
for annotation in other.children:
assert annotation.node_type == NodeType.ANNOTATION
already_defined = False
for existing in self.filter(Node.is_annotation):
if existing.value == annotation.value:
already_defined = True
break
if not already_defined:
self.children[0].children.append(annotation.clone())
def merge(self, other: 'Node'):
assert self.node_type == NodeType.GRAMMAR
for external_imports in other.filter(Node.is_external_imports):
cloned = external_imports.clone()
            self.children.insert(0, cloned)
for production in other.filter(Node.is_production):
cloned = production.clone()
self.children.append(cloned)
|
489347
|
import os
from setuptools import setup
_packages = ["polara",
"polara/recommender",
"polara/evaluation",
"polara/datasets",
"polara/lib",
"polara/tools",
"polara/preprocessing",
"polara/recommender/coldstart",
"polara/recommender/hybrid",
"polara/recommender/contextual",
"polara/recommender/external",
"polara/recommender/external/mymedialite",
"polara/recommender/external/turi",
"polara/recommender/external/implicit",
"polara/recommender/external/lightfm"]
opts = dict(name="polara",
description="Fast and flexible recommender systems framework",
            keywords="recommender systems",
            version="0.7.2",
license="MIT",
author="<NAME>",
platforms=["any"],
packages=_packages)
# opts.update(extras)
if __name__ == '__main__':
setup(**opts)
|
489352
|
import math
class Solution:
def shortestSuperstring(self, A):
"""
:type A: List[str]
:rtype: str
"""
N = len(A)
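        # Bitmask DP over subsets (Travelling-Salesman style): tails[i][j] is
        # the number of extra characters appended when word j directly follows
        # word i, i.e. len(A[j]) minus their maximal suffix/prefix overlap.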
tails = [[0]*N for _ in range(N)]
for i, x in enumerate(A):
for j, y in enumerate(A):
if i != j:
for k in range(len(x) + 1):
if y.startswith(x[k:]):
tails[i][j] = len(y) - len(x) + k
break
dp = [[math.inf]*N for _ in range(1 << N)]
parent = [[None]*N for _ in range(1 << N)]
minLen = math.inf
last = None
for mask in range(1, 1 << N):
for bit in range(N):
if mask & (1 << bit):
prev = mask - (1 << bit)
if prev == 0:
dp[mask][bit] = len(A[bit])
else:
for k in range(N):
if prev & (1 << k):
if dp[prev][k] != math.inf:
val = dp[prev][k] + tails[k][bit]
if val < dp[mask][bit]:
dp[mask][bit] = val
parent[mask][bit] = k
if mask == (1 << N) - 1 and dp[mask][bit] < minLen:
minLen = dp[mask][bit]
last = bit
curr = (1 << N) - 1
St = []
while curr:
St.append(last)
temp = curr
curr -= (1 << last)
last = parent[temp][last]
first = St.pop()
res = [A[first]]
while St:
second = St.pop()
res.append(A[second][len(A[second]) - tails[first][second]:])
first = second
return ''.join(res)
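# Illustrative check (LeetCode 943, example 2): for
# ["catg", "ctaagt", "gcta", "ttca", "atgcatc"] one valid shortest
# superstring is "gctaagttcatgcatc".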
|
489363
|
import math
import os
from hydra import utils
from torchvision import datasets, transforms
class CIFAR10(datasets.CIFAR10): # TODO: Documentation
def __init__(
self,
partition: str,
**kwargs,
):
if "root" in kwargs:
root = kwargs["root"]
else:
root = utils.get_original_cwd()
# DEBUG
# root = "../"
root = os.path.join(root, "data")
transform = []
resize_to = None
resize_blur = False
# Resize according to config.resize
if "resize" in kwargs and kwargs["resize"] != "":
try:
img_size = list(map(int, kwargs["resize"].split(",")))
except Exception:
raise ValueError(
f"config.resize \"{kwargs['resize']}\" is in "
f"wrong format. Should be `h,w`."
)
resize_to = img_size[0]
resize_blur = kwargs["resize_blur"] if "resize_blur" in kwargs else False
resize_blur_sigma = (
kwargs["resize_blur_sigma"] if "resize_blur_sigma" in kwargs else 1.0
)
# Blur anyway, regardless of resize
if "blur" in kwargs and kwargs["blur"]:
resize_blur = True
resize_blur_sigma = (
kwargs["resize_blur_sigma"] if "resize_blur_sigma" in kwargs else 1.0
)
# Blur before resize, regardless of partition
if resize_blur and resize_blur_sigma != 0.0:
# Half kernel size = 3 x sigma, rounded up
kernel_size = math.ceil(resize_blur_sigma * 3.0) * 2 + 1
transform.append(
transforms.GaussianBlur(kernel_size, sigma=resize_blur_sigma)
)
augment = kwargs["augment"]
if augment == "resnet":
transform.extend(
augmentations_resnet(
resize_to=resize_to, interpolation=kwargs["resize_interpolation"]
)
)
elif augment == "None":
if resize_to is not None:
interpolation = {
"bilinear": transforms.InterpolationMode.BILINEAR,
"nearest": transforms.InterpolationMode.NEAREST,
}[kwargs["resize_interpolation"]]
transform.append(
transforms.Resize(resize_to, interpolation=interpolation)
)
transform.extend(
[
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
]
)
else:
raise NotImplementedError(f"augment = {augment}")
transform = transforms.Compose(transform)
if partition == "train":
train = True
elif partition == "test":
train = False
else:
raise NotImplementedError(
"The dataset partition {} does not exist".format(partition)
)
super().__init__(root=root, train=train, transform=transform, download=True)
# def get_augmentations():
# """
# Following "A branching and merging convolutional network with homogeneous filter capsules"
# - <NAME> al., 2020 - https://arxiv.org/abs/2001.09136
# """
# augmentations = [
# transforms.RandomApply(
# [transforms.RandomRotation(30)], p=0.5
# ), # Rotation by 30 degrees with probability 0.5
# transforms.RandomApply(
# [transforms.RandomAffine(degrees=0, translate=(0, 0), scale=(0.75, 1))],
# p=0.5,
# ), # Rescale with probability 0.5
# transforms.ToTensor(),
# transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
# transforms.RandomErasing(
# p=0.5, scale=(4 / 32.0, 4 / 32.0), ratio=(1.0, 1.0), value=0, inplace=False
# ), # Erase patches of 4 pixels with probability 0.5
# ]
# return augmentations
def augmentations_resnet(resize_to=None, crop_size=None, interpolation="bilinear"):
"""
Following "A branching and merging convolutional network with homogeneous filter capsules"
- Biearly et al., 2020 - https://arxiv.org/abs/2001.09136
"""
if crop_size is None:
crop_size = 32
pad_size = crop_size // 8
augmentations = [
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(crop_size, pad_size),
]
if resize_to is not None:
if interpolation == "bilinear":
interpolation = transforms.InterpolationMode.BILINEAR
elif interpolation == "nearest":
interpolation = transforms.InterpolationMode.NEAREST
else:
raise NotImplementedError(f"resize_interpolation={interpolation}")
augmentations.append(transforms.Resize(resize_to, interpolation=interpolation))
augmentations.extend(
[
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
]
)
return augmentations
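# Example usage (sketch; argument names follow the kwargs consumed above,
# values are illustrative):
#   dataset = CIFAR10(partition="train", augment="resnet", resize="32,32",
#                     resize_interpolation="bilinear", root=".")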
|
489364
|
from collections import Counter
tab = '''TABACARIA
Não sou nada.
Nunca serei nada.
Não posso querer ser nada.
À parte isso, tenho em mim todos os sonhos do mundo.
Janelas do meu quarto,
Do meu quarto de um dos milhões do mundo que ninguém sabe quem é
(E se soubessem quem é, o que saberiam?),
Dais para o mistério de uma rua cruzada constantemente por gente,
Para uma rua inacessível a todos os pensamentos,
Real, impossivelmente real, certa, desconhecidamente certa,
Com o mistério das coisas por baixo das pedras e dos seres,
Com a morte a pôr humidade nas paredes e cabelos brancos nos homens,
Com o Destino a conduzir a carroça de tudo pela estrada de nada.
Estou hoje vencido, como se soubesse a verdade.
Estou hoje lúcido, como se estivesse para morrer,
E não tivesse mais irmandade com as coisas
Senão uma despedida, tornando-se esta casa e este lado da rua
A fileira de carruagens de um comboio, e uma partida apitada
De dentro da minha cabeça,
E uma sacudidela dos meus nervos e um ranger de ossos na ida.
Estou hoje perplexo como quem pensou e achou e esqueceu.
Estou hoje dividido entre a lealdade que devo
À Tabacaria do outro lado da rua, como coisa real por fora,
E à sensação de que tudo é sonho, como coisa real por dentro.
Falhei em tudo.
Como não fiz propósito nenhum, talvez tudo fosse nada.
A aprendizagem que me deram,
Desci dela pela janela das traseiras da casa,
Fui até ao campo com grandes propósitos.
Mas lá encontrei só ervas e árvores,
E quando havia gente era igual à outra.
Saio da janela, sento-me numa cadeira. Em que hei-de pensar?
Que sei eu do que serei, eu que não sei o que sou?
Ser o que penso? Mas penso ser tanta coisa!
E há tantos que pensam ser a mesma coisa que não pode haver tantos!
Génio? Neste momento
Cem mil cérebros se concebem em sonho génios como eu,
E a história não marcará, quem sabe?, nem um,
Nem haverá senão estrume de tantas conquistas futuras.
Não, não creio em mim.
Em todos os manicómios há doidos malucos com tantas certezas!
Eu, que não tenho nenhuma certeza, sou mais certo ou menos certo?
Não, nem em mim...
Em quantas mansardas e não-mansardas do mundo
Não estão nesta hora génios-para-si-mesmos sonhando?
Quantas aspirações altas e nobres e lúcidas —
Sim, verdadeiramente altas e nobres e lúcidas —,
E quem sabe se realizáveis,
Nunca verão a luz do sol real nem acharão ouvidos de gente?
O mundo é para quem nasce para o conquistar
E não para quem sonha que pode conquistá-lo, ainda que tenha razão.
Tenho sonhado mais que o que Napoleão fez.
Tenho apertado ao peito hipotético mais humanidades do que Cristo,
Tenho feito filosofias em segredo que nenhum Kant escreveu.
Mas sou, e talvez serei sempre, o da mansarda,
Ainda que não more nela;
Serei sempre o que não nasceu para isso;
Serei sempre só o que tinha qualidades;
Serei sempre o que esperou que lhe abrissem a porta ao pé de uma parede sem porta
E cantou a cantiga do Infinito numa capoeira,
E ouviu a voz de Deus num poço tapado.
Crer em mim? Não, nem em nada.
Derrame-me a Natureza sobre a cabeça ardente
O seu sol, a sua chuva, o vento que me acha o cabelo,
E o resto que venha se vier, ou tiver que vir, ou não venha.
Escravos cardíacos das estrelas,
Conquistámos todo o mundo antes de nos levantar da cama;
Mas acordámos e ele é opaco,
Levantámo-nos e ele é alheio,
Saímos de casa e ele é a terra inteira,
Mais o sistema solar e a Via Láctea e o Indefinido.
(Come chocolates, pequena;
Come chocolates!
Olha que não há mais metafísica no mundo senão chocolates.
Olha que as religiões todas não ensinam mais que a confeitaria.
Come, pequena suja, come!
Pudesse eu comer chocolates com a mesma verdade com que comes!
Mas eu penso e, ao tirar o papel de prata, que é de folhas de estanho,
Deito tudo para o chão, como tenho deitado a vida.)
Mas ao menos fica da amargura do que nunca serei
A caligrafia rápida destes versos,
Pórtico partido para o Impossível.
Mas ao menos consagro a mim mesmo um desprezo sem lágrimas,
Nobre ao menos no gesto largo com que atiro
A roupa suja que sou, sem rol, pra o decurso das coisas,
E fico em casa sem camisa.
(Tu, que consolas, que não existes e por isso consolas,
Ou deusa grega, concebida como estátua que fosse viva,
Ou patrícia romana, impossivelmente nobre e nefasta,
Ou princesa de trovadores, gentilíssima e colorida,
Ou marquesa do século dezoito, decotada e longínqua,
Ou cocote célebre do tempo dos nossos pais,
Ou não sei quê moderno — não concebo bem o quê —,
Tudo isso, seja o que for, que sejas, se pode inspirar que inspire!
Meu coração é um balde despejado.
Como os que invocam espíritos invocam espíritos invoco
A mim mesmo e não encontro nada.
Chego à janela e vejo a rua com uma nitidez absoluta.
Vejo as lojas, vejo os passeios, vejo os carros que passam,
Vejo os entes vivos vestidos que se cruzam,
Vejo os cães que também existem,
E tudo isto me pesa como uma condenação ao degredo,
E tudo isto é estrangeiro, como tudo.)
Vivi, estudei, amei, e até cri,
E hoje não há mendigo que eu não inveje só por não ser eu.
Olho a cada um os andrajos e as chagas e a mentira,
E penso: talvez nunca vivesses nem estudasses nem amasses nem cresses
(Porque é possível fazer a realidade de tudo isso sem fazer nada disso);
Talvez tenhas existido apenas, como um lagarto a quem cortam o rabo
E que é rabo para aquém do lagarto remexidamente.
Fiz de mim o que não soube,
E o que podia fazer de mim não o fiz.
O dominó que vesti era errado.
Conheceram-me logo por quem não era e não desmenti, e perdi-me.
Quando quis tirar a máscara,
Estava pegada à cara.
Quando a tirei e me vi ao espelho,
Já tinha envelhecido.
Estava bêbado, já não sabia vestir o dominó que não tinha tirado.
Deitei fora a máscara e dormi no vestiário
Como um cão tolerado pela gerência
Por ser inofensivo
E vou escrever esta história para provar que sou sublime.
Essência musical dos meus versos inúteis,
Quem me dera encontrar-te como coisa que eu fizesse,
E não ficasse sempre defronte da Tabacaria de defronte,
Calcando aos pés a consciência de estar existindo,
Como um tapete em que um bêbado tropeça
Ou um capacho que os ciganos roubaram e não valia nada.
Mas o Dono da Tabacaria chegou à porta e ficou à porta.
Olhou-o com o desconforto da cabeça mal voltada
E com o desconforto da alma mal-entendendo.
Ele morrerá e eu morrerei.
Ele deixará a tabuleta, e eu deixarei versos.
A certa altura morrerá a tabuleta também, e os versos também.
Depois de certa altura morrerá a rua onde esteve a tabuleta,
E a língua em que foram escritos os versos.
Morrerá depois o planeta girante em que tudo isto se deu.
Em outros satélites de outros sistemas qualquer coisa como gente
Continuará fazendo coisas como versos e vivendo por baixo de coisas como tabuletas,
Sempre uma coisa defronte da outra,
Sempre uma coisa tão inútil como a outra,
Sempre o impossível tão estúpido como o real,
Sempre o mistério do fundo tão certo como o sono de mistério da superfície,
Sempre isto ou sempre outra coisa ou nem uma coisa nem outra.
Mas um homem entrou na Tabacaria (para comprar tabaco?),
E a realidade plausível cai de repente em cima de mim.
Semiergo-me enérgico, convencido, humano,
E vou tencionar escrever estes versos em que digo o contrário.
Acendo um cigarro ao pensar em escrevê-los
E saboreio no cigarro a libertação de todos os pensamentos.
Sigo o fumo como uma rota própria,
E gozo, num momento sensitivo e competente,
A libertação de todas as especulações
E a consciência de que a metafísica é uma consequência de estar mal disposto.
Depois deito-me para trás na cadeira
E continuo fumando.
Enquanto o Destino mo conceder, continuarei fumando.
(Se eu casasse com a filha da minha lavadeira
Talvez fosse feliz.)
Visto isto, levanto-me da cadeira. Vou à janela.
O homem saiu da Tabacaria (metendo troco na algibeira das calças?).
Ah, conheço-o: é o Esteves sem metafísica.
(O Dono da Tabacaria chegou à porta.)
Como por um instinto divino o Esteves voltou-se e viu-me.
Acenou-me adeus gritei-lhe Adeus ó Esteves!, e o universo
Reconstruiu-se-me sem ideal nem esperança, e o Dono da Tabacaria sorriu.
'''
xpto = Counter(tab.split())
print(xpto.most_common(10))
|
489384
|
from typing import Callable, Dict, TYPE_CHECKING, Union
from functools import partial
from catalyst.core.callback import IBackwardCallback
from catalyst.registry import REGISTRY
if TYPE_CHECKING:
from catalyst.core.runner import IRunner
class BackwardCallback(IBackwardCallback):
"""Optimizer callback, abstraction over backward step.
Args:
metric_key: a key to get loss from ``runner.batch_metrics``
        grad_clip_fn: callable gradient clipping function or its name
grad_clip_params: key-value parameters for grad_clip_fn
log_gradient: boolean flag to log gradient norm to ``runner.batch_metrics``
.. note::
Please follow the `minimal examples`_ sections for more use cases.
.. _`minimal examples`: https://github.com/catalyst-team/catalyst#minimal-examples # noqa: E501, W505
"""
def __init__(
self,
metric_key: str,
grad_clip_fn: Union[str, Callable] = None,
grad_clip_params: Dict = None,
log_gradient: bool = False,
):
"""Init."""
super().__init__()
self.metric_key = metric_key
if isinstance(grad_clip_fn, str):
self.grad_clip_fn = REGISTRY.get(grad_clip_fn)
else:
self.grad_clip_fn = grad_clip_fn
if grad_clip_params is not None:
self.grad_clip_fn = partial(self.grad_clip_fn, **grad_clip_params)
self._prefix_gradient = f"gradient/{metric_key}"
self._log_gradient = log_gradient
def on_batch_end(self, runner: "IRunner"):
"""Event handler."""
if runner.is_train_loader:
loss = runner.batch_metrics[self.metric_key]
runner.engine.backward(loss)
if self.grad_clip_fn is not None:
runner.engine.unscale_gradients()
                norm = self.grad_clip_fn(runner.model.parameters())
if self._log_gradient:
runner.batch_metrics[f"{self._prefix_gradient}/norm"] = norm
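# Example usage (sketch, assuming a standard catalyst runner training loop):
#
#   import torch
#   runner.train(
#       ...,
#       callbacks=[
#           BackwardCallback(
#               metric_key="loss",
#               grad_clip_fn=torch.nn.utils.clip_grad_norm_,
#               grad_clip_params={"max_norm": 1.0},
#           ),
#       ],
#   )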
__all__ = ["BackwardCallback"]
|
489422
|
import pytest
from motllo.markdown_parser import (
InlinedCodeBlock,
BareCodeBlock,
BareCodeBlockParser,
LinkBlock,
ListBlock,
ListBlockParser,
TextBlockParser,
CodeBlockParser,
OrderedOneOfParser,
LinkBlockParser,
TagBlockParser,
SequenceParser,
TextBlock,
CodeBlock,
TagBlock,
InlinedCodeBlockParser,
)
from random import sample
CODE = '\ndef hello_world():\n\t print("Hello world")\n'
TAG = "#this/is/a/tag"
TITLE = "This goes somewhere"
BLOCKS = {
"text_block": "Text text\n text\n\t more text",
"code": CODE,
"title": TITLE,
"code_block": f"\n```python{CODE}```\n",
"code_block_no_lang": "\n```import org.apache.spark.SparkSession\n```\n",
"unterminated_code_block": "\n```oh crap\n\n",
"tag_block": TAG,
"tag_in_code_block": f"\n```foo\n{TAG}\n```\n",
"tag_in_inlined_code_block": f"`{TAG}` ",
"tag_in_link_block": f"[{TITLE}](https://foo.com/{TAG})",
}
def test_text_block():
text_block = BLOCKS["text_block"]
unterminated_code_block = BLOCKS["unterminated_code_block"]
text = f"{text_block}{unterminated_code_block}"
parsed_text_block, rest = TextBlockParser().parse(text)
assert parsed_text_block == TextBlock(text_block + "\n")
assert rest == unterminated_code_block[1:]
def test_list_block():
text = f"- This is a bullet list\n- Another item\n End"
parsed_text_block, rest = ListBlockParser().parse(text)
assert parsed_text_block == ListBlock("This is a bullet list")
def test_tag_block():
tag_block = BLOCKS["tag_block"]
unterminated_code_block = BLOCKS["unterminated_code_block"]
text = f"{tag_block}{unterminated_code_block}"
parsed_tag_block, rest = TagBlockParser().parse(text)
assert parsed_tag_block == TagBlock(tag_block)
assert rest == unterminated_code_block
def test_link_block():
link_block = BLOCKS["tag_in_link_block"]
text_block = BLOCKS["text_block"]
tag_block = BLOCKS["tag_block"]
title = BLOCKS["title"]
text = f"{text_block} {link_block} {text_block}"
text_or_link = OrderedOneOfParser([TextBlockParser(), LinkBlockParser()])
sequenced = SequenceParser([text_or_link])
blocks, rest = sequenced.parse(text)
assert blocks[0] == TextBlock(text_block + " ")
assert blocks[1] == LinkBlock(text=title, link=f"https://foo.com/{tag_block}")
assert blocks[2] == TextBlock(" " + text_block)
def test_parse_inlined_code_block():
text_block = BLOCKS["text_block"]
inlined_code_block = BLOCKS["tag_in_inlined_code_block"]
code = BLOCKS["tag_block"]
text = f"{inlined_code_block}{text_block}"
block, rest = InlinedCodeBlockParser().parse(text)
assert block == InlinedCodeBlock(f"{code}")
assert rest == " " + text_block
def test_problematic_code_block():
text = """```
A:B:C~E:F~G:H~~I::J~K~L
```
There are"""
block, rest = BareCodeBlockParser().parse(text)
assert block == BareCodeBlock("A:B:C~E:F~G:H~~I::J~K~L\n")
assert rest == " \n\nThere are"
def test_parse_code_block():
text_block = BLOCKS["text_block"]
code_block = BLOCKS["code_block"][1:]
code = BLOCKS["code"]
text = f"{code_block}{text_block}"
block, rest = CodeBlockParser().parse(text)
assert block == CodeBlock(f"{code}", "python")
assert rest == text_block
text_block = BLOCKS["text_block"]
code_block = BLOCKS["tag_in_code_block"][1:]
code = BLOCKS["tag_block"]
text = f"{code_block}{text_block}"
block, rest = CodeBlockParser().parse(text)
assert block == CodeBlock(f"\n{code}\n", "foo")
assert rest == text_block
def test_parse_text_and_code_block():
text_block = BLOCKS["text_block"]
code_block = BLOCKS["code_block"]
    # Note: sampling random characters from the text block can possibly cause
    # a spurious failure if we hit a newline badly.
    second_text_block = "".join(sample(BLOCKS["text_block"], 10))
code = BLOCKS["code"]
text = f"{text_block}{code_block}{second_text_block}"
text_or_code = OrderedOneOfParser([TextBlockParser(), CodeBlockParser()])
sequenced = SequenceParser([text_or_code])
blocks, rest = sequenced.parse(text)
assert blocks[0] == TextBlock(text_block + "\n")
assert blocks[1] == CodeBlock(f"{code}", "python")
assert blocks[2] == TextBlock(second_text_block)
assert rest is None
def test_parse_text_and_tag_code_block():
text_block = BLOCKS["text_block"]
code_block = BLOCKS["code_block"]
tag_block = BLOCKS["tag_block"]
tag_in_code = BLOCKS["tag_in_code_block"]
second_text_block = "".join(sample(BLOCKS["text_block"], 10))
code = BLOCKS["code"]
text = f"{text_block}\n{tag_block}{code_block}{second_text_block}{tag_in_code}"
print(text)
text_or_code = OrderedOneOfParser(
[TextBlockParser(), CodeBlockParser(), TagBlockParser()]
)
sequenced = SequenceParser([text_or_code])
blocks, rest = sequenced.parse(text)
assert blocks[0] == TextBlock(text_block + "\n")
assert blocks[1] == TagBlock(tag_block)
assert blocks[2] == TextBlock("\n")
assert blocks[3] == CodeBlock(f"{code}", "python")
assert blocks[4] == TextBlock(" " + second_text_block + "\n")
assert blocks[5] == CodeBlock(f"\n{tag_block}\n", "foo")
assert rest is None
def test_parse_text_and_tag_inlined_code_block():
text_block = BLOCKS["text_block"]
code_block = BLOCKS["code_block"]
tag_block = BLOCKS["tag_block"]
tag_in_code = BLOCKS["tag_in_code_block"]
tag_in_inlined_code_block = BLOCKS["tag_in_inlined_code_block"]
second_text_block = "".join(sample(BLOCKS["text_block"], 10))
code = BLOCKS["code"]
text = f"{text_block}\n{tag_block}{code_block}{tag_in_inlined_code_block}{second_text_block}{tag_in_code}"
print(text)
text_or_code = OrderedOneOfParser(
[
TextBlockParser(),
CodeBlockParser(),
InlinedCodeBlockParser(),
TagBlockParser(),
]
)
sequenced = SequenceParser([text_or_code])
blocks, rest = sequenced.parse(text)
assert blocks[0] == TextBlock(text_block + "\n")
assert blocks[1] == TagBlock(tag_block)
assert blocks[2] == TextBlock("\n")
assert blocks[3] == CodeBlock(f"{code}", "python")
assert blocks[4] == InlinedCodeBlock(f"{tag_block}")
assert blocks[5] == TextBlock(" " + second_text_block + "\n")
assert blocks[6] == CodeBlock(f"\n{tag_block}\n", "foo")
assert rest is None
|
489440
|
AUDIO = 'audio'
VIDEO = 'video'
FLOW = 'flow'
ENCODERS = [AUDIO, VIDEO, FLOW]
NO_SEPARATION = 'none'
FREQ_MASK = 'unet_mask'
SEPARATION = [NO_SEPARATION, FREQ_MASK]
FFT_WINDOW = 25 * 0.001 # sec
FFT_OVERLAP_R = 2 # number of window overlaps
NUM_SEP_TRACKS_DEF = 32
CTX_FEATS_FCUNITS_DEF = [64, 128, 128]
SEP_FREQ_MASK_FCUNITS_DEF = [256]
LOC_FCUNITS_DEF = [512, 512]
SEP_FFT_WINDOW_DEF = 0.025
|
489454
|
import os
import pytest
import fault
import magma as m
def test_bit_pattern_simple():
class Foo(m.Circuit):
io = m.IO(I=m.In(m.Bits[8]), O=m.Out(m.Bit))
bit_pat = m.BitPattern("b??1??01?")
io.O @= bit_pat == io.I
m.compile("build/Foo", Foo)
tester = fault.Tester(Foo)
tester.circuit.I = 0b00100010
tester.eval()
tester.circuit.O.expect(1)
tester.circuit.I = 0b10100010
tester.eval()
tester.circuit.O.expect(1)
tester.circuit.I = 0b10100000
tester.eval()
tester.circuit.O.expect(0)
tester.compile_and_run("verilator",
directory=os.path.join(os.path.dirname(__file__),
"build"))
def test_as_bv():
x = m.BitPattern("b1001")
assert x.as_bv() == 0b1001
y = m.BitPattern("b1??1")
with pytest.raises(TypeError) as e:
y.as_bv()
assert (str(e.value) ==
"Can only convert BitPattern with no don't cares to int")
def test_hashable():
x = m.BitPattern("b10?1")
y = m.BitPattern("b1?10")
dict_ = {
x: 1,
y: 0
}
assert dict_[x] == 1
assert dict_[y] == 0
|
489470
|
import time
import asyncio
import logging
from concurrent import futures
from xwing.socket.client import Client
logging.basicConfig(level='INFO')
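# Benchmark sketch: each worker connects to an xwing socket server registered
# as 'server0' on localhost:5555 and counts request/response round trips for
# `duration` seconds; the server itself is assumed to be running separately.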
async def connect_and_send(loop, endpoint, payload, start, duration):
client = Client(loop, endpoint)
conn = await client.connect('server0')
n = 0
while time.monotonic() - start < duration:
await conn.send(payload)
await conn.recv()
n += 1
return n
def run(start, duration, data=b'x'):
loop = asyncio.get_event_loop()
requests = loop.run_until_complete(connect_and_send(
        loop, "localhost:5555", data, start, duration))
loop.close()
return requests
def main(number_of_workers=10, duration=30):
start = time.monotonic()
with futures.ProcessPoolExecutor(max_workers=number_of_workers) as \
executor:
fs = [executor.submit(run, start, duration) for i in range(number_of_workers)]
reqs_per_second = sum([f.result() for f in futures.wait(fs).done]) / duration
print('Requests per second w=%d: ' % number_of_workers,
reqs_per_second)
if __name__ == '__main__':
main(number_of_workers=4)
|
489485
|
import unittest
from spec.metrics.src.tag import Tag
class TestTag(unittest.TestCase):
def test_create_instance(self):
tag = Tag('name', 'value')
self.assertEqual(tag.get_tag_name(), 'name')
self.assertEqual(tag.get_tag_value(), 'value')
def test_create_raises_exception(self):
bad_name = lambda: Tag('_%&$#@', 'value')
self.assertRaises(Exception, bad_name)
def test_equals(self):
tag1 = Tag('name', 'value')
tag2 = Tag('name', 'value')
self.assertEqual(tag1, tag2)
if __name__ == '__main__':
unittest.main()
|
489490
|
from setuptools import setup
setup(
name='torch-dct',
version='0.1.5',
packages=['torch_dct'],
platforms='any',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3'
],
install_requires=['torch>=0.4.1'],
url='https://github.com/zh217/torch-dct',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
description='Discrete Cosine Transform (DCT) for pytorch',
long_description=open('README.md').read(),
long_description_content_type='text/markdown'
)
|
489496
|
import os
from kgcnn.data.moleculenet import MoleculeNetDataset
from kgcnn.data.download import DownloadDataset
class MoleculeNetDataset2018(MoleculeNetDataset, DownloadDataset):
"""Downloader for DeepChem MoleculeNetDataset 2018 class.
"""
datsets_download_info = {
"ESOL": {"dataset_name": "ESOL", "download_file_name": 'delaney-processed.csv', "data_directory_name": "ESOL"},
"FreeSolv": {"dataset_name": "FreeSolv", "data_directory_name": "FreeSolv", "download_file_name": 'SAMPL.csv'},
"Lipop": {"dataset_name": "Lipop", "data_directory_name": "Lipop", "download_file_name": 'Lipophilicity.csv'},
"PCBA": {"dataset_name": "PCBA", "data_directory_name": "PCBA", "download_file_name": 'pcba.csv.gz',
"extract_gz": True, "extract_file_name": 'pcba.csv'},
"MUV": {"dataset_name": "MUV", "data_directory_name": "MUV", "download_file_name": 'muv.csv.gz',
"extract_gz": True, "extract_file_name": 'muv.csv'},
"HIV": {"dataset_name": "HIV", "data_directory_name": "HIV", "download_file_name": 'HIV.csv'},
"BACE": {"dataset_name": "BACE", "data_directory_name": "BACE", "download_file_name": 'bace.csv'},
"BBBP": {"dataset_name": "BBBP", "data_directory_name": "BBBP", "download_file_name": 'BBBP.csv'},
"Tox21": {"dataset_name": "Tox21", "data_directory_name": "Tox21", "download_file_name": 'tox21.csv.gz',
"extract_gz": True, "extract_file_name": 'tox21.csv'},
"ToxCast": {"dataset_name": "ToxCast", "data_directory_name": "ToxCast",
"download_file_name": 'toxcast_data.csv.gz', "extract_gz": True,
"extract_file_name": 'toxcast_data.csv'},
"SIDER": {"dataset_name": "SIDER", "data_directory_name": "SIDER", "download_file_name": 'sider.csv.gz',
"extract_gz": True, "extract_file_name": 'sider.csv'},
"ClinTox": {"dataset_name": "ClinTox", "data_directory_name": "ClinTox", "download_file_name": 'clintox.csv.gz',
"extract_gz": True, "extract_file_name": 'clintox.csv'},
}
datasets_prepare_data_info = {
"ESOL": {"make_conformers": True, "add_hydrogen": True},
"FreeSolv": {"make_conformers": True, "add_hydrogen": True},
"Lipop": {"make_conformers": True, "add_hydrogen": True},
"PCBA": {"make_conformers": True, "add_hydrogen": True},
"MUV": {"make_conformers": True, "add_hydrogen": True},
"HIV": {"make_conformers": True, "add_hydrogen": True},
"BACE": {"make_conformers": True, "add_hydrogen": True, "smiles_column_name": "mol"},
"BBBP": {"make_conformers": True, "add_hydrogen": True, "smiles_column_name": "smiles"},
"Tox21": {"make_conformers": True, "add_hydrogen": True, "smiles_column_name": "smiles"},
"ToxCast": {"make_conformers": True, "add_hydrogen": True, "smiles_column_name": "smiles"},
"SIDER": {"make_conformers": True, "add_hydrogen": True, "smiles_column_name": "smiles"},
"ClinTox": {"make_conformers": True, "add_hydrogen": True, "smiles_column_name": "smiles"}
}
datasets_read_in_memory_info = {
"ESOL": {"add_hydrogen": False, "has_conformers": True,
"label_column_name": "measured log solubility in mols per litre"},
"FreeSolv": {"add_hydrogen": False, "has_conformers": True, "label_column_name": "expt"},
"Lipop": {"add_hydrogen": False, "has_conformers": True, "label_column_name": "exp"},
"PCBA": {"add_hydrogen": False, "has_conformers": False, "label_column_name": slice(0, 128)},
"MUV": {"add_hydrogen": False, "has_conformers": True, "label_column_name": slice(0, 17)},
"HIV": {"add_hydrogen": False,"has_conformers": True, "label_column_name": "HIV_active"},
"BACE": {"add_hydrogen": False, "has_conformers": True, "label_column_name": "Class"},
"BBBP": { "add_hydrogen": False, "has_conformers": True, "label_column_name": "p_np"},
"Tox21": {"add_hydrogen": False, "has_conformers": True, "label_column_name": slice(0, 12)},
"ToxCast": {"add_hydrogen": False, "has_conformers": True, "label_column_name": slice(1, 618)},
"SIDER": {"add_hydrogen": False, "has_conformers": True, "label_column_name": slice(1, 28)},
"ClinTox": {"add_hydrogen": False, "has_conformers": True, "label_column_name": [1, 2]}
}
def __init__(self, dataset_name: str, reload: bool = False, verbose: int = 1):
"""Initialize a `GraphTUDataset` instance from string identifier.
Args:
dataset_name (str): Name of a dataset.
reload (bool): Download the dataset again and prepare data on disk.
verbose (int): Print progress or info for processing, where 0 is silent. Default is 1.
"""
if not isinstance(dataset_name, str):
            raise ValueError("Please provide a string identifier for the MoleculeNet dataset.")
MoleculeNetDataset.__init__(self, verbose=verbose, dataset_name=dataset_name)
# Prepare download
if dataset_name in self.datsets_download_info:
self.download_info = self.datsets_download_info[dataset_name]
self.download_info.update({"download_url": "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/" +
self.download_info["download_file_name"]})
else:
            raise ValueError("ERROR:kgcnn: Cannot resolve %s as a MoleculeNet dataset. Pick " % dataset_name,
self.datsets_download_info.keys(),
"For new dataset, add to `datsets_download_info` list manually.")
DownloadDataset.__init__(self, **self.download_info, reload=reload, verbose=verbose)
self.data_directory = os.path.join(self.data_main_dir, self.data_directory_name)
self.file_name = self.download_file_name if self.extract_file_name is None else self.extract_file_name
self.dataset_name = dataset_name
self.require_prepare_data = True
self.fits_in_memory = True
if self.require_prepare_data:
self.prepare_data(overwrite=reload, **self.datasets_prepare_data_info[self.dataset_name])
if self.fits_in_memory:
self.read_in_memory(**self.datasets_read_in_memory_info[self.dataset_name])
# data = MoleculeNetDataset2018("ESOL", reload=False).set_attributes()
# data = MoleculeNetDataset2018("PCBA", reload=False).set_attributes()
# data = MoleculeNetDataset2018("ClinTox", reload=True).set_attributes()
|
489531
|
import os
from unittesting import DeferrableTestCase
from SublimeLinter.tests.parameterized import parameterized as p
from SublimeLinter.tests.mockito import unstub, when
import sublime
from SublimeLinter import panel_view
CODE = 'arbitrary_violation'
STD_ERROR = {
'line': 0,
'start': 0,
'region': sublime.Region(0, 2),
'error_type': 'error',
'linter': 'the_foo',
'code': CODE,
'msg': 'The error is arbitrary.',
}
def std_error(**kw):
rv = STD_ERROR.copy()
rv.update(kw)
return rv
class TestResultRegexes(DeferrableTestCase):
@classmethod
def setUpClass(cls):
# make sure we have a window to work with
s = sublime.load_settings("Preferences.sublime-settings")
s.set("close_windows_when_empty", False)
@classmethod
def tearDownClass(cls):
unstub()
def setUp(self):
sublime.run_command("new_window")
window = self.window = sublime.active_window()
panel_view.ensure_panel(window)
window.run_command('sublime_linter_panel_toggle') # make it visible
self.view = self.create_view(window)
def tearDown(self):
self.window.run_command('close_window')
unstub()
def create_view(self, window):
view = window.new_file()
self.addCleanup(self.close_view, view)
return view
def close_view(self, view):
view.set_scratch(True)
view.close()
@p.expand(
[
(
'sorted files',
{'/foo/b.py': [std_error()], '/foo/a.py': [std_error()]},
[('/foo/a.py', 1, 1), ('/foo/b.py', 1, 1)],
),
(
'absolute windows paths',
{'C:\\zoo\\b.py': [std_error()], 'D:\\xoo\\f.py': [std_error()]},
[('/C/zoo/b.py', 1, 1), ('/D/xoo/f.py', 1, 1)]
if os.name == 'nt'
else [('C:\\zoo\\b.py', 1, 1), ('D:\\xoo\\f.py', 1, 1)],
),
(
'message ends with colon',
{'/foo/a.py': [std_error(msg='Message ends with a colon:')]},
[('/foo/a.py', 1, 1)],
),
]
)
def test_(self, _, ERRORS, RESULT):
window = self.window
when(panel_view).get_window_errors(...).thenReturn(ERRORS)
panel_view.fill_panel(window)
panel = panel_view.get_panel(window)
# The interface updates async.
yield lambda: panel.find(CODE, 0, sublime.LITERAL)
results = panel.find_all_results()
self.assertEqual(results, RESULT)
@p.expand(
[
(
{'/a.py': [std_error()], '/b.py': [std_error()], '/c.py': [std_error()]},
[('/b.py', 1, 1), ('/c.py', 1, 1), ('/a.py', 1, 1)],
'/a.py'
),
]
)
def test_active_file_comes_last(self, ERRORS, RESULT, ACTIVE_FILE):
window = self.window
when(panel_view).get_window_errors(...).thenReturn(ERRORS)
panel_view.State.update({
'active_view': self.view,
'active_filename': ACTIVE_FILE,
'cursor': 0
})
panel_view.fill_panel(window)
panel = panel_view.get_panel(window)
# The interface updates async.
yield lambda: panel.find(CODE, 0, sublime.LITERAL)
results = panel.find_all_results()
self.assertEqual(results, RESULT)
@p.expand(
[
(
{'/b.py': [std_error()], '/c.py': [std_error()]},
[('/b.py', 1, 1), ('/c.py', 1, 1)],
'/a.py'
),
]
)
def test_clean_active_file_displays_std_message(self, ERRORS, RESULT, ACTIVE_FILE):
window = self.window
when(panel_view).get_window_errors(...).thenReturn(ERRORS)
panel_view.State.update({
'active_view': self.view,
'active_filename': ACTIVE_FILE,
'cursor': 0
})
panel_view.fill_panel(window)
panel = panel_view.get_panel(window)
# The interface updates async.
match = yield lambda: panel.find('a.py:\n No lint results', 0, sublime.LITERAL)
self.assertTrue(match)
|
489570
|
import pytest
from datasciencebox.core.settings import Settings
def test_required_bare_fields():
settings = Settings()
assert settings['CLOUD'] == 'bare'
with pytest.raises(AssertionError):
settings.validate_fields()
settings['NODES'] = []
settings['USERNAME'] = 'root'
settings['KEYPAIR'] = '~/.ssh/something'
settings.validate_fields()
def test_required_aws_fields():
settings = Settings()
settings['CLOUD'] = 'aws'
with pytest.raises(AssertionError):
settings.validate_fields()
settings['AWS_KEY'] = '1'
settings['AWS_SECRET'] = '1'
settings['AWS_KEYNAME'] = '1'
settings['AWS_REGION'] = '1'
settings['AWS_SECURITY_GROUPS'] = '1'
settings['AWS_IMAGE'] = '1'
settings['AWS_SIZE'] = '1'
settings['USERNAME'] = '1'
settings['KEYPAIR'] = '~/.ssh/something'
settings['NUMBER_NODES'] = 3
settings.validate_fields()
|
489631
|
from invoke import task
@task(help={
'bytecode': 'Remove bytecode files matching the pattern \'**/*.pyc\'.',
'cache': 'Remove the \'.wbcache.p\' file.',
'extra': 'Remove any extra files passed in here.'
})
def clean(ctx, cache=False, bytecode=False, extra=''):
"""
Clean (delete) files. If passed with no arguments, nothing is deleted.
"""
patterns = []
if cache:
patterns.append('.wbcache.p')
if bytecode:
patterns.append('**/*.pyc')
if extra:
patterns.append(extra)
for pattern in patterns:
ctx.run('rm -rf %s' % pattern)
@task(help={
'pylintrc': 'Path to a pylintrc file for configuring PyLint.',
'extra': 'Extra Python files to lint in addition to the default.'
})
def lint(ctx, pylintrc='.pylintrc', extra=''):
"""
Use PyLint to check for errors and enforce a coding standard.
This will, by default, use the PyLint configuration found in '.pylintrc',
but can accept a different path.
"""
from pylint.lint import Run
args = ['--reports=no', '--rcfile=' + pylintrc]
files = ['weatherBot.py', 'utils.py', 'models.py', 'keys.py']
if extra:
files.append(extra)
Run(args + files)
@task(help={
    'yamllintrc': 'Path to a yamllintrc file for configuring yamllint.',
'filename': 'Path to the strings YAML file to validate.'
})
def validateyaml(ctx, yamllintrc='.yamllint', filename='strings.yml'):
"""
Use yamllint to check for errors and enforce a markup standard for the strings YAML file.
By default this will use the '.yamllint' config file to validate 'strings.yml'.
"""
ctx.run('yamllint --config-file %s %s' % (yamllintrc, filename))
@task(help={
'report': 'Flag to print a coverage report'
})
def test(ctx, report=False):
"""
Runs tests and reports on code coverage.
    Keys need to be entered in 'keys.py' or set as environment variables.
"""
ctx.run('coverage run --source=weatherBot,models,utils,keys test.py')
if report:
ctx.run('coverage report -m')
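# Example invocations (sketch):
#   invoke clean --cache --bytecode
#   invoke validateyaml --filename=strings.yml
#   invoke test --report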
|
489643
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy
# Run from the console (e.g. an Anaconda prompt): python setup.py build_ext --inplace
# setup(
# ext_modules=cythonize("hybrid_hawkes_exp_likelihood.pyx"),
# include_dirs=[numpy.get_include()]
# )
ext_modules = [
    Extension(
        "hybrid_hawkes_exp_cython",
        ["hybrid_hawkes_exp_cython.pyx"],
        # libraries=["m"],  # comment this line when compiling on Windows
        extra_compile_args=["-ffast-math"],
    )
]
setup(
    name="hybrid_hawkes_exp_cython",
    cmdclass={"build_ext": build_ext},
    ext_modules=ext_modules,
    include_dirs=[numpy.get_include()],
)
|