473377
from ..visualize import plot
from ..utils import use_gpu, seed, U
from ..data_structures import DotDict
import framework
import os
import shutil
import sys
from datetime import datetime
import socket
from typing import List, Callable, Optional, Any
from .saver import Saver
from .argument_parser import ArgumentParser
import torch
import time
import subprocess
from copy import deepcopy
def get_plot_config(args):
assert args.log in ["all", "tb", "wandb"]
return args.log in ["all", "tb"], args.log in ["all", "wandb"]
class TrainingHelper:
class Dirs:
pass
def __init__(self, register_args: Optional[Callable[[ArgumentParser],None]],
wandb_project_name: Optional[str] = None,
log_async: bool=False, extra_dirs: List[str]=[]):
self.is_sweep = False
self.log_async = log_async
self.wandb_project_name = wandb_project_name
self.all_dirs = ["checkpoint", "tensorboard"] + extra_dirs
self.create_parser()
if register_args is not None:
register_args(self.arg_parser)
self.start()
def print_env_info(self):
try:
import pkg_resources
print("---------------- Environment information: ----------------")
installed_packages = pkg_resources.working_set
print(list(sorted(["%s==%s" % (i.key, i.version) for i in installed_packages])))
print("----------------------------------------------------------")
        except Exception:
            pass
try:
git = subprocess.run(["git", "rev-parse", "--verify", "HEAD"], stderr=subprocess.DEVNULL,
stdout=subprocess.PIPE)
if git.returncode == 0:
print(f"Git hash: {git.stdout.decode().strip()}")
        except Exception:
            pass
def create_parser(self):
self.arg_parser = ArgumentParser(get_train_dir=lambda x: os.path.join("save", x.name) if x.name is not None
else None)
self.arg_parser.add_argument("-name", type=str, help="Train dir name")
self.arg_parser.add_argument("-reset", default=False, help="reset training - ignore saves", save=False)
self.arg_parser.add_argument("-log", default="tb")
self.arg_parser.add_argument("-save_interval", default="5000", parser=self.arg_parser.int_or_none_parser)
self.arg_parser.add_argument("-wandb_save_interval", default="None", parser=self.arg_parser.int_or_none_parser)
self.arg_parser.add_argument("-seed", default="none", parser=self.arg_parser.int_or_none_parser)
self.arg_parser.add_argument("-gpu", default="auto", help="use this gpu")
self.arg_parser.add_argument("-keep_alive", default=False)
self.arg_parser.add_argument("-sweep_id_for_grid_search", default=0,
help="Doesn't do anything, just to run multiple W&B iterations.")
self.arg_parser.add_argument("-restore", default="")
def create_dirs(self):
self.dirs = self.Dirs()
self.dirs.base = self.summary.save_dir
for d in self.all_dirs:
assert d not in self.dirs.__dict__, f"Directory {d} already exists"
self.dirs.__dict__[d] = os.path.join(self.dirs.base, d)
if self.args.reset:
print("Resetting training state...")
for d in self.all_dirs:
shutil.rmtree(self.dirs.__dict__[d], ignore_errors=True)
for d in self.all_dirs:
os.makedirs(self.dirs.__dict__[d], exist_ok=True)
def save_startup_log(self):
self.arg_parser.save(os.path.join(self.summary.save_dir, "args.json"))
with open(os.path.join(self.summary.save_dir, "startup_log.txt"), "a+") as f:
f.write(f"{str(datetime.now())} {socket.gethostname()}: {' '.join(sys.argv)}\n")
def start_tensorboard(self):
if self.use_tensorboard:
os.makedirs(self.dirs.tensorboard, exist_ok=True)
framework.visualize.tensorboard.start(log_dir=self.dirs.tensorboard)
def use_cuda(self) -> bool:
return torch.cuda.is_available() and self.args.gpu.lower() != "none"
def setup_environment(self):
use_gpu(self.args.gpu)
if self.args.seed is not None:
seed.fix(self.args.seed)
self.device = torch.device("cuda") if self.use_cuda() else torch.device("cpu")
def start(self):
self.args = self.arg_parser.parse_and_try_load()
self.restore_pending = None
if self.args.restore:
# Restore args first such that the rest of the config is loaded correctly. Do not restore the GPU settings.
assert self.args.reset is False, "Cannot restore and reset at the same time"
gpu_backup = self.args.gpu
self.restore_pending = Saver.do_load(self.args.restore)
self.arg_parser.from_dict(self.restore_pending["run_invariants"]["args"])
self.args = self.arg_parser.parse_and_try_load()
self.args.gpu = gpu_backup
self.args.reset = False
self.use_tensorboard, self.use_wandb = get_plot_config(self.args)
constructor = plot.AsyncLogger if self.log_async else plot.Logger
assert (not self.use_wandb) or (self.wandb_project_name is not None), \
'Must specify wandb project name if logging to wandb.'
self.state = DotDict()
self.state.iter = 0
assert self.args.name is not None or self.use_wandb, "Either name must be specified or W&B should be used"
if self.args.restore and self.restore_pending["run_invariants"]["wandb_id"] is not None:
wandb_args = {
"project": self.wandb_project_name,
"id": self.restore_pending["run_invariants"]["wandb_id"]["run_id"],
"resume": "must"
}
else:
wandb_args = {
"project": self.wandb_project_name,
"config": self.arg_parser.to_dict()
}
self.summary = constructor(save_dir=os.path.join("save", self.args.name) if self.args.name is not None else None,
use_tb=self.use_tensorboard,
use_wandb=self.use_wandb,
wandb_init_args=wandb_args,
wandb_extra_config={
"experiment_name": self.args.name
},
get_global_step = lambda: self.state.iter)
if self.use_wandb:
self.print_env_info()
self.run_invariants = {
"wandb_id": self.summary.wandb_id,
"args": self.arg_parser.to_dict()
}
self.create_dirs()
self.save_startup_log()
self.start_tensorboard()
self.saver = Saver(self.dirs.checkpoint, self.args.save_interval,
keep_every_n_hours=None if self.use_wandb else 4)
self.saver["state"] = self.state
self.saver["run_invariants"] = deepcopy(self.run_invariants)
self.setup_environment()
def wait_for_termination(self):
if self.args.keep_alive and self.use_tensorboard and not self.use_wandb:
print("Done. Waiting for termination")
while True:
time.sleep(100)
def save(self):
self.saver.save(iter=self.state.iter)
self.saver.cleanup()
def tick(self):
self.saver.tick(iter=self.state.iter)
def finish(self):
self.summary.finish()
if self.is_sweep or self.args.save_interval is None:
self.save()
self.wait_for_termination()
def to_device(self, data: Any) -> Any:
return U.apply_to_tensors(data, lambda d: d.to(self.device))
def restore(self):
if self.restore_pending is not None:
assert self.saver.load_data(self.restore_pending), "Restoring failed."
self.restore_pending = None
restored = True
else:
restored = self.saver.load()
if restored:
# Do not restore these things
self.saver.register("run_invariants", deepcopy(self.run_invariants), replace=True)
def get_storage_path(self, path: str) -> str:
path = os.path.join(self.dirs.export, path)
os.makedirs(os.path.dirname(path), exist_ok=True)
return path
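# Hedged usage sketch (not part of the original module): a minimal training
# loop driving TrainingHelper, assuming a user-supplied `register_args`
# callback and a per-step update; all names below are illustrative only.
#
#   def register_args(parser):
#       parser.add_argument("-lr", default="1e-3")
#
#   helper = TrainingHelper(register_args, wandb_project_name="my_project")
#   helper.restore()
#   while helper.state.iter < 1000:
#       # ... run one optimization step on helper.device ...
#       helper.state.iter += 1
#       helper.tick()
#   helper.finish()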
473382
import copy
import os
import random
import string
import unittest
from contextlib import redirect_stdout
from os import path
import io
import torch
from closest_string.task.dataset_generator_synthetic import ClosestStringDatasetGenerator
from edit_distance.models.cnn.model import CNN
from edit_distance.models.feedforward.model import MLPEncoder
from edit_distance.models.recurrent.model import GRU
from edit_distance.models.transformer.model import Transformer
from edit_distance.train import general_arg_parser, execute_train
from util.data_handling.string_generator import IndependentGenerator
from edit_distance.task.dataset_generator_synthetic import EditDistanceDatasetGenerator
ALPHABET_SIZE = 4
def generate_dataset_and_parser():
folder_name = ''.join(random.choice(string.ascii_lowercase) for _ in range(10))
generator = IndependentGenerator(alphabet_size=ALPHABET_SIZE, seed=0)
edit_dataset_name = folder_name + '/test_ed.pkl'
edit_dataset = EditDistanceDatasetGenerator(
N_batches={"train": 2, "val": 2, "test": 2},
batch_size={"train": 5, "val": 3, "test": 3},
len_sequence={"train": 10, "val": 10, "test": 10},
max_changes={"train": 2, "val": 2, "test": 2},
string_generator=generator, seed=0)
edit_dataset.save_as_pickle(edit_dataset_name)
closest_dataset_name = folder_name + '/test_closest.pkl'
closest_dataset = ClosestStringDatasetGenerator(N_reference=3, N_query=4, len_sequence=10, min_changes=2,
max_changes=4, initials=3, string_generator=generator, seed=0)
closest_dataset.save_as_pickle(closest_dataset_name)
parser = general_arg_parser()
args = parser.parse_args()
args.data = edit_dataset_name
args.epochs = 2
args.print_every = 1
args.closest_data_path = closest_dataset_name
return folder_name, edit_dataset_name, closest_dataset_name, args
def remove_files(folder_name, edit_dataset_name, closest_dataset_name):
if path.exists('MLPEncoder.pkl'): os.remove('MLPEncoder.pkl')
if path.exists('0.pkl'): os.remove('0.pkl')
if path.exists('1.pkl'): os.remove('1.pkl')
os.remove(edit_dataset_name)
os.remove(closest_dataset_name)
os.rmdir(folder_name)
class TestClosestDatasetGenerationSynthetic(unittest.TestCase):
def test_cosine_distance_stdout(self):
folder_name, edit_dataset_name, closest_dataset_name, args = generate_dataset_and_parser()
args = copy.copy(args)
args.distance = 'cosine'
# run method storing output
f = io.StringIO()
with redirect_stdout(f):
execute_train(model_class=MLPEncoder,
model_args=dict(layers=2,
hidden_size=5,
batch_norm=True),
args=args)
out = f.getvalue()
# check correct output
assert 'Top1:' in out and 'Top5:' in out and 'Top10:' in out, 'Wrong output format for cosine distance'
# remove files
remove_files(folder_name, edit_dataset_name, closest_dataset_name)
def test_euclidean_distance_stdout(self):
folder_name, edit_dataset_name, closest_dataset_name, args = generate_dataset_and_parser()
args = copy.copy(args)
args.distance = 'euclidean'
# run method storing output
f = io.StringIO()
with redirect_stdout(f):
execute_train(model_class=MLPEncoder,
model_args=dict(layers=2,
hidden_size=5,
batch_norm=True),
args=args)
out = f.getvalue()
# check correct output
assert 'Top1:' in out and 'Top5:' in out and 'Top10:' in out, 'Wrong output format for euclidean distance'
# remove files
remove_files(folder_name, edit_dataset_name, closest_dataset_name)
def test_square_distance_stdout(self):
folder_name, edit_dataset_name, closest_dataset_name, args = generate_dataset_and_parser()
args = copy.copy(args)
args.distance = 'square'
# run method storing output
f = io.StringIO()
with redirect_stdout(f):
execute_train(model_class=MLPEncoder,
model_args=dict(layers=2,
hidden_size=5,
batch_norm=True),
args=args)
out = f.getvalue()
# check correct output
assert 'Top1:' in out and 'Top5:' in out and 'Top10:' in out, 'Wrong output format for square distance'
# remove files
remove_files(folder_name, edit_dataset_name, closest_dataset_name)
def test_manhattan_distance_stdout(self):
folder_name, edit_dataset_name, closest_dataset_name, args = generate_dataset_and_parser()
args = copy.copy(args)
args.distance = 'manhattan'
# run method storing output
f = io.StringIO()
with redirect_stdout(f):
execute_train(model_class=MLPEncoder,
model_args=dict(layers=2,
hidden_size=5,
batch_norm=True),
args=args)
out = f.getvalue()
# check correct output
assert 'Top1:' in out and 'Top5:' in out and 'Top10:' in out, 'Wrong output format for manhattan distance'
# remove files
remove_files(folder_name, edit_dataset_name, closest_dataset_name)
def test_hyperbolic_distance_stdout(self):
folder_name, edit_dataset_name, closest_dataset_name, args = generate_dataset_and_parser()
args = copy.copy(args)
args.distance = 'hyperbolic'
# run method storing output
f = io.StringIO()
with redirect_stdout(f):
execute_train(model_class=MLPEncoder,
model_args=dict(layers=2,
hidden_size=5,
batch_norm=True),
args=args)
out = f.getvalue()
# check correct output
assert 'Top1:' in out and 'Top5:' in out and 'Top10:' in out, 'Wrong output format for hyperbolic distance'
# remove files
remove_files(folder_name, edit_dataset_name, closest_dataset_name)
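# A hedged refactoring sketch (not part of the original tests): the five
# distance tests above differ only in `args.distance`, so they could be
# collapsed with pytest parametrization, e.g.
#
#   @pytest.mark.parametrize('distance', ['cosine', 'euclidean', 'square',
#                                         'manhattan', 'hyperbolic'])
#   def test_distance_stdout(distance):
#       ...
#
# (this would require switching from unittest.TestCase to plain pytest
# functions).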
473389
from __future__ import unicode_literals
from django.apps import AppConfig
class SimpleImageUploadConfig(AppConfig):
name = 'simple_image_upload'
473465
import os
import sys
import requests
import re
with open(sys.argv[1]) as fp:
lines = fp.readlines()
flag = False
start = 0
end = 0
for i, line in enumerate(lines):
if "MODELS_HASH" in line:
flag = True
start = i
continue
if flag:
if line.startswith("}"):
end = i
break
MODELS_HASH = dict()
response = requests.get("https://api.github.com/repos/xiaosu-zhu/McQuic/releases/tags/generic", headers={"Accept":"application/vnd.github.v3+json"}).json()
assets = response["assets"]
for asset in assets:
name: str = asset["name"]
if not name.endswith("mcquic"):
continue
regex = re.compile(r"^qp_[0-9]*_(mse|msssim)_[0-9a-fA-F]{8,}\.mcquic$")
if not regex.match(name):
raise ValueError(f"Naming convention broken with `{name}`.")
stem = name.split(".")[0]
component = stem.split("_")
qp = component[1]
target = component[2]
hashStr = component[-1]
print(qp, target, hashStr)
MODELS_HASH[f"qp_{qp}_{target}"] = hashStr
MODELS_HASH = """MODELS_HASH = {
%s
}
""" % (os.linesep.join(f" \"{key}\": \"{value}\"" for key, value in MODELS_HASH.items()))
result = lines[:start] + [MODELS_HASH] + lines[(end+1):]
with open(sys.argv[1], "w") as fp:
fp.writelines(result)
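# Illustrative shape of the block this script rewrites in the target file
# (the hash values below are made up):
#
#   MODELS_HASH = {
#       "qp_1_mse": "0123abcd",
#       "qp_2_msssim": "4567ef89",
#   }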
473516
import pytest
from model_mommy import mommy
from rest_framework.test import APIClient
from reqs.models import Agency, AgencyGroup, Policy, Requirement, Topic
@pytest.mark.django_db
@pytest.mark.parametrize('path,num_results', (
('/topics/', 10),
('/topics/?name=0000000000', 1),
('/topics/?name=000000', 0),
('/topics/?name__icontains=000000', 1),
))
def test_topic_filtering(path, num_results):
client = APIClient()
for i in range(10):
mommy.make(Topic, name=str(i)*10)
results = client.get(path).json()['results']
assert len(results) == num_results
@pytest.mark.django_db
@pytest.mark.parametrize('path,num_results', (
('/requirements/', 3),
('/requirements/?topics__name=0000', 1),
('/requirements/?topics__name=1111', 2),
('/requirements/?topics__name__in=2222,3333', 2),
))
def test_requirement_filtering_topic(path, num_results):
"""We can filter by a nested topic"""
client = APIClient()
topics = [mommy.make(Topic, name=str(i)*4) for i in range(4)]
req1, req2, req3 = mommy.make(Requirement, _quantity=3)
for req in (req1, req2, req3):
req.policy.workflow_phase = 'published'
req.policy.save()
req1.topics.add(topics[0], topics[1])
req2.topics.add(topics[1], topics[2])
req3.topics.add(topics[3])
results = client.get(path).json()['results']
assert len(results) == num_results
@pytest.mark.django_db
def test_requirements_queryset_order():
"""We should receive results in # of matches order"""
client = APIClient()
topics = mommy.make(Topic, _quantity=6)
req1, req2, req3 = [mommy.make(Requirement, req_id=str(i + 1))
for i in range(3)]
for req in (req1, req2, req3):
req.policy.workflow_phase = 'published'
req.policy.save()
req1.topics.add(topics[0], topics[1])
req2.topics.add(topics[1], topics[2], topics[3])
req3.topics.add(topics[0], topics[4], topics[5])
param = ','.join(str(topics[i].pk) for i in (0, 2, 3))
response = client.get('/requirements/?topics__id__in=' + param)
req_ids = [req['req_id'] for req in response.json()['results']]
assert req_ids == ['2', '1', '3']
@pytest.mark.django_db
@pytest.mark.parametrize('params,req_ids,omb_policy_ids,result', (
('', (1, 2, 3), (10, 11, 12), ["1", "2", "3"]),
('ordering', (1, 2, 3), (10, 11, 12), ["1", "2", "3"]),
('ordering=', (1, 2, 3), (10, 11, 12), ["1", "2", "3"]),
('', (3, 2, 1), (10, 11, 12), ["1", "2", "3"]),
('ordering=-req_id', (2, 1, 3), (10, 11, 12), ["3", "2", "1"]),
('ordering=policy__omb_policy_id', (1, 2, 3), (20, 30, 10),
["3", "1", "2"]),
('ordering=-policy__omb_policy_id', (1, 2, 3), (20, 30, 10),
["2", "1", "3"]),
), ids=repr)
def test_requirements_ordered_by_key(params, req_ids, omb_policy_ids, result):
"""
We should be able to pass in arbitrary sort fields.
"""
client = APIClient()
for req_id, omb_policy_id in zip(req_ids, omb_policy_ids):
policy = mommy.make(Policy, omb_policy_id=str(omb_policy_id),
workflow_phase='published')
mommy.make(Requirement, req_id=str(req_id), policy=policy)
path = "/requirements/?{0}".format(params)
response = client.get(path)
req_ids = [req['req_id'] for req in response.json()['results']]
assert req_ids == result
@pytest.mark.django_db
@pytest.mark.parametrize('params,result', (
('req_id', ["1", "2", "3"]),
('policy__omb_policy_id', ["3", "1", "2"]),
('policy__omb_policy_id,-req_id', ["3", "2", "1"]),
('policy__omb_policy_id,verb', ["3", "2", "1"]),
('policy__omb_policy_id,req_id', ["3", "1", "2"]),
), ids=repr)
def test_requirements_ordered_by_multiple_keys(params, result):
"""
We should be able to pass in arbitrary sort fields.
"""
policy1 = mommy.make(Policy, omb_policy_id="23",
workflow_phase='published')
policy2 = mommy.make(Policy, omb_policy_id="17",
workflow_phase='published')
mommy.make(Requirement, req_id=1, verb="zoot", policy=policy1)
mommy.make(Requirement, req_id=2, verb="yo", policy=policy1)
mommy.make(Requirement, req_id=3, verb="xi", policy=policy2)
client = APIClient()
path = "/requirements/?ordering={0}".format(params)
response = client.get(path)
req_ids = [req['req_id'] for req in response.json()['results']]
assert req_ids == result
@pytest.mark.django_db
@pytest.mark.parametrize('params', (
"gibberish",
"-gibberish",
"-",
"asdf",
"policy__",
"policy__gibberish",
))
def test_requirements_ordered_by_bad_key(params):
"""
Sorting by keys that don't exist should doesn't affect sort order.
"""
client = APIClient()
for i in range(3):
policy = mommy.make(Policy, omb_policy_id=str(10 - i),
workflow_phase='published')
mommy.make(Requirement, req_id=str(i), policy=policy)
path = "/requirements/?ordering={0}".format(params)
response = client.get(path)
req_ids = [req['req_id'] for req in response.json()['results']]
assert req_ids == ['0', '1', '2']
@pytest.mark.django_db
def test_requirements_agencies_filter():
client = APIClient()
req1 = mommy.make(Requirement,
policy=mommy.make(Policy, workflow_phase='published'))
req1.agencies.add(*mommy.make(Agency, _quantity=3))
req2 = mommy.make(Requirement,
policy=mommy.make(Policy, workflow_phase='published'))
req2.agencies.add(*mommy.make(Agency, _quantity=4))
path = "/requirements/"
response = client.get(path).json()['results']
assert len(response) == 2
path += "?agencies__id__in={0}".format(req1.agencies.first().pk)
response = client.get(path).json()['results']
assert len(response) == 1
assert response[0]['req_id'] == req1.req_id
@pytest.mark.django_db
@pytest.mark.parametrize('workflow, req_public, visible', [
('edit', False, False),
('edit', True, False),
('published', False, False),
('published', True, True),
])
def test_requirements_nonpublic(workflow, req_public, visible):
policy = mommy.make(Policy, workflow_phase=workflow)
mommy.make(Requirement, policy=policy, public=req_public)
num_visible = APIClient().get('/requirements/').json()['count']
assert num_visible == int(visible)
@pytest.mark.django_db
def test_requirements_agencies_nonpublic():
client = APIClient()
agencies = mommy.make(Agency, _quantity=3)
agencies.append(mommy.make(Agency, public=False))
req = mommy.make(Requirement,
policy=mommy.make(Policy, workflow_phase='published'))
req.agencies.add(*agencies)
path = "/requirements/?id={0}".format(req.pk)
response = client.get(path).json()['results']
assert len(response) == 1
assert len(response[0]['agencies']) == 3
@pytest.mark.django_db
@pytest.mark.parametrize('term, icontains_count, search_count', [
('stem', 2, 2),
('stems', 1, 2),
('stemmed', 1, 2),
('full', 2, 1),
])
def test_requirements_fulltext_search(term, icontains_count, search_count):
client = APIClient()
mommy.make(Requirement,
policy=mommy.make(Policy, workflow_phase='published'),
req_text='Full text stems words')
mommy.make(Requirement,
policy=mommy.make(Policy, workflow_phase='published'),
req_text='Stemmed textual input')
mommy.make(Requirement,
policy=mommy.make(Policy, workflow_phase='published'),
req_text='Fullerton place')
path = "/requirements/?req_text__icontains=" + term
response = client.get(path).json()
assert response['count'] == icontains_count
path = "/requirements/?req_text__search=" + term
response = client.get(path).json()
assert response['count'] == search_count
@pytest.fixture
def applied_agencies():
a_req, a_group_match, a_group_nonmatch = mommy.make(Agency, _quantity=3)
g_match, g_nonmatch = mommy.make(AgencyGroup, _quantity=2)
g_match.agencies.add(a_group_match)
g_nonmatch.agencies.add(a_group_nonmatch)
req = mommy.make(Requirement,
policy=mommy.make(Policy, workflow_phase='published'))
req.agencies.add(a_req)
req.agency_groups.add(g_match)
yield a_req, a_group_match, a_group_nonmatch
@pytest.mark.django_db
def test_all_agencies_agency_match(applied_agencies):
a_req, _, _ = applied_agencies
path = "/requirements/?all_agencies__id__in=42,99,{0}".format(a_req.pk)
response = APIClient().get(path).json()['results']
assert response
@pytest.mark.django_db
def test_all_agencies_agency_group(applied_agencies):
_, a_group_match, _ = applied_agencies
path = "/requirements/?all_agencies__id__in={0} ".format(a_group_match.pk)
response = APIClient().get(path).json()['results']
assert response
@pytest.mark.django_db
def test_all_agencies_agency_nonmatching_group(applied_agencies):
_, _, a_group_nonmatch = applied_agencies
path = "/requirements/?all_agencies__id__in={0}".format(
a_group_nonmatch.pk)
response = APIClient().get(path).json()['results']
assert not response
473518
from __future__ import absolute_import
from celery import Celery
from django.conf import settings
app = Celery('backend')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
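# Hedged usage sketch: invoke the task through a Celery worker rather than
# calling it directly, e.g.
#
#   debug_task.delay()
#
# which prints the bound task's request context from the worker process.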
473535
import base64
import binascii
import pytest
from aead import AEAD
def test_vector():
key = base64.urlsafe_b64encode(binascii.unhexlify(
b"<KEY>"
))
data = binascii.unhexlify(
b"41206369706865722073797374656d206d757374206e6f742062652072657175"
b"6972656420746f206265207365637265742c20616e64206974206d7573742062"
b"652061626c6520746f2066616c6c20696e746f207468652068616e6473206f66"
b"2074686520656e656d7920776974686f757420696e636f6e76656e69656e6365"
)
iv = binascii.unhexlify(b"1af38c2dc2b96ffdd86694092341bc04")
associated_data = binascii.unhexlify(
b"546865207365636f6e64207072696e6369706c65206f66204175677573746520"
b"4b6572636b686f666673"
)
cryptor = AEAD(key)
foo = cryptor._encrypt_from_parts(data, associated_data, iv)
assert binascii.hexlify(foo) == (
b"<KEY>"
b"<KEY>"
b"<KEY>"
b"6e133314c54019e8ca7980dfa4b9cf1b384c486f3a54c51078158ee5d79de59f"
b"<KEY>"
b"<KEY>"
)
def test_key_length():
key = base64.urlsafe_b64encode(b"foobar")
with pytest.raises(ValueError):
AEAD(key)
def test_round_trip_encrypt_decrypt():
aead = AEAD(AEAD.generate_key())
ct = aead.encrypt(b"Hello, World!", b"Goodbye, World!")
assert aead.decrypt(ct, b"Goodbye, World!") == b"Hello, World!"
def test_invalid_signature():
aead = AEAD(AEAD.generate_key())
ct = aead.encrypt(b"Hello, World", b"Goodbye, World!")
with pytest.raises(ValueError):
aead.decrypt(ct, b"foobar")
473542
from metann.utils.containers import DefaultList
def test_default_list():
l = DefaultList()
l.fill([1, 2, 3, 4])
for i, _ in zip(l, range(10)):
print(i)
473559
import base64
from functools import wraps
from django.contrib.auth.models import AnonymousUser as DjangoAnonymousUser, User as DjangoUser
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from apps.canvas_auth.backends import authenticate
from apps.canvas_auth.models import AnonymousUser, User
from canvas.view_helpers import forbidden_response
from django.conf import settings
class AnonymousUserMiddleware(object):
""" Replaces request.user with our own AnonymousUser instead of Django's (if request.user is anonymous). """
def process_request(self, request):
if isinstance(request.user, DjangoAnonymousUser):
request.user = AnonymousUser()
class SessionMigrationMiddleware(object):
"""
    Migrates the "_auth_user_backend" field in user sessions to the first backend listed in AUTHENTICATION_BACKENDS.
Does nothing if AUTHENTICATION_BACKENDS is empty.
Must come after "django.middleware.SessionMiddleware", and before
"django.contrib.auth.middleware.AuthenticationMiddleware".
"""
BACKEND_KEY = '_auth_user_backend'
def process_request(self, request):
if settings.AUTHENTICATION_BACKENDS:
auth_backend = settings.AUTHENTICATION_BACKENDS[0]
if request.session.get(self.BACKEND_KEY, auth_backend) != auth_backend:
request.session['_old_auth_user_backend'] = request.session[self.BACKEND_KEY]
request.session[self.BACKEND_KEY] = auth_backend
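# Hedged settings sketch (the middleware module path below is assumed),
# honoring the ordering requirement from the docstring:
#
#   MIDDLEWARE_CLASSES = (
#       'django.contrib.sessions.middleware.SessionMiddleware',
#       'apps.canvas_auth.middleware.SessionMigrationMiddleware',
#       'django.contrib.auth.middleware.AuthenticationMiddleware',
#       'apps.canvas_auth.middleware.AnonymousUserMiddleware',
#   )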
473582
from django.conf import settings
from django.conf.urls import url
from django.urls import include, path
from .views import login, launch, get_jwks, configure, score, scoreboard
urlpatterns = [
url(r'^login/$', login, name='game-login'),
url(r'^launch/$', launch, name='game-launch'),
url(r'^jwks/$', get_jwks, name='game-jwks'),
url(r'^configure/(?P<launch_id>[\w-]+)/(?P<difficulty>[\w-]+)/$', configure, name='game-configure'),
url(r'^api/score/(?P<launch_id>[\w-]+)/(?P<earned_score>[\w-]+)/(?P<time_spent>[\w-]+)/$', score,
name='game-api-score'),
url(r'^api/scoreboard/(?P<launch_id>[\w-]+)/$', scoreboard, name='game-api-scoreboard'),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
path('__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
473620
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from .managers import CustomUserManager
class CustomUser(AbstractUser):
class Meta:
verbose_name = _("user")
verbose_name_plural = _("users")
username = None
email = models.EmailField(_("email address"), unique=True)
subscription = models.ForeignKey(
"Subscription", null=True, on_delete=models.SET_NULL
)
USERNAME_FIELD = "email"
REQUIRED_FIELDS = []
objects = CustomUserManager()
def __str__(self):
return self.email
    def is_premium(self):
        # guard both the FK and the nullable valid_through to avoid comparing None
        if self.subscription is not None and self.subscription.valid_through is not None:
            return self.subscription.valid_through > timezone.now()
        return False
class Subscription(models.Model):
stripe_subscription_id = models.CharField(
max_length=1000, blank=True, null=True
)
stripe_customer_id = models.CharField(
max_length=1000, blank=True, null=True
)
valid_through = models.DateTimeField(null=True, blank=True)
def __str__(self):
user = CustomUser.objects.filter(subscription=self).first()
if user:
return f"{str(user)} ({self.stripe_subscription_id})"
else:
return "No subscription"
473643
import cgi
def notfound(environ, start_response):
    start_response('404 Not Found', [('content-type', 'text/plain')])
    # WSGI response bodies must be bytes under Python 3
    return [b'404 Not Found']
class PathDispatcher:
def __init__(self):
self.pathmap = {}
def __call__(self, environ, start_response):
path = environ['PATH_INFO']
params = cgi.FieldStorage(environ['wsgi.input'], environ = environ)
method = environ['REQUEST_METHOD'].lower()
environ['params'] = {k: params.getvalue(k) for k in params}
handler = self.pathmap.get((method, path), notfound)
return handler(environ, start_response)
def register(self, method, path, function):
self.pathmap[method.lower(), path] = function
return function
name_response = """
<html>
<head>
<title> Hello {name} </title>
</head>
<body>
<h1>HELLO {name} </h1>
</body>
</html>
"""
def name(environ, start_response):
    start_response('200 OK', [('content-type', 'text/html')])
    params = environ['params']
    response = name_response.format(name=params.get('name'))
    yield response.encode('utf-8')
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    dispatcher = PathDispatcher()
    dispatcher.register('GET', '/name', name)
    httpd = make_server('localhost', 8080, dispatcher)
    httpd.serve_forever()
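# Hedged example request once the server is running:
#
#   curl 'http://localhost:8080/name?name=World'
#
# should return the HTML greeting rendered by name().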
473652
from .f_AlbumWinners_publisher import AlbumWinnersPublisher
from .f_AlbumWinners_subscriber import AlbumWinnersSubscriber
from .f_Store import Client as FStoreClient
from .f_Store import Iface as FStoreIface
from .ttypes import *
473653
from flask_assets import Bundle
app_css = Bundle(
'app.scss',
filters='scss',
output='styles/app.css',
depends=('*.scss')
)
images_png = Bundle(
'clock.gif',
'images/guiders_x_button.jpg',
output='guiders_arrows.png'
)
app_js = Bundle(
'app.js',
filters='jsmin',
output='scripts/app.js'
)
vendor_css = Bundle(
'vendor/semantic.min.css',
'vendor/animate.min.css',
output='styles/vendor.css'
)
vendor_js = Bundle(
'vendor/jquery.min.js',
'vendor/semantic.min.js',
'vendor/tablesort.min.js',
'vendor/papaparse.min.js',
filters='jsmin',
output='scripts/vendor.js'
)
guiders_js = Bundle(
'guiders.js',
output='scripts/guiders.js'
)
473673
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils import timezone
def get_affiliate_id(request, expiry_period=None):
expiry_period = expiry_period or getattr(settings, 'AFFILIATE_EXPIRY_PERIOD', None)
try:
affiliate_id = request.affiliate_id
except AttributeError:
raise ImproperlyConfigured('You need to add "simple_affiliate.middleware.affiliate_middleware" to '
'your MIDDLEWARE in settings.py to use this function!')
if expiry_period and affiliate_id and timezone.now() - expiry_period > request.affiliate_date:
# a valid affiliate ID has expired
return None
else:
return affiliate_id
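# Hedged configuration sketch (the middleware path is taken from the error
# message above; the setting name matches the getattr() lookup):
#
#   # settings.py
#   AFFILIATE_EXPIRY_PERIOD = datetime.timedelta(days=30)
#   MIDDLEWARE = [
#       ...,
#       'simple_affiliate.middleware.affiliate_middleware',
#   ]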
473708
from django.conf.urls import re_path
from .views import (
schedule_conference,
schedule_edit,
schedule_list,
schedule_list_csv,
schedule_detail,
schedule_slot_edit,
schedule_json,
)
urlpatterns = [
re_path(r"^$", schedule_conference, name="schedule_conference"),
re_path(r"^edit/$", schedule_edit, name="schedule_edit"),
re_path(r"^list/$", schedule_list, name="schedule_list"),
re_path(
r"^presentations.csv$", schedule_list_csv, name="schedule_list_csv"
),
re_path(r"^([\w\-]+)/$", schedule_detail, name="schedule_detail"),
re_path(r"^([\w\-]+)/edit/$", schedule_edit, name="schedule_edit"),
re_path(r"^([\w\-]+)/list/$", schedule_list, name="schedule_list"),
re_path(
r"^([\w\-]+)/presentations.csv$",
schedule_list_csv,
name="schedule_list_csv",
),
re_path(
r"^([\w\-]+)/edit/slot/(\d+)/",
schedule_slot_edit,
name="schedule_slot_edit",
),
re_path(r"^conference.json", schedule_json, name="schedule_json"),
]
473711
from typing import Iterable
from typing import List
from typing import Union
from pathlib import Path
from typeguard import check_argument_types
import warnings
class WordTokenizer:
def __init__(
self,
delimiter: str = None,
non_linguistic_symbols: Union[Path, str, Iterable[str]] = None,
remove_non_linguistic_symbols: bool = False,
):
assert check_argument_types()
self.delimiter = delimiter
if not remove_non_linguistic_symbols and non_linguistic_symbols is not None:
warnings.warn(
"non_linguistic_symbols is only used "
"when remove_non_linguistic_symbols = True"
)
if non_linguistic_symbols is None:
self.non_linguistic_symbols = set()
elif isinstance(non_linguistic_symbols, (Path, str)):
non_linguistic_symbols = Path(non_linguistic_symbols)
try:
with non_linguistic_symbols.open("r", encoding="utf-8") as f:
self.non_linguistic_symbols = set(
line.rstrip() for line in f)
except FileNotFoundError:
warnings.warn(f"{non_linguistic_symbols} doesn't exist.")
self.non_linguistic_symbols = set()
else:
self.non_linguistic_symbols = set(non_linguistic_symbols)
self.remove_non_linguistic_symbols = remove_non_linguistic_symbols
def __repr__(self):
return f'{self.__class__.__name__}(delimiter="{self.delimiter}")'
def text2tokens(self, line: str) -> List[str]:
tokens = []
for t in line.split(self.delimiter):
if self.remove_non_linguistic_symbols and t in self.non_linguistic_symbols:
continue
tokens.append(t)
return tokens
def tokens2text(self, tokens: Iterable[str]) -> str:
if self.delimiter is None:
delimiter = " "
else:
delimiter = self.delimiter
return delimiter.join(tokens)
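# Hedged usage sketch:
#
#   tokenizer = WordTokenizer(delimiter=",",
#                             non_linguistic_symbols={"<noise>"},
#                             remove_non_linguistic_symbols=True)
#   tokenizer.text2tokens("hello,<noise>,world")  # -> ["hello", "world"]
#   tokenizer.tokens2text(["hello", "world"])     # -> "hello,world"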
473716
from enum import Enum
class FilterType(Enum):
"""
Filter type (filter densities, or gradients only)
"""
NoFilter = 0
Density = 1
Sensitivity = 2
class InterpolationType(Enum):
"""
Material interpolation scheme: classic SIMP, or Pedersen (for self-weight problems)
"""
SIMP = 1
Pedersen = 2
class ProblemType(Enum):
"""
Problem type. Minimize appearance only, minimize compliance only, or minimize
appearance with a compliance constraint.
"""
Appearance = 1
Compliance = 2
AppearanceWithMaxCompliance = 3
def involves_appearance(self):
"""
Returns true iff the given problem type requires the appearance evaluation.
"""
return self in (ProblemType.Appearance, ProblemType.AppearanceWithMaxCompliance)
    def involves_compliance(self):
        """
        Returns true iff the given problem type requires the compliance evaluation.
        """
        return self in (ProblemType.Compliance, ProblemType.AppearanceWithMaxCompliance)
def involves_volume(self):
"""
Returns true iff the given problem type has a volume constraint.
"""
return self in (ProblemType.Compliance, ProblemType.AppearanceWithMaxCompliance)
def has_compliance_constraint(self):
"""
Returns true iff the given problem has a constraint on the compliance.
"""
return self in (ProblemType.AppearanceWithMaxCompliance, )
def has_volume_constraint(self):
"""
Returns true iff the given problem has a constraint on the volume.
"""
return self in (ProblemType.Compliance, ProblemType.AppearanceWithMaxCompliance)
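# Hedged usage sketch:
#
#   p = ProblemType.AppearanceWithMaxCompliance
#   p.involves_appearance()        # True
#   p.has_compliance_constraint()  # True
#   ProblemType.Compliance.has_compliance_constraint()  # False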
473721
import argparse
import os
from os.path import join as pjoin
import pandas as pd
from src.evaluation import Evaluator
import zipfile
import shutil
debug_mode = 0
def mkdir(d):
if not os.path.exists(d):
os.makedirs(d)
def build_ref_pred_pair(ref_dict, pred_dict):
ref_list, pred_list = [], []
for k, v in ref_dict.items():
ref_list.append([v])
if k in pred_dict:
pred_list.append(pred_dict[k])
else:
pred_list.append(' ')
return ref_list, pred_list
def get_evaluations_final(run_mf, qrel):
metrics = {'recall_10', 'ndcg_cut_10'}
eval_obj = Evaluator(metrics)
indiv_res = eval_obj.evaluate(run_mf, qrel)
overall_res = eval_obj.show_all()
return overall_res, indiv_res
def read_run_file(run_file):
qret = {}
df_qret = pd.read_csv(run_file, sep="\t")
for row in df_qret.itertuples():
cur_user_qret = qret.get(str(row.userId), {})
cur_user_qret[str(row.itemId)] = float(row.score)
qret[str(row.userId)] = cur_user_qret
return qret
def read_qrel_file(qrel_file):
qrel = {}
df_qrel = pd.read_csv(qrel_file, sep="\t")
for row in df_qrel.itertuples():
cur_user_qrel = qrel.get(str(row.userId), {})
cur_user_qrel[str(row.itemId)] = int(row.rating)
qrel[str(row.userId)] = cur_user_qrel
return qrel
def read_string(solution_file):
with open(solution_file) as fi:
return fi.read().strip()
def merge_run_files(run_dir, market1, market2, output_market):
predict_path_val_market1 = os.path.join(run_dir, market1, 'valid_pred.tsv')
predict_path_test_market1 = os.path.join(run_dir, market1, 'test_pred.tsv')
predict_path_val_market2 = os.path.join(run_dir, market2, 'valid_pred.tsv')
predict_path_test_market2 = os.path.join(run_dir, market2, 'test_pred.tsv')
output_market_dir_path = os.path.join(run_dir, output_market)
mkdir(output_market_dir_path)
predict_path_val_out = os.path.join(run_dir, output_market, 'valid_pred.tsv')
predict_path_test_out = os.path.join(run_dir, output_market, 'test_pred.tsv')
write_market_files(predict_path_val_market1, predict_path_val_market2, predict_path_val_out)
write_market_files(predict_path_test_market1, predict_path_test_market2, predict_path_test_out)
def write_market_files(predict_path_val_market1, predict_path_val_market2, predict_path_val_out):
with open(predict_path_val_market1) as fi1:
with open(predict_path_val_market2) as fi2:
with open(predict_path_val_out, 'w') as fo:
for l in fi1:
fo.write(l)
for l in fi2:
if not l.startswith('userId'):
fo.write(l)
def validate_file_structure(extract_dir):
for m in ['t1', 't2']:
for f in ['test_pred.tsv', 'valid_pred.tsv']:
try:
with open(os.path.join(extract_dir, m, f)) as fi:
pass
except FileNotFoundError:
print('{} not found!'.format(os.path.join(extract_dir, m, f)))
return False
return True
def get_scores_for_market(input_dir, data_dir, market_name):
# prepare for val set
predict_path_val = os.path.join(input_dir, market_name, 'valid_pred.tsv')
ref_path_val = os.path.join(data_dir, market_name, 'valid_qrel.tsv')
my_valid_run = read_run_file(predict_path_val)
my_valid_qrel = read_qrel_file(ref_path_val)
task_ov_val, task_ind_val = get_evaluations_final(my_valid_run, my_valid_qrel)
return task_ov_val
def main():
parser = argparse.ArgumentParser()
parser.add_argument("submission_file", help="Zip file that contains the run files to be submitted to Codalab.")
parser.add_argument("--data_dir", help="Path to the DATA dir of the kit. Default: ./DATA/.", default='./DATA/')
args = parser.parse_args()
extract_dir = './tmp/'
scores = ['ndcg_cut_10', 'recall_10']
score_names = {
'recall_10': {'val': 'r10_val', 'test': 'r10_test'},
'ndcg_cut_10': {'val': 'ndcg10_val', 'test': 'ndcg10_test'}
}
    # We assume that the submission comes with two markets (i.e., t1 and t2).
    markets = ['t1', 't2', 't1t2']
# First we unzip the run file in a tmp folder then start evaluating it.
mkdir(extract_dir)
print('Extracting the submission zip file')
with zipfile.ZipFile(args.submission_file, "r") as zip_ref:
zip_ref.extractall(extract_dir)
print('Validating the file structure of the submission')
file_structure_validation = validate_file_structure(extract_dir)
if file_structure_validation:
print('File structure validation successfully passed')
else:
print('File structure validation failed. Please refer to the instructions')
return
print('Evaluating the validation set')
# Then we merge the run files of the two markets for the joint performance evaluation, and call it 't1t2'.
merge_run_files(extract_dir, 't1', 't2', 't1t2')
    task_ov_val = {}
    for m in markets:  # iterate over the three target markets (including the joint one)
print(
"===================== Market : " + m + "=====================")
task_ov_val[m] = get_scores_for_market(extract_dir, args.data_dir, m)
for score in scores: # iterating over the scores
score_val_name = score_names[score]['val']
score_val = task_ov_val[m][score]
print(
"======= Set val : score(" + score_val_name + ")=%0.12f =======" % score_val)
# remove the tmp directory
shutil.rmtree(extract_dir)
if __name__ == "__main__":
main()
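# Hedged invocation sketch (the script filename is assumed):
#
#   python evaluate_submission.py submission.zip --data_dir ./DATA/
#
# where submission.zip contains t1/{valid_pred.tsv,test_pred.tsv} and
# t2/{valid_pred.tsv,test_pred.tsv}.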
473736
from .. import miscellaneous
class BaseEntity(object):
is_fetched = True
def print(self, to_return=False, columns=None):
"""
:param to_return:
:param columns:
"""
return miscellaneous.List([self]).print(to_return=to_return, columns=columns)
def to_df(self, show_all=False, columns=None):
"""
:param show_all:
:param columns:
"""
return miscellaneous.List([self]).to_df(show_all=show_all, columns=columns)
# def __getattribute__(self, attr):
# if super(BaseEntity, self).__getattribute__(attr) is None:
# pass
# return super(BaseEntity, self).__getattribute__(attr)
473751
import taso as ts
import sys
seq_length = 512
hidden_dims = 768
batch_size = int(sys.argv[1])
def attention(graph, input, heads):
    embed = input.dim(1)  # embedding length
    assert embed % heads == 0
    head_dim = embed // heads
    weights = list()
    for i in range(3):
        weights.append(graph.new_weight(dims=(embed, embed)))
    # compute query, key, value tensors
    q = graph.matmul(input, weights[0])
    k = graph.matmul(input, weights[1])
    v = graph.matmul(input, weights[2])
    # reshape query, key, value to multiple heads (was hard-coded to 12 heads of 64 dims)
    q = graph.reshape(q, shape=(batch_size, seq_length, heads, head_dim))
    k = graph.reshape(k, shape=(batch_size, seq_length, heads, head_dim))
    v = graph.reshape(v, shape=(batch_size, seq_length, heads, head_dim))
    # transpose query, key, value for batched matmul
    q = graph.transpose(q, perm=(0, 2, 1, 3), shuffle=True)
    k = graph.transpose(k, perm=(0, 2, 3, 1), shuffle=True)
    v = graph.transpose(v, perm=(0, 2, 1, 3), shuffle=True)
    # perform matrix multiplications
    logits = graph.matmul(q, k)
    output = graph.matmul(logits, v)
    # transpose the output back
    output = graph.transpose(output, perm=(0, 2, 1, 3), shuffle=True)
    output = graph.reshape(output, shape=(batch_size, seq_length, embed))
    # a final two-layer feed-forward block with a residual connection
    linear = graph.new_weight(dims=(batch_size, embed, embed))
    linear2 = graph.new_weight(dims=(batch_size, embed, embed))
    output = graph.matmul(output, linear)
    output = graph.relu(graph.reshape(output, shape=(batch_size * seq_length, embed)))
    output = graph.reshape(output, shape=(batch_size, seq_length, embed))
    output = graph.matmul(output, linear2)
    output = graph.relu(graph.reshape(output, shape=(batch_size * seq_length, embed)))
    output = graph.add(output, input)
    output = graph.reshape(output, shape=(batch_size * seq_length, embed))
    return output
graph = ts.new_graph()
input = graph.new_input(dims=(batch_size * seq_length, hidden_dims))
input = graph.relu(input)
t = input
for i in range(12):
t = attention(graph, t, 16)
new_graph = ts.optimize(graph, alpha=1.0, budget=100)
print(graph.run_time())
print(new_graph.run_time())
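# Hedged invocation sketch (the script filename is assumed); the single
# positional argument is the batch size read from sys.argv[1]:
#
#   python bert_taso.py 8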
473759
from typing import Text, List
__all__ = [
'Tokenizer'
]
# noinspection SpellCheckingInspection
class Tokenizer:
def tokenize(self, sentence: Text) -> List[Text]:
raise NotImplementedError()
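# Hedged sketch of a concrete subclass:
#
#   class WhitespaceTokenizer(Tokenizer):
#       def tokenize(self, sentence: Text) -> List[Text]:
#           return sentence.split()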
473760
from six.moves import zip
from smqtk.algorithms import Classifier
from smqtk.representation.data_element import from_uri
class IndexLabelClassifier (Classifier):
"""
Applies a listing of labels (new-line separated) to input "descriptor"
values, which is actually a vector of class confidence values.
"""
@classmethod
def is_usable(cls):
return True
def __init__(self, index_to_label_uri):
"""
Construct a new "classifier" that applies labels to input vector
indices.
We expect to be given a URI to a new-line separated text file where each
line is a separate label in order and matching the dimensionality of an
input descriptor.
:param index_to_label_uri: URI to new-line separated sequence of labels.
:type index_to_label_uri: str
"""
super(IndexLabelClassifier, self).__init__()
# load label vector
self.index_to_label_uri = index_to_label_uri
self.label_vector = [line.strip() for line in
from_uri(index_to_label_uri).to_buffered_reader()]
def get_config(self):
"""
Return a JSON-compliant dictionary that could be passed to this class's
``from_config`` method to produce an instance with identical
configuration.
:return: JSON type compliant configuration dictionary.
:rtype: dict
"""
return {
"index_to_label_uri": self.index_to_label_uri,
}
def get_labels(self):
"""
Get a copy of the sequence of class labels that this classifier can
classify descriptors into.
:return: Sequence of possible classifier labels.
:rtype: collections.abc.Sequence[str]
"""
# copying container
return list(self.label_vector)
def _classify_arrays(self, array_iter):
check_dim = True
for d_vector in array_iter:
if check_dim:
if len(self.label_vector) != len(d_vector):
raise RuntimeError(
"Failed to apply label vector to input descriptor of "
"incongruous dimensionality ({} labels != {} vector "
"shape)".format(len(self.label_vector), d_vector.shape)
)
check_dim = False
yield dict(zip(self.label_vector, d_vector))
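# Hedged usage sketch (the file:// URI and labels are illustrative): given a
# labels file with one label per line, e.g. "cat", "dog", "bird",
#
#   clf = IndexLabelClassifier("file://labels.txt")
#   list(clf._classify_arrays([[0.1, 0.7, 0.2]]))
#   # -> [{'cat': 0.1, 'dog': 0.7, 'bird': 0.2}]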
473778
from .regressors import KNeighborsRegressor
from .classifiers import KNeighborsClassifier
__all__ = ['KNeighborsRegressor', 'KNeighborsClassifier']
473842
from __future__ import absolute_import, division, print_function
from scitbx.array_family import flex # import dependency
473904
import datetime
import re
from bson.objectid import ObjectId
from flask import request, redirect, url_for, flash, render_template, Response
from pymongo import DESCENDING
from web import captcha
from web import client
from web.queues import crawler_q
from web.config import *
from web.filters import *
from web import run_crawler
from .forms import SearchForm, AddOnionForm, ReportOnionForm
from web.paginate import Pagination
from . import searchbp
import time
from urllib.parse import urlparse, urlunparse
@searchbp.route('/', methods=['GET', 'POST'])
def index():
# print (client.crawler.documents.find().count())
search_form = SearchForm(request.form)
if search_form.validate_on_submit():
return redirect(url_for('.search', phrase=search_form.phrase.data.lower()))
try:
alive_onions = client.crawler.documents.find({"status": 200}).count()
offline_checked_onions = client.crawler.documents.find({"status": 503}).count()
last_crawled = client.crawler.documents.find().sort("seen_time", DESCENDING).limit(1)
checked_onions = client.crawler.documents.find().count()
return render_template('index.html', form=search_form,
checked_onions=checked_onions,
alive_onions=alive_onions,
offline_onions=offline_checked_onions,
last_crawled=last_crawled[0]['seen_time'])
    except Exception:
return render_template('index.html', form=search_form)
@searchbp.route('/search/<phrase>/', methods=["GET"])
@searchbp.route('/search/<phrase>/<int:page_number>', methods=["GET"])
def search(phrase, page_number=1):
# report_form = ReportOnionForm()
search_form = SearchForm()
regex = " %s " % phrase
try:
all_count = client.crawler.documents.find({"body": re.compile(regex, re.IGNORECASE)}).count()
pagination = Pagination(page_number, n_per_page, all_count)
all = client.crawler.documents.find(
{"body": re.compile(regex, re.IGNORECASE)}
).sort("seen_time", DESCENDING).skip(
(page_number - 1) * n_per_page).limit(n_per_page)
    except Exception:
return render_template('result.html',phrase=phrase, all_count=0,
search_form=search_form)
return render_template('result.html',
results=all,
pagination=pagination,
phrase=phrase, search_form=search_form,
all_count=all_count)
@searchbp.route('/report/<string:id>', methods=["GET", "POST"])
def report(id):
report_form = ReportOnionForm()
search_form = SearchForm()
doc = None
try:
doc = client.crawler.documents.find_one({"_id": ObjectId(id)})
report_form.url = doc['url']
report_form.id = id
    except Exception:
        flash("Invalid page")
        return redirect(url_for('search.index'))
if report_form.validate_on_submit():
if captcha.validate():
client.crawler.documents.update_one({'_id': ObjectId(id)},
{
'$push': {
'tags':
{
"report_body": report_form.body.data,
"report_date": datetime.datetime.utcnow()
}
}})
            flash('Reported! You are helping the community.', 'success')
            return redirect(url_for("search.index"))
else:
flash("Wrong captcha", 'danger')
if doc['url']:
return render_template('report.html', search_form=search_form, report_form=report_form)
# doc = client.crawler.documents.update_one({'_id': id}, {"$set": {"reported": 1,}})
@searchbp.route('/new/', methods=['GET', 'POST'])
def add_onion():
add_form = AddOnionForm()
search_form = SearchForm()
if add_form.validate_on_submit():
if captcha.validate():
url = add_form.url.data.strip()
# try:
# exist = client.crawler.documents.find({"url": url}).count()
# except:
# exist = 0
#
# if exist:
# flash('This onion is already indexed', 'warning')
# return redirect(url_for("search.add_onion"))
print ("SEARCH BEFORE TRY")
try:
# print (url)
parsed_url = urlparse(url)
            if not parsed_url.scheme:
                url = "http://%s" % url
job = crawler_q.enqueue_call(
func=run_crawler, args=(url,), ttl=60, result_ttl=10
)
print (job)
if job.get_id():
print (vars(job))
flash('New onion added to crawler queue.', 'success')
return redirect(url_for("index"))
except Exception:
# print (Exception)
print ("ERROR")
# exit(0)
else:
flash("Captcha is not validate", 'danger')
return redirect(url_for("search.add_onion"))
elif "url" in add_form.errors:
flash("Address is not valid, onion must be at least 16 chars, \
ie. http://xxxxxxxxxxxxxxxx.onion or xxxxxxxxxxxxxxxx.onion", 'danger')
return redirect(url_for("search.add_onion"))
# print (add_form.errors)
return render_template('new.html', add_form=add_form, search_form=search_form)
@searchbp.route('/directory/', methods=["GET"])
@searchbp.route('/directory/<int:page_number>', methods=["GET"])
def directory(page_number=1):
search_form = SearchForm()
try:
all_count = client.crawler.documents.find({'status':200}).count()
pagination = Pagination(page_number, n_per_page, all_count)
all = client.crawler.documents.find({'status':200}).sort("seen_time", DESCENDING).skip(
(page_number - 1) * n_per_page).limit(n_per_page)
    except Exception:
print ("ERROR[?]")
return render_template('directory.html',
search_form=search_form,
all_count=0)
return render_template('directory.html',
results=all,
pagination=pagination,
search_form=search_form,
all_count=all_count)
@searchbp.route('/directory/all', methods=["GET"])
@searchbp.route('/directory/all/<int:page_number>', methods=["GET"])
def directory_all(page_number=1):
search_form = SearchForm()
try:
all_count = client.crawler.documents.find().count()
pagination = Pagination(page_number, n_per_page, all_count)
all = client.crawler.documents.find().sort("seen_time", DESCENDING).skip(
(page_number - 1) * n_per_page).limit(n_per_page)
is_all = True
    except Exception:
return render_template('directory.html',
search_form=search_form,
all_count=0)
return render_template('directory.html',
results=all,
pagination=pagination,
search_form=search_form,
all_count=all_count, is_all=is_all)
@searchbp.route('/faq')
def faq():
search_form = SearchForm()
return render_template('faq.html', search_form = search_form)
@searchbp.route('/export_all')
def export_csv():
all = client.crawler.documents.find({'status': 200})
result = "# 200 OK status list\n "
for item in all:
result = str("%s%s\n" % (result, item['url']))
return Response(result, mimetype='text/plain')
# return render_template_string(result)
# return render_template('faq.html', search_form = search_form)
473912
from django.contrib import admin
from django.contrib.admin import register, models as admin_models
from django.utils.safestring import mark_safe
from tracker import models
from .filters import AdminActionLogEntryFlagFilter
from .forms import LogAdminForm
from .util import CustomModelAdmin
@register(models.Log)
class LogAdmin(CustomModelAdmin):
form = LogAdminForm
search_fields = ['category', 'message']
date_hierarchy = 'timestamp'
list_filter = [('timestamp', admin.DateFieldListFilter), 'event', 'user']
# logs are uneditable
readonly_fields = [
'timestamp',
'category',
'event',
'user',
'message',
]
fieldsets = [
(None, {'fields': ['timestamp', 'category', 'event', 'user', 'message',]}),
]
def has_add_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
@register(admin_models.LogEntry)
class AdminActionLogEntryAdmin(CustomModelAdmin):
search_fields = ['object_repr', 'change_message']
date_hierarchy = 'action_time'
list_filter = [
('action_time', admin.DateFieldListFilter),
'user',
AdminActionLogEntryFlagFilter,
]
readonly_fields = (
'action_time',
'content_type',
'object_id',
'object_repr',
'action_type',
'action_flag',
'target_object',
'change_message',
'user',
)
fieldsets = [
(
None,
{
'fields': [
'action_type',
'action_time',
'user',
'change_message',
'target_object',
]
},
)
]
def action_type(self, instance):
if instance.is_addition():
return 'Addition'
elif instance.is_change():
return 'Change'
elif instance.is_deletion():
return 'Deletion'
else:
return 'Unknown'
def target_object(self, instance):
if instance.is_deletion():
return 'Deleted'
else:
return mark_safe(
'<a href="{0}">{1}</a>'.format(
instance.get_admin_url(), instance.object_repr
)
)
def has_add_permission(self, request, obj=None):
return self.has_log_edit_perms(request, obj)
def has_change_permission(self, request, obj=None):
return self.has_log_edit_perms(request, obj)
def has_delete_permission(self, request, obj=None):
return self.has_log_edit_perms(request, obj)
def has_log_edit_perms(self, request, obj=None):
return request.user.has_perm('tracker.can_change_log')
473913
import torch
from torch import multiprocessing, cuda
from torch.utils.data import DataLoader
import torch.nn.functional as F
from torch.backends import cudnn
import numpy as np
import importlib
import os
import voc12.dataloader
from misc import torchutils, imutils
import cv2
cudnn.enabled = True
from step.gradCAM import GradCAM
def adv_climb(image, epsilon, data_grad):
sign_data_grad = data_grad / (torch.max(torch.abs(data_grad))+1e-12)
perturbed_image = image + epsilon*sign_data_grad
perturbed_image = torch.clamp(perturbed_image, image.min().data.cpu().float(), image.max().data.cpu().float()) # min, max from data normalization
return perturbed_image
def add_discriminative(expanded_mask, regions, score_th):
region_ = regions / regions.max()
expanded_mask[region_>score_th]=1
return expanded_mask
def _work(process_id, model, dataset, args):
databin = dataset[process_id]
n_gpus = torch.cuda.device_count()
data_loader = DataLoader(databin, shuffle=False, num_workers=args.num_workers // n_gpus, pin_memory=True)
with cuda.device(process_id):
model.cuda()
gcam = GradCAM(model=model, candidate_layers=["stage4", "stage2_4"])
for iter, pack in enumerate(data_loader):
img_name = pack['name'][0]
if os.path.exists(os.path.join(args.cam_out_dir, img_name + '.npy')):
continue
size = pack['size']
strided_size = imutils.get_strided_size(size, 4)
strided_up_size = imutils.get_strided_up_size(size, 16)
outputs_cam1 = []
outputs_cam2 = []
n_classes = len(list(torch.nonzero(pack['label'][0])[:, 0]))
# 1
for s_count, size_idx in enumerate([1, 2, 0, 3, 4, 5, 6]):
orig_img = pack['img'][size_idx].clone()
for c_idx, c in enumerate(list(torch.nonzero(pack['label'][0])[:, 0])):
pack['img'][size_idx] = orig_img
img_single = pack['img'][size_idx].detach()[0]
if size_idx != 1:
total_adv_iter = args.adv_iter
else:
if args.adv_iter > 10:
total_adv_iter = args.adv_iter // 2
mul_for_scale = 2
elif args.adv_iter < 6:
total_adv_iter = args.adv_iter
mul_for_scale = 1
else:
total_adv_iter = 5
mul_for_scale = float(total_adv_iter) / 5
for it in range(total_adv_iter):
img_single.requires_grad = True
outputs = gcam.forward(img_single.cuda(non_blocking=True), step=1)
if c_idx == 0 and it == 0:
cam_all_classes = torch.zeros([n_classes, outputs.shape[2], outputs.shape[3]])
gcam.backward(ids=c)
regions = gcam.generate(target_layer="stage4")
regions = regions[0] + regions[1].flip(-1)
if it == 0:
init_cam = regions.detach()
cam_all_classes[c_idx] += regions[0].data.cpu() * mul_for_scale
logit = outputs
logit = F.relu(logit)
logit = torchutils.gap2d(logit, keepdims=True)[:, :, 0, 0]
valid_cat = torch.nonzero(pack['label'][0])[:, 0]
logit_loss = - 2 * (logit[:, c]).sum() + torch.sum(logit)
expanded_mask = torch.zeros(regions.shape)
expanded_mask = add_discriminative(expanded_mask, regions, score_th=args.score_th)
L_AD = torch.sum((torch.abs(regions - init_cam))*expanded_mask.cuda())
loss = - logit_loss - L_AD * args.AD_coeff
model.zero_grad()
img_single.grad.zero_()
loss.backward()
data_grad = img_single.grad.data
perturbed_data = adv_climb(img_single, args.AD_stepsize, data_grad)
img_single = perturbed_data.detach()
outputs_cam1.append(cam_all_classes)
strided_cam1 = torch.sum(torch.stack(
[F.interpolate(torch.unsqueeze(o, 0), strided_size, mode='bilinear', align_corners=False)[0] for o
in outputs_cam1]), 0)
highres_cam1 = [F.interpolate(torch.unsqueeze(o, 1), strided_up_size,
mode='bilinear', align_corners=False) for o in outputs_cam1]
highres_cam1 = torch.sum(torch.stack(highres_cam1, 0), 0)[:, 0, :size[0], :size[1]]
strided_cam1 /= F.adaptive_max_pool2d(strided_cam1, (1, 1)) + 1e-5
highres_cam1 /= F.adaptive_max_pool2d(highres_cam1, (1, 1)) + 1e-5
# 2
for s_count, size_idx in enumerate([1, 2, 0, 3, 4, 5, 6]):
orig_img = pack['img'][size_idx].clone()
for c_idx, c in enumerate(list(torch.nonzero(pack['label'][0])[:, 0])):
pack['img'][size_idx] = orig_img
img_single = pack['img'][size_idx].detach()[0]
if size_idx != 1:
total_adv_iter = args.adv_iter
else:
if args.adv_iter > 10:
total_adv_iter = args.adv_iter // 2
mul_for_scale = 2
elif args.adv_iter < 6:
total_adv_iter = args.adv_iter
mul_for_scale = 1
else:
total_adv_iter = 5
mul_for_scale = float(total_adv_iter) / 5
for it in range(total_adv_iter):
img_single.requires_grad = True
outputs = gcam.forward(img_single.cuda(non_blocking=True), step=2)
if c_idx == 0 and it == 0:
cam_all_classes = torch.zeros([n_classes, outputs.shape[2], outputs.shape[3]])
gcam.backward(ids=c)
regions = gcam.generate(target_layer="stage2_4")
regions = regions[0] + regions[1].flip(-1)
if it == 0:
init_cam = regions.detach()
cam_all_classes[c_idx] += regions[0].data.cpu() * mul_for_scale
logit = outputs
logit = F.relu(logit)
logit = torchutils.gap2d(logit, keepdims=True)[:, :, 0, 0]
valid_cat = torch.nonzero(pack['label'][0])[:, 0]
logit_loss = - 2 * (logit[:, c]).sum() + torch.sum(logit)
expanded_mask = torch.zeros(regions.shape)
expanded_mask = add_discriminative(expanded_mask, regions, score_th=args.score_th)
L_AD = torch.sum((torch.abs(regions - init_cam))*expanded_mask.cuda())
loss = - logit_loss - L_AD * args.AD_coeff
model.zero_grad()
img_single.grad.zero_()
loss.backward()
data_grad = img_single.grad.data
perturbed_data = adv_climb(img_single, args.AD_stepsize, data_grad)
img_single = perturbed_data.detach()
outputs_cam2.append(cam_all_classes)
strided_cam2 = torch.sum(torch.stack(
[F.interpolate(torch.unsqueeze(o, 0), strided_size, mode='bilinear', align_corners=False)[0] for o
in outputs_cam2]), 0)
highres_cam2 = [F.interpolate(torch.unsqueeze(o, 1), strided_up_size,
mode='bilinear', align_corners=False) for o in outputs_cam2]
highres_cam2 = torch.sum(torch.stack(highres_cam2, 0), 0)[:, 0, :size[0], :size[1]]
strided_cam2 /= F.adaptive_max_pool2d(strided_cam2, (1, 1)) + 1e-5
highres_cam2 /= F.adaptive_max_pool2d(highres_cam2, (1, 1)) + 1e-5
strided_cam_weight = strided_cam1 * args.weight + strided_cam2 * (1-args.weight)
highres_cam_weight = highres_cam1 * args.weight + highres_cam2 * (1-args.weight)
np.save(os.path.join(args.cam_out_dir, img_name + '.npy'),
{"keys": valid_cat, "cam": strided_cam_weight.cpu(), "high_res": highres_cam_weight.cpu().numpy()})
if process_id == n_gpus - 1 and iter % (len(databin) // 20) == 0:
print("%d " % ((5*iter+1)//(len(databin) // 20)), end='')
def run(args):
model = getattr(importlib.import_module(args.amr_network), 'CAM')()
model.load_state_dict(torch.load(args.amr_weights_name + '.pth'), strict=True)
model.eval()
n_gpus = torch.cuda.device_count()
dataset = voc12.dataloader.VOC12ClassificationDatasetMSF(args.train_list,
voc12_root=args.voc12_root, scales=args.cam_scales)
dataset = torchutils.split_dataset(dataset, n_gpus)
print('[ ', end='')
multiprocessing.spawn(_work, nprocs=n_gpus, args=(model, dataset, args), join=True)
print(']')
torch.cuda.empty_cache()
|
473989
|
from .context import Context
from .constraint import PrimExprConstraint, VarConstraint, StmtConstraint, BlockConstraint, PrimFuncConstraint
from .constraint import Constraint
|
473997
|
from carla_utils import carla
import numpy as np
from typing import List, Any
import pickle
import os
from os.path import join
from ..basic import Data, YamlConfig
from ..world_map import Role, get_topology
from ..augment import GlobalPath
from ..agents import AgentListMaster, BaseAgent
from .scenario import ScenarioSingleAgent
class Recorder(object):
def __init__(self, dir_path):
self.dir_path = dir_path
self.records = dict()
def record_town_map(self, scenario: ScenarioSingleAgent):
file_path = join(self.dir_path, scenario.map_name + '.txt')
if not os.path.isfile(file_path):
with open(file_path, 'wb') as f:
pickle.dump(PicklableTownMap(scenario.town_map), f)
return
def record_scenario(self, config: YamlConfig, scenario: ScenarioSingleAgent):
self.records['scenario'] = {
'frequency': config.decision_frequency,
'map_name': scenario.map_name,
}
def record_agents(self, timestamp, agents_master: AgentListMaster, epoch_info: Data):
"""
Args:
timestamp: time.time()
agents: list of BaseAgent and BaseAgentObstacle
Returns:
"""
for agent in agents_master.agents:
agent_key = 'agent' + '_' + agent.role_name.atype.name + '_' + str(agent.vi)
            if self.records.get(agent_key) is None:
self.records[agent_key] = dict()
self.records[agent_key][timestamp] = Data(agent=PicklableAgent(agent))
for obstacle in agents_master.obstacles:
obstacle_key = 'obstacle' + '_' + obstacle.role_name.atype.name + '_' + str(obstacle.vi)
            if self.records.get(obstacle_key) is None:
self.records[obstacle_key] = dict()
self.records[obstacle_key][timestamp] = Data(agent=PicklableAgent(obstacle))
if epoch_info.done:
for agent in agents_master.agents:
agent_key = 'agent' + '_' + agent.role_name.atype.name + '_' + str(agent.vi)
global_path = PicklableGlobalPath(agent.global_path)
                # Attach the finished episode's path to every stored snapshot of this agent
                for t, data in self.records[agent_key].items():
                    data.agent.global_path = global_path
return
def record_experience(self, timestamp, agents_master: AgentListMaster, actions):
for agent, action in zip(agents_master.agents, actions):
agent_key = 'agent' + '_' + agent.role_name.atype.name + '_' + str(agent.vi)
self.records[agent_key][timestamp].update(action=action)
return
def save_to_disk(self, index):
file_path = join(self.dir_path, str(index) + '.txt')
with open(file_path, 'wb') as f:
pickle.dump(self.records, f)
return
def clear(self):
del self.records
self.records = dict()
@staticmethod
def load_from_disk(file_path):
record = None
with open(file_path, 'rb') as f:
record = pickle.load(f)
return record
# =============================================================================
# -- Picklable ---------------------------------------------------------------
# =============================================================================
class PicklableAgent(object):
def __init__(self, agent: BaseAgent):
self.id = agent.vi
self.vi = agent.vi
self.state = agent.get_state()
attributes = agent.vehicle.attributes
attributes['role_name'] = agent.role_name
self.attributes = attributes
bbx = agent.vehicle.bounding_box.extent
x, y, z = bbx.x, bbx.y, bbx.z
bbx = PicklableBoundingBox(x, y, z)
self.bounding_box = bbx
self.max_velocity = agent.max_velocity if hasattr(agent, 'max_velocity') else None
self.global_path = None
def get_transform(self):
x, y, z = self.state.x, self.state.y, self.state.z
theta = self.state.theta
location = carla.Location(x, y, z)
rotation = carla.Rotation(yaw=np.rad2deg(theta))
return carla.Transform(location, rotation)
def get_state(self):
return self.state
class PicklableBoundingBox(object):
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
self.extent = self
class PicklableGlobalPath(object):
def __init__(self, global_path: GlobalPath):
self.carla_waypoints = [PicklableWaypoint(wp) for wp in global_path.carla_waypoints]
self.options = global_path.options
self.x = global_path.x
self.y = global_path.y
self.z = global_path.z
self.theta = global_path.theta
self.curvatures = global_path.curvatures
self.distances = global_path.distances
self.sampling_resolution = global_path.sampling_resolution
self._max_coverage = 0
def __len__(self):
return len(self.carla_waypoints)
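    # Delegate to GlobalPath's implementations via duck typing; this picklable
    # copy carries the same attributes those methods use.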
def _step_coverage(self, current_transform):
return GlobalPath._step_coverage(self, current_transform)
def remaining_waypoints(self, current_transform):
return GlobalPath.remaining_waypoints(self, current_transform)
class PicklableWaypoint(object):
def __init__(self, waypoint: carla.Waypoint):
self.transform = PicklableTransform(waypoint.transform)
class PicklableTransform(object):
def __init__(self, transform: carla.Transform):
self.location = PicklableLocation(transform.location)
self.rotation = PicklableRotation(transform.rotation)
class PicklableLocation(object):
def __init__(self, location: carla.Location):
self.x = location.x
self.y = location.y
self.z = location.z
def distance(self, loc):
dx = self.x - loc.x
dy = self.y - loc.y
dz = self.z - loc.z
return np.sqrt(dx**2 + dy**2 + dz**2)
class PicklableRotation(object):
def __init__(self, rotation: carla.Rotation):
self.roll = rotation.roll
self.pitch = rotation.pitch
self.yaw = rotation.yaw
class PicklableTownMap(object):
def __init__(self, town_map):
self.name = town_map.name
self.cua_waypoints = [PicklableWaypoint(wp) for wp in town_map.generate_waypoints(0.1)]
self.opendrive_content = town_map.to_opendrive()
self.topology_origin = [(PicklableWaypoint(start), PicklableWaypoint(end)) for (start, end) in town_map.get_topology()]
self.topology = [t.info for t in get_topology(town_map, sampling_resolution=2.0)]
def generate_waypoints(self, _):
return self.cua_waypoints
def to_opendrive(self):
return self.opendrive_content
def get_topology(self):
return self.topology_origin
|
474002
|
import pyblish.api
class FusionSaveComp(pyblish.api.ContextPlugin):
"""Save current comp"""
label = "Save current file"
order = pyblish.api.ExtractorOrder - 0.49
hosts = ["fusion"]
families = ["render"]
def process(self, context):
comp = context.data.get("currentComp")
assert comp, "Must have comp"
current = comp.GetAttrs().get("COMPS_FileName", "")
assert context.data['currentFile'] == current
self.log.info("Saving current file..")
comp.Save()
|
474015
|
import pytest
import numpy as np
import pybinding as pb
from pybinding.repository import graphene
def silence_parallel_output(factory):
factory.hooks.status.clear()
factory.config.pbar_fd = None
factory.config.filename = None
def test_sweep(baseline, plot_if_fails):
@pb.parallelize(v=np.linspace(0, 0.1, 10))
def factory(v, energy=np.linspace(0, 0.1, 10)):
model = pb.Model(
graphene.monolayer(),
graphene.hexagon_ac(side_width=15),
pb.constant_potential(v)
)
kpm = pb.kpm(model, kernel=pb.lorentz_kernel())
return kpm.deferred_ldos(energy, broadening=0.15, position=[0, 0], sublattice="B")
silence_parallel_output(factory)
labels = dict(title="test sweep", x="V (eV)", y="E (eV)", data="LDOS")
result = pb.parallel.sweep(factory, labels=labels)
expected = baseline(result)
plot_if_fails(result, expected, 'plot')
assert pytest.fuzzy_equal(result, expected, rtol=1e-3, atol=1e-6)
def test_ndsweep(baseline):
@pb.parallelize(v1=np.linspace(0, 0.1, 5), v2=np.linspace(-0.2, 0.2, 4))
def factory(v1, v2, energy=np.linspace(0, 0.1, 10)):
model = pb.Model(
graphene.monolayer(),
graphene.hexagon_ac(side_width=15),
pb.constant_potential(v1),
pb.constant_potential(v2)
)
kpm = pb.kpm(model, kernel=pb.lorentz_kernel())
return kpm.deferred_ldos(energy, broadening=0.15, position=[0, 0])
silence_parallel_output(factory)
result = pb.parallel.ndsweep(factory)
expected = baseline(result)
assert pytest.fuzzy_equal(result, expected, rtol=1e-3, atol=1e-6)
|
474024
|
from . import libevt
class EVTErrCode:
EVT_OK = 0
EVT_INTERNAL_ERROR = -1
EVT_INVALID_ARGUMENT = -2
EVT_INVALID_PRIVATE_KEY = -3
EVT_INVALID_PUBLIC_KEY = -4
EVT_INVALID_SIGNATURE = -5
EVT_INVALID_HASH = -6
EVT_INVALID_ACTION = -7
EVT_INVALID_BINARY = -8
EVT_INVALID_JSON = -9
EVT_INVALID_ADDRESS = -10
EVT_SIZE_NOT_EQUALS = -11
EVT_DATA_NOT_EQUALS = -12
EVT_INVALID_LINK = -13
EVT_NOT_INIT = -15
class EVTException(Exception):
def __init__(self, err):
if err == 'EVT_INTERNAL_ERROR':
evt = libevt.check_lib_init()
code = evt.evt_last_error()
errmsg = '{}: {}'.format(err, code)
            super().__init__(errmsg)
else:
            super().__init__(err)
class EVTInternalErrorException(Exception):
def __init__(self):
err = 'EVT_INTERNAL_ERROR'
        super().__init__(err)
class EVTInvalidArgumentException(Exception):
def __init__(self):
err = 'EVT_INVALID_ARGUMENT'
        super().__init__(err)
class EVTInvalidPrivateKeyException(Exception):
def __init__(self):
err = 'EVT_INVALID_PRIVATE_KEY'
        super().__init__(err)
class EVTInvalidPublicKeyException(Exception):
def __init__(self):
err = 'EVT_INVALID_PUBLIC_KEY'
        super().__init__(err)
class EVTInvalidSignatureException(Exception):
def __init__(self):
err = 'EVT_INVALID_SIGNATURE'
        super().__init__(err)
class EVTInvalidHashException(Exception):
def __init__(self):
err = 'EVT_INVALID_HASH'
        super().__init__(err)
class EVTInvalidActionException(Exception):
def __init__(self):
err = 'EVT_INVALID_ACTION'
        super().__init__(err)
class EVTInvalidBinaryException(Exception):
def __init__(self):
err = 'EVT_INVALID_BINARY'
        super().__init__(err)
class EVTInvalidJsonException(Exception):
def __init__(self):
err = 'EVT_INVALID_JSON'
        super().__init__(err)
class EVTInvalidAddressException(Exception):
def __init__(self):
err = 'EVT_INVALID_ADDRESS'
        super().__init__(err)
class EVTSizeNotEqualsException(Exception):
def __init__(self):
err = 'EVT_SIZE_NOT_EQUALS'
        super().__init__(err)
class EVTDataNotEqualsException(Exception):
def __init__(self):
err = 'EVT_DATA_NOT_EQUALS'
        super().__init__(err)
class EVTInvalidLinkException(Exception):
def __init__(self):
err = 'EVT_INVALID_LINK'
        super().__init__(err)
class EVTNotInitException(Exception):
def __init__(self):
err = 'EVT_NOT_INIT'
        super().__init__(err)
ex_map = {
EVTErrCode.EVT_INTERNAL_ERROR: EVTInternalErrorException,
EVTErrCode.EVT_INVALID_ARGUMENT: EVTInvalidArgumentException,
EVTErrCode.EVT_INVALID_PRIVATE_KEY: EVTInvalidPrivateKeyException,
EVTErrCode.EVT_INVALID_PUBLIC_KEY: EVTInvalidPublicKeyException,
EVTErrCode.EVT_INVALID_SIGNATURE: EVTInvalidSignatureException,
EVTErrCode.EVT_INVALID_HASH: EVTInvalidHashException,
EVTErrCode.EVT_INVALID_ACTION: EVTInvalidActionException,
EVTErrCode.EVT_INVALID_BINARY: EVTInvalidBinaryException,
EVTErrCode.EVT_INVALID_JSON: EVTInvalidJsonException,
EVTErrCode.EVT_INVALID_ADDRESS: EVTInvalidAddressException,
EVTErrCode.EVT_INVALID_LINK: EVTInvalidLinkException,
EVTErrCode.EVT_SIZE_NOT_EQUALS: EVTSizeNotEqualsException,
EVTErrCode.EVT_DATA_NOT_EQUALS: EVTDataNotEqualsException,
EVTErrCode.EVT_NOT_INIT: EVTNotInitException
}
def evt_exception_raiser(error_code):
if error_code == EVTErrCode.EVT_OK:
return
if error_code in ex_map:
raise ex_map[error_code]
raise Exception('Unknown error code')
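# Usage sketch (illustrative, not part of the original module): map a libevt
# return code to its Python exception.
if __name__ == '__main__':
    evt_exception_raiser(EVTErrCode.EVT_OK)  # EVT_OK returns silently
    try:
        evt_exception_raiser(EVTErrCode.EVT_INVALID_JSON)
    except EVTInvalidJsonException as e:
        print('raised:', e)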
|
474071
|
from modules.basemodule import BaseModule
from pprint import pformat
class Eval(BaseModule):
    def alias(self, line):
        if line.startswith('#py '):  # '#py EXPR' evaluates EXPR and logs a pretty-printed result
            rest = line[4:]
            self.mud.log("\n" + pformat(eval(rest)))
            return True
        elif line.startswith('#pye '):  # '#pye STMT' executes STMT for its side effects
            rest = line[5:]
            exec(rest)
            return True
|
474129
|
import os
import sys
sys.path.append ('opy')
import opy
from setuptools import setup
import codecs
def read (*paths):
with codecs.open (os.path.join (*paths), 'r', encoding = 'utf-8') as aFile:
return aFile.read()
setup (
name = 'Opy',
version = opy.programVersion,
description = 'OPY - Obfuscator for Python, string obfuscation added, keyword added',
long_description = (
read ('README.rst') + '\n\n' +
read ('license_reference.txt')
),
keywords = ['opy', 'obfuscator', 'obfuscation', 'obfuscate', 'kivy', 'pyo', 'python'],
url = 'https://github.com/JdeH/Opy/',
license = 'Apache 2',
author = '<NAME>',
author_email = '<EMAIL>',
packages = ['opy'],
include_package_data = True,
install_requires = [],
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: Other/Proprietary License',
'Topic :: Software Development :: Libraries :: Python Modules',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
],
)
|
474142
|
import logging
from gearman.errors import UnknownCommandError
from gearman.protocol import get_command_name
gearman_logger = logging.getLogger(__name__)
class GearmanCommandHandler(object):
"""A command handler manages the state which we should be in given a certain stream of commands
GearmanCommandHandler does no I/O and only understands sending/receiving commands
"""
def __init__(self, connection_manager=None):
self.connection_manager = connection_manager
def initial_state(self, *largs, **kwargs):
"""Called by a Connection Manager after we've been instantiated and we're ready to send off commands"""
pass
def on_io_error(self):
pass
def decode_data(self, data):
"""Convenience function :: handle binary string -> object unpacking"""
return self.connection_manager.data_encoder.decode(data)
def encode_data(self, data):
"""Convenience function :: handle object -> binary string packing"""
return self.connection_manager.data_encoder.encode(data)
def fetch_commands(self):
"""Called by a Connection Manager to notify us that we have pending commands"""
continue_working = True
while continue_working:
cmd_tuple = self.connection_manager.read_command(self)
if cmd_tuple is None:
break
cmd_type, cmd_args = cmd_tuple
continue_working = self.recv_command(cmd_type, **cmd_args)
def send_command(self, cmd_type, **cmd_args):
"""Hand off I/O to the connection mananger"""
self.connection_manager.send_command(self, cmd_type, cmd_args)
def recv_command(self, cmd_type, **cmd_args):
"""Maps any command to a recv_* callback function"""
completed_work = None
gearman_command_name = get_command_name(cmd_type)
        # get_command_name returns its input unchanged when the command is unknown
        if gearman_command_name == cmd_type or not gearman_command_name.startswith('GEARMAN_COMMAND_'):
unknown_command_msg = 'Could not handle command: %r - %r' % (gearman_command_name, cmd_args)
gearman_logger.error(unknown_command_msg)
raise ValueError(unknown_command_msg)
recv_command_function_name = gearman_command_name.lower().replace('gearman_command_', 'recv_')
cmd_callback = getattr(self, recv_command_function_name, None)
if not cmd_callback:
missing_callback_msg = 'Could not handle command: %r - %r' % (get_command_name(cmd_type), cmd_args)
gearman_logger.error(missing_callback_msg)
raise UnknownCommandError(missing_callback_msg)
# Expand the arguments as passed by the protocol
# This must match the parameter names as defined in the command handler
completed_work = cmd_callback(**cmd_args)
return completed_work
def recv_error(self, error_code, error_text):
"""When we receive an error from the server, notify the connection manager that we have a gearman error"""
return self.connection_manager.on_gearman_error(error_code, error_text)
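# Illustrative sketch (hypothetical subclass, not part of the library): a handler
# only needs recv_* methods whose names mirror the GEARMAN_COMMAND_* constants;
# recv_command() lowercases the command name and swaps the 'gearman_command_'
# prefix for 'recv_' to locate the callback.
class _EchoCommandHandler(GearmanCommandHandler):
    def recv_echo_res(self, data):
        gearman_logger.info('echo response: %r', data)
        return True  # keep processing further commands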
|
474149
|
import json
import os
import time
import argparse
parser = argparse.ArgumentParser(description="Post processing for converted explanation")
parser.add_argument("--data", type=str, default=None, help="path to preprocessed data")
parser.add_argument("--save", type=str, default='./', help="path for saving the data")
args = parser.parse_args()
special_token = {'1293422':'sidewalk','2109828':'fence','1749668':'table','846582':'helmet','mice':'mouse'}
word_dict = dict()
for split in ['train','val']:
explanation = json.load(open(os.path.join(args.data,'converted_explanation_'+split+'.json')))
for qid in explanation:
cur_exp = explanation[qid].replace('?','').replace(',',' ').replace('.',' ')
cur_exp = [cur for cur in cur_exp.split(' ') if cur not in ['',' ']]
for cur_word in cur_exp:
if '#' in cur_word or '@' in cur_word:
continue
if cur_word in special_token:
cur_word = special_token[cur_word]
if cur_word not in word_dict:
word_dict[cur_word] = len(word_dict)
# convert plural nouns into singular nouns
duplicated_word = dict()
for k in word_dict:
if k[:-1] in word_dict and k[-1] == 's':
duplicated_word[k] = k[:-1]
for k in special_token:
duplicated_word[k] = special_token[k]
# exclude tokens commonly used in plural forms (e.g., shaking hands, taking pictures)
del duplicated_word['pictures']
del duplicated_word['hands']
del duplicated_word['dvds']
start = time.time()
for split in ['train','val']:
explanation = json.load(open(os.path.join(args.data,'converted_explanation_'+split+'.json')))
processed_data = dict()
for idx,qid in enumerate(explanation):
if 'ERROR' in explanation[qid]:
# processed_data[qid] = ''
continue
cur_exp = explanation[qid].replace('?','').replace(',',' ').replace('.',' ')
cur_exp = ' '.join([cur for cur in cur_exp.split(' ') if cur not in ['',' ']])
cur_exp = ' '+ cur_exp + ' '
for k in duplicated_word:
cur_exp = cur_exp.replace(' '+k+' ',' '+duplicated_word[k]+' ')
processed_data[qid] = cur_exp[1:-1]
if (idx+1)%50000 == 0:
print('Finished %d out of %d questions in %s split, time spent: %.2f' %(idx+1, len(explanation),split,time.time()-start))
with open(os.path.join(args.save,'converted_explanation_'+split+'.json'),'w') as f:
json.dump(processed_data,f)
|
474156
|
import os
import shutil
def move_files(abs_dirname,speaker,datapath,trainSet,cvSet,tstSet):
"""Move files into subdirectories."""
for subdir in os.listdir(abs_dirname):
files = [os.path.join(abs_dirname,subdir, f) for f in os.listdir(os.path.join(abs_dirname,subdir))]
cv_dir = os.path.abspath(os.path.join(datapath + cvSet + speaker))
cv_subdir = os.path.join(cv_dir,subdir)
test_dir = os.path.abspath(os.path.join(datapath + tstSet + speaker))
test_subdir = os.path.join(test_dir,subdir)
if not os.path.isdir(test_subdir):
if not os.path.isdir(test_dir):
print('splitting',speaker)
os.mkdir(cv_dir)
os.mkdir(test_dir)
os.mkdir(cv_subdir)
os.mkdir(test_subdir)
#separate files
# 1 test
# 2 cv
# 3,4,5 train
# 6 test
# 7 cv
# 8,9,0 train
ncv = [2,7]
ntest = [1,6]
for f in files:
            # Four characters before the extension: the utterance number for audio
            # files (e.g. '0001' in '84-121123-0001.flac'); '*.trans.txt' transcripts
            # yield 'tran' here and are left in place
            num = f[-9:-5]
            if num != 'tran':
rem = int(num) % 10
if rem in ncv: #move to cv
# move file to target dir
f_base = os.path.basename(f)
shutil.move(f, cv_subdir)
elif rem in ntest:
# move file to target dir
f_base = os.path.basename(f)
shutil.move(f, test_subdir)
def main(speakers,datapath,trainSet,cvSet,tstSet):
for speaker in speakers:
src_dir = datapath + trainSet + speaker
if not os.path.exists(src_dir):
raise Exception('Directory does not exist ({0}).'.format(src_dir))
move_files(os.path.abspath(src_dir),speaker,datapath,trainSet,cvSet,tstSet)
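# Usage sketch (hypothetical paths): split each listed speaker's directory so
# that utterance numbers ending in 1 or 6 move to the test set and 2 or 7 to cv:
#   main(['speaker1'], '/data/corpus/', 'train/', 'cv/', 'test/')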
|
474227
|
import asyncio
import json
from unittest import mock
from mmpy_bot import ExamplePlugin, Message, Settings, WebHookExample
from mmpy_bot.driver import Driver
from mmpy_bot.event_handler import EventHandler
from mmpy_bot.wrappers import WebHookEvent
def create_message(
text="hello",
mentions=["q<PASSWORD>"],
channel_type="O",
sender_name="betty",
):
return Message(
{
"event": "posted",
"data": {
"channel_display_name": "Off-Topic",
"channel_name": "off-topic",
"channel_type": channel_type,
"mentions": mentions,
"post": {
"id": "wqpuawcw3iym3pq63s5xi1776r",
"create_at": 1533085458236,
"update_at": 1533085458236,
"edit_at": 0,
"delete_at": 0,
"is_pinned": "False",
"user_id": "131gkd5thbdxiq141b3514bgjh",
"channel_id": "4fgt3n51f7ftpff91gk1iy1zow",
"root_id": "",
"parent_id": "",
"original_id": "",
"message": text,
"type": "",
"props": {},
"hashtags": "",
"pending_post_id": "",
},
"sender_name": sender_name,
"team_id": "au64gza3iint3r31e7ewbrrasw",
},
"broadcast": {
"omit_users": "None",
"user_id": "",
"channel_id": "4fgt3n51f7ftpff91gk1iy1zow",
"team_id": "",
},
"seq": 29,
}
)
class TestEventHandler:
@mock.patch("mmpy_bot.driver.Driver.username", new="my_username")
def test_init(self):
handler = EventHandler(
Driver(), Settings(), plugins=[ExamplePlugin(), WebHookExample()]
)
# Test the name matcher regexp
assert handler._name_matcher.match("@my_username are you there?")
assert not handler._name_matcher.match("@other_username are you there?")
# Test that all listeners from the individual plugins are now registered on
# the handler
for plugin in handler.plugins:
for pattern, listener in plugin.message_listeners.items():
assert listener in handler.message_listeners[pattern]
for pattern, listener in plugin.webhook_listeners.items():
assert listener in handler.webhook_listeners[pattern]
# And vice versa, check that any listeners on the handler come from the
# registered plugins
for pattern, listeners in handler.message_listeners.items():
for listener in listeners:
assert any(
[
pattern in plugin.message_listeners
and listener in plugin.message_listeners[pattern]
for plugin in handler.plugins
]
)
for pattern, listeners in handler.webhook_listeners.items():
for listener in listeners:
assert any(
[
pattern in plugin.webhook_listeners
and listener in plugin.webhook_listeners[pattern]
for plugin in handler.plugins
]
)
@mock.patch("mmpy_bot.driver.Driver.username", new="my_username")
def test_should_ignore(self):
handler = EventHandler(
Driver(), Settings(IGNORE_USERS=["ignore_me"]), plugins=[]
)
# We shouldn't ignore a message from betty, since she is not listed
assert not handler._should_ignore(create_message(sender_name="betty"))
assert handler._should_ignore(create_message(sender_name="ignore_me"))
# We ignore our own messages by default
assert handler._should_ignore(create_message(sender_name="my_username"))
# But shouldn't do so if this is explicitly requested
handler = EventHandler(
Driver(),
Settings(IGNORE_USERS=["ignore_me"]),
plugins=[],
ignore_own_messages=False,
)
assert not handler._should_ignore(create_message(sender_name="my_username"))
@mock.patch("mmpy_bot.event_handler.EventHandler._handle_post")
def test_handle_event(self, handle_post):
handler = EventHandler(Driver(), Settings(), plugins=[])
# This event should trigger _handle_post
asyncio.run(handler._handle_event(json.dumps(create_message().body)))
# This event should not
asyncio.run(handler._handle_event(json.dumps({"event": "some_other_event"})))
handle_post.assert_called_once_with(create_message().body)
@mock.patch("mmpy_bot.driver.Driver.username", new="my_username")
def test_handle_post(self):
# Create an initialized plugin so its listeners are registered
driver = Driver()
plugin = ExamplePlugin().initialize(driver)
# Construct a handler with it
handler = EventHandler(driver, Settings(), plugins=[plugin])
# Mock the call_function of the plugin so we can make some asserts
async def mock_call_function(function, message, groups):
# This is the regexp that we're trying to trigger
assert function.matcher.pattern == "sleep ([0-9]+)"
assert message.text == "sleep 5" # username should be stripped off
assert groups == ["5"] # arguments should be matched and passed explicitly
with mock.patch.object(
plugin, "call_function", wraps=mock_call_function
) as mocked:
# Transform the default message into a raw post event so we can pass it
new_body = create_message(text="@my_username sleep 5").body.copy()
new_body["data"]["post"] = json.dumps(new_body["data"]["post"])
new_body["data"]["mentions"] = json.dumps(new_body["data"]["mentions"])
asyncio.run(handler._handle_post(new_body))
# Assert the function was called, so we know the asserts succeeded.
mocked.assert_called_once()
def test_handle_webhook(self):
# Create an initialized plugin so its listeners are registered
driver = Driver()
plugin = WebHookExample().initialize(driver, Settings())
# Construct a handler with it
handler = EventHandler(driver, Settings(), plugins=[plugin])
# Mock the call_function of the plugin so we can make some asserts
async def mock_call_function(function, event, groups):
# This is the regexp that we're trying to trigger
assert function.matcher.pattern == "ping"
assert event.text == "hello!"
assert groups == []
with mock.patch.object(
plugin, "call_function", wraps=mock_call_function
) as mocked:
asyncio.run(
handler._handle_webhook(
WebHookEvent(
body={"text": "hello!"},
request_id="request_id",
webhook_id="ping",
),
)
)
# Assert the function was called, so we know the asserts succeeded.
mocked.assert_called_once()
|
474248
|
from moai.data.datasets.generic.structured_images import StructuredImages
__all__ = [
"StructuredImages",
]
|
474254
|
from src.alerter.alert_severities.severity import Severity
from src.alerter.alert_severities.severity_code import SeverityCode
|
474263
|
import struct
LINK_TYPES = {
'BLE': 251,
'ZIGBEE': 195,
'H4': 201,
}
class PcapFile(object):
def __init__(self, filename, link_type="H4"):
self.filename = filename
self.link_type = LINK_TYPES[link_type]
self._file = open(self.filename, "wb")
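        # Global pcap header: magic, version 2.4, thiszone, sigfigs, snaplen, link type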
self._file.write(struct.pack("<IHHiIII", 0xa1b2c3d4, 2, 4, 0, 0, 0xFFFF, self.link_type))
def write_packet(self, packet, ts_seconds=0, ts_useconds=0):
# TODO: timestamp from time..
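        # Per-packet record header: ts_sec, ts_usec, captured length, original length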
self._file.write(struct.pack("<IIII", ts_seconds, ts_useconds, len(packet), len(packet)))
self._file.write(packet)
def close(self):
self._file.close()
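# Usage sketch (hypothetical file name and payload): write a single H4 packet.
if __name__ == '__main__':
    pcap = PcapFile('/tmp/example.pcap', link_type='H4')
    pcap.write_packet(b'\x01\x03\x0c\x00')  # e.g. an HCI Reset command over H4
    pcap.close()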
|
474275
|
import responses
from wiremock.tests.base import BaseClientTestCase, attr
from wiremock.client import Scenarios
class ScenariosResourceTests(BaseClientTestCase):
@attr("unit", "scenarios", "resource")
@responses.activate
def test_reset_scenarios(self):
responses.add(responses.POST, "http://localhost/__admin/scenarios/reset", body="", status=200)
r = Scenarios.reset_all_scenarios()
self.assertEqual(200, r.status_code)
|
474307
|
from robovat.math.euler import Euler
from robovat.math.orientation import Orientation
from robovat.math.point import Point
from robovat.math.pose import get_transform
from robovat.math.pose import Pose
from robovat.math.quaternion import Quaternion
|
474315
|
class Stack:
def __init__(self):
self.stack = []
self.max_stack = []
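        # max_stack holds indices into self.stack of strictly increasing maxima;
        # its top always points at the current maximum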
def push(self, val):
self.stack.append(val)
if not self.max_stack or val > self.stack[self.max_stack[-1]]:
self.max_stack.append(len(self.stack) - 1)
def pop(self):
if not self.stack:
return None
if len(self.stack) - 1 == self.max_stack[-1]:
self.max_stack.pop()
return self.stack.pop()
def max(self):
if not self.stack:
return None
return self.stack[self.max_stack[-1]]
s = Stack()
s.push(1)
s.push(3)
s.push(2)
s.push(5)
assert s.max() == 5
s.pop()
assert s.max() == 3
s.pop()
assert s.max() == 3
s.pop()
assert s.max() == 1
s.pop()
assert not s.max()
s = Stack()
s.push(10)
s.push(3)
s.push(2)
s.push(5)
assert s.max() == 10
s.pop()
assert s.max() == 10
s.pop()
assert s.max() == 10
s.pop()
assert s.max() == 10
s.pop()
assert not s.max()
|
474333
|
import doctest
import optparse
import pytest
try:
import pathlib
except ImportError:
import pathlib2 as pathlib
from flake8_rst.rst import RST_RE, apply_default_groupnames, apply_directive_specific_options, merge_by_group
from flake8_rst.sourceblock import SourceBlock, _extract_roles
from hypothesis import assume, given, note, example
from hypothesis import strategies as st
ROOT_DIR = pathlib.Path(__file__).parent
DATA_DIR = ROOT_DIR / 'data'
code_strategy = st.characters(blacklist_categories=['Cc'])
@given(code_strategy, code_strategy)
def test_from_sourcecode(bootstrap, src):
assume(bootstrap and src)
code_block = SourceBlock.from_source(bootstrap, src)
expected = '\n'.join([bootstrap, src])
result = code_block.complete_block
assert result == expected
@given(code_strategy)
def test_get_correct_line(src):
code_block = SourceBlock.from_source('', src)
for line_number, line in enumerate(src.splitlines(True), start=1):
code_line = code_block.get_code_line(line_number)
assert code_line['lineno'] == line_number
assert code_line['source'] == line
def test_find_block():
example = DATA_DIR / 'example_1.rst'
src = example.open().read()
code_block = SourceBlock.from_source('', src)
for match, block in zip(RST_RE.finditer(src), code_block.find_blocks(RST_RE)):
origin_code = match.group('code')
origin_code = ''.join(map(lambda s: s.lstrip() + '\n', origin_code.splitlines()))
assert block.source_block == origin_code
def test_clean_doctest():
example = DATA_DIR / 'example_1.rst'
src = example.open().read()
code_block = SourceBlock.from_source('', src)
for match, block in zip(RST_RE.finditer(src), code_block.find_blocks(RST_RE)):
origin_code = match.group('code')
origin_code = ''.join((line.source for line in doctest.DocTestParser().get_examples(origin_code)))
assert block.clean_doctest()
assert block.source_block == origin_code
assert '>>>' not in origin_code
@pytest.mark.parametrize('src, expected', [
(DATA_DIR / 'example_11.rst', "name = 'Brian'\nother = brian\n%timeit a = (1, 2,name) # noqa: F821\n"
"b = (3, 4, other)\nfor i in range(3):\n print(a[i] is b[i])\n\n"),
(".. ipython:: python\n In [4]: grouped = df.groupby('A')\n\n In [5]: for name, group in grouped:\n"
" ...: print(name)\n ...: print(group)\n ...:\n",
"grouped = df.groupby('A')\nfor name, group in grouped:\n print(name)\n print(group)\n\n")
])
def test_clean_ipython(src, expected):
if isinstance(src, pathlib.Path):
src = src.open().read()
code_block = SourceBlock.from_source('', src)
block = next(code_block.find_blocks(RST_RE))
assert block.clean_ipython()
assert expected == block.source_block
@pytest.mark.parametrize('src, expected', [
('%timeit a = (1, 2,name)\n', 'a = (1, 2,name)\n'),
('%time a = (1, 2,name)\nb = (3, 4, other)\n', 'a = (1, 2,name)\nb = (3, 4, other)\n'),
("%time df = pd.read_csv('big.csv')\n", "df = pd.read_csv('big.csv')\n"),
('%time df = pd.read_csv("big.csv")\n', 'df = pd.read_csv("big.csv")\n'),
('%time df = pd.read_csv("big.csv")\n%time df = pd.read_csv(\'big.csv\')\n',
'df = pd.read_csv("big.csv")\ndf = pd.read_csv(\'big.csv\')\n'),
])
def test_clean_console_syntax(src, expected):
block = SourceBlock.from_source('', src)
block.clean_console_syntax()
block.clean_ignored_lines()
assert block.source_block == expected
@pytest.mark.parametrize('src', [
'%prun -l 4 f(x)\n',
'%%timeit x = range(10000)\nmax(x)\n',
])
def test_ignore_unrecognized_console_syntax(src):
block = SourceBlock.from_source('', src)
block.clean_console_syntax()
block.clean_ignored_lines()
assert not block.source_block
@pytest.mark.parametrize('src, expected', [
('@okexcept\na = (1, 2,name)\n', 'a = (1, 2,name)\n'),
('@savefig "picture.png"\na = (1, 2,name)\nb = (3, 4, other)\n', 'a = (1, 2,name)\nb = (3, 4, other)\n'),
])
def test_clean_ignored_lines(src, expected):
block = SourceBlock.from_source('', src)
block.clean_ignored_lines()
assert block.source_block == expected
@given(code_strategy, code_strategy, code_strategy)
def test_merge_source_blocks(bootstrap, src_1, src_2):
block1 = SourceBlock.from_source(bootstrap, src_1)
block2 = SourceBlock.from_source(bootstrap, src_2, len(src_1.splitlines()) + 1)
expected = SourceBlock.from_source(bootstrap, src_1 + src_2)
merged = SourceBlock.merge([block1, block2])
    reversed_merged = SourceBlock.merge([block2, block1])
assert merged.complete_block == expected.complete_block
assert reversed_merged.complete_block == expected.complete_block
@pytest.mark.parametrize("filename, directive, roles, default_groupnames, expected", [
('test.rst', 'code-block', {}, "*.rst->*: default", {'group': 'default'}),
('test.py', 'code-block', {}, "*.rst->*: default", {'group': 'None'}),
('test.rst', 'code-block', {}, "*->code-block: code-block, *->ipython: ipython", {'group': 'code-block'}),
('test.rst', 'ipython', {}, "*->code-block: code-block, *->ipython: ipython", {'group': 'ipython'}),
('test.py', 'code-block', {}, "last.py->code-block: code-block, *.rst->ipython: ipython", {'group': 'None'}),
])
def test_default_groupname(filename, directive, roles, default_groupnames, expected):
func = apply_default_groupnames(lambda *a, **k: [SourceBlock([], [], directive=directive, roles=roles)])
block = next(func(filename, options=optparse.Values(dict(default_groupnames=default_groupnames))))
assert block.roles == expected
@pytest.mark.parametrize("directive, roles, expected", [
('code-block', {}, {}),
('ipython', {}, {'add-ignore': 'E302, E305'}),
('ipython', {'add-ignore': 'F'}, {'add-ignore': 'F, E302, E305'}),
])
def test_directive_specific_options(directive, roles, expected):
func = apply_directive_specific_options(lambda *a, **k: [SourceBlock([], [], directive=directive, roles=roles)])
block = next(func())
assert block.roles == expected
@given(role=code_strategy, value=code_strategy, comment=code_strategy)
@example(role='group', value='Group#4', comment='Within 4th group.')
@pytest.mark.parametrize("string_format", [u' :flake8-{role}:{value}\n',
u' :flake8-{role}:{value} #{comment}\n'])
def test_roles(string_format, role, value, comment):
assume(role.strip() and value.strip() and comment.strip())
role_string = string_format.format(role=role, value=value, comment=comment)
note(role_string)
roles = _extract_roles(role_string)
assert value == roles[role]
@pytest.mark.parametrize("group_names, expected", [
(['None', 'None'], ['None', 'None']),
(['', ''], ['']),
(['A', 'B', 'A'], ['A', 'B']),
(['Ignore'], []),
])
def test_merge_by_group(group_names, expected):
source_blocks = [SourceBlock([], [(0, '', '')], roles={'group': group}) for group in group_names]
blocks = merge_by_group(lambda *a, **k: source_blocks)()
result = sorted([block.roles['group'] for block in blocks])
assert result == expected
@given(code_strategy, code_strategy, st.lists(code_strategy, min_size=1))
def test_inject_bootstrap_blocks(bootstrap, src, injected_bootstrap):
note(injected_bootstrap)
block = SourceBlock.from_source(bootstrap, src, roles={'bootstrap': '; '.join(injected_bootstrap)})
expected = SourceBlock.from_source('\n'.join(injected_bootstrap), src)
assert block.complete_block == expected.complete_block
|
474334
|
from fidesops.schemas.masking.masking_configuration import (
RandomStringMaskingConfiguration,
)
from fidesops.service.masking.strategy.masking_strategy_random_string_rewrite import (
RandomStringRewriteMaskingStrategy,
)
def test_mask_with_value():
request_id = "123432"
config = RandomStringMaskingConfiguration(length=6)
masker = RandomStringRewriteMaskingStrategy(configuration=config)
assert 6 == len(masker.mask(["string to mask"], request_id)[0])
config = RandomStringMaskingConfiguration(length=25)
masker = RandomStringRewriteMaskingStrategy(configuration=config)
assert 25 == len(masker.mask(["string to mask"], request_id)[0])
def test_mask_with_multi_value():
request_id = "123432"
config = RandomStringMaskingConfiguration(length=6)
masker = RandomStringRewriteMaskingStrategy(configuration=config)
masked = masker.mask(["string to mask", "another string"], request_id)
assert 6 == len(masked[0])
assert 6 == len(masked[1])
def test_mask_no_value():
request_id = "123432"
config = RandomStringMaskingConfiguration(length=6)
masker = RandomStringRewriteMaskingStrategy(configuration=config)
assert None is masker.mask(None, request_id)
|
474357
|
from polymath.codegen.dnnweavergen.dnnweaver2.scalar.dtypes import FQDtype
import numpy as np
import math
class Tensor(object):
"""
Tensor class for computations
n-dimensional array
"""
def __init__(self, shape, name, data, dtype=FQDtype.FP32, trainable=False):
if isinstance(shape, int):
shape = tuple([shape])
self.shape = shape
self.dtype = dtype
self.name = name
self.trainable = trainable
self.op = None
self.output_nodes = []
self.data = data
self.fpga_addr = None
_pad = []
for i in range(len(self.shape)):
_pad.append((0,0))
self.fpga_pad = tuple(_pad)
_padded_shape = []
for i in range(len(self.shape)):
_padded_shape.append(self.shape[i] + self.fpga_pad[i][0] + self.fpga_pad[i][1])
def initialize_data(self, value):
self.data = value
def __str__(self):
if isinstance(self.shape, tuple):
shape_str = '[' + ','.join([str(x) for x in self.shape]) + ']'
else:
shape_str = '[' + str(self.shape) + ']'
return '{}{} ({})'.format(self.name, shape_str, self.dtype.__str__())
# return '{}{}'.format(self.name, shape_str)
@property
def size(self):
return np.prod(self.shape)
@property
def fpga_shape(self):
_padded_shape = []
for i in range(len(self.shape)):
if isinstance(self.fpga_pad, int):
_padded_shape.append(self.shape[i] + self.fpga_pad*2)
elif isinstance(self.fpga_pad[i], int):
_padded_shape.append(self.shape[i] + self.fpga_pad[i]*2)
else:
_padded_shape.append(self.shape[i] + self.fpga_pad[i][0] + self.fpga_pad[i][1])
return tuple(_padded_shape)
@property
def fpga_size(self):
return np.prod(self.fpga_shape)
@property
def fpga_size_in_bytes(self):
return self.fpga_size * self.dtype.bits / 8
@property
def size_in_bytes(self):
return int(math.ceil(float(self.size * self.dtype.bits) / 8))
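# Usage sketch (hypothetical values; assumes FQDtype.FP32 reports 32 bits):
if __name__ == '__main__':
    t = Tensor((1, 32, 32, 16), name='act', data=None)
    print(t)                # name, shape and dtype
    print(t.size)           # 16384 elements
    print(t.size_in_bytes)  # 65536 bytes with 32-bit entries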
|
474405
|
from io import StringIO
from unittest.mock import patch
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test import TestCase
from ditto.flickr.factories import AccountFactory, UserFactory
class FetchFlickrAccountUserTestCase(TestCase):
def setUp(self):
# What we'll use as return values from UserIdFetcher().fetch()...
self.id_fetcher_success = {
"success": True,
"id": "99999999999@N99",
"fetched": 1,
}
# ...and UserFetcher().fetch():
self.user_fetcher_success = {
"success": True,
"user": {"name": "<NAME>"},
"fetched": 1,
}
self.account = AccountFactory(id=32, user=None)
self.out = StringIO()
self.out_err = StringIO()
def test_fail_with_no_args(self):
with self.assertRaises(CommandError):
call_command("fetch_flickr_account_user")
def test_fail_with_invalid_id(self):
call_command("fetch_flickr_account_user", id="3", stderr=self.out_err)
self.assertIn("No Account found with an id of '3'", self.out_err.getvalue())
@patch("ditto.flickr.management.commands.fetch_flickr_account_user.UserFetcher")
@patch("ditto.flickr.management.commands.fetch_flickr_account_user.UserIdFetcher")
def test_with_id(self, id_fetcher, user_fetcher):
UserFactory(nsid="99999999999@N99")
id_fetcher.return_value.fetch.return_value = self.id_fetcher_success
user_fetcher.return_value.fetch.return_value = self.user_fetcher_success
call_command("fetch_flickr_account_user", id="32", stdout=self.out)
self.assertIn("Fetched and saved user '<NAME>'", self.out.getvalue())
@patch("ditto.flickr.management.commands.fetch_flickr_account_user.UserFetcher")
@patch("ditto.flickr.management.commands.fetch_flickr_account_user.UserIdFetcher")
def test_invalid_nsid(self, id_fetcher, user_fetcher):
"""
Correct error message if we fail to find a user for the fetched
Flickr ID (unlikely).
"""
id_fetcher.return_value.fetch.return_value = self.id_fetcher_success
user_fetcher.return_value.fetch.return_value = {
"success": False,
"messages": ["Oops"],
}
call_command("fetch_flickr_account_user", id="32", stderr=self.out_err)
self.assertIn(
"Failed to fetch a user using Flickr ID '99999999999@N99': Oops",
self.out_err.getvalue(),
)
@patch("ditto.flickr.management.commands.fetch_flickr_account_user.UserIdFetcher")
def test_no_matching_nsid(self, id_fetcher):
"Correct error message if we can't find a Flickr ID for this Account."
id_fetcher.return_value.fetch.return_value = {
"success": False,
"messages": ["Oops"],
}
call_command("fetch_flickr_account_user", id="32", stderr=self.out_err)
self.assertIn(
"Failed to fetch a Flickr ID for this Account: Oops",
self.out_err.getvalue(),
)
@patch("ditto.flickr.management.commands.fetch_flickr_account_user.UserFetcher")
@patch("ditto.flickr.management.commands.fetch_flickr_account_user.UserIdFetcher")
def test_associates_account_with_user(self, id_fetcher, user_fetcher):
"After fetching and saving the user, associate it with the Account."
UserFactory(nsid="99999999999@N99")
id_fetcher.return_value.fetch.return_value = self.id_fetcher_success
user_fetcher.return_value.fetch.return_value = self.user_fetcher_success
call_command("fetch_flickr_account_user", id="32", stdout=self.out)
self.account.refresh_from_db()
self.assertEqual(self.account.user.nsid, "99999999999@N99")
class FetchFlickrOriginalsTestCase(TestCase):
def setUp(self):
self.out = StringIO()
self.out_err = StringIO()
@patch(
"ditto.flickr.management.commands.fetch_flickr_originals.OriginalFilesMultiAccountFetcher" # noqa: E501
)
def test_sends_all_true_to_fetcher_with_account(self, fetcher):
call_command("fetch_flickr_originals", "--all", account="99999999999@N99")
fetcher.assert_called_with(nsid="99999999999@N99")
fetcher.return_value.fetch.assert_called_with(fetch_all=True)
@patch(
"ditto.flickr.management.commands.fetch_flickr_originals.OriginalFilesMultiAccountFetcher" # noqa: E501
)
def test_sends_all_true_to_fetcher_no_account(self, fetcher):
call_command("fetch_flickr_originals", "--all")
fetcher.assert_called_with(nsid=None)
fetcher.return_value.fetch.assert_called_with(fetch_all=True)
@patch(
"ditto.flickr.management.commands.fetch_flickr_originals.OriginalFilesMultiAccountFetcher" # noqa: E501
)
def test_sends_all_false_to_fetcher(self, fetcher):
call_command("fetch_flickr_originals")
fetcher.assert_called_with(nsid=None)
fetcher.return_value.fetch.assert_called_with(fetch_all=False)
@patch(
"ditto.flickr.management.commands.fetch_flickr_originals.OriginalFilesMultiAccountFetcher" # noqa: E501
)
def test_success_output(self, fetcher):
fetcher.return_value.fetch.return_value = [
{"account": "<NAME>", "success": True, "fetched": 33}
]
call_command("fetch_flickr_originals", stdout=self.out)
self.assertIn("Phil Gyford: Fetched 33 Files", self.out.getvalue())
@patch(
"ditto.flickr.management.commands.fetch_flickr_originals.OriginalFilesMultiAccountFetcher" # noqa: E501
)
def test_success_output_verbosity_0(self, fetcher):
fetcher.return_value.fetch.return_value = [
{"account": "<NAME>", "success": True, "fetched": 33}
]
call_command("fetch_flickr_originals", verbosity=0, stdout=self.out)
self.assertEqual("", self.out.getvalue())
@patch(
"ditto.flickr.management.commands.fetch_flickr_originals.OriginalFilesMultiAccountFetcher" # noqa: E501
)
def test_error_output(self, fetcher):
fetcher.return_value.fetch.return_value = [
{"account": "<NAME>", "success": False, "messages": ["Oops"]}
]
call_command("fetch_flickr_originals", stdout=self.out, stderr=self.out_err)
self.assertIn(
"Phil Gyford: Failed to fetch Files: Oops", self.out_err.getvalue()
)
class FetchFlickrPhotosTestCase(TestCase):
def setUp(self):
self.out = StringIO()
self.out_err = StringIO()
def test_fail_with_no_args(self):
with self.assertRaises(CommandError):
call_command("fetch_flickr_photos")
def test_fail_with_account_only(self):
with self.assertRaises(CommandError):
call_command("fetch_flickr_photos", account="99999999999@N99")
def test_fail_with_non_numeric_days(self):
with self.assertRaises(CommandError):
call_command("fetch_flickr_photos", days="foo")
@patch(
"ditto.flickr.management.commands.fetch_flickr_photos.RecentPhotosMultiAccountFetcher" # noqa: E501
)
def test_sends_days_to_fetcher_with_account(self, fetcher):
call_command("fetch_flickr_photos", account="99999999999@N99", days="4")
fetcher.assert_called_with(nsid="99999999999@N99")
fetcher.return_value.fetch.assert_called_with(days=4)
@patch(
"ditto.flickr.management.commands.fetch_flickr_photos.RecentPhotosMultiAccountFetcher" # noqa: E501
)
def test_sends_days_to_fetcher_no_account(self, fetcher):
call_command("fetch_flickr_photos", days="4")
fetcher.assert_called_with(nsid=None)
fetcher.return_value.fetch.assert_called_with(days=4)
@patch(
"ditto.flickr.management.commands.fetch_flickr_photos.RecentPhotosMultiAccountFetcher" # noqa: E501
)
def test_sends_all_to_fetcher_with_account(self, fetcher):
call_command("fetch_flickr_photos", account="99999999999@N99", days="all")
fetcher.assert_called_with(nsid="99999999999@N99")
fetcher.return_value.fetch.assert_called_with(days="all")
@patch(
"ditto.flickr.management.commands.fetch_flickr_photos.RecentPhotosMultiAccountFetcher" # noqa: E501
)
def test_success_output(self, fetcher):
fetcher.return_value.fetch.return_value = [
{"account": "<NAME>", "success": True, "fetched": "40"}
]
call_command("fetch_flickr_photos", days="4", stdout=self.out)
self.assertIn("<NAME>: Fetched 40 Photos", self.out.getvalue())
@patch(
"ditto.flickr.management.commands.fetch_flickr_photos.RecentPhotosMultiAccountFetcher" # noqa: E501
)
def test_success_output_verbosity_0(self, fetcher):
fetcher.return_value.fetch.return_value = [
{"account": "<NAME>", "success": True, "fetched": "40"}
]
call_command("fetch_flickr_photos", days="4", verbosity=0, stdout=self.out)
self.assertEqual("", self.out.getvalue())
@patch(
"ditto.flickr.management.commands.fetch_flickr_photos.RecentPhotosMultiAccountFetcher" # noqa: E501
)
def test_error_output(self, fetcher):
fetcher.return_value.fetch.return_value = [
{"account": "<NAME>", "success": False, "messages": ["Oops"]}
]
call_command(
"fetch_flickr_photos", days="4", stdout=self.out, stderr=self.out_err
)
self.assertIn(
"<NAME>: Failed to fetch Photos: Oops", self.out_err.getvalue()
)
class FetchFlickrPhotosetsTestCase(TestCase):
def setUp(self):
self.out = StringIO()
self.out_err = StringIO()
@patch(
"ditto.flickr.management.commands.fetch_flickr_photosets.PhotosetsMultiAccountFetcher" # noqa: E501
)
def test_calls_fetcher_with_account(self, fetcher):
call_command("fetch_flickr_photosets", account="99999999999@N99")
fetcher.assert_called_with(nsid="99999999999@N99")
fetcher.return_value.fetch.assert_called_with()
@patch(
"ditto.flickr.management.commands.fetch_flickr_photosets.PhotosetsMultiAccountFetcher" # noqa: E501
)
def test_calls_fetcher_with_no_account(self, fetcher):
call_command("fetch_flickr_photosets")
fetcher.assert_called_with(nsid=None)
fetcher.return_value.fetch.assert_called_with()
@patch(
"ditto.flickr.management.commands.fetch_flickr_photosets.PhotosetsMultiAccountFetcher" # noqa: E501
)
def test_success_output(self, fetcher):
fetcher.return_value.fetch.return_value = [
{"account": "<NAME>", "success": True, "fetched": "40"}
]
call_command("fetch_flickr_photosets", stdout=self.out)
self.assertIn("<NAME>: Fetched 40 Photosets", self.out.getvalue())
@patch(
"ditto.flickr.management.commands.fetch_flickr_photosets.PhotosetsMultiAccountFetcher" # noqa: E501
)
def test_success_output_verbosity_0(self, fetcher):
fetcher.return_value.fetch.return_value = [
{"account": "<NAME>", "success": True, "fetched": "40"}
]
call_command("fetch_flickr_photosets", verbosity=0, stdout=self.out)
self.assertEqual("", self.out.getvalue())
@patch(
"ditto.flickr.management.commands.fetch_flickr_photosets.PhotosetsMultiAccountFetcher" # noqa: E501
)
def test_error_output(self, fetcher):
fetcher.return_value.fetch.return_value = [
{"account": "<NAME>", "success": False, "messages": ["Oops"]}
]
call_command("fetch_flickr_photosets", stdout=self.out, stderr=self.out_err)
self.assertIn(
"<NAME>: Failed to fetch Photosets: Oops", self.out_err.getvalue()
)
|
474410
|
pkgname = "firmware-ipw2200"
pkgver = "3.1"
pkgrel = 0
pkgdesc = "Firmware for the Intel PRO/Wireless 2200BG wifi cards"
maintainer = "q66 <<EMAIL>>"
license = "custom:ipw2200"
url = "http://ipw2200.sourceforge.net"
source = f"http://firmware.openbsd.org/firmware-dist/ipw2200-fw-{pkgver}.tgz"
sha256 = "c6818c11c18cc030d55ff83f64b2bad8feef485e7742f84f94a61d811a6258bd"
options = ["!strip", "foreignelf"]
def do_install(self):
for f in self.cwd.glob("*.fw"):
self.install_file(f, "usr/lib/firmware")
self.install_license("LICENSE.ipw2200-fw")
|
474415
|
from typing import Dict,List
from django.apps import apps
from django.forms import models
from automatic_crud.data_types import Instance,DjangoForm
def get_model(__app_name:str,__model_name:str) -> Instance:
# return the model corresponding to the application name and model name sent
return apps.get_model(app_label = __app_name,model_name = __model_name)
def get_object(model: Instance,pk: int):
    # return the active record (model_state=True) for the given pk, or None
instance = model.objects.filter(id = pk,model_state = True).first()
if instance:
return instance
return None
def get_model_fields_names(__model: Instance) -> List:
# return a list of field names from a model
return [name for name,_ in models.fields_for_model(__model).items()]
def get_queryset(__model:Instance) -> Dict:
# returns all records in a dictionary for a model
return __model.objects.all().values()
def get_form(form: DjangoForm,model: Instance) -> DjangoForm:
"""
Return a Django Form for a model, also a Django Form can be indicated
by default the Django Form will exclude the 'state' field from the model
"""
if form is not None:
return models.modelform_factory(model = model,form = form)
else:
return models.modelform_factory(model = model,exclude = ('model_state',))
def build_template_name(template_name: str,model: Instance,action:str) -> str:
"""
Build template name with app label from model, model name and action(list,create,update,detail)
"""
    if template_name is None:
template_name = '{0}/{1}_{2}.html'.format(
model._meta.app_label,
model._meta.object_name.lower(),
action
)
return template_name
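# Illustrative example (hypothetical app/model): for app 'store', model 'Product'
# and action 'list', build_template_name(None, Product, 'list') returns
# 'store/product_list.html'.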
|
474425
|
import pytest
from tsplib95 import fields as F
from tsplib95 import exceptions as E
@pytest.fixture
def field():
return F.DemandsField('foo')
@pytest.mark.parametrize('text,value,exc', [
('1 2', {1: 2}, None),
('1 2\n2 3', {1: 2, 2: 3}, None),
('2 x 0', None, E.ParsingError),
])
def test_parse(field, text, value, exc):
if exc:
with pytest.raises(exc):
field.parse(text)
else:
        assert field.parse(text) == value
|
474435
|
import maya.api.OpenMaya as om
def mobject_from_name(name):
sel_list = om.MSelectionList()
sel_list.add(name)
return sel_list.getDependNode(0)
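# Usage sketch (requires a running Maya session; 'persp' is a default camera):
#   mobj = mobject_from_name('persp')
#   print(om.MFnDependencyNode(mobj).name())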
|
474472
|
import select
class POLL_EVENT_TYPE:
READ = 1
WRITE = 2
ERROR = 4
class Poller(object):
def subscribe(self, descr, callback, eventMask):
raise NotImplementedError
def unsubscribe(self, descr):
raise NotImplementedError
def poll(self, timeout):
raise NotImplementedError
class SelectPoller(Poller):
def __init__(self):
self.__descrsRead = set()
self.__descrsWrite = set()
self.__descrsError = set()
self.__descrToCallbacks = {}
def subscribe(self, descr, callback, eventMask):
self.unsubscribe(descr)
if eventMask & POLL_EVENT_TYPE.READ:
self.__descrsRead.add(descr)
if eventMask & POLL_EVENT_TYPE.WRITE:
self.__descrsWrite.add(descr)
if eventMask & POLL_EVENT_TYPE.ERROR:
self.__descrsError.add(descr)
self.__descrToCallbacks[descr] = callback
def unsubscribe(self, descr):
self.__descrsRead.discard(descr)
self.__descrsWrite.discard(descr)
self.__descrsError.discard(descr)
self.__descrToCallbacks.pop(descr, None)
def poll(self, timeout):
rlist, wlist, xlist = select.select(list(self.__descrsRead),
list(self.__descrsWrite),
list(self.__descrsError),
timeout)
allDescrs = set(rlist + wlist + xlist)
rlist = set(rlist)
wlist = set(wlist)
xlist = set(xlist)
for descr in allDescrs:
event = 0
if descr in rlist:
event |= POLL_EVENT_TYPE.READ
if descr in wlist:
event |= POLL_EVENT_TYPE.WRITE
if descr in xlist:
event |= POLL_EVENT_TYPE.ERROR
self.__descrToCallbacks[descr](descr, event)
class PollPoller(Poller):
def __init__(self):
self.__poll = select.poll()
self.__descrToCallbacks = {}
def subscribe(self, descr, callback, eventMask):
pollEventMask = 0
if eventMask & POLL_EVENT_TYPE.READ:
pollEventMask |= select.POLLIN
if eventMask & POLL_EVENT_TYPE.WRITE:
pollEventMask |= select.POLLOUT
if eventMask & POLL_EVENT_TYPE.ERROR:
pollEventMask |= select.POLLERR
self.__descrToCallbacks[descr] = callback
self.__poll.register(descr, pollEventMask)
def unsubscribe(self, descr):
try:
self.__poll.unregister(descr)
except KeyError:
pass
def poll(self, timeout):
events = self.__poll.poll(timeout * 1000)
for descr, event in events:
eventMask = 0
if event & select.POLLIN:
eventMask |= POLL_EVENT_TYPE.READ
if event & select.POLLOUT:
eventMask |= POLL_EVENT_TYPE.WRITE
if event & select.POLLERR or event & select.POLLHUP:
eventMask |= POLL_EVENT_TYPE.ERROR
self.__descrToCallbacks[descr](descr, eventMask)
def createPoller(pollerType):
if pollerType == 'auto':
if hasattr(select, 'poll'):
return PollPoller()
return SelectPoller()
elif pollerType == 'poll':
return PollPoller()
elif pollerType == 'select':
return SelectPoller()
else:
raise Exception('unknown poller type')
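# Usage sketch (POSIX only, illustrative; not part of the original module):
# subscribe a readable descriptor, write to its peer, run one poll cycle.
if __name__ == '__main__':
    import os
    import socket
    a, b = socket.socketpair()
    def onReady(descr, eventType):
        if eventType & POLL_EVENT_TYPE.READ:
            print('readable:', os.read(descr, 16))
    poller = createPoller('auto')
    poller.subscribe(a.fileno(), onReady, POLL_EVENT_TYPE.READ)
    b.send(b'ping')
    poller.poll(1.0)  # dispatches onReady for a's descriptor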
|
474475
|
from typing import Callable
from typing import Tuple
import tensorflow as tf
from libspn_keras.sum_ops.base import SumOpBase
from libspn_keras.sum_ops.batch_scope_transpose import batch_scope_transpose
class SumOpSampleBackprop(SumOpBase):
"""
Sum op with hard EM signals in backpropagation when computed through TensorFlow's autograd engine.
Args:
sample_prob: Sampling probability in the range of [0, 1]. Sampling logits are taken from
the normalized log probability of the children of each sum.
"""
@batch_scope_transpose
def weighted_sum(
self,
x: tf.Tensor,
accumulators: tf.Tensor,
logspace_accumulators: bool,
normalize_in_forward_pass: bool,
) -> tf.Tensor:
"""
Compute a weighted sum.
Args:
x: Input Tensor
accumulators: Accumulators, can be seen as unnormalized representations of weights.
logspace_accumulators: Whether or not accumulators are represented in logspace.
normalize_in_forward_pass: Whether weights should be normalized during forward inference.
Returns:
A Tensor with the weighted sums.
Raises:
            NotImplementedError: When called with ``logspace_accumulators == True``.
"""
if logspace_accumulators:
raise NotImplementedError(
"Hard EM is only implemented for linear space accumulators"
)
with tf.name_scope("HardEMForwardPass"):
weights = (
self._to_log_weights(accumulators)
if normalize_in_forward_pass
else tf.math.log(accumulators)
)
# Pairwise product in forward pass
# [scope, decomp, batch, nodes_in] -> [scope, decomp, batch, 1, nodes_in]
x = tf.expand_dims(x, axis=3)
# [scope, decomp, nodes_in, nodes_out] -> [scope, decomp, 1, nodes_out, nodes_in]
weights = tf.expand_dims(tf.linalg.matrix_transpose(weights), axis=2)
# Max per sum for determining winning child + choosing the constant for numerical
# stability
# [scope, decomp, batch, nodes_out, nodes_in]
weighted_children = x + weights
max_weighted_child = tf.stop_gradient(
tf.reduce_max(weighted_children, axis=-1, keepdims=True)
)
# Perform log(sum(exp(...))) with the numerical stability trick
# [scope, decomp, batch, nodes_out]
out = tf.math.log(
tf.reduce_sum(tf.exp(weighted_children - max_weighted_child), axis=-1)
) + tf.squeeze(max_weighted_child, axis=-1)
@tf.custom_gradient
def _inner_fn(
x: tf.Tensor, accumulators: tf.Tensor
) -> Tuple[tf.Tensor, Callable[[tf.Tensor], Tuple[tf.Tensor, tf.Tensor]]]:
def grad(dy: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
# Determine winning child
num_in = tf.shape(x)[-1]
num_scopes = tf.shape(weights)[0]
num_decomps = tf.shape(weights)[1]
num_out = tf.shape(weights)[-2]
num_batch = tf.shape(x)[2]
xw_flat_outer = tf.reshape(
weighted_children,
[num_scopes * num_decomps * num_batch * num_out, num_in],
)
# Holds the index of the winning child per sum
samples = tf.random.categorical(xw_flat_outer, num_samples=1)
winning_child_per_sum = tf.reshape(
samples, [num_scopes, num_decomps, num_batch, num_out]
)
# Pass on the counts to the edges between child and parent
per_sample_weight_counts = dy[..., tf.newaxis] * tf.one_hot(
winning_child_per_sum, depth=num_in
)
child_counts = tf.reduce_sum(per_sample_weight_counts, axis=3)
weight_counts = tf.reduce_sum(per_sample_weight_counts, axis=2)
return child_counts, tf.linalg.matrix_transpose(weight_counts)
return out, grad
return _inner_fn(x, accumulators)
def weighted_children(
self,
x: tf.Tensor,
accumulators: tf.Tensor,
logspace_accumulators: bool,
normalize_in_forward_pass: bool,
) -> tf.Tensor:
"""
Compute weighted children, without summing over the final axis.
This is used for a RootSum to compute :math:`P(X,Y_i)` for any :math:`i`
Args:
x: Input Tensor
accumulators: Accumulators, can be seen as unnormalized representations of weights.
logspace_accumulators: Whether or not accumulators are represented in logspace.
normalize_in_forward_pass: Whether weights should be normalized during forward inference.
Raises:
NotImplementedError: Not implemented for SumOpSampleBackprop.
"""
raise NotImplementedError(
"Weighted children is not implemented for SumOpSampleBackprop"
)
def weighted_conv(
self,
x: tf.Tensor,
accumulators: tf.Tensor,
logspace_accumulators: bool,
normalize_in_forward_pass: bool,
) -> tf.Tensor:
"""
Compute weighted convolutions.
This is used for a Conv2DSum.
Args:
x: Input Tensor
accumulators: Accumulators, can be seen as unnormalized representations of weights.
logspace_accumulators: Whether or not accumulators are represented in logspace.
normalize_in_forward_pass: Whether weights should be normalized during forward inference.
        Raises:
            NotImplementedError: Always; weighted convolutions are not
                implemented for SumOpSampleBackprop.
        """
        raise NotImplementedError(
            "Weighted convolutions are not implemented for SumOpSampleBackprop"
        )
def default_logspace_accumulators(self) -> bool:
"""
Whether or not accumulators should be represented in log-space by default.
Returns:
True if the default representation is in logspace and False otherwise.
"""
return False
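# --- Usage illustration (not part of the library) ----------------------------
# A minimal standalone check of the stabilized log(sum(exp(...))) trick used in
# ``weighted_sum`` above; the tensor shape and values here are made up purely
# for demonstration.
if __name__ == "__main__":
    # [scope, decomp, batch, nodes_out, nodes_in]
    weighted_children = tf.math.log(tf.random.uniform([2, 1, 4, 3, 5]))
    max_child = tf.stop_gradient(
        tf.reduce_max(weighted_children, axis=-1, keepdims=True)
    )
    out = tf.math.log(
        tf.reduce_sum(tf.exp(weighted_children - max_child), axis=-1)
    ) + tf.squeeze(max_child, axis=-1)
    # Should match TensorFlow's built-in logsumexp up to numerical precision.
    tf.debugging.assert_near(out, tf.reduce_logsumexp(weighted_children, axis=-1))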
|
474513
|
import os
import re
import torch
import random
import time
import logging
import argparse
import subprocess
import numpy as np
from tqdm import tqdm, trange
from collections import defaultdict
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader, SequentialSampler
from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_MAP
from transformers import (BertConfig, BertTokenizer, AdamW, get_linear_schedule_with_warmup)
from modeling import RepBERT_Train
from dataset import MSMARCODataset, get_collate_function
from utils import generate_rank, eval_results
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s-%(levelname)s-%(name)s- %(message)s',
                    datefmt='%d %H:%M:%S',
                    level=logging.INFO)
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def save_model(model, output_dir, save_name, args):
save_dir = os.path.join(output_dir, save_name)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(save_dir)
torch.save(args, os.path.join(save_dir, 'training_args.bin'))
def train(args, model):
""" Train the model """
tb_writer = SummaryWriter(args.log_dir)
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_dataset = MSMARCODataset("train", args.msmarco_dir,
args.collection_memmap_dir, args.tokenize_dir,
args.max_query_length, args.max_doc_length)
    # NOTE: The sampler must be sequential! Examples are interleaved as Pos, Neg, Pos, Neg, ...
train_sampler = SequentialSampler(train_dataset)
collate_fn = get_collate_function(mode="train")
train_dataloader = DataLoader(train_dataset, sampler=train_sampler,
batch_size=args.train_batch_size, num_workers=args.data_num_workers,
collate_fn=collate_fn)
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,
num_training_steps=t_total)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch")
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
for epoch_idx, _ in enumerate(train_iterator):
epoch_iterator = tqdm(train_dataloader, desc="Iteration")
for step, (batch, _, _) in enumerate(epoch_iterator):
batch = {k:v.to(args.device) for k, v in batch.items()}
model.train()
outputs = model(**batch)
loss = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.evaluate_during_training and (global_step % args.training_eval_steps == 0):
mrr = evaluate(args, model, mode="dev", prefix="step_{}".format(global_step))
tb_writer.add_scalar('dev/MRR@10', mrr, global_step)
if args.logging_steps > 0 and global_step % args.logging_steps == 0:
tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
cur_loss = (tr_loss - logging_loss)/args.logging_steps
tb_writer.add_scalar('train/loss', cur_loss, global_step)
logging_loss = tr_loss
if args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
save_model(model, args.model_save_dir, 'ckpt-{}'.format(global_step), args)
def evaluate(args, model, mode, prefix):
eval_output_dir = args.eval_save_dir
if not os.path.exists(eval_output_dir):
os.makedirs(eval_output_dir)
eval_dataset = MSMARCODataset(mode, args.msmarco_dir,
args.collection_memmap_dir, args.tokenize_dir,
args.max_query_length, args.max_doc_length)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
collate_fn = get_collate_function(mode=mode)
eval_dataloader = DataLoader(eval_dataset, batch_size=args.eval_batch_size,
num_workers=args.data_num_workers, collate_fn=collate_fn)
# multi-gpu eval
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
output_file_path = f"{eval_output_dir}/{prefix}.{mode}.score.tsv"
with open(output_file_path, 'w') as outputfile:
for batch, qids, docids in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
with torch.no_grad():
batch = {k:v.to(args.device) for k, v in batch.items()}
outputs = model(**batch)
scores = torch.diagonal(outputs[0]).detach().cpu().numpy()
assert len(qids) == len(docids) == len(scores)
for qid, docid, score in zip(qids, docids, scores):
outputfile.write(f"{qid}\t{docid}\t{score}\n")
rank_output = f"{eval_output_dir}/{prefix}.{mode}.rank.tsv"
generate_rank(output_file_path, rank_output)
if mode == "dev":
mrr = eval_results(rank_output)
return mrr
def run_parse_args():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--task", choices=["train", "dev", "eval"], required=True)
parser.add_argument("--output_dir", type=str, default=f"./data/train")
parser.add_argument("--msmarco_dir", type=str, default=f"./data/msmarco-passage")
parser.add_argument("--collection_memmap_dir", type=str, default="./data/collection_memmap")
parser.add_argument("--tokenize_dir", type=str, default="./data/tokenize")
parser.add_argument("--max_query_length", type=int, default=20)
parser.add_argument("--max_doc_length", type=int, default=256)
## Other parameters
parser.add_argument("--eval_ckpt", type=int, default=None)
parser.add_argument("--per_gpu_eval_batch_size", default=26, type=int,)
parser.add_argument("--per_gpu_train_batch_size", default=26, type=int)
parser.add_argument("--gradient_accumulation_steps", type=int, default=2)
parser.add_argument("--no_cuda", action='store_true')
parser.add_argument('--seed', type=int, default=42)
parser.add_argument("--evaluate_during_training", action="store_true")
parser.add_argument("--training_eval_steps", type=int, default=5000)
parser.add_argument("--save_steps", type=int, default=5000)
parser.add_argument("--logging_steps", type=int, default=100)
parser.add_argument("--data_num_workers", default=0, type=int)
parser.add_argument("--learning_rate", default=3e-6, type=float)
parser.add_argument("--weight_decay", default=0.01, type=float)
parser.add_argument("--warmup_steps", default=10000, type=int)
parser.add_argument("--adam_epsilon", default=1e-8, type=float)
parser.add_argument("--max_grad_norm", default=1.0, type=float)
parser.add_argument("--num_train_epochs", default=1, type=int)
args = parser.parse_args()
time_stamp = time.strftime("%b-%d_%H:%M:%S", time.localtime())
args.log_dir = f"{args.output_dir}/log/{time_stamp}"
args.model_save_dir = f"{args.output_dir}/models"
args.eval_save_dir = f"{args.output_dir}/eval_results"
return args
def main():
args = run_parse_args()
logger.info(args)
# Setup CUDA, GPU
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
args.device = device
# Setup logging
logger.warning("Device: %s, n_gpu: %s", device, args.n_gpu)
# Set seed
set_seed(args)
if args.task == "train":
load_model_path = f"bert-base-uncased"
else:
assert args.eval_ckpt is not None
load_model_path = f"{args.model_save_dir}/ckpt-{args.eval_ckpt}"
config = BertConfig.from_pretrained(load_model_path)
model = RepBERT_Train.from_pretrained(load_model_path, config=config)
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Evaluation
if args.task == "train":
train(args, model)
else:
result = evaluate(args, model, args.task, prefix=f"ckpt-{args.eval_ckpt}")
print(result)
if __name__ == "__main__":
main()
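# Example invocations (paths and hyperparameters are illustrative only):
#   python train.py --task train --evaluate_during_training
#   python train.py --task dev --eval_ckpt 250000
# Evaluation requires --eval_ckpt pointing at a checkpoint saved by
# save_model() under <output_dir>/models/ckpt-<step>.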
|
474575
|
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin
from .forms import CustomUserCreationForm, CustomUserChangeForm
from forums.models import UserProfile
CustomUser = get_user_model()
class UserProfileInline(admin.StackedInline):
model = UserProfile
class CustomUserAdmin(UserAdmin):
add_form = CustomUserCreationForm
form = CustomUserChangeForm
model = CustomUser
inlines = [UserProfileInline]
list_display = ['email', 'username', 'is_staff', 'is_active','date_joined']
# list_filter = ('date_joined',)
list_filter = (
('is_staff', admin.BooleanFieldListFilter),
('is_superuser', admin.BooleanFieldListFilter),
('is_active', admin.BooleanFieldListFilter),
('date_joined', admin.DateFieldListFilter),
)
admin.site.register(CustomUser, CustomUserAdmin)
|
474630
|
import json
from zipfile import ZipFile
import pandas as pd
from download import download
from dicodile.config import DATA_HOME
def get_gait_data(subject=1, trial=1):
"""
Retrieve gait data from this `dataset`_.
Parameters
----------
subject: int, defaults to 1
Subject identifier.
Valid subject-trial pairs can be found in this `list`_.
trial: int, defaults to 1
Trial number.
Valid subject-trial pairs can be found in this `list`_.
Returns
-------
dict
A dictionary containing metadata and data relative
to a trial. The 'data' attribute contains time
series for the trial, as a Pandas dataframe.
.. _dataset: https://github.com/deepcharles/gait-data
.. _list:
https://github.com/deepcharles/gait-data/blob/master/code_list.json
"""
# coerce subject and trial
subject = int(subject)
trial = int(trial)
gait_dir = DATA_HOME / "gait"
gait_dir.mkdir(parents=True, exist_ok=True)
gait_zip = download(
"http://dev.ipol.im/~truong/GaitData.zip",
gait_dir / "GaitData.zip"
)
with ZipFile(gait_zip) as zf:
with zf.open(f"GaitData/{subject}-{trial}.json") as meta_file, \
zf.open(f"GaitData/{subject}-{trial}.csv") as data_file:
meta = json.load(meta_file)
data = pd.read_csv(data_file, sep=',', header=0)
meta['data'] = data
return meta
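# Usage sketch (fetches GaitData.zip into DATA_HOME / "gait" on first call;
# assuming download()'s default replace=False behavior, an existing archive
# is reused on later calls):
# meta = get_gait_data(subject=1, trial=1)
# print(meta['data'].head())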
|
474632
|
import numpy as np
from sklearn.base import RegressorMixin
from sklearn.linear_model.base import LinearModel
from sklearn.utils import check_X_y, check_array, as_float_array
from sklearn.utils.validation import check_is_fitted
from scipy.linalg import svd
import warnings
class BayesianLinearRegression(RegressorMixin,LinearModel):
'''
Superclass for Empirical Bayes and Variational Bayes implementations of
Bayesian Linear Regression Model
'''
    def __init__(self, n_iter, tol, fit_intercept, copy_X, verbose):
self.n_iter = n_iter
self.fit_intercept = fit_intercept
self.copy_X = copy_X
self.verbose = verbose
self.tol = tol
def _check_convergence(self, mu, mu_old):
'''
Checks convergence of algorithm using changes in mean of posterior
distribution of weights
'''
return np.sum(abs(mu-mu_old)>self.tol) == 0
def _center_data(self,X,y):
''' Centers data'''
X = as_float_array(X,self.copy_X)
# normalisation should be done in preprocessing!
X_std = np.ones(X.shape[1], dtype = X.dtype)
if self.fit_intercept:
X_mean = np.average(X,axis = 0)
y_mean = np.average(y,axis = 0)
X -= X_mean
y = y - y_mean
else:
X_mean = np.zeros(X.shape[1],dtype = X.dtype)
y_mean = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)
return X,y, X_mean, y_mean, X_std
def predict_dist(self,X):
'''
Calculates mean and variance of predictive distribution for each data
point of test set.(Note predictive distribution for each data point is
Gaussian, therefore it is uniquely determined by mean and variance)
Parameters
----------
x: array-like of size (n_test_samples, n_features)
Set of features for which corresponding responses should be predicted
Returns
-------
:list of two numpy arrays [mu_pred, var_pred]
mu_pred : numpy array of size (n_test_samples,)
Mean of predictive distribution
var_pred: numpy array of size (n_test_samples,)
Variance of predictive distribution
'''
# Note check_array and check_is_fitted are done within self._decision_function(X)
mu_pred = self._decision_function(X)
data_noise = 1./self.beta_
model_noise = np.sum(np.dot(X,self.eigvecs_)**2 * self.eigvals_,1)
var_pred = data_noise + model_noise
return [mu_pred,var_pred]
class EBLinearRegression(BayesianLinearRegression):
'''
Bayesian Regression with type II maximum likelihood (Empirical Bayes)
Parameters:
-----------
n_iter: int, optional (DEFAULT = 300)
Maximum number of iterations
tol: float, optional (DEFAULT = 1e-3)
Threshold for convergence
optimizer: str, optional (DEFAULT = 'fp')
Method for optimization , either Expectation Maximization or
Fixed Point Gull-MacKay {'em','fp'}. Fixed point iterations are
faster, but can be numerically unstable (especially in case of near perfect fit).
fit_intercept: bool, optional (DEFAULT = True)
If True includes bias term in model
    perfect_fit_tol: float (DEFAULT = 1e-6)
Prevents overflow of precision parameters (this is smallest value RSS can have).
( !!! Note if using EM instead of fixed-point, try smaller values
of perfect_fit_tol, for better estimates of variance of predictive distribution )
    alpha: float (DEFAULT = 1)
       Initial value of the precision parameter for coefficients (by default we
       define a very broad distribution)
    copy_X : boolean, optional (DEFAULT = True)
        If True, X will be copied; otherwise it may be overwritten.
verbose: bool, optional (Default = False)
If True at each iteration progress report is printed out
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of posterior distribution)
intercept_: float
Value of bias term (if fit_intercept is False, then intercept_ = 0)
alpha_ : float
Estimated precision of coefficients
beta_ : float
Estimated precision of noise
eigvals_ : array, shape = (n_features, )
Eigenvalues of covariance matrix (from posterior distribution of weights)
    eigvecs_ : array, shape = (n_features, n_features)
Eigenvectors of covariance matrix (from posterior distribution of weights)
'''
def __init__(self,n_iter = 300, tol = 1e-3, optimizer = 'fp', fit_intercept = True,
perfect_fit_tol = 1e-6, alpha = 1, copy_X = True, verbose = False):
super(EBLinearRegression,self).__init__(n_iter, tol, fit_intercept, copy_X, verbose)
        if optimizer not in ['em','fp']:
            raise ValueError('Optimizer can be either "em" or "fp"')
self.optimizer = optimizer
self.alpha = alpha
self.perfect_fit = False
self.scores_ = [np.NINF]
self.perfect_fit_tol = perfect_fit_tol
def fit(self, X, y):
'''
Fits Bayesian Linear Regression using Empirical Bayes
Parameters
----------
X: array-like of size [n_samples,n_features]
Matrix of explanatory variables (should not include bias term)
        y: array-like of size [n_samples]
           Vector of dependent variables.
Returns
-------
object: self
self
'''
# preprocess data
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y)
# precision of noise & and coefficients
alpha = self.alpha
var_y = np.var(y)
        # guard against zero variance (e.g. constant targets)
        if var_y == 0:
            beta = 1e-2
        else:
            beta = 1. / var_y
# to speed all further computations save svd decomposition and reuse it later
u,d,vt = svd(X, full_matrices = False)
Uy = np.dot(u.T,y)
dsq = d**2
mu = 0
for i in range(self.n_iter):
# find mean for posterior of w ( for EM this is E-step)
mu_old = mu
if n_samples > n_features:
mu = vt.T * d/(dsq+alpha/beta)
else:
# clever use of SVD here , faster for large n_features
mu = u * 1./(dsq + alpha/beta)
mu = np.dot(X.T,mu)
mu = np.dot(mu,Uy)
# precompute errors, since both methods use it in estimation
error = y - np.dot(X,mu)
sqdErr = np.sum(error**2)
if sqdErr / n_samples < self.perfect_fit_tol:
self.perfect_fit = True
warnings.warn( ('Almost perfect fit!!! Estimated values of variance '
'for predictive distribution are computed using only RSS'))
break
if self.optimizer == "fp":
gamma = np.sum(beta*dsq/(beta*dsq + alpha))
# use updated mu and gamma parameters to update alpha and beta
# !!! made computation numerically stable for perfect fit case
alpha = gamma / (np.sum(mu**2) + np.finfo(np.float32).eps )
beta = ( n_samples - gamma ) / (sqdErr + np.finfo(np.float32).eps )
else:
# M-step, update parameters alpha and beta to maximize ML TYPE II
eigvals = 1. / (beta * dsq + alpha)
alpha = n_features / ( np.sum(mu**2) + np.sum(1/eigvals) )
beta = n_samples / ( sqdErr + np.sum(dsq/eigvals) )
# if converged or exceeded maximum number of iterations => terminate
converged = self._check_convergence(mu_old,mu)
if self.verbose:
print( "Iteration {0} completed".format(i) )
if converged is True:
print("Algorithm converged after {0} iterations".format(i))
if converged or i==self.n_iter -1:
break
eigvals = 1./(beta * dsq + alpha)
self.coef_ = beta*np.dot(vt.T*d*eigvals ,Uy)
self._set_intercept(X_mean,y_mean,X_std)
self.beta_ = beta
self.alpha_ = alpha
self.eigvals_ = eigvals
self.eigvecs_ = vt.T
return self
# ============================== VBLR =========================================
def gamma_mean(a,b):
'''
Computes mean of gamma distribution
Parameters
----------
a: float
Shape parameter of Gamma distribution
b: float
Rate parameter of Gamma distribution
Returns
-------
: float
Mean of Gamma distribution
'''
return float(a) / b
class VBLinearRegression(BayesianLinearRegression):
'''
Implements Bayesian Linear Regression using mean-field approximation.
Assumes gamma prior on precision parameters of coefficients and noise.
Parameters:
-----------
n_iter: int, optional (DEFAULT = 100)
Maximum number of iterations for KL minimization
tol: float, optional (DEFAULT = 1e-3)
Convergence threshold
fit_intercept: bool, optional (DEFAULT = True)
If True will use bias term in model fitting
a: float, optional (Default = 1e-4)
Shape parameter of Gamma prior for precision of coefficients
b: float, optional (Default = 1e-4)
Rate parameter of Gamma prior for precision coefficients
c: float, optional (Default = 1e-4)
Shape parameter of Gamma prior for precision of noise
d: float, optional (Default = 1e-4)
Rate parameter of Gamma prior for precision of noise
verbose: bool, optional (Default = False)
If True at each iteration progress report is printed out
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of posterior distribution)
intercept_: float
Value of bias term (if fit_intercept is False, then intercept_ = 0)
alpha_ : float
Mean of precision of coefficients
beta_ : float
Mean of precision of noise
eigvals_ : array, shape = (n_features, )
Eigenvalues of covariance matrix (from posterior distribution of weights)
    eigvecs_ : array, shape = (n_features, n_features)
Eigenvectors of covariance matrix (from posterior distribution of weights)
'''
def __init__(self, n_iter = 100, tol =1e-4, fit_intercept = True,
a = 1e-4, b = 1e-4, c = 1e-4, d = 1e-4, copy_X = True,
verbose = False):
super(VBLinearRegression,self).__init__(n_iter, tol, fit_intercept, copy_X,
verbose)
self.a,self.b = a, b
self.c,self.d = c, d
def fit(self,X,y):
'''
Fits Variational Bayesian Linear Regression Model
Parameters
----------
X: array-like of size [n_samples,n_features]
Matrix of explanatory variables (should not include bias term)
        y: array-like of size [n_samples]
           Vector of dependent variables.
Returns
-------
object: self
self
'''
# preprocess data
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y)
# SVD decomposition, done once , reused at each iteration
u,D,vt = svd(X, full_matrices = False)
dsq = D**2
UY = np.dot(u.T,y)
# some parameters of Gamma distribution have closed form solution
a = self.a + 0.5 * n_features
c = self.c + 0.5 * n_samples
b,d = self.b, self.d
# initial mean of posterior for coefficients
mu = 0
for i in range(self.n_iter):
# update parameters of distribution Q(weights)
e_beta = gamma_mean(c,d)
e_alpha = gamma_mean(a,b)
mu_old = np.copy(mu)
mu,eigvals = self._posterior_weights(e_beta,e_alpha,UY,dsq,u,vt,D,X)
# update parameters of distribution Q(precision of weights)
b = self.b + 0.5*( np.sum(mu**2) + np.sum(eigvals))
# update parameters of distribution Q(precision of likelihood)
sqderr = np.sum((y - np.dot(X,mu))**2)
xsx = np.sum(dsq*eigvals)
d = self.d + 0.5*(sqderr + xsx)
# check convergence
converged = self._check_convergence(mu,mu_old)
if self.verbose is True:
print("Iteration {0} is completed".format(i))
if converged is True:
print("Algorithm converged after {0} iterations".format(i))
# terminate if convergence or maximum number of iterations are achieved
if converged or i==(self.n_iter-1):
break
# save necessary parameters
self.beta_ = gamma_mean(c,d)
self.alpha_ = gamma_mean(a,b)
self.coef_, self.eigvals_ = self._posterior_weights(self.beta_, self.alpha_, UY,
dsq, u, vt, D, X)
self._set_intercept(X_mean,y_mean,X_std)
self.eigvecs_ = vt.T
return self
def _posterior_weights(self, e_beta, e_alpha, UY, dsq, u, vt, d, X):
'''
Calculates parameters of approximate posterior distribution
of weights
'''
# eigenvalues of covariance matrix
sigma = 1./ (e_beta*dsq + e_alpha)
# mean of approximate posterior distribution
n_samples, n_features = X.shape
if n_samples > n_features:
mu = vt.T * d/(dsq + e_alpha/e_beta)# + np.finfo(np.float64).eps)
else:
mu = u * 1./(dsq + e_alpha/e_beta)# + np.finfo(np.float64).eps)
mu = np.dot(X.T,mu)
mu = np.dot(mu,UY)
return mu,sigma
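# --- Usage illustration (not part of the module) ------------------------------
# Synthetic-data sketch; numbers are illustrative only, and this relies on the
# legacy scikit-learn that the imports above target.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = rng.randn(200, 5)
    w_true = np.array([1.0, 0.0, -2.0, 0.5, 0.0])
    y = X.dot(w_true) + 0.1 * rng.randn(200)
    eb = EBLinearRegression().fit(X, y)
    vb = VBLinearRegression().fit(X, y)
    mu_pred, var_pred = eb.predict_dist(X)  # per-point predictive mean / variance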
|
474725
|
import importlib.metadata as im
import pytest
@pytest.mark.parametrize("name", ("foo-bar", "foo_bar", "Foo-Bar"))
def test_distribution(name):
assert im.distribution(name) is not None
def test_unknown_package():
with pytest.raises(im.PackageNotFoundError):
im.distribution("bar")
def test_version():
assert im.version("foo-bar") == "1.2.3"
def test_metadata():
assert im.metadata("foo-bar") is not None
def test_files():
files = im.files("foo-bar")
assert len(files) == 1
assert files[0].name == "foo_bar.py"
assert files[0].size == 20
def test_requires():
assert im.requires("foo-bar") == ["Werkzeug (>=0.15)", "Jinja2 (>=2.10.1)"]
def test_entry_points():
    entry_points = im.entry_points()
    assert "console_scripts" in entry_points
    assert any(
        ep.name == "foo_cli" and ep.value == "foo_bar:cli"
        for ep in entry_points["console_scripts"]
    )
|
474780
|
from datetime import date, timedelta
from maintenance.models import TaskSchedule
from logical.models import Database
def register_schedule_task_restart_database(hostnames):
today = date.today()
try:
databases = Database.objects.filter(
databaseinfra__instances__hostname__hostname__in=hostnames
).distinct()
for database in databases:
print("Checking database {}".format(database.name))
            scheduled_tasks = TaskSchedule.objects.filter(
                status=TaskSchedule.SCHEDULED,
                database=database,
                method_path='restart_database'
            )
            if scheduled_tasks.exists():
print("Already scheduled for database {}!".format(
database.name)
)
else:
task = TaskSchedule.objects.create(
method_path='restart_database',
scheduled_for=TaskSchedule.next_maintenance_window(
today + timedelta(days=2),
database.databaseinfra.maintenance_window,
database.databaseinfra.maintenance_day
),
database=database
)
task.send_mail(is_new=True)
print("Done")
except Exception as err:
print("Error: {}".format(err))
|
474837
|
from .variable_plotter import VarPlot
from .base import get_os_friendly_name
class MonPlot(VarPlot):
def plot(self, keyx="clock", keyy=None):
# assert len(keys) == 1, "We only support a single key at the moment."
# key = keys[0]
# abscissa_key = self.options.get("abscissa_key", "clock") # episode | epoch | frame | clock
fig, ax = self.init_plot()
limits = [None, None, None, None]
for subloaders in self.loaders:
monlogs = [l.getMonlogLoader() for l in subloaders]
new_limit = self._plot_stack(fig, ax, monlogs, keyx, keyy)
limits = self._update_plot_limits(limits, new_limit)
self._set_plot_options(fig, ax, keyx, limits)
self.close_plot(fig, ax, path=self.output_dir, name=get_os_friendly_name(keyy))
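# The commented paths below appear to be the monitor series recorded in the
# logs, i.e. candidate values for ``keyy``: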
# "/cpu/per"
# "/cpu/all/total"
# "/cpu/memory/total"
# "/cpu/memory/used"
# "/cpu/memory/mem"
# "/gpu/load"
# "/gpu/memory/total"
# "/gpu/memory/used"
|
474917
|
import json
import os
import horovod.tensorflow as hvd
hvd.init()
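# This script appears to target Amazon SageMaker, where /opt/ml/model is the
# directory whose contents are exported as the training job's model artifact;
# each Horovod worker drops a marker file named after its local and global rank.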
with open(os.path.join('/opt/ml/model/local-rank-%s-rank-%s' % (hvd.local_rank(), hvd.rank())), 'w+') as f:
basic_info = {'local-rank': hvd.local_rank(), 'rank': hvd.rank(), 'size': hvd.size()}
print(basic_info)
json.dump(basic_info, f)
|
474919
|
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.schema import FetchedValue
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.hybrid import hybrid_property
from app.api.utils.models_mixins import Base, AuditMixin
from app.extensions import db
from app.api.now_applications.models.activity_summary.activity_summary_base import ActivitySummaryBase
class SandGravelQuarryOperation(ActivitySummaryBase):
__tablename__ = "sand_gravel_quarry_operation"
__mapper_args__ = {
'polymorphic_identity': 'sand_gravel_quarry_operation', ## type code
}
activity_summary_id = db.Column(
db.Integer, db.ForeignKey('activity_summary.activity_summary_id'), primary_key=True)
average_overburden_depth = db.Column(db.Numeric(14, 2))
average_overburden_depth_unit_type_code = db.Column(
db.String, db.ForeignKey('unit_type.unit_type_code'), nullable=False)
average_top_soil_depth = db.Column(db.Numeric(14, 2))
average_top_soil_depth_unit_type_code = db.Column(
db.String, db.ForeignKey('unit_type.unit_type_code'), nullable=False)
stability_measures_description = db.Column(db.String)
is_agricultural_land_reserve = db.Column(db.Boolean)
agri_lnd_rsrv_permit_application_number = db.Column(db.String)
has_local_soil_removal_bylaw = db.Column(db.Boolean)
community_plan = db.Column(db.String)
land_use_zoning = db.Column(db.String)
proposed_land_use = db.Column(db.String)
total_mineable_reserves = db.Column(db.Numeric(14, 2))
total_mineable_reserves_unit_type_code = db.Column(
db.String, db.ForeignKey('unit_type.unit_type_code'), nullable=False)
total_annual_extraction = db.Column(db.Numeric(14, 2))
total_annual_extraction_unit_type_code = db.Column(
db.String, db.ForeignKey('unit_type.unit_type_code'), nullable=False)
average_groundwater_depth = db.Column(db.Numeric(14, 1))
average_groundwater_depth_unit_type_code = db.Column(
db.String, db.ForeignKey('unit_type.unit_type_code'))
has_groundwater_from_existing_area = db.Column(db.Boolean)
has_groundwater_from_test_pits = db.Column(db.Boolean)
has_groundwater_from_test_wells = db.Column(db.Boolean)
groundwater_from_other_description = db.Column(db.String)
groundwater_protection_plan = db.Column(db.String)
nearest_residence_distance = db.Column(db.Numeric(14, 2))
nearest_residence_distance_unit_type_code = db.Column(
db.String, db.ForeignKey('unit_type.unit_type_code'), nullable=False)
nearest_water_source_distance = db.Column(db.Numeric(14, 2))
nearest_water_source_distance_unit_type_code = db.Column(
db.String, db.ForeignKey('unit_type.unit_type_code'), nullable=False)
noise_impact_plan = db.Column(db.String)
secure_access_plan = db.Column(db.String)
dust_impact_plan = db.Column(db.String)
visual_impact_plan = db.Column(db.String)
progressive_reclamation = db.Column(db.Boolean)
max_unreclaimed = db.Column(db.Numeric)
max_unreclaimed_unit_type_code = db.Column(
db.String, db.ForeignKey('unit_type.unit_type_code'))
reclamation_backfill_detail = db.Column(db.String)
proposed_activity_description = db.Column(db.String)
work_year_info = db.Column(db.String)
details = db.relationship(
'SandGravelQuarryOperationDetail',
secondary='activity_summary_detail_xref',
load_on_pending=True)
# TODO replace with value from vFCBC
# If the other description is provided, the other option has been selected.
    @hybrid_property
    def has_ground_water_from_other(self):
        # ``!= None`` (rather than ``is not None``) keeps this usable as a SQL
        # expression, where SQLAlchemy renders it as IS NOT NULL.
        return self.groundwater_from_other_description != None  # noqa: E711
@hybrid_property
def calculated_total_disturbance(self):
return self.calculate_total_disturbance_area(self.details)
def __repr__(self):
return '<SandGravelQuarryOperation %r>' % self.activity_id
|
474933
|
from builtins import object
from yapsy.IPlugin import IPlugin
import jsonpickle
import lxml.etree as etree
from elasticsearch_dsl import Search
from elasticsearch_dsl.query import Match
from opentargets_urlzsource import URLZSource
from mrtarget.common.UniprotIO import Parser
import logging
class ReactomeRetriever(object):
def __init__(self, es, index):
self.es = es
self.index = index
def get_reaction(self, reaction_id):
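        # Fetch the reaction document by id; track_total_hits=True asks
        # Elasticsearch 7+ for an exact hit count instead of the default capped
        # estimate, so the hits.total.value check below is reliable.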
response = Search().using(self.es).index(self.index).extra(track_total_hits=True).query(Match(_id=reaction_id))[0:1].execute()
if int(response.hits.total.value) > 0 and len(response.hits) > 0:
return response.hits[0].to_dict()
else:
return None
class Uniprot(IPlugin):
def __init__(self, *args, **kwargs):
self._logger = logging.getLogger(__name__)
self.missing_ensembl = set()
self.missing_reactome = set()
def load_uniprot_entry(self, gene, seqrec, reactome_retriever):
gene.uniprot_id = seqrec.id
gene.is_in_swissprot = True
if seqrec.dbxrefs:
gene.dbxrefs.extend(seqrec.dbxrefs)
gene.dbxrefs= sorted(list(set(gene.dbxrefs)))
for k, v in list(seqrec.annotations.items()):
if k == 'accessions':
#gene.uniprot_accessions = v
# to avoid NoneType error
gene.uniprot_accessions.extend(v)
acc_set = set(gene.uniprot_accessions)
gene.uniprot_accessions = list(acc_set)
if k == 'keywords':
gene.uniprot_keywords = v
if k == 'comment_function':
gene.uniprot_function = v
if k == 'comment_similarity':
gene.uniprot_similarity = v
if k == 'comment_subunit':
gene.uniprot_subunit = v
if k == 'comment_subcellularlocation_location':
gene.uniprot_subcellular_location = v
if k == 'comment_pathway':
gene.uniprot_pathway = v
if k == 'gene_name_primary':
if not gene.approved_symbol:
gene.approved_symbol= v
elif v!= gene.approved_symbol:
if v not in gene.symbol_synonyms:
gene.symbol_synonyms.append(v)
if k == 'gene_name_synonym':
for symbol in v:
if symbol not in gene.symbol_synonyms:
gene.symbol_synonyms.append(symbol)
if k.startswith('recommendedName'):
gene.name_synonyms.extend(v)
if k.startswith('alternativeName'):
gene.name_synonyms.extend(v)
gene.name_synonyms.append(seqrec.description)
gene.name_synonyms = list(set(gene.name_synonyms))
if 'GO' in seqrec.annotations['dbxref_extended']:
gene.go = seqrec.annotations['dbxref_extended']['GO']
if 'Reactome' in seqrec.annotations['dbxref_extended']:
gene.reactome = seqrec.annotations['dbxref_extended']['Reactome']
for r in gene.reactome:
reaction = reactome_retriever.get_reaction(r['id'])
if reaction is None:
self.missing_reactome.add(r["id"])
else:
r['value'] = {}
r['value']['pathway name'] = reaction["label"]
r['value']['pathway types'] = []
type_codes =[]
for path in reaction['path']:
if len(path) > 1:
type_codes.append(path[1])
for type_code in type_codes:
r['value']['pathway types'].append({
'pathway type':type_code,
'pathway type name': reactome_retriever.get_reaction(type_code)['label']
})
if 'PDB' in seqrec.annotations['dbxref_extended']:
gene.pdb = seqrec.annotations['dbxref_extended']['PDB']
if 'ChEMBL' in seqrec.annotations['dbxref_extended']:
gene.chembl = seqrec.annotations['dbxref_extended']['ChEMBL']
if 'DrugBank' in seqrec.annotations['dbxref_extended']:
gene.drugbank = seqrec.annotations['dbxref_extended']['DrugBank']
if 'Pfam' in seqrec.annotations['dbxref_extended']:
gene.pfam = seqrec.annotations['dbxref_extended']['Pfam']
if 'InterPro' in seqrec.annotations['dbxref_extended']:
gene.interpro = seqrec.annotations['dbxref_extended']['InterPro']
def generate_uniprot(self, uri):
with URLZSource(uri).open() as r_file:
for event, elem in etree.iterparse(r_file, events=("end",),
tag='{http://uniprot.org/uniprot}entry'):
#parse the XML into an object
entry = Parser(elem, return_raw_comments=False).parse()
elem.clear()
yield entry
def merge_data(self, genes, es, r_server, data_config, es_config):
reactome_retriever = ReactomeRetriever(es, es_config.rea.name)
c = 0
for seqrec in self.generate_uniprot(data_config.uniprot_uri):
c += 1
if c % 1000 == 0:
self._logger.info("%i entries retrieved for uniprot" % c)
if 'Ensembl' in seqrec.annotations['dbxref_extended']:
ensembl_data = seqrec.annotations['dbxref_extended']['Ensembl']
ensembl_genes_id = []
for ens_data_point in ensembl_data:
ensembl_genes_id.append(ens_data_point['value']['gene ID'])
ensembl_genes_id = list(set(ensembl_genes_id))
success = False
for ensembl_id in ensembl_genes_id:
if ensembl_id in genes:
gene = genes.get_gene(ensembl_id)
self.load_uniprot_entry(gene, seqrec, reactome_retriever)
genes.add_gene(gene)
success = True
break
if not success:
self._logger.debug(
'Cannot find ensembl id(s) %s coming from uniprot entry %s in available geneset' % (
ensembl_genes_id, seqrec.id))
else:
self.missing_ensembl.add(seqrec.id)
        for reactome_id in sorted(self.missing_reactome):
            self._logger.warning("Unable to find Reactome entry for %s", reactome_id)
        for uniprot_id in sorted(self.missing_ensembl):
            self._logger.warning("Unable to find Ensembl id for %s", uniprot_id)
self._logger.info("%i entries retrieved for uniprot" % c)
# self._logger.info("STATS AFTER UNIPROT MAPPING:\n" + self.genes.get_stats())
|
474958
|
from multiprocessing import Process
from bilibili import Bilibili
from config import config
from mirrativ import Mirrativ
from openrec import Openrec
from tools import check_ddir_is_exist, get_logger
from twitcasting import Twitcasting
from youtube import Youtube, start_temp_daemon
logger = get_logger()
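# Expected config shape, inferred from the lookups in Event.gen_process below
# (a sketch, not the authoritative schema):
# config = {
#     'youtube': {'enable': True, 'enable_temp': False, 'users': [...]},
#     'twitcasting': {'enable': True, 'users': [...]},
#     'openrec': {'enable': False, 'users': [...]},
#     'mirrativ': {'enable': False, 'users': [...]},
#     'bilibili': {'enable': False, 'users': [...]},
# }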
class Event:
def __init__(self):
self.events_multi = []
self.gen_process()
logger.info(self.events_multi)
def start(self):
self.start_multi_task()
if config['youtube']['enable_temp']:
            temp = Process(target=start_temp_daemon)
            temp.start()  # start() forks the daemon; run() would block this process
for event in self.events_multi:
event.join()
def gen_process(self):
if config['youtube']['enable']:
for user_config in config['youtube']['users']:
y = Youtube(user_config)
self.events_multi.append(y)
if config['twitcasting']['enable']:
for user_config in config['twitcasting']['users']:
t = Twitcasting(user_config)
self.events_multi.append(t)
if config['openrec']['enable']:
for user_config in config['openrec']['users']:
o = Openrec(user_config)
self.events_multi.append(o)
if config['mirrativ']['enable']:
for user_config in config['mirrativ']['users']:
m = Mirrativ(user_config)
self.events_multi.append(m)
if config['bilibili']['enable']:
for user_config in config['bilibili']['users']:
b = Bilibili(user_config)
self.events_multi.append(b)
def start_multi_task(self):
for proc in self.events_multi:
proc.start()
if __name__ == '__main__':
check_ddir_is_exist()
e = Event()
e.start()
|
474975
|
import json
import io
def util_load_json(path):
with io.open(path, mode='r', encoding='utf-8') as f:
return json.loads(f.read())
def test_ip_command(requests_mock):
from IPQualityScore import Client, ip_command
mock_response = util_load_json('test_data/ip_response.json')
requests_mock.get('https://ipqualityscore.com/api/json/ip/api_key_here/192.168.3.11', json=mock_response)
client = Client(
base_url='https://ipqualityscore.com/api/json/ip/api_key_here',
verify=False)
ip_suspicious_score_threshold = 75
ip_malicious_score_threshold = 85
reliability = "A - Completely reliable"
args = {
"ip": "192.168.3.11"
}
response = ip_command(client, args, ip_suspicious_score_threshold, ip_malicious_score_threshold, reliability)
assert response[0].outputs_prefix == 'IPQualityScore.IP'
def test_email_command(requests_mock):
from IPQualityScore import Client, email_command
mock_response = util_load_json('test_data/email_response.json')
requests_mock.get('https://ipqualityscore.com/api/json/email/api_key_here/[email protected]', json=mock_response)
client = Client(
base_url='https://ipqualityscore.com/api/json/email/api_key_here',
verify=False)
email_suspicious_score_threshold = 75
email_malicious_score_threshold = 85
reliability = "A - Completely reliable"
args = {
"email": "<EMAIL>"
}
response = email_command(client, args, email_suspicious_score_threshold, email_malicious_score_threshold,
reliability)
assert response[0].outputs_prefix == 'IPQualityScore.Email'
def test_url_command(requests_mock):
from IPQualityScore import Client, url_command
mock_response = util_load_json('test_data/url_response.json')
requests_mock.get('https://ipqualityscore.com/api/json/url/api_key_here/https%3A%2F%2Fgoogle.com',
json=mock_response)
client = Client(
base_url='https://ipqualityscore.com/api/json/url/api_key_here',
verify=False)
url_suspicious_score_threshold = 75
url_malicious_score_threshold = 85
reliability = "A - Completely reliable"
args = {
"url": "https://google.com"
}
response = url_command(client, args, url_suspicious_score_threshold, url_malicious_score_threshold,
reliability)
assert response[0].outputs_prefix == 'IPQualityScore.Url'
|
474981
|
from pyramid.view import view_config
import logging
log = logging.getLogger(__name__)
@view_config(route_name='appcache', http_cache=0,
renderer='../templates/geoportailv3.appcache')
def appcache(request):
request.response.content_type = 'text/cache-manifest'
return {}
|
474983
|
import configparser
from os.path import join
def config(path, section, option, name='scrapy.cfg', default=None):
    """
    Parse a scrapy config file.
    :param path: directory that contains the config file
    :param section: config section name
    :param option: option name within the section
    :param name: config file name
    :param default: value returned when the section or option is missing
    :return: the option value, or ``default`` if not found
    """
    try:
        cf = configparser.ConfigParser()
        cfg_path = join(path, name)
        cf.read(cfg_path)
        return cf.get(section, option)
    except (configparser.NoSectionError, configparser.NoOptionError):
        return default
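# Example (assumes a scrapy.cfg in the given directory):
# project = config('.', 'deploy', 'project', default='default_project')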
|
474988
|
from dataclasses import dataclass
from .flyweight import Flyweight
@dataclass(frozen=True)
class Gametime(Flyweight):
"""
NHL gametime object.
This object represents a unique time of the game. There are convenience properties to convert
the gametime into convenient formats.
Parameters
----------
period : int
The period of the game.
period_sec : int
The number of elapsed seconds in the period.
"""
__slots__ = ["period", "period_sec"]
_instances = {}
period: int
"""int: Game period. 1-3 for regulation. 4+ for overtime."""
period_sec: int
"""int: Elapsed seconds of the period."""
@classmethod
def _key(cls, period, period_sec, *args, **kwargs):
return (period, period_sec)
@classmethod
def has_key(cls, period, period_sec):
return super().has_key(period, period_sec)
@classmethod
def from_key(cls, period, period_sec):
return super().from_key(period, period_sec)
def __repr__(self):
return "<nhl.Gametime: {} {:02d}:{:02d}>".format(self.period_str, *self.period_min_sec)
@property
def sec(self):
"""int: Elapsed seconds of the game."""
return (self.period - 1)*20*60 + self.period_sec
@property
def min_sec(self):
"""(int, int): Elapsed minutes and seconds of the game."""
return (self.sec // 60, self.sec % 60)
@property
def period_str(self):
"""str: Period number as string (i.e. "2nd")"""
if self.period == 1:
return "1st"
elif self.period == 2:
return "2nd"
elif self.period == 3:
return "3rd"
else:
return "{}th".format(self.period)
@property
def period_min_sec(self):
"""(int, int): Elapsed minutes and seconds of the period."""
return (self.period_sec // 60, self.period_sec % 60)
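# Usage sketch (assumes the Flyweight base permits direct construction):
# gt = Gametime(2, 465)
# gt.period_str      # "2nd"
# gt.period_min_sec  # (7, 45)
# gt.sec             # 1665 elapsed game seconds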
|
475076
|
from django.conf.urls import include
from django.urls import path
from rest_framework.routers import DefaultRouter
from search.views import NewsDocumentView
app_name = "search"
router = DefaultRouter()
router.register(r"", viewset=NewsDocumentView, basename="search")
urlpatterns = [
path("", include(router.urls)),
]
|
475078
|
import pytest
from flex.exceptions import ValidationError
from flex.validation.request import (
validate_request,
)
from flex.error_messages import MESSAGES
from flex.constants import (
ARRAY,
BOOLEAN,
CSV,
INTEGER,
PATH,
PIPES,
QUERY,
SSV,
STRING,
TSV,
)
from tests.factories import (
SchemaFactory,
RequestFactory,
)
from tests.utils import assert_message_in_errors
def test_request_parameter_validation():
"""
Test that request validation does parameter validation. This is largely a
smoke test to ensure that parameter validation is wired into request
validation correctly.
"""
schema = SchemaFactory(
paths={
'/get/{id}/': {
'parameters': [
{
'name': 'id',
'in': PATH,
'description': 'id',
'required': True,
'type': STRING,
'format': 'uuid',
},
{
'name': 'page',
'in': QUERY,
'type': INTEGER,
},
],
'get': {
'responses': {200: {'description': "Success"}},
},
},
},
)
request = RequestFactory(url='http://www.example.com/get/32/?page=abcd')
with pytest.raises(ValidationError) as err:
validate_request(
request=request,
schema=schema,
)
assert_message_in_errors(
MESSAGES['format']['invalid_uuid'],
err.value.detail,
'method.parameters.path.id.format',
)
assert_message_in_errors(
MESSAGES['type']['invalid'],
err.value.detail,
'query.page.type',
)
def test_request_parameter_validation_with_base_path():
"""
Test that path parameter validation works even when there is a base path in
the api.
"""
schema = SchemaFactory(
basePath='/api/v1',
paths={
'/get/{id}/': {
'parameters': [
{
'name': 'id',
'in': PATH,
'description': 'id',
'required': True,
'type': STRING,
},
],
'get': {
'responses': {200: {'description': "Success"}},
},
},
},
)
request = RequestFactory(url='http://www.example.com/api/v1/get/32/')
validate_request(
request=request,
schema=schema,
)
@pytest.mark.parametrize(
'type_,value',
(
(BOOLEAN, 'true'),
(INTEGER, '123'),
)
)
def test_request_parameter_validation_typecasting(type_, value):
"""
Test that request validation does parameter validation for all parameters that require
typecasting since query params are generally treated as strings.
"""
schema = SchemaFactory(
paths={
'/get/': {
'parameters': [
{
'name': 'id',
'in': QUERY,
'type': type_,
}
],
'get': {
'responses': {"200": {'description': "Success"}},
},
},
},
)
request = RequestFactory(url='http://www.example.com/get/?id={}'.format(value))
validate_request(
request=request,
schema=schema,
)
@pytest.mark.parametrize(
'format_,value',
(
(CSV, '1,2,3'),
(SSV, '1 2 3'),
(TSV, '1\t2\t3'),
(PIPES, '1|2|3'),
),
)
def test_request_parameter_array_extraction(format_, value):
schema = SchemaFactory(
paths={
'/get/': {
'get': {
'responses': {'200': {'description': "Success"}},
'parameters': [
{
'name': 'id',
'in': QUERY,
'type': ARRAY,
'collectionFormat': format_,
'minItems': 3,
'maxItems': 3,
'items': {
'type': INTEGER,
'minimum': 1,
'maximum': 3,
},
},
],
},
},
},
)
request = RequestFactory(url='http://www.example.com/get/?id={}'.format(value))
validate_request(
request=request,
schema=schema,
)
|
475091
|
import os
import zipfile
from shlex import split
from subprocess import PIPE, Popen
from uuid import uuid4
from django.conf import settings
from rest_framework import exceptions
def _linuxCopy(src, dest):
    # escape spaces so shlex.split keeps each path as a single token
    src_path = src.replace(' ', '\\ ')
    dest_path = dest.replace(' ', '\\ ')
# -R, -r, --recursive
# copy directories recursively
# -u, --update
# copy only when the SOURCE file is newer
# than the destination file or when the
# destination file is missing
# -v, --verbose
# explain what is being done
cmd = f'cp -ruv {src_path} {dest_path}'
popen = Popen(split(cmd), stdout=PIPE, universal_newlines=True)
_, stderr = popen.communicate()
if stderr:
raise exceptions.NotAcceptable(stderr)
return True
def _uploadFile(action, my_file, folder_path, name, chunk, overwrite=False):
file_path = os.path.join(
settings.MEDIA_ROOT, folder_path if action == 'publishItems' else 'tmp', name)
    while chunk == 0 and os.path.isfile(file_path):
if overwrite:
# delete old file
os.remove(file_path)
else:
# generate new file
tempName = name.split('.')
name = f'{tempName[0]}_copy.{tempName[1]}'
file_path = os.path.join(settings.MEDIA_ROOT, 'tmp', name)
# Appends all chunks of this request (chunks of chunks)
# UI sends multiple requests with multiple chunks each per file
with open(file_path, 'ab+') as temp_file:
for chunk in my_file.chunks():
temp_file.write(chunk)
return file_path
def _zipFiles(sources, sources_folder, target_file, overwrite=False, denied_folders=None):
# check if target_file is a path or a temporary file
    if isinstance(target_file, str):
if os.path.isfile(target_file):
if overwrite:
# delete file
os.remove(target_file)
else:
# generate new file
file_name, file_extension = os.path.splitext(target_file)
target_file = f'{file_name}_{uuid4().hex}{file_extension}'
with zipfile.ZipFile(target_file, 'w', zipfile.ZIP_DEFLATED) as zfobj:
for source in sources:
src = os.path.join(sources_folder, source)
if os.path.isfile(src):
zfobj.write(src, os.path.relpath(src, os.path.join(src, '..')))
else:
_zipdir(src, zfobj, denied_folders)
for zfile in zfobj.filelist:
zfile.create_system = 0
return target_file
def _zipdir(path, ziph, denied_folders):
# ziph is zipfile handle
for root, dirs, files in os.walk(path):
# check if folder is not in any department denied folders
if not denied_folders or not any(list(map(lambda item: item in denied_folders, root.rsplit(os.path.sep)))):
for file in files:
ziph.write(os.path.join(root, file), os.path.relpath(
os.path.join(root, file), os.path.join(path, '..')))
def _unzipFile(source_file, target_folder):
# Unzip the file, creating subdirectories as needed
zfobj = zipfile.ZipFile(source_file)
zfobj.extractall(target_folder)
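# Usage sketch (paths are placeholders):
# archive = _zipFiles(['reports', 'notes.txt'], '/srv/media/user42', '/tmp/backup.zip')
# _unzipFile(archive, '/tmp/restored')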
|
475111
|
import pytest
from ..helpers import *
import hail as hl
import hail.experimental.time as htime
@skip_unless_spark_backend()
def test_strftime():
assert hl.eval(htime.strftime("%A, %B %e, %Y. %r", 876541523, "America/New_York")) == "Friday, October 10, 1997. 11:45:23 PM"
assert hl.eval(htime.strftime("%A, %B %e, %Y. %r", 876541523, "GMT+2")) == "Saturday, October 11, 1997. 05:45:23 AM"
assert hl.eval(htime.strftime("%A, %B %e, %Y. %r", 876541523, "+08:00")) == "Saturday, October 11, 1997. 11:45:23 AM"
assert hl.eval(htime.strftime("%A, %B %e, %Y. %r", -876541523, "+08:00")) == "Tuesday, March 24, 1942. 04:14:37 AM"
@skip_unless_spark_backend()
def test_strptime():
assert hl.eval(htime.strptime("Friday, October 10, 1997. 11:45:23 PM", "%A, %B %e, %Y. %r", "America/New_York")) == 876541523
assert hl.eval(htime.strptime("Friday, October 10, 1997. 11:45:23 PM", "%A, %B %e, %Y. %r", "GMT+2")) == 876519923
assert hl.eval(htime.strptime("Friday, October 10, 1997. 11:45:23 PM", "%A, %B %e, %Y. %r", "+08:00")) == 876498323
assert hl.eval(htime.strptime("Tuesday, March 24, 1942. 04:14:37 AM", "%A, %B %e, %Y. %r", "+08:00")) == -876541523
|
475115
|
import math
from typing import Optional
from cftool.misc import Incrementer
from ...protocol import TrainerMonitor
@TrainerMonitor.register("basic")
class BasicMonitor(TrainerMonitor):
def __init__(self, patience: int = 25): # type: ignore
super().__init__()
self.patience = patience
self.num_snapshot = 0
self.best_score = -math.inf
self.worst_score: Optional[float] = None
def snapshot(self, new_score: float) -> bool:
self.num_snapshot += 1
if self.worst_score is None:
self.worst_score = new_score
else:
self.worst_score = min(new_score, self.worst_score)
if new_score > self.best_score:
self.best_score = new_score
return True
return False
def check_terminate(self, new_score: float) -> bool:
if self.num_snapshot <= self.patience:
return False
if self.worst_score is None:
return False
return new_score <= self.worst_score
def punish_extension(self) -> None:
return None
@TrainerMonitor.register("mean_std")
class MeanStdMonitor(BasicMonitor):
def __init__(
self,
*,
patience: int = 5,
window_size: int = 25,
overfit_tolerance: float = 25.0,
):
super().__init__()
self.patience = patience
self.overfit_tolerance = overfit_tolerance
self.best_score = -math.inf
self.overfit_level = 0.0
self._incrementer = Incrementer(window_size)
def snapshot(self, new_score: float) -> bool:
self._incrementer.update(new_score)
mean, std = self._incrementer.mean, self._incrementer.std
std = max(std, 1.0e-8)
if new_score < mean - std:
max_decrease = self.overfit_tolerance / self.patience
decrease = min(max_decrease, (mean - new_score) / std + 1.0)
self.overfit_level += decrease
elif new_score > mean + std:
improvement = (new_score - mean) / std - 1.0
self.overfit_level = max(0.0, self.overfit_level - improvement)
return super().snapshot(new_score)
def check_terminate(self, new_score: float) -> bool:
if self.num_snapshot <= 10:
return False
if self.overfit_level >= self.overfit_tolerance:
return True
return False
@TrainerMonitor.register("plateau")
class PlateauMonitor(TrainerMonitor):
def __init__(
self,
*,
patience: float = 5.0,
window_size: int = 25,
plateau_tolerance: float = 25.0,
plateau_threshold: float = 0.2,
):
super().__init__()
self.patience = patience
self.window_size = window_size
self.plateau_tolerance = plateau_tolerance
self.plateau_threshold = plateau_threshold
self.num_snapshot = 0
self.plateau_level = 0.0
self._incrementer = Incrementer(window_size)
@property
def max_plateau_increase(self) -> float:
return self.plateau_tolerance / self.patience
def snapshot(self, new_score: float) -> bool:
self.num_snapshot += 1
self._incrementer.update(new_score)
if self.num_snapshot <= self.window_size:
return False
mean, std = self._incrementer.mean, self._incrementer.std
ratio = max(abs(new_score - mean) / max(std, 1.0e-8), 1.0e-8)
if ratio < self.plateau_threshold:
plateau = min(
self.max_plateau_increase,
1.0 / ratio - 1.0 / self.plateau_threshold,
)
self.plateau_level += plateau
return False
def check_terminate(self, new_score: float) -> bool:
if self.plateau_level >= self.plateau_tolerance:
return True
return False
def punish_extension(self) -> None:
self.plateau_level += self.max_plateau_increase / 5.0
@TrainerMonitor.register("conservative")
class ConservativeMonitor(TrainerMonitor):
def snapshot(self, new_score: float) -> bool:
return True
def check_terminate(self, new_score: float) -> bool:
return False
def punish_extension(self) -> None:
pass
@TrainerMonitor.register("lazy")
class LazyMonitor(TrainerMonitor):
def snapshot(self, new_score: float) -> bool:
return False
def check_terminate(self, new_score: float) -> bool:
return False
def punish_extension(self) -> None:
pass
__all__ = [
"BasicMonitor",
"MeanStdMonitor",
"PlateauMonitor",
"ConservativeMonitor",
"LazyMonitor",
]
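# Quick behavior sketch (scores are made up): BasicMonitor snapshots whenever a
# new best score arrives and, once past ``patience`` snapshots, terminates when
# a score falls back to the running worst.
# m = BasicMonitor(patience=3)
# [m.snapshot(s) for s in (0.1, 0.3, 0.2, 0.4)]  # -> [True, True, False, True]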
|
475116
|
import os
import sys
import errno
import subprocess
import glob
import shutil
from contextlib import contextmanager
def normpath(path):
"""Normalize UNIX path to a native path."""
normalized = os.path.join(*path.split("/"))
if os.path.isabs(path):
return os.path.abspath("/") + normalized
else:
return normalized
def cp(source, target):
source = normpath(source)
target = normpath(target)
print("cp {0} {1}".format(source, target))
shutil.copy(source, target)
def maybe_makedirs(path):
path = normpath(path)
print("mkdir -p " + path)
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
@contextmanager
def cd(path):
path = normpath(path)
cwd = os.getcwd()
os.chdir(path)
print("cd " + path)
try:
yield path
finally:
os.chdir(cwd)
def run(command, **kwargs):
print(command)
subprocess.check_call(command, shell=True, **kwargs)
def main():
with cd("jvm-packages/"):
print("====copying pure-Python tracker====")
for use_cuda in [True, False]:
xgboost4j = "xgboost4j-gpu" if use_cuda else "xgboost4j"
cp("../python-package/xgboost/tracker.py", f"{xgboost4j}/src/main/resources")
print("====copying resources for testing====")
with cd("../demo/CLI/regression"):
run(f"{sys.executable} mapfeat.py")
run(f"{sys.executable} mknfold.py machine.txt 1")
for use_cuda in [True, False]:
xgboost4j = "xgboost4j-gpu" if use_cuda else "xgboost4j"
xgboost4j_spark = "xgboost4j-spark-gpu" if use_cuda else "xgboost4j-spark"
maybe_makedirs(f"{xgboost4j}/src/test/resources")
maybe_makedirs(f"{xgboost4j_spark}/src/test/resources")
for file in glob.glob("../demo/data/agaricus.*"):
cp(file, f"{xgboost4j}/src/test/resources")
cp(file, f"{xgboost4j_spark}/src/test/resources")
for file in glob.glob("../demo/CLI/regression/machine.txt.t*"):
cp(file, f"{xgboost4j_spark}/src/test/resources")
print("====Creating directories to hold native binaries====")
for os, arch in [("linux", "x86_64"), ("windows", "x86_64"), ("macos", "x86_64")]:
output_dir = f"xgboost4j/src/main/resources/lib/{os}/{arch}"
maybe_makedirs(output_dir)
for os, arch in [("linux", "x86_64")]:
output_dir = f"xgboost4j-gpu/src/main/resources/lib/{os}/{arch}"
maybe_makedirs(output_dir)
print("====Next Steps====")
print("1. Obtain Linux and Windows binaries from the CI server")
print("2. Put them in xgboost4j(-gpu)/src/main/resources/lib/[os]/[arch]")
print("3. Now on a Mac machine, run:")
print(" GPG_TTY=$(tty) mvn deploy -Prelease -DskipTests")
if __name__ == "__main__":
main()
|
475118
|
try:
from public_config import *
except ImportError:
pass
PORT = 9030
SERVICE_NAME = 'message'
|
475132
|
import logging
import os
import pickle as pkl

import numpy as np
import torch

# read_bin_file and reset_ext_dict_size are assumed to be provided by the
# project's own IO helpers; they are not defined in this module.
def load_word_dict(opt):
"""
Load word dictionary. Naming format: str(opt.word_dict_size) + '_' + opt.name + '.vocab.dict'
:param opt:
:return:
"""
fname = str(opt.word_dict_size) + '_' + opt.name + '.vocab.dict'
logging.info('Word dict fname %s' % (fname))
word_dict = read_bin_file(fname)
opt.word_dict = word_dict
opt.word_dict_size = len(word_dict)
# opt.sos = word_dict.fword2idx('<s>')
# opt.eos = word_dict.fword2idx('<\\s>')
opt.unk = word_dict.fword2idx('<unk>')
opt.pad = word_dict.fword2idx('<pad>')
assert opt.pad == 0
return opt
def load_tag_dict(opt, name):
"""
Load tag dictionary.
:param opt:
:param name: [ ner, pos ]
:return:
"""
with open(name + '.dict', 'rb') as f:
tag_dict = pkl.load(f)
setattr(opt, name + '_dict', tag_dict)
setattr(opt, name + '_dict_size', len(tag_dict))
return opt
def load_data(opt):
"""
Load dataset given mode {dbg, normal} + {train, test}
:param opt:
:return: data_patch
"""
if opt.mode == 0:
os.chdir('trains')
train_files = os.listdir('.')
        if opt.dbg:
            train_files = [x for x in train_files if x.endswith('000.bin') or x.endswith('001.bin')]
            logging.info("DEBUG TRAIN mode: %d batch of data" % (len(train_files)))
train_files = [fname for fname in train_files if fname.endswith('.bin')]
logging.info('Total %d batch to load' % len(train_files))
# bag = concurrent_io(read_bin_file, files)
bag = []
for f in train_files:
bag.append(read_bin_file(f))
os.chdir('..')
os.chdir('tests')
test_files = os.listdir('.')
test_files = [fname for fname in test_files if fname.endswith('.bin')]
test_bag = []
for f in test_files:
test_bag.append(read_bin_file(f))
os.chdir('..')
    elif opt.mode == 1:
        os.chdir('tests')
        files = os.listdir('.')
        np.random.RandomState(seed=42).shuffle(files)
        # files = files[:200] # TODO
        if opt.dbg:
            files = files[:100]
            logging.info("DEBUG EVAL mode: %d batch of data" % (len(files)))
        files = [fname for fname in files if fname.endswith('.bin')]
        # assumed: eval mode only needs the test bag, so the train bag stays empty
        bag = []
        test_bag = [read_bin_file(f) for f in files]
        os.chdir('..')
    else:
        logging.error('Unrecognizable mode. 0 - train 1 - test')
        raise Exception('unknown mode %d' % opt.mode)
if opt.mode == 0:
cand_ext_dict_size = reset_ext_dict_size(bag)
if cand_ext_dict_size > opt.ext_dict_size:
opt.ext_dict_size = cand_ext_dict_size
logging.info("Extd dict size %s" % str(opt.ext_dict_size))
opt.full_dict_size = opt.word_dict_size + opt.ext_dict_size
logging.info('Full dict size: %d' % opt.full_dict_size)
return opt, [bag, test_bag]
def load_pretrain_word_embedding(opt):
"""
    Load pretrained word embeddings into a (full_dict_size, inp_dim) matrix.
:param opt:
:return:
"""
full_embedding = None
if opt.dbg:
opt.pretrain_word = None
if opt.pretrain_word is not None:
rng = np.random.RandomState(2018)
full_embedding = rng.uniform(-0.3, 0.3, (opt.full_dict_size, opt.inp_dim))
with open(opt.pretrain_word, 'r') as pretrain:
lines = pretrain.readlines()
for l in lines:
x = l.split(' ')
word = x[0]
if opt.word_dict.has_word(word):
idx = opt.word_dict.fword2idx(word)
nums = x[1:]
assert len(nums) == opt.inp_dim
                    full_embedding[idx] = np.asarray([float(v) for v in nums])
else:
        logging.warning('Not loading pretrained word embeddings.')
return full_embedding
def load_prev_state(option, module, strict_flag=False):
if option:
logging.info('Loading module %s' % option)
module.load_state_dict(torch.load(option), strict=strict_flag)
return module
else:
return module
def load_dataset(opt):
"""
Load everything needed, including dataset, dictionaries, and pre-training model.
:param opt:
:return:
"""
_cur_path = os.getcwd()
    os.chdir(opt.data_path)  # change to the dataset directory
opt = load_word_dict(opt)
# opt = load_tag_dict(opt, 'pos')
# opt = load_tag_dict(opt, 'ner')
opt, data_bag = load_data(opt)
os.chdir(_cur_path)
return opt, data_bag
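# Hedged usage sketch: 'opt' is assumed to be a Namespace-like config carrying the
# fields read above (data_path, mode, dbg, name, word_dict_size, ext_dict_size, ...):
# opt, (train_bag, test_bag) = load_dataset(opt)
# logging.info('train batches: %d, test batches: %d', len(train_bag), len(test_bag))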
|
475162
|
def raw_to_libsvm(ofile, store, full_first_header=False):
    """Write a raw store to libsvm-style format.
    Note: each line holds index:value pairs only; no leading label column is written.
    :param ofile: file-like object to write to
    :param store: the raw store (iterable of rows, exposing .colnames)
    :param full_first_header: write every column of the first row regardless of sparseness
    """
    for idx, data in enumerate(store):
        line = []
        for col_idx, (_name, value) in enumerate(zip(store.colnames, data)):
            # Emit all columns for the first row when requested; otherwise skip zeros.
            if (full_first_header and idx == 0) or value:
                line.append("%s:%f" % (col_idx, value))
        ofile.write(" ".join(line))
        ofile.write("\n")
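# Minimal self-contained sketch of the 'store' interface this function expects: any
# iterable of rows that also exposes .colnames. _ToyStore is illustrative, not the real class.
if __name__ == "__main__":
    import io
    class _ToyStore:
        colnames = ["f0", "f1", "f2"]
        def __iter__(self):
            return iter([(0.0, 1.5, 0.0), (2.0, 0.0, 3.0)])
    buf = io.StringIO()
    raw_to_libsvm(buf, _ToyStore(), full_first_header=True)
    print(buf.getvalue())  # first row lists every column; later rows skip zeros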
|
475245
|
import markdown
MARKDOWN_EXTENSIONS = ["def_list", "fenced_code", "codehilite", "tables"]
def extended_markdown(text):
    if isinstance(text, bytes):
        text = text.decode("utf8")
return markdown.markdown(text, extensions=MARKDOWN_EXTENSIONS,
output_format="html")
# 'Config' is assumed to be provided by the enclosing application (imported elsewhere in the original project).
Config.transformers['markdown'] = extended_markdown
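# Hedged usage sketch (kept commented out because 'Config' is supplied by the host app):
# html = extended_markdown("# Title\n\n| a | b |\n|---|---|\n| 1 | 2 |")
# print(html)  # rendered with the def_list/fenced_code/codehilite/tables extensions above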
|
475272
|
import os
import sys
import discord
from dotenv import load_dotenv
env_files = [f for f in os.listdir() if f.endswith(".env")]
if env_files:
load_dotenv(env_files[0])
# Path to the terminal (expand "~" so the path is usable on any platform)
GST_PATH = os.path.expanduser(os.path.join("~", "Documents", "GamestonkTerminal"))
sys.path.append(GST_PATH)
# https://discord.com/developers/applications/
DISCORD_BOT_TOKEN = os.getenv("GT_DISCORD_BOT_TOKEN") or "REPLACE_ME"
# https://apidocs.imgur.com
IMGUR_CLIENT_ID = os.getenv("GT_IMGUR_CLIENT_ID") or "REPLACE_ME"
# Settings
COMMAND_PREFIX = "!"
DATE_FORMAT = "%Y-%m-%d"
COLOR = discord.Color.from_rgb(0, 206, 154)
MENU_TIMEOUT = 30
AUTHOR_NAME = "Gamestonk Terminal"
AUTHOR_ICON_URL = "https://github.com/GamestonkTerminal/GamestonkTerminal/blob/main/images/gst_logo_green_white_background.png?raw=true"
|
475283
|
import pymzml
import numpy as np
import pandas as pd
from tqdm import tqdm
class ResultTable:
def __init__(self, files, features):
n_features = len(features)
n_files = len(files)
self.files = {k: v for v, k in enumerate(files)}
self.intensities = np.zeros((n_files, n_features))
self.mz = np.zeros(n_features)
self.rtmin = np.zeros(n_features)
self.rtmax = np.zeros(n_features)
# fill in intensities values
for i, feature in enumerate(features):
self.mz[i] = feature.mz
self.rtmin[i] = feature.rtmin
self.rtmax[i] = feature.rtmax
for j, sample in enumerate(feature.samples):
self.intensities[self.files[sample], i] = feature.intensities[j]
def fill_zeros(self, delta_mz):
print('zero filling...')
for file, k in tqdm(self.files.items()):
# read all scans in mzML file
run = pymzml.run.Reader(file)
scans = []
for scan in run:
scans.append(scan)
            begin_time = scans[0].scan_time[0]
            end_time = scans[-1].scan_time[0]
            # approximate scan rate (scans per retention-time unit), assuming uniform acquisition
            frequency = len(scans) / (end_time - begin_time)
            for m, intensity in enumerate(self.intensities[k]):
                if intensity == 0:
                    mz = self.mz[m]
                    # map the feature's RT window back to scan indices (clamped, one-scan margin)
                    begin = max(int((self.rtmin[m] - begin_time) * frequency) - 1, 0)
                    end = int((self.rtmax[m] - begin_time) * frequency) + 1
for scan in scans[begin:end]:
pos = np.searchsorted(scan.mz, mz)
if pos < len(scan.mz) and mz - delta_mz < scan.mz[pos] < mz + delta_mz:
self.intensities[k, m] += scan.i[pos]
if pos >= 1 and mz - delta_mz < scan.mz[pos - 1] < mz + delta_mz:
self.intensities[k, m] += scan.i[pos - 1]
def to_csv(self, path):
df = pd.DataFrame()
df['mz'] = self.mz
df['rtmin'] = self.rtmin / 60
df['rtmax'] = self.rtmax / 60
for file, k in self.files.items():
df[file] = self.intensities[k]
df.to_csv(path)
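# Hedged usage sketch: 'features' are assumed to expose the mz/rtmin/rtmax/samples/
# intensities attributes used above, and file names must match feature.samples entries:
# table = ResultTable(files=['run1.mzML', 'run2.mzML'], features=features)
# table.fill_zeros(delta_mz=0.01)   # re-integrate missing peaks within +/- 0.01 m/z
# table.to_csv('feature_table.csv')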
|
475305
|
from __future__ import annotations
import os
from typing import BinaryIO
from aiosnow.utils import convert_size
class FileHandler:
def __init__(self, file_name: str, dir_path: str = "."):
self.file_path = os.path.join(dir_path, file_name)
self.file = self._open()
self.open = True
def _open(self) -> BinaryIO:
raise NotImplementedError
def read(self) -> bytes:
raise NotImplementedError
def write(self, data: bytes) -> None:
raise NotImplementedError
def __enter__(self) -> FileHandler:
return self
def __exit__(self, *_: tuple) -> None:
self.open = False
self.file.close()
class FileWriter(FileHandler):
bytes_written: int
def __repr__(self) -> str:
written, unit = convert_size(self.bytes_written)
return (
f"<{self.__class__.__name__} [path: {self.file_path}, "
f"open: {self.open}, written: {written}{unit}]>"
)
def _open(self) -> BinaryIO:
self.bytes_written = 0
dir_path = os.path.dirname(self.file_path)
os.makedirs(dir_path, exist_ok=True)
return open(self.file_path, "wb")
def write(self, data: bytes) -> None:
self.bytes_written += self.file.write(data)
    def read(self) -> bytes:
        # writers are write-only; reading is intentionally a no-op here
        pass
class FileReader(FileHandler):
def __repr__(self) -> str:
return (
f"<{self.__class__.__name__} [path: {self.file_path}, open: {self.open}]>"
)
def _open(self) -> BinaryIO:
return open(self.file_path, "rb")
def read(self) -> bytes:
return self.file.read()
    def write(self, data: bytes) -> None:
        # readers are read-only; writing is intentionally a no-op here
        pass
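# Hedged usage sketch of the context-manager protocol defined above:
# with FileWriter("dump.bin", dir_path="out") as fw:
#     fw.write(b"payload")
# with FileReader("dump.bin", dir_path="out") as fr:
#     data = fr.read()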
|
475357
|
from __future__ import print_function
import torch.nn as nn
class Flatten(nn.Module):
def __init__(self):
super(Flatten, self).__init__()
def forward(self, feat):
return feat.view(feat.size(0), -1)
class LinearClassifierAlexNet(nn.Module):
def __init__(self, layer=5, n_label=1000, pool_type='max'):
super(LinearClassifierAlexNet, self).__init__()
if layer == 1:
pool_size = 10
nChannels = 96
elif layer == 2:
pool_size = 6
nChannels = 256
elif layer == 3:
pool_size = 5
nChannels = 384
elif layer == 4:
pool_size = 5
nChannels = 384
elif layer == 5:
pool_size = 6
nChannels = 256
else:
raise NotImplementedError('layer not supported: {}'.format(layer))
self.classifier = nn.Sequential()
if layer < 5:
if pool_type == 'max':
self.classifier.add_module('MaxPool', nn.AdaptiveMaxPool2d((pool_size, pool_size)))
elif pool_type == 'avg':
self.classifier.add_module('AvgPool', nn.AdaptiveAvgPool2d((pool_size, pool_size)))
self.classifier.add_module('Flatten', Flatten())
self.classifier.add_module('LinearClassifier', nn.Linear(nChannels*pool_size*pool_size, n_label))
        self.initialize()
    def initialize(self):
for m in self.modules():
if isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.fill_(0.0)
def forward(self, x):
return self.classifier(x)
class LinearClassifierResNet(nn.Module):
def __init__(self, layer=6, n_label=1000, pool_type='avg', width=1):
super(LinearClassifierResNet, self).__init__()
if layer == 1:
pool_size = 8
nChannels = 128 * width
pool = pool_type
elif layer == 2:
pool_size = 6
nChannels = 256 * width
pool = pool_type
elif layer == 3:
pool_size = 4
nChannels = 512 * width
pool = pool_type
elif layer == 4:
pool_size = 3
nChannels = 1024 * width
pool = pool_type
elif layer == 5:
pool_size = 7
nChannels = 2048 * width
pool = pool_type
elif layer == 6:
pool_size = 1
nChannels = 2048 * width
pool = pool_type
else:
raise NotImplementedError('layer not supported: {}'.format(layer))
self.classifier = nn.Sequential()
if layer < 5:
if pool == 'max':
self.classifier.add_module('MaxPool', nn.AdaptiveMaxPool2d((pool_size, pool_size)))
elif pool == 'avg':
self.classifier.add_module('AvgPool', nn.AdaptiveAvgPool2d((pool_size, pool_size)))
else:
# self.classifier.add_module('AvgPool', nn.AvgPool2d(7, stride=1))
pass
self.classifier.add_module('Flatten', Flatten())
print('classifier input: {}'.format(nChannels * pool_size * pool_size))
        self.classifier.add_module('LinearClassifier', nn.Linear(nChannels * pool_size * pool_size, n_label))
        self.initialize()
    def initialize(self):
for m in self.modules():
if isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.fill_(0.0)
def forward(self, x):
return self.classifier(x)
|
475373
|
import random
import torch
from torch.autograd import Variable
from torch.utils.data.dataloader import DataLoader
from config import batch_size
from data_gen import AiChallengerDataset
from data_gen import pad_collate
if __name__ == '__main__':
checkpoint = 'BEST_checkpoint.tar'
checkpoint = torch.load(checkpoint)
model = checkpoint['model']
model.eval()
dset = AiChallengerDataset()
dset.set_mode('valid')
valid_loader = DataLoader(dset, batch_size=batch_size, shuffle=False, collate_fn=pad_collate)
chosen_samples = range(len(dset))
_ids = random.sample(chosen_samples, 10)
_pred_ids = []
    with torch.no_grad():  # inference only; no gradients needed
        for i, data in enumerate(valid_loader):
            contexts, questions, _, alternatives = data
            contexts = Variable(contexts.long().cuda())
            questions = Variable(questions.long().cuda())
            alternatives = Variable(alternatives.long().cuda())
            preds = model.forward(contexts, questions, alternatives)
            _, pred_ids = torch.max(preds, dim=1)
            _pred_ids += list(pred_ids.cpu().numpy())
print('len(_pred_ids): ' + str(len(_pred_ids)))
for id in _ids:
context = dset[id][0]
context = [''.join([dset.QA.IVOCAB[id] for id in sentence]) for sentence in context]
context = '。'.join(context).replace('<EOS>', '')
question = dset[id][1]
question = ''.join([dset.QA.IVOCAB[id] for id in question]).replace('<EOS>', '')
answer = dset[id][3][dset[id][2]]
answer = dset.QA.IVOCAB[answer]
alternative = dset[id][3]
alternative = [dset.QA.IVOCAB[id] for id in alternative]
pred_id = _pred_ids[id]
pred = alternative[pred_id]
alternative = '|'.join(alternative)
        print('Passage: ' + context)
        print('Question: ' + question)
        print('Alternatives: ' + alternative)
        print('Gold answer: ' + answer)
        print('Model answer: ' + pred)
print()
|
475393
|
import scipy.stats as stats
import numpy as np
from cde.density_simulation.BaseConditionalDensitySimulation import BaseConditionalDensitySimulation
class SkewNormal(BaseConditionalDensitySimulation):
  """ This model represents a univariate skew-normal distribution whose location,
  scale and skewness parameters depend on the conditioning variable x.
  """
def __init__(self, random_seed=None):
self.random_state = np.random.RandomState(seed=random_seed)
self.random_seed = random_seed
# parameters of the X to distribution parameters mapping
self.loc_slope = 0.1
self.loc_intercept = 0.0
self.scale_square_param = 0.1
self.scale_intercept = 0.05
self.skew_low = -4
self.skew_high = 0.0
    # x follows a Gaussian
self.x_loc = 0
self.x_scale = 0.5
self.x_dist = stats.norm(loc=self.x_loc, scale=self.x_scale)
self.ndim_x = 1
self.ndim_y = 1
self.ndim = self.ndim_x + self.ndim_y
# approximate data statistics
self.y_mean, self.y_std = self._compute_data_statistics()
self.has_cdf = True
self.has_pdf = True
self.can_sample = True
def _loc_scale_skew_mapping(self, X):
loc = self.loc_intercept + self.loc_slope * X
scale = self.scale_intercept + self.scale_square_param * X**2
skew = self.skew_low + (self.skew_high - self.skew_low) * sigmoid(X)
return loc, scale, skew
def _sample_x(self, n_samples):
return self.x_dist.rvs((n_samples,self.ndim_x), random_state=self.random_state)
  def pdf(self, X, Y):
    """ Conditional probability density function p(y|x) of the underlying probability model
    Args:
      X: x to be conditioned on - numpy array of shape (n_points, ndim_x)
      Y: y target values for which the pdf shall be evaluated - numpy array of shape (n_points, ndim_y)
    Returns:
      p(y|x) conditional density values for the provided X and Y - numpy array of shape (n_points, )
    """
X, Y = self._handle_input_dimensionality(X, Y)
locs, scales, skews = self._loc_scale_skew_mapping(X)
P = np.zeros(X.shape[0])
for i in range(X.shape[0]):
P[i] = stats.skewnorm.pdf(Y[i], skews[i], loc=locs[i], scale=scales[i])
return P
  def cdf(self, X, Y):
    """ Conditional cumulative distribution function P(Y < y | x) of the underlying probability model
    Args:
      X: x to be conditioned on - numpy array of shape (n_points, ndim_x)
      Y: y target values for which the cdf shall be evaluated - numpy array of shape (n_points, ndim_y)
    Returns:
      P(Y < y | x) cumulative probability values for the provided X and Y - numpy array of shape (n_points, )
    """
X, Y = self._handle_input_dimensionality(X, Y)
locs, scales, skews = self._loc_scale_skew_mapping(X)
P = np.zeros(X.shape[0])
for i in range(X.shape[0]):
P[i] = stats.skewnorm.cdf(Y[i], skews[i], loc=locs[i], scale=scales[i])
return P
def simulate_conditional(self, X):
""" Draws random samples from the conditional distribution
Args:
X: x to be conditioned on when drawing a sample from y ~ p(y|x) - numpy array of shape (n_samples, ndim_x)
Returns:
Conditional random samples y drawn from p(y|x) - numpy array of shape (n_samples, ndim_y)
"""
X = self._handle_input_dimensionality(X)
locs, scales, skews = self._loc_scale_skew_mapping(X)
rvs = np.zeros(X.shape[0])
for i in range(X.shape[0]):
rvs[i] = stats.skewnorm.rvs(skews[i], loc=locs[i], scale=scales[i], random_state=self.random_state)
rvs = np.expand_dims(rvs, 1)
assert rvs.shape == (X.shape[0], self.ndim_y)
return rvs
def simulate(self, n_samples=1000):
""" Draws random samples from the unconditional distribution p(x,y)
Args:
n_samples: (int) number of samples to be drawn from the conditional distribution
Returns:
(X,Y) - random samples drawn from p(x,y) - numpy arrays of shape (n_samples, ndim_x) and (n_samples, ndim_y)
"""
X = self._sample_x(n_samples)
assert X.shape == (n_samples, self.ndim_x)
return X, self.simulate_conditional(X)
def mean_(self, x_cond, n_samples=None):
""" Conditional mean of the distribution
Args:
x_cond: different x values to condition on - numpy array of shape (n_values, ndim_x)
Returns:
Means E[y|x] corresponding to x_cond - numpy array of shape (n_values, ndim_y)
"""
    x = self._handle_input_dimensionality(x_cond)
    locs, _, _ = self._loc_scale_skew_mapping(x)
    # NOTE: this returns the location parameter; the exact skew-normal mean is
    # loc + scale * delta * sqrt(2 / pi) with delta = skew / sqrt(1 + skew**2)
    assert locs.shape == (x_cond.shape[0], self.ndim_y)
    return locs
def sigmoid(x):
return 1 / (1+np.exp(-x))
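# Hedged usage sketch of the simulation API above:
# sim = SkewNormal(random_seed=42)
# X, Y = sim.simulate(n_samples=500)  # joint draws from p(x, y)
# dens = sim.pdf(X, Y)                # conditional densities p(y|x)
# probs = sim.cdf(X, Y)               # conditional probabilities P(Y < y | x)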
|
475423
|
def binary_search(arr, tn):
    low = 0
    high = len(arr) - 1
    while low <= high:
        m = (high - low) // 2 + low  # midpoint without overflow
        if arr[m] == tn:
            return m
        elif arr[m] < tn:
            low = m + 1
        else:
            high = m - 1
    return -1  # the loop exits only when low > high, i.e. the target is absent
# The next line prints a prompt; it should not appear in the book's code listing:
print("\n\nFollowing are output of code 10-3\n")
# The code below appears in section 10-3 of the book
arr = [3, 5, 7, 9, 12, 15, 18, 32, 66, 78, 94, 103, 269]  # binary search requires sorted input
tn = 5
result = binary_search(arr, tn)
if result >= 0:
print("Succeeded! The target index is: ", result)
else:
print("Search failed.")
# The next line prints a prompt; it should not appear in the book's code listing:
print("\n\nFollowing are output of code 10-4\n")
# The code below appears in section 10-4 of the book
arr = []
for i in range(1, 1001):
arr.append(i)
for tn in range(1, 1001):
result = binary_search(arr, tn)
if result >= 0:
print("Succeeded! The target index is: ", result)
else:
print("Search failed.")
|
475467
|
import FWCore.ParameterSet.Config as cms
# This modifier is used to adapt input configurations of tau producers if input files with old tau ID format are used.
tau_readOldDiscriminatorFormat = cms.Modifier()
|
475479
|
import string
from aster.core import label_map
from aster.protos import label_map_pb2
def build(config):
if not isinstance(config, label_map_pb2.LabelMap):
raise ValueError('config not of type label_map_pb2.LabelMap')
character_set = _build_character_set(config.character_set)
label_map_object = label_map.LabelMap(
character_set=character_set,
label_offset=config.label_offset,
unk_label=config.unk_label)
return label_map_object
def _build_character_set(config):
if not isinstance(config, label_map_pb2.CharacterSet):
raise ValueError('config not of type label_map_pb2.CharacterSet')
source_oneof = config.WhichOneof('source_oneof')
character_set_string = None
if source_oneof == 'text_file':
file_path = config.text_file
with open(file_path, 'r') as f:
character_set_string = f.read()
character_set = character_set_string.split('\n')
elif source_oneof == 'text_string':
character_set_string = config.text_string
character_set = character_set_string.split()
elif source_oneof == 'built_in_set':
if config.built_in_set == label_map_pb2.CharacterSet.LOWERCASE:
character_set = list(string.digits + string.ascii_lowercase)
elif config.built_in_set == label_map_pb2.CharacterSet.ALLCASES:
character_set = list(string.digits + string.ascii_letters)
elif config.built_in_set == label_map_pb2.CharacterSet.ALLCASES_SYMBOLS:
character_set = list(string.printable[:-6])
else:
raise ValueError('Unknown built_in_set')
else:
raise ValueError('Unknown source_oneof: {}'.format(source_oneof))
return character_set
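# Hedged usage sketch (field names follow the accesses above; the authoritative proto
# layout lives in aster/protos/label_map.proto and may differ):
# config = label_map_pb2.LabelMap()
# config.character_set.built_in_set = label_map_pb2.CharacterSet.LOWERCASE
# label_map_object = build(config)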
|
475480
|
from .imports import *
class SwitchDelegate(QItemDelegate):
def __init__(self, parent):
QItemDelegate.__init__(self, parent)
def createEditor(self, parent, option, index):
switch = QPushButton(parent)
switch.setCheckable(False)
switch.setAutoExclusive(False)
icon = switch.style().standardIcon(QStyle.SP_BrowserReload)
switch.setIcon(icon)
switch.setSizePolicy(QSizePolicy.Fixed,QSizePolicy.Fixed)
switch.setMaximumSize(switch.iconSize().width()+14,switch.iconSize().height()+14)
switch.clicked.connect(lambda : self.click(index=index))
switch.setFocusPolicy(Qt.NoFocus)
return switch
def click(self, index):
index.model().switchRow(index.row())
        # The next two lines add and then delete a throwaway row as a workaround
        # for the table not refreshing on macOS.
index.model().addRow(row=('xx', 'xx'))
index.model().removeRow(ind=len(index.model().rows)-1)
|
475577
|
from cliff.command import Command
import call_server as server
class AppList(Command):
def get_parser(self, prog_name):
parser = super(AppList, self).get_parser(prog_name)
return parser
def take_action(self, parsed_args):
response = server.TakeAction().get_app_list()
print(response)
|
475587
|
import numpy as np
import pickle, torch
from . import tools
class Feeder_single(torch.utils.data.Dataset):
""" Feeder for single inputs """
def __init__(self, data_path, label_path, shear_amplitude=0.5, temperal_padding_ratio=6, mmap=True):
self.data_path = data_path
self.label_path = label_path
self.shear_amplitude = shear_amplitude
self.temperal_padding_ratio = temperal_padding_ratio
self.load_data(mmap)
def load_data(self, mmap):
# load label
with open(self.label_path, 'rb') as f:
self.sample_name, self.label = pickle.load(f)
# load data
if mmap:
self.data = np.load(self.data_path, mmap_mode='r')
else:
self.data = np.load(self.data_path)
def __len__(self):
return len(self.label)
def __getitem__(self, index):
# get data
data_numpy = np.array(self.data[index])
label = self.label[index]
# processing
data = self._aug(data_numpy)
return data, label
def _aug(self, data_numpy):
if self.temperal_padding_ratio > 0:
data_numpy = tools.temperal_crop(data_numpy, self.temperal_padding_ratio)
if self.shear_amplitude > 0:
data_numpy = tools.shear(data_numpy, self.shear_amplitude)
return data_numpy
class Feeder_dual(torch.utils.data.Dataset):
""" Feeder for dual inputs """
def __init__(self, data_path, label_path, shear_amplitude=0.5, temperal_padding_ratio=6, mmap=True):
self.data_path = data_path
self.label_path = label_path
self.shear_amplitude = shear_amplitude
self.temperal_padding_ratio = temperal_padding_ratio
self.load_data(mmap)
def load_data(self, mmap):
# load label
with open(self.label_path, 'rb') as f:
self.sample_name, self.label = pickle.load(f)
# load data
if mmap:
self.data = np.load(self.data_path, mmap_mode='r')
else:
self.data = np.load(self.data_path)
def __len__(self):
return len(self.label)
def __getitem__(self, index):
# get data
data_numpy = np.array(self.data[index])
label = self.label[index]
# processing
data1 = self._aug(data_numpy)
data2 = self._aug(data_numpy)
return [data1, data2], label
def _aug(self, data_numpy):
if self.temperal_padding_ratio > 0:
data_numpy = tools.temperal_crop(data_numpy, self.temperal_padding_ratio)
if self.shear_amplitude > 0:
data_numpy = tools.shear(data_numpy, self.shear_amplitude)
return data_numpy
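# Hedged usage sketch (paths are placeholders; see load_data above for the expected formats):
# feeder = Feeder_dual('train_data.npy', 'train_label.pkl')
# loader = torch.utils.data.DataLoader(feeder, batch_size=64, shuffle=True)
# [view1, view2], labels = next(iter(loader))  # two augmentations of each sample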
# class Feeder_semi(torch.utils.data.Dataset):
# """ Feeder for semi-supervised learning """
# def __init__(self, data_path, label_path, shear_amplitude=0.5, temperal_padding_ratio=6, mmap=True, label_list=None):
# self.data_path = data_path
# self.label_path = label_path
# self.shear_amplitude = shear_amplitude
# self.temperal_padding_ratio = temperal_padding_ratio
# self.label_list = label_list
# self.load_data(mmap)
# self.load_semi_data()
# def load_data(self, mmap):
# # load label
# with open(self.label_path, 'rb') as f:
# self.sample_name, self.label = pickle.load(f)
# # load data
# if mmap:
# self.data = np.load(self.data_path, mmap_mode='r')
# else:
# self.data = np.load(self.data_path)
# def load_semi_data(self):
# data_length = len(self.label)
# if not self.label_list:
# self.label_list = list(range(data_length))
# else:
# self.label_list = np.load(self.label_list).tolist()
# self.label_list.sort()
# self.unlabel_list = list(range(data_length))
# def __len__(self):
# return len(self.unlabel_list)
# def __getitem__(self, index):
# # get data
# data_numpy = np.array(self.data[index])
# label = self.label[index]
# # processing
# data = self._aug(data_numpy)
# return data, label
# def __getitem__(self, index):
# label_index = self.label_list[index % len(self.label_list)]
# unlabel_index = self.unlabel_list[index]
# # get data
# label_data_numpy = np.array(self.data[label_index])
# unlabel_data_numpy = np.array(self.data[unlabel_index])
# label = self.label[label_index]
# # processing
# data1 = self._aug(unlabel_data_numpy)
# data2 = self._aug(unlabel_data_numpy)
# return [data1, data2], label_data_numpy, label
# def _aug(self, data_numpy):
# if self.temperal_padding_ratio > 0:
# data_numpy = tools.temperal_crop(data_numpy, self.temperal_padding_ratio)
# if self.shear_amplitude > 0:
# data_numpy = tools.shear(data_numpy, self.shear_amplitude)
# return data_numpy
|
475588
|
from keras.layers import Input, Activation, Dense, Flatten, BatchNormalization, Add, Conv2D
from keras.layers import MaxPooling2D, AveragePooling2D, Permute, Reshape, LSTM, Lambda, GRU, Bidirectional, Concatenate
from keras import backend as K  # used by the permute Lambda layers below
from keras import regularizers
from keras.optimizers import Adam
from models.attention_layer import *
from keras.models import Model
from utils import sharpe_ratio_loss, sharpe_ratio
###############################
# additive attention RNN models
###############################
def build_add_att_lstm_model(params):
units = params['units']
activation = params['activation']
    reg1 = params['l2']
    reg2 = params['l2_1']
    lr = params['l2_2']  # learning rate (stored under the 'l2_2' key in the search space)
input_shape = params['input_shape']
ts = input_shape[1]
tickers = input_shape[0]
input = Input(shape=input_shape)
reshape_inp = Lambda(lambda x: K.permute_dimensions(x,pattern=(0,2,1,3))) (input)
reshape_inp = Reshape((ts,-1)) (reshape_inp)
batch_norm = BatchNormalization()(reshape_inp)
recurrent_layer = LSTM(units = units,
activation = activation,
kernel_regularizer=regularizers.l2(reg1)) (batch_norm)
batch_norm_2 = BatchNormalization()(recurrent_layer)
##ATTENTION LAYER
contxt_layer = AdditiveAttentionLayer(name='Att',latent_dim=32,kernel_regularizer=regularizers.l2(0.01))([batch_norm,batch_norm_2])
merge = Concatenate()([batch_norm_2,contxt_layer])
out = Dense(units, kernel_regularizer =regularizers.l2(reg2),activation='tanh') (merge)
batch_norm_3 = BatchNormalization()(out)
out = Dense(tickers, kernel_regularizer =regularizers.l2(reg2)) (batch_norm_3)
out = Activation('sigmoid')(out)
model = Model([input], [out])
optimizer = Adam(lr = lr)
model.compile(loss=sharpe_ratio_loss, optimizer=optimizer, metrics = [sharpe_ratio])
return model
def build_add_att_gru_model(params):
units = params['units']
activation = params['activation']
    reg1 = params['l2']
    reg2 = params['l2_1']
    lr = params['l2_2']  # learning rate (stored under the 'l2_2' key in the search space)
input_shape = params['input_shape']
ts = input_shape[1]
tickers = input_shape[0]
input = Input(shape=input_shape)
reshape_inp = Lambda(lambda x: K.permute_dimensions(x,pattern=(0,2,1,3))) (input)
reshape_inp = Reshape((ts,-1)) (reshape_inp)
batch_norm = BatchNormalization()(reshape_inp)
recurrent_layer = GRU(units = units,
activation = activation,
kernel_regularizer=regularizers.l2(reg1)) (batch_norm)
batch_norm_2 = BatchNormalization()(recurrent_layer)
##ATTENTION LAYER
contxt_layer = AdditiveAttentionLayer(name='Att',latent_dim=32,kernel_regularizer=regularizers.l2(0.01))([batch_norm,batch_norm_2])
merge = Concatenate()([batch_norm_2,contxt_layer])
out = Dense(units, kernel_regularizer =regularizers.l2(reg2),activation='tanh') (merge)
batch_norm_3 = BatchNormalization()(out)
out = Dense(tickers, kernel_regularizer =regularizers.l2(reg2)) (batch_norm_3)
out = Activation('sigmoid')(out)
model = Model([input], [out])
optimizer = Adam(lr = lr)
model.compile(loss=sharpe_ratio_loss, optimizer=optimizer, metrics = [sharpe_ratio])
return model
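# Hedged usage sketch: the params keys mirror those read above; the values are illustrative only.
# params = {
#     'units': 64,
#     'activation': 'tanh',
#     'l2': 1e-4,        # recurrent-layer weight decay
#     'l2_1': 1e-4,      # dense-layer weight decay
#     'l2_2': 1e-3,      # learning rate (see the comment where it is read)
#     'input_shape': (10, 30, 4),  # (tickers, timesteps, features), per the reshape above
# }
# model = build_add_att_lstm_model(params)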
|
475610
|
from argparse import ArgumentParser
import logging
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
import torch.nn.functional as F
from torchvision.transforms import Compose, ToTensor, Normalize
from torchvision.datasets import MNIST
from ignite.engine import (
Events, create_supervised_trainer, create_supervised_evaluator)
from ignite.metrics import Accuracy, Loss
from ignite.handlers import ModelCheckpoint
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(
download=True, root=".", transform=data_transform, train=True),
batch_size=train_batch_size, shuffle=True)
val_loader = DataLoader(
MNIST(
download=False, root=".", transform=data_transform, train=False),
batch_size=val_batch_size, shuffle=False)
return train_loader, val_loader
def run(train_batch_size, val_batch_size,
epochs, lr, momentum,
log_interval, restore_from, crash_iteration=1000):
train_loader, val_loader = get_data_loaders(
train_batch_size, val_batch_size)
model = Net()
device = 'cpu'
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
trainer = create_supervised_trainer(
model, optimizer, F.nll_loss, device=device)
evaluator = create_supervised_evaluator(model,
metrics={'accuracy': Accuracy(),
'nll': Loss(F.nll_loss)},
device=device)
    # Attach a console handler to the trainer's engine logger:
    trainer._logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"%(asctime)s|%(name)s|%(levelname)s| %(message)s")
ch.setFormatter(formatter)
trainer._logger.addHandler(ch)
    @trainer.on(Events.ITERATION_COMPLETED)
    def log_training_loss(engine):
        iteration = (engine.state.iteration - 1) % len(train_loader) + 1
        if iteration % log_interval == 0:
            print("Epoch[{}] Iteration[{}/{}] Loss: {:.2f}"
                  "".format(
                      engine.state.epoch, iteration,
                      len(train_loader), engine.state.output))
        if engine.state.iteration == crash_iteration:
            raise Exception("STOP at {}".format(engine.state.iteration))
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
evaluator.run(train_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics['accuracy']
avg_nll = metrics['nll']
        print("Training Results - Epoch: {}  Avg accuracy: {:.2f} Avg loss: {:.2f}"
              .format(engine.state.epoch, avg_accuracy, avg_nll))
@trainer.on(Events.EPOCH_COMPLETED)
def log_validation_results(engine):
evaluator.run(val_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics['accuracy']
avg_nll = metrics['nll']
        print("Validation Results - Epoch: {}  Avg accuracy: {:.2f} Avg loss: {:.2f}"
              .format(engine.state.epoch, avg_accuracy, avg_nll))
objects_to_checkpoint = {"model": model, "optimizer": optimizer}
engine_checkpoint = ModelCheckpoint(
dirname="engine_checkpoint",
filename_prefix='ignite_checking',
require_empty=False,
save_interval=100)
trainer.add_event_handler(
Events.ITERATION_COMPLETED, engine_checkpoint, objects_to_checkpoint)
if restore_from == "":
trainer.run(train_loader, max_epochs=epochs)
else:
raise NotImplementedError('Not implemented yet')
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument('--batch_size', type=int, default=64,
help='input batch size for training (default: 64)')
parser.add_argument('--val_batch_size', type=int, default=1000,
help='input batch size for validation (default: 1000)')
parser.add_argument('--epochs', type=int, default=10,
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01,
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5,
help='SGD momentum (default: 0.5)')
parser.add_argument('--log_interval', type=int, default=300,
help='how many batches to wait before logging')
parser.add_argument('--restore_from', type=str, default="",
help='location from where the model can be reloaded')
parser.add_argument(
'--crash_iteration', type=int, default=1000,
help='Iteration to suddenly raise as exception')
args = parser.parse_args()
run(args.batch_size, args.val_batch_size,
args.epochs, args.lr, args.momentum,
args.log_interval, args.restore_from, args.crash_iteration)
|
475616
|
import setuptools
def get_long_description():
with open('README.md', 'r') as f:
long_description = f.read()
return long_description
version = '0.0.1'
description = 'FSA/FST algorithms, intended to (eventually) be interoperable with PyTorch and similar'
setuptools.setup(
python_requires='>=3.6',
name='k2',
version=version,
author='<NAME>',
author_email='<EMAIL>',
description=description,
keywords='k2, FSA, FST',
long_description=get_long_description(),
long_description_content_type='text/markdown',
url='https://github.com/k2-fsa/k2',
package_dir={'': 'k2/python'},
packages=['k2'],
install_requires=['torch', 'graphviz'],
data_files=[('', ['LICENSE'])],
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3',
'Programming Language :: C++',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Operating System :: OS Independent',
],
)
|
475699
|
from django.apps import AppConfig
class SecateurConfig(AppConfig):
name = "secateur"
def ready(self) -> None:
import secateur.signals
return super().ready()
|
475704
|
import numpy as np
# Example taken from : http://cs231n.github.io/python-numpy-tutorial/#numpy
x = np.array([[1,2],[3,4]])
print(np.sum(x))          # Compute sum of all elements; prints "10"
print(np.sum(x, axis=0))  # Compute sum of each column; prints "[4 6]"
print(np.sum(x, axis=1))  # Compute sum of each row; prints "[3 7]"
|
475735
|
import atexit
def exit_with_exception(message):
raise RuntimeError(message)
atexit.register(exit_with_exception, 'Registered first')
atexit.register(exit_with_exception, 'Registered second')
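# Note: atexit runs handlers in LIFO order, so 'Registered second' raises first;
# every handler still gets a chance to run, and the last exception is re-raised at exit.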
|
475779
|
from visual_mpc.envs.base_env import BaseEnv
from .robosuite_wrappers.SawyerIKEnv import make_sawyer_env
import numpy as np
from robosuite.utils.transform_utils import mat2quat, rotation_matrix
low_bound = np.array([0.35, -0.2, 0.83, 0, -1])
high_bound = np.array([0.75, 0.2, 0.95, np.pi, 1])
start_rot = np.array([[-1., 0., 0.], [0., 1., 0.], [0., 0., -1.]])
class SawyerEnv(BaseEnv):
def __init__(self, env_params_dict, reset_state=None):
self._hp = self._default_hparams()
for name, value in env_params_dict.items():
print('setting param {} to value {}'.format(name, value))
self._hp.set_hparam(name, value)
self._env = make_sawyer_env({'num_objects': self._hp.num_objects})
self._adim, self._sdim = 5, 5
def _default_hparams(self):
parent_params = super()._default_hparams()
parent_params.add_hparam('substeps', 10)
parent_params.add_hparam('num_objects', 1)
return parent_params
def _init_dynamics(self):
self._previous_target_qpos = np.random.uniform(low_bound, high_bound)
self._previous_target_qpos[-1] = low_bound[-1] # gripper starts open
    def _next_qpos(self, action):
        # binarize the gripper command; the first four dims are deltas on the previous
        # target, while the gripper (last dim) is commanded absolutely
        if action[-1] > 0:
            action[-1] = 1
        else:
            action[-1] = -1
        return self._previous_target_qpos * [1., 1., 1., 1., 0.] + action
def _step(self, target_qpos):
o = None
delta_xyz = (target_qpos[:3] - self._eef_pos) / self._hp.substeps
for i in range(self._hp.substeps):
current_rot = self._env._right_hand_orn
pitch, roll, yaw = 0, 0, target_qpos[3]
drot1 = rotation_matrix(angle=-pitch, direction=[1., 0, 0], point=None)[:3, :3]
drot2 = rotation_matrix(angle=roll, direction=[0, 1., 0], point=None)[:3, :3]
drot3 = rotation_matrix(angle=yaw, direction=[0, 0, 1.], point=None)[:3, :3]
desired_rot = start_rot.dot(drot1.dot(drot2.dot(drot3)))
drotation = current_rot.T.dot(desired_rot)
dquat = mat2quat(drotation)
o = self._env.step(np.concatenate((delta_xyz, dquat, [target_qpos[-1]])))[0]
self._previous_target_qpos = target_qpos
return self._proc_obs(o)
def _proc_obs(self, env_obs):
self._eef_pos, self._eef_quat = env_obs['eef_pos'], env_obs['eef_quat']
return env_obs
def step(self, action):
target_qpos = np.clip(self._next_qpos(action), low_bound, high_bound)
return self._step(target_qpos)
def reset(self):
super().reset()
self._proc_obs(self._env.reset())
self._init_dynamics()
return self._step(self._previous_target_qpos), None
def valid_rollout(self):
return True
@property
def adim(self):
return self._adim
@property
def sdim(self):
return self._sdim
@property
def ncam(self):
return 2
@property
def num_objects(self):
return self._hp.num_objects
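# Hedged usage sketch ('env_params_dict' keys follow _default_hparams above):
# env = SawyerEnv({'num_objects': 1, 'substeps': 10})
# obs, _ = env.reset()
# obs = env.step(np.array([0.01, 0.0, 0.0, 0.0, -1.0]))  # small xyz delta, gripper open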
|
475793
|
import PyLidar3
import time # Time module
# Serial port to which the lidar is connected; on Windows, find it in Device Manager.
# On Linux, list candidates in a terminal with: ls /dev/tty*
port = input("Enter port name which lidar is connected:")  # windows, e.g. COM3
# port = "/dev/ttyUSB0"  # linux
Obj = PyLidar3.YdLidarX4(port)  # PyLidar3.your_version_of_lidar(port, chunk_size)
if Obj.Connect():
print(Obj.GetDeviceInfo())
gen = Obj.StartScanning()
t = time.time() # start time
while (time.time() - t) < 30: #scan for 30 seconds
print(next(gen))
time.sleep(0.5)
Obj.StopScanning()
Obj.Disconnect()
else:
print("Error connecting to device")
|