id | content
---|---
482618
|
import re
from netaddr import IPNetwork
try:
from stratosphere.utils import get_google_auth
except ImportError:
# Python2
from utils import get_google_auth
class ResourceValidators(object):
@classmethod
def regex_match(cls, regex, string):
RE = re.compile(regex)
if RE.match(string):
return True
return False
@staticmethod
def name(name):
return ResourceValidators.regex_match('^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$', name)
@staticmethod
def zone(zone):
return ResourceValidators.regex_match('^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$', zone)
@staticmethod
def base_instance_name(name):
return ResourceValidators.regex_match('^[a-z][-a-z0-9]{0,57}$', name)
@staticmethod
def ipAddress(network):
try:
IPNetwork(network)
return True
        except Exception:
            raise ValueError('Invalid CIDR - {}'.format(network))
@staticmethod
def is_valid_machine_type(_type):
return _type in [
'n1-standard-1',
'n1-standard-2',
'n1-standard-4',
'n1-standard-8',
'n1-standard-16',
'n1-standard-32',
'n1-highmem-2',
'n1-highmem-4',
'n1-highmem-8',
'n1-highmem-16',
'n1-highmem-32',
'n1-highcpu-2',
'n1-highcpu-4',
'n1-highcpu-8',
'n1-highcpu-16',
'n1-highcpu-32',
'f1-micro',
'g1-small',
]
@staticmethod
def is_url(value):
        if ResourceValidators.regex_match(r'^\$\(ref\..*\.selfLink\)$', value):
return True
if ResourceValidators.regex_match('^http.*$', value):
return True
return False
class ResourceNames(object):
"""
Provides some helper functions to consistently name things
"""
def __init__(self, project, env):
self.project = project
self.env = env
@property
def networkName(self):
return '{}-network'.format(self.env)
@property
def networkUrl(self):
return "projects/{}/global/networks/{}".format(self.project, self.networkName)
def subnetworkName(self, zone):
return '{}-{}-subnetwork'.format(self.env, zone)
def zone_to_region(self, zone):
"""Derives the region from a zone name."""
parts = zone.split('-')
if len(parts) < 2:
raise Exception('Cannot derive region from zone "%s"' % zone)
return '-'.join(parts[:2])
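
# A short usage sketch (the project/env/zone values below are illustrative only):
if __name__ == '__main__':
    assert ResourceValidators.name('my-instance-1')
    assert ResourceValidators.is_valid_machine_type('n1-standard-4')
    names = ResourceNames('my-project', 'staging')
    print(names.networkUrl)                       # projects/my-project/global/networks/staging-network
    print(names.subnetworkName('us-central1-a'))  # staging-us-central1-a-subnetwork
    print(names.zone_to_region('us-central1-a'))  # us-central1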
|
482619
|
import sys
sys.path.append("..")
# import tensorflow as tf  # only needed by create_placeholders(); tf.placeholder targets the TF 1.x API
import numpy as np
import time
from Featurize import *
import glob
from multiprocessing import Pool
def GetNodeMask(graph_size, max_size=None):
    if max_size is None:
max_size = np.max(graph_size)
return np.array([np.pad(np.ones([s, 1]), ((0, max_size-s), (0, 0)), 'constant', constant_values=(0)) for s in graph_size], dtype=np.float32)
class _create_placeholders(object):
def __init__(self, dataset):
input_attr_names = {'V', 'A', 'labels', 'tags', 'global_state', 'masks', 'graph_size', 'fingerprints'}
for name in dataset.__dict__:
if name in input_attr_names:
if len(dataset.__dict__[name].shape) == 1:
shape = [None]
self.__dict__[name] = tf.placeholder(tf.as_dtype(dataset.__dict__[name].dtype), shape=shape, name=name+'_input')
else:
shape = [None] + list(dataset.__dict__[name].shape[1:])
self.__dict__[name] = tf.placeholder(tf.as_dtype(dataset.__dict__[name].dtype), shape=shape, name=name+'_input')
class Dataset(object):
def __init__(self, table_dir, mol_blocks_dir='./Mol_Blocks.dir'):
try:
self.table = eval(open(table_dir).read())
except:
self.table = [i.strip().split('\t') for i in open(table_dir).readlines()]
self.mol_blocks = eval(open(mol_blocks_dir).read())
def _task(self, items):
array = {}
tag = items[3]
array.setdefault('tags',tag)
block1 = self.mol_blocks[items[0]]
block2 = self.mol_blocks[items[1]]
try:
c1 = Coformer(block1)
c2 = Coformer(block2)
cc = Cocrystal(c1, c2)
label = int(items[2])
array.setdefault('labels',label)
subgraph_size = np.array([c1.atom_number, c2.atom_number])
array.setdefault('subgraph_size', subgraph_size)
if self.Desc:
desc = cc.descriptors()
array.setdefault('global_state',desc)
if self.A_type:
A = cc.CCGraphTensor(t_type=self.A_type, hbond=self.hbond, pipi_stack=self.pipi_stack, contact=self.contact)
V = cc.VertexMatrix.feature_matrix()
array.setdefault('A', A)
array.setdefault('V', V)
if self.fp_type:
fp = cc.Fingerprints(fp_type=self.fp_type, nBits=self.nBits, radii=self.radii)
array.setdefault('fingerprints', fp)
return array
except:
print('Bad input sample:'+tag+', skipped.')
def _PreprocessData(self, max_graph_size=None):
graph_size = np.array([a.shape[0] for a in self.A]).astype(np.int32)
if max_graph_size:
largest_graph = max_graph_size
else:
largest_graph = max(graph_size)
graph_vertices = []
graph_adjacency = []
for i in range(len(self.V)):
graph_vertices.append(np.pad(self.V[i].astype(np.float32),
((0, largest_graph-self.V[i].shape[0]), (0, 0)),
'constant', constant_values=(0)))
new_A = self.A[i]
graph_adjacency.append(np.pad(new_A.astype(np.float32),
((0, largest_graph-new_A.shape[0]), (0, 0), (0, largest_graph-new_A.shape[0])),
'constant', constant_values=(0)))
self.V = np.stack(graph_vertices, axis=0)
self.A = np.stack(graph_adjacency, axis=0)
self.labels = self.labels.astype(np.int32)
if 'global_state' in self.__dict__:
self.global_state = self.global_state.astype(np.float32)
self.masks = GetNodeMask(graph_size, max_size=largest_graph)
self.graph_size = graph_size
self.subgraph_size = self.subgraph_size.astype(np.int32)
def make_graph_dataset(self, Desc=0, A_type='OnlyCovalentBond', hbond=0, pipi_stack=0, contact=0,
processes=15, max_graph_size=None, make_dataframe=True, save_name=None):
exception = {'BOVQUY','CEJPAK','GAWTON','GIPTAA','IDIGUY','LADBIB01','PIGXUY','SIFBIT','SOJZEW','TOFPOW',
'QOVZIK','RIJNEF','SIBFAK','SIBFEO','TOKGIJ','TOKGOP','TUQTEE','BEDZUF'}
self.Desc = Desc
self.A_type = A_type
self.hbond = hbond
self.pipi_stack = pipi_stack
self.contact = contact
self.fp_type = None
start = time.time()
pool = Pool(processes=processes)
D = pool.map(self._task, [items for items in self.table if items[-1] not in exception])
pool.close()
pool.join()
self.data_attr_names = D[0].keys()
attrs = self.__dict__
for k in self.data_attr_names:
attrs.setdefault(k, [])
for i in [j for j in D if j!=None]:
for j in i:
attrs[j].append(i[j])
for l in attrs:
if l in self.data_attr_names:
attrs[l] = np.array(attrs[l])
del D
self._PreprocessData(max_graph_size=max_graph_size)
if save_name:
if 'global_state' in self.__dict__:
np.savez(save_name, V=self.V, A=self.A, labels=self.labels, masks=self.masks, graph_size=self.graph_size, tags=self.tags, global_state=self.global_state, subgraph_size=self.subgraph_size)
else:
np.savez(save_name, V=self.V, A=self.A, labels=self.labels, masks=self.masks, graph_size=self.graph_size, tags=self.tags, subgraph_size=self.subgraph_size)
if make_dataframe:
self.dataframe = {}
for ix,tag in enumerate(self.tags):
self.dataframe.setdefault(tag, {})
self.dataframe[tag].setdefault('V', self.V[ix])
self.dataframe[tag].setdefault('A', self.A[ix])
self.dataframe[tag].setdefault('label', self.labels[ix])
if 'global_state' in self.__dict__:
self.dataframe[tag].setdefault('global_state', self.global_state[ix])
self.dataframe[tag].setdefault('tag', tag)
self.dataframe[tag].setdefault('mask', self.masks[ix])
self.dataframe[tag].setdefault('graph_size', self.graph_size[ix])
self.dataframe[tag].setdefault('subgraph_size', self.subgraph_size[ix])
del self.V, self.A, self.labels, self.masks, self.graph_size, self.tags
if 'global_state' in self.__dict__:
del self.global_state
if 'subgraph_size' in self.__dict__:
del self.subgraph_size
end = time.time()
t = round(end-start, 2)
print('Elapsed Time: '+str(t)+' s')
return None
end = time.time()
t = round(end-start, 2)
print('Elapsed Time: '+str(t)+' s')
def make_embedding_dataset(self, fp_type='ecfp', nBits=2048, processes=15, make_dataframe=True, radii=2, save_name=None):
exception = {'BOVQUY','CEJPAK','GAWTON','GIPTAA','IDIGUY','LADBIB01','PIGXUY','SIFBIT','SOJZEW','TOFPOW',
'QOVZIK','RIJNEF','SIBFAK','SIBFEO','TOKGIJ','TOKGOP','TUQTEE','BEDZUF'}
self.fp_type = fp_type
self.nBits = nBits
self.Desc = 0
self.A_type = 0
        self.radii = radii
start = time.time()
pool = Pool(processes=processes)
D = pool.map(self._task, [items for items in self.table if items[-1] not in exception])
D = [i for i in D if i != None]
pool.close()
pool.join()
self.data_attr_names = D[0].keys()
attrs = self.__dict__
for k in self.data_attr_names:
attrs.setdefault(k, [])
for i in D:
for j in i:
attrs[j].append(i[j])
for l in attrs:
if l in self.data_attr_names:
attrs[l] = np.array(attrs[l])
del D
if save_name:
np.savez(save_name, fingerprints=self.fingerprints, labels=self.labels, tags=self.tags)
if make_dataframe:
self.dataframe = {}
for ix,tag in enumerate(self.tags):
self.dataframe.setdefault(tag, {})
self.dataframe[tag].setdefault('fingerprints', self.fingerprints[ix])
self.dataframe[tag].setdefault('label', self.labels[ix])
self.dataframe[tag].setdefault('tag', tag)
del self.fingerprints, self.labels, self.tags
end = time.time()
t = round(end-start, 2)
print('Elapsed Time: '+str(t)+' s')
return self.dataframe
end = time.time()
t = round(end-start, 2)
print('Elapsed Time: '+str(t)+' s')
return self
def create_placeholders(self):
return _create_placeholders(self)
def _embedding_func(self, samples, dataframe):
embedding, labels, tags = [], [], []
for i in samples:
embedding.append(dataframe[i]['fingerprints'])
tags.append(dataframe[i]['tag'])
labels.append(int(dataframe[i]['label']))
data = [embedding, labels, tags]
return np.array(embedding, dtype=np.float32), np.array(labels), np.array(tags)
def _graph_func(self, samples, dataframe):
V, A, labels, tags, desc, graph_size, masks, subgraph_size = [], [], [], [], [], [], [], []
for i in samples:
V.append(dataframe[i]['V'])
A.append(dataframe[i]['A'])
labels.append(int(dataframe[i]['label']))
tags.append(dataframe[i]['tag'])
graph_size.append(dataframe[i]['graph_size'])
masks.append(dataframe[i]['mask'])
subgraph_size.append(dataframe[i]['subgraph_size'])
if self.Desc:
desc.append(dataframe[i]['global_state'])
if self.Desc:
data = [V, A, labels, masks, graph_size, tags, desc, subgraph_size]
return [np.array(i) for i in data]
else:
data = [V, A, labels, masks, graph_size, tags]
return [np.array(i) for i in data]
def split(self, train_samples=None, valid_samples=None, with_test=False, test_samples=None, with_fps=False):
self.train_samples = train_samples
self.valid_samples = valid_samples
self.with_test = with_test
if self.with_test:
self.test_samples = test_samples
if with_fps:
train_data = self._embedding_func(self.train_samples, self.dataframe)
valid_data = self._embedding_func(self.valid_samples, self.dataframe)
if self.with_test:
test_data = self._embedding_func(self.test_samples, self.dataframe)
return train_data, valid_data, test_data
else:
return train_data, valid_data
else:
train_data = self._graph_func(self.train_samples, self.dataframe)
valid_data = self._graph_func(self.valid_samples, self.dataframe)
if self.with_test:
test_data = self._graph_func(self.test_samples, self.dataframe)
return train_data, valid_data, test_data
else:
return train_data, valid_data
class DataLoader(Dataset):
def __init__(self, npz_file, make_df=True):
data = np.load(npz_file, allow_pickle=True)
for key in data:
self.__dict__[key] = data[key]
del data
if 'global_state' in self.__dict__:
self.Desc = True
if make_df:
self.dataframe = {}
for ix,tag in enumerate(self.tags):
self.dataframe.setdefault(tag, {})
self.dataframe[tag].setdefault('V', self.V[ix])
self.dataframe[tag].setdefault('A', self.A[ix])
self.dataframe[tag].setdefault('label', self.labels[ix])
if 'global_state' in self.__dict__:
self.dataframe[tag].setdefault('global_state', self.global_state[ix])
self.dataframe[tag].setdefault('tag', tag)
self.dataframe[tag].setdefault('mask', self.masks[ix])
self.dataframe[tag].setdefault('graph_size', self.graph_size[ix])
if 'subgraph_size' in self.__dict__:
self.dataframe[tag].setdefault('subgraph_size', self.subgraph_size[ix])
del self.V, self.A, self.labels, self.tags, self.masks, self.graph_size
if 'global_state' in self.__dict__:
del self.global_state
if 'subgraph_size' in self.__dict__:
del self.subgraph_size
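
# A rough usage sketch (the .npz name and the tag lists are placeholders, not
# real data; exact behaviour depends on which attributes were saved):
#
#   dataset = DataLoader('cocrystal_graphs.npz', make_df=True)
#   train_data, valid_data = dataset.split(train_samples=train_tags,
#                                          valid_samples=valid_tags)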
|
482629
|
import sys
import random
def banner(info=None, banner_len=60, sym="-"):
print()
if not info:
print(sym * banner_len)
else:
info = sym * ((banner_len - len(info)) // 2 - 1) + " " + info
info = info + " " + sym * (banner_len - len(info) - 1)
print(info)
print()
def evenly_divide(start, end, num_splits):
step = (end - start) / num_splits
return [start + step * i for i in range(num_splits + 1)]
def random_drop(lst):
lst.remove(lst[random.randrange(0, len(lst))])
return lst
def exclude_randrange(start, end, exclude):
result = random.randrange(start, end)
while result == exclude and end - start > 1:
result = random.randrange(start, end)
return result
def uniqify(ls):
non_empty_ls = list(filter(lambda x: x != "", ls))
return list(dict.fromkeys(non_empty_ls))
def size_split(sizes):
max_range = list(range(sum(sizes)))
splits = []
for i in range(len(sizes)):
start = sizes[i - 1] if i >= 1 else 0
end = sizes[i] + start
splits.append(max_range[start:end])
return splits
def batch_split(batch_size, max_num):
"""Split into equal parts of {batch_size} as well as the tail"""
if batch_size > max_num:
print("Fix the batch size to maximum number.")
batch_size = max_num
max_range = list(range(max_num))
num_splits = max_num // batch_size
num_splits = (
num_splits - 1 if max_num % batch_size == 0 else num_splits
) # for edge case, there will be an empty batch
splits = []
for i in range(num_splits + 1):
start = i * batch_size
end = min((i + 1) * batch_size, max_num) # dealing the tail part
splits.append(max_range[start:end])
assert len(splits) == num_splits + 1
return splits
def equal_split(num_splits, max_num):
"""Split into equal {num_splits} part as well as the tail"""
max_range = list(range(max_num))
interval_range = max_num // num_splits
splits = []
for i in range(num_splits + 1):
start = i * interval_range
end = min((i + 1) * interval_range, max_num) # dealing the tail part
splits.append(max_range[start:end])
assert len(splits) == num_splits + 1
return splits
def log_print(log_info, log_path: str):
"""Logging information"""
print(log_info)
with open(log_path, "a+") as f:
f.write(f"{log_info}\n")
# flush() is important for printing logs during multiprocessing
sys.stdout.flush()
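
# Worked examples of the splitting helpers (index lists only, no external data):
if __name__ == "__main__":
    print(batch_split(3, 10))      # [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
    print(equal_split(3, 10))      # [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
    print(evenly_divide(0, 1, 4))  # [0.0, 0.25, 0.5, 0.75, 1.0]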
|
482659
|
import uuid
import logging
from e2e_tests.aws_lambda.utils import (
send_test_data_to_endpoint,
run_lambda_create_or_update_command,
)
from e2e_tests.cli_operations import delete_deployment
logger = logging.getLogger('bentoml.test')
def test_aws_lambda_update_deployment(basic_bentoservice_v1, basic_bentoservice_v2):
random_hash = uuid.uuid4().hex[:6]
deployment_name = f'tests-lambda-update-{random_hash}'
create_deployment_command = [
'bentoml',
'lambda',
'deploy',
deployment_name,
'-b',
basic_bentoservice_v1,
'--region',
'us-west-2',
'--verbose',
]
try:
deployment_success, deployment_endpoint = run_lambda_create_or_update_command(
create_deployment_command
)
        assert deployment_success, "AWS Lambda deployment creation should succeed"
        assert deployment_endpoint, "AWS Lambda deployment should have endpoint"
        status_code, content = send_test_data_to_endpoint(deployment_endpoint)
        assert status_code == 200, "prediction request should succeed"
        assert content == '"cat"', "prediction result mismatch"
update_deployment_command = [
'bentoml',
'lambda',
'update',
deployment_name,
'-b',
basic_bentoservice_v2,
'--verbose',
]
(
update_deployment_success,
update_deployment_endpoint,
) = run_lambda_create_or_update_command(update_deployment_command)
        assert (
            update_deployment_success
        ), "AWS Lambda deployment update should succeed"
        assert update_deployment_endpoint, "AWS Lambda deployment should have endpoint"
        status_code, content = send_test_data_to_endpoint(deployment_endpoint)
        assert status_code == 200, "Updated prediction request should succeed"
        assert content == '"dog"', "Updated prediction result mismatch"
finally:
delete_deployment('lambda', deployment_name)
|
482692
|
import numpy as np
import readsnap
import readsubf
import sys
import time
import random
###############################################################################
#this function returns an array containing the positions of the galaxies (kpc/h)
#in the catalogue according to the fiducial density, M1 and alpha
#CDM halos with masses within [min_mass,max_mass], are populated
#with galaxies. The IDs and positions of the CDM particles belonging to the
#different groups are read from the snapshots
#If one needs to create many catalogues, this function is not appropriate,
#since it wastes a lot of time reading the snapshots and sorting the IDs
#min_mass and max_mass are in units of Msun/h, not 1e10 Msun/h
#mass_criteria: definition of the halo virial radius -- 't200' 'm200' 'c200'
#fiducial_density: galaxy number density to be reproduced, in (h/Mpc)^3
def hod(snapshot_fname,groups_fname,groups_number,min_mass,max_mass,
fiducial_density,M1,alpha,mass_criteria,verbose=False):
thres=1e-3 #controls the max relative error to accept a galaxy density
#read the header and obtain the boxsize
head=readsnap.snapshot_header(snapshot_fname)
BoxSize=head.boxsize #BoxSize in kpc/h
#read positions and IDs of DM particles: sort the IDs array
DM_pos=readsnap.read_block(snapshot_fname,"POS ",parttype=-1) #kpc/h
DM_ids=readsnap.read_block(snapshot_fname,"ID ",parttype=-1)-1
sorted_ids=DM_ids.argsort(axis=0)
#the particle whose ID is N is located in the position sorted_ids[N]
#i.e. DM_ids[sorted_ids[N]]=N
#the position of the particle whose ID is N would be:
#DM_pos[sorted_ids[N]]
#read the IDs of the particles belonging to the CDM halos
halos_ID=readsubf.subf_ids(groups_fname,groups_number,0,0,
long_ids=True,read_all=True)
IDs=halos_ID.SubIDs-1
del halos_ID
#read CDM halos information
halos=readsubf.subfind_catalog(groups_fname,groups_number,
group_veldisp=True,masstab=True,
long_ids=True,swap=False)
if mass_criteria=='t200':
halos_mass=halos.group_m_tophat200*1e10 #masses in Msun/h
halos_radius=halos.group_r_tophat200 #radius in kpc/h
elif mass_criteria=='m200':
halos_mass=halos.group_m_mean200*1e10 #masses in Msun/h
halos_radius=halos.group_r_mean200 #radius in kpc/h
elif mass_criteria=='c200':
halos_mass=halos.group_m_crit200*1e10 #masses in Msun/h
halos_radius=halos.group_r_crit200 #radius in kpc/h
else:
print('bad mass_criteria')
sys.exit()
halos_pos=halos.group_pos #positions in kpc/h
halos_len=halos.group_len
halos_offset=halos.group_offset
halos_indexes=np.where((halos_mass>min_mass) & (halos_mass<max_mass))[0]
del halos
if verbose:
print(' ')
print('total halos found=',halos_pos.shape[0])
print('halos number density=',len(halos_pos)/(BoxSize*1e-3)**3)
#keep only the halos in the given mass range
halo_mass=halos_mass[halos_indexes]
halo_pos=halos_pos[halos_indexes]
halo_radius=halos_radius[halos_indexes]
halo_len=halos_len[halos_indexes]
halo_offset=halos_offset[halos_indexes]
del halos_indexes
##### COMPUTE Mmin GIVEN M1 & alpha #####
i=0; max_iterations=20 #maximum number of iterations
Mmin1=min_mass; Mmin2=max_mass
while (i<max_iterations):
Mmin=0.5*(Mmin1+Mmin2) #estimation of the HOD parameter Mmin
total_galaxies=0
inside=np.where(halo_mass>Mmin)[0] #take all galaxies with M>Mmin
mass=halo_mass[inside] #only halos with M>Mmin have central/satellites
total_galaxies=mass.shape[0]+np.sum((mass/M1)**alpha)
mean_density=total_galaxies*1.0/(BoxSize*1e-3)**3 #galaxies/(Mpc/h)^3
if (np.absolute((mean_density-fiducial_density)/fiducial_density)<thres):
i=max_iterations
elif (mean_density>fiducial_density):
Mmin1=Mmin
else:
Mmin2=Mmin
i+=1
if verbose:
print(' ')
print('Mmin=',Mmin)
print('average number of galaxies=',total_galaxies)
print('average galaxy density=',mean_density)
#########################################
#just halos with M>Mmin; the rest do not host central/satellite galaxies
inside=np.where(halo_mass>Mmin)[0]
halo_mass=halo_mass[inside]
halo_pos=halo_pos[inside]
halo_radius=halo_radius[inside]
halo_len=halo_len[inside]
halo_offset=halo_offset[inside]
del inside
#compute number of satellites in each halo using the Poisson distribution
N_mean_sat=(halo_mass/M1)**alpha #mean number of satellites
N_sat=np.empty(len(N_mean_sat),dtype=np.int32)
for i in range(len(N_sat)):
N_sat[i]=np.random.poisson(N_mean_sat[i])
N_tot=np.sum(N_sat)+len(halo_mass) #total number of galaxies in the catalogue
if verbose:
print(' ')
print(np.min(halo_mass),'< M_halo <',np.max(halo_mass))
print('total number of galaxies=',N_tot)
print('galaxy number density=',N_tot/(BoxSize*1e-3)**3)
#put satellites following the distribution of dark matter in groups
if verbose:
print(' ')
print('Creating mock catalogue ...',)
pos_galaxies=np.empty((N_tot,3),dtype=np.float32)
    #index: variable that goes through halos (there may be several galaxies per halo)
    #i: variable that goes through all (central/satellite) galaxies
    #count: counts the galaxies that lie beyond their host halo virial radius
index=0; count=0; i=0
while (index<halo_mass.shape[0]):
position=halo_pos[index] #position of the DM halo
radius=halo_radius[index] #radius of the DM halo
#save the position of the central galaxy
pos_galaxies[i]=position; i+=1
#if halo contains satellites, save their positions
Nsat=N_sat[index]
if Nsat>0:
offset=halo_offset[index]
length=halo_len[index]
idss=sorted_ids[IDs[offset:offset+length]]
#compute the distances to the halo center keeping those with R<Rvir
pos=DM_pos[idss] #positions of the particles belonging to the halo
posc=pos-position
            #this is to correctly populate halos close to the box boundaries
if np.any((position+radius>BoxSize) + (position-radius<0.0)):
inside=np.where(posc[:,0]>BoxSize/2.0)[0]
posc[inside,0]-=BoxSize
inside=np.where(posc[:,0]<-BoxSize/2.0)[0]
posc[inside,0]+=BoxSize
inside=np.where(posc[:,1]>BoxSize/2.0)[0]
posc[inside,1]-=BoxSize
inside=np.where(posc[:,1]<-BoxSize/2.0)[0]
posc[inside,1]+=BoxSize
inside=np.where(posc[:,2]>BoxSize/2.0)[0]
posc[inside,2]-=BoxSize
inside=np.where(posc[:,2]<-BoxSize/2.0)[0]
posc[inside,2]+=BoxSize
radii=np.sqrt(posc[:,0]**2+posc[:,1]**2+posc[:,2]**2)
inside=np.where(radii<radius)[0]
selected=random.sample(inside,Nsat)
pos=pos[selected]
            #additional, not essential check; can be commented out
posc=pos-position
if np.any((posc>BoxSize/2.0) + (posc<-BoxSize/2.0)):
inside=np.where(posc[:,0]>BoxSize/2.0)[0]
posc[inside,0]-=BoxSize
inside=np.where(posc[:,0]<-BoxSize/2.0)[0]
posc[inside,0]+=BoxSize
inside=np.where(posc[:,1]>BoxSize/2.0)[0]
posc[inside,1]-=BoxSize
inside=np.where(posc[:,1]<-BoxSize/2.0)[0]
posc[inside,1]+=BoxSize
inside=np.where(posc[:,2]>BoxSize/2.0)[0]
posc[inside,2]-=BoxSize
inside=np.where(posc[:,2]<-BoxSize/2.0)[0]
posc[inside,2]+=BoxSize
r_max=np.max(np.sqrt(posc[:,0]**2+posc[:,1]**2+posc[:,2]**2))
if r_max>radius: #check no particles beyond Rv selected
print(position)
print(radius)
print(pos)
count+=1
for j in range(Nsat):
pos_galaxies[i]=pos[j]; i+=1
index+=1
if verbose:
print('done')
#some final checks
if i!=N_tot:
print('some galaxies missing:')
print('register',i,'galaxies out of',N_tot)
if count>0:
print('error:',count,'particles beyond the virial radius selected')
return pos_galaxies
###############################################################################
#This class is equivalent to the function above, except that reading the
#snapshot, reading the halos and sorting the IDs are not performed here. It is
#best suited when many galaxy catalogues need to be created: for example, when
#iterating among M1 and alpha trying to find the best combination that reproduces the measured wp(r)
#VARIABLES:
#DM_pos: array containing the positions of the CDM particles
#DM_vel: array containing the velocities of the CDM particles
#sorted_ids: array containing the positions of the IDs in the snapshots.
#sorted_ids[N] gives the position where the particle whose ID is N is located
#IDs:IDs array as read from the subfind ID file
#halo_mass: array containing the masses of the CDM halos in the mass interval
#halo_pos: array containing the positions of the CDM halos in the mass interval
#halo_vel: array with the velocities of the CDM halos in the mass interval
#halo_radius: array containing the radii of the CDM halos in the mass interval
#halo_len: array containing the len of the CDM halos in the mass interval
#halo_offset: array containing the offset of the CDM halos in the mass interval
#BoxSize: Size of the simulation Box. In Mpc/h
#fiducial_density: galaxy number density to be reproduced, in (h/Mpc)^3
#model: 'standard' or 'LBG'. The former is the standard one, whereas the latter
#is the LBG HOD model, which does not assign central galaxies to the halos
#NEW!!! In the latest version this routine requires both the positions and
#velocities of the CDM particles. It will also return the galaxy velocities
class hod_fast:
def __init__(self,DM_pos,DM_vel,sorted_ids,IDs,halo_mass,halo_pos,halo_vel,
halo_radius,halo_len,halo_offset,BoxSize,min_mass,max_mass,
fiducial_density,M1,alpha,seed,model='standard',
verbose=False):
problematic_cases=0 #number of problematic cases (halos with Rvir=0 ...)
thres=1e-3 #controls the max relative error to accept a galaxy density
##### COMPUTE Mmin GIVEN M1 & alpha #####
i=0; max_iterations=20 #maximum number of iterations
Mmin1=min_mass; Mmin2=max_mass
while (i<max_iterations):
#estimation of the HOD parameter Mmin
Mmin=10**(0.5*(np.log10(Mmin1)+np.log10(Mmin2)))
total_galaxies=0
#only halos with M>Mmin have central/satellites
inside=np.where(halo_mass>Mmin)[0]; mass=halo_mass[inside]
if model=='standard': #central + satellites
total_galaxies=mass.shape[0]+np.sum((mass/M1)**alpha)
elif model=='LBG': #only satellites
total_galaxies=np.sum((mass/M1)**alpha)
else:
print('incorrect model'); sys.exit()
mean_density=total_galaxies*1.0/BoxSize**3
if (np.absolute((mean_density-fiducial_density)/fiducial_density)\
<thres):
i=max_iterations
elif (mean_density>fiducial_density):
Mmin1=Mmin
else:
Mmin2=Mmin
i+=1
self.Mmin=Mmin
if verbose:
print('\nMmin = %.3e'%Mmin);
print('average number of galaxies =',total_galaxies)
print('average galaxy density = %.3e galaxies/(Mpc/h)^3'\
%mean_density)
#########################################
#just halos with M>Mmin; the rest do not host central/satellite galaxies
inside=np.where(halo_mass>Mmin)[0]
halo_mass = halo_mass[inside]; halo_pos = halo_pos[inside]
halo_radius = halo_radius[inside]; halo_len = halo_len[inside]
halo_offset = halo_offset[inside]; del inside
#compute the # of satellites in each halo with the Poisson distribution
np.random.seed(seed) #this is just to check convergence on w_p(r_p)
N_mean_sat=(halo_mass/M1)**alpha #mean number of satellites
N_sat=np.empty(len(N_mean_sat),dtype=np.int32)
for i in range(len(N_sat)):
N_sat[i]=np.random.poisson(N_mean_sat[i])
#total number of galaxies in the catalogue
if model=='standard': #central + satellites
N_tot=np.sum(N_sat)+len(halo_mass)
elif model=='LBG': #only satellites
N_tot=np.sum(N_sat)
self.galaxy_density=N_tot*1.0/BoxSize**3
if verbose:
print('\n%.3e < M_halo < %.3e'%(np.min(halo_mass),np.max(halo_mass)))
print('total number of galaxies =',N_tot)
print('galaxy number density = %.3e'%(N_tot/BoxSize**3))
#put satellites following the distribution of dark matter in groups
if verbose:
print('\nCreating mock catalogue ...',)
pos_galaxies=np.empty((N_tot,3),dtype=np.float32)
vel_galaxies=np.empty((N_tot,3),dtype=np.float32)
        #index: variable that goes through halos (can be several galaxies per halo)
        #i: variable that goes through galaxies
        #count: counts the galaxies that lie beyond their host halo virial radius
random.seed(seed) #this is just to check convergence on w_p(r_p)
index=0; count=0; i=0
while (index<halo_mass.size):
position=halo_pos[index] #position of the DM halo
velocity=halo_vel[index] #velocity of the DM halo
radius=halo_radius[index] #radius of the DM halo
#save the position of the central galaxy
if model=='standard':
pos_galaxies[i]=position; vel_galaxies[i]=velocity; i+=1
#if halo contains satellites, save their positions
Nsat=N_sat[index]
if Nsat>0:
offset=halo_offset[index]; length=halo_len[index]
idss=sorted_ids[IDs[offset:offset+length]]
#compute the radius of the particles and keep those with R<Rvir
pos=DM_pos[idss]; posc=pos-position; vel=DM_vel[idss]
                #this is to correctly populate halos close to the box boundaries
if np.any((position+radius>BoxSize) + (position-radius<0.0)):
inside=np.where(posc[:,0]>BoxSize/2.0)[0]
posc[inside,0]-=BoxSize
inside=np.where(posc[:,0]<-BoxSize/2.0)[0]
posc[inside,0]+=BoxSize
inside=np.where(posc[:,1]>BoxSize/2.0)[0]
posc[inside,1]-=BoxSize
inside=np.where(posc[:,1]<-BoxSize/2.0)[0]
posc[inside,1]+=BoxSize
inside=np.where(posc[:,2]>BoxSize/2.0)[0]
posc[inside,2]-=BoxSize
inside=np.where(posc[:,2]<-BoxSize/2.0)[0]
posc[inside,2]+=BoxSize
radii=np.sqrt(posc[:,0]**2+posc[:,1]**2+posc[:,2]**2)
inside=np.where(radii<radius)[0]
if len(inside)<Nsat:
problematic_cases+=1
print('problematic case',len(inside),Nsat)
else:
selected=random.sample(inside,Nsat)
pos=pos[selected]; vel=vel[selected]
                    #additional, not essential check; can be commented out
#posc=pos-position
#if np.any((posc>BoxSize/2.0) + (posc<-BoxSize/2.0)):
# inside=np.where(posc[:,0]>BoxSize/2.0)[0]
# posc[inside,0]-=BoxSize
# inside=np.where(posc[:,0]<-BoxSize/2.0)[0]
# posc[inside,0]+=BoxSize
# inside=np.where(posc[:,1]>BoxSize/2.0)[0]
# posc[inside,1]-=BoxSize
# inside=np.where(posc[:,1]<-BoxSize/2.0)[0]
# posc[inside,1]+=BoxSize
# inside=np.where(posc[:,2]>BoxSize/2.0)[0]
# posc[inside,2]-=BoxSize
# inside=np.where(posc[:,2]<-BoxSize/2.0)[0]
# posc[inside,2]+=BoxSize
#r_max=np.max(np.sqrt(posc[:,0]**2+posc[:,1]**2+posc[:,2]**2))
#if r_max>radius: #check no particles beyond Rv selected
# print position
# print radius
# print pos
# count+=1
for j in range(Nsat):
pos_galaxies[i]=pos[j]; vel_galaxies[i]=vel[j]; i+=1
index+=1
if verbose:
print('done')
#some final checks
if i!=N_tot:
print('some galaxies missing:\nregister %d galaxies out of %d'\
%(i,N_tot))
if count>0:
print('error:',count,'particles beyond the virial radius selected')
self.pos_galaxies=pos_galaxies
self.vel_galaxies=vel_galaxies
###############################################################################
##### example of use #####
"""
snapshot_fname='/data1/villa/b500p512nu0.6z99np1024tree/snapdir_017/snap_017'
groups_fname='/home/villa/data1/b500p512nu0.6z99np1024tree'
groups_number=17
### HALO CATALOGUE PARAMETERS ###
mass_criteria='t200'
min_mass=2e12 #Msun/h
max_mass=2e15 #Msun/h
### HOD PARAMETERS ###
fiducial_density=0.00111 #mean number density for galaxies with Mr<-21
M1=8e13
alpha=1.4
pos=hod(snapshot_fname,groups_fname,groups_number,min_mass,max_mass,fiducial_density,M1,alpha,mass_criteria,verbose=True)
print(pos)
"""
|
482699
|
from pybacktest.backtest import Backtest
from pybacktest.optimizer import Optimizer
from pybacktest import performance
from pybacktest.data import load_from_yahoo
from pybacktest.ami_funcs import *
from pybacktest.verification import iter_verify, verify
from pybacktest.production import check_position_change
|
482702
|
import unittest
from katas.kyu_7.chessboard import chessboard
class ChessboardTestCase(unittest.TestCase):
def test_equal_1(self):
self.assertEqual(chessboard('2 2'), '*.\n.*')
def test_equal_2(self):
self.assertEqual(chessboard('5 2'), '*.\n.*\n*.\n.*\n*.')
def test_equal_3(self):
self.assertEqual(chessboard('7 7'),
'*.*.*.*\n.*.*.*.\n*.*.*.*\n'
'.*.*.*.\n*.*.*.*\n.*.*.*.\n*.*.*.*')
def test_equal_4(self):
self.assertEqual(chessboard('8 8'),
'*.*.*.*.\n.*.*.*.*\n*.*.*.*.\n.*.*.*.*\n'
'*.*.*.*.\n.*.*.*.*\n*.*.*.*.\n.*.*.*.*')
def test_equal_5(self):
self.assertEqual(chessboard('17 0'), '')
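
# A minimal reference implementation consistent with the expectations above
# (hypothetical; the real katas.kyu_7.chessboard module may differ):
def _reference_chessboard(dimensions):
    rows, cols = (int(n) for n in dimensions.split())
    if rows == 0 or cols == 0:
        return ''
    return '\n'.join(
        ''.join('*' if (r + c) % 2 == 0 else '.' for c in range(cols))
        for r in range(rows)
    )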
|
482761
|
from PIL import Image
import numpy as np
import time
import numbers
class vec3():
def __init__(self, x, y, z):
(self.x, self.y, self.z) = (x, y, z)
def __mul__(self, other):
return vec3(self.x * other, self.y * other, self.z * other)
def __add__(self, other):
return vec3(self.x + other.x, self.y + other.y, self.z + other.z)
def __sub__(self, other):
return vec3(self.x - other.x, self.y - other.y, self.z - other.z)
def dot(self, other):
return (self.x * other.x) + (self.y * other.y) + (self.z * other.z)
def __abs__(self):
return self.dot(self)
def norm(self):
mag = np.sqrt(abs(self))
return self * (1.0 / np.where(mag == 0, 1, mag))
def components(self):
return (self.x, self.y, self.z)
rgb = vec3
(w, h) = (512, 512)
x = np.tile(np.linspace(-1, 1, w), h)
y = np.repeat(np.linspace(-1, 1, h), w)
if 0:
c = np.sqrt(.5)
    print('c', c)
def norm(v):
# v -= min(v)
v /= max(v)
return v
gx = norm(np.exp(-(x*x) / (2 * c ** 2)))
gy = norm(np.exp(-(y*y) / (2 * c ** 2)))
g = gx * gy
else:
g = ((x*x) + (y*y))
if 1:
g = g / 2
else:
g = np.sqrt(g) / 1.4
g = 1 - g
print(min(g), max(g))
print(g[256])
c0 = rgb(1, 1, 0)
c1 = rgb(0, 0, 1)
color = (c0 * g) + c1 * (1 - g)
# color.y = np.where(g > 0.5, 0.0, 1.0)
rgb = [Image.fromarray((255 * c.reshape((h, w))).astype(np.uint8), "L") for c in color.components()]
Image.merge("RGB", rgb).save("fig.png")
|
482763
|
import py
import pytest
from xprocess import XProcess
def getrootdir(config):
return config.cache.makedir(".xprocess")
def pytest_addoption(parser):
group = parser.getgroup(
"xprocess", "managing external processes across test-runs [xprocess]"
)
group.addoption("--xkill", action="store_true", help="kill all external processes")
group.addoption(
"--xshow", action="store_true", help="show status of external process"
)
def pytest_cmdline_main(config):
xkill = config.option.xkill
xshow = config.option.xshow
if xkill or xshow:
config._do_configure()
tw = py.io.TerminalWriter()
rootdir = getrootdir(config)
xprocess = XProcess(config, rootdir)
if xkill:
return xprocess._xkill(tw)
if xshow:
return xprocess._xshow(tw)
@pytest.fixture(scope="session")
def xprocess(request):
"""yield session-scoped XProcess helper to manage long-running
processes required for testing."""
rootdir = getrootdir(request.config)
with XProcess(request.config, rootdir) as xproc:
# pass in xprocess object into pytest_unconfigure
# through config for proper cleanup during teardown
request.config._xprocess = xproc
yield xproc
@pytest.mark.hookwrapper
def pytest_runtest_makereport(item, call):
logfiles = getattr(item.config, "_extlogfiles", None)
    outcome = yield
    report = outcome.get_result()
if logfiles is None:
return
for name in sorted(logfiles):
content = logfiles[name].read()
if content:
longrepr = getattr(report, "longrepr", None)
if hasattr(longrepr, "addsection"): # pragma: no cover
longrepr.addsection("%s log" % name, content)
def pytest_unconfigure(config):
try:
xprocess = config._xprocess
except AttributeError:
# xprocess fixture was not used
pass
else:
xprocess._clean_up_resources()
print(
"pytest-xprocess reminder::Be sure to terminate the started process by running "
"'pytest --xkill' if you have not explicitly done so in your fixture with "
"'xprocess.getinfo(<process_name>).terminate()'."
)
def pytest_configure(config):
config.pluginmanager.register(InterruptionHandler())
class InterruptionHandler:
"""The purpose of this class is exposing the
config object containing references necessary
to properly clean-up in the event of an exception
during test runs"""
def pytest_configure(self, config):
self.config = config
def info_objects(self):
return self.config._xprocess._info_objects
def interruption_clean_up(self):
try:
xprocess = self.config._xprocess
except AttributeError:
pass
else:
for info, terminate_on_interrupt in self.info_objects():
if terminate_on_interrupt:
info.terminate()
xprocess._clean_up_resources()
def pytest_keyboard_interrupt(self, excinfo):
self.interruption_clean_up()
def pytest_internalerror(self, excrepr, excinfo):
self.interruption_clean_up()
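
# Typical usage in a test module looks roughly like this (sketch only; the
# process name, readiness pattern and command are placeholders):
#
#   from xprocess import ProcessStarter
#
#   def test_server(xprocess):
#       class Starter(ProcessStarter):
#           pattern = "server ready"
#           args = ["python", "-m", "myserver"]
#
#       xprocess.ensure("myserver", Starter)
#       ...  # talk to the running process
#       xprocess.getinfo("myserver").terminate()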
|
482772
|
import numpy as np
import pandas as pd
import pytest
from anndata import AnnData
from anndata.tests.helpers import assert_equal
def test_uns_color_subset():
# Tests for https://github.com/theislab/anndata/issues/257
obs = pd.DataFrame(
{
"cat1": pd.Categorical(list("aabcd")),
"cat2": pd.Categorical(list("aabbb")),
},
index=[f"cell{i}" for i in range(5)],
)
# If number of categories does not match number of colors, they should be reset
wrong_color_length_adata = AnnData(
np.ones((5, 5)),
obs=obs,
uns={
"cat1_colors": ["red", "green", "blue"],
"cat2_colors": ["red", "green", "blue"],
},
)
v = wrong_color_length_adata[:, [0, 1]]
assert "cat1_colors" not in v.uns
assert "cat2_colors" not in v.uns
    # Otherwise the colors should still match after resetting
cat1_colors = np.array(["red", "green", "blue", "yellow"], dtype=object)
adata = AnnData(np.ones((5, 5)), obs=obs, uns={"cat1_colors": cat1_colors.copy()})
for color, idx in [("red", [0, 1]), ("green", [2]), ("blue", [3]), ("yellow", [4])]:
v = adata[idx, :]
assert len(v.uns["cat1_colors"]) == 1
assert v.uns["cat1_colors"][0] == color
c = v.copy()
assert_equal(v.uns, c.uns, elem_name="uns")
with pytest.raises(AssertionError):
assert_equal(adata.uns, c.uns, elem_name="uns")
# But original object should not change
assert list(adata.uns["cat1_colors"]) == list(cat1_colors)
|
482801
|
import threading
import time
# create a mutable object that is shared among threads
class Shared:
val = 1
def func():
y = Shared.val
time.sleep(0.00001)
y += 1
Shared.val = y
threads = []
for i in range(99):
thread = threading.Thread(target=func)
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
print(Shared.val)
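
# The read-modify-write in func() is not atomic, so with enough threads the
# printed value is often less than 100. A minimal sketch of the fix is to
# guard the update with a lock (not part of the original demo):
lock = threading.Lock()

def func_safe():
    with lock:
        Shared.val += 1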
|
482805
|
from django.test import TestCase
class TestWebpackStatusLoader(TestCase):
def test_loader_output(self):
from .utils import get_clean_bundle
print(get_clean_bundle())
|
482908
|
import os
# Try running on CPU
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import numpy as np
import cv2
from keras.models import load_model
R = 2 ** 4
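# R is presumably the total downsampling factor of the network, so below the
# input is resized to side lengths that are multiples of 16 (an assumption
# based on how R is used; the model architecture itself is not shown here).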
MODEL_NAME = './model1.h5'
model = load_model(MODEL_NAME)
model.summary()
for root, dirs, files in os.walk('./input', topdown=False):
for name in files:
print(os.path.join(root, name))
im = cv2.imread(os.path.join(root, name), cv2.IMREAD_GRAYSCALE)
im_predict = cv2.resize(im, (im.shape[1] // R * R, im.shape[0] // R * R))
im_predict = np.reshape(im_predict, (1, im_predict.shape[0], im_predict.shape[1], 1))
im_predict = im_predict.astype(np.float32) / 255.
result = model.predict(im_predict)
im_res = cv2.resize(result[0] * 255., (im.shape[1], im.shape[0]))
cv2.imwrite(os.path.join('./output', name), im_res)
|
482927
|
from django.forms.models import BaseInlineFormSet, inlineformset_factory
from django.utils.translation import ugettext_lazy as _
from publishing.utils.forms import is_empty_form, is_form_persisted
from .models import Publisher, Book, BookImage
# The formset for editing the BookImages that belong to a Book.
BookImageFormset = inlineformset_factory(
Book,
BookImage,
fields=('image', 'alt_text'),
extra=1)
class BaseBooksWithImagesFormset(BaseInlineFormSet):
"""
The base formset for editing Books belonging to a Publisher, and the
BookImages belonging to those Books.
"""
def add_fields(self, form, index):
super().add_fields(form, index)
# Save the formset for a Book's Images in the nested property.
form.nested = BookImageFormset(
instance=form.instance,
data=form.data if form.is_bound else None,
files=form.files if form.is_bound else None,
prefix='bookimage-%s-%s' % (
form.prefix,
BookImageFormset.get_default_prefix()),
)
def is_valid(self):
"""
Also validate the nested formsets.
"""
result = super().is_valid()
if self.is_bound:
for form in self.forms:
if hasattr(form, 'nested'):
result = result and form.nested.is_valid()
return result
def clean(self):
"""
If a parent form has no data, but its nested forms do, we should
return an error, because we can't save the parent.
For example, if the Book form is empty, but there are Images.
"""
super().clean()
for form in self.forms:
if not hasattr(form, 'nested') or self._should_delete_form(form):
continue
if self._is_adding_nested_inlines_to_empty_form(form):
form.add_error(
field=None,
error=_('You are trying to add image(s) to a book which '
'does not yet exist. Please add information '
'about the book and choose the image file(s) again.'))
def save(self, commit=True):
"""
Also save the nested formsets.
"""
result = super().save(commit=commit)
for form in self.forms:
if hasattr(form, 'nested'):
if not self._should_delete_form(form):
form.nested.save(commit=commit)
return result
def _is_adding_nested_inlines_to_empty_form(self, form):
"""
Are we trying to add data in nested inlines to a form that has no data?
e.g. Adding Images to a new Book whose data we haven't entered?
"""
if not hasattr(form, 'nested'):
# A basic form; it has no nested forms to check.
return False
if is_form_persisted(form):
# We're editing (not adding) an existing model.
return False
if not is_empty_form(form):
# The form has errors, or it contains valid data.
return False
# All the inline forms that aren't being deleted:
non_deleted_forms = set(form.nested.forms).difference(
set(form.nested.deleted_forms)
)
# At this point we know that the "form" is empty.
# In all the inline forms that aren't being deleted, are there any that
# contain data? Return True if so.
return any(not is_empty_form(nested_form) for nested_form in non_deleted_forms)
# This is the formset for the Books belonging to a Publisher and the
# BookImages belonging to those Books.
#
# You'd use this by passing in a Publisher:
# PublisherBooksWithImagesFormset(**form_kwargs, instance=self.object)
PublisherBooksWithImagesFormset = inlineformset_factory(
Publisher,
Book,
formset=BaseBooksWithImagesFormset,
# We need to specify at least one Book field:
fields=('title',),
extra=1,
# If you don't want to be able to delete Publishers:
#can_delete=False
)
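
# A hypothetical view sketch showing one way to wire this up (the view name,
# model fields and template are assumptions, not part of this module):
#
#   from django.views.generic import UpdateView
#
#   class PublisherUpdateView(UpdateView):
#       model = Publisher
#       fields = ('name',)
#       template_name = 'publisher_form.html'
#
#       def get_context_data(self, **kwargs):
#           context = super().get_context_data(**kwargs)
#           context['books_formset'] = PublisherBooksWithImagesFormset(
#               self.request.POST or None,
#               self.request.FILES or None,
#               instance=self.object,
#           )
#           return context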
|
482929
|
from youseedee import ucd_data
from .BaseShaper import BaseShaper
import re
from fontFeatures.shaperLib.Buffer import BufferItem
from fontFeatures.shaperLib.VowelConstraints import preprocess_text_vowel_constraints
from .IndicShaperData import script_config, syllabic_category_map, syllable_machine_indic, IndicPositionalCategory2IndicPosition, IndicPosition, reassign_category_and_position_indic
from .SyllabicShaper import SyllabicShaper
import unicodedata
DOTTED_CIRCLE = 0x25CC
class IndicShaper(SyllabicShaper):
syllable_machine = syllable_machine_indic
syllable_types = ["consonant_syllable", "vowel_syllable", "standalone_cluster","symbol_cluster","broken_cluster","other"]
@property
def config(self):
return script_config.get(self.buffer.script, script_config["Invalid"])
def override_features(self, shaper):
shaper.disable_feature("liga")
def consonant_position_from_face(self, consonant):
virama = self.config["virama"]
consonant_item = BufferItem.new_unicode(consonant)
virama_item = BufferItem.new_unicode(virama)
consonant_item.map_to_glyph(self.buffer.font)
virama_item.map_to_glyph(self.buffer.font)
if self.would_substitute("blwf", [virama_item, consonant_item]):
return IndicPosition.BELOW_C
if self.would_substitute("blwf", [consonant_item, virama_item]):
return IndicPosition.BELOW_C
if self.would_substitute("vatu", [virama_item, consonant_item]):
return IndicPosition.BELOW_C
if self.would_substitute("vatu", [consonant_item, virama_item]):
return IndicPosition.BELOW_C
if self.would_substitute("pstf", [virama_item, consonant_item]):
return IndicPosition.POST_C
if self.would_substitute("pstf", [consonant_item, virama_item]):
return IndicPosition.POST_C
if self.would_substitute("pref", [virama_item, consonant_item]):
return IndicPosition.POST_C
if self.would_substitute("pref", [consonant_item, virama_item]):
return IndicPosition.POST_C
return IndicPosition.BASE_C
def initial_reordering_pre(self):
if self.config["base_pos"] == "last": # Not Sinhala
for item in self.buffer.items:
if item.syllabic_position == IndicPosition.BASE_C:
item.syllabic_position = self.consonant_position_from_face(item.codepoint)
pass
def reassign_category(self, item):
reassign_category_and_position_indic(item)
def initial_reordering_consonant_syllable(self, start, end):
def cat(i):
return self.buffer.items[i].syllabic_category
def pos(i):
return self.buffer.items[i].syllabic_position
def swap(a,b):
self.buffer.items[b], self.buffer.items[a] = self.buffer.items[a], self.buffer.items[b]
def is_joiner(n):
return cat(n) == "ZWJ" or cat(n) == "ZWNJ"
def is_consonant(n):
isc = cat(n)
is_medial = isc == "CM"
return isc in ["C", "CS", "Ra", "V", "PLACEHOLDER", "DOTTEDCIRCLE"] or is_medial
if self.buffer.script == "Kannada" and start + 3 <= end and cat(start) == "Ra" and cat(start+1) == "H" and cat(start+2) == "ZWJ":
swap(start+1, start+2)
syllable_index = self.buffer.items[start].syllable_index
base = end
has_reph = False
limit = start
if "rphf" in self.plan.fontfeatures.features and start + 3 <= end \
and ( \
(self.config["reph_mode"] == "implicit" and not is_joiner(start+2)) \
or (self.config["reph_mode"] == "explicit" and cat(start+2) == "ZWJ") \
):
if self.would_substitute("rphf", self.buffer.items[start:start+2]) \
or self.would_substitute("rphf", self.buffer.items[start:start+3]):
limit = limit + 2
while limit < end and is_joiner(limit):
limit = limit + 1
base = start
has_reph = True
elif self.config["reph_mode"] == "log_repha" and cat(start) == "Repha":
limit = limit + 1
while limit < end and is_joiner(limit):
limit = limit + 1
base = start
has_reph = True
if self.config["base_pos"] == "last":
i = end
seen_below = False
while True:
i = i -1
if is_consonant(i):
if pos(i) != IndicPosition.BELOW_C and \
(pos(i) != IndicPosition.POST_C or seen_below):
base = i
break
if pos(i) == IndicPosition.BELOW_C:
seen_below = True
base = i
else:
if start < i and cat(i) == "ZWJ" and cat(i-1) == "H":
break
if i <= limit:
break
elif self.config["base_pos"] == "last_sinhala":
if not has_reph:
base = limit
for i in range(limit, end):
if is_consonant(i):
if limit < i and cat(i-1) == "ZWJ":
break
else:
base = i
for i in range(base+1, end):
if is_consonant(i):
self.buffer.items[i].syllabic_position = IndicPosition.BELOW_C
if has_reph and base == start and limit - base <= 2:
has_reph = False
self.plan.msg("Base consonant for syllable %i is %s" % (syllable_index, self.buffer.items[base].glyph))
for i in range(start, base):
self.buffer.items[i].syllabic_position = min(IndicPosition.PRE_C, pos(i))
if base < end:
            self.buffer.items[base].syllabic_position = IndicPosition.BASE_C
# Mark final consonants
for i in range(base+1, end):
if cat(i) == "M":
for j in range(i, end):
if is_consonant(j):
self.buffer.items[j].syllabic_position = IndicPosition.FINAL_C
break
break
if has_reph:
            self.buffer.items[start].syllabic_position = IndicPosition.RA_TO_BECOME_REPH
if self.config["old_spec"]:
disallow_double_halants = self.buffer.script == "Kannada"
for i in range(base+1, end):
if cat(i) == "H":
j = end - 1
while j > i:
if is_consonant(j) or (disallow_double_halants and cat(j) == "H"):
break
j = j - 1
if cat(j) != "H" and j > i:
self.buffer.items.insert(j, self.buffer.items.pop(i))
self.plan.msg("Moved double halant", self.buffer)
break
last_pos = IndicPosition.START
for i in range(start, end):
if cat(i) in ["ZWJ", "ZWNJ", "N", "RS", "CM", "H"]:
self.buffer.items[i].syllabic_position = last_pos
if cat(i) == "H" and pos(i) == IndicPosition.PRE_M:
for j in range(i,start,-1):
if pos(j-1) != IndicPosition.PRE_M:
self.buffer.items[i].syllabic_position = pos(j-1)
break
elif pos(i) != IndicPosition.SMVD:
last_pos = pos(i)
last = base
for i in range(base+1, end):
if is_consonant(i):
for j in range(last+1, i):
if pos(j) < IndicPosition.SMVD:
self.buffer.items[j].syllabic_position = pos(i)
last = i
elif cat(i) == "M":
last = i
# As with Harfbuzz, temporarily abuse syllable index
for i in range(start, end):
self.buffer.items[i].syllable_index = start - i
# REORDER
self.buffer.items[start:end] = sorted(self.buffer.items[start:end], key=lambda x:x.syllabic_position)
base = end
for i in range(start, end):
if pos(i) == IndicPosition.BASE_C:
base = i
break
if self.config["old_spec"] or end - start > 127:
# Merge clusters
pass
else:
for i in range(base, end):
if self.buffer.items[i].syllable_index != 255:
max_i = i
j = start + self.buffer.items[i].syllable_index
while j != i:
max_i = max(max_i, j)
next_i = start + self.buffer.items[j].syllable_index
self.buffer.items[j].syllable_index = 255
j = next_i
if i != max_i:
# Merge clusters
pass
for i in range(start, end):
self.buffer.items[i].syllable_index = syllable_index
self.plan.msg("After initial reordering", self.buffer)
# Set up masks now. Note that these masks have the opposite
# value to Harfbuzz - i.e. False means "not masked"
rphf_mask = False
for i in range(start, end):
if pos(i) != IndicPosition.RA_TO_BECOME_REPH:
rphf_mask = True
self.buffer.items[i].feature_masks["rphf"] = rphf_mask
self.buffer.items[i].feature_masks["half"] = i > base
if not self.config["old_spec"] and self.config["blwf_mode"] == "pre_and_post":
self.buffer.items[i].feature_masks["blwf"] = i > base
self.buffer.items[i].feature_masks["blwf"] = i < base
self.buffer.items[i].feature_masks["abvf"] = i < base
self.buffer.items[i].feature_masks["pstf"] = i < base
# We are not supporting old spec eyelash ra
# pref substitutes pairwise
pref_len = 2
i = base + 1
for j in range(0,i):
self.buffer.items[j].feature_masks["pref"] = True
while i < end-pref_len:
if self.would_substitute("pref", [self.buffer.items[i], self.buffer.items[i+1]]):
self.buffer.items[i].feature_masks["pref"] = False
self.buffer.items[i+1].feature_masks["pref"] = False
i = i + 2
else:
self.buffer.items[i].feature_masks["pref"] = True
i = i + 1
# ZWJ/ZWNJ
for i in range(start+1, end):
if cat(i) in ["ZWJ", "ZWNJ"]:
non_joiner = cat(i) == "ZWNJ"
j = i
while True:
j = j - 1
if non_joiner:
self.buffer.items[j].feature_masks["half"] = True
if not (j > start and not is_consonant(j)):
break
initial_reordering_syllable = {
"standalone_cluster": initial_reordering_consonant_syllable,
"consonant_syllable": initial_reordering_consonant_syllable
}
def final_reordering_syllable(self, start, end):
def cat(i):
return self.buffer.items[i].syllabic_category
def pos(i):
return self.buffer.items[i].syllabic_position
def swap(a,b):
self.buffer.items[b], self.buffer.items[a] = self.buffer.items[a], self.buffer.items[b]
def is_joiner(n):
return cat(n) == "ZWJ" or cat(n) == "ZWNJ"
def is_halant(n):
return cat(n) == "H"
def is_consonant(n):
isc = cat(n)
is_medial = isc == "CM"
return isc in ["C", "CS", "Ra", "V", "PLACEHOLDER", "DOTTEDCIRCLE"] or is_medial
virama = self.config["virama"]
virama_item = BufferItem.new_unicode(virama)
virama_item.map_to_glyph(self.buffer.font)
if virama_item.glyph != ".notdef":
for i in range(start, end):
if self.buffer.items[i].glyph == virama_item.glyph \
and self.buffer.items[i].ligated \
and self.buffer.items[i].multiplied:
self.buffer.items[i].syllabic_category = "H"
self.buffer.items[i].ligated = False
self.buffer.items[i].multiplied = False
try_pref = any(["pref" in item.feature_masks and item.feature_masks["pref"] == False for item in self.buffer.items])
base = start
while base < end:
if pos(base) >= IndicPosition.BASE_C:
if try_pref and base + 1 < end:
for i in range(base+1, end):
item = self.buffer.items[i]
if not item.feature_masks.get("pref",True):
if not (item.substituted and (item.ligated and not item.multiplied)):
base = i
while base < end and is_halant(base):
base = base + 1
                                self.buffer.items[base].syllabic_position = IndicPosition.BASE_C
                                try_pref = False
break
if self.buffer.script == "Malayalam":
i = base + 1
while i < end:
while i < end and is_joiner(i):
i = i + 1
if i == end or not is_halant(i):
break
i = i + 1
while i < end and is_joiner(i):
i = i + 1
if i < end and is_consonant(i) and pos(i) == IndicPosition.BELOW_C:
base = i
                            self.buffer.items[base].syllabic_position = IndicPosition.BASE_C
i = i + 1
if start < base and pos(base) > IndicPosition.BASE_C:
base = base - 1
break
base = base + 1
        if base == end and start < base and cat(base-1) == "ZWJ":
base = base - 1
if base < end:
while start < base and cat(base) in ["N","H"]:
base = base - 1
# Reorder matras
if start + 1 < end and start < base:
new_pos = base -1
if base == end:
new_pos = base - 2
# XXX
for i in range(start,end):
self.buffer.items[i].feature_masks["init"] = True
if pos(start) == IndicPosition.PRE_M:
if start == 0 or ucd_data(self.buffer.font.codepointForGlyph(self.buffer.items[start-1].glyph))["General_Category"] not in ["Cf", "Cn", "Co", "Cs", "Ll", "Lm", "Lo", "Lt", "Lu", "Mc", "Me", "Mn"]:
self.buffer.items[start].feature_masks["init"] = False
def normalize_unicode_buffer(self):
unicodes = [item.codepoint for item in self.buffer.items]
newunicodes = []
for cp in unicodes:
if cp in [0x0931, 0x09DC, 0x09DD, 0x0B94]:
newunicodes.append(cp)
elif cp in [0x0DDA, 0x0DDC, 0x0DDD, 0x0DDE]: # Sinhala split matras
glyph = BufferItem.new_unicode(cp)
glyph.map_to_glyph(self.buffer.font)
if self.would_substitute("pstf", [glyph]):
newunicodes.extend([0x0DD9, cp])
else:
newunicodes.append(cp)
else:
newunicodes.extend([ord(x) for x in unicodedata.normalize("NFD", chr(cp)) ])
# Now recompose
newstring = ""
ix = 0
while ix < len(newunicodes):
a = newunicodes[ix]
if ix+1 == len(newunicodes):
newstring = newstring + chr(a)
break
b = newunicodes[ix+1]
s = chr(a) + chr(b)
composed = unicodedata.normalize("NFC", s)
if ucd_data(a)["General_Category"][0] == "M":
newstring = newstring + chr(a)
ix = ix + 1
continue
elif a == 0x9af and b == 0x9bc:
newstring = newstring + chr(0x9df)
ix = ix + 2
continue
elif composed != unicodedata.normalize("NFD", s):
                assert len(composed) == 1
                newunicodes[ix] = ord(composed)
del newunicodes[ix+1]
continue
else:
newstring = newstring + chr(a)
ix =ix + 1
self.buffer.store_unicode(newstring)
|
482961
|
import tensorrt as trt
import torch
from ..torch2trt_dynamic import (get_arg, slice_shape_trt,
tensor_trt_get_shape_trt, tensorrt_converter,
trt_)
@tensorrt_converter('torch.flip')
@tensorrt_converter('torch.Tensor.flip')
def convert_flip(ctx):
input = ctx.method_args[0]
dims = get_arg(ctx, 'dims', pos=1, default=0)
if isinstance(dims, int):
dims = ctx.method_args[1:]
input_dim = len(input.shape)
dims = [input_dim + dim if dim < 0 else dim for dim in dims]
input_trt = trt_(ctx.network, input)
output = ctx.method_return
input_shape_trt = tensor_trt_get_shape_trt(ctx.network, input_trt)
zero_trt = trt_(ctx.network, input.new_zeros(1, dtype=torch.int32))
one_trt = trt_(ctx.network, input.new_ones(1, dtype=torch.int32))
minus_one_trt = trt_(ctx.network,
-1 * input.new_ones(1, dtype=torch.int32))
starts_trt = [zero_trt for _ in range(input_dim)]
steps_trt = [one_trt for _ in range(input_dim)]
for d in dims:
tmp_slice_trt = slice_shape_trt(ctx.network, input_shape_trt, d, 1)
starts_trt[d] = ctx.network.add_elementwise(
tmp_slice_trt, one_trt, trt.ElementWiseOperation.SUB).get_output(0)
steps_trt[d] = minus_one_trt
starts_trt = ctx.network.add_concatenation(starts_trt).get_output(0)
steps_trt = ctx.network.add_concatenation(steps_trt).get_output(0)
layer = ctx.network.add_slice(input_trt, [0] * input_dim, [1] * input_dim,
[0] * input_dim)
layer.set_input(1, starts_trt)
layer.set_input(2, input_shape_trt)
layer.set_input(3, steps_trt)
output._trt = layer.get_output(0)
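# Reading of the converter above: flip is expressed as a single ISliceLayer
# whose start index along each flipped axis is (dim_size - 1) and whose stride
# is -1, while untouched axes keep start 0 and stride 1; the slice shape is
# the full input shape.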
|
482962
|
from sip_parser.sdp_fields import (
FieldRaw,
MediaField,
OriginField,
TimingField,
RepeatTimesField,
TimeDescription,
MediaDescription,
ConnectionDataField,
)
from sip_parser.exceptions import SdpParseError
def parse_version(value):
if value != "0":
raise SdpParseError(
f"Unexpected SDP protocol version number {value}. Only version 0 supported"
)
return 0
def parse_origin(value):
subfields = value.split(" ")
if len(subfields) != 6:
raise SdpParseError("Unexpected format found while parsing origin header (o=)")
return OriginField(*subfields)
def parse_media(value):
subfields = value.split(" ")
port_str = subfields[1]
try:
if "/" in port_str:
port_parts = port_str.split("/")
port = int(port_parts[0])
number_of_ports = int(port_parts[1])
else:
number_of_ports = 1
port = int(port_str)
except ValueError:
raise SdpParseError(f"Invalid media description's port sub-field found: <{port_str}>")
return MediaField(
media=subfields[0],
port=port,
number_of_ports=number_of_ports,
proto=subfields[2],
fmt=subfields[3],
)
def parse_repeat(value):
# TODO: Support SDP compressed repeat times using letters, transform to seconds
subfields = value.split(" ")
return RepeatTimesField(
repeat_interval=subfields[0], active_duration=subfields[1], offsets=subfields[2:]
)
def parse_media_attributes(value):
subfields = value.split(":")
if len(subfields) > 1:
return (subfields[0], subfields[1])
# Otherwise, it's a property attribute
return (value, True)
parse_functions = {
"v": parse_version,
"o": parse_origin,
"s": lambda val: val,
"i": lambda val: val,
"u": lambda val: val,
"m": parse_media,
"t": lambda val: TimingField(*val.split(" ")),
"r": parse_repeat,
"a": parse_media_attributes,
"e": lambda val: val,
"p": lambda val: val,
"c": lambda val: ConnectionDataField(*val.split(" ")),
}
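
# A small usage sketch (the media line below is made up for illustration):
if __name__ == "__main__":
    media = parse_functions["m"]("audio 49170/2 RTP/AVP 0")
    # MediaField(media='audio', port=49170, number_of_ports=2,
    #            proto='RTP/AVP', fmt='0')
    print(media)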
|
482966
|
class Solution:
def numJewelsInStones(self, J: str, S: str) -> int:
count = 0
for char in S:
if char in J:
count +=1
return count
if __name__ == '__main__':
J = "aA"
S = "aAAbbbb"
instance = Solution()
solution = instance.numJewelsInStones(J,S)
print(solution)
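# Note: converting J to a set first (e.g. jewels = set(J)) would make each
# membership test O(1) instead of O(len(J)); for the short inputs in this kata
# the simple "char in J" check above is already fine.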
|
482997
|
def _sum_compose(losses):
loss = sum(v[0] for v in losses)
loss_value = sum(v[1] for v in losses)
    loss_dict = {}
    for v in losses:
        loss_dict.update(v[2])
return loss, loss_value, loss_dict
class MultiScaleLossComposer:
def __init__(self, compose_fn, single_scale_loss_composers):
self.single_scale_loss_composers = single_scale_loss_composers
self.compose_fn = compose_fn
def __call__(self, losses):
return self.compose_fn(tuple(composer(loss) for loss, composer in zip(losses, self.single_scale_loss_composers)))
def state_dict(self):
state = []
for single_scale_loss_composer in self.single_scale_loss_composers:
state.append(single_scale_loss_composer.state_dict())
return state
def load_state_dict(self, states):
for state, single_scale_loss_composer in zip(states, self.single_scale_loss_composers):
single_scale_loss_composer.load_state_dict(state)
def on_iteration_end(self, is_training):
for single_scale_loss_composer in self.single_scale_loss_composers:
single_scale_loss_composer.on_iteration_end(is_training)
def on_epoch_begin(self, epoch):
for single_scale_loss_composer in self.single_scale_loss_composers:
single_scale_loss_composer.on_epoch_begin(epoch)
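

# Usage sketch (assumptions: each single-scale composer maps its per-scale loss to the
# (loss, loss_value, loss_dict) tuple that _sum_compose expects; `composer_a`/`composer_b`
# and `loss_a`/`loss_b` below are hypothetical placeholders):
#
#   composer = MultiScaleLossComposer(_sum_compose, [composer_a, composer_b])
#   total_loss, total_value, merged_dict = composer([loss_a, loss_b])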
|
483041
|
from modeling import *
# def ar_for_ro(ro, N, Cap, k, D, S):
# return ro*N*Cap/k.mean()/D.mean()/S.mean()
def ar_for_ro(ro, N, Cap, k, R, L, S):
return ro*N*Cap/k.mean()/R.mean()/L.mean()/S.mean()
def EW_MMc(ar, EX, c):
ro = ar*EX/c
C = 1/(1 + (1-ro)*G(c+1)/(c*ro)**c * sum([(c*ro)**k/G(k+1) for k in range(c) ] ) )
# EN = ro/(1-ro)*C + c*ro
return C/(c/EX - ar)
def EW_MGc(ar, X, c):
EX2, EX = X.moment(2), X.moment(1)
CoeffVar = math.sqrt(EX2 - EX**2)/EX
return (1 + CoeffVar**2)/2 * EW_MMc(ar, EX, c)
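
# The two helpers above are standard queueing approximations:
#   EW_MMc: Erlang-C waiting time for M/M/c with ro = ar*E[X]/c,
#     C = 1 / (1 + (1 - ro) * c!/(c*ro)^c * sum_{j=0}^{c-1} (c*ro)^j/j!),
#     E[W] = C / (c/E[X] - ar).
#   EW_MGc: two-moment M/G/c approximation,
#     E[W_MGc] ~ (1 + CoV^2)/2 * E[W_MMc],  where CoV = std(X)/E[X].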
def check_MGc_assumption():
# N, Cap = 10, 1
N_times_Cap = 100
r = 1
L = Exp(1, 1)
S = DUniform(1, 1)
sinfo_m['njob'] = 2000*10
sching_m = {'type': 'plain', 'r': r}
blog(N_times_Cap=N_times_Cap, sinfo_m=sinfo_m, mapping_m=mapping_m, sching_m=sching_m)
def run(ro, N, k, R, L, S, r=1):
Cap = int(N_times_Cap/N)
print("\n")
log(INFO, "ro= {}, N= {}, Cap= {}, k= {}, R= {}, L= {}, S= {}, r= {}".format(ro, N, Cap, k, R, L, S, r) )
ar = round(ar_for_ro(ro, N, Cap, k, R, L, S), 2)
sinfo_m.update({
'nworker': N, 'wcap': Cap, 'ar': ar,
'k_rv': k,
'reqed_rv': R,
'lifetime_rv': L,
'straggle_m': {'slowdown': lambda load: S.sample() } } )
sching_m['r'] = r
sim_m = sim(sinfo_m, mapping_m, sching_m, "N{}_C{}".format(N, Cap) )
blog(sim_m=sim_m)
# c = int(N*Cap/R.mean() ) # N*Cap
# print("c= {}".format(c) )
# EW = EW_MGc(ar, L, c)
# print("M/G/c_EW= {}".format(EW) )
return {
'ar': ar,
'EW': sim_m['waittime_mean'],
'pblocking': sim_m['frac_jobs_waited_inq'],
'EW_givenqed': sim_m['waittime_givenqed_mean'] }
def test(ro, R=DUniform(1, 1) ):
print("---------------")
run(ro, 1, k, R, L, S)
# run(ro, 2, k, R, L, S)
# run(ro, 5, k, R, L, S)
# run(ro, 10, k, R, L, S)
def check_EW_scaling_wrt_ro(N, R):
log(INFO, "", N=N, R=R)
# '''
ro_l, EW_l = [], []
for ro in np.linspace(0.1, 0.9, 9):
ro = round(ro, 2)
      m = run(ro, N, k, R, L, S)
      ar, EW, pblocking = m['ar'], m['EW'], m['pblocking']
print("ro= {}, EW= {}".format(ro, EW) )
ro_l.append(ro)
EW_l.append(EW)
blog(ro_l=ro_l, EW_l=EW_l)
# '''
# ro_l= [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
# EW_l= [0.00025548087470978202, 0.00056689800613990546, 0.00089200542402208672, 0.0012637166320921696, 0.0017178514022176334, 0.0021802843452227629, 0.002912705863562876, 0.0061096923858674568, 0.043253547318583753]
print("ratio = EW/(ro/(1-ro))")
for i, EW in enumerate(EW_l):
ro = ro_l[i]
ratio = EW/(ro/(1-ro) )
print("ro= {}, ratio= {}".format(ro, ratio) )
log(INFO, "done.")
def check_EW_scaling_wrt_EL2_over_EL(N, R, ro):
log(INFO, "", N=N, R=R, ro=ro)
EL2_over_EL_l, EW_l = [], []
for mu in np.linspace(0.1, 1, 10):
L = Exp(mu, 1)
EL2_over_EL = round(L.moment(2)/L.moment(1), 2)
      m = run(ro, N, k, R, L, S)
      ar, EW, pblocking = m['ar'], m['EW'], m['pblocking']
print("EL2_over_EL= {}, EW= {}".format(EL2_over_EL, EW) )
EL2_over_EL_l.append(EL2_over_EL)
EW_l.append(EW)
blog(EL2_over_EL_l=EL2_over_EL_l, EW_l=EW_l)
# '''
print("ratio = EW/(EL2/EL)")
for i, EW in enumerate(EW_l):
EL2_over_EL = EL2_over_EL_l[i]
ratio = EW/EL2_over_EL
print("EL2_over_EL= {}, ratio= {}".format(EL2_over_EL, ratio) )
log(INFO, "done.")
def check_EW_scaling_wrt_ER2_over_ER(N, L, ro):
log(INFO, "", N=N, L=L, ro=ro)
ER2_over_ER_l, EW_l = [], []
for u in np.linspace(0.1, 1, 10):
R = Uniform(0.1, u)
ER2_over_ER = round(R.moment(2)/R.moment(1), 2)
      m = run(ro, N, k, R, L, S)
      ar, EW, pblocking = m['ar'], m['EW'], m['pblocking']
print("ER2_over_ER= {}, EW= {}".format(ER2_over_ER, EW) )
ER2_over_ER_l.append(ER2_over_ER)
EW_l.append(EW)
blog(ER2_over_ER_l=ER2_over_ER_l, EW_l=EW_l)
print("ratio = EW/(ER2/ER)")
for i, EW in enumerate(EW_l):
ER2_over_ER = ER2_over_ER_l[i]
ratio = EW/ER2_over_ER
print("ER2_over_ER= {}, ratio= {}".format(ER2_over_ER, ratio) )
log(INFO, "done.")
def check_EW_scaling_wrt_Ek2_over_Ek(N, R, L, ro):
log(INFO, "", N=N, R=R, L=L, ro=ro)
Ek2_over_Ek_l, EW_l = [], []
for u in range(1, 10):
k = DUniform(1, u)
Ek2_over_Ek = round(k.moment(2)/k.moment(1), 2)
      m = run(ro, N, k, R, L, S)
      ar, EW, pblocking = m['ar'], m['EW'], m['pblocking']
print("Ek2_over_Ek= {}, EW= {}".format(Ek2_over_Ek, EW) )
Ek2_over_Ek_l.append(Ek2_over_Ek)
EW_l.append(EW)
blog(Ek2_over_Ek_l=Ek2_over_Ek_l, EW_l=EW_l)
    print("ratio = EW/(Ek2/Ek)")
for i, EW in enumerate(EW_l):
Ek2_over_Ek = Ek2_over_Ek_l[i]
ratio = EW/Ek2_over_Ek
print("Ek2_over_Ek= {}, ratio= {}".format(Ek2_over_Ek, ratio) )
log(INFO, "done.")
def check_EW_scaling_wrt_model(N, k, R, L, S):
log(INFO, "", N=N, k=k, R=R, L=L, S=S)
sinfo_m['njob'] = 2000*10
ET = L.mean()*sum([X_n_k(S, i, i).mean()*k.pdf(i) for i in k.v_l] )
ET2 = L.moment(2)*sum([X_n_k(S, i, i).moment(2)*k.pdf(i) for i in k.v_l] )
EL, EL2 = L.mean(), L.moment(2)
blog(ET=ET, ET2=ET2, EL=EL, EL2=EL2)
C_moment = lambda i: k.moment(i)*R.moment(i)*L.moment(i)*S.moment(i)
print(">> C_moment(1)= {}, C_moment(2)= {}".format(C_moment(1), C_moment(2) ) )
def Pr_blocking(ar, ro):
# narr_atleast_forblocking = (1-ro)*N_times_Cap/(k.moment(1)*R.moment(1) ) - 1
# blog(narr_atleast_forblocking=narr_atleast_forblocking)
# ar_ = ar*L.tail(ET)*ET # *L.u_l/10
# return max(0, \
# 1 - math.exp(-ar_)*sum([ar_**i/math.factorial(i) for i in range(int(narr_atleast_forblocking) ) ] ) )
alpha = 0.9 # 1/2 # L.cdf(L.u_l/10) # L.cdf(10*EL) # 1/2 # L.cdf(EL)
# print("alpha= {}".format(alpha) )
long_jlifetime = EL + math.sqrt((EL2 - EL**2)*alpha/(1-alpha) ) # ET + math.sqrt((ET2 - ET**2)*alpha/(1-alpha) )
ro_short = ar*L.cdf(long_jlifetime)*C_moment(1)/N_times_Cap
narr_atleast_forblocking = (1-ro_short)*N_times_Cap / (k.moment(1)*R.moment(1) ) - 1
blog(narr_atleast_forblocking=narr_atleast_forblocking)
ar_long = ar*L.tail(long_jlifetime)*long_jlifetime
return max(0, \
1 - math.exp(-ar_long)*sum([ar_long**i/math.factorial(i) for i in range(int(narr_atleast_forblocking) ) ] ) )
def EW_givenqed_model(ro):
return ro/(1-ro) * C_moment(2)/C_moment(1)
def EW_model(ar, ro, pblocking=None):
if pblocking is None:
pblocking = Pr_blocking(ar, ro)
print("pblocking= {}".format(pblocking) )
return ro/(1-ro) * C_moment(2)/C_moment(1) / 2 * pblocking
EW_l, sim_EW_l = [], []
# for ro in np.linspace(0.1, 0.9, 9):
for ro in np.linspace(0.7, 0.9, 3):
ro = round(ro, 2)
m = run(ro, N, k, R, L, S)
ar, sim_EW, sim_pblocking = m['ar'], m['EW'], m['pblocking']
print("ar= {}, ro= {}".format(ar, ro) )
pblocking = Pr_blocking(ar, ro)
print("sim_pblocking= {}, pblocking= {}".format(sim_pblocking, pblocking) )
EW = EW_model(ar, ro, pblocking)
print("sim_EW= {}, EW= {}".format(sim_EW, EW) )
sim_EW_l.append(sim_EW)
EW_l.append(EW)
sim_EW_givenqed = m['EW_givenqed']
EW_givenqed = EW_givenqed_model(ro)
print("sim_EW_givenqed= {}, EW_givenqed= {}".format(sim_EW_givenqed, EW_givenqed) )
blog(EW_l=EW_l, sim_EW_l=sim_EW_l)
# print("ratio = sim_EW/model")
# for i, sim_EW in enumerate(sim_EW_l):
# EW = EW_l[i]
# ratio = sim_EW/EW
# print("EW= {}, ratio= {}".format(EW, ratio) )
log(INFO, "done.")
def check_EW_scaling_w_increasing_r(N, k, R, L, S, ro):
log(INFO, "", N=N, k=k, R=R, L=L, S=S, ro=ro)
# for r in np.linspace(1, 2, 3):
for r in range(1, 4):
m = run(ro, N, k, R, L, S, r)
ar, sim_EW, sim_pblocking = m['ar'], m['EW'], m['pblocking']
print("ar= {}, ro= {}".format(ar, ro) )
# test(ro=0.4)
# test(ro=0.65)
# test(ro=0.9)
# R = Uniform(0.25, 0.75) # Uniform(0.5, 1.5)
# test(0.9, R)
# R = Uniform(0.25, 0.75) # Uniform(1, 1) # Uniform(0.05, 0.15) # Uniform(0.5, 1.5)
# check_EW_scaling_wrt_ro(5, R)
# R = Uniform(1.5, 2.5) # Uniform(2, 2)
# check_EW_scaling_wrt_EL2_over_EL(N, R, ro=0.85)
# L = Exp(0.1, 1)
# check_EW_scaling_wrt_ER2_over_ER(N, L, ro=0.85)
# R = Uniform(1, 1) # Uniform(1, 1)
# L = Exp(0.1, 1) # Uniform(1, 1)
# check_EW_scaling_wrt_Ek2_over_Ek(N, R, L, ro=0.85)
k = BZipf(1, 10) # DUniform(1, 1) # DUniform(1, 4)
R = Uniform(1, 1)
L = TPareto(10, 10**6, 4) # Exp(0.1, 1) # Uniform(1, 1)
S = TPareto(1, 10, 2) # Uniform(1, 1)
check_EW_scaling_wrt_model(N, k, R, L, S)
log(INFO, "done.")
def plot_ET_wrt_d():
N, Cap = 10, 10
k = BZipf(1, 1) # DUniform(1, 1)
R = Uniform(1, 1)
b, beta = 10, 4
L = Pareto(b, beta) # TPareto(10, 10**6, 4)
a, alpha = 1, 4
S = Pareto(a, alpha) # Uniform(1, 1)
def alpha_gen(ro):
return alpha
ro = 0.55
red, r = 'Coding', 2
print("ro= {}".format(ro) )
ar = round(ar_for_ro(ro, N, Cap, k, R, L, S), 2)
sinfo_m.update({
'njob': 2000*10,
'nworker': N, 'wcap': Cap, 'ar': ar,
'k_rv': k,
'reqed_rv': R,
'lifetime_rv': L,
'straggle_m': {'slowdown': lambda load: S.sample() } } )
sching_m = {'type': 'expand_if_totaldemand_leq', 'r': r, 'threshold': None}
log(INFO, "", sinfo_m=sinfo_m, sching_m=sching_m, mapping_m=mapping_m)
def run(d):
sching_m['threshold'] = d
sim_m = sim(sinfo_m, mapping_m, sching_m, "N{}_C{}".format(N, Cap) )
blog(sim_m=sim_m)
return sim_m['responsetime_mean'], sim_m['waittime_mean']
sim_ET0, sim_EW0 = 0, 0 # run(d=0)
# print("** sim_ET0= {}, sim_EW0= {}".format(sim_ET0, sim_EW0) )
l = L.l_l*S.l_l
u = 50*L.mean()*S.mean()
d_l, sim_ET_l, ET_l = [], [], []
for d in np.logspace(math.log10(l), math.log10(u), 10):
print(">> d= {}".format(d) )
sim_ET, sim_EW = 0 ,0 # run(d)
print("** sim_ET= {}, sim_EW= {}".format(sim_ET, sim_EW) )
ET, EW = ET_EW_pareto(ro, sim_EW0, N, Cap, k, r, b, beta, a, alpha_gen, d, red)
print("** ET= {}, EW= {}".format(ET, EW) )
# ET_dummy, EW_dummy = ET_EW_pareto(ro, sim_EW0, N, Cap, k, r, b, beta, a, alpha_gen, d, red, K=1)
# print("** ET_dummy= {}, EW_dummy= {}".format(ET_dummy, EW_dummy) )
# print("EW_dummy/sim_EW= {}".format(EW_dummy/sim_EW) )
d_l.append(d)
sim_ET_l.append(sim_ET)
ET_l.append(ET)
if sim_ET > 3*sim_ET0:
break
plot.plot(d_l, sim_ET_l, label='Sim', c=next(darkcolor_c), marker=next(marker_c), ls=':', mew=1)
plot.plot(d_l, ET_l, label='Model', c=next(darkcolor_c), marker=next(marker_c), ls=':', mew=1)
prettify(plot.gca() )
plot.legend()
plot.xscale('log')
fontsize = 14
plot.xlabel('d', fontsize=fontsize)
plot.ylabel('E[T]', fontsize=fontsize)
plot.title(r'$N= {}$, $C= {}$, $\rho_0= {}$, $r= {}$, $k \sim$ {}'.format(N, Cap, ro, r, k) + '\n' + r'$R \sim$ {}, $L \sim$ {}, $S \sim$ {}'.format(R, L, S) )
plot.gcf().set_size_inches(5, 5)
plot.savefig('plot_ET_wrt_d.png', bbox_inches='tight')
plot.gcf().clear()
log(INFO, "done.")
if __name__ == "__main__":
N, Cap = 10, 1
b, beta = 10, 5
a, alpha = 1, 1000 # 2
k = BZipf(1, 1)
r = 1
# log(INFO, "", k=k, r=r, b=b, beta=beta, a=a, alpha=alpha)
def alpha_gen(ro):
return alpha
S = Pareto(a, alpha)
ar = round(ar_for_ro_pareto(1/2, N, Cap, k, b, beta, a, alpha_gen), 2)
sinfo_m = {
'ar': ar, 'njob': 2000*10, 'nworker': N, 'wcap': Cap,
'lifetime_rv': Pareto(b, beta),
'reqed_rv': DUniform(1, 1),
'k_rv': k,
'straggle_m': {'slowdown': lambda load: S.sample() } }
mapping_m = {'type': 'spreading'}
sching_m = {'type': 'expand_if_totaldemand_leq', 'r': r, 'threshold': None}
# blog(sinfo_m=sinfo_m, mapping_m=mapping_m, sching_m=sching_m)
# check_MGc_assumption()
plot_ET_wrt_d()
|
483045
|
import os
import contextlib
import shutil
import gzip
import simplejson as json
import fastavro
from .base import BlobStorage, Blob
class LocalStorage(BlobStorage):
"""Local storage provider that utilizes the local file system.
Args:
root: the root directory, will be created if not exists.
"""
def __init__(self, root):
self._root = root
# Create if not exists.
if not os.path.exists(self._root):
os.makedirs(self._root)
def _get_and_check_path(self, blob_name):
path = os.path.join(self._root, blob_name)
if not os.path.exists(path):
raise ValueError("Cannot find blob at path: "+path)
return path
def _get_path_and_create_dir(self, blob_name):
path = os.path.join(self._root, blob_name)
path_dir = os.path.dirname(path)
if not os.path.exists(path_dir):
os.makedirs(path_dir)
return path
def get_object(self, blob_name):
path = self._get_and_check_path(blob_name)
with open(path, "r") as f:
obj = json.load(f)
return obj
@contextlib.contextmanager
def get_file(self, blob_name):
path = self._get_and_check_path(blob_name)
try:
fileobj = open(path, 'rb')
yield fileobj
finally:
fileobj.close()
def put_file(self, fileobj, blob_name):
path = self._get_path_and_create_dir(blob_name)
with open(path, 'wb') as f:
shutil.copyfileobj(fileobj, f)
size = os.path.getsize(path)
return Blob(blob_name, size)
def put_object(self, obj, blob_name):
path = self._get_path_and_create_dir(blob_name)
with open(path, "w") as f:
json.dump(obj, f)
size = os.path.getsize(path)
return Blob(blob_name, size)
def put_avro(self, schema, records, blob_name, codec='snappy'):
path = self._get_path_and_create_dir(blob_name)
with open(path, "wb") as f:
fastavro.writer(f, schema, records, codec)
size = os.path.getsize(path)
return Blob(blob_name, size)
def put_json(self, records, blob_name, gzip_compress=True):
path = self._get_path_and_create_dir(blob_name)
newline = "\n"
if gzip_compress:
with gzip.open(path, "wt") as f:
for record in records:
f.write(json.dumps(record))
f.write(newline)
else:
with open(path, "w") as f:
for record in records:
f.write(json.dumps(record))
f.write(newline)
size = os.path.getsize(path)
return Blob(blob_name, size)
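

# Usage sketch (illustrative path and blob names):
#
#   storage = LocalStorage("/tmp/blobstore")
#   blob = storage.put_object({"hello": "world"}, "example/greeting.json")
#   assert storage.get_object("example/greeting.json") == {"hello": "world"}
#   with storage.get_file("example/greeting.json") as f:
#       raw = f.read()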
|
483110
|
import pandas as pd
import re
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
import mapping_career_causeways.text_cleaning_utils as text_cleaning_utils
def tfidf_keywords(p, dataframe, text_field, stopwords, N=10):
"""
Fast method to generate keywords characterising each cluster
Parameters
----------
    p (list or np.ndarray):
        Cluster integer labels
dataframe (pandas.DataFrame):
Dataframe with information about the clustered nodes.
text_field (string):
Column name of the 'dataframe' that contains the text corpus
to be used for keyword extraction.
stopwords (list of strings):
Specific words which should be excluded from the text corpus.
    N (int):
        Number of keywords to return for each cluster.
Returns
-------
tfidf_keywords (list of strings):
Strings containing cluster keywords
tfidf_keywords_ (list of strings):
Strings containing the cluster number and cluster keywords
"""
# Collect text for each cluster & remove custom stopwords
cluster_text = []
for c in range(len(np.unique(p))):
t=" ".join(dataframe.loc[p==c][text_field].to_list())
for stopword in stopwords:
t=re.sub(stopword,'',t)
cluster_text.append(t)
# Further clean the text (see 'text_cleaning_utils' for more details)
clust_descriptions_clean = []
for descr in cluster_text:
text = text_cleaning_utils.clean_text(descr)
text = text_cleaning_utils.remove_stopwords(text)
text = text_cleaning_utils.lemmatise(text)
clust_descriptions_clean.append(text)
# Find keywords using tf-idf vectors
vectorizer = TfidfVectorizer(ngram_range=(1, 2))
vectors = vectorizer.fit_transform(clust_descriptions_clean)
names = vectorizer.get_feature_names()
Data = vectors.todense().tolist()
# Create a dataframe with the results
df = pd.DataFrame(Data, columns=names)
tfidf_keywords = []
for i, row in df.iterrows():
tfidf_keywords.append(row.sort_values(ascending=False)[:N].index.to_list())
# Final outputs: string with the cluster number and N keywords per cluster
tfidf_keywords_ = ["["+str(i)+"] "+", ".join(x) for i, x in enumerate(tfidf_keywords)]
# Final outputs: string with N keywords per cluster
tfidf_keywords = [", ".join(x) for i, x in enumerate(tfidf_keywords)]
return tfidf_keywords, tfidf_keywords_
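

# Example call (illustrative names; `p` holds integer cluster labels aligned with the
# rows of `df`, and "description" is the text column used for keyword extraction):
#
#   keywords, labelled_keywords = tfidf_keywords(
#       p, df, text_field="description", stopwords=["confidential"], N=5)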
|
483227
|
import json
import os
import time
import requests
from PIL import Image
from io import BytesIO
from requests.exceptions import ConnectionError
def go(query, path):
"""Download full size images from Google image search.
Don't print or republish images without permission.
I used this to train a learning algorithm.
"""
BASE_URL = 'https://ajax.googleapis.com/ajax/services/search/images?'\
'v=1.0&q=' + query + '&start=%d'
BASE_PATH = os.path.join(path, query)
if not os.path.exists(BASE_PATH):
os.makedirs(BASE_PATH)
start = 0 # Google's start query string parameter for pagination.
while start < 60: # Google will only return a max of 56 results.
r = requests.get(BASE_URL % start)
for image_info in json.loads(r.text)['responseData']['results']:
url = image_info['unescapedUrl']
try:
image_r = requests.get(url)
            except ConnectionError as e:
                print('could not download %s' % url)
                continue
# Remove file-system path characters from name.
title = image_info['titleNoFormatting'].replace('/', '').replace('\\', '')
            file = open(os.path.join(BASE_PATH, '%s.jpg') % title, 'wb')
            try:
                Image.open(BytesIO(image_r.content)).save(file, 'JPEG')
            except IOError as e:
                # Throw away some gifs...blegh.
                print('could not save %s' % url)
                continue
            finally:
                file.close()
        print(start)
start += 4 # 4 images per page.
# Be nice to Google and they'll be nice back :)
time.sleep(1.5)
# Example use
go('landscape', 'myDirectory')
|
483236
|
from collections import namedtuple
import torch as tr
from torch import nn
from torch.nn import functional as F
from configs import Config
from utils.tr_utils import ellipse_params, rotate
class NLinear(nn.Sequential):
def __init__(self, in_feat, units, act=nn.ELU):
layers = [nn.Linear(in_feat, units[0])]
for i in range(len(units) - 1):
in_feat, out_feat = units[i:i + 2]
layers.append(act())
layers.append(nn.Linear(in_feat, out_feat))
super(NLinear, self).__init__(*layers)
if Config.use_gpu:
self.cuda()
ZParams = namedtuple('ZParams', 'means cov')
class SingleZTransform(nn.Module):
def __init__(self, params):
super(SingleZTransform, self).__init__()
self.means, self.cov = params
self.th, self.a, self.b = ellipse_params(self.cov)
self.scale = tr.tensor([self.a, self.b], dtype=tr.float32)
@property
def params(self):
return ZParams(self.means, self.cov)
@params.setter
def params(self, value):
means, cov = value
self.means, self.cov = tr.Tensor(means), tr.Tensor(cov)
@property
def inv_params(self):
return ZParams(-self.means, self.cov)
def normalize(self, x):
x = x - self.means
x = rotate(x, - self.th)
x = x / self.scale
return x
def denormalize(self, x):
x = x * self.scale
x = rotate(x, self.th)
x = x + self.means
return x
class ZTransform(nn.Module):
def __init__(self, src_params, target_params=None):
super(ZTransform, self).__init__()
# print(type(src_params))
# print(src_params)
src_params = list(map(tr.tensor, src_params))
# print(src_params)
# print(type(src_params))
if target_params is None:
target_params = tr.zeros(src_params[0].shape), tr.eye(src_params[0].shape[0])
self.src_transform = SingleZTransform(src_params)
self.target_transform = SingleZTransform(target_params)
@property
def src_params(self):
return self.src_transform.params
@property
def target_params(self):
return self.target_transform.params
@src_params.setter
def src_params(self, value):
self.src_transform.params = value
@target_params.setter
def target_params(self, value):
self.target_transform.params = value
def forward(self, x):
x = self.src_transform.normalize(x)
x = self.target_transform.denormalize(x)
return x
def inv(self, x):
x = self.target_transform.normalize(x)
x = self.src_transform.denormalize(x)
return x
# use only when h == w for an image
def get_padding(stride, in_dim, kernel_dim, out_dim=None, mode='SAME'):
k = kernel_dim
    if out_dim is None:
out_dim = (in_dim + stride - 1) // stride
if mode.lower() == 'same':
val = stride * (out_dim - 1) - in_dim + k
if val % 2 == 0:
p1, p2 = val // 2, val // 2
else:
p1, p2 = (val + 1) // 2, (val + 1) // 2
return (p1, p2, p1, p2)
class ConvBlock(nn.Module):
def __init__(self, in_filters, out_filters, bn=True, kernel_size=3, stride=2, padding=None):
super(ConvBlock, self).__init__()
self.stride = stride
self.padding = padding
self.kernel = kernel_size
layers = [nn.Conv2d(in_filters, out_filters, kernel_size=kernel_size, stride=stride)]
if bn:
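            # Note: nn.BatchNorm2d's second positional argument is `eps`, so the 0.8 below
            # is used as eps (it reads as if it were intended to be the momentum).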
layers.append(nn.BatchNorm2d(out_filters, 0.8))
layers.append(nn.LeakyReLU(0.2, inplace=True))
self.conv_block = nn.Sequential(*layers)
def forward(self, input):
padding = self.padding
if isinstance(padding, str):
padding = get_padding(self.stride, input.shape[2], self.kernel, mode=self.padding)
        elif padding is None:
            # F.pad expects a sequence, so "no padding" is expressed as all-zero pads.
            padding = (0, 0, 0, 0)
input = F.pad(input, padding)
input = self.conv_block(input)
return input
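

if __name__ == "__main__":
    # Shape sanity check (illustrative): with 'SAME' padding and stride 2 the spatial
    # size is ceil-divided by the stride, so a 32x32 input comes out as 16x16.
    x = tr.randn(1, 3, 32, 32)
    block = ConvBlock(3, 8, kernel_size=3, stride=2, padding='SAME')
    print(block(x).shape)  # torch.Size([1, 8, 16, 16])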
|
483253
|
import numpy as np
import sklearn.linear_model as skl_linear_model
import sklearn.preprocessing as skl_preprocessing
from Orange.data import Variable, ContinuousVariable
from Orange.preprocess import Normalize
from Orange.preprocess.score import LearnerScorer
from Orange.regression import Learner, Model, SklLearner, SklModel
__all__ = [
"LinearRegressionLearner",
"RidgeRegressionLearner",
"LassoRegressionLearner",
"SGDRegressionLearner",
"ElasticNetLearner",
"ElasticNetCVLearner",
"PolynomialLearner",
]
class _FeatureScorerMixin(LearnerScorer):
feature_type = Variable
class_type = ContinuousVariable
def score(self, data):
data = Normalize()(data)
model = self(data)
return np.abs(model.coefficients)
class LinearRegressionLearner(SklLearner, _FeatureScorerMixin):
__wraps__ = skl_linear_model.LinearRegression
def __init__(self, preprocessors=None):
super().__init__(preprocessors=preprocessors)
def fit(self, X, Y, W):
model = super().fit(X, Y, W)
return LinearModel(model.skl_model)
class RidgeRegressionLearner(LinearRegressionLearner):
__wraps__ = skl_linear_model.Ridge
def __init__(
self,
alpha=1.0,
fit_intercept=True,
normalize=False,
copy_X=True,
max_iter=None,
tol=0.001,
solver="auto",
preprocessors=None,
):
super().__init__(preprocessors=preprocessors)
self.params = vars()
class LassoRegressionLearner(LinearRegressionLearner):
__wraps__ = skl_linear_model.Lasso
def __init__(
self,
alpha=1.0,
fit_intercept=True,
normalize=False,
precompute=False,
copy_X=True,
max_iter=1000,
tol=0.0001,
warm_start=False,
positive=False,
preprocessors=None,
):
super().__init__(preprocessors=preprocessors)
self.params = vars()
class ElasticNetLearner(LinearRegressionLearner):
__wraps__ = skl_linear_model.ElasticNet
def __init__(
self,
alpha=1.0,
l1_ratio=0.5,
fit_intercept=True,
normalize=False,
precompute=False,
max_iter=1000,
copy_X=True,
tol=0.0001,
warm_start=False,
positive=False,
preprocessors=None,
):
super().__init__(preprocessors=preprocessors)
self.params = vars()
class ElasticNetCVLearner(LinearRegressionLearner):
__wraps__ = skl_linear_model.ElasticNetCV
def __init__(
self,
l1_ratio=0.5,
eps=0.001,
n_alphas=100,
alphas=None,
fit_intercept=True,
normalize=False,
precompute="auto",
max_iter=1000,
tol=0.0001,
cv=None,
copy_X=True,
verbose=0,
n_jobs=1,
positive=False,
preprocessors=None,
):
super().__init__(preprocessors=preprocessors)
self.params = vars()
class SGDRegressionLearner(LinearRegressionLearner):
__wraps__ = skl_linear_model.SGDRegressor
preprocessors = SklLearner.preprocessors + [Normalize()]
def __init__(
self,
loss="squared_loss",
penalty="l2",
alpha=0.0001,
l1_ratio=0.15,
fit_intercept=True,
max_iter=5,
tol=None,
shuffle=True,
epsilon=0.1,
n_jobs=1,
random_state=None,
learning_rate="invscaling",
eta0=0.01,
power_t=0.25,
class_weight=None,
warm_start=False,
average=False,
preprocessors=None,
):
super().__init__(preprocessors=preprocessors)
self.params = vars()
class PolynomialLearner(Learner):
"""Generate polynomial features and learn a prediction model
Parameters
----------
learner : LearnerRegression
learner to be fitted on the transformed features
degree : int
degree of used polynomial
preprocessors : List[Preprocessor]
preprocessors to be applied on the data before learning
"""
name = "poly learner"
preprocessors = SklLearner.preprocessors
def __init__(self, learner=LinearRegressionLearner(), degree=2, preprocessors=None):
super().__init__(preprocessors=preprocessors)
self.degree = degree
self.learner = learner
def fit(self, X, Y, W):
polyfeatures = skl_preprocessing.PolynomialFeatures(self.degree)
X = polyfeatures.fit_transform(X)
clf = self.learner
if W is None or not self.supports_weights:
model = clf.fit(X, Y, None)
else:
model = clf.fit(X, Y, sample_weight=W.reshape(-1))
return PolynomialModel(model, polyfeatures)
class LinearModel(SklModel):
@property
def intercept(self):
return self.skl_model.intercept_
@property
def coefficients(self):
return self.skl_model.coef_
def predict(self, X):
vals = self.skl_model.predict(X)
if len(vals.shape) == 1:
# Prevent IndexError for 1D array
return vals
elif vals.shape[1] == 1:
return vals.ravel()
else:
return vals
def __str__(self):
return "LinearModel {}".format(self.skl_model)
class PolynomialModel(Model):
def __init__(self, model, polyfeatures):
self.model = model
self.polyfeatures = polyfeatures
def predict(self, X):
X = self.polyfeatures.fit_transform(X)
return self.model.predict(X)
def __str__(self):
return "PolynomialModel {}".format(self.model)
PolynomialLearner.__returns__ = PolynomialModel
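

# Usage sketch (illustrative; mirrors the internal fit(X, Y, W) calls above, with
# X_train/y_train/X_test as plain numpy arrays rather than Orange Tables):
#
#   learner = PolynomialLearner(LinearRegressionLearner(), degree=2)
#   model = learner.fit(X_train, y_train, None)
#   y_pred = model.predict(X_test)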
|
483259
|
from django.urls import reverse
from django.views.generic import RedirectView
from django.views.generic import TemplateView
from users.models import TenantUser
from tenant_users.tenants.tasks import provision_tenant
from tenant_users.tenants.utils import create_public_tenant
class TenantView(TemplateView):
template_name = 'core/tenant.html'
|
483285
|
from dataclasses import dataclass
from typing import Any, Dict
@dataclass
class ServerConfig:
host: str
port: int
debug: bool
token: str
base_url: str
@dataclass
class PluginConfig:
enabled: bool
args: Dict[str, Any]
@dataclass
class Config:
version: int
server_config: ServerConfig
plugin_config: Dict[str, PluginConfig]
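

# Construction sketch (illustrative values, e.g. taken from a parsed YAML/JSON mapping):
#
#   config = Config(
#       version=1,
#       server_config=ServerConfig(host="127.0.0.1", port=8080, debug=False,
#                                  token="secret", base_url="/api"),
#       plugin_config={"logger": PluginConfig(enabled=True, args={"level": "INFO"})},
#   )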
|
483320
|
from socket import inet_ntoa
from typing import Any, Iterable, List, Optional, Sequence, Tuple, TypedDict
from eth_enr import ENR
from eth_enr.abc import ENRAPI
from eth_enr.exceptions import OldSequenceNumber
from eth_typing import HexStr, NodeID
from eth_utils import (
ValidationError,
decode_hex,
encode_hex,
is_list_like,
to_bytes,
to_dict,
)
from ddht.abc import RPCHandlerAPI
from ddht.endpoint import Endpoint
from ddht.kademlia import compute_distance
from ddht.rpc import RPCError, RPCHandler, RPCRequest
from ddht.v5_1.abc import NetworkAPI
from ddht.validation import (
validate_and_convert_hexstr,
validate_and_extract_destination,
validate_and_normalize_distances,
validate_params_length,
)
class PongResponse(TypedDict):
enr_seq: int
packet_ip: str
packet_port: int
class SendPingResponse(TypedDict):
request_id: HexStr
class GetENRResponse(TypedDict):
enr_repr: str
def extract_params(request: RPCRequest) -> List[Any]:
try:
params = request["params"]
except KeyError:
raise RPCError("Request missing `params` key")
if not is_list_like(params):
raise RPCError(
f"Params must be list-like: params-type={type(params)} params={params}"
)
return params
class PingHandler(RPCHandler[Tuple[NodeID, Optional[Endpoint]], PongResponse]):
def __init__(self, network: NetworkAPI) -> None:
self._network = network
def extract_params(self, request: RPCRequest) -> Tuple[NodeID, Optional[Endpoint]]:
raw_params = extract_params(request)
validate_params_length(raw_params, 1)
raw_destination = raw_params[0]
node_id, endpoint = validate_and_extract_destination(raw_destination)
return node_id, endpoint
async def do_call(self, params: Tuple[NodeID, Optional[Endpoint]]) -> PongResponse:
node_id, endpoint = params
pong = await self._network.ping(node_id, endpoint=endpoint)
return PongResponse(
enr_seq=pong.enr_seq,
packet_ip=inet_ntoa(pong.packet_ip),
packet_port=pong.packet_port,
)
class SendPingHandler(RPCHandler[Tuple[NodeID, Optional[Endpoint]], SendPingResponse]):
def __init__(self, network: NetworkAPI) -> None:
self._network = network
def extract_params(self, request: RPCRequest) -> Tuple[NodeID, Optional[Endpoint]]:
raw_params = extract_params(request)
validate_params_length(raw_params, 1)
raw_destination = raw_params[0]
node_id, endpoint = validate_and_extract_destination(raw_destination)
return node_id, endpoint
async def do_call(
self, params: Tuple[NodeID, Optional[Endpoint]]
) -> SendPingResponse:
node_id, endpoint = params
if endpoint is None:
enr = await self._network.lookup_enr(node_id)
endpoint = Endpoint.from_enr(enr)
request_id = await self._network.client.send_ping(node_id, endpoint)
return SendPingResponse(request_id=encode_hex(request_id))
class SendPongHandler(RPCHandler[Tuple[NodeID, Optional[Endpoint], HexStr], None]):
def __init__(self, network: NetworkAPI) -> None:
self._network = network
def extract_params(
self, request: RPCRequest
) -> Tuple[NodeID, Optional[Endpoint], HexStr]:
raw_params = extract_params(request)
validate_params_length(raw_params, 2)
raw_destination, request_id = raw_params
node_id, endpoint = validate_and_extract_destination(raw_destination)
return node_id, endpoint, request_id
async def do_call(self, params: Tuple[NodeID, Optional[Endpoint], HexStr]) -> None:
node_id, endpoint, request_id = params
if endpoint is None:
enr = await self._network.lookup_enr(node_id)
endpoint = Endpoint.from_enr(enr)
response = await self._network.client.send_pong(
node_id, endpoint, request_id=decode_hex(request_id)
)
return response
FindNodesRPCParams = Tuple[NodeID, Optional[Endpoint], Tuple[int, ...]]
class FindNodesHandler(RPCHandler[FindNodesRPCParams, Tuple[str, ...]]):
def __init__(self, network: NetworkAPI) -> None:
self._network = network
def extract_params(self, request: RPCRequest) -> FindNodesRPCParams:
raw_params = extract_params(request)
validate_params_length(raw_params, 2)
raw_destination, raw_distances = raw_params
node_id, endpoint = validate_and_extract_destination(raw_destination)
distances = validate_and_normalize_distances(raw_distances)
return node_id, endpoint, distances
async def do_call(self, params: FindNodesRPCParams) -> Tuple[str, ...]:
node_id, endpoint, distances = params
enrs = await self._network.find_nodes(node_id, *distances, endpoint=endpoint)
return tuple(repr(enr) for enr in enrs)
class SendFindNodesHandler(RPCHandler[FindNodesRPCParams, HexStr]):
def __init__(self, network: NetworkAPI) -> None:
self._network = network
def extract_params(self, request: RPCRequest) -> FindNodesRPCParams:
raw_params = extract_params(request)
validate_params_length(raw_params, 2)
raw_destination, raw_distances = raw_params
node_id, endpoint = validate_and_extract_destination(raw_destination)
distances = validate_and_normalize_distances(raw_distances)
return node_id, endpoint, distances
async def do_call(self, params: FindNodesRPCParams) -> HexStr:
node_id, endpoint, distances = params
if endpoint is None:
enr = await self._network.lookup_enr(node_id)
endpoint = Endpoint.from_enr(enr)
request_id = await self._network.client.send_find_nodes(
node_id, endpoint, distances=distances
)
return encode_hex(request_id)
SendFoundNodesRPCParams = Tuple[NodeID, Optional[Endpoint], Sequence[ENRAPI], bytes]
class SendFoundNodesHandler(RPCHandler[SendFoundNodesRPCParams, int]):
def __init__(self, network: NetworkAPI) -> None:
self._network = network
def extract_params(self, request: RPCRequest) -> SendFoundNodesRPCParams:
raw_params = extract_params(request)
validate_params_length(raw_params, 3)
raw_destination, raw_enrs, raw_request_id = raw_params
node_id, endpoint = validate_and_extract_destination(raw_destination)
enrs = [ENR.from_repr(enr) for enr in raw_enrs]
request_id = to_bytes(hexstr=raw_request_id)
return node_id, endpoint, enrs, request_id
async def do_call(self, params: SendFoundNodesRPCParams) -> int:
node_id, endpoint, enrs, request_id = params
if endpoint is None:
enr = await self._network.lookup_enr(node_id)
endpoint = Endpoint.from_enr(enr)
num_batches = await self._network.client.send_found_nodes(
node_id, endpoint, enrs=enrs, request_id=request_id
)
return num_batches
TalkRPCParams = Tuple[NodeID, Optional[Endpoint], bytes, bytes]
class SendTalkRequestHandler(RPCHandler[TalkRPCParams, HexStr]):
def __init__(self, network: NetworkAPI) -> None:
self._network = network
def extract_params(self, request: RPCRequest) -> TalkRPCParams:
raw_params = extract_params(request)
validate_params_length(raw_params, 3)
raw_destination, raw_protocol, raw_payload = raw_params
node_id, endpoint = validate_and_extract_destination(raw_destination)
protocol, payload = validate_and_convert_hexstr(raw_protocol, raw_payload)
return (
node_id,
endpoint,
protocol,
payload,
)
async def do_call(self, params: TalkRPCParams) -> HexStr:
node_id, endpoint, protocol, payload = params
if endpoint is None:
enr = await self._network.lookup_enr(node_id)
endpoint = Endpoint.from_enr(enr)
message_request_id = await self._network.client.send_talk_request(
node_id, endpoint, protocol=protocol, payload=payload,
)
return encode_hex(message_request_id)
class SendTalkResponseHandler(RPCHandler[TalkRPCParams, None]):
def __init__(self, network: NetworkAPI) -> None:
self._network = network
def extract_params(self, request: RPCRequest) -> TalkRPCParams:
raw_params = extract_params(request)
validate_params_length(raw_params, 3)
raw_destination, raw_payload, raw_request_id = raw_params
node_id, endpoint = validate_and_extract_destination(raw_destination)
payload, request_id = validate_and_convert_hexstr(raw_payload, raw_request_id)
return (
node_id,
endpoint,
payload,
request_id,
)
async def do_call(self, params: TalkRPCParams) -> None:
node_id, endpoint, payload, request_id = params
if endpoint is None:
enr = await self._network.lookup_enr(node_id)
endpoint = Endpoint.from_enr(enr)
response = await self._network.client.send_talk_response(
node_id, endpoint, payload=payload, request_id=request_id,
)
return response
class TalkHandler(RPCHandler[TalkRPCParams, HexStr]):
def __init__(self, network: NetworkAPI) -> None:
self._network = network
def extract_params(self, request: RPCRequest) -> TalkRPCParams:
raw_params = extract_params(request)
validate_params_length(raw_params, 3)
raw_destination, raw_protocol, raw_payload = raw_params
node_id, endpoint = validate_and_extract_destination(raw_destination)
protocol, payload = validate_and_convert_hexstr(raw_protocol, raw_payload)
return (
node_id,
endpoint,
protocol,
payload,
)
async def do_call(self, params: TalkRPCParams) -> HexStr:
node_id, endpoint, protocol, payload = params
if endpoint is None:
enr = await self._network.lookup_enr(node_id)
endpoint = Endpoint.from_enr(enr)
response = await self._network.talk(
node_id, protocol=protocol, payload=payload, endpoint=endpoint,
)
return encode_hex(response)
class RecursiveFindNodesHandler(RPCHandler[NodeID, Tuple[str, ...]]):
def __init__(self, network: NetworkAPI) -> None:
self._network = network
def extract_params(self, request: RPCRequest) -> NodeID:
raw_params = extract_params(request)
validate_params_length(raw_params, 1)
raw_destination = raw_params[0]
node_id, _ = validate_and_extract_destination(raw_destination)
return node_id
async def do_call(self, params: NodeID) -> Tuple[str, ...]:
node_id = params
async with self._network.recursive_find_nodes(node_id) as enr_aiter:
found_nodes = tuple(
sorted(
[enr async for enr in enr_aiter],
key=lambda enr: compute_distance(node_id, enr.node_id),
)
)
return tuple(repr(node) for node in found_nodes)
class BondHandler(RPCHandler[Tuple[NodeID, Optional[Endpoint]], int]):
def __init__(self, network: NetworkAPI) -> None:
self._network = network
def extract_params(self, request: RPCRequest) -> Tuple[NodeID, Optional[Endpoint]]:
raw_params = extract_params(request)
validate_params_length(raw_params, 1)
raw_destination = raw_params[0]
node_id, endpoint = validate_and_extract_destination(raw_destination)
return node_id, endpoint
async def do_call(self, params: Tuple[NodeID, Optional[Endpoint]]) -> bool:
node_id, endpoint = params
return await self._network.bond(node_id, endpoint=endpoint)
class GetENRHandler(RPCHandler[NodeID, GetENRResponse]):
def __init__(self, network: NetworkAPI) -> None:
self._network = network
def extract_params(self, request: RPCRequest) -> NodeID:
raw_params = extract_params(request)
validate_params_length(raw_params, 1)
raw_destination = raw_params[0]
node_id, _ = validate_and_extract_destination(raw_destination)
return node_id
async def do_call(self, params: NodeID) -> GetENRResponse:
response = self._network.enr_db.get_enr(params)
return GetENRResponse(enr_repr=repr(response))
class SetENRHandler(RPCHandler[ENRAPI, None]):
def __init__(self, network: NetworkAPI) -> None:
self._network = network
def extract_params(self, request: RPCRequest) -> ENRAPI:
raw_params = extract_params(request)
validate_params_length(raw_params, 1)
enr_repr = raw_params[0]
try:
enr = ENR.from_repr(enr_repr)
except ValidationError:
raise RPCError(f"Invalid ENR repr: {enr_repr}")
return enr
async def do_call(self, params: ENRAPI) -> None:
try:
self._network.enr_db.set_enr(params)
except OldSequenceNumber as exc:
raise RPCError(f"Invalid ENR, outdated sequence number: {exc}.")
return None
class DeleteENRHandler(RPCHandler[NodeID, None]):
def __init__(self, network: NetworkAPI) -> None:
self._network = network
def extract_params(self, request: RPCRequest) -> NodeID:
raw_params = extract_params(request)
validate_params_length(raw_params, 1)
raw_destination = raw_params[0]
node_id, _ = validate_and_extract_destination(raw_destination)
return node_id
async def do_call(self, params: NodeID) -> None:
self._network.enr_db.delete_enr(params)
return None
class LookupENRHandler(
RPCHandler[Tuple[NodeID, Optional[Endpoint], int], GetENRResponse]
):
def __init__(self, network: NetworkAPI) -> None:
self._network = network
def extract_params(
self, request: RPCRequest
) -> Tuple[NodeID, Optional[Endpoint], int]:
raw_params = extract_params(request)
if len(raw_params) == 1:
raw_destination = raw_params[0]
raw_sequence = 0
elif len(raw_params) == 2:
raw_destination, raw_sequence = raw_params
else:
raise RPCError("Invalid params for discv5_lookupENR request.")
node_id, endpoint = validate_and_extract_destination(raw_destination)
sequence_number = raw_sequence if raw_sequence else 0
return node_id, endpoint, sequence_number
async def do_call(
self, params: Tuple[NodeID, Optional[Endpoint], int]
) -> GetENRResponse:
node_id, endpoint, sequence_number = params
response = await self._network.lookup_enr(
node_id, enr_seq=sequence_number, endpoint=endpoint
)
return GetENRResponse(enr_repr=repr(response))
@to_dict
def get_v51_rpc_handlers(network: NetworkAPI) -> Iterable[Tuple[str, RPCHandlerAPI]]:
yield ("discv5_bond", BondHandler(network))
yield ("discv5_deleteENR", DeleteENRHandler(network))
yield ("discv5_findNodes", FindNodesHandler(network))
yield ("discv5_getENR", GetENRHandler(network))
yield ("discv5_lookupENR", LookupENRHandler(network))
yield ("discv5_ping", PingHandler(network))
yield ("discv5_recursiveFindNodes", RecursiveFindNodesHandler(network))
yield ("discv5_sendFindNodes", SendFindNodesHandler(network))
yield ("discv5_sendFoundNodes", SendFoundNodesHandler(network))
yield ("discv5_sendPing", SendPingHandler(network))
yield ("discv5_sendPong", SendPongHandler(network))
yield ("discv5_sendTalkRequest", SendTalkRequestHandler(network))
yield ("discv5_sendTalkResponse", SendTalkResponseHandler(network))
yield ("discv5_setENR", SetENRHandler(network))
yield ("discv5_talk", TalkHandler(network))
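

# Usage sketch (assumes an existing `network` implementing NetworkAPI and an incoming
# RPCRequest mapping `request`; thanks to the @to_dict decorator the generator above
# already returns a plain dict):
#
#   handlers = get_v51_rpc_handlers(network)
#   params = handlers["discv5_ping"].extract_params(request)
#   pong = await handlers["discv5_ping"].do_call(params)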
|
483430
|
import pickle
from rdflib import Graph
from pyshacl.monkey import apply_patches
from pyshacl.monkey.memory2 import Memory2
apply_patches()
identifier = "http://datashapes.org/schema"
store = Memory2(identifier=identifier)
with open("./schema.ttl", "rb") as f:
g = Graph(store=store, identifier=identifier).parse(file=f)
with open("./schema.pickle", "wb") as f:
pickle.dump((store, identifier), f, protocol=4) # protocol 5 only works in python 3.8+
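
# Round-trip sketch: the pickled (store, identifier) pair can later be re-attached to a
# Graph without re-parsing the Turtle source, e.g.
#
#   with open("./schema.pickle", "rb") as f:
#       store2, identifier2 = pickle.load(f)
#   g2 = Graph(store=store2, identifier=identifier2)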
|
483448
|
import uuid, pytest, decimal, math, datetime
from app.extensions import db
from app.api.utils.models_mixins import SoftDeleteMixin, Base
from app.api.mines.mine.models.mine import Mine
from tests.factories import MineFactory
def test_column_existence(db_session):
assert issubclass(Mine, SoftDeleteMixin)
assert hasattr(MineFactory(), 'deleted_ind')
def test_delete_method(db_session):
assert issubclass(Mine, SoftDeleteMixin)
model = MineFactory()
model.delete()
assert model.deleted_ind == True
def test_delete_method_does_not_hard_delete(db_session):
assert issubclass(Mine, SoftDeleteMixin)
model = MineFactory()
model.delete()
assert len(db_session.deleted) == 0
|
483463
|
import os
import pymongo
import random
import sys
from uuid import uuid4
from bson.objectid import ObjectId
from pymongo import ASCENDING
import math
from random import choice
from random import randint
from trueskill import trueskill
import numpy.random as rnd
# FIXME: We can't get away with silencing all errors in these methods.
# They've made too many bugs hard to find. Let's add a real error logging system.
class database(object):
#--------------------Results
studs = None
locs = None
biased_studs = None
study2activeLocID = None
def getResultsForStudy(self,studyID):
# FIXME: Normalize use of ObjectId/str for _id's.
return self.results.find_one({
"$or":[
{'study_id': studyID},
{'study_id': ObjectId(studyID)}
]
})
#--------------------Studies
def getAllStudies(self):
return self.studies.find()
def getAllLocations(self):
return self.locations.find()
def getAllQS(self):
return self.qs.find()
def getAllVotes(self):
return self.votes.find()
def getAllPlaces(self):
return self.places.find()
def getStudy(self,study_id):
return self.studies.find_one(ObjectId(study_id))
# Study objects can get very large (see places_id), so this func just returns the title
def getStudyQuestion(self,study_id):
return self.studies.find_one(ObjectId(study_id),{"study_question":1})['study_question']
def getRandomStudy(self):
return self.getStudy(self.biased_studs[rnd.choice(len(self.biased_studs))]['_id'])
def getAnotherStudy(self,study_id):
count = self.studies.count()
randomNumber = random.randint(0,count-2)
return self.studies.find({'_id':{'$ne':ObjectId(study_id)}}).limit(-1).skip(randomNumber).next()
def deleteStudy(self,study_id, owner):
study_query = { '_id' : ObjectId(study_id), 'owner': owner }
self.studies.remove(study_query)
# delete all the qs entries from that study
self.qs.remove({'study_id': study_id})
return True
def deleteStudyAdmin(self,study_id):
self.studies.remove( { '_id' : ObjectId(study_id)})
# delete all the qs entries from that study
self.qs.remove({'study_id': study_id})
return True
def returnObjectId(self,study_id):
return ObjectId(study_id)
def getStudies(self,owner):
return self.studies.find({'owner':owner})
def getStudiesAdmin(self):
return self.studies.find({}, {'places_id': 0})
def getNewStudies(self,limit):
return self.studies.find().limit(limit)
def getPopularStudies(self,limit):
return self.studies.find().limit(limit)
def getInactiveStudies(self,limit):
return self.studies.find().limit(limit) #Need to add votes_needed field for studies to track how long they have to go.
#--------------------Places
def getPlaces(self):
return self.places.find()
def getPlace(self,place_id):
return self.places.find_one(ObjectId(place_id))
def deletePlace_Locations(self,place_id):
self.places.remove( { '_id' : ObjectId(place_id) })
self.locations.remove( { 'place_id' : str(place_id) })
return True
def getNewCities(self,limit):
return self.studies.find().limit(limit)
#--------------------Locations
def getLocations(self,place_id,limit=96):
return self.locations.find({'place_id': place_id}).limit(limit)
def getLocationsByOwner(self,owner):
return self.locations.find({'owner': owner})
def getLocation(self,location_id):
return self.locations.find_one(ObjectId(location_id))
def updateLocation(self,location_id,heading,pitch):
self.locations.update( { '_id' : ObjectId(location_id) } , { '$set' : { 'heading' : heading, 'pitch' : pitch } } )
return True
def deleteLocation(self,location_id):
self.locations.remove( { '_id' : ObjectId(location_id) })
return True
def getRandomLocationByPlace(self,place_id):
if isinstance(place_id,ObjectId):
place_id = str(place_id)
        placeCount = Database.locations.find({"place_id": place_id}).count()
        if placeCount == 0: return None
        return self.locations.find({"place_id": place_id}).limit(-1).skip(randint(0,placeCount-1)).next()
#--------------------Users
def getUserById(self,userID):
return self.users.find_one({
'_id': userID if isinstance(userID,ObjectId) else ObjectId(userID)
})
def getUserByEmail(self,email):
return self.users.find_one({
'email': email
})
def getUserByVoterID(self,voterID):
return self.users.find_one({
'voter_uniqueid': voterID
})
def add_place(self, data_resolution, location_distribution, polygon, place_name, owner):
return Database.places.insert({
'data_resolution': data_resolution,
'location_distribution': location_distribution,
'polygon': polygon,
'place_name': place_name,
'owner': owner,
})
# should be called internally, when adding a new location to a place for a given study
def _add_qs(self, location_id, place_id, study_id):
return Database.qs.insert({
'location_id': str(location_id),
'study_id': str(study_id),
'place_id': str(place_id),
'num_votes': 0,
'trueskill': {
'score':trueskill.get_score(trueskill.mu0, trueskill.std0),
'mus':[trueskill.mu0],
'stds':[trueskill.std0]
}
})
# should be called internally, when adding a new location to a place for a given study
def _add_qs_place(self, place_id, study_id):
return self.db.qs_place.insert({
'place_id': str(place_id),
'study_id': str(study_id),
'num_votes':0,
'trueskill': {
'score':trueskill.get_score(trueskill.mu0, trueskill.std0),
'mus':[trueskill.mu0],
'stds':[trueskill.std0]
}
})
def get_qs_place(self, place_id, study_id):
return self.db.qs_place.find_one({'place_id': str(place_id),'study_id': str(study_id)})
def add_location(self, lat, lng, place_id, owner, study_id):
''' Adding the location consists of several tasks:
1. create/update the score for the place for the current study
2. add the new location
3. add score for the location for the current study
'''
# 1. add/update score for the place
qs_place = self.get_qs_place(place_id, study_id)
if qs_place is None:
self._add_qs_place(place_id, study_id)
else:
# update the score of a place accordingly (qs_entry)
# get the old scores
mus = qs_place['trueskill']['mus']
stds = qs_place['trueskill']['stds']
old_mu = mus[-1]
old_std = stds[-1]
# count how many locations are already from that place
N = Database.locations.find({'place_id': str(place_id)}).count()
# compute the new scores
new_mu = float(old_mu * N + trueskill.mu0)/(N+1)
mus[-1] = new_mu
new_std = math.sqrt(old_std**2 * N**2 + trueskill.std0**2)/(N+1)
stds[-1] = new_std
new_score = trueskill.get_score(new_mu, new_std)
# finally, update the qs_place entry
self.db.qs_place.update({'place_id': str(place_id), 'study_id': str(study_id)}, {
'$set': { 'trueskill.score': new_score,
'trueskill.mus' : mus,
'trueskill.stds': stds
}
})
# 2. add the new location
locID = Database.locations.insert({
'loc': [lat, lng],
'type':'gsv',
'place_id': str(place_id),
'owner': owner, #TODO: REAL LOGIN SECURITY
'heading': 0,
'pitch': 0,
'votes':0
})
# 3. add score for the location
self._add_qs(str(locID), place_id, study_id)
return locID
def createUserObj(self,voterID=None,email=None,extra_data=None):
if voterID is None:
voterID = str(uuid4().hex)
userObj = {
"voter_uniqueid": voterID
}
if email is not None:
userObj['email'] = email
if extra_data is not None:
userObj.update(extra_data)
newID = self.users.insert(userObj)
userObj['_id'] = str(newID)
return userObj
#--------------------Votes
def getVotes(self,study_id):
# FIXME: Normalize use of ObjectId/str for _id's.
return self.votes.find({
"$or":[
{'study_id': study_id},
{'study_id': ObjectId(study_id)}
]
})
def getVotesCount(self, study_id=None):
if study_id is not None:
return self.votes.find({"study_id": study_id}).count()
else:
return self.votes.find().count()
#--------------------QS
def getQS(self,study_id, location_id):
return self.qs.find_one({
"study_id": str(study_id),
"location_id": str(location_id)
})
# should be called internally, when update the qs scores of locations/places after a vote
def _pushQscore(self, qs_row_loc, mu_loc, std_loc, old_mu_loc, old_std_loc):
# update qs entry for the location
score = trueskill.get_score(mu_loc, std_loc)
self.qs.update({'_id': qs_row_loc['_id']}, {
'$set': { 'trueskill.score': score},
'$inc' : { 'num_votes': 1 },
'$push' : { 'trueskill.mus' : mu_loc, 'trueskill.stds': std_loc }
})
# update the qs entry for the place where the location is from
place_id = qs_row_loc['place_id']
study_id = qs_row_loc['study_id']
qs_place = self.get_qs_place(place_id, study_id)
        if qs_place is None:
            print("Couldn't find qs_place row with place_id {} and study_id {}".format(place_id, study_id))
            return
# update the score of a place accordingly (qs_entry)
# get the old scores
old_mu = qs_place['trueskill']['mus'][-1]
old_std = qs_place['trueskill']['stds'][-1]
# count how many locations are already from that place
N = Database.locations.find({'place_id': str(place_id)}).count()
# compute the new score
new_mu = old_mu + float(mu_loc - old_mu_loc)/N
new_std = math.sqrt( old_std**2 + (std_loc**2 - old_std_loc**2)/N**2 )
score = trueskill.get_score(new_mu, new_std)
self.db.qs_place.update({'_id': qs_place['_id']}, {
'$set': { 'trueskill.score': score},
'$inc' : { 'num_votes': 1 },
'$push' : { 'trueskill.mus' : new_mu, 'trueskill.stds': new_std }
})
def updateQScores(self, study_id, winner_locid, loser_locid, isDraw):
''' Update Q scores consists of several tasks:
1. update the scores of the two locations
2. update the scores of the place/two places where these two locations are from
3. increment the vote count for the study
'''
# 1. update the scores of the two locations (images)
winner_qs = self.getQS(study_id, winner_locid)
loser_qs = self.getQS(study_id, loser_locid)
        if winner_qs is None:
            print("Couldn't find a qs row with study_id {} and location id {}".format(study_id, winner_locid))
            return
        if loser_qs is None:
            print("Couldn't find a qs row with study_id {} and location id {}".format(study_id, loser_locid))
            return
# get the last mu and standard deviation
old_mu_winner = winner_qs['trueskill']['mus'][-1]
old_std_winner = winner_qs['trueskill']['stds'][-1]
old_mu_loser = loser_qs['trueskill']['mus'][-1]
old_std_loser = loser_qs['trueskill']['stds'][-1]
# update scores using the trueskill update equations
(mu_winner, std_winner), (mu_loser, std_loser) = trueskill.update_rating((old_mu_winner, old_std_winner), (old_mu_loser, old_std_loser), isDraw)
# 2. push and scores of the locations to the db and update the scores of the places where these locations are from
self._pushQscore(winner_qs, mu_winner, std_winner, old_mu_winner, old_std_winner)
self._pushQscore(loser_qs, mu_loser, std_loser, old_mu_loser, old_std_loser)
# 3. increment vote count for the study
self.studies.update({'_id': ObjectId(study_id)}, { '$inc' : { 'num_votes': 1 }})
# 4. If voted equal, bind the score of the image with less # of votes to the score of the other image and mark it inactive
if isDraw:
winner_nvotes = winner_qs['num_votes']
loser_nvotes = loser_qs['num_votes']
if winner_nvotes > loser_nvotes:
self.qs.update({'_id': loser_qs['_id']}, {'$set': { 'active': 0, 'equal_to': winner_qs['_id']}})
del self.study2activeLocID[study_id][loser_qs['location_id']]
else:
self.qs.update({'_id': winner_qs['_id']}, {'$set': { 'active': 0, 'equal_to': loser_qs['_id']}})
del self.study2activeLocID[study_id][winner_qs['location_id']]
return True
def randomLocPair(self, study_id):
activeLocID = self.study2activeLocID[study_id]
a, b = rnd.choice(activeLocID.keys(), size=2, replace=False)
return self.locs[a], self.locs[b]
@property
def locations(self):
return self.db.locations
@property
def places(self):
return self.db.places
@property
def results(self):
return self.db.results
@property
def studies(self):
return self.db.studies
@property
def users(self):
return self.db.pp_users
@property
def votes(self):
return self.db.votes
@property
def voterids(self):
return self.db.voterids
@property
def qs(self):
return self.db.qs
@property
def qs_place(self):
return self.db.qs_place
@property
def db(self):
if not hasattr(self, '_db'):
dbName = os.environ.get('MONGO_DBNAME', 'placepulse')
self._db = self.conn[dbName]
if os.environ.get('MONGO_USER') and os.environ.get('MONGO_PASSWORD'):
self._db.authenticate(os.environ['MONGO_USER'],os.environ['MONGO_PASSWORD'])
return self._db
@property
def conn(self):
if not hasattr(self, '_conn'):
if os.environ.get('MONGO_HOSTNAME') and os.environ.get('MONGO_PORT'):
self._conn = pymongo.Connection(os.environ.get('MONGO_HOSTNAME'), port=int(os.environ.get('MONGO_PORT')))
else: self._conn = pymongo.Connection()
return self._conn
# a singleton object
Database = database()
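
# Usage sketch (illustrative IDs; connection settings come from the MONGO_* environment
# variables read lazily by the `conn`/`db` properties above):
#
#   study = Database.getStudy(some_study_id)
#   Database.updateQScores(str(some_study_id), winner_loc_id, loser_loc_id, isDraw=False)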
|
483486
|
import os
import sys
import numpy as np
import torch
import h5py
from tqdm import tqdm
from torch.utils import data
from torch.utils.data import DataLoader, TensorDataset
import musdb
from yacs.config import CfgNode as CN
import SharedArray as sa
import random
def norm(arr, thres=None, max_=None):
arr -= np.mean(arr)
if thres is not None and np.max(arr) < thres:
arr = arr
silence = True
else:
if max_ is not None:
arr = arr / max_
else:
arr = arr / float(np.max(arr))
silence = False
return arr, silence
def norm_std(arr, thres=None, std=None):
arr -= np.mean(arr)
if thres is not None and np.max(arr) < thres:
arr = arr
silence = True
else:
if std is not None:
arr = arr / std
else:
arr = arr / float(np.std(arr))
silence = False
return arr, silence
def make_dataset(data_path, mode=None):
try:
mixture_array = sa.attach(f"shm://{mode}_mixture_array")
vocal_array = sa.attach(f"shm://{mode}_vocal_array")
except:
mus = musdb.DB(root=data_path, is_wav=True, subsets=mode)
mixture_list = list()
vocal_list = list()
for track in tqdm(mus):
#mixture_list.append(track.audio.sum(axis=-1))
mixture_list.append(norm(track.audio)[0])
#vocal_list.append(track.targets['vocals'].audio.sum(axis=-1))
vocal_list.append(norm(track.targets['vocals'].audio)[0])
mixture_array = np.concatenate(mixture_list)
vocal_array = np.concatenate(vocal_list)
assert mixture_array.shape == vocal_array.shape
mixture_array_sa = sa.create(f"shm://{mode}_mixture_array", mixture_array.shape)
vocal_array_sa = sa.create(f"shm://{mode}_vocal_array", vocal_array.shape)
mixture_array_sa[::] = mixture_array
vocal_array_sa[::] = vocal_array
return dict(mixture_array=mixture_array,
vocal_array=vocal_array
)
class musdb18(data.Dataset):
def __init__(self, config):
self.mode = config.mode
print(f'########subset:{self.mode}###########')
self.data_dicts = make_dataset(config.data_root, mode=self.mode)
self.sample_dis = config.sample_dis
self.seg_len = config.seg_len
def __getitem__(self, index):
start = self.sample_dis * index
end = start + self.seg_len
mix_wav = self.data_dicts["mixture_array"][start:end]
vocal_wav = self.data_dicts["vocal_array"][start:end]
mix_std = np.std(mix_wav)
mix_max = np.max(mix_wav)
#mix_wav, silence = norm_std(mix_wav, thres=1e-2)
mix_wav, silence = norm(mix_wav, thres=1e-2)
if silence:
return self.__getitem__(random.randint(0, len(self)-1))
#vocal_wav, _ = norm_std(vocal_wav, thres=1e-2, std=mix_std)
vocal_wav, _ = norm(vocal_wav, thres=1e-2, max_=mix_max)
return dict(mix_wav=mix_wav,
vocal_wav=vocal_wav
)
def __len__(self):
return (self.data_dicts["mixture_array"].shape[0] - self.seg_len) // self.sample_dis
class Musdb18DB:
def __init__(self, config):
print('#####LOADING DATASOURCE#####')
self.config = config
train_set = musdb18(config.train)
test_set = musdb18(config.test)
self.mode = 'Train'
distributed = self.config.distributed
train_sampler = None
test_sampler = None
world_size = 1
if distributed:
local_rank = int(sys.argv[1].split('=')[-1])
world_size = torch.distributed.get_world_size()
print('######world size {}#####'.format(world_size))
train_sampler = torch.utils.data.distributed.DistributedSampler(train_set, num_replicas=world_size, rank=local_rank)
test_sampler = torch.utils.data.distributed.DistributedSampler(test_set, num_replicas=world_size, rank=local_rank)
self.train_loader = DataLoader(train_set, batch_size=self.config.batch_size, shuffle=(train_sampler is None),
num_workers=self.config.data_loader_workers,
pin_memory=self.config.pin_memory, sampler=train_sampler)
self.test_loader = DataLoader(test_set, batch_size=self.config.test.batch_size, shuffle=(test_sampler is None),
num_workers=self.config.data_loader_workers,
pin_memory=self.config.pin_memory, sampler=test_sampler)
self.train_iterations = len(train_set) // self.config.batch_size
self.test_iterations = len(test_set) // self.config.test.batch_size // world_size
self.loader = self.train_loader
self.iterations = self.train_iterations // world_size
self.counter = 0
if __name__ == "__main__":
config = CN()
    config.batch_size = 12
    config.data_loader_workers = 24
    config.pin_memory = True
    config.distributed = False
    config.train = CN()
    config.train.data_root = "/root/thome/musdb18_wav"
    config.train.sample_dis = 1024
    config.train.seg_len = 44100 * 7
    config.train.mode = "Train"
    # Musdb18DB also builds a test loader, so mirror the train settings
    config.test = CN()
    config.test.batch_size = 12
    config.test.data_root = "/root/thome/musdb18_wav"
    config.test.sample_dis = 1024
    config.test.seg_len = 44100 * 7
    config.test.mode = "Test"
# make_dataset('Train', "/root/thome/musdb18/train_data_unet.h5")
sb = Musdb18DB(config)
for i in sb.train_loader:
__import__('pdb').set_trace()
__import__('pdb').set_trace()
|
483503
|
import os
import subprocess
import sys
from distutils.version import LooseVersion
from glob import glob
from os.path import join
import numpy as np
import setuptools.command.build_py
import setuptools.command.develop
from setuptools import Extension, find_packages, setup
platform_is_windows = sys.platform == "win32"
version = "0.0.1"
min_cython_ver = "0.21.0"
try:
import Cython
ver = Cython.__version__
_CYTHON_INSTALLED = ver >= LooseVersion(min_cython_ver)
except ImportError:
_CYTHON_INSTALLED = False
try:
if not _CYTHON_INSTALLED:
raise ImportError("No supported version of Cython installed.")
from Cython.Distutils import build_ext
cython = True
except ImportError:
cython = False
if cython:
ext = ".pyx"
cmdclass = {"build_ext": build_ext}
else:
ext = ".cpp"
cmdclass = {}
if not os.path.exists(join("pysinsy", "sinsy" + ext)):
raise RuntimeError("Cython is required to generate C++ code")
# sinsy sources
src_top = join("lib", "sinsy", "src")
all_src = []
include_dirs = []
for s in [
"lib",
"lib/converter",
"lib/japanese",
"lib/label",
"lib/score",
"lib/temporary",
"lib/util",
"lib/xml",
"lib/hts_engine_API",
"lib/hts_engine_API/hts_engine/src/lib",
]:
all_src += glob(join(src_top, s, "*.c"))
all_src += glob(join(src_top, s, "*.cpp"))
include_dirs.append(join(os.getcwd(), src_top, s))
# Add top include dir
include_dirs.append(join(src_top, "include", "sinsy"))
include_dirs.append(join(src_top, "lib/hts_engine_API/hts_engine/src/include"))
# Extension for sinsy
ext_modules = [
Extension(
name="pysinsy.sinsy",
sources=[join("pysinsy", "sinsy" + ext)] + all_src,
include_dirs=[np.get_include()] + include_dirs,
extra_compile_args=[],
extra_link_args=[],
libraries=["winmm"] if platform_is_windows else [],
language="c++",
)
]
# Adapted from https://github.com/pytorch/pytorch
cwd = os.path.dirname(os.path.abspath(__file__))
if os.getenv("PYSINSY_BUILD_VERSION"):
version = os.getenv("PYSINSY_BUILD_VERSION")
else:
try:
sha = (
subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=cwd)
.decode("ascii")
.strip()
)
version += "+" + sha[:7]
except subprocess.CalledProcessError:
pass
except IOError: # FileNotFoundError for python 3
pass
class build_py(setuptools.command.build_py.build_py):
def run(self):
self.create_version_file()
setuptools.command.build_py.build_py.run(self)
@staticmethod
def create_version_file():
global version, cwd
print("-- Building version " + version)
version_path = os.path.join(cwd, "pysinsy", "version.py")
with open(version_path, "w") as f:
f.write("__version__ = '{}'\n".format(version))
class develop(setuptools.command.develop.develop):
def run(self):
build_py.create_version_file()
setuptools.command.develop.develop.run(self)
cmdclass["build_py"] = build_py
cmdclass["develop"] = develop
with open("README.md", "r") as fd:
long_description = fd.read()
setup(
name="pysinsy",
version=version,
description="A python wrapper for sinsy",
long_description=long_description,
long_description_content_type="text/markdown",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/r9y9/pysinsy",
license="MIT",
packages=find_packages(include=["pysinsy*"]),
package_data={"": ["htsvoice/*"]},
ext_modules=ext_modules,
cmdclass=cmdclass,
install_requires=[
"numpy >= 1.8.0",
"cython >= " + min_cython_ver,
"six",
],
tests_require=["pytest", "coverage"],
extras_require={
"docs": [
"sphinx_rtd_theme",
"nbsphinx>=0.8.6",
"Jinja2>=3.0.1",
"pandoc",
"ipython",
"jupyter",
],
"lint": [
"pysen",
"types-setuptools",
"mypy<=0.910",
"black>=19.19b0,<=20.8",
"flake8>=3.7,<4",
"flake8-bugbear",
"isort>=4.3,<5.2.0",
],
"test": ["pytest", "scipy"],
},
classifiers=[
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS",
"Programming Language :: Cython",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"License :: OSI Approved :: MIT License",
"Topic :: Scientific/Engineering",
"Topic :: Software Development",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
],
keywords=["Sinsy", "Research"],
)
|
483532
|
from island_backup import network
from island_backup.island_switcher import island_switcher
import aiohttp
import asyncio
import pytest
NO_NEXT_PAGE = object()
async def get_page(url):
network.session = aiohttp.ClientSession()
island_switcher.detect_by_url(url)
url = island_switcher.sanitize_url(url)
print(url)
p = await island_switcher.island_page_model.from_url(url, page_num=1)
    await network.session.close()
return p
def check_block(block, data_dict):
check_key = ['uid', 'id', 'content', 'image_url', 'created_time']
for key in check_key:
assert getattr(block, key) == data_dict[key]
class BaseTest:
NEXT_PAGE_URL = ''
THREAD_LIST_NUM = 20
BLOCK_0_DATA = None
BLOCK_1_DATA = None
    # The raw URL as entered by the user
RAW_URLS = None
    # The converted URLs the program should actually request; they correspond one-to-one with RAW_URLS.
REQUEST_URLS = None
@pytest.fixture(scope='class')
def page(self):
return asyncio.get_event_loop().run_until_complete(get_page(self.RAW_URLS[0]))
@pytest.fixture(scope='class')
def thread_list(self, page):
return page.thread_list()
def test_sanitize_url(self, page):
for raw, req in zip(self.RAW_URLS, self.REQUEST_URLS):
assert page.url_page_combine(page.sanitize_url(raw), 1) == req
def test_page(self, page):
if self.NEXT_PAGE_URL is NO_NEXT_PAGE:
return
assert page.has_next()
print(page.next_page_info)
assert page.url_page_combine(*page.next_page_info) == self.NEXT_PAGE_URL
def test_blocks_num(self, thread_list):
print(len(thread_list))
assert len(thread_list) == self.THREAD_LIST_NUM
def test_first_block(self, thread_list):
block = thread_list[0]
check_block(block, self.BLOCK_0_DATA)
def test_second_block(self, thread_list):
block = thread_list[1]
check_block(block, self.BLOCK_1_DATA)
def test_another_block(self, thread_list):
raise NotImplementedError
|
483615
|
import torch
import torch.nn as nn
from torch.nn import init
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
def __init__(self, n_layers=3, use_sigmoid=False, gpu_ids=[]):
super(NLayerDiscriminator, self).__init__()
self.gpu_ids = gpu_ids
kw = 4
padw = 1
sequence = [
nn.Conv2d(3, 64, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, True)
]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers):
nf_mult_prev = nf_mult
nf_mult = min(2**n, 8)
sequence += [
nn.Conv2d(64 * nf_mult_prev, 64 * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=True),
nn.InstanceNorm2d(64 * nf_mult, affine=False),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2**n_layers, 8)
sequence += [
nn.Conv2d(64 * nf_mult_prev, 64 * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=True),
nn.InstanceNorm2d(64 * nf_mult, affine=False),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(64 * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
if use_sigmoid:
sequence += [nn.Sigmoid()]
self.model = nn.Sequential(*sequence)
def forward(self, input):
if len(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor):
return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
else:
return self.model(input)
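if __name__ == "__main__":
    # Quick shape check (a sketch; the 256x256 input size is illustrative only).
    # With the default n_layers=3, three stride-2 convolutions followed by two
    # stride-1 convolutions map a 3x256x256 image to a 1x30x30 patch map of scores.
    net = NLayerDiscriminator(n_layers=3)
    dummy = torch.randn(2, 3, 256, 256)
    print(net(dummy).shape)  # torch.Size([2, 1, 30, 30])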
|
483649
|
from django.core.cache import cache, caches, InvalidCacheBackendError
from sorl.thumbnail.kvstores.base import KVStoreBase
from sorl.thumbnail.conf import settings
from sorl.thumbnail.models import KVStore as KVStoreModel
class EMPTY_VALUE:
pass
class KVStore(KVStoreBase):
def __init__(self):
super().__init__()
@property
def cache(self):
try:
kv_cache = caches[settings.THUMBNAIL_CACHE]
except InvalidCacheBackendError:
kv_cache = cache
return kv_cache
def clear(self, delete_thumbnails=False):
"""
We can clear the database more efficiently using the prefix here rather
than calling :meth:`_delete_raw`.
"""
prefix = settings.THUMBNAIL_KEY_PREFIX
for key in self._find_keys_raw(prefix):
self.cache.delete(key)
KVStoreModel.objects.filter(key__startswith=prefix).delete()
if delete_thumbnails:
self.delete_all_thumbnail_files()
def _get_raw(self, key):
value = self.cache.get(key)
if value is None:
try:
value = KVStoreModel.objects.get(key=key).value
except KVStoreModel.DoesNotExist:
# we set the cache to prevent further db lookups
value = EMPTY_VALUE
self.cache.set(key, value, settings.THUMBNAIL_CACHE_TIMEOUT)
if value == EMPTY_VALUE:
return None
return value
def _set_raw(self, key, value):
kvstore_value, created = KVStoreModel.objects.get_or_create(
key=key, defaults={'value': value})
if not created:
kvstore_value.value = value
kvstore_value.save()
self.cache.set(key, value, settings.THUMBNAIL_CACHE_TIMEOUT)
def _delete_raw(self, *keys):
KVStoreModel.objects.filter(key__in=keys).delete()
for key in keys:
self.cache.delete(key)
def _find_keys_raw(self, prefix):
qs = KVStoreModel.objects.filter(key__startswith=prefix)
return qs.values_list('key', flat=True)
|
483707
|
import os, requests
def solve() -> bool:
flag = "magpie{r1ch4rd_l0v35_t0_5w34t}"
challenge_host = "http://web01.magpiectf.ca:9949"
infile = open(os.path.join(os.path.dirname(__file__), "assets/latex-solve.txt"))
latex = infile.read()
r = requests.post(challenge_host + "/ajax.php", data={"content": latex})
infile.close()
return flag in r.text
|
483722
|
import os
import glob
from django.core.management import BaseCommand
from ...bootstrap import process_json_file
class Command(BaseCommand):
def add_arguments(self, parser):
# Positional arguments
parser.add_argument('data_file', nargs='+', type=str)
def handle(self, *args, **options):
for suggestion in options['data_file']:
# if it's just a simple json file
if os.path.exists(suggestion) and os.path.isfile(suggestion):
process_json_file(suggestion)
else:
# check if it's a glob
for filename in glob.glob(suggestion):
process_json_file(filename)
self.stdout.write("Done loading")
|
483724
|
from hwt.interfaces.agents.handshaked import HandshakedAgent
from hwt.interfaces.std import VectSignal, HandshakeSync, Signal
from hwt.synthesizer.param import Param
from hwtSimApi.hdlSimulator import HdlSimulator
class AddrDataHs(HandshakeSync):
"""
Simple handshaked interface with address and data signal
.. hwt-autodoc::
"""
def _config(self):
self.ADDR_WIDTH = Param(8)
self.DATA_WIDTH = Param(8)
self.HAS_MASK = Param(False)
def _declr(self):
super(AddrDataHs, self)._declr()
self.addr = VectSignal(self.ADDR_WIDTH)
self.data = VectSignal(self.DATA_WIDTH)
if self.HAS_MASK:
assert self.DATA_WIDTH % 8 == 0, self.DATA_WIDTH
self.mask = VectSignal(self.DATA_WIDTH // 8)
def _initSimAgent(self, sim: HdlSimulator):
if self.HAS_MASK:
self._ag = AddrDataMaskHsAgent(sim, self)
else:
self._ag = AddrDataHsAgent(sim, self)
class AddrDataVldHs(AddrDataHs):
"""
:see: :class:`.AddrDataHs` with a vld_flag signal
.. hwt-autodoc::
"""
def _declr(self):
super(AddrDataVldHs, self)._declr()
self.vld_flag = Signal()
def _initSimAgent(self, sim: HdlSimulator):
self._ag = AddrDataVldAgent(sim, self)
class AddrDataBitMaskHs(AddrDataHs):
"""
:see: :class:`.AddrDataHs` with a mask signal
:note: mask has 1b granularity
.. hwt-autodoc::
"""
def _declr(self):
super(AddrDataBitMaskHs, self)._declr()
self.mask = VectSignal(self.DATA_WIDTH)
def _initSimAgent(self, sim: HdlSimulator):
self._ag = AddrDataMaskHsAgent(sim, self)
class AddrDataHsAgent(HandshakedAgent):
def set_data(self, data):
i = self.intf
if data is None:
addr, d = None, None
else:
addr, d = data
i.addr.write(addr)
i.data.write(d)
def get_data(self):
i = self.intf
return i.addr.read(), i.data.read()
class AddrDataMaskHsAgent(HandshakedAgent):
def set_data(self, data):
i = self.intf
if data is None:
addr, d, mask = None, None, None
else:
addr, d, mask = data
i.addr.write(addr)
i.data.write(d)
i.mask.write(mask)
def get_data(self):
i = self.intf
return i.addr.read(), i.data.read(), i.mask.read()
class AddrDataVldAgent(HandshakedAgent):
def set_data(self, data):
i = self.intf
if data is None:
addr, d, vld = None, None, None
else:
addr, d, vld = data
i.addr.write(addr)
i.data.write(d)
i.vld_flag.write(vld)
def get_data(self):
i = self.intf
return i.addr.read(), i.data.read(), i.vld_flag.read()
|
483770
|
from typing import Tuple
import thop
import torch
import torch.nn as nn
import yacs.config
def count_op(config: yacs.config.CfgNode, model: nn.Module) -> Tuple[str, str]:
data = torch.zeros((1, config.dataset.n_channels,
config.dataset.image_size, config.dataset.image_size),
dtype=torch.float32,
device=torch.device(config.device))
return thop.clever_format(thop.profile(model, (data, ), verbose=False))
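if __name__ == "__main__":
    # Minimal usage sketch. The config keys below mirror exactly what count_op
    # reads; the toy model and the 32x32 image size are illustrative, not taken
    # from the original project.
    model = nn.Sequential(
        nn.Conv2d(3, 8, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.Flatten(),
        nn.Linear(8 * 32 * 32, 10),
    )
    cfg = yacs.config.CfgNode()
    cfg.device = 'cpu'
    cfg.dataset = yacs.config.CfgNode()
    cfg.dataset.n_channels = 3
    cfg.dataset.image_size = 32
    macs, params = count_op(cfg, model)
    print(macs, params)  # human-readable strings, e.g. '...K' / '...M'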
|
483824
|
import scipy as sp
import scipy.sparse  # make sure the sparse submodule is loaded for sp.sparse.issparse below
from enum import Enum
from sklearn.metrics import pairwise_distances
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_array, check_is_fitted, check_random_state
class HiddenLayerType(Enum):
RANDOM = 1 # Gaussian random projection
SPARSE = 2 # Sparse Random Projection
PAIRWISE = 3 # Pairwise kernel with a number of centroids
def dummy(x):
return x
def flatten(items):
"""Yield items from any nested iterable."""
for x in items:
# don't break strings into characters
if hasattr(x, '__iter__') and not isinstance(x, (str, bytes)):
yield from flatten(x)
else:
yield x
def _is_list_of_strings(obj):
return obj is not None and all(isinstance(elem, str) for elem in obj)
def _dense(X):
if sp.sparse.issparse(X):
return X.todense()
else:
return X
class PairwiseRandomProjection(BaseEstimator, TransformerMixin):
def __init__(self, n_components=100, pairwise_metric='l2', n_jobs=None, random_state=None):
"""Pairwise distances projection with random centroids.
Parameters
----------
n_components : int
Number of components (centroids) in the projection. Creates the same number of output features.
pairwise_metric : str
A valid pairwise distance metric, see pairwise-distances_.
.. _pairwise-distances: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise_distances.html#sklearn.metrics.pairwise_distances
n_jobs : int or None, optional, default=None
Number of jobs to use in distance computations, or `None` for no parallelism.
Passed to _pairwise-distances function.
random_state
Used for random generation of centroids.
"""
self.n_components = n_components
self.pairwise_metric = pairwise_metric
self.n_jobs = n_jobs
self.random_state = random_state
def fit(self, X, y=None):
"""Generate artificial centroids.
Centroids are sampled from a normal distribution. They work best if the data is normalized.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
Input data
"""
X = check_array(X, accept_sparse=True)
self.random_state_ = check_random_state(self.random_state)
if self.n_components <= 0:
raise ValueError("n_components must be greater than 0, got %s" % self.n_components)
self.components_ = self.random_state_.randn(self.n_components, X.shape[1])
self.n_jobs_ = 1 if self.n_jobs is None else self.n_jobs
return self
def transform(self, X):
"""Compute distance matrix between input data and the centroids.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Input data samples.
Returns
-------
X_dist : numpy array
Distance matrix between input data samples and centroids.
"""
X = check_array(X, accept_sparse=True)
check_is_fitted(self, 'components_')
if X.shape[1] != self.components_.shape[1]:
raise ValueError(
'Impossible to perform projection: X at fit stage had a different number of features. '
'(%s != %s)' % (X.shape[1], self.components_.shape[1]))
try:
X_dist = pairwise_distances(X, self.components_, n_jobs=self.n_jobs_, metric=self.pairwise_metric)
except TypeError:
# scipy distances that don't support sparse matrices
X_dist = pairwise_distances(_dense(X), _dense(self.components_), n_jobs=self.n_jobs_, metric=self.pairwise_metric)
return X_dist
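if __name__ == "__main__":
    # Minimal usage sketch (assumed scikit-learn style workflow; the sizes and
    # the 'cosine' metric below are illustrative).
    import numpy as np
    X = np.random.randn(200, 30)
    proj = PairwiseRandomProjection(n_components=50, pairwise_metric='cosine', random_state=0)
    X_dist = proj.fit_transform(X)
    print(X_dist.shape)  # (200, 50): distance from each sample to the 50 random centroids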
|
483880
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
triggerFlagPSet = cms.PSet(
dcsInputTag = cms.InputTag('scalersRawToDigi'),
dcsPartitions = cms.vint32( 24, 25, 26, 27, 28, 29 ),
andOrDcs = cms.bool(False),
errorReplyDcs = cms.bool(True),
dbLabel = cms.string(''),
andOrHlt = cms.bool(True),
hltInputTag = cms.InputTag('TriggerResults', '', 'HLT'),
hltPaths = cms.vstring(),
hltDBKey = cms.string(''),
errorReplyHlt = cms.bool(False),
verbosityLevel = cms.uint32(1)
)
mssmHbbBtagTriggerMonitor = DQMEDAnalyzer("TagAndProbeBtagTriggerMonitor",
dirname = cms.string("HLT/HIG/MssmHbb/"),
requireValidHLTPaths = cms.bool(True),
processname = cms.string("HLT"),
jetPtMin = cms.double(40),
jetEtaMax = cms.double(2.2),
tagBtagMin = cms.double(0.80),
probeBtagMin = cms.double(0.45),
triggerobjbtag = cms.string("hltBTagCaloDeepCSV0p71Single8Jets30"),
triggerSummary = cms.InputTag("hltTriggerSummaryAOD","","HLT"),
offlineBtag = cms.InputTag("pfCombinedInclusiveSecondaryVertexV2BJetTags"),
histoPSet = cms.PSet(
jetPt = cms.vdouble(40,45,50,55,60,65,70,75,80,85,90,95,100),
jetEta = cms.vdouble(-2.5,-2.0,-1.5,-1.0,-0.5,0.0,0.5,1.0,1.5,2.0,2.5),
jetPhi = cms.vdouble(-3.5,-3.0,-2.5,-2.0,-1.5,-1.0,-0.5,0.0,0.5,1.0,1.5,2.0,2.5,3.0,3.5),
jetBtag = cms.vdouble(0.80,0.81,0.82,0.83,0.84,0.85,0.86,0.87,0.88,0.89,0.90,0.91,0.92,0.93,0.94,0.95,0.96,0.97,0.98,0.99,1.00),
),
genericTriggerEventPSet = triggerFlagPSet.clone(),
)
# online btagging monitor
mssmHbbBtagTriggerMonitorSL40noMu = mssmHbbBtagTriggerMonitor.clone()
mssmHbbBtagTriggerMonitorSL40noMu.dirname = cms.string("HLT/HIG/MssmHbb/semileptonic/BtagTrigger/pt40_noMuon")
mssmHbbBtagTriggerMonitorSL40noMu.jetPtMin = cms.double(40)
mssmHbbBtagTriggerMonitorSL40noMu.triggerobjbtag = cms.string("hltBTagCaloDeepCSV0p71Single8Jets30")
mssmHbbBtagTriggerMonitorSL40noMu.histoPSet.jetPt = cms.vdouble(40,45,50,55,60,65,70,75,80,85,90,95,100)
mssmHbbBtagTriggerMonitorSL40noMu.genericTriggerEventPSet.hltPaths = cms.vstring('HLT_DoublePFJets40_CaloBTagDeepCSV_p71_v*')
mssmHbbBtagTriggerMonitorSL40 = mssmHbbBtagTriggerMonitor.clone()
mssmHbbBtagTriggerMonitorSL40.dirname = cms.string("HLT/HIG/MssmHbb/semileptonic/BtagTrigger/pt40")
mssmHbbBtagTriggerMonitorSL40.jetPtMin = cms.double(40)
mssmHbbBtagTriggerMonitorSL40.triggerobjbtag = cms.string("hltBTagCaloDeepCSV0p71Single8Jets30")
mssmHbbBtagTriggerMonitorSL40.histoPSet.jetPt = cms.vdouble(40,45,50,55,60,65,70,75,80,85,90,95,100)
mssmHbbBtagTriggerMonitorSL40.genericTriggerEventPSet.hltPaths = cms.vstring('HLT_Mu12_DoublePFJets40_CaloBTagDeepCSV_p71_v*')
mssmHbbBtagTriggerMonitorSL100 = mssmHbbBtagTriggerMonitor.clone()
mssmHbbBtagTriggerMonitorSL100.dirname = cms.string("HLT/HIG/MssmHbb/semileptonic/BtagTrigger/pt100")
mssmHbbBtagTriggerMonitorSL100.jetPtMin = cms.double(100)
mssmHbbBtagTriggerMonitorSL100.triggerobjbtag = cms.string("hltBTagCaloDeepCSV0p71Single8Jets30")
mssmHbbBtagTriggerMonitorSL100.histoPSet.jetPt = cms.vdouble(100,110,120,130,140,150,160,170,180,190,200)
mssmHbbBtagTriggerMonitorSL100.genericTriggerEventPSet.hltPaths = cms.vstring('HLT_Mu12_DoublePFJets100_CaloBTagDeepCSV_p71_v*')
mssmHbbBtagTriggerMonitorSL200 = mssmHbbBtagTriggerMonitor.clone()
mssmHbbBtagTriggerMonitorSL200.dirname = cms.string("HLT/HIG/MssmHbb/semileptonic/BtagTrigger/pt200")
mssmHbbBtagTriggerMonitorSL200.jetPtMin = cms.double(200)
mssmHbbBtagTriggerMonitorSL200.triggerobjbtag = cms.string("hltBTagCaloDeepCSV0p71Single8Jets30")
mssmHbbBtagTriggerMonitorSL200.histoPSet.jetPt = cms.vdouble(200,210,220,230,240,250,260,270,280,290,300,310,320,330,340,350)
mssmHbbBtagTriggerMonitorSL200.genericTriggerEventPSet.hltPaths = cms.vstring('HLT_Mu12_DoublePFJets200_CaloBTagDeepCSV_p71_v*')
mssmHbbBtagTriggerMonitorSL350 = mssmHbbBtagTriggerMonitor.clone()
mssmHbbBtagTriggerMonitorSL350.dirname = cms.string("HLT/HIG/MssmHbb/semileptonic/BtagTrigger/pt350")
mssmHbbBtagTriggerMonitorSL350.jetPtMin = cms.double(350)
mssmHbbBtagTriggerMonitorSL350.triggerobjbtag = cms.string("hltBTagCaloDeepCSV0p71Single8Jets30")
mssmHbbBtagTriggerMonitorSL350.histoPSet.jetPt = cms.vdouble(350,360,370,380,390,400,410,420,430,440,450,460,470,480,490,500,510,520,530,540,550,560,570,580,590,600)
mssmHbbBtagTriggerMonitorSL350.genericTriggerEventPSet.hltPaths = cms.vstring('HLT_Mu12_DoublePFJets350_CaloBTagDeepCSV_p71_v*')
mssmHbbBtagTriggerMonitorAH100 = mssmHbbBtagTriggerMonitor.clone()
mssmHbbBtagTriggerMonitorAH100.dirname = cms.string("HLT/HIG/MssmHbb/fullhadronic/BtagTrigger/pt100")
mssmHbbBtagTriggerMonitorAH100.jetPtMin = cms.double(100)
mssmHbbBtagTriggerMonitorAH100.triggerobjbtag = cms.string("hltBTagCaloDeepCSV0p71Single6Jets80")
mssmHbbBtagTriggerMonitorAH100.histoPSet.jetPt = cms.vdouble(100,110,120,130,140,150,160,170,180,190,200)
mssmHbbBtagTriggerMonitorAH100.genericTriggerEventPSet.hltPaths = cms.vstring('HLT_DoublePFJets100_CaloBTagDeepCSV_p71_v*')
mssmHbbBtagTriggerMonitorAH200 = mssmHbbBtagTriggerMonitor.clone()
mssmHbbBtagTriggerMonitorAH200.dirname = cms.string("HLT/HIG/MssmHbb/fullhadronic/BtagTrigger/pt200")
mssmHbbBtagTriggerMonitorAH200.jetPtMin = cms.double(200)
mssmHbbBtagTriggerMonitorAH200.triggerobjbtag = cms.string("hltBTagCaloDeepCSV0p71Single6Jets80")
mssmHbbBtagTriggerMonitorAH200.histoPSet.jetPt = cms.vdouble(200,210,220,230,240,250,260,270,280,290,300,310,320,330,340,350)
mssmHbbBtagTriggerMonitorAH200.genericTriggerEventPSet.hltPaths = cms.vstring('HLT_DoublePFJets200_CaloBTagDeepCSV_p71_v*')
mssmHbbBtagTriggerMonitorAH350 = mssmHbbBtagTriggerMonitor.clone()
mssmHbbBtagTriggerMonitorAH350.dirname = cms.string("HLT/HIG/MssmHbb/fullhadronic/BtagTrigger/pt350")
mssmHbbBtagTriggerMonitorAH350.jetPtMin = cms.double(350)
mssmHbbBtagTriggerMonitorAH350.triggerobjbtag = cms.string("hltBTagCaloDeepCSV0p71Single6Jets80")
mssmHbbBtagTriggerMonitorAH350.histoPSet.jetPt = cms.vdouble(350,360,370,380,390,400,410,420,430,440,450,460,470,480,490,500,510,520,530,540,550,560,570,580,590,600)
mssmHbbBtagTriggerMonitorAH350.genericTriggerEventPSet.hltPaths = cms.vstring('HLT_DoublePFJets350_CaloBTagDeepCSV_p71_v*')
|
483901
|
from django.core.exceptions import ValidationError
from gcoin import is_address, b58check_to_hex
def validate_address(value):
if not is_address(value):
raise ValidationError(
"%(value)s is not a valid address",
params={'value': value},
)
try:
b58check_to_hex(value)
except AssertionError:
raise ValidationError(
"%(value)s is not a valid address",
params={'value': value},
)
|
483905
|
import numpy as np
import numpy.random as npr
from sds.initial import SingleBayesianInitGaussianLatent
from sds.latents import SingleBayesianAutoRegressiveGaussianLatent
from sds.emissions import SingleBayesianLinearGaussianEmission
from sds.utils.decorate import ensure_args_are_viable
from sds.utils.general import Statistics as Stats
from operator import add
from functools import reduce
from tqdm import trange
class LinearGaussianDynamics:
def __init__(self, ems_dim, act_dim, ltn_dim, ltn_lag=1,
init_ltn_prior=None, ltn_prior=None, ems_prior=None,
init_ltn_kwargs={}, ltn_kwargs={}, ems_kwargs={}):
self.ltn_dim = ltn_dim
self.act_dim = act_dim
self.ems_dim = ems_dim
self.latent_lag = ltn_lag
self.init_ltn_prior = init_ltn_prior
self.ltn_prior = ltn_prior
self.ems_prior = ems_prior
self.init_latent = SingleBayesianInitGaussianLatent(self.ltn_dim, self.act_dim, self.latent_lag,
self.init_ltn_prior, **init_ltn_kwargs)
self.latent = SingleBayesianAutoRegressiveGaussianLatent(self.ltn_dim, self.act_dim, self.latent_lag,
self.ltn_prior, **ltn_kwargs)
self.emission = SingleBayesianLinearGaussianEmission(self.ltn_dim, self.ems_dim,
self.ems_prior, **ems_kwargs)
@property
def params(self):
return self.init_latent.params,\
self.latent.params,\
self.emission.params
@params.setter
def params(self, value):
self.init_latent.params = value[0]
self.latent.params = value[1]
self.emission.params = value[2]
@ensure_args_are_viable
def initialize(self, ems, act=None, **kwargs):
pass
@ensure_args_are_viable
def kalman_filter(self, ems, act=None):
if isinstance(ems, np.ndarray) \
and isinstance(act, np.ndarray):
nb_steps = ems.shape[0]
filt_mean = np.zeros((nb_steps, self.ltn_dim))
filt_covar = np.zeros((nb_steps, self.ltn_dim, self.ltn_dim))
mu, lmbda = self.init_latent.likelihood.params
pred_mean, pred_cov = mu, np.linalg.inv(lmbda)
log_lik = 0.
for t in range(nb_steps):
log_lik += self.emission.expected_log_liklihood(pred_mean, pred_cov, ems[t]) # log_lik
filt_mean[t], filt_covar[t] = self.emission.condition(pred_mean, pred_cov, ems[t]) # condition
pred_mean, pred_cov = self.latent.propagate(filt_mean[t], filt_covar[t], act[t]) # predict
return filt_mean, filt_covar, log_lik
else:
def inner(ems, act):
return self.kalman_filter.__wrapped__(self, ems, act)
result = map(inner, ems, act)
filt_mean, filt_covar, log_lik = list(map(list, zip(*result)))
return filt_mean, filt_covar, np.sum(np.hstack(log_lik))
@ensure_args_are_viable
def kalman_smoother(self, ems, act=None):
if isinstance(ems, np.ndarray) \
and isinstance(act, np.ndarray):
filt_mean, filt_covar, log_lik = self.kalman_filter(ems, act)
nb_steps = ems.shape[0]
smooth_mean = np.zeros((nb_steps, self.ltn_dim))
smooth_covar = np.zeros((nb_steps, self.ltn_dim, self.ltn_dim))
gain = np.zeros((nb_steps - 1, self.ltn_dim, self.ltn_dim))
smooth_mean[-1], smooth_covar[-1] = filt_mean[-1], filt_covar[-1]
for t in range(nb_steps - 2, -1, -1):
smooth_mean[t], smooth_covar[t], gain[t] =\
self.latent.smooth(smooth_mean[t + 1], smooth_covar[t + 1],
filt_mean[t], filt_covar[t], act[t])
return smooth_mean, smooth_covar, gain, log_lik
else:
def inner(ems, act):
return self.kalman_smoother.__wrapped__(self, ems, act)
result = map(inner, ems, act)
smooth_mean, smooth_covar, gain, log_lik = list(map(list, zip(*result)))
return smooth_mean, smooth_covar, gain, np.sum(np.hstack(log_lik))
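    # Reference (a sketch in standard notation, not taken from the sds documentation):
    # the recursions implemented by kalman_filter / kalman_smoother above are
    #   predict:  m_{t|t-1} = A m_{t-1|t-1} + B u_{t-1},      P_{t|t-1} = A P_{t-1|t-1} A^T + Q
    #   update:   K_t = P_{t|t-1} C^T (C P_{t|t-1} C^T + R)^{-1}
    #             m_{t|t} = m_{t|t-1} + K_t (y_t - C m_{t|t-1}),   P_{t|t} = (I - K_t C) P_{t|t-1}
    #   RTS:      G_t = P_{t|t} A^T P_{t+1|t}^{-1}
    #             m_{t|T} = m_{t|t} + G_t (m_{t+1|T} - m_{t+1|t}),  P_{t|T} = P_{t|t} + G_t (P_{t+1|T} - P_{t+1|t}) G_t^T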
@ensure_args_are_viable
def estep(self, ems, act=None):
if isinstance(ems, np.ndarray) \
and isinstance(act, np.ndarray):
smooth_mean, smooth_covar, gain, log_lik =\
self.kalman_smoother(ems, act)
nb_steps = ems.shape[0]
# currently only for full covariances
Ex = smooth_mean # E[x{n}]
ExxpT = np.zeros_like(gain) # E[x_{n} x_{n-1}^T]
for t in range(nb_steps - 1):
ExxpT[t] = smooth_covar[t + 1] @ gain[t].T\
+ np.outer(smooth_mean[t + 1], smooth_mean[t])
ExxT = np.zeros_like(smooth_covar) # E[x_{n} x_{n}^T]
for t in range(nb_steps):
ExxT[t] = smooth_covar[t] + np.outer(smooth_mean[t], smooth_mean[t])
# init_ltn_stats
x, xxT = Ex[0], ExxT[0]
init_ltn_stats = Stats([x, 1., xxT, 1.])
# ltn_stats
xxT = np.zeros((nb_steps - 1, self.ltn_dim + 1, self.ltn_dim + 1))
for t in range(nb_steps - 1):
xxT[t] = np.block([[ExxT[t], Ex[t][:, np.newaxis]],
[Ex[t][np.newaxis, :], np.ones((1,))]])
yxT = np.zeros((nb_steps - 1, self.ltn_dim, self.ltn_dim + 1))
for t in range(nb_steps - 1):
yxT[t] = np.hstack((ExxpT[t], Ex[t + 1][:, np.newaxis]))
yyT = ExxT[1:]
ltn_stats = Stats([np.sum(yxT, axis=0),
np.sum(xxT, axis=0),
np.sum(yyT, axis=0),
yyT.shape[0]])
# ems_stats
xxT = np.zeros((nb_steps, self.ltn_dim + 1, self.ltn_dim + 1))
for t in range(nb_steps):
xxT[t] = np.block([[ExxT[t], Ex[t][:, np.newaxis]],
[Ex[t][np.newaxis, :], np.ones((1,))]])
x = np.hstack((Ex, np.ones((Ex.shape[0], 1))))
yxT = np.einsum('nd,nl->ndl', ems, x)
yyT = np.einsum('nd,nl->ndl', ems, ems)
ems_stats = Stats([np.sum(yxT, axis=0),
np.sum(xxT, axis=0),
np.sum(yyT, axis=0),
yyT.shape[0]])
return init_ltn_stats, ltn_stats, ems_stats, log_lik
else:
def inner(ems, act):
return self.estep.__wrapped__(self, ems, act)
result = map(inner, ems, act)
init_ltn_stats, ltn_stats, ems_stats, log_lik = list(map(list, zip(*result)))
stats = tuple([reduce(add, init_ltn_stats),
reduce(add, ltn_stats),
reduce(add, ems_stats)])
return stats, np.sum(np.hstack(log_lik))
def mstep(self, stats, ems, act,
init_ltn_mstep_kwarg,
ltn_mstep_kwarg,
ems_mstep_kwargs):
init_ltn_stats, ltn_stats, ems_stats = stats
self.init_latent.mstep(init_ltn_stats, **init_ltn_mstep_kwarg)
self.latent.mstep(ltn_stats, **ltn_mstep_kwarg)
# self.emission.mstep(ems_stats, **ems_mstep_kwargs)
def em(self, train_ems, train_act=None,
nb_iter=50, tol=1e-4, initialize=True,
init_ltn_mstep_kwarg={},
ltn_mstep_kwarg={},
ems_mstep_kwarg={}, **kwargs):
process_id = kwargs.pop('process_id', 0)
if initialize:
self.initialize(train_ems, train_act)
train_lls = []
stats, train_ll = self.estep(train_ems, train_act)
train_ll += self.init_latent.prior.log_likelihood(self.init_latent.likelihood.params)[0]
train_ll += self.latent.prior.log_likelihood(self.latent.likelihood.params)[0]
# train_ll += self.emission.prior.log_likelihood(self.emission.likelihood.params)[0]
train_lls.append(train_ll)
last_train_ll = train_ll
pbar = trange(nb_iter, position=process_id)
pbar.set_description("#{}, ll: {:.5f}".format(process_id, train_lls[-1]))
for _ in pbar:
self.mstep(stats,
train_ems, train_act,
init_ltn_mstep_kwarg,
ltn_mstep_kwarg,
ems_mstep_kwarg)
stats, train_ll = self.estep(train_ems, train_act)
train_ll += self.init_latent.prior.log_likelihood(self.init_latent.likelihood.params)[0]
train_ll += self.latent.prior.log_likelihood(self.latent.likelihood.params)[0]
# train_ll += self.emission.prior.log_likelihood(self.emission.likelihood.params)[0]
train_lls.append(train_ll)
pbar.set_description("#{}, ll: {:.5f}".format(process_id, train_lls[-1]))
if abs(train_ll - last_train_ll) < tol:
break
else:
last_train_ll = train_ll
return train_lls
|
483908
|
from mirage.libs import ir,utils,io
from mirage.core import module
class ir_inject(module.WirelessModule):
def init(self):
self.technology = "ir"
self.type = "action"
self.description = "Injection module for IR signals"
self.args = {
"INTERFACE":"irma0",
"DATA":"",
"PROTOCOL":"",
"CODE":"",
"CODE_SIZE":"",
"FREQUENCY":"38"
}
def checkCapabilities(self):
return self.emitter.hasCapabilities("SNIFFING", "CHANGING_FREQUENCY")
def run(self):
self.emitter = self.getEmitter(interface=self.args["INTERFACE"])
if self.checkCapabilities():
frequency = self.emitter.getFrequency()
if frequency != utils.integerArg(self.args["FREQUENCY"]):
self.emitter.setFrequency(utils.integerArg(self.args["FREQUENCY"]))
if self.args["CODE"] != "" and utils.isHexadecimal(self.args["CODE"]):
code = self.args["CODE"]
if "0x" == self.args["CODE"][:2]:
code = self.args["CODE"][2:]
code = bytes.fromhex(code)
if self.args["PROTOCOL"].upper() == "NEC":
packet = ir.IRNECPacket(code=code, size=utils.integerArg(self.args["CODE_SIZE"]))
elif self.args["PROTOCOL"].upper() == "SONY":
packet = ir.IRSonyPacket(code=code, size=utils.integerArg(self.args["CODE_SIZE"]))
elif self.args["PROTOCOL"].upper() == "RC5":
packet = ir.IRRC5Packet(code=code, size=utils.integerArg(self.args["CODE_SIZE"]))
elif self.args["PROTOCOL"].upper() == "RC6":
packet = ir.IRRC6Packet(code=code, size=utils.integerArg(self.args["CODE_SIZE"]))
elif self.args["PROTOCOL"].upper() == "DISH":
packet = ir.IRDishPacket(code=code, size=utils.integerArg(self.args["CODE_SIZE"]))
elif self.args["PROTOCOL"].upper() == "SHARP":
packet = ir.IRSharpPacket(code=code, size=utils.integerArg(self.args["CODE_SIZE"]))
elif self.args["PROTOCOL"].upper() == "JVC":
packet = ir.IRJVCPacket(code=code, size=utils.integerArg(self.args["CODE_SIZE"]))
elif self.args["PROTOCOL"].upper() == "SANYO":
packet = ir.IRSanyoPacket(code=code, size=utils.integerArg(self.args["CODE_SIZE"]))
elif self.args["PROTOCOL"].upper() == "MITSUBISHI":
packet = ir.IRMitsubishiPacket(code=code, size=utils.integerArg(self.args["CODE_SIZE"]))
elif self.args["PROTOCOL"].upper() == "SAMSUNG":
packet = ir.IRSamsungPacket(code=code, size=utils.integerArg(self.args["CODE_SIZE"]))
elif self.args["PROTOCOL"].upper() == "LG":
packet = ir.IRLGPacket(code=code, size=utils.integerArg(self.args["CODE_SIZE"]))
elif self.args["PROTOCOL"].upper() == "WHYNTER":
packet = ir.IRWhynterPacket(code=code, size=utils.integerArg(self.args["CODE_SIZE"]))
elif self.args["PROTOCOL"].upper() == "AIWA":
packet = ir.IRAiwaPacket(code=code, size=utils.integerArg(self.args["CODE_SIZE"]))
elif self.args["PROTOCOL"].upper() == "PANASONIC":
packet = ir.IRPanasonicPacket(code=code, size=utils.integerArg(self.args["CODE_SIZE"]))
elif self.args["PROTOCOL"].upper() == "DENON":
packet = ir.IRDenonPacket(code=code, size=utils.integerArg(self.args["CODE_SIZE"]))
else:
io.fail("Unknown protocol !")
return self.nok()
io.info("Injecting ...")
self.emitter.sendp(packet)
utils.wait(seconds=1)
io.success("Injection done !")
return self.ok()
elif self.args["DATA"] != "":
data = [int(i) for i in utils.listArg(self.args["DATA"])]
packet = ir.IRPacket(data=data)
io.info("Injecting ...")
self.emitter.sendp(packet)
utils.wait(seconds=1)
io.success("Injection done !")
return self.ok()
else:
io.fail("Incorrect parameters !")
return self.nok()
else:
io.fail("Interface provided ("+str(self.args["INTERFACE"])+") is not able to inject IR signals.")
return self.nok()
|
483925
|
from django.urls import path, re_path
from .views import empty_view
urlpatterns = [
# No kwargs
path("conflict/cannot-go-here/", empty_view, name="name-conflict"),
path("conflict/", empty_view, name="name-conflict"),
# One kwarg
re_path(r"^conflict-first/(?P<first>\w+)/$", empty_view, name="name-conflict"),
re_path(
r"^conflict-cannot-go-here/(?P<middle>\w+)/$", empty_view, name="name-conflict"
),
re_path(r"^conflict-middle/(?P<middle>\w+)/$", empty_view, name="name-conflict"),
re_path(r"^conflict-last/(?P<last>\w+)/$", empty_view, name="name-conflict"),
# Two kwargs
re_path(
r"^conflict/(?P<another>\w+)/(?P<extra>\w+)/cannot-go-here/$",
empty_view,
name="name-conflict",
),
re_path(
r"^conflict/(?P<extra>\w+)/(?P<another>\w+)/$", empty_view, name="name-conflict"
),
]
|
483941
|
from __future__ import annotations
from dataclasses import dataclass
from typing import Generic, Tuple, TypeVar, final
from pydantic import parse_obj_as
from expression import SingleCaseUnion, Tag, TaggedUnion, match, tag
_T = TypeVar("_T")
@dataclass
class Rectangle:
width: float
length: float
@dataclass
class Circle:
radius: float
@final
class Shape(TaggedUnion):
RECTANGLE = tag(Rectangle)
CIRCLE = tag(Circle)
@staticmethod
def rectangle(width: float, length: float) -> Shape:
return Shape(Shape.RECTANGLE, Rectangle(width, length))
@staticmethod
def circle(radius: float) -> Shape:
return Shape(Shape.CIRCLE, Circle(radius))
def test_union_create():
shape = Shape.circle(2.3)
assert shape.tag == Shape.CIRCLE
assert shape.value == Circle(2.3)
def test_union_match_tag():
shape = Shape.rectangle(2.3, 3.3)
with match(shape.tag) as case:
if case(Shape.CIRCLE):
assert False
if case(Shape.RECTANGLE):
assert True
if case.default():
assert False
def test_union_match_type():
shape = Shape.rectangle(2.3, 3.3)
with match(shape) as case:
for rect in case(Rectangle):
assert rect.length == 3.3
if case.default():
assert False
def test_union_match_value():
shape = Shape.rectangle(2.3, 3.3)
with match(shape) as case:
for rect in case(Shape.RECTANGLE(width=2.3)):
assert rect.length == 3.3
if case.default():
assert False
def test_union_no_match_value():
shape = Shape.rectangle(2.3, 3.3)
with match(shape) as case:
if case(Shape.RECTANGLE(width=12.3)):
assert False
if case.default():
assert True
@final
class Weather(TaggedUnion):
Sunny = tag()
Rainy = tag()
@staticmethod
def sunny() -> Weather:
return Weather(Weather.Sunny)
@staticmethod
def rainy() -> Weather:
return Weather(Weather.Rainy)
def test_union_wether_match():
rainy = Weather.sunny()
with match(rainy) as case:
if case(Weather.Rainy):
assert False
if case(Weather.Sunny):
assert True
if case.default():
assert False
class Maybe(TaggedUnion, Generic[_T]):
NOTHING = tag()
JUST = Tag[_T]()
@staticmethod
def just(value: _T) -> Maybe[_T]:
return Maybe[_T](Maybe.JUST, value)
@staticmethod
def nothing() -> Maybe[None]:
return Maybe[None](Maybe.NOTHING)
def test_union_maybe_match():
maybe = Maybe.just(10)
with match(maybe) as case:
if case(Maybe.NOTHING):
assert False
for value in case(
maybe.JUST
): # Note the lower-case maybe to get the type right
assert value == 10
if case.default():
assert False
@final
class Suit(TaggedUnion):
HEARTS = tag()
SPADES = tag()
CLUBS = tag()
DIAMONDS = tag()
@staticmethod
def hearts() -> Suit:
        return Suit(Suit.HEARTS)
@staticmethod
def spades() -> Suit:
return Suit(Suit.SPADES)
@staticmethod
def clubs() -> Suit:
return Suit(Suit.CLUBS)
@staticmethod
def diamonds() -> Suit:
return Suit(Suit.DIAMONDS)
@final
class Face(TaggedUnion):
JACK = tag()
QUEEN = tag()
    KING = tag()
ACE = tag()
@staticmethod
def jack() -> Face:
return Face(Face.JACK)
@staticmethod
def queen() -> Face:
return Face(Face.QUEEN)
@staticmethod
def king() -> Face:
        return Face(Face.KING)
@staticmethod
def ace() -> Face:
return Face(Face.ACE)
@final
class Card(TaggedUnion):
FACE_CARD = tag(Tuple[Suit, Face])
VALUE_CARD = tag(Tuple[Suit, int])
JOKER = tag()
@staticmethod
def face_card(suit: Suit, face: Face) -> Card:
return Card(Card.FACE_CARD, suit=suit, face=face)
@staticmethod
def value_card(suit: Suit, value: int) -> Card:
return Card(Card.VALUE_CARD, suit=suit, value=value)
@staticmethod
def Joker() -> Card:
return Card(Card.JOKER)
jack_of_hearts = Card.face_card(Suit.hearts(), Face.jack())
three_of_clubs = Card.value_card(Suit.clubs(), 3)
joker = Card.Joker()
def calculate_value(card: Card) -> int:
with match(card) as case:
if case(Card.JOKER):
return 0
if case(Card.FACE_CARD(suit=Suit.SPADES, face=Face.QUEEN)):
return 40
if case(Card.FACE_CARD(face=Face.ACE)):
return 15
if case(Card.FACE_CARD()):
return 10
if case(Card.VALUE_CARD(value=10)):
return 10
if case._:
return 5
assert False
def test_union_cards():
rummy_score = calculate_value(jack_of_hearts)
assert rummy_score == 10
rummy_score = calculate_value(three_of_clubs)
assert rummy_score == 5
rummy_score = calculate_value(joker)
assert rummy_score == 0
class EmailAddress(SingleCaseUnion[str]):
...
def test_single_case_union_create():
addr = "<EMAIL>"
email = EmailAddress(addr)
assert email.VALUE.tag == 1000
assert email.value == addr
def test_single_case_union_match():
addr = "<EMAIL>"
email = EmailAddress(addr)
with match(email) as case:
for email in case(str):
assert email == addr
if case._:
assert False
def test_single_case_union_match_value():
addr = "<EMAIL>"
email = EmailAddress(addr)
with match(email) as case:
for email in case(EmailAddress.VALUE(addr)):
assert email == addr
if case._:
assert False
def test_single_case_union_not_match_value():
addr = "<EMAIL>"
email = EmailAddress(addr)
    with match(email) as case:
        # the probe value is meant to be a different address, so the case should not match
        for email in case(EmailAddress.VALUE("<EMAIL>")):
            assert False
        if case._:
            assert True
def test_union_to_dict_works():
maybe = Maybe.just(10)
obj = maybe.dict()
assert obj == dict(tag="JUST", value=10)
def test_union_from_dict_works():
obj = dict(tag="JUST", value=10)
maybe = parse_obj_as(Maybe[int], obj)
assert maybe
assert maybe.value == 10
def test_nested_union_to_dict_works():
maybe = Maybe.just(Maybe.just(10))
obj = maybe.dict()
assert obj == dict(tag="JUST", value=dict(tag="JUST", value=10))
def test_nested_union_from_dict_works():
obj = dict(tag="JUST", value=dict(tag="JUST", value=10))
maybe = parse_obj_as(Maybe[Maybe[int]], obj)
assert maybe
assert maybe.value
assert maybe.value.value == 10
|
483962
|
from typing import List, Dict, Any, Optional
from abc import ABC
from objectiv_backend.schema.schema_utils import SchemaEntity
class AbstractContext(SchemaEntity, ABC):
"""
AbstractContext defines the bare minimum properties for every Context. All Contexts inherit from it.
Attributes:
id (str):
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
_type = 'AbstractContext'
def __init__(self, id: str, **kwargs: Optional[Any]):
"""
:param id:
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
SchemaEntity.__init__(self, id=id, **kwargs)
class AbstractGlobalContext(AbstractContext, ABC):
"""
This is the abstract parent of all Global Contexts. Global contexts add general information to an Event.
Attributes:
id (str):
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
_type = 'AbstractGlobalContext'
def __init__(self, id: str, **kwargs: Optional[Any]):
"""
:param id:
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
AbstractContext.__init__(self, id=id, **kwargs)
class ApplicationContext(AbstractGlobalContext):
"""
A GlobalContext describing in which app the event happens, like a website or iOS app.
Attributes:
id (str):
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
_type = 'ApplicationContext'
def __init__(self, id: str, **kwargs: Optional[Any]):
"""
:param id:
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
AbstractGlobalContext.__init__(self, id=id, **kwargs)
class CookieIdContext(AbstractGlobalContext):
"""
Global context with information needed to reconstruct a user session.
Attributes:
cookie_id (str):
Unique identifier from the session cookie
id (str):
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
_type = 'CookieIdContext'
def __init__(self, cookie_id: str, id: str, **kwargs: Optional[Any]):
"""
:param cookie_id:
Unique identifier from the session cookie
:param id:
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
AbstractGlobalContext.__init__(
self, cookie_id=cookie_id, id=id, **kwargs)
class HttpContext(AbstractGlobalContext):
"""
A GlobalContext describing meta information about the agent that sent the event.
Attributes:
referrer (str):
Full URL to HTTP referrer of the current page.
user_agent (str):
User-agent of the agent that sent the event.
remote_address (str):
(public) IP address of the agent that sent the event.
id (str):
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
_type = 'HttpContext'
def __init__(self,
referrer: str,
user_agent: str,
id: str,
remote_address: str = None,
**kwargs: Optional[Any]):
"""
:param referrer:
Full URL to HTTP referrer of the current page.
:param user_agent:
User-agent of the agent that sent the event.
:param remote_address:
(public) IP address of the agent that sent the event.
:param id:
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
AbstractGlobalContext.__init__(self,
referrer=referrer,
user_agent=user_agent,
remote_address=remote_address,
id=id,
**kwargs)
class PathContext(AbstractGlobalContext):
"""
A GlobalContext describing the path where the user is when an event is sent.
Attributes:
id (str):
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
_type = 'PathContext'
def __init__(self, id: str, **kwargs: Optional[Any]):
"""
:param id:
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
AbstractGlobalContext.__init__(self, id=id, **kwargs)
class SessionContext(AbstractGlobalContext):
"""
A GlobalContext describing meta information about the current session.
Attributes:
hit_number (int):
            Hit counter relative to the current session this event originated in.
id (str):
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
_type = 'SessionContext'
def __init__(self, hit_number: int, id: str, **kwargs: Optional[Any]):
"""
:param hit_number:
            Hit counter relative to the current session this event originated in.
:param id:
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
AbstractGlobalContext.__init__(
self, hit_number=hit_number, id=id, **kwargs)
class MarketingContext(AbstractGlobalContext):
"""
    A GlobalContext that captures marketing channel info, so users can do attribution,
    campaign effectiveness and other models.
Attributes:
source (str):
Identifies the advertiser, site, publication, etc
medium (str):
Advertising or marketing medium: cpc, banner, email newsletter, etc
campaign (str):
Individual campaign name, slogan, promo code, etc
term (str):
[Optional] Search keywords
content (str):
[Optional] Used to differentiate similar content, or links within the same ad
source_platform (str):
[Optional] To differentiate similar content, or links within the same ad.
creative_format (str):
[Optional] Identifies the creative used (e.g., skyscraper, banner, etc).
marketing_tactic (str):
[Optional] Identifies the marketing tactic used (e.g., onboarding, retention, acquisition etc).
id (str):
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
_type = 'MarketingContext'
def __init__(self,
source: str,
medium: str,
campaign: str,
id: str,
term: str = None,
content: str = None,
source_platform: str = None,
creative_format: str = None,
marketing_tactic: str = None,
**kwargs: Optional[Any]):
"""
:param source:
Identifies the advertiser, site, publication, etc
:param medium:
Advertising or marketing medium: cpc, banner, email newsletter, etc
:param campaign:
Individual campaign name, slogan, promo code, etc
:param term:
[Optional] Search keywords
:param content:
[Optional] Used to differentiate similar content, or links within the same ad
:param source_platform:
[Optional] To differentiate similar content, or links within the same ad.
:param creative_format:
[Optional] Identifies the creative used (e.g., skyscraper, banner, etc).
:param marketing_tactic:
[Optional] Identifies the marketing tactic used (e.g., onboarding, retention, acquisition etc).
:param id:
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
AbstractGlobalContext.__init__(self,
source=source,
medium=medium,
campaign=campaign,
term=term,
content=content,
source_platform=source_platform,
creative_format=creative_format,
marketing_tactic=marketing_tactic,
id=id,
**kwargs)
class AbstractLocationContext(AbstractContext, ABC):
"""
    AbstractLocationContext is the abstract parent of all Location Contexts. Location Contexts are meant to describe where an event originated in the visual UI.
Attributes:
id (str):
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
_type = 'AbstractLocationContext'
def __init__(self, id: str, **kwargs: Optional[Any]):
"""
:param id:
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
AbstractContext.__init__(self, id=id, **kwargs)
class InputContext(AbstractLocationContext):
"""
A Location Context that describes an element that accepts user input, i.e. a form field.
Attributes:
id (str):
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
_type = 'InputContext'
def __init__(self, id: str, **kwargs: Optional[Any]):
"""
:param id:
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
AbstractLocationContext.__init__(self, id=id, **kwargs)
class PressableContext(AbstractLocationContext):
"""
    A Location Context that describes an interactive element (like a link, button, icon),
that the user can press and will trigger an Interactive Event.
Attributes:
id (str):
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
_type = 'PressableContext'
def __init__(self, id: str, **kwargs: Optional[Any]):
"""
:param id:
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
AbstractLocationContext.__init__(self, id=id, **kwargs)
class LinkContext(PressableContext):
"""
A PressableContext that contains an href.
Attributes:
href (str):
URL (href) the link points to.
id (str):
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
_type = 'LinkContext'
def __init__(self, href: str, id: str, **kwargs: Optional[Any]):
"""
:param href:
URL (href) the link points to.
:param id:
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
PressableContext.__init__(self, href=href, id=id, **kwargs)
class RootLocationContext(AbstractLocationContext):
"""
A Location Context that uniquely represents the top-level UI location of the user.
Attributes:
id (str):
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
_type = 'RootLocationContext'
def __init__(self, id: str, **kwargs: Optional[Any]):
"""
:param id:
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
AbstractLocationContext.__init__(self, id=id, **kwargs)
class ExpandableContext(AbstractLocationContext):
"""
A Location Context that describes a section of the UI that can expand & collapse.
Attributes:
id (str):
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
_type = 'ExpandableContext'
def __init__(self, id: str, **kwargs: Optional[Any]):
"""
:param id:
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
AbstractLocationContext.__init__(self, id=id, **kwargs)
class MediaPlayerContext(AbstractLocationContext):
"""
A Location Context that describes a section of the UI containing a media player.
Attributes:
id (str):
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
_type = 'MediaPlayerContext'
def __init__(self, id: str, **kwargs: Optional[Any]):
"""
:param id:
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
AbstractLocationContext.__init__(self, id=id, **kwargs)
class NavigationContext(AbstractLocationContext):
"""
A Location Context that describes a section of the UI containing navigational elements, for example a menu.
Attributes:
id (str):
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
_type = 'NavigationContext'
def __init__(self, id: str, **kwargs: Optional[Any]):
"""
:param id:
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
AbstractLocationContext.__init__(self, id=id, **kwargs)
class OverlayContext(AbstractLocationContext):
"""
    A Location Context that describes a section of the UI that represents an overlay, i.e. a Modal.
Attributes:
id (str):
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
_type = 'OverlayContext'
def __init__(self, id: str, **kwargs: Optional[Any]):
"""
:param id:
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
AbstractLocationContext.__init__(self, id=id, **kwargs)
class ContentContext(AbstractLocationContext):
"""
A Location Context that describes a logical section of the UI that contains other Location Contexts. Enabling Data Science to analyze this section specifically.
Attributes:
id (str):
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
_type = 'ContentContext'
def __init__(self, id: str, **kwargs: Optional[Any]):
"""
:param id:
A unique string identifier to be combined with the Context Type (`_type`)
for Context instance uniqueness.
"""
AbstractLocationContext.__init__(self, id=id, **kwargs)
class AbstractEvent(SchemaEntity, ABC):
"""
This is the abstract parent of all Events.
Attributes:
location_stack (List[AbstractLocationContext]):
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
global_contexts (List[AbstractGlobalContext]):
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
id (str):
            Unique identifier for a specific instance of an event. Typically UUIDs are a good way of
            implementing this. On the collector side, events should be unique; duplicate ids result
            in `not ok` events.
time (int):
Timestamp indicating when the event was generated
"""
_type = 'AbstractEvent'
def __init__(self,
location_stack: List[AbstractLocationContext],
global_contexts: List[AbstractGlobalContext],
id: str,
time: int,
**kwargs: Optional[Any]):
"""
:param location_stack:
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
:param global_contexts:
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
:param id:
            Unique identifier for a specific instance of an event. Typically UUIDs are a good way of
            implementing this. On the collector side, events should be unique; duplicate ids result
            in `not ok` events.
:param time:
Timestamp indicating when the event was generated
"""
SchemaEntity.__init__(self,
location_stack=location_stack,
global_contexts=global_contexts,
id=id,
time=time,
**kwargs)
class InteractiveEvent(AbstractEvent):
"""
The parent of Events that are the direct result of a user interaction, e.g. a button click.
Attributes:
location_stack (List[AbstractLocationContext]):
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
global_contexts (List[AbstractGlobalContext]):
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
id (str):
            Unique identifier for a specific instance of an event. Typically UUIDs are a good way of
            implementing this. On the collector side, events should be unique; duplicate ids result
            in `not ok` events.
time (int):
Timestamp indicating when the event was generated
"""
_type = 'InteractiveEvent'
def __init__(self,
location_stack: List[AbstractLocationContext],
global_contexts: List[AbstractGlobalContext],
id: str,
time: int,
**kwargs: Optional[Any]):
"""
:param location_stack:
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
:param global_contexts:
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
:param id:
            Unique identifier for a specific instance of an event. Typically UUIDs are a good way of
            implementing this. On the collector side, events should be unique; duplicate ids result
            in `not ok` events.
:param time:
Timestamp indicating when the event was generated
"""
AbstractEvent.__init__(self,
location_stack=location_stack,
global_contexts=global_contexts,
id=id,
time=time,
**kwargs)
class NonInteractiveEvent(AbstractEvent):
"""
The parent of Events that are not directly triggered by a user action.
Attributes:
location_stack (List[AbstractLocationContext]):
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
global_contexts (List[AbstractGlobalContext]):
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
id (str):
Unique identifier for a specific instance of an event. Typically UUID's are a good way of
implementing this. On the collector side, events should be unique, this means duplicate id's result
in `not ok` events.
time (int):
Timestamp indicating when the event was generated
"""
_type = 'NonInteractiveEvent'
def __init__(self,
location_stack: List[AbstractLocationContext],
global_contexts: List[AbstractGlobalContext],
id: str,
time: int,
**kwargs: Optional[Any]):
"""
:param location_stack:
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
:param global_contexts:
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
:param id:
Unique identifier for a specific instance of an event. Typically UUID's are a good way of
implementing this. On the collector side, events should be unique, this means duplicate id's result
in `not ok` events.
:param time:
Timestamp indicating when the event was generated
"""
AbstractEvent.__init__(self,
location_stack=location_stack,
global_contexts=global_contexts,
id=id,
time=time,
**kwargs)
class ApplicationLoadedEvent(NonInteractiveEvent):
"""
A NonInteractive event that is emitted after an application (eg. SPA) has finished loading.
Attributes:
location_stack (List[AbstractLocationContext]):
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
global_contexts (List[AbstractGlobalContext]):
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
id (str):
Unique identifier for a specific instance of an event. Typically UUID's are a good way of
implementing this. On the collector side, events should be unique, this means duplicate id's result
in `not ok` events.
time (int):
Timestamp indicating when the event was generated
"""
_type = 'ApplicationLoadedEvent'
def __init__(self,
location_stack: List[AbstractLocationContext],
global_contexts: List[AbstractGlobalContext],
id: str,
time: int,
**kwargs: Optional[Any]):
"""
:param location_stack:
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
:param global_contexts:
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
:param id:
Unique identifier for a specific instance of an event. Typically UUID's are a good way of
implementing this. On the collector side, events should be unique, this means duplicate id's result
in `not ok` events.
:param time:
Timestamp indicating when the event was generated
"""
NonInteractiveEvent.__init__(self,
location_stack=location_stack,
global_contexts=global_contexts,
id=id,
time=time,
**kwargs)
class FailureEvent(NonInteractiveEvent):
"""
    A NonInteractiveEvent that is sent when a user action results in an error,
    like an invalid email when sending a form.
Attributes:
message (str):
Failure message.
location_stack (List[AbstractLocationContext]):
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
global_contexts (List[AbstractGlobalContext]):
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
id (str):
Unique identifier for a specific instance of an event. Typically UUID's are a good way of
implementing this. On the collector side, events should be unique, this means duplicate id's result
in `not ok` events.
time (int):
Timestamp indicating when the event was generated
"""
_type = 'FailureEvent'
def __init__(self,
message: str,
location_stack: List[AbstractLocationContext],
global_contexts: List[AbstractGlobalContext],
id: str,
time: int,
**kwargs: Optional[Any]):
"""
:param message:
Failure message.
:param location_stack:
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
:param global_contexts:
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
:param id:
Unique identifier for a specific instance of an event. Typically UUID's are a good way of
implementing this. On the collector side, events should be unique, this means duplicate id's result
in `not ok` events.
:param time:
Timestamp indicating when the event was generated
"""
NonInteractiveEvent.__init__(self,
message=message,
location_stack=location_stack,
global_contexts=global_contexts,
id=id,
time=time,
**kwargs)
class InputChangeEvent(InteractiveEvent):
"""
Event triggered when user input is modified.
Attributes:
location_stack (List[AbstractLocationContext]):
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
global_contexts (List[AbstractGlobalContext]):
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
id (str):
Unique identifier for a specific instance of an event. Typically UUID's are a good way of
implementing this. On the collector side, events should be unique, this means duplicate id's result
in `not ok` events.
time (int):
Timestamp indicating when the event was generated
"""
_type = 'InputChangeEvent'
def __init__(self,
location_stack: List[AbstractLocationContext],
global_contexts: List[AbstractGlobalContext],
id: str,
time: int,
**kwargs: Optional[Any]):
"""
:param location_stack:
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
:param global_contexts:
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
:param id:
Unique identifier for a specific instance of an event. Typically UUID's are a good way of
implementing this. On the collector side, events should be unique, this means duplicate id's result
in `not ok` events.
:param time:
Timestamp indicating when the event was generated
"""
InteractiveEvent.__init__(self,
location_stack=location_stack,
global_contexts=global_contexts,
id=id,
time=time,
**kwargs)
class PressEvent(InteractiveEvent):
"""
An InteractiveEvent that is sent when a user presses on a pressable element
(like a link, button, icon).
Attributes:
location_stack (List[AbstractLocationContext]):
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
global_contexts (List[AbstractGlobalContext]):
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
id (str):
Unique identifier for a specific instance of an event. Typically UUID's are a good way of
implementing this. On the collector side, events should be unique, this means duplicate id's result
in `not ok` events.
time (int):
Timestamp indicating when the event was generated
"""
_type = 'PressEvent'
def __init__(self,
location_stack: List[AbstractLocationContext],
global_contexts: List[AbstractGlobalContext],
id: str,
time: int,
**kwargs: Optional[Any]):
"""
:param location_stack:
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
:param global_contexts:
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
:param id:
Unique identifier for a specific instance of an event. Typically UUID's are a good way of
implementing this. On the collector side, events should be unique, this means duplicate id's result
in `not ok` events.
:param time:
Timestamp indicating when the event was generated
"""
InteractiveEvent.__init__(self,
location_stack=location_stack,
global_contexts=global_contexts,
id=id,
time=time,
**kwargs)
class HiddenEvent(NonInteractiveEvent):
"""
A NonInteractiveEvent that's emitted after a LocationContext has become invisible.
Attributes:
location_stack (List[AbstractLocationContext]):
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
global_contexts (List[AbstractGlobalContext]):
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
id (str):
Unique identifier for a specific instance of an event. Typically UUID's are a good way of
implementing this. On the collector side, events should be unique, this means duplicate id's result
in `not ok` events.
time (int):
Timestamp indicating when the event was generated
"""
_type = 'HiddenEvent'
def __init__(self,
location_stack: List[AbstractLocationContext],
global_contexts: List[AbstractGlobalContext],
id: str,
time: int,
**kwargs: Optional[Any]):
"""
:param location_stack:
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
:param global_contexts:
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
:param id:
Unique identifier for a specific instance of an event. Typically UUID's are a good way of
implementing this. On the collector side, events should be unique, this means duplicate id's result
in `not ok` events.
:param time:
Timestamp indicating when the event was generated
"""
NonInteractiveEvent.__init__(self,
location_stack=location_stack,
global_contexts=global_contexts,
id=id,
time=time,
**kwargs)
class VisibleEvent(NonInteractiveEvent):
"""
A NonInteractiveEvent that's emitted after a section LocationContext has become visible.
Attributes:
location_stack (List[AbstractLocationContext]):
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
global_contexts (List[AbstractGlobalContext]):
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
id (str):
Unique identifier for a specific instance of an event. Typically UUID's are a good way of
implementing this. On the collector side, events should be unique, this means duplicate id's result
in `not ok` events.
time (int):
Timestamp indicating when the event was generated
"""
_type = 'VisibleEvent'
def __init__(self,
location_stack: List[AbstractLocationContext],
global_contexts: List[AbstractGlobalContext],
id: str,
time: int,
**kwargs: Optional[Any]):
"""
:param location_stack:
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
:param global_contexts:
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
:param id:
Unique identifier for a specific instance of an event. Typically UUID's are a good way of
implementing this. On the collector side, events should be unique, this means duplicate id's result
in `not ok` events.
:param time:
Timestamp indicating when the event was generated
"""
NonInteractiveEvent.__init__(self,
location_stack=location_stack,
global_contexts=global_contexts,
id=id,
time=time,
**kwargs)
class SuccessEvent(NonInteractiveEvent):
"""
A NonInteractiveEvent that is sent when a user action is successfully completed,
like sending an email form.
Attributes:
message (str):
Success message.
location_stack (List[AbstractLocationContext]):
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
global_contexts (List[AbstractGlobalContext]):
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
id (str):
Unique identifier for a specific instance of an event. Typically UUID's are a good way of
implementing this. On the collector side, events should be unique, this means duplicate id's result
in `not ok` events.
time (int):
Timestamp indicating when the event was generated
"""
_type = 'SuccessEvent'
def __init__(self,
message: str,
location_stack: List[AbstractLocationContext],
global_contexts: List[AbstractGlobalContext],
id: str,
time: int,
**kwargs: Optional[Any]):
"""
:param message:
Success message.
:param location_stack:
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
:param global_contexts:
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
:param id:
Unique identifier for a specific instance of an event. Typically UUID's are a good way of
implementing this. On the collector side, events should be unique, this means duplicate id's result
in `not ok` events.
:param time:
Timestamp indicating when the event was generated
"""
NonInteractiveEvent.__init__(self,
message=message,
location_stack=location_stack,
global_contexts=global_contexts,
id=id,
time=time,
**kwargs)
class MediaEvent(NonInteractiveEvent):
"""
The parent of non-interactive events that are triggered by a media player.
It requires a MediaPlayerContext to detail the origin of the event.
Attributes:
location_stack (List[AbstractLocationContext]):
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
global_contexts (List[AbstractGlobalContext]):
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
id (str):
Unique identifier for a specific instance of an event. Typically UUID's are a good way of
implementing this. On the collector side, events should be unique, this means duplicate id's result
in `not ok` events.
time (int):
Timestamp indicating when the event was generated
"""
_type = 'MediaEvent'
def __init__(self,
location_stack: List[AbstractLocationContext],
global_contexts: List[AbstractGlobalContext],
id: str,
time: int,
**kwargs: Optional[Any]):
"""
:param location_stack:
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
:param global_contexts:
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
:param id:
Unique identifier for a specific instance of an event. Typically UUID's are a good way of
implementing this. On the collector side, events should be unique, this means duplicate id's result
in `not ok` events.
:param time:
Timestamp indicating when the event was generated
"""
NonInteractiveEvent.__init__(self,
location_stack=location_stack,
global_contexts=global_contexts,
id=id,
time=time,
**kwargs)
class MediaLoadEvent(MediaEvent):
"""
A MediaEvent that's emitted after a media item completes loading.
Attributes:
location_stack (List[AbstractLocationContext]):
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
global_contexts (List[AbstractGlobalContext]):
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
id (str):
Unique identifier for a specific instance of an event. Typically UUID's are a good way of
implementing this. On the collector side, events should be unique, this means duplicate id's result
in `not ok` events.
time (int):
Timestamp indicating when the event was generated
"""
_type = 'MediaLoadEvent'
def __init__(self,
location_stack: List[AbstractLocationContext],
global_contexts: List[AbstractGlobalContext],
id: str,
time: int,
**kwargs: Optional[Any]):
"""
:param location_stack:
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
:param global_contexts:
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
:param id:
Unique identifier for a specific instance of an event. Typically UUID's are a good way of
implementing this. On the collector side, events should be unique, this means duplicate id's result
in `not ok` events.
:param time:
Timestamp indicating when the event was generated
"""
MediaEvent.__init__(self,
location_stack=location_stack,
global_contexts=global_contexts,
id=id,
time=time,
**kwargs)
class MediaPauseEvent(MediaEvent):
"""
A MediaEvent that's emitted after a media item pauses playback.
Attributes:
location_stack (List[AbstractLocationContext]):
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
global_contexts (List[AbstractGlobalContext]):
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
id (str):
Unique identifier for a specific instance of an event. Typically UUID's are a good way of
implementing this. On the collector side, events should be unique, this means duplicate id's result
in `not ok` events.
time (int):
Timestamp indicating when the event was generated
"""
_type = 'MediaPauseEvent'
def __init__(self,
location_stack: List[AbstractLocationContext],
global_contexts: List[AbstractGlobalContext],
id: str,
time: int,
**kwargs: Optional[Any]):
"""
:param location_stack:
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
:param global_contexts:
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
:param id:
Unique identifier for a specific instance of an event. Typically UUID's are a good way of
implementing this. On the collector side, events should be unique, this means duplicate id's result
in `not ok` events.
:param time:
Timestamp indicating when the event was generated
"""
MediaEvent.__init__(self,
location_stack=location_stack,
global_contexts=global_contexts,
id=id,
time=time,
**kwargs)
class MediaStartEvent(MediaEvent):
"""
A MediaEvent that's emitted after a media item starts playback.
Attributes:
location_stack (List[AbstractLocationContext]):
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
global_contexts (List[AbstractGlobalContext]):
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
id (str):
Unique identifier for a specific instance of an event. Typically UUID's are a good way of
implementing this. On the collector side, events should be unique, this means duplicate id's result
in `not ok` events.
time (int):
Timestamp indicating when the event was generated
"""
_type = 'MediaStartEvent'
def __init__(self,
location_stack: List[AbstractLocationContext],
global_contexts: List[AbstractGlobalContext],
id: str,
time: int,
**kwargs: Optional[Any]):
"""
:param location_stack:
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
:param global_contexts:
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
:param id:
Unique identifier for a specific instance of an event. Typically UUID's are a good way of
implementing this. On the collector side, events should be unique, this means duplicate id's result
in `not ok` events.
:param time:
Timestamp indicating when the event was generated
"""
MediaEvent.__init__(self,
location_stack=location_stack,
global_contexts=global_contexts,
id=id,
time=time,
**kwargs)
class MediaStopEvent(MediaEvent):
"""
A MediaEvent that's emitted after a media item stops playback.
Attributes:
location_stack (List[AbstractLocationContext]):
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
global_contexts (List[AbstractGlobalContext]):
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
id (str):
Unique identifier for a specific instance of an event. Typically UUID's are a good way of
implementing this. On the collector side, events should be unique, this means duplicate id's result
in `not ok` events.
time (int):
Timestamp indicating when the event was generated
"""
_type = 'MediaStopEvent'
def __init__(self,
location_stack: List[AbstractLocationContext],
global_contexts: List[AbstractGlobalContext],
id: str,
time: int,
**kwargs: Optional[Any]):
"""
:param location_stack:
The location stack is an ordered list (stack), that contains a hierarchy of location contexts that
deterministically describes where an event took place from global to specific.
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated.
:param global_contexts:
Global contexts add global / general information about the event. They carry information that is not
related to where the Event originated (location), such as device, platform or business data.
:param id:
Unique identifier for a specific instance of an event. Typically UUID's are a good way of
implementing this. On the collector side, events should be unique, this means duplicate id's result
in `not ok` events.
:param time:
Timestamp indicating when the event was generated
"""
MediaEvent.__init__(self,
location_stack=location_stack,
global_contexts=global_contexts,
id=id,
time=time,
**kwargs)
def make_context(_type: str, **kwargs) -> AbstractContext:
if _type == "AbstractContext":
return AbstractContext(**kwargs)
if _type == "AbstractGlobalContext":
return AbstractGlobalContext(**kwargs)
if _type == "ApplicationContext":
return ApplicationContext(**kwargs)
if _type == "CookieIdContext":
return CookieIdContext(**kwargs)
if _type == "HttpContext":
return HttpContext(**kwargs)
if _type == "PathContext":
return PathContext(**kwargs)
if _type == "SessionContext":
return SessionContext(**kwargs)
if _type == "MarketingContext":
return MarketingContext(**kwargs)
if _type == "AbstractLocationContext":
return AbstractLocationContext(**kwargs)
if _type == "InputContext":
return InputContext(**kwargs)
if _type == "PressableContext":
return PressableContext(**kwargs)
if _type == "LinkContext":
return LinkContext(**kwargs)
if _type == "RootLocationContext":
return RootLocationContext(**kwargs)
if _type == "ExpandableContext":
return ExpandableContext(**kwargs)
if _type == "MediaPlayerContext":
return MediaPlayerContext(**kwargs)
if _type == "NavigationContext":
return NavigationContext(**kwargs)
if _type == "OverlayContext":
return OverlayContext(**kwargs)
if _type == "ContentContext":
return ContentContext(**kwargs)
return AbstractContext(**kwargs)
def make_event(_type: str, **kwargs) -> AbstractEvent:
if _type == "AbstractEvent":
return AbstractEvent(**kwargs)
if _type == "InteractiveEvent":
return InteractiveEvent(**kwargs)
if _type == "NonInteractiveEvent":
return NonInteractiveEvent(**kwargs)
if _type == "ApplicationLoadedEvent":
return ApplicationLoadedEvent(**kwargs)
if _type == "FailureEvent":
return FailureEvent(**kwargs)
if _type == "InputChangeEvent":
return InputChangeEvent(**kwargs)
if _type == "PressEvent":
return PressEvent(**kwargs)
if _type == "HiddenEvent":
return HiddenEvent(**kwargs)
if _type == "VisibleEvent":
return VisibleEvent(**kwargs)
if _type == "SuccessEvent":
return SuccessEvent(**kwargs)
if _type == "MediaEvent":
return MediaEvent(**kwargs)
if _type == "MediaLoadEvent":
return MediaLoadEvent(**kwargs)
if _type == "MediaPauseEvent":
return MediaPauseEvent(**kwargs)
if _type == "MediaStartEvent":
return MediaStartEvent(**kwargs)
if _type == "MediaStopEvent":
return MediaStopEvent(**kwargs)
return AbstractEvent(**kwargs)
def make_event_from_dict(obj: Dict[str, Any]) -> AbstractEvent:
if not ('_type' in obj and 'location_stack' in obj and 'global_contexts' in obj):
raise Exception('missing arguments')
obj['location_stack'] = [make_context(**c) for c in obj['location_stack']]
obj['global_contexts'] = [make_context(
**c) for c in obj['global_contexts']]
return make_event(**obj)
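# Minimal usage sketch for the factories above (illustrative only): the required
# keys follow make_event_from_dict/make_context, but the concrete context ids,
# UUID and timestamp below are made up.
if __name__ == '__main__':
    example = make_event_from_dict({
        '_type': 'PressEvent',
        'location_stack': [{'_type': 'RootLocationContext', 'id': 'home'}],
        'global_contexts': [{'_type': 'ApplicationContext', 'id': 'example-app'}],
        'id': '00000000-0000-0000-0000-000000000000',
        'time': 1640995200000,
    })
    print(type(example).__name__)  # -> PressEvent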
|
483978
|
class setu:
    def __init__(
        self,
        url='',
        type=2,
        info=None,
        pic_file=''
    ):
        self.url = url
        self.type = type
        self.info = info if info is not None else {}  # avoid a shared mutable default dict
        self.pic_file = pic_file
|
484003
|
import asyncio
from unittest import mock
def run(loop, coro_or_future):
return loop.run_until_complete(coro_or_future)
def run_until_complete(f):
def wrap(*args, **kwargs):
return run(asyncio.get_event_loop(), f(*args, **kwargs))
return wrap
def make_coro_mock():
coro = mock.Mock(name="CoroutineResult")
corofunc = mock.Mock(name="CoroutineFunction",
side_effect=asyncio.coroutine(coro))
corofunc.coro = coro
return corofunc
def run_once(f, return_value=None):
def wrapper(*args, **kwargs):
if not wrapper.has_run:
wrapper.has_run = True
return f(*args, **kwargs)
return return_value
wrapper.has_run = False
return wrapper
class SynteticBuffer:
def __init__(self):
self.buffer = []
def put(self, data):
self.buffer.append(data)
def pop(self):
return self.buffer.pop()
syntetic_buffer = SynteticBuffer()
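# Small usage sketch of the helpers above (illustrative only): the return value
# and buffered payload are made up, and awaiting the mock relies on the same
# asyncio.coroutine shim used by make_coro_mock above.
@run_until_complete
async def _example_usage():
    corofunc = make_coro_mock()
    corofunc.coro.return_value = 42
    assert await corofunc() == 42
    syntetic_buffer.put('payload')
    assert syntetic_buffer.pop() == 'payload'

if __name__ == '__main__':
    _example_usage()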
|
484013
|
name, age = "Sreelakshmi", 19
username = "Sreelakshmi-M.py"
print ('Hello!')
print("Name: {}\nAge: {}\nUsername: {}".format(name, age, username))
|
484124
|
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from enumfields import EnumField
from ..auth import is_authenticated_user, is_general_admin
from ..enums import UnitGroupAuthorizationLevel
from .base import ModifiableModel
from .unit import Unit
class UnitGroup(ModifiableModel):
name = models.CharField(max_length=200)
members = models.ManyToManyField(Unit, related_name='unit_groups')
class Meta:
verbose_name = _("unit group")
verbose_name_plural = _("unit groups")
def __str__(self):
return self.name
def is_admin(self, user):
return is_authenticated_user(user) and (
user.is_superuser or
is_general_admin(user) or
(user.unit_group_authorizations
.to_unit_group(self).admin_level().exists()))
class UnitGroupAuthorizationQuerySet(models.QuerySet):
def for_user(self, user):
return self.filter(authorized=user)
def to_unit_group(self, unit_group):
return self.filter(subject=unit_group)
def to_unit(self, unit):
return self.filter(subject__members=unit)
def admin_level(self):
return self.filter(level=UnitGroupAuthorizationLevel.admin)
class UnitGroupAuthorization(models.Model):
subject = models.ForeignKey(
UnitGroup, on_delete=models.CASCADE, related_name='authorizations',
verbose_name=_("subject of the authorization"))
level = EnumField(
UnitGroupAuthorizationLevel, max_length=50,
verbose_name=_("authorization level"))
authorized = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE,
related_name='unit_group_authorizations',
verbose_name=_("authorized user"))
class Meta:
unique_together = [('authorized', 'subject', 'level')]
verbose_name = _("unit group authorization")
verbose_name_plural = _("unit group authorizations")
objects = UnitGroupAuthorizationQuerySet.as_manager()
def __str__(self):
return '{unit_group} / {level}: {user}'.format(
unit_group=self.subject, level=self.level, user=self.authorized)
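def unit_admin_authorizations(user, unit):
    """
    Illustrative helper (not part of the original module): chains the queryset
    methods defined above to fetch the admin-level authorizations `user` holds
    for unit groups containing `unit`.
    """
    return (UnitGroupAuthorization.objects
            .for_user(user)
            .to_unit(unit)
            .admin_level())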
|
484131
|
import ipaddress
import itertools
import pynetbox
from config import NETBOX_URL, NETBOX_TOKEN
# Instantiate pynetbox.api class with URL of your NETBOX and your API TOKEN
nb = pynetbox.api(url=NETBOX_URL, token=NETBOX_TOKEN)
# Prepare tags we want to combine
mc_side = ["a_side", "b_side"]
mc_exchange = ["nasdaq", "nyse"]
mc_type = ["prod", "cert", "dr"]
# Create product of the tag families
mc_tag_prod = itertools.product(mc_side, mc_exchange, mc_type)
# Create list with lists of resulting tag combinations
# E.g. ['mc_src', 'a_side', 'nasdaq', 'prod']
mc_tags = sorted([["mc_src"] + (list(t)) for t in mc_tag_prod])
# Container from which we will be assigning prefixes
mc_src_container = ipaddress.IPv4Network("10.255.0.0/16")
mc_src_pfxs = mc_src_container.subnets(new_prefix=28)
# Create new prefixes and attach tags to them
for pfx, tag_list in zip(mc_src_pfxs, mc_tags):
new_pfx = nb.ipam.prefixes.create(prefix=str(pfx), tags=tag_list)
print("Prefix: {0}, tags: {1}".format(new_pfx.prefix, new_pfx.tags))
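# Sanity note on the sizes involved (for reference): the tag product above has
# len(mc_side) * len(mc_exchange) * len(mc_type) = 2 * 2 * 3 = 12 combinations,
# while the /16 container holds 2 ** (28 - 16) = 4096 possible /28 subnets, so
# zip() stops once the 12 tag lists are exhausted and only 12 prefixes are created.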
|
484135
|
nums = [1, 2, 3, 4, 5]
# Call iter and next directly
# and run this to check that iteration works as expected.
it = iter(nums)
print(next(it))
print(next(it))
print(next(it))
print(next(it))
print(next(it))
# Stops here: the iterator is exhausted, so the next call raises StopIteration
print(next(it))
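# For comparison (the call above raises StopIteration, so run this separately):
# a for loop consumes the iterator and handles StopIteration internally.
#
#   for n in nums:
#       print(n)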
|
484226
|
import os
import sys
import logging
import importlib
from collections import OrderedDict
from pathlib import Path
from functools import reduce
from common import utilities
from common.exceptions import ModuleLoadException
from common.module.module import Module
from .dependency_graph import DependencyGraph
from .module_initialization_container import ModuleInitializationContainer
from discord.ext import commands
## Config
CONFIG_OPTIONS = utilities.load_config()
## Logging
logger = utilities.initialize_logging(logging.getLogger(__name__))
class ModuleEntry:
def __init__(self, cls: Module, *init_args, **init_kwargs):
self.module = sys.modules[cls.__module__]
self.cls = cls
self.name = cls.__name__
self.is_cog = issubclass(cls, commands.Cog)
self.args = init_args
self.kwargs = init_kwargs
self.dependencies = init_kwargs.get('dependencies', [])
if ('dependencies' in init_kwargs):
del init_kwargs['dependencies']
## Methods
def get_class_callable(self) -> Module:
'''Returns an invokable object to instantiate the class defined in self.cls'''
return getattr(self.module, self.name)
class ModuleManager:
'''
    Manages the modules' lifecycle. Chiefly, the discovery, registration, and installation of modules. It also
    supports reloading existing modules/cogs.
'''
def __init__(self, bot_controller, bot: commands.Bot):
self.bot_controller = bot_controller
self.bot = bot
modules_dir_path = CONFIG_OPTIONS.get('modules_dir_path')
if (modules_dir_path):
self.modules_dir_path = Path(modules_dir_path)
else:
self.modules_dir_path = Path.joinpath(
utilities.get_root_path(),
CONFIG_OPTIONS.get('modules_dir', 'modules')
)
self.modules = OrderedDict()
self.loaded_modules = {} # Keep non-cog modules loaded in memory
self._dependency_graph = DependencyGraph()
## Methods
def _load_module(self, module_entry: ModuleEntry, module_dependencies = []) -> bool:
        if (self.bot.get_cog(module_entry.name)):
            logger.warn(
                'Cog with name \'{}\' has already been loaded onto the bot, skipping...'.format(module_entry.name)
            )
            return False
module_invoker = module_entry.get_class_callable()
instantiated_module: Module = None
try:
instantiated_module = module_invoker(
*module_entry.args,
**{'dependencies': module_dependencies},
**module_entry.kwargs
)
except ModuleLoadException as e:
logger.error(f"Error: '{e.message}' while loading module: {module_entry.name}.")
## Only set the unsuccessful state if it hasn't already been set. Setting the successful state happens later
            if (
                instantiated_module is not None
                and getattr(instantiated_module, 'successful', None) is not False
            ):
instantiated_module.successful = False
return False
if (module_entry.is_cog):
self.bot.add_cog(instantiated_module)
self.loaded_modules[module_entry.name] = instantiated_module
logger.info('Instantiated {}: {}'.format("Cog" if module_entry.is_cog else "Module", module_entry.name))
return True
def load_registered_modules(self) -> int:
'''Performs the initial load of modules, and adds them to the bot'''
def load_node(node) -> int:
counter = 0
if (not node.loaded and reduce(lambda value, node: node.loaded and value, node.parents, True)):
dependencies = {}
for parent in node.parents:
dependencies[parent.name] = self.loaded_modules[parent.name]
module_entry = self.modules.get(node.name)
node.loaded = self._load_module(module_entry, module_dependencies=dependencies)
if (not node.loaded):
return 0
                ## Default the success state to True when loading a module, since that's the expected state. If a
                ## failure state is entered, then that's much more explicit.
loaded_module = self.loaded_modules[module_entry.name]
if (loaded_module.successful is None):
loaded_module.successful = True
counter += 1
for child in node.children:
counter += load_node(child)
## Number of loaded modules + the root node itself
return counter
## Clear out the loaded_modules (if any)
self.loaded_modules = {}
self._dependency_graph.set_graph_loaded_state(False)
## Keep track of the number of successfully loaded modules
counter = 0
## todo: parallelize?
for node in self._dependency_graph.roots:
try:
counter += load_node(node)
except ModuleLoadException as e:
logger.warn(f"{e}. This module and all modules that depend on it will be skipped.")
continue
return counter
def reload_registered_modules(self) -> int:
module_entry: ModuleEntry
for module_entry in self.modules.values():
## Detach loaded cogs
if (module_entry.is_cog):
self.bot.remove_cog(module_entry.name)
## Reimport the module itself
try:
importlib.reload(module_entry.module)
except Exception as e:
logger.error("Error: ({}) reloading module: {}. Attempting to continue...".format(e, module_entry.name))
## Reload the modules via dependency graph
loaded_module_count = self.load_registered_modules()
logger.info("Loaded {}/{} modules.".format(loaded_module_count, len(self.modules)))
return loaded_module_count
def register_module(self, cls: Module, *init_args, **init_kwargs):
'''Registers module data with the ModuleManager, and prepares any necessary dependencies'''
module_entry = ModuleEntry(cls, *init_args, **init_kwargs)
self.modules[module_entry.name] = module_entry
self._dependency_graph.insert(cls, module_entry.dependencies)
def discover_modules(self):
'''Discovers the available modules, and assembles the data needed to register them'''
        if (not self.modules_dir_path.exists()):
logger.warn('Modules directory doesn\'t exist, so no modules will be loaded.')
return
## Build a list of potential module paths and iterate through it...
module_directories = os.listdir(self.modules_dir_path)
for module_directory in module_directories:
module_path = Path.joinpath(self.modules_dir_path, module_directory)
            ## Note that the entrypoint for the module should share the same name as its parent folder. For example:
## phrases.py is the entrypoint for the phrases/ directory
module_entrypoint = Path.joinpath(module_path, module_path.name + '.py')
            if (module_entrypoint.exists()):
## Expose the module's root directory to the interpreter, so it can be imported
sys.path.append(str(module_path))
## Attempt to import the module (akin to 'import [name]') and register it normally
## NOTE: Modules MUST have a 'main()' function that essentially returns a list containing all the args
## needed by the 'register()' method of this ModuleManager class. At a minimum this list MUST
## contain a reference to the class that serves as an entry point to the module. You should also
## specify whether or not a given module is a cog (for discord.py) or not.
try:
module = importlib.import_module(module_path.name)
module_init = module.main()
except Exception as e:
                    logger.exception("Unable to import module {} on bot.".format(module_path.name))
del sys.path[-1] ## Prune back the failed module from the path
continue
## Filter out any malformed modules
if (not isinstance(module_init, ModuleInitializationContainer) and type(module_init) != bool):
                    logger.error(
                        "Unable to add module {}, as it's neither an instance of {}, nor a boolean.".format(
                            module_path.name,
                            ModuleInitializationContainer.__name__
                        )
                    )
                    continue
## Allow modules to be skipped if they're in a falsy 'disabled' state
if (module_init == False):
logger.info("Skipping module {}, as its initialization data was false".format(module_path.name))
continue
## Build args to register the module
register_module_args = []
register_module_kwargs = {**module_init.init_kwargs}
if (module_init.is_cog):
## Cogs will need these set explicitly
register_module_args.append(self.bot_controller)
register_module_args.append(self.bot)
else:
## Otherwise, modules can use them as needed
register_module_kwargs['bot_controller'] = self.bot_controller
register_module_kwargs['bot'] = self.bot
                if (len(module_init.init_args) > 0):
                    register_module_args.extend(module_init.init_args)
## Register the module!
try:
self.register_module(module_init.cls, *register_module_args, **register_module_kwargs)
except Exception as e:
logger.exception("Unable to register module {} on bot.".format(module_path.name))
del sys.path[-1] ## Prune back the failed module from the path
del module
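# Typical startup sequence for this manager (illustrative only; 'bot_controller'
# and 'bot' are assumed to be created by the surrounding application):
#
#   manager = ModuleManager(bot_controller, bot)
#   manager.discover_modules()
#   loaded = manager.load_registered_modules()
#   logger.info("Loaded %d modules at startup.", loaded)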
|
484240
|
from __future__ import absolute_import, unicode_literals
import pytest
from case import skip
from kombu.utils.functional import lazy
from celery.five import nextfun, range
from celery.utils.functional import (DummyContext, first, firstmethod,
fun_accepts_kwargs, fun_takes_argument,
head_from_fun, maybe_list, mlazy,
padlist, regen, seq_concat_item,
seq_concat_seq)
def test_DummyContext():
with DummyContext():
pass
with pytest.raises(KeyError):
with DummyContext():
raise KeyError()
@pytest.mark.parametrize('items,n,default,expected', [
(['George', 'Costanza', 'NYC'], 3, None,
['George', 'Costanza', 'NYC']),
(['George', 'Costanza'], 3, None,
['George', 'Costanza', None]),
(['George', 'Costanza', 'NYC'], 4, 'Earth',
['George', 'Costanza', 'NYC', 'Earth']),
])
def test_padlist(items, n, default, expected):
assert padlist(items, n, default=default) == expected
class test_firstmethod:
def test_AttributeError(self):
assert firstmethod('foo')([object()]) is None
def test_handles_lazy(self):
class A(object):
def __init__(self, value=None):
self.value = value
def m(self):
return self.value
assert 'four' == firstmethod('m')([
A(), A(), A(), A('four'), A('five')])
assert 'four' == firstmethod('m')([
A(), A(), A(), lazy(lambda: A('four')), A('five')])
def test_first():
iterations = [0]
def predicate(value):
iterations[0] += 1
if value == 5:
return True
return False
assert first(predicate, range(10)) == 5
assert iterations[0] == 6
iterations[0] = 0
assert first(predicate, range(10, 20)) is None
assert iterations[0] == 10
def test_maybe_list():
assert maybe_list(1) == [1]
assert maybe_list([1]) == [1]
assert maybe_list(None) is None
def test_mlazy():
it = iter(range(20, 30))
p = mlazy(nextfun(it))
assert p() == 20
assert p.evaluated
assert p() == 20
assert repr(p) == '20'
class test_regen:
def test_list(self):
l = [1, 2]
r = regen(iter(l))
assert regen(l) is l
assert r == l
assert r == l # again
assert r.__length_hint__() == 0
fun, args = r.__reduce__()
assert fun(*args) == l
def test_gen(self):
g = regen(iter(list(range(10))))
assert g[7] == 7
assert g[6] == 6
assert g[5] == 5
assert g[4] == 4
assert g[3] == 3
assert g[2] == 2
assert g[1] == 1
assert g[0] == 0
        assert g.data == list(range(10))
assert g[8] == 8
assert g[0] == 0
g = regen(iter(list(range(10))))
assert g[0] == 0
assert g[1] == 1
assert g.data == list(range(10))
g = regen(iter([1]))
assert g[0] == 1
with pytest.raises(IndexError):
g[1]
assert g.data == [1]
g = regen(iter(list(range(10))))
assert g[-1] == 9
assert g[-2] == 8
assert g[-3] == 7
assert g[-4] == 6
assert g[-5] == 5
assert g[5] == 5
assert g.data == list(range(10))
assert list(iter(g)) == list(range(10))
class test_head_from_fun:
def test_from_cls(self):
class X(object):
def __call__(x, y, kwarg=1): # noqa
pass
g = head_from_fun(X())
with pytest.raises(TypeError):
g(1)
g(1, 2)
g(1, 2, kwarg=3)
def test_from_fun(self):
def f(x, y, kwarg=1):
pass
g = head_from_fun(f)
with pytest.raises(TypeError):
g(1)
g(1, 2)
g(1, 2, kwarg=3)
@skip.unless_python3()
def test_regression_3678(self):
local = {}
fun = ('def f(foo, *args, bar="", **kwargs):'
' return foo, args, bar')
exec(fun, {}, local)
g = head_from_fun(local['f'])
g(1)
g(1, 2, 3, 4, bar=100)
with pytest.raises(TypeError):
g(bar=100)
@skip.unless_python3()
def test_from_fun_with_hints(self):
local = {}
fun = ('def f_hints(x: int, y: int, kwarg: int=1):'
' pass')
exec(fun, {}, local)
f_hints = local['f_hints']
g = head_from_fun(f_hints)
with pytest.raises(TypeError):
g(1)
g(1, 2)
g(1, 2, kwarg=3)
@skip.unless_python3()
def test_from_fun_forced_kwargs(self):
local = {}
fun = ('def f_kwargs(*, a, b="b", c=None):'
' return')
exec(fun, {}, local)
f_kwargs = local['f_kwargs']
g = head_from_fun(f_kwargs)
with pytest.raises(TypeError):
g(1)
g(a=1)
g(a=1, b=2)
g(a=1, b=2, c=3)
def test_classmethod(self):
class A(object):
@classmethod
def f(cls, x):
return x
fun = head_from_fun(A.f, bound=False)
assert fun(A, 1) == 1
fun = head_from_fun(A.f, bound=True)
assert fun(1) == 1
class test_fun_takes_argument:
def test_starkwargs(self):
assert fun_takes_argument('foo', lambda **kw: 1)
def test_named(self):
assert fun_takes_argument('foo', lambda a, foo, bar: 1)
def fun(a, b, c, d):
return 1
assert fun_takes_argument('foo', fun, position=4)
def test_starargs(self):
assert fun_takes_argument('foo', lambda a, *args: 1)
def test_does_not(self):
assert not fun_takes_argument('foo', lambda a, bar, baz: 1)
assert not fun_takes_argument('foo', lambda: 1)
def fun(a, b, foo):
return 1
assert not fun_takes_argument('foo', fun, position=4)
@pytest.mark.parametrize('a,b,expected', [
((1, 2, 3), [4, 5], (1, 2, 3, 4, 5)),
((1, 2), [3, 4, 5], [1, 2, 3, 4, 5]),
([1, 2, 3], (4, 5), [1, 2, 3, 4, 5]),
([1, 2], (3, 4, 5), (1, 2, 3, 4, 5)),
])
def test_seq_concat_seq(a, b, expected):
res = seq_concat_seq(a, b)
assert type(res) is type(expected) # noqa
assert res == expected
@pytest.mark.parametrize('a,b,expected', [
((1, 2, 3), 4, (1, 2, 3, 4)),
([1, 2, 3], 4, [1, 2, 3, 4]),
])
def test_seq_concat_item(a, b, expected):
res = seq_concat_item(a, b)
assert type(res) is type(expected) # noqa
assert res == expected
class StarKwargsCallable(object):
def __call__(self, **kwargs):
return 1
class StarArgsStarKwargsCallable(object):
def __call__(self, *args, **kwargs):
return 1
class StarArgsCallable(object):
def __call__(self, *args):
return 1
class ArgsCallable(object):
def __call__(self, a, b):
return 1
class ArgsStarKwargsCallable(object):
def __call__(self, a, b, **kwargs):
return 1
class test_fun_accepts_kwargs:
@pytest.mark.parametrize('fun', [
lambda a, b, **kwargs: 1,
lambda *args, **kwargs: 1,
lambda foo=1, **kwargs: 1,
StarKwargsCallable(),
StarArgsStarKwargsCallable(),
ArgsStarKwargsCallable(),
])
def test_accepts(self, fun):
assert fun_accepts_kwargs(fun)
@pytest.mark.parametrize('fun', [
lambda a: 1,
lambda a, b: 1,
lambda *args: 1,
lambda a, kw1=1, kw2=2: 1,
StarArgsCallable(),
ArgsCallable(),
])
def test_rejects(self, fun):
assert not fun_accepts_kwargs(fun)
|
484246
|
from contextlib import contextmanager
import functools
import logging
import numpy as np
import pytest
import torch
from pymde import util
def assert_allclose(x, y, up_to_sign=False, rtol=1e-4, atol=1e-5):
if isinstance(x, torch.Tensor):
x = x.detach().cpu().numpy()
if isinstance(y, torch.Tensor):
y = y.detach().cpu().numpy()
if up_to_sign:
try:
np.testing.assert_allclose(x, y, rtol=rtol, atol=atol)
except AssertionError:
np.testing.assert_allclose(-x, y, rtol=rtol, atol=atol)
else:
np.testing.assert_allclose(x, y, rtol=rtol, atol=atol)
def assert_all_equal(x, y):
if isinstance(x, torch.Tensor):
x = x.detach().cpu().numpy()
if isinstance(y, torch.Tensor):
y = y.detach().cpu().numpy()
np.testing.assert_array_equal(x, y)
def cpu(func):
@functools.wraps(func)
def wrapper(self):
current_device = util.get_default_device()
util.set_default_device('cpu')
func(self)
util.set_default_device(current_device)
return wrapper
def cpu_and_cuda(func):
if torch.cuda.is_available():
return pytest.mark.parametrize("device", ['cpu', 'cuda'])(func)
else:
return pytest.mark.parametrize("device", ['cpu'])(func)
@contextmanager
def disable_logging(up_to=logging.CRITICAL):
previous_level = logging.root.manager.disable
logging.disable(up_to)
try:
yield
finally:
logging.disable(previous_level)
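# Small usage sketch (illustrative only): silence logging while comparing two
# tensors that should agree up to sign.
if __name__ == '__main__':
    with disable_logging():
        assert_allclose(torch.ones(3), -torch.ones(3), up_to_sign=True)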
|
484265
|
import os, sys, pytest, copy
from numpy import isclose
# import repo's tests utilities
cur_dir = os.path.dirname(__file__)
path = os.path.abspath(os.path.join(cur_dir, '..', 'tests'))
if not path in sys.path:
sys.path.insert(1, path)
del path
import test_util
# Set arguments used in all forest tests:
# Define all but the b (beta) parameter here; vary beta for the tests
conf_params = {
'w': 0,
'D': 5,
'mu': 0,
'g': 0
}
# Location of the prize, network, and root node files
forest_opts = {
'prize': os.path.join(cur_dir, 'small_forest_tests', 'w_test_prizes.txt'),
'edge': os.path.join(cur_dir, 'small_forest_tests', 'w_test_network.txt'),
'dummyMode': os.path.join(cur_dir, 'small_forest_tests', 'w_test_roots.txt')
}
class TestBeta:
'''
Test various values of the beta parameter:
p'(v) = beta * p(v) - mu * deg(v)
min f'(F) = sum_{v not in V_F} p'(v) + sum_{e in E_F} c(e) + w * K
Higher values of beta increase the importance of prizes relative to their degree penalty, and
also increase the importance of prizes relative to the cost of edges
Use the following test network:
    A    B
     \    \
      C -> D
p(A) = 0
p(B) = 0
p(C) = 1
p(D) = 1
c(AC) = 0.25
c(BD) = 0.25
c(CD) = 0.75
'''
def test_beta_0(self, msgsteiner):
'''
Run Forest with beta = 0 and check optimal subnetwork
In p'(v) = beta * p(v) - mu * deg(v), beta = 0 implies that an empty graph is the optimal network
INPUT:
msgsteiner - fixture object with the value of --msgpath parsed by conftest.py
'''
params = copy.deepcopy(conf_params)
params['b'] = 0
graph, objective = test_util.run_forest(msgsteiner, params, forest_opts)
assert graph.order() == 0, "Unexpected number of nodes"
assert graph.size() == 0, "Unexpected number of edges"
# Check that the optimal forest has the correct objective function
# value, using isclose to allow for minor floating point variation
# Objective function: 0
# Excluded prizes: 0
# Edge costs: 0
# Number of trees * w: 0
assert isclose(0, objective, rtol=0, atol=1e-5), 'Incorrect objective function value'
def test_beta_024(self, msgsteiner):
'''
Run Forest with beta = 0.24
This value of beta is just below the 0.25 value of beta which sets the prize benefit
of C and D equal to the cost of obtaining those prizes through edges AC and BD: we expect
an empty network again
'''
params = copy.deepcopy(conf_params)
params['b'] = 0.24
graph, objective = test_util.run_forest(msgsteiner, params, forest_opts)
assert graph.order() == 0, "Unexpected number of nodes"
assert graph.size() == 0, "Unexpected number of edges"
# Check that the optimal forest has the correct objective function
# value, using isclose to allow for minor floating point variation
# Objective function: 0.48
# Excluded prizes: 0.48
# Edge costs: 0
# Number of trees * w: 0
assert isclose(0.48, objective, rtol=0, atol=1e-5), 'Incorrect objective function value'
def test_beta_026(self, msgsteiner):
'''
Run Forest with beta = 0.26
See test_beta_024; this value of beta makes it worth while to obtain prizes for C and D;
we expect the following network:
        A    B
         \    \
          C    D
note an undirected edge ab in a digraph is represented by the network containing both edges ab and ba
'''
params = copy.deepcopy(conf_params)
params['b'] = 0.26
graph, objective = test_util.run_forest(msgsteiner, params, forest_opts)
assert graph.order() == 4
assert graph.size() == 4
assert graph.has_edge('A', 'C')
assert graph.has_edge('C', 'A')
assert graph.has_edge('B', 'D')
assert graph.has_edge('D', 'B')
# Check that the optimal forest has the correct objective function
# value, using isclose to allow for minor floating point variation
# Objective function: 0.5
# Excluded prizes: 0
# Edge costs: 0.5
# Number of trees * w: 0 * 2 = 0
assert isclose(0.5, objective, rtol=0, atol=1e-5), 'Incorrect objective function value'
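    # Break-even arithmetic behind the beta values used above (for reference):
    # with p(C) = p(D) = 1 and c(AC) = c(BD) = 0.25, collecting both prizes pays
    # off exactly when beta * (p(C) + p(D)) > c(AC) + c(BD), i.e. 2 * beta > 0.5.
    # beta = 0.25 is therefore the break-even point: 0.24 leaves the forest empty,
    # while 0.26 buys edges AC and BD (but still not the more expensive CD edge).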
|
484271
|
from ..src.sim import FEM
import numpy as np
class simulate_fenics_rve(FEM):
"""
SIMULATION-Module wrap for FEniCS
"""
def __init__(self, in_data, model,**kwargs):
""" Initialize """
self.__name__ = 'FEniCS-RVE' # Name module
super().__init__(in_data=in_data, model=model,**kwargs) # Initialize base-class
self.dim = model.domain.dim
def post_init(self):
""" Implement post_init rule !!!"""
if self.dim ==2:
self.out_var = ['E11','E12','E22','S11','S12','S22'] # Name output
elif self.dim == 3:
self.out_var = ['E11','E12','E13','E22','E23','E33','S11','S12','S13','S22','S23','S33'] # Name output
else:
            raise NotImplementedError
self.out = np.zeros((self.in_data.num,len(self.out_var))) # Initialize numpy output
def to_do(self,i):
""" Implement to_do method !!!"""
################################
# Create Macroscopic Deformation Gradient
################################
F11 = self.in_data.DATA.DataFrame.iloc[i]['F11']
F12 = self.in_data.DATA.DataFrame.iloc[i]['F12']
F22 = self.in_data.DATA.DataFrame.iloc[i]['F22']
F_macro = np.array([[F11,F12],[F12,F22]])
if self.dim == 3:
F13 = self.in_data.DATA.DataFrame.iloc[i]['F13']
F23 = self.in_data.DATA.DataFrame.iloc[i]['F23']
F33 = self.in_data.DATA.DataFrame.iloc[i]['F33']
F_macro = np.array([[F11,F12,F13],[F12,F22,F23],[F13,F23,F33]])
E = 0.5*(np.dot((F_macro+np.eye(self.dim)).T,(F_macro+np.eye(self.dim)))-np.eye(self.dim))
self.model(F_macro,i) # Call your model with F_macro
self.model.solver() # Solve your model
S,_ = self.model.postprocess() # Post-process your results
if self.dim == 3:
self.out[i,:] = [E[0,0],E[0,1],E[0,2],E[1,1],E[1,2],E[2,2],S[0,0],S[0,1],S[0,2],S[1,1],S[1,2],S[2,2]]
elif self.dim == 2:
self.out[i,:] = [E[0,0],E[0,1],E[1,1],S[0,0],S[0,1],S[1,1]] # Store your output
class simulate_fenics_rve_old(FEM):
"""
SIMULATION-Module wrap for FEniCS
"""
def __init__(self, doe, model):
""" Initialize """
self.__name__ = 'FEniCS-RVE' # Name module
super().__init__(doe, model) # Initialize base-class
def post_init(self):
""" Implement post_init rule !!!"""
self.out_var = ['E11','E12','E22','S11','S12','S22'] # Name output
self.out = np.zeros((self.doe.num,len(self.out_var))) # Initialize numpy output
def to_do(self,i):
""" Implement to_do method !!!"""
################################
# Create Macroscopic Deformation Gradient
################################
F11 = self.doe.pandas_data.iloc[i]['F11']
F12 = self.doe.pandas_data.iloc[i]['F12']
F22 = self.doe.pandas_data.iloc[i]['F22']
F_macro = np.array([[F11,F12],[F12,F22]])
E = 0.5*(np.dot((F_macro+np.eye(2)).T,(F_macro+np.eye(2)))-np.eye(2))
self.model(F_macro) # Call your model with F_macro
self.model.solver() # Solve your model
S,_ = self.model.postprocess() # Post-process your results
self.out[i,:] = [E[0,0],E[0,1],E[1,1],S[0,0],S[0,1],S[1,1]] # Store your output
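# Note on the strain measure used in both classes above (for reference):
# E = 0.5 * ((I + F_macro)^T (I + F_macro) - I) is the Green-Lagrange strain,
# i.e. F_macro is treated as the macroscopic displacement gradient and
# (I + F_macro) as the deformation gradient imposed on the RVE.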
|
484276
|
import sys
from qam import Qam
from matplotlib import pyplot as plt
if len(sys.argv) != 2:
    print("Usage: %s <data-bits>" % sys.argv[0])
    sys.exit(1)
modulation = {
'0' : (1,0),
'1' : (1,180),
}
q = Qam(baud_rate = 10,
bits_per_baud = 1,
carrier_freq = 50,
modulation = modulation)
s = q.generate_signal(sys.argv[1])
plt.figure(1)
q.plot_constellation()
plt.figure(2)
s.plot(dB=False, phase=False, stem=False, frange=(0,500))
|
484289
|
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import ModelViewSet
from rest_framework import status
from rest_framework.permissions import AllowAny, IsAuthenticated
from app_test.serializers import LoginSerializer, TodoSerializer
from app_test.models import Todo
class LoginView(APIView):
permission_classes = (AllowAny,)
serializer_class = LoginSerializer
def post(self, request):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class TodoView(ModelViewSet):
permission_classes = (IsAuthenticated,)
serializer_class = TodoSerializer
queryset = Todo.objects.all()
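# Typical URL wiring for the viewset above (illustrative only; the prefix and
# basename are assumptions, not part of this app):
#
#   from rest_framework.routers import DefaultRouter
#   router = DefaultRouter()
#   router.register(r'todos', TodoView, basename='todo')
#   urlpatterns = router.urls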
|
484312
|
from buffpy.models.profile import PATHS, Profile
class Profiles(list):
"""
Manage profiles
+ all -> get all the profiles from buffer
+ filter -> wrapper for list filtering
"""
def __init__(self, api, *args, **kwargs):
super().__init__(*args, **kwargs)
self.api = api
def all(self):
"""
        Get all social network profiles.
"""
response = self.api.get(url=PATHS["GET_PROFILES"])
for raw_profile in response:
self.append(Profile(self.api, raw_profile))
return self
def filter(self, **kwargs):
"""
Based on some criteria, filter the profiles and return a new
Profiles Manager containing only the chosen items.
        If the manager doesn't have any items,
get all the profiles from Buffer.
"""
if not len(self):
self.all()
new_list = [
item for item in self
if any([item[arg] == kwargs[arg] for arg in kwargs])
]
return Profiles(self.api, new_list)
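# Hypothetical usage sketch (not part of the original module); the client setup
# below assumes buffpy's API class and real Buffer credentials:
#
#   from buffpy.api import API
#   api = API(client_id="...", client_secret="...", access_token="...")
#   twitter_profiles = Profiles(api=api).filter(service="twitter")
#
# filter() lazily fetches all profiles on first use, then keeps the items
# matching any of the given keyword criteria.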
|
484336
|
import torch.nn as nn
from models.backbone.alexnet import AlexNetBackbone
from models.backbone.linear import LinearBackbone
from models.backbone.resnet import ResNetBackbone
from models.backbone.swinvit import SwinTransformerBackbone
from models.backbone.vgg import VGGBackbone
from models.backbone.vit import ViTBackbone
from models.layers.activation import SignHashLayer, StochasticBinaryLayer
from models.layers.bihalf import BiHalfLayer
from models.layers.zm import MeanOnlyBatchNorm
def get_backbone(backbone, nbit, nclass, pretrained, freeze_weight, **kwargs):
if backbone == 'alexnet':
return AlexNetBackbone(nbit=nbit, nclass=nclass, pretrained=pretrained,
freeze_weight=freeze_weight, **kwargs)
elif backbone == 'resnet18':
return ResNetBackbone(nbit=nbit, nclass=nclass, pretrained=pretrained,
resnet_size='18', freeze_weight=freeze_weight, **kwargs)
elif backbone == 'resnet34':
return ResNetBackbone(nbit=nbit, nclass=nclass, pretrained=pretrained,
resnet_size='34', freeze_weight=freeze_weight, **kwargs)
elif backbone == 'resnet50':
return ResNetBackbone(nbit=nbit, nclass=nclass, pretrained=pretrained,
resnet_size='50', freeze_weight=freeze_weight, **kwargs)
elif backbone == 'resnet101':
return ResNetBackbone(nbit=nbit, nclass=nclass, pretrained=pretrained,
resnet_size='101', freeze_weight=freeze_weight, **kwargs)
elif backbone == 'resnet152':
return ResNetBackbone(nbit=nbit, nclass=nclass, pretrained=pretrained,
resnet_size='152', freeze_weight=freeze_weight, **kwargs)
elif backbone == 'vgg16':
return VGGBackbone(nbit=nbit, nclass=nclass, pretrained=pretrained,
vgg_size='vgg16', freeze_weight=freeze_weight, **kwargs)
elif backbone == 'vgg16bn':
return VGGBackbone(nbit=nbit, nclass=nclass, pretrained=pretrained,
vgg_size='vgg16bn', freeze_weight=freeze_weight, **kwargs)
elif backbone == 'linear':
return LinearBackbone(nclass=nclass, nbit=nbit, **kwargs)
elif backbone == 'vit':
return ViTBackbone(nbit=nbit, nclass=nclass, vit_name='vit_base_patch16_224',
pretrained=pretrained, freeze_weight=freeze_weight, **kwargs)
elif backbone == 'vittiny':
return ViTBackbone(nbit=nbit, nclass=nclass, vit_name='vit_tiny_patch16_224',
pretrained=pretrained, freeze_weight=freeze_weight, **kwargs)
elif backbone == 'vitsmall':
return ViTBackbone(nbit=nbit, nclass=nclass, vit_name='vit_small_patch16_224',
pretrained=pretrained, freeze_weight=freeze_weight, **kwargs)
elif backbone == 'swin':
return SwinTransformerBackbone(nbit=nbit, nclass=nclass, vit_name='swin_base_patch4_window7_224',
pretrained=pretrained, freeze_weight=freeze_weight, **kwargs)
elif backbone == 'swintiny':
return SwinTransformerBackbone(nbit=nbit, nclass=nclass, vit_name='swin_tiny_patch4_window7_224',
pretrained=pretrained, freeze_weight=freeze_weight, **kwargs)
elif backbone == 'swinsmall':
return SwinTransformerBackbone(nbit=nbit, nclass=nclass, vit_name='swin_small_patch4_window7_224',
pretrained=pretrained, freeze_weight=freeze_weight, **kwargs)
else:
        raise NotImplementedError('The backbone is not implemented.')
def get_hash_fc_with_normalizations(in_features, nbit, bias, kwargs):
output_choice = kwargs.get('hash_fc_output', 'identity')
if output_choice == 'bn': # kwargs.get('bn_to_hash_fc', True):
hash_fc = nn.Sequential(
nn.Linear(in_features, nbit, bias=bias),
nn.BatchNorm1d(nbit)
)
elif output_choice == 'zbn': # kwargs.get('zero_mean_bn', False)
hash_fc = nn.Sequential(
nn.Linear(in_features, nbit, bias=bias),
MeanOnlyBatchNorm(nbit, dim=2)
)
elif output_choice == 'bihalf': # elif kwargs.get('bihalf_to_hash_fc', False):
hash_fc = nn.Sequential(
nn.Linear(in_features, nbit, bias=bias),
BiHalfLayer(kwargs.get('bihalf_gamma', 6))
)
else: # other
hash_fc = nn.Sequential(
nn.Linear(in_features, nbit, bias=bias),
get_hash_activation(output_choice)
)
return hash_fc
def get_hash_activation(name='identity'):
if name == 'identity':
return nn.Identity()
elif name == 'signhash':
return SignHashLayer()
elif name == 'tanh':
return nn.Tanh()
elif name == 'sigmoid':
return nn.Sigmoid()
elif name == 'stochasticbin':
return StochasticBinaryLayer()
else:
        raise ValueError(f'{name} is not a valid hash activation.')
class Lambda(nn.Module):
def __init__(self, lambda_func):
super(Lambda, self).__init__()
self.lambda_func = lambda_func
def forward(self, x):
return self.lambda_func(x)
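# Hypothetical usage sketch (not part of the original file): composing the
# self-contained helpers above. The layer sizes are arbitrary examples.
def _example_hash_head(in_features=512, nbit=64):
    """Build a small hash head with a tanh output, mirroring get_hash_fc_with_normalizations."""
    return nn.Sequential(
        nn.Linear(in_features, nbit),
        get_hash_activation('tanh'),
        Lambda(lambda x: x),  # no-op Lambda, just to show the wrapper
    )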
|
484357
|
from liblo import *
import sys
import time
class MyServer(ServerThread):
osc_msgs_recv = 0
def __init__(self):
ServerThread.__init__(self, 4000)
@make_method('/foo', 'ifs')
def foo_callback(self, path, args):
i, f, s = args
self.osc_msgs_recv += 1
#print "received message '%s' with arguments: %d, %f, %s" % (path, i, f, s)
@make_method(None, None)
def fallback(self, path, args):
self.osc_msgs_recv += 1
#print "received unknown message '%s'" % path
try:
server = MyServer()
except ServerError as err:
    print(str(err))
    sys.exit()
server.start()
last_time = time.time()
this_time = 0
while True :
this_time = time.time()
elapsed_time = this_time - last_time
last_time = this_time
osc_msgs_recv = server.osc_msgs_recv
osc_msgs_per_sec = osc_msgs_recv / elapsed_time
txt = str('osc: ' + str(osc_msgs_recv) + ', osc / sec: ' + str(osc_msgs_per_sec) + ', elp: ' + str(elapsed_time) )
    print(txt)
server.osc_msgs_recv = 0
time.sleep(1)
raw_input("press enter to quit...\n")
|
484397
|
from .frame_skip import frame_skip_v0 # NOQA
from .basic_wrappers import color_reduction_v0, resize_v0, dtype_v0, \
flatten_v0, reshape_v0, normalize_obs_v0, clip_actions_v0, clip_reward_v0, \
scale_actions_v0 # NOQA
from .nan_wrappers import nan_random_v0, nan_noop_v0, nan_zeros_v0 # NOQA
from .delay_observations import delay_observations_v0 # NOQA
from .frame_stack import frame_stack_v1 # NOQA
from .max_observation import max_observation_v0 # NOQA
from .sticky_actions import sticky_actions_v0 # NOQA
|
484401
|
t = int(input())
for _ in range(t):
n,k,d = map(int, input().split())
    # Take n integers and store them in a list
arr = list(map(int, input().split()))
# Finding sum of all elements inside array
total = sum(arr)
    # Floor division would give the answer, but
    # it must not exceed d, therefore
    # the min of the two is taken
print(min(total//k,d))
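# Worked example (illustrative, not from the problem statement):
# arr = [3, 2, 5], k = 4, d = 2  ->  total = 10, total // k = 2, min(2, d) = 2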
|
484403
|
from datetime import date
from controls.models import FinancialYear, ModuleSettings, Period
from dateutil.relativedelta import relativedelta
from django.contrib.auth import get_user_model
from django.shortcuts import reverse
from django.test import TestCase
from nominals.models import Nominal, NominalTransaction
class CreateFyTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.url = reverse("controls:fy_create")
cls.user = get_user_model().objects.create_superuser(
username="dummy", password="<PASSWORD>")
def test_successful_first_fy(self):
"""
First FY should change the module settings periods
"""
self.client.force_login(self.user)
ModuleSettings.objects.create(
cash_book_period=None,
nominals_period=None,
purchases_period=None,
sales_period=None
)
response = self.client.post(self.url, data={
"financial_year": 2020,
"period-0-month_start": "01-2020",
"period-1-month_start": "02-2020",
"period-2-month_start": "03-2020",
"period-3-month_start": "04-2020",
"period-4-month_start": "05-2020",
"period-5-month_start": "06-2020",
"period-6-month_start": "07-2020",
"period-7-month_start": "08-2020",
"period-8-month_start": "09-2020",
"period-9-month_start": "10-2020",
"period-10-month_start": "11-2020",
"period-11-month_start": "12-2020",
"period-TOTAL_FORMS": "12",
"period-INITIAL_FORMS": "0",
"period-MIN_NUM_FORMS": "0",
"period-MAX_NUM_FORMS": "1000"
})
self.assertEqual(
response.status_code,
302
)
fys = FinancialYear.objects.all()
self.assertEqual(
len(fys),
1
)
fy = fys[0]
self.assertEqual(
fy.financial_year,
2020
)
self.assertEqual(
fy.number_of_periods,
12
)
periods = Period.objects.all()
self.assertEqual(
len(periods),
12
)
for i, period in enumerate(periods):
self.assertEqual(
period.fy,
fy
)
self.assertEqual(
period.month_start,
date(2020, i + 1, 1)
)
self.assertEqual(
period.period,
str(i + 1).rjust(2, "0")
)
self.assertEqual(
period.fy_and_period,
str(fy) + str(i+1).rjust(2, "0")
)
        mod_settings = ModuleSettings.objects.first()
        self.assertEqual(
            mod_settings.cash_book_period,
            periods[0]
        )
        self.assertEqual(
            mod_settings.nominals_period,
            periods[0]
        )
        self.assertEqual(
            mod_settings.purchases_period,
            periods[0]
        )
        self.assertEqual(
            mod_settings.sales_period,
            periods[0]
        )
def test_successful_second_fy(self):
"""
        A second FY should not change the module settings periods
"""
self.client.force_login(self.user)
fy_2019 = FinancialYear.objects.create(
financial_year=2019, number_of_periods=1)
first_and_only_period_of_2019 = Period.objects.create(
fy=fy_2019, period="01", fy_and_period="201901", month_start=date(2019, 12, 1))
ModuleSettings.objects.create(
cash_book_period=first_and_only_period_of_2019,
nominals_period=first_and_only_period_of_2019,
purchases_period=first_and_only_period_of_2019,
sales_period=first_and_only_period_of_2019
)
response = self.client.post(self.url, data={
"financial_year": 2020,
"period-0-month_start": "01-2020",
"period-1-month_start": "02-2020",
"period-2-month_start": "03-2020",
"period-3-month_start": "04-2020",
"period-4-month_start": "05-2020",
"period-5-month_start": "06-2020",
"period-6-month_start": "07-2020",
"period-7-month_start": "08-2020",
"period-8-month_start": "09-2020",
"period-9-month_start": "10-2020",
"period-10-month_start": "11-2020",
"period-11-month_start": "12-2020",
"period-TOTAL_FORMS": "12",
"period-INITIAL_FORMS": "0",
"period-MIN_NUM_FORMS": "0",
"period-MAX_NUM_FORMS": "1000"
})
self.assertEqual(
response.status_code,
302
)
fys = FinancialYear.objects.all().order_by("financial_year")
self.assertEqual(
len(fys),
2
)
fy = fys[1]
self.assertEqual(
fy.financial_year,
2020
)
self.assertEqual(
fy.number_of_periods,
12
)
periods = Period.objects.exclude(fy_and_period="201901").all()
self.assertEqual(
len(periods),
12
)
for i, period in enumerate(periods):
self.assertEqual(
period.fy,
fy
)
self.assertEqual(
period.month_start,
date(2020, i + 1, 1)
)
self.assertEqual(
period.period,
str(i + 1).rjust(2, "0")
)
self.assertEqual(
period.fy_and_period,
str(fy) + str(i+1).rjust(2, "0")
)
        mod_settings = ModuleSettings.objects.first()
        # check posting periods have not changed
        self.assertEqual(
            mod_settings.cash_book_period,
            first_and_only_period_of_2019
        )
        self.assertEqual(
            mod_settings.nominals_period,
            first_and_only_period_of_2019
        )
        self.assertEqual(
            mod_settings.purchases_period,
            first_and_only_period_of_2019
        )
        self.assertEqual(
            mod_settings.sales_period,
            first_and_only_period_of_2019
        )
def test_failure_when_fys_are_not_consecutive(self):
self.client.force_login(self.user)
FinancialYear.objects.create(financial_year=2018, number_of_periods=12)
response = self.client.post(self.url, data={
"financial_year": 2020,
"period-0-month_start": "01-2020",
"period-1-month_start": "02-2020",
"period-2-month_start": "03-2020",
"period-3-month_start": "04-2020",
"period-4-month_start": "05-2020",
"period-5-month_start": "06-2020",
"period-6-month_start": "07-2020",
"period-7-month_start": "08-2020",
"period-8-month_start": "09-2020",
"period-9-month_start": "10-2020",
"period-10-month_start": "11-2020",
"period-11-month_start": "12-2020",
"period-TOTAL_FORMS": "12",
"period-INITIAL_FORMS": "0",
"period-MIN_NUM_FORMS": "0",
"period-MAX_NUM_FORMS": "1000"
})
self.assertEqual(
response.status_code,
200
)
self.assertContains(
response,
"<li>Financial years must be consecutive. The earliest is 2018 and the latest is 2018</li>"
)
self.assertEqual(
len(
FinancialYear.objects.all()
),
1
)
self.assertEqual(
len(
Period.objects.all()
),
0
)
    def test_failure_when_period_does_not_have_month_start(self):
self.client.force_login(self.user)
response = self.client.post(self.url, data={
"financial_year": 2020,
"period-0-month_start": "01-2020",
"period-1-month_start": "02-2020",
"period-2-month_start": "03-2020",
"period-3-month_start": "04-2020",
"period-4-month_start": "05-2020",
"period-5-month_start": "06-2020",
"period-6-month_start": "07-2020",
"period-7-month_start": "08-2020",
"period-8-month_start": "09-2020",
"period-9-month_start": "10-2020",
"period-10-month_start": "11-2020",
"period-11-month_start": "",
"period-TOTAL_FORMS": "12",
"period-INITIAL_FORMS": "0",
"period-MIN_NUM_FORMS": "0",
"period-MAX_NUM_FORMS": "1000"
})
self.assertEqual(
response.status_code,
200
)
self.assertContains(
response,
"<li>All periods you wish to create must have a month selected. Delete any unwanted periods otherwise</li>"
)
def test_failure_when_month_starts_are_not_consecutive(self):
self.client.force_login(self.user)
response = self.client.post(self.url, data={
"financial_year": 2020,
"period-0-month_start": "01-2020",
"period-1-month_start": "02-2020",
"period-2-month_start": "03-2020",
"period-3-month_start": "04-2020",
"period-4-month_start": "05-2020",
"period-5-month_start": "06-2020",
"period-6-month_start": "07-2020",
"period-7-month_start": "08-2020",
"period-8-month_start": "09-2020",
"period-9-month_start": "10-2020",
"period-10-month_start": "11-2020",
"period-11-month_start": "01-2021",
"period-TOTAL_FORMS": "12",
"period-INITIAL_FORMS": "0",
"period-MIN_NUM_FORMS": "0",
"period-MAX_NUM_FORMS": "1000"
})
self.assertEqual(
response.status_code,
200
)
self.assertContains(
response,
"<li>Periods must be consecutive calendar months</li>"
)
def test_failure_when_months_across_all_fys_are_not_consecutive(self):
self.client.force_login(self.user)
fy_2019 = FinancialYear.objects.create(
financial_year=2019, number_of_periods=1)
p = Period.objects.create(
fy=fy_2019, fy_and_period="201901", period="01", month_start=date(2020, 1, 1))
response = self.client.post(self.url, data={
"financial_year": 2020,
"period-0-month_start": "01-2020",
"period-1-month_start": "02-2020",
"period-2-month_start": "03-2020",
"period-3-month_start": "04-2020",
"period-4-month_start": "05-2020",
"period-5-month_start": "06-2020",
"period-6-month_start": "07-2020",
"period-7-month_start": "08-2020",
"period-8-month_start": "09-2020",
"period-9-month_start": "10-2020",
"period-10-month_start": "11-2020",
"period-11-month_start": "12-2020",
"period-TOTAL_FORMS": "12",
"period-INITIAL_FORMS": "0",
"period-MIN_NUM_FORMS": "0",
"period-MAX_NUM_FORMS": "1000"
})
self.assertEqual(
response.status_code,
200
)
self.assertContains(
response,
"<li>Period 01 of FY 2019 is for calendar month Jan 2020. "
"But you are trying to now create a period for calendar month Jan 2020 again. "
"This is not allowed because periods must be consecutive calendar months across ALL financial years.</li>"
)
class AdjustFYTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.url = reverse("controls:fy_adjust")
cls.user = get_user_model().objects.create_superuser(
username="dummy", password="<PASSWORD>")
# ASSETS
assets = Nominal.objects.create(name="Assets", type="b")
current_assets = Nominal.objects.create(
parent=assets, name="Current Assets", type="b")
cls.bank_nominal = Nominal.objects.create(
parent=current_assets, name="Bank Account", type="b")
cls.debtors_nominal = Nominal.objects.create(
parent=current_assets, name="Trade Debtors", type="b")
# LIABILITIES
cls.liabilities = liabilities = Nominal.objects.create(
name="Liabilities", type="b"
)
cls.current_liabilities = current_liabilities = Nominal.objects.create(
name="Current Liabilities", type="b", parent=liabilities
)
cls.vat_output = vat_output = Nominal.objects.create(
name="Vat Output", type="b", parent=current_liabilities
)
def test_successful(self):
self.client.force_login(self.user)
# create 2019
fy_2019 = FinancialYear.objects.create(
financial_year=2019, number_of_periods=12)
periods = []
for i in range(12):
periods.append(
Period(
fy=fy_2019,
fy_and_period="2019" + str(i).rjust(2, "0"),
period=str(i+1).rjust(2, "0"),
month_start=date(2019, i+1, 1)
)
)
p_2019 = Period.objects.bulk_create(periods)
# create 2020
fy_2020 = FinancialYear.objects.create(
financial_year=2020, number_of_periods=12)
periods = []
for i in range(12):
periods.append(
Period(
fy=fy_2020,
fy_and_period="2020" + str(i).rjust(2, "0"),
period=str(i+1).rjust(2, "0"),
month_start=date(2020, i+1, 1)
)
)
p_2020 = Period.objects.bulk_create(periods)
periods = list(p_2019) + list(p_2020)
second_half_of_2019 = periods[6:12]
for p in second_half_of_2019:
p.fy = fy_2020
form_data = {}
for i, p in enumerate(periods):
form_data.update({
"period-" + str(i) + "-id": p.pk,
"period-" + str(i) + "-month_start": p.month_start.strftime("%m-%Y"),
"period-" + str(i) + "-period": p.period,
"period-" + str(i) + "-fy": p.fy_id
})
form_data.update({
"period-TOTAL_FORMS": str(len(periods)),
"period-INITIAL_FORMS": str(len(periods)),
"period-MIN_NUM_FORMS": "0",
"period-MAX_NUM_FORMS": "1000"
})
response = self.client.post(self.url, data=form_data)
self.assertEqual(
response.status_code,
302
)
fy_2019.refresh_from_db()
fy_2020.refresh_from_db()
periods = Period.objects.all()
periods_2019 = periods[:6]
for i, p in enumerate(periods_2019):
p.fy = fy_2019
p.month_start = date(2019, i+1, 1)
p.fy_and_period = "2019" + str(i+1).rjust(2, "0")
p.period = str(i+1).rjust(2, "0")
periods_2020 = periods[6:]
for i, p in enumerate(periods_2020):
p.fy = fy_2020
p.month_start = date(2019, 6, 1) + relativedelta(months=+i)
p.fy_and_period = "2020" + str(i+1).rjust(2, "0")
p.period = str(i+1).rjust(2, "0")
self.assertEqual(
fy_2019.number_of_periods,
6
)
self.assertEqual(
fy_2020.number_of_periods,
18
)
def test_successful_when_bfs_are_present(self):
"""
Say you have two FYs, each of 12 periods,
2019
2020
If you extend 2019 to 18 months, 2019 and 2020 are affected by the
change.
        If 2019 c/fs have already been posted as b/fs into 2020 we need to delete
        these b/fs and any b/fs posted in later periods.
"""
# create the fys and periods
self.client.force_login(self.user)
# create 2019
fy_2019 = FinancialYear.objects.create(
financial_year=2019, number_of_periods=12)
periods = []
for i in range(12):
periods.append(
Period(
fy=fy_2019,
fy_and_period="2019" + str(i).rjust(2, "0"),
period=str(i+1).rjust(2, "0"),
month_start=date(2019, i+1, 1)
)
)
p_2019 = Period.objects.bulk_create(periods)
p_201901 = fy_2019.first_period()
# create 2020
fy_2020 = FinancialYear.objects.create(
financial_year=2020, number_of_periods=12)
periods = []
for i in range(12):
periods.append(
Period(
fy=fy_2020,
fy_and_period="2020" + str(i).rjust(2, "0"),
period=str(i+1).rjust(2, "0"),
month_start=date(2020, i+1, 1)
)
)
p_2020 = Period.objects.bulk_create(periods)
p_202001 = fy_2020.first_period()
# post the bfs
# 2019
bf_2019_1 = NominalTransaction.objects.create(
module="NL",
header=1,
line=1,
date=date.today(),
ref="YEAR END 2018",
period=p_201901,
field="t",
type="nbf",
nominal=self.bank_nominal,
value=1000
)
bf_2019_2 = NominalTransaction.objects.create(
module="NL",
header=1,
line=2,
date=date.today(),
ref="YEAR END 2018",
period=p_201901,
field="t",
type="nbf",
nominal=self.vat_output,
value=-1000
)
# 2020
bf_2020_1 = NominalTransaction.objects.create(
module="NL",
header=2,
line=1,
date=date.today(),
ref="YEAR END 2019",
period=p_202001,
field="t",
type="nbf",
nominal=self.bank_nominal,
value=1000
)
bf_2020_2 = NominalTransaction.objects.create(
module="NL",
header=2,
line=2,
date=date.today(),
ref="YEAR END 2019",
period=p_202001,
field="t",
type="nbf",
nominal=self.vat_output,
value=-1000
)
# prepare for adjusting FY
periods = list(p_2019) + list(p_2020)
second_half_of_2019 = periods[6:12]
for p in second_half_of_2019:
p.fy = fy_2020
form_data = {}
for i, p in enumerate(periods):
form_data.update({
"period-" + str(i) + "-id": p.pk,
"period-" + str(i) + "-month_start": p.month_start.strftime("%m-%Y"),
"period-" + str(i) + "-period": p.period,
"period-" + str(i) + "-fy": p.fy_id
})
form_data.update({
"period-TOTAL_FORMS": str(len(periods)),
"period-INITIAL_FORMS": str(len(periods)),
"period-MIN_NUM_FORMS": "0",
"period-MAX_NUM_FORMS": "1000"
})
# now adjust via the view
response = self.client.post(self.url, data=form_data)
self.assertEqual(
response.status_code,
302
)
fy_2019.refresh_from_db()
fy_2020.refresh_from_db()
periods = Period.objects.all()
periods_2019 = periods[:6]
for i, p in enumerate(periods_2019):
p.fy = fy_2019
p.month_start = date(2019, i+1, 1)
p.fy_and_period = "2019" + str(i+1).rjust(2, "0")
p.period = str(i+1).rjust(2, "0")
periods_2020 = periods[6:]
for i, p in enumerate(periods_2020):
p.fy = fy_2020
p.month_start = date(2019, 6, 1) + relativedelta(months=+i)
p.fy_and_period = "2020" + str(i+1).rjust(2, "0")
p.period = str(i+1).rjust(2, "0")
self.assertEqual(
fy_2019.number_of_periods,
6
)
self.assertEqual(
fy_2020.number_of_periods,
18
)
# check that the b/fs posted to 01 2020 have been deleted i.e. 2020 has been rolled back
nom_trans = NominalTransaction.objects.all().order_by("pk")
self.assertEqual(
len(nom_trans),
2
)
self.assertEqual(
nom_trans[0],
bf_2019_1
)
self.assertEqual(
nom_trans[1],
bf_2019_2
)
    def test_failure_when_FY_does_not_contain_consecutive_periods(self):
self.client.force_login(self.user)
# create 2019
fy_2019 = FinancialYear.objects.create(
financial_year=2019, number_of_periods=12)
periods = []
for i in range(12):
periods.append(
Period(
fy=fy_2019,
fy_and_period="2019" + str(i).rjust(2, "0"),
period=str(i+1).rjust(2, "0"),
month_start=date(2019, i+1, 1)
)
)
p_2019 = Period.objects.bulk_create(periods)
# create 2020
fy_2020 = FinancialYear.objects.create(
financial_year=2020, number_of_periods=12)
periods = []
for i in range(12):
periods.append(
Period(
fy=fy_2020,
fy_and_period="2020" + str(i).rjust(2, "0"),
period=str(i+1).rjust(2, "0"),
month_start=date(2020, i+1, 1)
)
)
p_2020 = Period.objects.bulk_create(periods)
periods = list(p_2019) + list(p_2020)
second_half_of_2019 = periods[6:12]
for p in second_half_of_2019:
p.fy = fy_2020
second_half_of_2019[2].fy = fy_2019
form_data = {}
for i, p in enumerate(periods):
form_data.update({
"period-" + str(i) + "-id": p.pk,
"period-" + str(i) + "-month_start": p.month_start.strftime("%m-%Y"),
"period-" + str(i) + "-period": p.period,
"period-" + str(i) + "-fy": p.fy_id
})
form_data.update({
"period-TOTAL_FORMS": str(len(periods)),
"period-INITIAL_FORMS": str(len(periods)),
"period-MIN_NUM_FORMS": "0",
"period-MAX_NUM_FORMS": "1000"
})
response = self.client.post(self.url, data=form_data)
self.assertEqual(
response.status_code,
200
)
|
484428
|
import logging
from logging.handlers import WatchedFileHandler
from config import config
LOG_FORMAT = '%(asctime)s %(levelname)7s %(name)s [%(threadName)s] : %(message)s'
def logging_config():
level = logging.ERROR
if config.get('log', 'level') == 'DEBUG':
level = logging.DEBUG
elif config.get('log', 'level') == 'INFO':
level = logging.INFO
elif config.get('log', 'level') == 'WARNING':
level = logging.WARNING
log_file = None
if config.get('log', 'file'):
log_file = config.get('log', 'file')
if log_file:
handlers = [WatchedFileHandler(log_file)]
else:
handlers = [logging.StreamHandler()]
logging.basicConfig(level=level, handlers=handlers, format=LOG_FORMAT)
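# Hypothetical usage sketch: the [log] section of the config is assumed to provide
# a 'level' key (DEBUG/INFO/WARNING, anything else falls back to ERROR) and an
# optional 'file' key; an empty file value sends output to stderr.
if __name__ == '__main__':
    logging_config()
    logging.getLogger(__name__).error('logging initialised')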
|
484435
|
import abc
from dataclasses import dataclass
import math
import torch
from .basedist import ExponentialFamily
from .basedist import ConjugateLikelihood
__all__ = ['GammaLikelihood', 'Gamma', 'GammaStdParams']
EPS = 1e-6
@dataclass
class GammaLikelihood(ConjugateLikelihood):
dim: int
def sufficient_statistics_dim(self, zero_stats=True):
zero_stats_dim = 1 if zero_stats else 0
return 2 * self.dim + zero_stats_dim
@staticmethod
def sufficient_statistics(data):
dim, dtype, device = data.shape[1], data.dtype, data.device
return torch.cat([
-data,
data.log(),
torch.ones(len(data), 1, dtype=data.dtype, device=data.device)
], dim=-1)
def parameters_from_pdfvector(self, pdfvec):
size = pdfvec.shape
if len(size) == 1:
pdfvec = pdfvec.view(1, -1)
dim = self.dim
rate = -pdfvec[:, :dim]
shape = pdfvec[:, dim: 2 * dim] + 1
if len(size) == 1:
return shape.view(-1), rate.view(-1)
return shape.view(-1, dim), rate.view(-1, dim)
def pdfvectors_from_rvectors(self, rvecs):
dim = rvecs.shape[-1] // 2
shape = (EPS + rvecs[:, :dim]).exp()
rate = (EPS + rvecs[:, dim:]).exp()
lnorm = torch.lgamma(shape) - shape * torch.log(rate)
lnorm = torch.sum(lnorm, dim=-1, keepdim=True)
retval = torch.cat([-rate, shape - 1, -lnorm], dim=-1)
return retval
def __call__(self, pdfvecs, stats):
if len(pdfvecs.shape) == 1:
pdfvecs = pdfvecs.view(1, -1)
return stats @ pdfvecs.t()
@dataclass(init=False, unsafe_hash=True)
class GammaStdParams(torch.nn.Module):
shape: torch.Tensor
rate: torch.Tensor
def __init__(self, shape, rate):
super().__init__()
self.register_buffer('shape', shape)
self.register_buffer('rate', rate)
@classmethod
def from_natural_parameters(cls, natural_params):
npsize = natural_params.shape
if len(npsize) == 1:
natural_params = natural_params.view(1, -1)
dim = natural_params.shape[-1] // 2
shape = natural_params[:, dim:] + 1
rate = -natural_params[:, :dim]
if len(npsize) == 1:
return cls(shape.view(-1), rate.view(-1))
return cls(shape, rate)
class Gamma(ExponentialFamily):
_std_params_def = {
'shape': 'Shape parameter of the Gamma.',
'rate': 'Rate parameter of the Gamma.'
}
_std_params_cls = GammaStdParams
def __len__(self):
        # `shape` here is the Gamma shape parameter; its tensor shape gives the batch size
        paramshape = self.params.shape.shape
return 1 if len(paramshape) <= 1 else paramshape[0]
@property
def dim(self):
shape = self.params.shape
size = len(shape.shape) if len(shape.shape) > 0 else 1
if size == 1:
return len(shape)
return tuple(shape.shape)
def conjugate(self):
return GammaLikelihood(self.params.shape.shape[-1])
def expected_sufficient_statistics(self):
'''
stats = (
x,
ln(x)
)
E[stats] = (
a / b,
psi(a) - ln(b),
)
'''
shape, rate = self.params.shape, self.params.rate
return torch.cat([shape / rate, torch.digamma(shape) - torch.log(rate)],
dim=-1)
def expected_value(self):
return self.params.shape / self.params.rate
def log_norm(self):
shape, rate = self.params.shape, self.params.rate
return (torch.lgamma(shape) - shape * torch.log(rate)).sum(dim=-1)
# TODO
def sample(self, nsamples):
raise NotImplementedError
def natural_parameters(self):
'''
        nparams = (
            -b,
            a - 1
        )
'''
shape, rate = self.params.shape, self.params.rate
return torch.cat([-rate, shape - 1], dim=-1)
def update_from_natural_parameters(self, natural_params):
self.params = self.params.from_natural_parameters(natural_params)
|
484447
|
class Singleton:
_instance = None
def __init__(self):
# Singleton pattern must prevent normal instantiation
raise Exception("Cannot directly instantiate a Singleton. Access via get_instance()")
@classmethod
def get_instance(cls):
# This is the only way to access the one and only Controller
if cls._instance is None:
cls._instance = cls.__new__(cls)
return cls._instance
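# Illustrative usage sketch (not part of the original snippet): every call to
# get_instance() returns the same object, while direct construction is rejected.
if __name__ == "__main__":
    a = Singleton.get_instance()
    b = Singleton.get_instance()
    assert a is b  # both names refer to the one shared instance
    try:
        Singleton()  # direct instantiation raises
    except Exception as exc:
        print(exc)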
|
484484
|
import numpy as np
def multi_to_one_dim(in_shape, in_index):
""" Convert an index from a multi-dimension into the corresponding index in flat array """
out_index = 0
for dim, index in zip(in_shape, in_index):
out_index = dim * out_index + index
return out_index
def one_to_multi_dim(out_shape, in_index):
""" Convert index in a flat array to index in corresponding multi-dimension array """
n_dim = len(out_shape)
out_index = np.empty(n_dim)
remainder = in_index
for i, dim in enumerate(reversed(out_shape)):
index = remainder % dim
remainder = remainder // dim
out_index[n_dim - i - 1] = index
return out_index
def multi_to_multi_dim(in_shape, out_shape, in_index):
"""" Multi dimension to multi dimension index transformation """
return one_to_multi_dim(out_shape, multi_to_one_dim(in_shape, in_index))
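# Quick consistency check (illustrative): these helpers agree with NumPy's
# built-in ravel/unravel functions for a hypothetical 3 x 4 x 5 array.
if __name__ == "__main__":
    shape = (3, 4, 5)
    idx = (1, 2, 3)
    flat = multi_to_one_dim(shape, idx)
    assert flat == np.ravel_multi_index(idx, shape) == 33
    assert tuple(one_to_multi_dim(shape, flat).astype(int)) == tuple(np.unravel_index(flat, shape))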
|
484492
|
import numpy as np
import gnumpy as gp
import pickle
# gp.board_id_to_use = 1
class SGD:
def __init__(self,model,alpha=1e-2,minibatch=256,
optimizer='momentum',momentum=0.9):
self.model = model
assert self.model is not None, "Must define a function to optimize"
self.it = 0
self.momentum = momentum # momentum
self.alpha = alpha # learning rate
self.minibatch = minibatch # minibatch
self.optimizer = optimizer
if self.optimizer == 'momentum' or self.optimizer == 'nesterov':
print "Using %s.."%self.optimizer
self.velocity = [[gp.zeros(w.shape),gp.zeros(b.shape)]
for w,b in self.model.stack]
elif self.optimizer == 'adagrad' or self.optimizer == 'adagrad3' or self.optimizer == 'adadelta':
print "Using %s.."%self.optimizer
self.gradt = [[gp.zeros(w.shape),gp.zeros(b.shape)]
for w,b in self.model.stack]
elif self.optimizer == 'adaccel2':
print "Using adaccel2.."
self.gradt = [[gp.zeros(w.shape),gp.zeros(b.shape)]
for w,b in self.model.stack]
self.velocity = [[gp.zeros(w.shape),gp.zeros(b.shape)]
for w,b in self.model.stack]
elif self.optimizer == 'sgd':
print "Using sgd.."
else:
raise ValueError("Invalid optimizer")
self.costt = []
self.expcost = []
def run(self,data,labels=None):
"""
Runs stochastic gradient descent with model as objective. Expects
data in n x m matrix where n is feature dimension and m is number of
training examples
"""
m = data.shape[1]
# momentum setup
momIncrease = 10
mom = 0.5
# randomly select minibatch
perm = np.random.permutation(range(m))
        for i in range(0, m - self.minibatch + 1, self.minibatch):
self.it += 1
mb_data = data[:,perm[i:i+self.minibatch]]
mb_data = gp.garray(mb_data)
if self.optimizer == 'nesterov' or self.optimizer == 'adaccel2':
# w = w+mom*velocity (evaluate gradient at future point)
self.model.updateParams(mom,self.velocity)
if labels is None:
cost,grad = self.model.costAndGrad(mb_data)
else:
mb_labels = labels[perm[i:i+self.minibatch]]
cost,grad = self.model.costAndGrad(mb_data,mb_labels)
# undo update
if self.optimizer == 'nesterov' or self.optimizer == 'adaccel2':
# w = w-mom*velocity
self.model.updateParams(-mom,self.velocity)
# compute exponentially weighted cost
if self.it > 1:
self.expcost.append(.01*cost + .99*self.expcost[-1])
else:
self.expcost.append(cost)
if self.optimizer == 'momentum':
if self.it > momIncrease:
mom = self.momentum
# velocity = mom*velocity + eta*grad
self.velocity = [[mom*vs[0]+self.alpha*g[0],mom*vs[1]+self.alpha*g[1]]
for vs,g in zip(self.velocity,grad)]
update = self.velocity
scale = -1.0
elif self.optimizer == 'adagrad':
epsilon = 1e-8
# trace = trace+grad.^2
self.gradt = [[gt[0]+g[0]*g[0]+epsilon,gt[1]+g[1]*g[1]+epsilon]
for gt,g in zip(self.gradt,grad)]
# update = grad.*trace.^(-1/2)
update = [[g[0]*(1./gp.sqrt(gt[0])),g[1]*(1./gp.sqrt(gt[1]))]
for gt,g in zip(self.gradt,grad)]
scale = -self.alpha
elif self.optimizer == 'adagrad3':
epsilon = 1e-8
# trace = trace+grad.^2
self.gradt = [[gt[0]+g[0]*g[0]+epsilon,gt[1]+g[1]*g[1]+epsilon]
for gt,g in zip(self.gradt,grad)]
# update = grad.*trace.^(-1/3)
update = [[g[0]*(1./(gt[0]**(1./3))),g[1]*(1./(gt[1]**(1./3)))]
for gt,g in zip(self.gradt,grad)]
scale = -self.alpha
elif self.optimizer == 'nesterov':
# velocity = mom*velocity - alpha*grad
self.velocity = [[mom*vs[0]-self.alpha*g[0],mom*vs[1]-self.alpha*g[1]]
for vs,g in zip(self.velocity,grad)]
update = self.velocity
scale = 1.0
elif self.optimizer == 'adadelta':
epsilon = 1e-9
gamma = 1.-(100./(1000.+self.it))
print "Gamma is %f"%gamma
# trace = trace+grad.^2
self.gradt = [[gamma*gt[0]+g[0]*g[0]+epsilon,gamma*gt[1]+g[1]*g[1]+epsilon]
for gt,g in zip(self.gradt,grad)]
# update = grad.*trace.^(-1/2)
update = [[g[0]*(1./gp.sqrt(gt[0])),g[1]*(1./gp.sqrt(gt[1]))]
for gt,g in zip(self.gradt,grad)]
scale = -self.alpha
elif self.optimizer == 'adaccel2':
# velocity = mom*velocity - alpha*grad
self.velocity = [[mom*vs[0]-self.alpha*g[0],mom*vs[1]-self.alpha*g[1]]
for vs,g in zip(self.velocity,grad)]
# trace = trace+grad.^2
self.gradt = [[gt[0]+g[0]*g[0],gt[1]+g[1]*g[1]]
for gt,g in zip(self.gradt,grad)]
# update = velocity.*trace.^(-1/2)
update = [[v[0]*(1./(gt[0]**(1./2))),v[1]*(1./(gt[1]**(1./2)))]
for gt,v in zip(self.gradt,self.velocity)]
scale = 1.0
elif self.optimizer == 'sgd':
update = grad
scale = -self.alpha
# update params
self.model.updateParams(scale,update)
self.costt.append(cost)
if self.it%10 == 0:
print "Iter %d : Cost=%.4f, ExpCost=%.4f."%(self.it,cost,self.expcost[-1])
|
484502
|
import click
import yaml
from ckan_cloud_operator import logs
from . import manager
@click.group()
def proxy():
"""Manage SOLR proxy for centralized unauthenticated access"""
pass
@proxy.command()
def initialize():
manager.deploy()
logs.exit_great_success()
|
484576
|
from CSS3.completions import types as t
font_feature_types = [
("@annotation", "@annotation {\n\t${1}\n}"),
("@character-variant", "@character-variant {\n\t${1}\n}"),
("@ornaments", "@ornaments {\n\t${1}\n}"),
("@styleset", "@styleset {\n\t${1}\n}"),
("@stylistic", "@stylistic {\n\t${1}\n}"),
("@swash", "@swash {\n\t${1}\n}"),
]
namespace_values = [t.identifier, t.string, t.url]
nestable = [
# @-rules that can appear inside other @-rules.
("@counter-style", "@counter-style ${1:name} {\n\t${2}\n}"),
("@font-face", "@font-face {\n\t${1}\n}"),
("@font-feature-values", "@font-feature-values ${1:font-family} {\n\t${2}\n}"),
("@font-palette-values", "@font-palette-values ${1} {\n\t${2}\n}"),
("@keyframes", "@keyframes ${1:name} {\n\t${2}\n}"),
("@media", "@media ${1:media-query-list} {\n\t${2}\n}"),
("@page", "@page ${1}{\n\t${2}\n}"),
("@viewport", "@viewport {\n\t${1}\n}"),
("@supports", "@supports ${1} {\n\t${2}\n}"),
]
page_margin_boxes = [
("@bottom-center", "@bottom-center {\n\t${1}\n}"),
("@bottom-left", "@bottom-left {\n\t${1}\n}"),
("@bottom-left-corner", "@bottom-left-corner {\n\t${1}\n}"),
("@bottom-right", "@bottom-right {\n\t${1}\n}"),
("@bottom-right-corner", "@bottom-right-corner {\n\t${1}\n}"),
("@left-bottom", "@left-bottom {\n\t${1}\n}"),
("@left-middle", "@left-middle {\n\t${1}\n}"),
("@left-top", "@left-top {\n\t${1}\n}"),
("@right-bottom", "@right-bottom {\n\t${1}\n}"),
("@right-middle", "@right-middle {\n\t${1}\n}"),
("@right-top", "@right-top {\n\t${1}\n}"),
("@top-center", "@top-center {\n\t${1}\n}"),
("@top-left", "@top-left {\n\t${1}\n}"),
("@top-left-corner", "@top-left-corner {\n\t${1}\n}"),
("@top-right", "@top-right {\n\t${1}\n}"),
("@top-right-corner", "@top-right-corner {\n\t${1}\n}"),
]
all_rules = [
# @-rules that can appear at the top level only.
("@charset", "@charset 'UTF-8';"),
("@color-profile", "@color-profile ${1} {\n\t${2}\n}"),
("@custom-media", "@custom-media --${1:name} ${2:media-query-list};"),
("@import", "@import ${1:path} ${2:media-query-list};"),
("@namespace", "@namespace ${1};"),
("@property", "@property ${1:name} {\n\t${2}\n}"),
] + nestable
all_rules.sort()
scopes_that_forbid_nested_at_rules = (
"meta.declaration-list.css, "
"meta.at-rule.font-face.block.css, "
"meta.at-rule.keyframes.block.css, "
"meta.at-rule.font-feature-values.block.css, "
"meta.at-rule.viewport.block.css, "
"meta.at-rule.color-profile.block.css, "
"meta.at-rule.counter-style.block.css, "
"meta.at-rule.page.block.css"
)
def supports_nested(view, location):
"""Returns True if location is in @media or @supports, but NOT any other
scope.
@media and @supports can have @-rules nested inside.
"""
if not view.match_selector(
location, "meta.at-rule.media.block.css, meta.at-rule.supports.block.css"
):
return False
return not view.match_selector(location, scopes_that_forbid_nested_at_rules)
|
484581
|
from django.conf.urls import url
from ocfweb.login.calnet import login as calnet_login
from ocfweb.login.calnet import logout as calnet_logout
from ocfweb.login.ocf import login
from ocfweb.login.ocf import logout
urlpatterns = [
url(r'^login/$', login, name='login'),
url(r'^logout/$', logout, name='logout'),
url(r'^calnet/login/$', calnet_login, name='calnet_login'),
url(r'^calnet/logout/$', calnet_logout, name='calnet_logout'),
]
|
484587
|
import os
import tempfile
from resotolib.x509 import (
gen_rsa_key,
gen_csr,
bootstrap_ca,
sign_csr,
write_csr_to_file,
write_cert_to_file,
write_key_to_file,
load_csr_from_file,
load_cert_from_file,
load_key_from_file,
key_to_bytes,
cert_fingerprint,
)
def test_x509():
with tempfile.TemporaryDirectory() as tmp:
ca_key, ca_cert = bootstrap_ca()
cert_key = gen_rsa_key()
gen_csr(cert_key) # dummy call to generate CSR without SANs
cert_csr = gen_csr(
cert_key,
san_dns_names=["example.com"],
san_ip_addresses=["10.0.1.1", "10.0.0.0/24"],
)
cert_crt = sign_csr(cert_csr, ca_key, ca_cert)
ca_key_path = os.path.join(tmp, "ca.key")
ca_cert_path = os.path.join(tmp, "ca.crt")
cert_key_path = os.path.join(tmp, "cert.key")
cert_key_passphrase = "<PASSWORD>"
cert_csr_path = os.path.join(tmp, "cert.csr")
cert_crt_path = os.path.join(tmp, "cert.crt")
write_key_to_file(ca_key, key_path=ca_key_path)
write_cert_to_file(ca_cert, cert_path=ca_cert_path)
write_key_to_file(
cert_key, key_path=cert_key_path, passphrase=cert_key_passphrase
)
write_csr_to_file(cert_csr, csr_path=cert_csr_path)
write_cert_to_file(cert_crt, cert_path=cert_crt_path)
loaded_ca_key = load_key_from_file(ca_key_path)
loaded_ca_cert = load_cert_from_file(ca_cert_path)
loaded_cert_key = load_key_from_file(
cert_key_path, passphrase=cert_key_passphrase
)
loaded_cert_csr = load_csr_from_file(cert_csr_path)
loaded_cert_crt = load_cert_from_file(cert_crt_path)
assert loaded_ca_cert == ca_cert
assert loaded_cert_csr == cert_csr
assert loaded_cert_crt == cert_crt
assert cert_fingerprint(loaded_ca_cert) == cert_fingerprint(ca_cert)
assert cert_fingerprint(loaded_cert_crt) == cert_fingerprint(cert_crt)
assert key_to_bytes(ca_key) == key_to_bytes(loaded_ca_key)
assert key_to_bytes(cert_key) == key_to_bytes(loaded_cert_key)
|
484642
|
import torch
import torch.nn as nn
import torch.distributions as dist
from torch.distributions.kl import kl_divergence
from .gp_utils import vec2tril, mat2trilvec, cholesky, rev_cholesky, gp_cond, linear_joint, linear_marginal_diag
from .kernels import RBFKernel, DeepRBFKernel
from .likelihoods import MulticlassSoftmax
class VARGP(nn.Module):
def __init__(self, z_init, kernel, likelihood, n_var_samples=1, ep_var_mean=True, prev_params=None):
super().__init__()
self.var_mean_mask = float(ep_var_mean)
self.prev_params = [
dict(z=p['z'], u_mean=p['u_mean'], u_tril=vec2tril(p['u_tril_vec']))
for p in (prev_params or [])
]
self.M = z_init.size(-2)
self.kernel = kernel
self.n_v = n_var_samples
self.likelihood = likelihood
self.z = nn.Parameter(z_init.detach())
out_size = self.z.size(0)
self.u_mean = nn.Parameter(torch.Tensor(out_size, self.M, 1).normal_(0., .5))
self.u_tril_vec = nn.Parameter(
mat2trilvec(torch.eye(self.M).unsqueeze(0).expand(out_size, -1, -1)))
def compute_q(self, theta, cache=None):
'''
Compute variational auto-regressive distributions.
Arguments:
theta: n_hypers x (D + 1)
Returns
mu_lt: n_hypers x out_size x (\sum M_t - M_T) x 1
S_lt: n_hypers x out_size x (\sum M_t - M_T) x (\sum M_t - M_T)
mu_leq_t: n_hypers x out_size x (\sum M_t) x 1
S_leq_t: n_hypers x out_size x (\sum M_t) x (\sum M_t)
z_leq_t: out_size x (\sum M_t) x D
'''
n_hypers = theta.size(0)
## Compute q(u_{<t} | \theta)
z_lt = self.prev_params[0]['z']
mu_lt = self.prev_params[0]['u_mean']
S_lt = rev_cholesky(self.prev_params[0]['u_tril'])
if mu_lt.dim() == 3:
mu_lt = mu_lt.unsqueeze(0).expand(n_hypers, -1, -1, -1)
if S_lt.dim() == 3:
S_lt = S_lt.unsqueeze(0).expand(n_hypers, -1, -1, -1)
for params in self.prev_params[1:]:
Kzx = self.kernel.compute(theta, z_lt, params['z'])
Kzz = self.kernel.compute(theta, z_lt)
V = rev_cholesky(params['u_tril']).unsqueeze(0).expand(n_hypers, -1, -1, -1)
b = params['u_mean'].unsqueeze(0).expand(n_hypers, -1, -1, -1)
mu_lt, S_lt = linear_joint(mu_lt, S_lt, Kzx, Kzz, V, b)
z_lt = torch.cat([z_lt, params['z']], dim=-2)
## Compute q(u_{\leq t} | \theta)
Kzx = self.kernel.compute(theta, z_lt, self.z)
Kzz = self.kernel.compute(theta, z_lt)
V = rev_cholesky(vec2tril(self.u_tril_vec)).unsqueeze(0).expand(n_hypers, -1, -1, -1)
b = self.u_mean.unsqueeze(0).expand(n_hypers, -1, -1, -1)
cache_leq_t = dict()
mu_leq_t, S_leq_t = linear_joint(mu_lt, S_lt, Kzx, Kzz, V, b, cache=cache_leq_t)
z_leq_t = torch.cat([z_lt, self.z], dim=-2)
if isinstance(cache, dict):
cache['Lz_lt'] = cache_leq_t['Lz']
cache['Lz_lt_Kz_lt_z_t'] = cache_leq_t['Lz_Kzx']
return mu_lt, S_lt, \
mu_leq_t, S_leq_t, \
z_leq_t
def compute_pf_diag(self, theta, x, mu_leq_t, S_leq_t, z_leq_t, cache=None):
'''
Compute p(f) = \int p(f|u_{\leq t})q(u_{\leq t}).
Only diagonal of covariance for p(f) is used.
Arguments:
theta: n_hypers x (D + 1)
x: B x D
mu_leq_t: [n_hypers] x out_size x (\sum M_t) x 1
S_leq_t: [n_hypers] x out_size x (\sum M_t) x (\sum M_t)
z_leq_t: out_size x (\sum M_t) x D
Returns:
f_mean: n_hypers x out_size x B
f_var: n_hypers x out_size x B
'''
xf = x.unsqueeze(0).expand(z_leq_t.size(0), -1, -1)
Kzz = self.kernel.compute(theta, z_leq_t)
Kzx = self.kernel.compute(theta, z_leq_t, xf)
Kxx_diag = self.kernel.compute_diag(theta)
f_mean, f_var = linear_marginal_diag(mu_leq_t, S_leq_t, Kzz, Kzx, Kxx_diag, cache=cache)
return f_mean, f_var
def forward(self, x, loss_cache=False):
'''
Arguments:
x: B x in_size
Returns:
Output distributions for n_hypers samples of hyperparameters.
The output contains only diagonal of the full covariance.
pred_mu: n_hypers x out_size x B
pred_var: n_hypers x out_size x B
'''
theta = self.kernel.sample_hypers(self.n_v)
if self.prev_params:
cache_q = dict()
mu_lt, S_lt, mu_leq_t, S_leq_t, z_leq_t = self.compute_q(theta, cache=cache_q)
pred_mu, pred_var = self.compute_pf_diag(theta, x, mu_leq_t, S_leq_t, z_leq_t)
if isinstance(loss_cache, dict):
q_lt = dist.MultivariateNormal(mu_lt.squeeze(-1), covariance_matrix=S_lt)
u_lt = q_lt.rsample(torch.Size([self.n_v])).unsqueeze(-1)
# u_lt = mu_lt.unsqueeze(0)
if u_lt.dim() == 4:
u_lt = u_lt.unsqueeze(1)
## Compute p(u_t | u_{<t}, \theta)
Lz = cache_q.pop('Lz_lt').unsqueeze(0)
Lz_Kzx = cache_q.pop('Lz_lt_Kz_lt_z_t').unsqueeze(0).expand(self.n_v, *([-1] * (Lz.dim() - 1)))
Kzz = self.kernel.compute(theta, self.z).unsqueeze(0)
prior_mu_t, prior_cov_t = gp_cond(u_lt, None, None, Kzz, Lz=Lz, Lz_Kzx=Lz_Kzx)
## Compute q(u_t | u_{<t}, \theta)
var_mu_t = prior_mu_t * self.var_mean_mask + self.u_mean.unsqueeze(0).unsqueeze(0)
var_L_cov_t = vec2tril(self.u_tril_vec, self.M).unsqueeze(0).unsqueeze(0)
loss_cache.update(dict(var_mu_t=var_mu_t.squeeze(-1), var_L_cov_t=var_L_cov_t,
prior_mu_t=prior_mu_t.squeeze(-1), prior_L_cov_t=cholesky(prior_cov_t)))
else:
cache_pf = dict()
mu_leq_t = self.u_mean
L_cov_leq_t = vec2tril(self.u_tril_vec, self.M)
pred_mu, pred_var = self.compute_pf_diag(theta, x, mu_leq_t, rev_cholesky(L_cov_leq_t), self.z, cache=cache_pf)
if isinstance(loss_cache, dict):
# Compute q(u_1)
mu_t = mu_leq_t.squeeze(-1).unsqueeze(0).unsqueeze(0)
L_cov_t = L_cov_leq_t.unsqueeze(0).unsqueeze(0)
# Compute p(u_1)
prior_mu_t = torch.zeros_like(mu_t)
prior_L_cov_t = cache_pf.pop('Lz').unsqueeze(0)
loss_cache.update(dict(var_mu_t=mu_t, var_L_cov_t=L_cov_t, prior_mu_t=prior_mu_t, prior_L_cov_t=prior_L_cov_t))
return pred_mu, pred_var
def loss(self, x, y):
loss_cache = dict()
pred_mu, pred_var = self(x, loss_cache=loss_cache)
nll = self.likelihood.loss(pred_mu, pred_var, y)
var_dist = dist.MultivariateNormal(
loss_cache.pop('var_mu_t'),
scale_tril=loss_cache.pop('var_L_cov_t'))
prior_dist = dist.MultivariateNormal(
loss_cache.pop('prior_mu_t'),
scale_tril=loss_cache.pop('prior_L_cov_t'))
kl_u = kl_divergence(var_dist, prior_dist).sum(dim=-1).mean(dim=0).mean(dim=0)
kl_hypers = self.kernel.kl_hypers()
return kl_hypers, kl_u, nll
def predict(self, x):
pred_mu, pred_var = self(x)
return self.likelihood.predict(pred_mu, pred_var)
@staticmethod
def create_clf(dataset, M=20, n_f=10, n_var_samples=3, prev_params=None,
ep_var_mean=True, map_est_hypers=False, dkl=False):
N = len(dataset)
out_size = torch.unique(dataset.targets).size(0)
## init inducing points at random data points.
z = torch.stack([
dataset[torch.randperm(N)[:M]][0]
for _ in range(out_size)])
prior_log_mean, prior_log_logvar = None, None
phi_params = None
if prev_params:
## Init kernel hyperprior to last timestep.
prior_log_mean = prev_params[-1].get('kernel.log_mean')
prior_log_logvar = prev_params[-1].get('kernel.log_logvar')
## Init kernel NN to last timestep if available.
if dkl:
phi_params = {k[11:]: v for k, v in prev_params[-1].items() if k.startswith('kernel.phi.')}
def process(p):
for k in list(p.keys()):
if k.startswith('kernel'):
p.pop(k)
return p
prev_params = [process(p) for p in prev_params]
if dkl:
kernel = DeepRBFKernel(z.size(-1), prior_log_mean=prior_log_mean,
prior_log_logvar=prior_log_logvar, map_est=map_est_hypers)
if phi_params is not None:
kernel.phi.load_state_dict(phi_params)
else:
kernel = RBFKernel(z.size(-1), prior_log_mean=prior_log_mean,
prior_log_logvar=prior_log_logvar, map_est=map_est_hypers)
likelihood = MulticlassSoftmax(n_f=n_f)
gp = VARGP(z, kernel, likelihood, n_var_samples=n_var_samples,
ep_var_mean=ep_var_mean, prev_params=prev_params)
return gp
|
484659
|
import numpy as np
import colorsys
import matplotlib.colors as mc
def rgb2hex(r, g, b):
"""RGB to hexadecimal."""
return "#%02x%02x%02x" % (r, g, b)
# Ersilia colors
eos = {
"dark": "#50285a",
"gray": "#d2d2d0",
"green": "#bee6b4",
"white": "#ffffff",
"purple": "#aa96fa",
"pink": "#dca0dc",
"yellow": "#fad782",
"blue": "#8cc8fa",
"red": "#faa08c",
}
# Chemical Checker colors
cc = {
"red": rgb2hex(250, 100, 80),
"purple": rgb2hex(200, 100, 225),
"blue": rgb2hex(80, 120, 220),
"green": rgb2hex(120, 180, 60),
"orange": rgb2hex(250, 100, 80),
}
def lighten_color(color, amount=0.5):
"""Lighthen a color."""
try:
c = mc.cnames[color]
except Exception:
c = color
c = colorsys.rgb_to_hls(*mc.to_rgb(c))
return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])
def cc_colors(coord, lighness=0):
"""Predefined CC colors."""
colors = {
"A": ["#EA5A49", "#EE7B6D", "#F7BDB6"],
"B": ["#B16BA8", "#C189B9", "#D0A6CB"],
"C": ["#5A72B5", "#7B8EC4", "#9CAAD3"],
"D": ["#7CAF2A", "#96BF55", "#B0CF7F"],
"E": ["#F39426", "#F5A951", "#F8BF7D"],
"Z": ["#000000", "#666666", "#999999"],
}
return colors[coord[:1]][lighness]
def make_cmap(colors, position=None, bit=False):
bit_rgb = np.linspace(0, 1, 256)
if position is None:
position = np.linspace(0, 1, len(colors))
else:
if len(position) != len(colors):
raise Exception("position length must be the same as colors")
elif position[0] != 0 or position[-1] != 1:
raise Exception("position must start with 0 and end with 1")
if bit:
for i in range(len(colors)):
colors[i] = (
bit_rgb[colors[i][0]],
bit_rgb[colors[i][1]],
bit_rgb[colors[i][2]],
)
cdict = {"red": [], "green": [], "blue": []}
for pos, color in zip(position, colors):
cdict["red"].append((pos, color[0], color[0]))
cdict["green"].append((pos, color[1], color[1]))
cdict["blue"].append((pos, color[2], color[2]))
    cmap = mc.LinearSegmentedColormap("my_colormap", cdict, 256)
return cmap
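# Illustrative usage sketch: building a three-colour map from RGB tuples in [0, 1]
# and lightening one of the Ersilia colours. The colour choices are arbitrary.
if __name__ == "__main__":
    cmap = make_cmap([(1, 0, 0), (1, 1, 1), (0, 0, 1)])  # red -> white -> blue
    print(cmap(0.5))                                      # RGBA value near white
    print(lighten_color(eos["dark"], amount=0.3))         # lighter shade of the dark purple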
|
484670
|
import logging
from .main import ItemProvider
from .views import Handler
logging.info('docker.__init__.py: docker loaded')
|
484685
|
import tensorflow as tf
from tensorflow.python.ops import rnn_cell
from experiment.qa.model import QAModel
from experiment.qa.model.helper.pooling_helper import non_zero_tokens, maxpool
class BiLSTMModel(QAModel):
"""An LSTM model with 1-max pooling to learn the representation"""
def __init__(self, config, config_global, logger):
super(BiLSTMModel, self).__init__(config, config_global, logger)
self.lstm_cell_size = self.config['lstm_cell_size']
def build(self, data, sess):
self.build_input(data, sess)
        # we initialize the weights of the representation layers globally so that they can be applied to both questions
        # and (good/bad) answers. This is an important part, otherwise results would be much worse.
self.initialize_weights()
representation_question = maxpool(
self.bilstm_representation_raw(
self.embeddings_question,
self.input_question,
re_use_lstm=False
)
)
representation_answer_good = maxpool(
self.bilstm_representation_raw(
self.embeddings_answer_good,
self.input_answer_good,
re_use_lstm=True
)
)
representation_answer_bad = maxpool(
self.bilstm_representation_raw(
self.embeddings_answer_bad,
self.input_answer_bad,
re_use_lstm=True
)
)
self.create_outputs(
representation_question,
representation_answer_good,
representation_question,
representation_answer_bad
)
def initialize_weights(self):
"""Global initialization of weights for the representation layer
"""
with tf.variable_scope('lstm_cell_fw'):
self.lstm_cell_forward = rnn_cell.BasicLSTMCell(self.lstm_cell_size, state_is_tuple=True)
with tf.variable_scope('lstm_cell_bw'):
self.lstm_cell_backward = rnn_cell.BasicLSTMCell(self.lstm_cell_size, state_is_tuple=True)
def bilstm_representation_raw(self, item, indices, re_use_lstm, name='lstm'):
"""Creates a representation graph which retrieves a text item (represented by its word embeddings) and returns
a vector-representation
:param item: the text item. Can be question or (good/bad) answer
        :param indices: token index tensor, used to derive the sequence length
        :param re_use_lstm: should be False for the first call, True for all subsequent ones to get the same lstm
        variables
:return: representation tensor
"""
tensor_non_zero_token = non_zero_tokens(tf.to_float(indices))
sequence_length = tf.to_int64(tf.reduce_sum(tensor_non_zero_token, 1))
with tf.variable_scope(name, reuse=re_use_lstm):
output, _last = tf.nn.bidirectional_dynamic_rnn(
self.lstm_cell_forward,
self.lstm_cell_backward,
item,
dtype=tf.float32,
sequence_length=sequence_length
)
return tf.concat(axis=2, values=output)
component = BiLSTMModel
|
484689
|
import pkgutil
import os
import importlib
# make sure all modules relying on MasterDataEntity are imported before the tests are run
# otherwise the utils tests for AssetcentralEntitySet relying on __subclasses__ will not include all subclasses,
# and will depend on execution order of tests
path = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'sailor', 'assetcentral')
for importer, package_name, _ in pkgutil.iter_modules([path]):
module = importlib.import_module('sailor.assetcentral.' + package_name)
path = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'sailor', 'pai')
for importer, package_name, _ in pkgutil.iter_modules([path]):
module = importlib.import_module('sailor.pai.' + package_name)
|
484771
|
import numpy as np
import neurolab as nl
# Input file
input_file = 'letter.data'
# Number of datapoints to load from the input file
num_datapoints = 20
# Distinct characters
orig_labels = 'omandig'
# Number of distinct characters
num_output = len(orig_labels)
# Training and testing parameters
num_train = int(0.9 * num_datapoints)
num_test = num_datapoints - num_train
# Define dataset extraction parameters
start_index = 6
end_index = -1
# Creating the dataset
data = []
labels = []
with open(input_file, 'r') as f:
for line in f.readlines():
# Split the line tabwise
list_vals = line.split('\t')
# If the label is not in our ground truth labels, skip it
if list_vals[1] not in orig_labels:
continue
# Extract the label and append it to the main list
label = np.zeros((num_output, 1))
label[orig_labels.index(list_vals[1])] = 1
labels.append(label)
# Extract the character vector and append it to the main list
cur_char = np.array([float(x) for x in list_vals[start_index:end_index]])
data.append(cur_char)
# Exit the loop once the required dataset has been loaded
if len(data) >= num_datapoints:
break
# Convert data and labels to numpy arrays
data = np.asfarray(data)
labels = np.array(labels).reshape(num_datapoints, num_output)
# Extract number of dimensions
num_dims = len(data[0])
# Create and train neural network
net = nl.net.newff([[0, 1] for _ in range(len(data[0]))], [128, 16, num_output])
net.trainf = nl.train.train_gd
error = net.train(data[:num_train,:], labels[:num_train,:], epochs=10000,
show=100, goal=0.01)
# Predict the output for test inputs
predicted_output = net.sim(data[num_train:, :])
print("Testing on unknown data:")
for i in range(num_test):
print("Original:", orig_labels[np.argmax(labels[i])])
print("Predicted:", orig_labels[np.argmax(predicted_output[i])])
|
484776
|
import asyncio
from datetime import datetime
import aiohttp
from bs4 import BeautifulSoup
async def get_url(session, url):
headers = {
"accept-language": "en-US;q=0.8,en;q=0.7",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36"
# Add any header you want
}
print("Crawling: {}".format(url))
results = []
resp = await session.get(url, headers=headers)
print("Crawled: {}".format(url))
html_content = await resp.text()
soup = BeautifulSoup(html_content, 'html.parser')
for link in soup.select('a.storylink'):
results.append('{};{}'.format(link.get('href'), link.text))
return results
async def main():
max_concurrency = 3
num_pages = 20
tasks = []
urls = ['https://news.ycombinator.com/news?p={}'.format(idx_page) for idx_page in range(1, num_pages)]
connector = aiohttp.TCPConnector(limit=max_concurrency)
session = aiohttp.ClientSession(connector=connector)
start = datetime.now()
for url in urls:
tasks.append(asyncio.ensure_future(get_url(session, url)))
results_tasks = await asyncio.gather(*tasks)
links = []
for sublist_results in results_tasks:
for link in sublist_results:
links.append(link)
end = datetime.now()
print("Results obtained in {}:".format(end - start))
print(links)
    await session.close()
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
484813
|
import abc
from dataclasses import dataclass
from typing import Optional
from yarl import URL
from .parsing_utils import LocalImage, RemoteImage
# storage
@dataclass(frozen=True)
class StorageProgressStart:
src: URL
dst: URL
size: int
@dataclass(frozen=True)
class StorageProgressComplete:
src: URL
dst: URL
size: int
@dataclass(frozen=True)
class StorageProgressStep:
src: URL
dst: URL
current: int
size: int
@dataclass(frozen=True)
class StorageProgressEnterDir:
src: URL
dst: URL
@dataclass(frozen=True)
class StorageProgressLeaveDir:
src: URL
dst: URL
@dataclass(frozen=True)
class StorageProgressFail:
src: URL
dst: URL
message: str
@dataclass(frozen=True)
class StorageProgressDelete:
uri: URL
is_dir: bool
class AbstractFileProgress(abc.ABC):
# design note:
# dataclasses used instead of direct passing parameters
# because a dataclass is forward-compatible
# but adding a new parameter to callback method
# effectively breaks all existing code
@abc.abstractmethod
def start(self, data: StorageProgressStart) -> None:
pass # pragma: no cover
@abc.abstractmethod
def complete(self, data: StorageProgressComplete) -> None:
pass # pragma: no cover
@abc.abstractmethod
def step(self, data: StorageProgressStep) -> None:
pass # pragma: no cover
class AbstractRecursiveFileProgress(AbstractFileProgress):
@abc.abstractmethod
def enter(self, data: StorageProgressEnterDir) -> None:
pass # pragma: no cover
@abc.abstractmethod
def leave(self, data: StorageProgressLeaveDir) -> None:
pass # pragma: no cover
@abc.abstractmethod
def fail(self, data: StorageProgressFail) -> None:
pass # pragma: no cover
class AbstractDeleteProgress(abc.ABC):
@abc.abstractmethod
def delete(self, data: StorageProgressDelete) -> None:
pass # pragma: no cover
# Next class for typing only (wrapped with queue_calls version of above classes)
class _AsyncAbstractFileProgress(abc.ABC):
@abc.abstractmethod
async def start(self, data: StorageProgressStart) -> None:
pass # pragma: no cover
@abc.abstractmethod
async def complete(self, data: StorageProgressComplete) -> None:
pass # pragma: no cover
@abc.abstractmethod
async def step(self, data: StorageProgressStep) -> None:
pass # pragma: no cover
class _AsyncAbstractRecursiveFileProgress(_AsyncAbstractFileProgress):
@abc.abstractmethod
async def enter(self, data: StorageProgressEnterDir) -> None:
pass # pragma: no cover
@abc.abstractmethod
async def leave(self, data: StorageProgressLeaveDir) -> None:
pass # pragma: no cover
@abc.abstractmethod
async def fail(self, data: StorageProgressFail) -> None:
pass # pragma: no cover
class _AsyncAbstractDeleteProgress(abc.ABC):
@abc.abstractmethod
async def delete(self, data: StorageProgressDelete) -> None:
pass # pragma: no cover
# image
@dataclass(frozen=True)
class ImageProgressPull:
src: RemoteImage
dst: LocalImage
@dataclass(frozen=True)
class ImageProgressPush:
src: LocalImage
dst: RemoteImage
@dataclass(frozen=True)
class ImageProgressSave:
job: str
dst: RemoteImage
@dataclass(frozen=True)
class ImageProgressStep:
message: str
layer_id: str
status: str
current: Optional[float]
total: Optional[float]
@dataclass(frozen=True)
class ImageCommitStarted:
job_id: str
target_image: RemoteImage
@dataclass(frozen=True)
class ImageCommitFinished:
job_id: str
class AbstractDockerImageProgress(abc.ABC):
@abc.abstractmethod
def pull(self, data: ImageProgressPull) -> None:
pass # pragma: no cover
@abc.abstractmethod
def push(self, data: ImageProgressPush) -> None:
pass # pragma: no cover
@abc.abstractmethod
def step(self, data: ImageProgressStep) -> None:
pass # pragma: no cover
@abc.abstractmethod
def save(self, data: ImageProgressSave) -> None:
pass # pragma: no cover
@abc.abstractmethod
def commit_started(self, data: ImageCommitStarted) -> None:
pass # pragma: no cover
@abc.abstractmethod
def commit_finished(self, data: ImageCommitFinished) -> None:
pass # pragma: no cover
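# --- Illustrative sketch, not part of the original module ---
# A minimal concrete AbstractRecursiveFileProgress implementation, assuming it
# is driven by some copy routine that invokes start/step/complete/enter/leave/
# fail with the dataclasses defined above. Shown only to clarify how these
# callback dataclasses are meant to be consumed; the class name is made up.
class _PrintingFileProgress(AbstractRecursiveFileProgress):
    def start(self, data: StorageProgressStart) -> None:
        print(f"start {data.src} -> {data.dst} ({data.size} bytes)")

    def step(self, data: StorageProgressStep) -> None:
        print(f"  {data.current}/{data.size} bytes")

    def complete(self, data: StorageProgressComplete) -> None:
        print(f"done {data.dst}")

    def enter(self, data: StorageProgressEnterDir) -> None:
        print(f"entering {data.src}")

    def leave(self, data: StorageProgressLeaveDir) -> None:
        print(f"leaving {data.src}")

    def fail(self, data: StorageProgressFail) -> None:
        print(f"failed {data.src}: {data.message}")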
|
484880
|
from typing import List

class Solution:
def setZeroes(self, matrix: List[List[int]]) -> None:
"""
这题要解决的问题是,必须有个地方记录判断结果,但又不能影响下一步的判断条件;
直接改为 0 的话,会影响下一步的判断条件;
因此,有一种思路是先改为 None,最后再将 None 改为 0;
从条件上看,如果可以将第一行、第二行作为记录空间,那么,用 None 应该也不算违背题目条件;
"""
rows = len(matrix)
cols = len(matrix[0])
        # First pass: mark cells to clear with None; keep existing 0s as 0 so they do not affect later checks
for r in range(rows):
for c in range(cols):
if matrix[r][c] is not None and matrix[r][c] == 0:
                    # Mark this cell's row and column
for i in range(rows):
matrix[i][c] = None if matrix[i][c] != 0 else 0
for j in range(cols):
matrix[r][j] = None if matrix[r][j] != 0 else 0
        # Second pass: turn every None back into 0
for r in range(rows):
for c in range(cols):
if matrix[r][c] is None:
matrix[r][c] = 0
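# Illustrative usage sketch, not part of the original solution: a quick
# self-check on a small matrix, assuming the in-place contract described above.
if __name__ == "__main__":
    m = [[1, 1, 1],
         [1, 0, 1],
         [1, 1, 1]]
    Solution().setZeroes(m)
    assert m == [[1, 0, 1],
                 [0, 0, 0],
                 [1, 0, 1]]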
|
484923
|
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
import unittest
import aaf2
import common
class TestCreateSequence(unittest.TestCase):
def test_create_sequence(self):
result_file = common.get_test_file('create_sequence.aaf')
mob_count = 0
components = 0
with aaf2.open(result_file, "w") as f:
video_rate = " 30000/1001"
comp_mob = f.create.CompositionMob()
sequence = f.create.Sequence(media_kind="picture")
timeline_slot = comp_mob.create_timeline_slot(video_rate)
            timeline_slot.segment = sequence
f.content.mobs.append(comp_mob)
length = 60 * 30
filler_len = 100
timecode_fps = 30
mob_count += 1
test_path = "some_path.mov"
for i in range(10):
# Make the Tape MOB
tape_mob = f.create.SourceMob()
tape_name = "tape_name"
tape_slot, tape_timecode_slot = tape_mob.create_tape_slots(tape_name, video_rate, timecode_fps)
tape_slot.segment.length = length
f.content.mobs.append(tape_mob)
mob_count += 1
# Make a FileMob
file_mob = f.create.SourceMob()
# Make a locator
loc = f.create.NetworkLocator()
loc['URLString'].value = test_path
file_description = f.create.CDCIDescriptor()
file_description.locator.append(loc)
file_description['ComponentWidth'].value = 8
file_description['HorizontalSubsampling'].value = 4
file_description['ImageAspectRatio'].value = '16/9'
file_description['StoredWidth'].value = 1920
file_description['StoredHeight'].value = 1080
file_description['FrameLayout'].value = 'FullFrame'
file_description['VideoLineMap'].value = [42, 0]
file_description['SampleRate'].value = video_rate
file_description['Length'].value = 10
file_mob.descriptor = file_description
clip = tape_mob.create_source_clip(slot_id=1, length=length)
slot = file_mob.create_picture_slot(video_rate)
slot.segment.components.append(clip)
f.content.mobs.append(file_mob)
mob_count += 1
# Make the Master MOB
master_mob = f.create.MasterMob()
master_mob.name = "Master Mob %i" % i
master_mob.comments['Test'] = 'Value'
master_mob.comments.append(f.create.TaggedValue("Test2", 42))
assert master_mob.comments['Test'] == "Value"
assert master_mob.comments['Test2'] == 42
clip = file_mob.create_source_clip(slot_id=1)
assert clip.length == length
slot = master_mob.create_picture_slot(video_rate)
slot.segment.components.append(clip)
f.content.mobs.append(master_mob)
mob_count += 1
# Create a SourceClip
clip = master_mob.create_source_clip(slot_id=1)
assert clip.length == length
sequence.components.append(clip)
components += 1
# Create a filler
comp_fill = f.create.Filler("picture", filler_len)
sequence.components.append(comp_fill)
components += 1
with aaf2.open(result_file, "r") as f:
assert len(f.content.mobs) == mob_count
comp = next(f.content.compositionmobs())
slot = comp.slot_at(1)
assert len(slot.segment.components) == components
if __name__ == "__main__":
unittest.main()
|
484943
|
from collections import defaultdict
from queue import PriorityQueue
class Offer:
def __init__(self, item, quant, itemPrice, coffer=0):
self.item = item
self.quantLeft = quant
self.quantFulfilled = 0
self.itemPrice = itemPrice
self.coffer = coffer
@property
def complete(self):
return self.quantLeft == 0
def partialCollect(self):
pass
def cancel(self):
        return self.coffer, [self.item() for e in range(self.quantFulfilled)]
def __lt__(self, other):
return True
def __eq__(self, other):
return False
class BuyOffer(Offer):
def buy(self, quant, itemPrice):
self.coffer -= itemPrice * quant
#Collect only profits thus far
def partialCollect(self):
        ret = [self.item() for e in range(self.quantFulfilled)]
self.quantFulfilled = 0
return ret
class SellOffer(Offer):
def sell(self, quant):
self.coffer += self.itemPrice * quant
self.quantLeft -= quant
assert self.quantLeft >= 0
def partialCollect(self):
ret = self.coffer
self.coffer = 0
        return ret
#Why is there no peek function...
class PQ(PriorityQueue):
def peek(self):
if len(self.queue) > 0:
return self.queue[0]
return None
class Exchange:
def __init__(self):
self.buyOffers = defaultdict(PQ)
self.sellOffers = defaultdict(PQ)
def buy(self, item, quant, maxPrice):
offer = BuyOffer(item, quant, maxPrice, coffer=quant*maxPrice)
self.buyOffers[item].put(offer, -maxPrice)
self.update(item)
return offer
def sell(self, item, quant, itemPrice):
offer = SellOffer(item, quant, itemPrice)
self.sellOffers[item].put(offer, itemPrice)
self.update(item)
return offer
def update(self, item):
buyOffer = self.buyOffers[item].peek()
sellOffer = self.sellOffers[item].peek()
if None in (buyOffer, sellOffer):
return
maxBuy, minSell = buyOffer.itemPrice, sellOffer.itemPrice
itemPrice = minSell #Advantage given to buyer arbitrarily
if maxBuy >= minSell:
if sellOffer.quantLeft < buyOffer.quantLeft:
buyOffer.buy(sellOffer.quantLeft, itemPrice)
sellOffer.sell(sellOffer.quantLeft)
self.sellOffers[item].get()
elif sellOffer.quantLeft > buyOffer.quantLeft:
buyOffer.buy(buyOffer.quantLeft, itemPrice)
sellOffer.sell(buyOffer.quantLeft)
self.buyOffers[item].get()
elif sellOffer.quantLeft == buyOffer.quantLeft:
                buyOffer.buy(buyOffer.quantLeft, itemPrice)
sellOffer.sell(sellOffer.quantLeft)
self.buyOffers[item].get()
self.sellOffers[item].get()
self.update(item)
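# Illustrative usage sketch, not part of the original module, assuming any
# zero-argument callable can stand in for an item factory. A matching buy and
# sell offer should clear against each other inside update().
if __name__ == "__main__":
    class Gold:
        pass

    exchange = Exchange()
    sell_offer = exchange.sell(Gold, quant=5, itemPrice=10)
    buy_offer = exchange.buy(Gold, quant=5, maxPrice=10)
    assert sell_offer.complete                 # all 5 units sold
    assert sell_offer.partialCollect() == 50   # proceeds collected once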
|
484952
|
import sys
if sys.version_info.major == 2:
from future.standard_library import install_aliases
install_aliases()
import functools
from backports.functools_lru_cache import lru_cache
functools.lru_cache = lru_cache
from contentbase.resources import * # noqa
def includeme(config):
config.include('pyramid_tm')
config.include('.stats')
config.include('.batchupgrade')
config.include('.calculated')
config.include('.embedding')
config.include('.json_renderer')
config.include('.validation')
config.include('.predicates')
config.include('.invalidation')
config.include('.upgrader')
config.include('.auditor')
config.include('.resources')
config.include('.attachment')
config.include('.schema_graph')
config.include('.jsonld_context')
config.include('.schema_views')
|
484999
|
import traceback
from util import callsback
import common.actions
action = common.actions.action
ObservableActionMeta = common.actions.ObservableActionMeta
from common import profile
from logging import getLogger; log = getLogger('Contact')
objget = object.__getattribute__
CONTACT_ATTRS = set(['id', 'buddy', 'remove', 'watched', '__repr__',
    'rename_gui', 'rename', 'edit_alerts', 'alias', 'get_group', 'move_to_group',
    '__getattr__', '__hash__', '__cmp__', 'sort', '_notify_dirty',
])
class Contact(object):
'''
Contact. Represents an entry on a protocol buddy list.
Several contacts may point to the same Buddy object.
Ex: AIM when a buddy is in 2+ groups - same buddy in both
places but each has its own SSI.
'''
watched = 'online '.split()
__metaclass__ = ObservableActionMeta
def __init__(self, buddy, id):
self.buddy, self.id = buddy, id
self._metacontact = None
def remove(self):
self.protocol.remove_buddy(self.id)
def _compatible_accounts(self):
from common.protocolmeta import is_compatible
result = []
for account in profile.connected_accounts:
if is_compatible(account.protocol, self.buddy.service):
result.append(account)
return result
def _all_buddies(self, check_if_has=False):
result = []
for account in self._compatible_accounts():
connection = account.connection
if connection:
if not check_if_has or connection.has_buddy(self.buddy.name):
# don't let a protocol create the buddy
buddy = connection.get_buddy(self.buddy.name)
if buddy is not None:
result.append(buddy)
return result
def _is_blocked(self):
buddies = [buddy.blocked for buddy in self._all_buddies(check_if_has=True)]
return bool(buddies and all(buddies))
blocked = property(_is_blocked)
def _block_pred(self, block=True, **k):
return True if bool(block) ^ self._is_blocked() else None
def _unblock_pred(self, *a, **k):
return True if self._is_blocked() else None
@action(_block_pred)
def block(self, block=True, **k):
for buddy in self._all_buddies():
if bool(block) ^ bool(buddy.blocked):
buddy.block(block, **k)
@action(_unblock_pred)
def unblock(self, *a,**k):
self.block(False,*a,**k)
def get_notify_dirty(self):
return self.buddy._notify_dirty
def set_notify_dirty(self, value):
self.buddy._notify_dirty = value
_notify_dirty = property(get_notify_dirty, set_notify_dirty)
@action()
def rename_gui(self):
from gui.toolbox import GetTextFromUser
localalias = self.alias
if localalias is None:
localalias = ''
s = GetTextFromUser(_('Enter an alias for %s:') % self.name,
caption = _('Rename %s') % self.name,
default_value = localalias )
if s is not None:
if s == '' or s.strip():
# dialog returns None if "Cancel" button is pressed -- that means do nothing
# rename expects None to mean "no alias" and anything else to mean an alias--so
# do the bool check to turn '' into None here.
self.rename(s if s else None)
return s
def rename(self, new_alias):
log.info('setting alias for %r to %r', self, new_alias)
profile.set_contact_info(self, 'alias', new_alias)
self.buddy.notify('alias')
@action()
def edit_alerts(self):
import gui.pref.prefsdialog as prefsdialog
prefsdialog.show('notifications')
@property
def alias(self):
a = profile.get_contact_info(self, 'alias')
if a: return a
for attr in ('local_alias', 'remote_alias', 'nice_name'):
try:
a = getattr(self, attr, None)
except Exception:
traceback.print_exc()
continue
if a: return a
return self.name
def get_group(self):
g = self.protocol.group_for(self)
assert isinstance(g, (basestring, type(None))), 'Is %s' % type(g)
return g
@callsback
def move_to_group(self, groupname, index = 0, callback = None):
if not isinstance(groupname, basestring):
raise TypeError, 'groupname must be a string: %r' % groupname
self.protocol.move_buddy_creating_group(self, groupname, self.get_group(),
index, callback = callback)
def __getattr__(self, attr):
if attr in CONTACT_ATTRS:
return objget(self, attr)
else:
return getattr(objget(self, 'buddy'), attr)
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.buddy)
def __hash__(self):
# First part of this hash should match Buddy.idstr()
b = self.buddy
id = self.id
if isinstance(id, bytes):
id = id.decode('fuzzy utf-8')
return hash(u'/'.join((b.protocol.name, b.protocol.username, b.name, unicode(id))))
def __cmp__(self, other):
if self is other:
return 0
else:
return cmp((self.buddy, self.id), (getattr(other, 'buddy', None), getattr(other, 'id', None)))
class ContactCapabilities:
'Buddy capabilities. Exposed as common.caps'
INFO = 'INFO'
IM = 'IM'
'Instant messaging.'
FILES = 'FILES'
'Sending and receiving files.'
PICTURES = 'PICTURES'
'Sharing pictures over a direct connection.'
SMS = 'SMS'
'Sending messages directly to a cell phone.'
BLOCKABLE = 'BLOCKABLE'
'Blocking buddies.'
EMAIL = 'EMAIL'
'Sending email.'
BOT = 'BOT'
'User is a bot, and will join the Machines when Skynet turns on the human race. Be vigilant.'
VIDEO = 'VIDEO'
'Video chat.'
|
485011
|
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import math, sys
def periodic (i, limit, add):
"""
Choose correct matrix index with periodic
boundary conditions
Input:
- i: Base index
- limit: Highest \"legal\" index
- add: Number to add or subtract from i
"""
return (i+limit+add) % limit
def monteCarlo(temp, NSpins, MCcycles):
"""
Calculate the energy and magnetization
(\"straight\" and squared) for a given temperature
Input:
- temp: Temperature to calculate for
- NSpins: dimension of square matrix
    - MCcycles: Number of Monte Carlo cycles (how many full sweeps of
      the spin matrix we perform)
Output:
- E_av: Energy of matrix averaged over MCcycles, normalized to spins**2
- E_variance: Variance of energy, same normalization * temp**2
- M_av: Magnetic field of matrix, averaged over MCcycles, normalized to spins**2
- M_variance: Variance of magnetic field, same normalization * temp
- Mabs: Absolute value of magnetic field, averaged over MCcycles
"""
#Setup spin matrix, initialize to ground state
spin_matrix = np.zeros( (NSpins,NSpins), np.int8) + 1
#Create and initialize variables
E = M = 0
E_av = E2_av = M_av = M2_av = Mabs_av = 0
#Setup array for possible energy changes
w = np.zeros(17,np.float64)
for de in range(-8,9,4): #include +8
w[de+8] = math.exp(-de/temp)
#Calculate initial magnetization:
M = spin_matrix.sum()
#Calculate initial energy
for j in range(NSpins):
for i in range(NSpins):
E -= spin_matrix.item(i,j)*\
(spin_matrix.item(periodic(i,NSpins,-1),j) + spin_matrix.item(i,periodic(j,NSpins,1)))
#Start metropolis MonteCarlo computation
for i in range(MCcycles):
#Metropolis
#Loop over all spins, pick a random spin each time
for s in range(NSpins**2):
x = int(np.random.random()*NSpins)
y = int(np.random.random()*NSpins)
deltaE = 2*spin_matrix.item(x,y)*\
(spin_matrix.item(periodic(x,NSpins,-1), y) +\
spin_matrix.item(periodic(x,NSpins,1), y) +\
spin_matrix.item(x, periodic(y,NSpins,-1)) +\
spin_matrix.item(x, periodic(y,NSpins,1)))
if np.random.random() <= w[deltaE+8]:
#Accept!
spin_matrix[x,y] *= -1
M += 2*spin_matrix[x,y]
E += deltaE
#Update expectation values
E_av += E
E2_av += E**2
M_av += M
M2_av += M**2
Mabs_av += int(math.fabs(M))
#Normalize average values
E_av /= float(MCcycles);
E2_av /= float(MCcycles);
M_av /= float(MCcycles);
M2_av /= float(MCcycles);
Mabs_av /= float(MCcycles);
#Calculate variance and normalize to per-point and temp
E_variance = (E2_av-E_av*E_av)/float(NSpins*NSpins*temp*temp);
M_variance = (M2_av-M_av*M_av)/float(NSpins*NSpins*temp);
#Normalize returned averages to per-point
E_av /= float(NSpins*NSpins);
M_av /= float(NSpins*NSpins);
Mabs_av /= float(NSpins*NSpins);
return (E_av, E_variance, M_av, M_variance, Mabs_av)
# Main program
# temperature steps, initial temperature, final temperature
NumberTsteps = 20
InitialT = 1.8
FinalT = 2.6
Tsteps = (FinalT-InitialT)/NumberTsteps
Temp = np.zeros(NumberTsteps)
for T in range(NumberTsteps):
Temp[T] = InitialT+T*Tsteps
# Declare arrays that hold averages
Energy = np.zeros(NumberTsteps); Magnetization = np.zeros(NumberTsteps)
SpecificHeat = np.zeros(NumberTsteps); Susceptibility = np.zeros(NumberTsteps)
MagnetizationAbs = np.zeros(NumberTsteps)
# Define number of spins
NSpins = 10
# Define number of Monte Carlo cycles
MCcycles = 10000
# Perform the simulations over a range of temperatures
for T in range(NumberTsteps):
(Energy[T], SpecificHeat[T], Magnetization[T], Susceptibility[T], MagnetizationAbs[T]) = monteCarlo(Temp[T],NSpins,MCcycles)
# And finally plot
f = plt.figure(figsize=(18, 10)); # plot the calculated values
sp = f.add_subplot(2, 2, 1 );
plt.plot(Temp, Energy, 'o', color="green");
plt.xlabel("Temperature (T)", fontsize=20);
plt.ylabel("Energy ", fontsize=20);
sp = f.add_subplot(2, 2, 2 );
plt.plot(Temp, abs(Magnetization), 'o', color="red");
plt.xlabel("Temperature (T)", fontsize=20);
plt.ylabel("Magnetization ", fontsize=20);
sp = f.add_subplot(2, 2, 3 );
plt.plot(Temp, SpecificHeat, 'o', color="blue");
plt.xlabel("Temperature (T)", fontsize=20);
plt.ylabel("Specific Heat ", fontsize=20);
sp = f.add_subplot(2, 2, 4 );
plt.plot(Temp, Susceptibility, 'o', color="black");
plt.xlabel("Temperature (T)", fontsize=20);
plt.ylabel("Susceptibility", fontsize=20);
plt.show()
|
485034
|
from Redy.Magic.Classic import singleton
from typing import Union
lit = Union[str, bytes]
@singleton
class ConstStrPool:
__slots__ = []
_pool: dict = {}
@classmethod
def cast_to_const(cls, string: lit):
if string not in cls._pool:
cls._pool[string] = string
return cls._pool[string]
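# Illustrative usage sketch, not part of the original module: two equal strings
# built at runtime are collapsed to the single pooled object stored on first use.
if __name__ == '__main__':
    s1 = ConstStrPool.cast_to_const("".join(["hel", "lo"]))
    s2 = ConstStrPool.cast_to_const("".join(["hell", "o"]))
    assert s1 == s2 and s1 is s2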
|
485056
|
from django.test import TestCase
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory
from attendance.models.Student import Student
from attendance.serializers.StudentSerializer import StudentSerializer
class StudentSerializerTest(TestCase):
def setUp(self):
factory = APIRequestFactory()
request = factory.get('/students')
self.student_attributes = {"full_name": "<NAME>",
"face_id": "aae2f120-20d6-4fa7-b34c-f1dbe7aa09a9"}
self.serializer_data = {"full_name": "<NAME>",
"face_id": "dae3f120-221d-42a7-b124c-f1dbe7aa09a9"}
serializer_context = {
'request': Request(request),
}
self.student = Student.objects.create(**self.student_attributes)
self.serializer = StudentSerializer(instance=self.student, context=serializer_context)
def test_contains_expected_fields(self):
data = self.serializer.data
print(data)
self.assertCountEqual(set(data.keys()), {'full_name', 'face_id'})
def test_face_id_field_content(self):
data = self.serializer.data
self.assertEqual(data['face_id'], self.student_attributes['face_id'])
def test_full_name_field_content(self):
data = self.serializer.data
self.assertEqual(data['full_name'], self.student_attributes['full_name'])
|
485083
|
from pdb import set_trace as T
from forge.blade.action import action
from forge.blade.lib import utils, enums
import numpy as np
class Arg:
def __init__(self, val, discrete=True, set=False, min=-1, max=1):
self.val = val
self.discrete = discrete
self.continuous = not discrete
self.min = min
self.max = max
self.n = self.max - self.min + 1
class ActionV2:
def edges(self, world, entity, inclusive=False):
return [Pass, Move, Attack]#, Ranged]
class Pass(action.Pass):
priority = 0
@staticmethod
def call(world, entity):
return
def args(stim, entity, config):
return [()]
@property
def nArgs():
return 1
class Move(action.Move):
priority = 1
def call(world, entity, rDelta, cDelta):
r, c = entity.pos
rNew, cNew = r+rDelta, c+cDelta
      if not utils.inBounds(rNew, cNew, world.shape):
         return
      if world.env.tiles[rNew, cNew].state.index in enums.IMPASSIBLE:
         return
if entity.freeze > 0:
return
entity._pos = rNew, cNew
entID = entity.entID
r, c = entity.lastPos
world.env.tiles[r, c].delEnt(entID)
r, c = entity.pos
world.env.tiles[r, c].addEnt(entID, entity)
def args(stim, entity, config):
rets = []
for delta in ((0, 0), (0, 1), (1, 0), (0, -1), (-1, 0)):
r, c = delta
#r, c = Arg(r), Arg(c)
rets.append((r, c))
return rets
@property
def nArgs():
      return len(Move.args(None, None, None))
class Attack(action.Attack):
def inRange(entity, stim, N):
R, C = stim.shape
R, C = R//2, C//2
#R, C = entity.pos
rets = []
for r in range(R-N, R+N+1):
for c in range(C-N, C+N+1):
for e in stim[r, c].ents.values():
rets.append(e)
return rets
def l1(pos, cent):
r, c = pos
rCent, cCent = cent
return abs(r - rCent) + abs(c - cCent)
def call(world, entity, targ, damageF, freeze=False):
if entity.entID == targ.entID:
entity._attack = None
return
#entity.targPos = targ.pos
#entity.attkPos = entity.lastPos
#entity.targ = targ
damage = damageF(entity, targ)
assert type(damage) == int
if freeze and damage > 0:
targ._freeze = 3
return
#return damage
def args(stim, entity, config):
return [Melee, Range, Mage]
#return Melee.args(stim, entity, config) + Range.args(stim, entity, config) + Mage.args(stim, entity, config)
class Melee(action.Melee):
priority = 2
def call(world, entity, targ):
damageF = world.config.MELEEDAMAGE
Attack.call(world, entity, targ, damageF)
def args(stim, entity, config):
return Attack.inRange(entity, stim, config.MELEERANGE)
class Range(action.Range):
priority = 2
def call(world, entity, targ):
damageF = world.config.RANGEDAMAGE
Attack.call(world, entity, targ, damageF)
def args(stim, entity, config):
return Attack.inRange(entity, stim, config.RANGERANGE)
class Mage(action.Mage):
priority = 2
def call(world, entity, targ):
damageF = world.config.MAGEDAMAGE
dmg = Attack.call(world, entity, targ, damageF, freeze=True)
def args(stim, entity, config):
return Attack.inRange(entity, stim, config.MAGERANGE)
|
485092
|
import pandas as pd
import requests
import json
import base64
import logging
import os.path
from manubot import cite
headers = {} # For Development you can insert your GH api token here and up the rate limit {'Authorization': 'token %s' % "<apiToken>"}
if "GITHUB_TOKEN" in os.environ:
print("GITHUB_TOKEN env variable is present.")
headers = {'Authorization': 'token %s' % os.environ["GITHUB_TOKEN"]}
else:
print('GITHUB_TOKEN env variable is not present.')
# Issues Helper Functions
def getIssuesFromAPI():
""" Gets all the issues and pull-requests (GH treats PRs like issues in the api)
Needs to use pagination because of 100 per-request api limit
"""
issues = []
pageNumber = 1
numberOfIssuesReturned = 1
while numberOfIssuesReturned != 0:
issuesResponse = requests.get(
"https://api.github.com/repos/greenelab/covid19-review/issues?state=all&per_page=50&page=" +
str(pageNumber), headers=headers)
issues_page = json.loads(issuesResponse.text)
issues = issues + issues_page
numberOfIssuesReturned = len(issues_page)
pageNumber += 1
return issues
def getCitationFromIssue(issue):
""" Gets the citation from the github issue assuming the issue follows the New Paper issue template
Citation is typically a DOI but could be something else
"""
try:
if "\nCitation: " in issue["body"]:
citation = issue["body"].split("\nCitation: ")[1].split(" ")[0]
else:
afterDOI = issue["body"].split("DOI:")[1]
citation = afterDOI.split(" ")[0]
if citation == "":
citation = afterDOI.split(" ")[1]
if "\r\n" in citation:
citation = citation.split("\r\n")[0]
if citation.startswith("@"):
citation = citation[1:]
return citation
except:
print(
"the citation could not be automatically extracted from the following issue: \n",
issue["title"])
return "unknown"
def getPaperTitleFromIssue(issue):
""" gets the papers title using manubot; if manubot can't get title, extract from issue title """
try:
# Try using manubot
citekey = getCitationFromIssue(issue)
csl_item = cite.citekey_to_csl_item(citekey)
title = csl_item["title"]
return title
except:
# On error, try getting from issue title
try:
title = issue["title"].split(":")[1]
return title
except:
print(
"the paper title could not be automatically extracted from the following issue: \n",
issue["title"])
return "unknown"
def citationContainsDOI(citation):
""" Checks if the citation contains a doi """
if citation.startswith("doi:"):
return True
elif citation.startswith("@doi:"):
return True
elif citation.startswith("[@doi"):
return True
else:
return False
def getDOIFromCitation(citation):
""" pulls the DOI from the citation; built to handle the cases I've seen so far """
try:
if ".org/" in citation:
DOI = citation.split(".org/")[1]
elif citationContainsDOI(citation):
DOI = citation.split("doi:")[1]
DOI = DOI.replace("]", "")
elif citation == "unknown":
DOI = "unknown"
else:
DOI = citation
# DOIs are case insensitive but lower-case seems to be preferred and is what's used by manubot
DOI = DOI.lower()
return DOI
except:
return "unknown"
def getIssuesDF(issues, removeDuplicates=True):
""" takes a list of github issues
Assumes they are all New Paper issues
Creates a data frame with doi, title, issueLink and issueLabels
    issue labels is a comma separated string
"""
DOIs = []
titles = []
issue_links = []
    labels = []
for issue in issues:
DOIs.append(getDOIFromCitation(getCitationFromIssue(issue)))
titles.append(getPaperTitleFromIssue(issue))
issue_links.append(issue["html_url"])
        labels.append(", ".join([label["name"] for label in issue["labels"]]))
issuesDF = pd.DataFrame({
"doi": DOIs,
"title": titles,
"gh_issue_link": issue_links,
"gh_issue_labels": lables}).set_index("doi")
if removeDuplicates:
issuesDF = issuesDF[~issuesDF.gh_issue_labels.str.contains("duplicate")]
return issuesDF
def getIssuesData():
issues = getIssuesFromAPI()
paperIssues = [issue for issue in issues if "New Paper (" in issue["title"]]
paperIssuesDF = getIssuesDF(paperIssues)
# log the issues without valid DOIs
doesNotHaveValidDOI = [not doi.startswith("10") for doi in list(paperIssuesDF.index)]
issueLinksWithoutDOIs = list(paperIssuesDF.gh_issue_link[doesNotHaveValidDOI])
    print('\n\nA valid DOI could not be extracted from the following', len(issueLinksWithoutDOIs), 'issues:\n', issueLinksWithoutDOIs, '\n')
return paperIssuesDF
# Covid19-review citations functions
def getCitationsData():
""" Gets a dataframe with doi, title, date, publication, url and covid19-review_paperLink
The covid19-review_paperLink is a link to that paper's citation in the html document (example: https://greenelab.github.io/covid19-review/#ref-Rt5Aik4p)
    Gets the citation info from the references.json file in the output branch
"""
# Follows https://github.com/simonw/irma-scrapers/issues/1
citationsResponse = requests.get("https://api.github.com/repos/greenelab/covid19-review/git/trees/output", headers=headers).json()
treeEntry = [t for t in citationsResponse["tree"] if t["path"] == "references.json"][0]
citations = json.loads(base64.b64decode(requests.get(treeEntry["url"]).json()["content"]))
citationsDF = pd.DataFrame(citations)
citationsDF["Covid19-review_paperLink"] = citationsDF.id.apply(lambda x: "https://greenelab.github.io/covid19-review/#ref-" + x)
citationsDF = citationsDF[["DOI", "title", "issued", "container-title", "URL", "Covid19-review_paperLink"]]
citationsDF.rename(columns={"DOI": "doi", "issued": "date", "container-title": "publication"}, inplace=True)
# Convert date to string
def dateStringFromDateParts(row):
try:
dateParts = row['date']['date-parts'][0]
if len(dateParts) == 3:
return "-".join([str(dateParts[1]), str(dateParts[2]), str(dateParts[0])])
elif len(dateParts) == 2:
return "-".join([str(dateParts[1]), str(dateParts[0])])
elif len(dateParts) == 1:
return str(dateParts[0])
else:
return
except:
return
citationsDF.date = citationsDF.apply(dateStringFromDateParts, axis=1)
citationsDF.set_index("doi", inplace=True)
return citationsDF
# Pull-Request Functions
def getPRsFromAPI():
""" Gets all the pull-requests; Needs to use pagination because of 100 per-request api limit
"""
PRs = []
pageNumber = 1
numberOfPRsReturned = 1
while numberOfPRsReturned != 0:
PRsResponse = requests.get(
"https://api.github.com/repos/greenelab/covid19-review/pulls?state=all&per_page=50&page=" +
str(pageNumber), headers=headers)
PRs_page = json.loads(PRsResponse.text)
PRs = PRs + PRs_page
numberOfPRsReturned = len(PRs_page)
pageNumber += 1
return PRs
def getRelevantPRData():
""" Gets the relevant data from the pull requests"""
prInfoFromAPI = getPRsFromAPI()
diffHeader = headers.copy()
diffHeader['Accept'] = "application/vnd.github.v3.diff"
textForReviewPRs = []
for PR in prInfoFromAPI:
labels = [label["name"] for label in PR['labels']]
if "Text for Review" in labels:
diffResponse = requests.get(PR["url"], headers=diffHeader)
diff = diffResponse.text
# Add the info the list
textForReviewPRs.append({
"pull_request_link": PR["html_url"],
"diff": diff
})
if int(diffResponse.headers["X-RateLimit-Remaining"]) <= 2:
print('GitHub api rate limit will be exceeded; the GITHUB_TOKEN env variable needs to be set.')
break
return textForReviewPRs
# Get Mt. Sinai data
def addMtSinaiReviewLinks(df):
# Get list of papers reviewed
mtSinaiPapersResponse = requests.get("https://api.github.com/repos/ismms-himc/covid-19_sinai_reviews/contents/markdown_files", headers=headers)
mtSinaiPapers = json.loads(mtSinaiPapersResponse.text)
# TODO: handle errors in the file names if they occur (see https://github.com/greenelab/covid19-review/pull/226#discussion_r410696328)
reviewedDOIs = [str(paper["name"].split(".md")[0]) for paper in mtSinaiPapers]
# Check if there are Mt Sinai Reviews not in our paper
    # This wouldn't pick up cases where Mt. Sinai adds a new review for a paper that was already cited elsewhere in our paper, but it's probably better than nothing
citedDOIs = [str(citedDOI) for citedDOI in list(df[~df["Covid19-review_paperLink"].isnull()].doi)]
newReviews = [doi for doi in reviewedDOIs if doi.replace('-', '/') not in citedDOIs]
if len(newReviews) > 0:
print("\n -- New Reviews in the Mt Sinai Repo... --")
print("Of the", len(reviewedDOIs), "papers reviewed in https://github.com/ismms-himc/covid-19_sinai_reviews, the following", len(newReviews), "aren't listed in the covid19-review paper:")
for newReview in newReviews:
print(newReview)
# Add a link to the review
def addLinkToReview(row):
try:
doiWithoutSlash = str(row.doi).replace("/", "-")
if doiWithoutSlash in reviewedDOIs:
return "https://github.com/ismms-himc/covid-19_sinai_reviews/tree/master/markdown_files/" + doiWithoutSlash + ".md"
else:
return
except:
return
df['Mt_Sinai_Review_link'] = df.apply(addLinkToReview, axis=1)
return df
def getDuplicates(array):
seen = {}
dupes = []
for x in array:
if x not in seen:
seen[x] = 1
else:
if seen[x] == 1:
dupes.append(x)
seen[x] += 1
return dupes
# Data merging function
def mergePaperDataFrames(dataFramesList):
""" Combine a list of paper dataframes into one.
Each data frame should have the doi as index.
    Can have title and date columns; the title and date from the first dataframe in the list will be kept in case of conflict.
Any additional columns unique to dataset will be kept.
"""
# Add "_#" to title and date columns of all but the first df
for i in range(len(dataFramesList)):
if i == 0: continue
dataFramesList[i] = dataFramesList[i].rename(columns={"title": ("title_" + str(i)), "date": ("date_" + str(i))})
    # Separate into items with and without valid DOIs
dataFramesList_DOI = []
dataFramesList_NoDOI = []
for df in dataFramesList:
validDOI = [str(index).startswith("10") for index in df.index]
invalidDOI = [False if val else True for val in validDOI]
dataFramesList_DOI.append(df[validDOI])
dataFramesList_NoDOI.append(df[invalidDOI])
# Check if there are issues with duplicate DOIs
for df in dataFramesList_DOI:
duplicateDOIs = getDuplicates(list(df.index))
if len(duplicateDOIs) > 0:
raise ValueError('The following paper(s) has/have duplicate issues:', duplicateDOIs)
# Merge on DOIs
mergedOnDOI = pd.concat(dataFramesList_DOI, axis=1, sort=False)
mergedOnDOI['doi'] = mergedOnDOI.index
# TODO: merge on titles
# Add in the items that didn't have a DOI
dfsToMerge = dataFramesList_NoDOI
dfsToMerge.append(mergedOnDOI)
merged = pd.concat(dfsToMerge, axis=0, ignore_index=True, sort=False)
# Combine the title and date info from duplicate columns
for i in range(len(dataFramesList)):
if i == 0: continue
for col in ['title', 'date']:
secondaryCol = col + "_" + str(i)
if secondaryCol in merged.columns:
                merged[col] = merged[col].combine_first(merged[secondaryCol])
merged.drop(secondaryCol, axis=1, inplace=True)
return merged
def addPRLinks(sourcesDF, prData):
""" Adds a new column to a sourcesDF with the link to any relevant PR that has that DOI """
def addPRLinkToPaperRow(row):
prLinks = []
doi = str(row.doi)
if len(doi) < 1:
return
else:
for PR in prData:
# TODO: use manubot to keep all DOIs consistent so that there will be no issues with short DOIs not matching up. (here and elsewhere)
if doi in PR["diff"]:
prLinks.append(PR["pull_request_link"])
prLinksString = ",".join(prLinks)
return prLinksString
sourcesDF["gh_pull_request_links"] = sourcesDF.apply(addPRLinkToPaperRow, axis=1)
return sourcesDF
# Main
# log only critical manubot errors
logger = logging.getLogger()
logger.setLevel(logging.CRITICAL)
print("\n -- getting issues data --")
issuesData = getIssuesData()
print(len(issuesData), "'New Paper' issues")
print("\n -- getting citations data --")
citationsData = getCitationsData()
print(len(citationsData), "citations in the covid19-review paper")
relevantPRData = getRelevantPRData()
print(len(relevantPRData), "'Text for Review' Pull-Requests")
print("\n -- merging the issues and citations --")
print(sum([len(df) for df in [citationsData, issuesData]]), "total items to merge")
combinedData = mergePaperDataFrames([citationsData, issuesData])
print(len(combinedData), "total items after merge")
print("\n -- adding in the PR links --")
combinedData = addPRLinks(combinedData, relevantPRData)
print("\n -- adding in Mt. Sinai review links --")
combinedData = addMtSinaiReviewLinks(combinedData)
combinedData.set_index('doi', inplace=True)
combinedDataFilePath = "./output/sources_cross_reference.tsv"
print("\n -- saving the data to ", combinedDataFilePath, " --")
combinedData.to_csv(combinedDataFilePath, sep="\t")
|
485093
|
import os
import re
import threading
import sublime
from .preferences_filename import preferences_filename
from .thread_progress import ThreadProgress
from .package_manager import PackageManager
from .upgraders.git_upgrader import GitUpgrader
from .upgraders.hg_upgrader import HgUpgrader
from .versions import version_comparable
class PackageInstaller():
"""
Provides helper functionality related to installing packages
"""
def __init__(self):
self.manager = PackageManager()
def make_package_list(self, ignore_actions=[], override_action=None,
ignore_packages=[]):
"""
Creates a list of packages and what operation would be performed for
each. Allows filtering by the applicable action or package name.
Returns the information in a format suitable for displaying in the
quick panel.
:param ignore_actions:
A list of actions to ignore packages by. Valid actions include:
`install`, `upgrade`, `downgrade`, `reinstall`, `overwrite`,
            `pull` and `none`. `pull` and `none` are for Git and Hg
            repositories. `pull` is present when incoming changes are detected,
            whereas `none` is selected if no commits are available. `overwrite`
is for packages that do not include version information via the
`package-metadata.json` file.
:param override_action:
A string action name to override the displayed action for all listed
packages.
:param ignore_packages:
A list of packages names that should not be returned in the list
:return:
A list of lists, each containing three strings:
0 - package name
1 - package description
2 - action; [extra info;] package url
"""
packages = self.manager.list_available_packages()
installed_packages = self.manager.list_packages()
package_list = []
for package in sorted(iter(packages.keys()), key=lambda s: s.lower()):
if ignore_packages and package in ignore_packages:
continue
package_entry = [package]
info = packages[package]
download = info['download']
if package in installed_packages:
installed = True
metadata = self.manager.get_metadata(package)
if metadata.get('version'):
installed_version = metadata['version']
else:
installed_version = None
else:
installed = False
installed_version_name = 'v' + installed_version if \
installed and installed_version else 'unknown version'
new_version = 'v' + download['version']
vcs = None
package_dir = self.manager.get_package_dir(package)
settings = self.manager.settings
if override_action:
action = override_action
extra = ''
else:
if os.path.exists(os.path.join(package_dir, '.git')):
if settings.get('ignore_vcs_packages'):
continue
vcs = 'git'
incoming = GitUpgrader(settings.get('git_binary'),
settings.get('git_update_command'), package_dir,
settings.get('cache_length'), settings.get('debug')
).incoming()
elif os.path.exists(os.path.join(package_dir, '.hg')):
if settings.get('ignore_vcs_packages'):
continue
vcs = 'hg'
incoming = HgUpgrader(settings.get('hg_binary'),
settings.get('hg_update_command'), package_dir,
settings.get('cache_length'), settings.get('debug')
).incoming()
if installed:
if vcs:
if incoming:
action = 'pull'
extra = ' with ' + vcs
else:
action = 'none'
extra = ''
elif not installed_version:
action = 'overwrite'
extra = ' %s with %s' % (installed_version_name,
new_version)
else:
installed_version = version_comparable(installed_version)
download_version = version_comparable(download['version'])
if download_version > installed_version:
action = 'upgrade'
extra = ' to %s from %s' % (new_version,
installed_version_name)
elif download_version < installed_version:
action = 'downgrade'
extra = ' to %s from %s' % (new_version,
installed_version_name)
else:
action = 'reinstall'
extra = ' %s' % new_version
else:
action = 'install'
extra = ' %s' % new_version
extra += ';'
if action in ignore_actions:
continue
description = info.get('description')
if not description:
description = 'No description provided'
package_entry.append(description)
package_entry.append(action + extra + ' ' +
re.sub('^https?://', '', info['homepage']))
package_list.append(package_entry)
return package_list
def disable_packages(self, packages):
"""
Disables one or more packages before installing or upgrading to prevent
errors where Sublime Text tries to read files that no longer exist, or
read a half-written file.
:param packages: The string package name, or an array of strings
"""
if not isinstance(packages, list):
packages = [packages]
# Don't disable Package Control so it does not get stuck disabled
if 'Package Control' in packages:
packages.remove('Package Control')
disabled = []
settings = sublime.load_settings(preferences_filename())
ignored = settings.get('ignored_packages')
if not ignored:
ignored = []
for package in packages:
            if package not in ignored:
ignored.append(package)
disabled.append(package)
settings.set('ignored_packages', ignored)
sublime.save_settings(preferences_filename())
return disabled
def reenable_package(self, package):
"""
Re-enables a package after it has been installed or upgraded
:param package: The string package name
"""
settings = sublime.load_settings(preferences_filename())
ignored = settings.get('ignored_packages')
if not ignored:
return
if package in ignored:
settings.set('ignored_packages',
list(set(ignored) - set([package])))
sublime.save_settings(preferences_filename())
def on_done(self, picked):
"""
Quick panel user selection handler - disables a package, installs or
upgrades it, then re-enables the package
:param picked:
An integer of the 0-based package name index from the presented
list. -1 means the user cancelled.
"""
if picked == -1:
return
name = self.package_list[picked][0]
if name in self.disable_packages(name):
on_complete = lambda: self.reenable_package(name)
else:
on_complete = None
thread = PackageInstallerThread(self.manager, name, on_complete)
thread.start()
ThreadProgress(thread, 'Installing package %s' % name,
'Package %s successfully %s' % (name, self.completion_type))
class PackageInstallerThread(threading.Thread):
"""
A thread to run package install/upgrade operations in so that the main
Sublime Text thread does not get blocked and freeze the UI
"""
def __init__(self, manager, package, on_complete):
"""
:param manager:
An instance of :class:`PackageManager`
:param package:
The string package name to install/upgrade
:param on_complete:
A callback to run after installing/upgrading the package
"""
self.package = package
self.manager = manager
self.on_complete = on_complete
threading.Thread.__init__(self)
def run(self):
try:
self.result = self.manager.install_package(self.package)
finally:
if self.on_complete:
sublime.set_timeout(self.on_complete, 1)
|
485094
|
import unittest
from decorator import *
class DecoratorTest(unittest.TestCase):
def test_pagina_exitoso(self):
pagina_1 = PaginaWeb(
url = "https://instaprint.es/",
ruta = "/epages/",
formato = "HTML",
contenido = '<a href= "https://instaprint.es/epages/342de146-c59c-467f-1664/Categories/Contactanos/Nuestras_Tiendas">"Visita Nuestros Nuevos centros de impresión y recojidas en Barcelona y alrededores."</a>',
titulo = '<h1 style="vertical-align: inherit;">Instaprint: Imprenta Online 24 hrs</h1>',
slug = "instaprint-imprenta-online-24-hrs",
meta_tags= ['<meta name="Nombre del elemento" content="Contenido asignado"/>','<meta charset="utf-8"/>', '<meta name="robots" content="index"/>'])
sitio_web = SitioWeb(
dominio= 'https://admin.dominiomuestra.com',
categoria= 'Comerciales',
paginas= [pagina_1]
)
pagina_buscar=BuscadorConcreteDecorator(sitio_web)
self.assertEqual(pagina_buscar.buscador(pagina_1),"La página existe")
def test_pagina_pagina_no_encontrada(self):
pagina_1 = PaginaWeb(
url = "https://instaprint.es/",
ruta = "/epages/",
formato = "HTML",
contenido = '<a href= "https://instaprint.es/epages/342de146-c59c-467f-1664/Categories/Contactanos/Nuestras_Tiendas">"Visita Nuestros Nuevos centros de impresión y recojidas en Barcelona y alrededores."</a>',
titulo = '<h1 style="vertical-align: inherit;">Instaprint: Imprenta Online 24 hrs</h1>',
slug = "instaprint-imprenta-online-24-hrs",
meta_tags= ['<meta name="Nombre del elemento" content="Contenido asignado"/>','<meta charset="utf-8"/>', '<meta name="robots" content="index"/>'])
pagina_2 = PaginaWeb(
url = "https://saviabruta.com/",
ruta = "/root/",
formato = "HTML",
contenido = '<p>Crear un estudio de diseño floral en Madrid dedicado a la creación de composiciones florales hechos con cariño y atención al detalle.</p>',
titulo = '<h1 class="elementor-heading-title elementor-size-default">Floristería<br>en Madrid</h1>',
slug = "florerista-en-madrid",
meta_tags= ['<meta name="robots" content="max-image-preview:large">','<meta charset="utf-8"/>','<meta property="og:type" content="website">'])
sitio_web = SitioWeb(
dominio= 'https://admin.dominiomuestra.com',
categoria= 'Comerciales',
paginas= [pagina_1]
)
pagina_buscar=BuscadorConcreteDecorator(sitio_web)
self.assertEqual(pagina_buscar.buscador(pagina_2),"No existe la página")
if __name__=="__main__":
unittest.main()
|
485127
|
import binascii
import logging
import io
import struct
from pycoin.block import Block, BlockHeader
from pycoin.encoding import double_sha256
from pycoin.serialize import b2h_rev, bitcoin_streamer
from pycoin.tx.Tx import Tx
from pycoinnet.InvItem import InvItem
from pycoinnet.PeerAddress import PeerAddress
# definitions of message structures and types
# L: 4 byte long integer
# Q: 8 byte long integer
# S: unicode string
# [v]: array of InvItem objects
# [LA]: array of (L, PeerAddress) tuples
# b: boolean
# A: PeerAddress object
# B: Block object
# T: Tx object
MESSAGE_STRUCTURES = {
'version': (
"version:L services:Q timestamp:Q remote_address:A local_address:A"
" nonce:Q subversion:S last_block_index:L"
),
'verack': "",
'addr': "date_address_tuples:[LA]",
'inv': "items:[v]",
'getdata': "items:[v]",
'notfound': "items:[v]",
'getblocks': "version:L hashes:[#] hash_stop:#",
'getheaders': "version:L hashes:[#] hash_stop:#",
'tx': "tx:T",
'block': "block:B",
'headers': "headers:[zI]",
'getaddr': "",
'mempool': "",
# 'checkorder': obsolete
# 'submitorder': obsolete
# 'reply': obsolete
'ping': "nonce:Q",
'pong': "nonce:Q",
'filterload': "filter:[1] hash_function_count:L tweak:L flags:b",
'filteradd': "data:[1]",
'filterclear': "",
'merkleblock': (
"header:z total_transactions:L hashes:[#] flags:[1]"
),
'alert': "payload:S signature:S",
}
def _make_parser(the_struct=''):
def f(message_stream):
struct_items = [s.split(":") for s in the_struct.split()]
names = [s[0] for s in struct_items]
types = ''.join(s[1] for s in struct_items)
return bitcoin_streamer.parse_as_dict(names, types, message_stream)
return f
def _message_parsers():
return dict((k, _make_parser(v)) for k, v in MESSAGE_STRUCTURES.items())
def fixup_merkleblock(d, f):
def recurse(level_widths, level_index, node_index, hashes, flags, flag_index, tx_acc):
idx, r = divmod(flag_index, 8)
mask = (1 << r)
flag_index += 1
if flags[idx] & mask == 0:
h = hashes.pop()
return h, flag_index
if level_index == len(level_widths) - 1:
h = hashes.pop()
tx_acc.append(h)
return h, flag_index
# traverse the left
left_hash, flag_index = recurse(
level_widths, level_index+1, node_index*2, hashes, flags, flag_index, tx_acc)
# is there a right?
if node_index*2+1 < level_widths[level_index+1]:
right_hash, flag_index = recurse(
level_widths, level_index+1, node_index*2+1, hashes, flags, flag_index, tx_acc)
if left_hash == right_hash:
raise ValueError("merkle hash has same left and right value at node %d" % node_index)
else:
right_hash = left_hash
return double_sha256(left_hash + right_hash), flag_index
level_widths = []
count = d["total_transactions"]
while count > 1:
level_widths.append(count)
count += 1
count //= 2
level_widths.append(1)
level_widths.reverse()
tx_acc = []
flags = d["flags"]
hashes = list(reversed(d["hashes"]))
left_hash, flag_index = recurse(level_widths, 0, 0, hashes, flags, 0, tx_acc)
if len(hashes) > 0:
raise ValueError("extra hashes: %s" % hashes)
idx, r = divmod(flag_index-1, 8)
if idx != len(flags) - 1:
raise ValueError("not enough flags consumed")
if flags[idx] > (1 << (r+1))-1:
raise ValueError("unconsumed 1 flag bits set")
if left_hash != d["header"].merkle_root:
raise ValueError(
"merkle root %s does not match calculated hash %s" % (
b2h_rev(d["header"].merkle_root), b2h_rev(left_hash)))
d["tx_hashes"] = tx_acc
return d
def _message_fixups():
def fixup_version(d, f):
if d["version"] >= 70001:
b = f.read(1)
if len(b) > 0:
d["relay"] = (ord(b) != 0)
return d
alert_submessage_parser = _make_parser(
"version:L relayUntil:Q expiration:Q id:L cancel:L setCancel:[L] minVer:L "
"maxVer:L setSubVer:[S] priority:L comment:S statusBar:S reserved:S")
def fixup_alert(d, f):
d1 = alert_submessage_parser(io.BytesIO(d["payload"]))
d["alert_info"] = d1
return d
return dict(version=fixup_version, alert=fixup_alert, merkleblock=fixup_merkleblock)
def _make_parse_from_data():
def init_bitcoin_streamer():
more_parsing = [
("A", (PeerAddress.parse, lambda f, peer_addr: peer_addr.stream(f))),
("v", (InvItem.parse, lambda f, inv_item: inv_item.stream(f))),
("T", (Tx.parse, lambda f, tx: tx.stream(f))),
("B", (Block.parse, lambda f, block: block.stream(f))),
("z", (BlockHeader.parse, lambda f, blockheader: blockheader.stream(f))),
("1", (lambda f: struct.unpack("B", f.read(1))[0], lambda f, b: f.write(struct.pack("B", b)))),
]
bitcoin_streamer.BITCOIN_STREAMER.register_functions(more_parsing)
init_bitcoin_streamer()
MESSAGE_PARSERS = _message_parsers()
MESSAGE_FIXUPS = _message_fixups()
def parse_from_data(message_name, data):
message_stream = io.BytesIO(data)
parser = MESSAGE_PARSERS.get(message_name)
if parser:
d = parser(message_stream)
fixup = MESSAGE_FIXUPS.get(message_name)
if fixup:
d = fixup(d, message_stream)
else:
logging.error("unknown message: %s %s", message_name, binascii.hexlify(data))
d = {}
return d
return parse_from_data
parse_from_data = _make_parse_from_data()
def pack_from_data(message_name, **kwargs):
the_struct = MESSAGE_STRUCTURES[message_name]
if not the_struct:
return b''
f = io.BytesIO()
the_fields = the_struct.split(" ")
pairs = [t.split(":") for t in the_fields]
for name, type in pairs:
if type[0] == '[':
bitcoin_streamer.BITCOIN_STREAMER.stream_struct("I", f, len(kwargs[name]))
for v in kwargs[name]:
if not isinstance(v, (tuple, list)):
v = [v]
bitcoin_streamer.BITCOIN_STREAMER.stream_struct(type[1:-1], f, *v)
else:
bitcoin_streamer.BITCOIN_STREAMER.stream_struct(type, f, kwargs[name])
return f.getvalue()
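# Illustrative self-check sketch, not part of the original module: a "ping"
# message should survive a pack/parse round trip using the helpers defined above.
if __name__ == '__main__':
    packed = pack_from_data("ping", nonce=1337)
    parsed = parse_from_data("ping", packed)
    assert parsed["nonce"] == 1337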
|
485150
|
from collections import defaultdict
import json
import re
import sys
from bs4 import BeautifulSoup
import dateparser
from etaprogress.progress import ProgressBar
import requests
###################
# SCRAP FILM LIST #
###################
def parse_list_page(page_url):
r = requests.get(page_url)
soup = BeautifulSoup(r.text, 'html.parser')
films = soup.find_all("a", {"class": "meta-title-link"})
return [f.get('href') for f in films]
def get_film_urls(root_url, max_page=None):
list_url = "{root}/films".format(root=root_url)
r = requests.get(list_url)
soup = BeautifulSoup(r.text, 'html.parser')
pagination = soup.find("div", {"class": "pagination-item-holder"})
pages = pagination.find_all("span")
page_number = int([page.text for page in pages][-1])
if max_page:
if max_page > page_number:
print("Error: max_page is greater than the actual number of pages")
return []
else:
page_number = max_page
out_urls = []
bar = ProgressBar(page_number, max_width=40)
for page_id in range(1, page_number+1):
# Log progress
bar.numerator = page_id
print(bar, end='\r')
sys.stdout.flush()
# Extend out list with new urls
page_url = "{list_url}/?page={page_num}".format(
list_url=list_url,
page_num=page_id)
film_urls = parse_list_page(page_url)
out_urls.extend(film_urls)
return out_urls
###################
# SCRAP FILM PAGE #
###################
def format_text(comment):
output_text = ""
for content in comment.contents:
content_str = str(content)
content_soup = BeautifulSoup(content_str, 'html.parser')
spoiler = content_soup.find("span", {"class": "spoiler-content"})
if spoiler:
output_text += spoiler.text.strip()
else:
output_text += content_str.strip()
return output_text
def parse_film_page(page_url):
ratings, reviews, dates, helpfuls = [], [], [], []
r = requests.get(page_url)
soup = BeautifulSoup(r.text, 'html.parser')
# We iterate on reviews to avoid other reviews (Meilleurs films à l'affiche)
for rating_parent in soup.find_all("div", {"class": "review-card-review-holder"}):
rating_raw = rating_parent.find("span", {"class": "stareval-note"}) # <span class="stareval-note">4,0</span>
rating_str = str(rating_raw.contents)[2:5] # "4,0"
rating = float(rating_str.replace(',', '.')) # 4.0
ratings.append(rating)
for review_raw in soup.find_all("div", attrs={"class": "content-txt review-card-content"}):
review_text = format_text(review_raw)
reviews.append(review_text)
for date_raw in soup.find_all("span", attrs={"class": "review-card-meta-date light"}):
date_str = date_raw.text.strip() # Publiée le 24 mai 2011
date_str = date_str[11:] # 24 mai 2011
date = dateparser.parse(date_str).date() # 2011-05-24
dates.append(date)
for helpful_raw in soup.find_all("div", {"class": "reviews-users-comment-useful js-useful-reviews"}):
helpful_str = helpful_raw.get("data-statistics") # "{"helpfulCount":21,"unhelpfulCount":0}"
helpful_dic = json.loads(helpful_str) # {"helpfulCount": 21, "unhelpfulCount": 0}
helpful = [helpful_dic["helpfulCount"], helpful_dic["unhelpfulCount"]] # [21, 0]
helpfuls.append(helpful)
return ratings, reviews, dates, helpfuls
def parse_film(film_url, max_reviews=None):
ratings, reviews, dates, helpfuls = [], [], [], []
r = requests.get(film_url)
if r.status_code == requests.codes.not_found:
        # if url is not found: film does not exist
print("Error code {}. Skipping: {}".format(
r.status_code,
film_url
))
return None
elif len(r.history) > 1:
# if there is more than one element in history, the request was redirected
        # and that means there is no "critiques/spectateurs" page
return None
soup = BeautifulSoup(r.text, 'html.parser')
# print("> Film url: " + film_url)
# Find number of pages
pagination = soup.find("div", {"class": "pagination-item-holder"})
page_number = 1
if pagination:
pages = pagination.find_all("span")
page_number = int([page.text for page in pages][-1])
# print(" pages: " + str(page_number))
# Iterate over pages
for page_id in range(1, page_number+1):
page_url = "{film_url}/?page={page_num}".format(
film_url=film_url,
page_num=page_id)
p_ratings, p_reviews, p_dates, p_helpfuls = parse_film_page(page_url)
ratings.extend(p_ratings)
reviews.extend(p_reviews)
dates.extend(p_dates)
helpfuls.extend(p_helpfuls)
if max_reviews and len(ratings) > max_reviews:
return (ratings[:max_reviews], reviews[:max_reviews],
dates[:max_reviews], helpfuls[:max_reviews])
return (ratings, reviews, dates, helpfuls)
def get_film_reviews(root_url, urls, max_reviews_per_film=None):
allocine_dic = defaultdict(list)
bar = ProgressBar(len(urls), max_width=40)
for i, url in enumerate(urls):
# Log progress
bar.numerator = i + 1
print(bar, end='\r')
sys.stdout.flush()
film_id = re.findall(r'\d+', url)[0]
film_url = "{root}/film/fichefilm-{film_id}/critiques/spectateurs".format(
root=root_url,
film_id=film_id
)
parse_output = parse_film(film_url, max_reviews_per_film)
if parse_output:
ratings, reviews, dates, helpfuls = parse_output
# Rarely happens
if not(len(ratings) == len(reviews) == len(dates) ==
len(helpfuls)):
print("Error: film-url: " + film_url)
continue
allocine_dic['film-url'].extend(len(ratings)*[film_url])
allocine_dic['rating'].extend(ratings)
allocine_dic['review'].extend(reviews)
allocine_dic['date'].extend(dates)
allocine_dic['helpful'].extend([h[0] for h in helpfuls])
allocine_dic['unhelpful'].extend([h[1] for h in helpfuls])
return allocine_dic
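# Illustrative usage sketch, not part of the original module. ROOT_URL is an
# assumption based on the film-page paths used above, and running this block
# performs real HTTP requests against the site.
if __name__ == '__main__':
    ROOT_URL = "http://www.allocine.fr"
    film_urls = get_film_urls(ROOT_URL, max_page=1)
    data = get_film_reviews(ROOT_URL, film_urls[:2], max_reviews_per_film=5)
    print(len(data["review"]), "reviews scraped")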
|
485168
|
VERSION = (0, 5, 3)
default_app_config = 'django_url_framework.apps.URLFrameworkAppConfig'
# Dynamically calculate the version based on VERSION tuple
if len(VERSION)>2 and VERSION[2] is not None:
str_version = "%d.%d.%s" % VERSION[:3]
else:
str_version = "%d.%d" % VERSION[:2]
__version__ = str_version
def reraise(exception, info=None):
import sys
raise exception.with_traceback(sys.exc_info()[-1])
try:
from .site import Site
from .controller import ActionController
from .exceptions import InvalidActionError
from .exceptions import InvalidControllerError
from .helper import ApplicationHelper
site = Site()
except ImportError as e:
#todo this is an ugly hack for setuptools to load version, fix
print(e)
|
485191
|
from cleo.helpers import argument
from cleo.helpers import option
from poetry.app.relaxed_poetry import rp
from ..command import Command
class SelfUpdateCommand(Command):
name = "self update"
description = "Updates Relaxed-Poetry to the latest version."
arguments = [argument("version", "The version to update to.", optional=True)]
options = [
option(
"dry-run",
None,
"Output the operations but do not execute anything "
"(implicitly enables --verbose).",
),
]
def handle(self) -> int:
rp.update_installation(self.argument("version"), self.option("dry-run"))
return 0
|